network general indent
koriavinash1 committed Jan 31, 2018
1 parent e6b2be9 commit 09b27a4
Showing 2 changed files with 146 additions and 154 deletions.
Empty file added src/runner.py
300 changes: 146 additions & 154 deletions src/train_utils.py
@@ -3,191 +3,183 @@


def getWeightAndBias(weights_shape, bias_shape, collection_name, non_zero_bias=True):
    """
    TODO: check/change the initializer.
    """
    # He (MSRA) initialization, scaled for the fan-in of the layer.
    initializer = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False, seed=None, dtype=tf.float32)
    W = tf.get_variable(shape=weights_shape, name='weight_matrix',
                        initializer=initializer)
    tf.add_to_collection('l2_norm_variables', W)
    tf.add_to_collection(collection_name, W)

    if non_zero_bias:
        # Trainable bias, initialized to zero.
        bias_initializer = tf.zeros_initializer()
        b = tf.get_variable(name='biases', shape=bias_shape, initializer=bias_initializer)
        tf.add_to_collection('l2_norm_variables', b)
        tf.add_to_collection(collection_name, b)
    else:
        # Non-trainable, constant zero bias.
        b = tf.constant(0.0, name='constant_zero_biases', shape=bias_shape)
    return W, b
"""
TODO: Check Change initializer
"""
initializer = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN', uniform=False, seed=None, dtype=tf.float32)
W = tf.get_variable(shape = weights_shape, name = 'weight_matrix',
initializer = initializer)
tf.add_to_collection('l2_norm_variables',W)
tf.add_to_collection(collection_name,W)

if non_zero_bias:
bias_initializer = tf.zeros_initializer()
b = tf.get_variable(name = 'biases', shape = bias_shape, initializer = bias_initializer)
tf.add_to_collection('l2_norm_variables',b)
tf.add_to_collection(collection_name,b)
else:
b = tf.constant(0.0, name = 'constant_zero_biases', shape = bias_shape)
return W, b
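# A minimal usage sketch, assuming a hypothetical 3x3 convolution with 1 input
# and 16 output channels; getWeightAndBias uses tf.get_variable, so each call
# should live in its own variable scope to avoid name collisions.
with tf.variable_scope('example_conv1'):
    example_W, example_b = getWeightAndBias(
        weights_shape=[3, 3, 1, 16],   # [kernel_h, kernel_w, in_channels, out_channels]
        bias_shape=[16],
        collection_name='example_collection')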

def getInputsPlaceholder(*shape):
    return tf.placeholder(tf.float32, shape, name='Inputs')

def getTargetsPlaceholder(*shape):
    return tf.placeholder(tf.uint8, shape, name='Targets')

def OneHot(targets, num_class):
    return tf.one_hot(targets, num_class, 1, 0)

def Softmax(logits):
    return tf.nn.softmax(logits, name='softmax')

def Dropout(inputs, is_training, keep_prob=0.7):
    # Use the training keep probability only when is_training is True; otherwise 1.0 (no dropout).
    keep_prob_pl = tf.cond(is_training, lambda: tf.constant(keep_prob), lambda: tf.constant(1.0))
    return tf.nn.dropout(inputs, keep_prob_pl)

def Conv2D(inputs, weights_shape, collection_name='', stride=1, padding='VALID', non_zero_bias=True):
    """
    ### TO-DO ###
    Support a specific padding value if needed.
    """
    strides = [1, stride, stride, 1]

    # weights_shape = [kernel_h, kernel_w, in_channels, out_channels]
    W_shape = weights_shape
    b_shape = [weights_shape[3]]
    W, b = getWeightAndBias(W_shape, b_shape, collection_name=collection_name, non_zero_bias=non_zero_bias)
    output = tf.nn.conv2d(inputs, W, strides, padding)
    output = tf.add(output, b, name='add_bias')
    return output
"""
### TO-DO ###
If we want some specific padding value
"""
strides = [1,stride,stride,1]

W_shape = weights_shape
b_shape = [weights_shape[3]]
W, b = getWeightAndBias(W_shape, b_shape,collection_name = collection_name, non_zero_bias=non_zero_bias)
output = tf.nn.conv2d(inputs, W, strides, padding)
output = tf.add(output, b, name ='add_bias')
return output
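# A minimal sketch tying the helpers together, assuming a hypothetical batch of
# 32x32 grayscale images; all names and shapes here are illustrative only.
example_inputs = getInputsPlaceholder(None, 32, 32, 1)
with tf.variable_scope('example_conv_block'):
    example_conv = Conv2D(example_inputs, [3, 3, 1, 16], collection_name='example_collection',
                          stride=1, padding='SAME')
    example_act = tf.nn.elu(example_conv)   # 'SAME' padding keeps the 32x32 spatial size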

def TransposeConv2D(inputs, n_filters_keep, collection_name='', filter_size=(3, 3), stride=(2, 2), padding='SAME', non_zero_bias=True):
    """
    ### TO-DO ###
    """
    def deconv_output_length(input_length, filter_size, padding, stride):
        """Determines the output length of a transposed convolution given its input length.
        Arguments:
            input_length: integer.
            filter_size: integer.
            padding: one of "SAME", "VALID", "FULL".
            stride: integer.
        Returns:
            The output length (integer).
        """
        if input_length is None:
            return None

        input_length *= stride
        if padding == 'VALID':
            input_length += max(filter_size - stride, 0)
        elif padding == 'FULL':
            input_length -= (stride + filter_size - 2)
        return input_length

    input_shape = tf.shape(inputs)
    batch_size, height, width = input_shape[0], input_shape[1], input_shape[2]
    kernel_h, kernel_w = filter_size
    stride_h, stride_w = stride

    # Infer the dynamic output shape:
    out_height = deconv_output_length(height, kernel_h, padding, stride_h)
    out_width = deconv_output_length(width, kernel_w, padding, stride_w)

    output_shape = (batch_size, out_height, out_width, n_filters_keep)
    output_shape_tensor = tf.stack(output_shape)
    strides = [1, stride_h, stride_w, 1]
    # conv2d_transpose expects filters as [kernel_h, kernel_w, out_channels, in_channels].
    W_shape = [kernel_h, kernel_w, n_filters_keep, inputs.get_shape()[-1].value]
    b_shape = [n_filters_keep]
    W, b = getWeightAndBias(W_shape, b_shape, collection_name=collection_name, non_zero_bias=non_zero_bias)
    output = tf.nn.conv2d_transpose(inputs, W, output_shape_tensor, strides, padding=padding)
    output = tf.add(output, b, name='add_bias')
    return output
"""
### TO-DO ###
"""
def deconv_output_length(input_length, filter_size, padding, stride):
if input_length is None:
return None

input_length *= stride
if padding == 'VALID':
input_length += max(filter_size - stride, 0)
elif padding == 'FULL':
input_length -= (stride + filter_size - 2)
return input_length

input_shape = tf.shape(inputs)
batch_size, height, width = input_shape[0], input_shape[1], input_shape[2]
kernel_h, kernel_w = filter_size
stride_h, stride_w = stride

# Infer the dynamic output shape:
out_height = deconv_output_length(height,
kernel_h,
padding,
stride_h)
out_width = deconv_output_length(width,
kernel_w,
padding,
stride_w)

output_shape = (batch_size, out_height, out_width, n_filters_keep)
output_shape_tensor = tf.stack(output_shape)
strides = [1, stride_h, stride_w, 1]
W_shape = [kernel_h, kernel_w, n_filters_keep, inputs.get_shape()[-1].value]
b_shape = [n_filters_keep]
W, b = getWeightAndBias(W_shape, b_shape, collection_name = collection_name, non_zero_bias=non_zero_bias)
output = tf.nn.conv2d_transpose (inputs, W, output_shape_tensor, strides, padding=padding)
output = tf.add(output, b, name ='add_bias')
return output
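# Worked example of deconv_output_length (illustrative): for input_length = 10,
# filter_size = 3 and stride = 2, the output length is 10*2 = 20 with 'SAME'
# padding, 20 + max(3-2, 0) = 21 with 'VALID', and 20 - (2+3-2) = 17 with 'FULL'.
# A minimal usage sketch on the hypothetical tensor from the sketch above:
with tf.variable_scope('example_upsample'):
    example_up = TransposeConv2D(example_act, n_filters_keep=8,
                                 collection_name='example_collection')
# With the default (3,3) filters, (2,2) stride and 'SAME' padding this doubles
# the spatial size, e.g. 32x32 -> 64x64.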

def Elu(x):
    return tf.nn.elu(x)

def ReLU(x):
    return tf.nn.relu(x)

def MaxPool2(x):
    output = tf.nn.max_pool(x, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='VALID')
    return output

def BatchNorm(inputs, is_training, decay=0.9, epsilon=1e-3, isEnabled=True):
    # TODO: check the effect of keeping batch norm enabled at all times.
    # It has been observed that batch norm affects the quality of segmentation results.
    if not isEnabled:
        return inputs
    # is_training = tf.constant(True, dtype=tf.bool)

    with tf.device('/cpu:0'):
        scale = tf.get_variable(name='scale', shape=inputs.get_shape()[-1],
                                initializer=tf.constant_initializer(1.0), dtype=tf.float32)
        tf.add_to_collection('l2_norm_variables', scale)
        beta = tf.get_variable(name='beta', shape=inputs.get_shape()[-1],
                               initializer=tf.constant_initializer(0.0), dtype=tf.float32)
        tf.add_to_collection('l2_norm_variables', beta)
        # Running (population) statistics, used at evaluation time.
        pop_mean = tf.Variable(tf.zeros([inputs.get_shape()[-1]]), trainable=False)
        pop_var = tf.Variable(tf.ones([inputs.get_shape()[-1]]), trainable=False)
    axis = list(range(len(inputs.get_shape()) - 1))

    def Train(inputs, pop_mean, pop_var, scale, beta):
        batch_mean, batch_var = tf.nn.moments(inputs, axis)
        # Update the running statistics with an exponential moving average.
        train_mean = tf.assign(pop_mean,
                               pop_mean * decay + batch_mean * (1 - decay))
        train_var = tf.assign(pop_var,
                              pop_var * decay + batch_var * (1 - decay))

        # Currently unused: gap between the running and the batch statistics.
        mean_distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(pop_mean, batch_mean))))
        var_distance = tf.sqrt(tf.reduce_sum(tf.square(tf.subtract(pop_var, batch_var))))

        with tf.control_dependencies([train_mean, train_var]):
            return tf.nn.batch_normalization(inputs,
                                             batch_mean, batch_var, beta, scale, epsilon)

    def Eval(inputs, pop_mean, pop_var, scale, beta):
        return tf.nn.batch_normalization(inputs, pop_mean, pop_var, beta, scale, epsilon)

    return tf.cond(is_training, lambda: Train(inputs, pop_mean, pop_var, scale, beta),
                   lambda: Eval(inputs, pop_mean, pop_var, scale, beta))
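# A minimal sketch of driving BatchNorm with a boolean is_training tensor,
# reusing the hypothetical example_act tensor from the sketch above: the same
# node normalizes with batch statistics during training and with the running
# averages during evaluation.
example_is_training = tf.placeholder_with_default(False, shape=[], name='example_is_training')
with tf.variable_scope('example_bn'):
    example_bn = BatchNorm(example_act, is_training=example_is_training)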

def ConvEluBatchNormDropout(inputs, shape, stride=1, padding='VALID', bn_mode=tf.placeholder_with_default(False, shape=[]), drop_mode=tf.placeholder_with_default(False, shape=[]), keep_prob=0.7, collections=[]):
    # Note: ConvElu does not appear in this diff, and the BatchNorm defined above does not
    # take a `collections` argument, so this wrapper looks out of date with the helpers here.
    return Dropout(BatchNorm(ConvElu(inputs, shape, stride, padding, collections=collections), bn_mode, collections=collections), drop_mode, keep_prob)

def SpatialBilinearUpsampling(x, factor=2):
    shape = [tf.shape(x)[1] * factor, tf.shape(x)[2] * factor]
    return tf.image.resize_bilinear(x, shape)

def TransitionDown(inputs, n_filters, collection_name, keep_prob=0.8, is_training=tf.constant(False, dtype=tf.bool)):
    """ Apply a BN_eLU_Conv layer with filter size 1, followed by 2x2 max pooling. """
    l = BN_eLU_Conv(inputs, n_filters, collection_name=collection_name, filter_size=1, keep_prob=keep_prob, is_training=is_training)
    l = MaxPool2(l)

    return l

def BN_eLU_Conv(inputs, n_filters, collection_name, filter_size=3, keep_prob=0.8, is_training=tf.constant(False, dtype=tf.bool), drop_BN=False, use_elu=True):
    # BatchNorm -> ELU (or ReLU) -> same-padded convolution -> dropout.
    l = inputs
    if not drop_BN:
        l = BatchNorm(l, is_training=is_training)
    if use_elu:
        l = Elu(l)
    else:
        l = ReLU(l)
    l = Conv2D(l, [filter_size, filter_size, l.get_shape()[-1].value, n_filters], collection_name=collection_name, padding='SAME')
    l = Dropout(l, is_training=is_training, keep_prob=keep_prob)
    return l
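# A minimal sketch of one downsampling step built from these helpers, reusing the
# hypothetical tensors defined in the sketches above; each call gets its own
# variable scope because BatchNorm and Conv2D create named variables.
with tf.variable_scope('example_down'):
    example_feat = BN_eLU_Conv(example_act, n_filters=32, collection_name='example_collection',
                               is_training=example_is_training)
with tf.variable_scope('example_td'):
    example_down = TransitionDown(example_feat, n_filters=32, collection_name='example_collection',
                                  is_training=example_is_training)
# example_down has half the spatial resolution of example_feat (2x2 max pooling).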

def dice_multiclass(output, target, loss_type='sorensen', axis=[0, 1, 2], smooth=1e-5):
    # Intersection between prediction and target, summed over the given axes.
    inse = tf.reduce_sum(output * target, axis=axis)
    if loss_type == 'jaccard':
        l = tf.reduce_sum(output * output, axis=axis)
        r = tf.reduce_sum(target * target, axis=axis)
    elif loss_type == 'sorensen':
        l = tf.reduce_sum(output, axis=axis)
        r = tf.reduce_sum(target, axis=axis)
    else:
        raise Exception("Unknown loss_type")
    # dice = 2 * (inse) / (l + r)
    # epsilon = 1e-5
    # dice = tf.clip_by_value(dice, 0, 1.0-epsilon) # if all empty, dice = 1

    dice = (2. * inse + smooth) / (l + r + smooth)
    ## Attention: with the default axis=[0, 1, 2] this returns one dice/jaccard score
    ## per class, computed over the whole batch.
    # dice = tf.reduce_mean(dice, axis=0)
    return dice
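# A minimal sketch of computing a per-class dice score from network outputs,
# assuming hypothetical logits/label tensors for a 4-class problem; with the
# default axis=[0, 1, 2] the result has shape [num_classes].
example_logits = tf.placeholder(tf.float32, [None, 32, 32, 4], name='example_logits')
example_labels = tf.placeholder(tf.uint8, [None, 32, 32], name='example_labels')
example_probs = Softmax(example_logits)
example_onehot = tf.cast(OneHot(tf.cast(example_labels, tf.int32), 4), tf.float32)
example_dice = dice_multiclass(example_probs, example_onehot)    # shape: [4]
example_dice_loss = 1.0 - tf.reduce_mean(example_dice)           # one common choice of scalar loss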
