Skip to content

Commit

Permalink
shortcut
Browse files Browse the repository at this point in the history
  • Loading branch information
zhangpf2 committed Jan 30, 2018
1 parent 6439d16 commit 0bb457c
Show file tree
Hide file tree
Showing 2 changed files with 11 additions and 11 deletions.
8 changes: 4 additions & 4 deletions model.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ def _nets(self, X, reuse=False):
with tf.variable_scope('mobilenetv2', reuse=reuse):
net = conv2d_block(X, 32, 3, 2, w_d, is_train, name='conv1_1') # size/2

net = res_block(net, exp, 16, 1, w_d, is_train, name='res1_1', shortcut=False)
net = res_block(net, exp, 16, 1, w_d, is_train, name='res1_1')

net = res_block(net, exp, 24, 2, w_d, is_train, name='res2_1') # size/4
net = res_block(net, exp, 24, 1, w_d, is_train, name='res2_2')
Expand All @@ -94,11 +94,11 @@ def _nets(self, X, reuse=False):
net = res_block(net, exp, 160, 1, w_d, is_train, name='res6_2')
net = res_block(net, exp, 160, 1, w_d, is_train, name='res6_3')

net = res_block(net, exp, 320, 1, w_d, is_train, name='res7_1', shortcut=False)
net = res_block(net, exp, 320, 1, w_d, is_train, name='res7_1')

net = pwise_block(net, 1280, w_d, is_train, name='conv8_1')
net = pwise_block(net, 1280, w_d, is_train, name='conv8_1', bias=False)
net = global_avg(net)
logits = flatten(conv_1x1(net, self.n_classes, w_d, name='logits'))
logits = flatten(conv_1x1(net, self.n_classes, w_d, name='logits', bias=False))

pred=tf.nn.softmax(logits, name='prob')
return logits, pred
Expand Down
14 changes: 7 additions & 7 deletions ops.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,15 +15,15 @@ def batch_norm(x, momentum=0.9, epsilon=1e-5, train=True, name='bn'):
scope=name)


def conv2d(input_, output_dim, k_h, k_w, d_h, d_w, weight_decay, stddev=0.02, name='conv2d', bias=True):
    """2-D convolution with SAME padding and L2 weight decay on the kernel.

    Args:
        input_: 4-D input tensor in NHWC layout.
        output_dim: number of output channels.
        k_h, k_w: kernel height and width.
        d_h, d_w: vertical and horizontal strides.
        weight_decay: L2 regularization factor applied to the kernel weights.
        stddev: stddev of the truncated-normal kernel initializer.
        name: variable scope for the layer's variables.
        bias: if True, add a learned per-channel bias. Pass False when the
            convolution is immediately followed by batch norm, which makes a
            bias term redundant (it is absorbed by the BN shift).

    Returns:
        The convolved (and optionally biased) 4-D tensor.
    """
    with tf.variable_scope(name):
        w = tf.get_variable('w', [k_h, k_w, input_.get_shape()[-1], output_dim],
                            initializer=tf.truncated_normal_initializer(stddev=stddev),
                            regularizer=tf.contrib.layers.l2_regularizer(weight_decay))
        conv = tf.nn.conv2d(input_, w, strides=[1, d_h, d_w, 1], padding='SAME')

        if bias:
            biases = tf.get_variable('biases', [output_dim], initializer=tf.constant_initializer(0.0))
            conv = tf.nn.bias_add(conv, biases)

        return conv

Expand All @@ -36,13 +36,13 @@ def conv2d_block(input, out_dim, k, s, weight_decay, is_train, name):
return net


def conv_1x1(input, output_dim, weight_decay, name, bias=True):
    """1x1 (pointwise) convolution: a thin wrapper around conv2d.

    Args:
        input: 4-D input tensor (NHWC).
        output_dim: number of output channels.
        weight_decay: L2 regularization factor forwarded to conv2d.
        name: scope/layer name.
        bias: whether to add a per-channel bias term.

    Returns:
        The 1x1-convolved tensor.

    NOTE(fix): `bias` was accepted but never forwarded to conv2d, so callers
    passing bias=False (e.g. the logits layer) still got a bias term added.
    It is now passed through explicitly.
    """
    with tf.name_scope(name):
        return conv2d(input, output_dim, 1, 1, 1, 1, weight_decay, stddev=0.02, name=name, bias=bias)

def pwise_block(input, output_dim, weight_decay, is_train, name, bias=True):
    """Pointwise block: 1x1 conv -> batch norm -> ReLU.

    Args:
        input: 4-D input tensor (NHWC).
        output_dim: number of output channels of the 1x1 convolution.
        weight_decay: L2 regularization factor forwarded to the convolution.
        is_train: batch-norm training flag (controls moving-statistics update).
        name: scope/layer name.
        bias: whether the 1x1 conv adds a bias. Since batch norm follows,
            bias=False is the mathematically sufficient choice; the default
            stays True for backward compatibility with existing callers.

    Returns:
        The activated tensor.
    """
    with tf.name_scope(name):
        out = conv_1x1(input, output_dim, weight_decay, name, bias)
        out = batch_norm(out, train=is_train, name='pwb')
        out = relu(out)
        return out
Expand Down

0 comments on commit 0bb457c

Please sign in to comment.