# -*- coding: utf-8 -*-
"""
U-Net model definition (TensorFlow 1.x): a four-level encoder-decoder with
skip connections that maps a single-channel input image to a single-channel
output map (Pha_out).
"""
# U-Net
import tensorflow as tf


def weight_variable(shape, name=None):
    # Truncated-normal initialization for all convolution kernels.
    initial = tf.truncated_normal(shape, stddev=0.1)
    return tf.Variable(initial, name=name)
    # initializer = tf.contrib.layers.variance_scaling_initializer()
    # W1 = tf.Variable(initializer(shape))
    # return W1
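

# A minimal alternative sketch (an assumption, not used by the model below):
# tf.variance_scaling_initializer from core TF 1.x provides the same He-style
# scaling as the commented-out tf.contrib initializer above, without the
# contrib dependency.
def weight_variable_he(shape, name=None):
    initializer = tf.variance_scaling_initializer()
    return tf.Variable(initializer(shape), name=name)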


def max_pool_2x2(x):
    # 2x2 max pooling with stride 2 halves both spatial dimensions.
    return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                          strides=[1, 2, 2, 1], padding='SAME')


def inference(inpt, img_W, img_H, batch_size, isTrain):
    # Encoder (downsampling) path.
    inpt = tf.reshape(inpt, [batch_size, img_W, img_H, 1])
    with tf.variable_scope('conv1'):
        # Stride-1 transposed convolution from 1 to 32 channels; note the
        # [h, w, out_channels, in_channels] filter layout of conv2d_transpose.
        W_conv1 = weight_variable([3, 3, 32, 1])
        conv1 = tf.nn.conv2d_transpose(inpt, W_conv1,
                                       output_shape=[batch_size, img_W, img_H, 32],
                                       strides=[1, 1, 1, 1], padding="SAME")
        conv1 = tf.layers.batch_normalization(conv1, training=isTrain)
        conv1 = tf.nn.relu(conv1)
        W_conv1_1 = weight_variable([3, 3, 32, 32])
        conv1_1 = tf.nn.conv2d(conv1, W_conv1_1, strides=[1, 1, 1, 1], padding="SAME")
        conv1_1 = tf.layers.batch_normalization(conv1_1, training=isTrain)
        conv1_1 = tf.nn.leaky_relu(conv1_1)
    with tf.variable_scope('Max_Pooling_1'):
        Maxpool_1 = max_pool_2x2(conv1_1)
    with tf.variable_scope('conv2'):
        W_conv2 = weight_variable([3, 3, 32, 64])
        conv2 = tf.nn.conv2d(Maxpool_1, W_conv2, strides=[1, 1, 1, 1], padding="SAME")
        conv2 = tf.layers.batch_normalization(conv2, training=isTrain)
        conv2 = tf.nn.leaky_relu(conv2)
        W_conv2_1 = weight_variable([3, 3, 64, 64])
        conv2_1 = tf.nn.conv2d(conv2, W_conv2_1, strides=[1, 1, 1, 1], padding="SAME")
        conv2_1 = tf.layers.batch_normalization(conv2_1, training=isTrain)
        conv2_1 = tf.nn.leaky_relu(conv2_1)
    with tf.variable_scope('Max_Pooling_2'):
        Maxpool_2 = max_pool_2x2(conv2_1)
    with tf.variable_scope('conv3'):
        W_conv3 = weight_variable([3, 3, 64, 128])
        conv3 = tf.nn.conv2d(Maxpool_2, W_conv3, strides=[1, 1, 1, 1], padding="SAME")
        conv3 = tf.layers.batch_normalization(conv3, training=isTrain)
        conv3 = tf.nn.relu(conv3)
        W_conv3_1 = weight_variable([3, 3, 128, 128])
        conv3_1 = tf.nn.conv2d(conv3, W_conv3_1, strides=[1, 1, 1, 1], padding="SAME")
        conv3_1 = tf.layers.batch_normalization(conv3_1, training=isTrain)
        conv3_1 = tf.nn.leaky_relu(conv3_1)
    with tf.variable_scope('Max_Pooling_3'):
        Maxpool_3 = max_pool_2x2(conv3_1)
    with tf.variable_scope('conv4'):
        W_conv4 = weight_variable([3, 3, 128, 256])
        conv4 = tf.nn.conv2d(Maxpool_3, W_conv4, strides=[1, 1, 1, 1], padding="SAME")
        conv4 = tf.layers.batch_normalization(conv4, training=isTrain)
        conv4 = tf.nn.leaky_relu(conv4)
        W_conv4_1 = weight_variable([3, 3, 256, 256])
        conv4_1 = tf.nn.conv2d(conv4, W_conv4_1, strides=[1, 1, 1, 1], padding="SAME")
        conv4_1 = tf.layers.batch_normalization(conv4_1, training=isTrain)
        conv4_1 = tf.nn.leaky_relu(conv4_1)
    with tf.variable_scope('Max_Pooling_4'):
        Maxpool_4 = max_pool_2x2(conv4_1)
    with tf.variable_scope('conv5'):
        W_conv5 = weight_variable([3, 3, 256, 512])
        conv5 = tf.nn.conv2d(Maxpool_4, W_conv5, strides=[1, 1, 1, 1], padding="SAME")
        conv5 = tf.layers.batch_normalization(conv5, training=isTrain)
        conv5 = tf.nn.leaky_relu(conv5)
        W_conv5_1 = weight_variable([3, 3, 512, 512])
        conv5_1 = tf.nn.conv2d(conv5, W_conv5_1, strides=[1, 1, 1, 1], padding="SAME")
        conv5_1 = tf.layers.batch_normalization(conv5_1, training=isTrain)
        conv5_1 = tf.nn.leaky_relu(conv5_1)
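
    # Feature-map shapes at this point (B = batch_size):
    #   conv1_1: [B, W,    H,    32]    conv2_1: [B, W/2,  H/2,  64]
    #   conv3_1: [B, W/4,  H/4,  128]   conv4_1: [B, W/8,  H/8,  256]
    #   conv5_1: [B, W/16, H/16, 512]
    # The decoder below mirrors the encoder: each stride-2 conv2d_transpose
    # doubles the spatial size, and its output is concatenated along the
    # channel axis with the matching encoder map (the U-Net skip connection).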
    with tf.variable_scope('conv6'):
        W_conv6 = weight_variable([3, 3, 256, 512])
        # Upsample to [W/8, H/8] and fuse with the conv4 skip connection.
        conv6 = tf.nn.conv2d_transpose(conv5_1, W_conv6,
                                       output_shape=[batch_size, int(img_W/8), int(img_H/8), 256],
                                       strides=[1, 2, 2, 1], padding="SAME")
        conv6 = tf.layers.batch_normalization(conv6, training=isTrain)
        conv6 = tf.nn.leaky_relu(conv6)
        merge1 = tf.concat([conv4_1, conv6], axis=3)
        W_conv6_1 = weight_variable([3, 3, 512, 256])
        conv6_1 = tf.nn.conv2d(merge1, W_conv6_1, strides=[1, 1, 1, 1], padding="SAME")
        conv6_1 = tf.layers.batch_normalization(conv6_1, training=isTrain)
        conv6_1 = tf.nn.leaky_relu(conv6_1)
        W_conv6_2 = weight_variable([3, 3, 256, 256])
        conv6_2 = tf.nn.conv2d(conv6_1, W_conv6_2, strides=[1, 1, 1, 1], padding="SAME")
        conv6_2 = tf.layers.batch_normalization(conv6_2, training=isTrain)
        conv6_2 = tf.nn.leaky_relu(conv6_2)
    with tf.variable_scope('conv7'):
        W_conv7 = weight_variable([3, 3, 128, 256])
        conv7 = tf.nn.conv2d_transpose(conv6_2, W_conv7,
                                       output_shape=[batch_size, int(img_W/4), int(img_H/4), 128],
                                       strides=[1, 2, 2, 1], padding="SAME")
        conv7 = tf.layers.batch_normalization(conv7, training=isTrain)
        conv7 = tf.nn.leaky_relu(conv7)
        merge2 = tf.concat([conv3_1, conv7], axis=3)
        W_conv7_1 = weight_variable([3, 3, 256, 128])
        conv7_1 = tf.nn.conv2d(merge2, W_conv7_1, strides=[1, 1, 1, 1], padding="SAME")
        conv7_1 = tf.layers.batch_normalization(conv7_1, training=isTrain)
        conv7_1 = tf.nn.leaky_relu(conv7_1)
        W_conv7_2 = weight_variable([3, 3, 128, 128])
        conv7_2 = tf.nn.conv2d(conv7_1, W_conv7_2, strides=[1, 1, 1, 1], padding="SAME")
        conv7_2 = tf.layers.batch_normalization(conv7_2, training=isTrain)
        conv7_2 = tf.nn.leaky_relu(conv7_2)
    with tf.variable_scope('conv8'):
        W_conv8 = weight_variable([3, 3, 64, 128])
        conv8 = tf.nn.conv2d_transpose(conv7_2, W_conv8,
                                       output_shape=[batch_size, int(img_W/2), int(img_H/2), 64],
                                       strides=[1, 2, 2, 1], padding="SAME")
        conv8 = tf.layers.batch_normalization(conv8, training=isTrain)
        conv8 = tf.nn.leaky_relu(conv8)
        merge3 = tf.concat([conv2_1, conv8], axis=3)
        W_conv8_1 = weight_variable([3, 3, 128, 64])
        conv8_1 = tf.nn.conv2d(merge3, W_conv8_1, strides=[1, 1, 1, 1], padding="SAME")
        conv8_1 = tf.layers.batch_normalization(conv8_1, training=isTrain)
        conv8_1 = tf.nn.leaky_relu(conv8_1)
        W_conv8_2 = weight_variable([3, 3, 64, 64])
        conv8_2 = tf.nn.conv2d(conv8_1, W_conv8_2, strides=[1, 1, 1, 1], padding="SAME")
        conv8_2 = tf.layers.batch_normalization(conv8_2, training=isTrain)
        conv8_2 = tf.nn.leaky_relu(conv8_2)
    with tf.variable_scope('conv9'):
        W_conv9 = weight_variable([3, 3, 32, 64])
        conv9 = tf.nn.conv2d_transpose(conv8_2, W_conv9,
                                       output_shape=[batch_size, img_W, img_H, 32],
                                       strides=[1, 2, 2, 1], padding="SAME")
        conv9 = tf.layers.batch_normalization(conv9, training=isTrain)
        conv9 = tf.nn.leaky_relu(conv9)
        merge4 = tf.concat([conv1_1, conv9], axis=3)
        W_conv9_1 = weight_variable([3, 3, 64, 32])
        conv9_1 = tf.nn.conv2d(merge4, W_conv9_1, strides=[1, 1, 1, 1], padding="SAME")
        conv9_1 = tf.layers.batch_normalization(conv9_1, training=isTrain)
        conv9_1 = tf.nn.leaky_relu(conv9_1)
        W_conv9_2 = weight_variable([3, 3, 32, 32])
        conv9_2 = tf.nn.conv2d(conv9_1, W_conv9_2, strides=[1, 1, 1, 1], padding="SAME")
        conv9_2 = tf.layers.batch_normalization(conv9_2, training=isTrain)
        conv9_2 = tf.nn.leaky_relu(conv9_2)
    with tf.variable_scope('conv10'):
        # Final 1-channel projection, squashed to (0, 1) by a sigmoid.
        W_conv10 = weight_variable([3, 3, 32, 1])
        conv10 = tf.nn.conv2d(conv9_2, W_conv10, strides=[1, 1, 1, 1], padding="SAME")
        conv10 = tf.layers.batch_normalization(conv10, training=isTrain)
        conv10 = tf.nn.sigmoid(conv10)
    with tf.variable_scope('output'):
        # Amp_out = conv10[:, :, :, 0]
        Pha_out = tf.reshape(conv10, [batch_size, img_W, img_H, 1])
        # Pha_out = tf.reshape(Pha_out, [img_W, img_H])
        # Amp_out = tf.abs(Amp_out)
        # Pha_out = tf.abs(Pha_out)
    return Pha_out
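

# A usage sketch, not part of the original file: the placeholder names, the
# 128x128 input size, and the plain MSE objective are assumptions chosen for
# illustration. Because the model uses tf.layers.batch_normalization with
# training=isTrain, the moving-average update ops collected in
# tf.GraphKeys.UPDATE_OPS must run alongside each training step.
if __name__ == '__main__':
    batch_size, img_W, img_H = 4, 128, 128
    images = tf.placeholder(tf.float32, [batch_size, img_W, img_H, 1])
    labels = tf.placeholder(tf.float32, [batch_size, img_W, img_H, 1])
    is_train = tf.placeholder(tf.bool)

    pred = inference(images, img_W, img_H, batch_size, is_train)
    loss = tf.reduce_mean(tf.square(pred - labels))  # assumed MSE objective

    # Run batch-norm statistics updates together with the optimizer step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)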