# Paths for input data and experiment output.
# NOTE: os.environ values are plain strings -- a literal '$HOME' would never
# be expanded by os.path.join further down, so expand the home directory here.
os.environ['MLP_DATA_DIR'] = '/disk/scratch/mlp/data'
os.environ['OUTPUT_DIR'] = os.path.expanduser('~/experiments')
1515
16-
1716# check necessary environment variables are defined
1817assert 'MLP_DATA_DIR' in os .environ , (
1918 'An environment variable MLP_DATA_DIR must be set to the path containing'
2221 'An environment variable OUTPUT_DIR must be set to the path to write'
2322 ' output to before running script.' )
2423
25-
26-
# load data
# CIFAR-10 providers for the training and validation splits, mini-batches of 100.
train_data = data_providers.CIFAR10DataProvider('train', batch_size=100)
valid_data = data_providers.CIFAR10DataProvider('valid', batch_size=100)
# Materialise the whole validation set once up front; targets as one-hot rows.
valid_inputs = valid_data.inputs
valid_targets = valid_data.to_one_of_k(valid_data.targets)
3229
3330
34-
35-
36-
37-
38-
3931# Convolutional layer with non-linearity
def conv2d(x, w, b, activation=tf.nn.relu, strides=(1, 1, 1, 1), padding='SAME'):
    """2-D convolution followed by a bias add and an elementwise non-linearity.

    Args:
        x: input tensor (batch of feature maps, NHWC layout assumed -- TODO confirm).
        w: convolution filter weights.
        b: per-output-channel bias vector.
        activation: non-linearity applied to the biased convolution output.
        strides: sliding-window stride per dimension; default (1, 1, 1, 1)
            matches the previously hard-coded value.
        padding: 'SAME' (default, matches previous behaviour) or 'VALID'.

    Returns:
        activation(conv2d(x, w) + b)
    """
    conv = tf.nn.conv2d(x, w, strides=list(strides), padding=padding)
    return activation(tf.nn.bias_add(conv, b))
4335
44-
4536# fully connected layer with non-linearity
4637def fc (x , w , b , activation = tf .nn .relu ):
4738 y = tf .nn .bias_add (tf .matmul (x , w ), b )
@@ -50,9 +41,6 @@ def fc(x, w, b, activation=tf.nn.relu):
5041 return y
5142
5243
53-
54-
55-
with tf.name_scope('data'):
    # Flattened CIFAR-10 images: 32*32*3 = 3072 features; batch size dynamic.
    inputs = tf.placeholder(tf.float32, [None, 3072], name='inputs')
    # One-hot class targets, width taken from the data provider.
    targets = tf.placeholder(tf.float32, [None, train_data.num_classes], name='targets')
@@ -116,8 +104,6 @@ def fc(x, w, b, activation=tf.nn.relu):
    # Final fully connected layer producing the class logits.
    # NOTE(review): activation='None' passes the STRING 'None', not the value
    # None -- presumably fc() special-cases it to skip the non-linearity
    # (its body is not fully visible here); verify against fc's definition.
    fc3 = fc(fc2, weights['W_fc3'], biases['b_fc3'], activation='None')
    outputs = fc3
118106
119-
120-
# Anneal to learning rate
# Step counter; incremented once per minimize() call by the optimizer below.
global_step = tf.Variable(0, trainable=False)
# NOTE(review): these boundaries look like epoch counts, but global_step
# counts optimizer steps (batches) -- confirm the intended schedule where
# the piecewise-constant rate is built (not visible in this chunk).
boundaries = [20, 40, 60, 80]
@@ -131,11 +117,6 @@ def fc(x, w, b, activation=tf.nn.relu):
131117 + tf .nn .l2_loss (weights ['W_conv4' ])+ tf .nn .l2_loss (weights ['W_fc1' ])+ tf .nn .l2_loss (weights ['W_fc2' ])+ \
132118 tf .nn .l2_loss (weights ['W_fc3' ])
133119
134-
135-
136-
137-
138-
with tf.name_scope('error'):
    # Mean softmax cross-entropy over the batch, with an L2 weight-decay
    # penalty scaled by 0.001. The penalty is a scalar added to every
    # per-example loss before the mean, which equals adding it once after.
    # Legacy TF positional signature here is (logits, labels).
    error = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(outputs, targets) + 0.001 * weight_decay)
@@ -147,11 +128,8 @@ def fc(x, w, b, activation=tf.nn.relu):
    # Momentum SGD; global_step is passed so each minimize() call increments it.
    # NOTE(review): a fixed learning_rate=0.01 is used here even though an
    # annealed schedule is being built above -- confirm which is intended.
    train_step = tf.train.MomentumOptimizer(learning_rate=0.01, momentum=0.9).minimize(error, global_step=global_step)
    #train_step = tf.train.RMSPropOptimizer(learning_rate=0.001).minimize(error)
149130
150-
151-
152131# ---------------------------------------------------------------------------------
153132
154-
# add summary operations
# Scalar summaries picked up by merge_all for TensorBoard logging.
tf.summary.scalar('error', error)
tf.summary.scalar('accuracy', accuracy)
@@ -168,19 +146,13 @@ def fc(x, w, b, activation=tf.nn.relu):
# Event writer for validation summaries (exp_dir is created above) and a
# saver for checkpointing model variables.
valid_writer = tf.summary.FileWriter(os.path.join(exp_dir, 'valid-summaries'))
saver = tf.train.Saver()
170148
171-
# create arrays to store run train / valid set stats
# Pre-allocate one slot per epoch; the training loop below fills them in.
num_epoch = 75
train_error, train_accuracy = np.zeros(num_epoch), np.zeros(num_epoch)
valid_error, valid_accuracy = np.zeros(num_epoch), np.zeros(num_epoch)
178155
179-
180-
181-
182-
183-
# create session and run training loop
#sess = tf.Session()
# Thread count for the session's op parallelism -- presumably fed into a
# tf.ConfigProto below (not visible in this chunk); verify.
NUM_THREADS = 8
@@ -218,7 +190,6 @@ def fc(x, w, b, activation=tf.nn.relu):
218190 print (' err(valid)={0:.4f} acc(valid)={1:.4f}'
219191 .format (valid_error [e ], valid_accuracy [e ]))
220192
221-
# close writer and session objects
# Flush any buffered summaries and release both event writers.
for writer in (train_writer, valid_writer):
    writer.close()