import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

mnist = input_data.read_data_sets(".", one_hot=True, reshape=False)

# Parameters
learning_rate = 0.001
training_epochs = 20
batch_size = 128  # Decrease batch size if you don't have enough memory
display_step = 1

n_input = 784  # MNIST data input (img shape: 28*28)
n_classes = 10  # MNIST total classes (0-9 digits)
n_hidden_layer = 256  # number of features in the hidden layer
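
# Sketch (not in the original): fix the graph-level random seed so the
# tf.random_normal initializers below produce reproducible weights across runs.
tf.set_random_seed(42)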

# Store layers weight & bias
weights = {
    'hidden_layer': tf.Variable(tf.random_normal([n_input, n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_hidden_layer, n_classes]))
}
biases = {
    'hidden_layer': tf.Variable(tf.random_normal([n_hidden_layer])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

# tf Graph input; images keep their 28x28x1 shape because reshape=False above
x = tf.placeholder(tf.float32, [None, 28, 28, 1])
y = tf.placeholder(tf.float32, [None, n_classes])

# Flatten each image into a 784-element vector for the fully connected layers
x_flat = tf.reshape(x, [-1, n_input])

# Hidden layer with ReLU activation
layer_1 = tf.add(tf.matmul(x_flat, weights['hidden_layer']), biases['hidden_layer'])
layer_1 = tf.nn.relu(layer_1)
# Output layer with linear activation
logits = tf.add(tf.matmul(layer_1, weights['out']), biases['out'])

# Define loss and optimizer
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(cost)
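
# Sketch (not in the original): an accuracy op for evaluation. It assumes
# one-hot labels, comparing the arg-max of the logits against the arg-max of
# the labels and averaging the resulting booleans.
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))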

# Initializing the variables
init = tf.global_variables_initializer()

# Launch the graph
with tf.Session() as sess:
    sess.run(init)
    # Training cycle
    for epoch in range(training_epochs):
        total_batch = mnist.train.num_examples // batch_size
        # Loop over all batches
        for i in range(total_batch):
            batch_x, batch_y = mnist.train.next_batch(batch_size)
            # Run the optimization op (backprop); only the train step is
            # fetched here, so this call does not return the loss value
            sess.run(optimizer, feed_dict={x: batch_x, y: batch_y})
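        # Sketch (not in the original): report the loss on the most recent
        # batch every display_step epochs; display_step is defined above but
        # otherwise unused in the original.
        if epoch % display_step == 0:
            c = sess.run(cost, feed_dict={x: batch_x, y: batch_y})
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(c))
    # Sketch (not in the original): evaluate the trained model on the held-out
    # test set, using the accuracy op sketched after the optimizer definition.
    print("Test accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))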