
Commit fbaadff

Author: tangzhankun

Merge pull request apache#5 from tangzhankun/zhankun-HDL

fix python sample code syntax error

2 parents ead946e + c6378d7; commit fbaadff

File tree

  • hadoop-deeplearning-project/YARN-TensorFlow/hadoop-yarn-applications-tensorflow/samples/between-graph

2 files changed: +107 −107 lines changed

hadoop-deeplearning-project/YARN-TensorFlow/hadoop-yarn-applications-tensorflow/samples/between-graph/job.py

Lines changed: 1 addition & 1 deletion

@@ -31,7 +31,7 @@ def loop():
 
 
 def cmd(i, target):
-    subprocess.call('python your-tensorflow-script.py --ps ' + FLAGS.ps + ' --wk ' + FLAGS.wk + ' --job_name="worker"' +
+    subprocess.call('python mnist-client.py --ps ' + FLAGS.ps + ' --wk ' + FLAGS.wk + ' --job_name="worker"' +
                     ' --task_index=' + str(i) + ' --target=' + target, shell=True)
 
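With this fix, job.py launches the actual sample client instead of a placeholder script name. For readers following along, below is a minimal sketch of the flag definitions and cluster spec that the command line above implies mnist-client.py declares. The flag names (ps, wk, job_name, task_index, target) come straight from the diff; everything else here is an assumption, since the real definitions live in the first 26 lines of mnist-client.py, which the hunk below does not show.

# Hedged sketch (not part of this commit): flag definitions that the
# subprocess.call above implies, using the TF 0.x tf.app.flags API.
import tensorflow as tf

tf.app.flags.DEFINE_string("ps", "", "comma-separated ps host:port list")
tf.app.flags.DEFINE_string("wk", "", "comma-separated worker host:port list")
tf.app.flags.DEFINE_string("job_name", "", "either 'ps' or 'worker'")
tf.app.flags.DEFINE_integer("task_index", 0, "index of this task within its job")
tf.app.flags.DEFINE_string("target", "", "gRPC session target, e.g. grpc://host:port")
FLAGS = tf.app.flags.FLAGS

# The cluster object consumed by replica_device_setter() in the hunk below:
cluster = tf.train.ClusterSpec({"ps": FLAGS.ps.split(","),
                                "worker": FLAGS.wk.split(",")})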
hadoop-deeplearning-project/YARN-TensorFlow/hadoop-yarn-applications-tensorflow/samples/between-graph/mnist-client.py

Lines changed: 106 additions & 106 deletions

The whole training block was deleted and re-added, which is why every line counts as both a deletion and an addition; apart from whitespace, the only change visible in the hunk is `target` → `FLAGS.target` in the `prepare_or_wait_for_session` call. The reconstructed hunk, with unchanged lines shown as context:

@@ -27,109 +27,109 @@
 from tensorflow.examples.tutorials.mnist import input_data
 mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
 
 # Between-graph replication
 with tf.device(tf.train.replica_device_setter(
         worker_device="/job:worker/task:%d" % FLAGS.task_index,
         cluster=cluster)):
 
     # count the number of updates
     global_step = tf.get_variable('global_step', [],
                                   initializer=tf.constant_initializer(0),
                                   trainable=False)
 
     # input images
     with tf.name_scope('input'):
         # None -> batch size can be any size, 784 -> flattened mnist image
         x = tf.placeholder(tf.float32, shape=[None, 784], name="x-input")
         # target 10 output classes
         y_ = tf.placeholder(tf.float32, shape=[None, 10], name="y-input")
 
     # model parameters will change during training so we use tf.Variable
     tf.set_random_seed(1)
     with tf.name_scope("weights"):
         W1 = tf.Variable(tf.random_normal([784, 100]))
         W2 = tf.Variable(tf.random_normal([100, 10]))
 
     # bias
     with tf.name_scope("biases"):
         b1 = tf.Variable(tf.zeros([100]))
         b2 = tf.Variable(tf.zeros([10]))
 
     # implement model
     with tf.name_scope("softmax"):
         # y is our prediction
         z2 = tf.add(tf.matmul(x,W1),b1)
         a2 = tf.nn.sigmoid(z2)
         z3 = tf.add(tf.matmul(a2,W2),b2)
         y = tf.nn.softmax(z3)
 
     # specify cost function
     with tf.name_scope('cross_entropy'):
         # this is our cost
         cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
 
     # specify optimizer
     with tf.name_scope('train'):
         # optimizer is an "operation" which we can execute in a session
         grad_op = tf.train.GradientDescentOptimizer(learning_rate)
 
     with tf.name_scope('Accuracy'):
         # accuracy
         correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
         accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
 
     # create a summary for our cost and accuracy
     tf.scalar_summary("cost", cross_entropy)
     tf.scalar_summary("accuracy", accuracy)
 
     # merge all summaries into a single "operation" which we can execute in a session
     summary_op = tf.merge_all_summaries()
     init_op = tf.initialize_all_variables()
     print("Variables initialized ...")
 
 sv = tf.train.Supervisor(is_chief=(FLAGS.task_index == 0),
                          global_step=global_step,
                          init_op=init_op)
 
 begin_time = time.time()
 frequency = 100
-with sv.prepare_or_wait_for_session(target) as sess:
+with sv.prepare_or_wait_for_session(FLAGS.target) as sess:
 
     # create log writer object (this will log on every machine)
     writer = tf.train.SummaryWriter(logs_path, graph=tf.get_default_graph())
 
     # perform training cycles
     start_time = time.time()
     for epoch in range(training_epochs):
 
         # number of batches in one epoch
         batch_count = int(mnist.train.num_examples/batch_size)
 
         count = 0
         for i in range(batch_count):
             batch_x, batch_y = mnist.train.next_batch(batch_size)
 
             # perform the operations we defined earlier on batch
             _, cost, summary, step = sess.run(
                 [train_op, cross_entropy, summary_op, global_step],
                 feed_dict={x: batch_x, y_: batch_y})
             writer.add_summary(summary, step)
 
             count += 1
             if count % frequency == 0 or i+1 == batch_count:
                 elapsed_time = time.time() - start_time
                 start_time = time.time()
                 print("Step: %d," % (step+1),
                       " Epoch: %2d," % (epoch+1),
                       " Batch: %3d of %3d," % (i+1, batch_count),
                       " Cost: %.4f," % cost,
                       " AvgTime: %3.2fms" % float(elapsed_time*1000/frequency))
                 count = 0
 
     print("Test-Accuracy: %2.2f" % sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
     print("Total Time: %3.2fs" % float(time.time() - begin_time))
     print("Final Cost: %.4f" % cost)
 
 sv.stop()
 print("done")
