
Commit

Benchmark scripts: support multi-card training
kolinwei committed May 16, 2018
1 parent 48ac978 commit ed812bd
Showing 3 changed files with 18 additions and 14 deletions.
12 changes: 6 additions & 6 deletions benchmark/fluid/mnist.py
@@ -159,6 +159,7 @@ def run_benchmark(model, args):
         paddle.dataset.mnist.train(), batch_size=args.batch_size)
 
     accuracy = fluid.metrics.Accuracy()
+    train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name)
     iters, num_samples, start_time = 0, 0, time.time()
     for pass_id in range(args.pass_num):
         accuracy.reset()
@@ -175,17 +176,16 @@ def run_benchmark(model, args):
             y_data = np.array(map(lambda x: x[1], data)).astype("int64")
             y_data = y_data.reshape([len(y_data), 1])
 
-            outs = exe.run(
-                fluid.default_main_program(),
+            outs = train_exe.run(
                 feed={"pixel": img_data,
                       "label": y_data},
-                fetch_list=[avg_cost, batch_acc, batch_size_tensor]
+                fetch_list=[avg_cost.name, batch_acc.name, batch_size_tensor.name]
             ) # The accuracy is the accumulation of batches, but not the current batch.
-            accuracy.update(value=outs[1], weight=outs[2])
+            accuracy.update(value=np.array(np.mean(outs[1])), weight=np.mean(np.array(outs[2])))
             iters += 1
             num_samples += len(y_data)
-            loss = np.array(outs[0])
-            acc = np.array(outs[1])
+            loss = np.mean(np.array(outs[0]))
+            acc = np.mean(np.array(outs[1]))
             train_losses.append(loss)
             train_accs.append(acc)
             print("Pass: %d, Iter: %d, Loss: %f, Accuracy: %f" %
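For reference, the pattern shared by all three scripts after this change is sketched below with a hypothetical toy model (a single softmax layer on random data; the real scripts build the mnist/resnet/vgg networks and also fetch a batch_size_tensor for the weighted accuracy). Parameters are still initialized by a plain Executor, but the per-batch step now goes through ParallelExecutor, which replicates the program on every visible GPU; variables are fetched by name, and because the fetched values are gathered from all cards, the scripts reduce them with np.mean before logging.

import numpy as np
import paddle.fluid as fluid

# Hypothetical toy model standing in for the benchmark networks.
images = fluid.layers.data(name='pixel', shape=[1, 28, 28], dtype='float32')
label = fluid.layers.data(name='label', shape=[1], dtype='int64')
predict = fluid.layers.fc(input=images, size=10, act='softmax')
avg_cost = fluid.layers.mean(x=fluid.layers.cross_entropy(input=predict, label=label))
batch_acc = fluid.layers.accuracy(input=predict, label=label)
fluid.optimizer.Adam().minimize(avg_cost)

# Parameter initialization still uses a single-device Executor.
fluid.Executor(fluid.CUDAPlace(0)).run(fluid.default_startup_program())

# ParallelExecutor replicates default_main_program() across all visible GPUs.
train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name)

# One fake batch of random data.
img_data = np.random.random([64, 1, 28, 28]).astype('float32')
y_data = np.random.randint(0, 10, size=[64, 1]).astype('int64')

# Fetch by variable *name*; each fetched value contains results from every
# card, so np.mean collapses them to a single scalar for logging.
outs = train_exe.run(feed={'pixel': img_data, 'label': y_data},
                     fetch_list=[avg_cost.name, batch_acc.name])
loss = np.mean(np.array(outs[0]))
acc = np.mean(np.array(outs[1]))
print('loss = %f, accuracy = %f' % (loss, acc))

Gradient aggregation across cards is handled inside ParallelExecutor itself (loss_name identifies the loss to scale), so the scripts only need to change how results are fetched and reduced, as the three diffs show.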
10 changes: 6 additions & 4 deletions benchmark/fluid/resnet.py
@@ -241,6 +241,7 @@ def test(exe):
     exe = fluid.Executor(place)
     exe.run(fluid.default_startup_program())
     accuracy = fluid.average.WeightedAverage()
+    train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name)
     if args.use_fake_data:
         data = train_reader().next()
         image = np.array(map(lambda x: x[0].reshape(dshape), data)).astype(
@@ -264,14 +265,15 @@ def test(exe):
                 data)).astype('float32')
             label = np.array(map(lambda x: x[1], data)).astype('int64')
             label = label.reshape([-1, 1])
-            loss, acc, weight = exe.run(
-                fluid.default_main_program(),
+            loss, acc, weight = train_exe.run(
                 feed={'data': image,
                       'label': label},
-                fetch_list=[avg_cost, batch_acc, batch_size_tensor])
+                fetch_list=[avg_cost.name, batch_acc.name, batch_size_tensor.name])
             iters += 1
             num_samples += len(label)
-            accuracy.add(value=acc, weight=weight)
+            accuracy.add(value=np.array(np.mean(acc)), weight=np.mean(weight))
+            loss = np.mean(np.array(loss))
+            acc = np.mean(np.array(acc))
             train_losses.append(loss)
             train_accs.append(acc)
             print("Pass: %d, Iter: %d, Loss: %f, Accuracy: %f" %
10 changes: 6 additions & 4 deletions benchmark/fluid/vgg.py
@@ -169,6 +169,7 @@ def test(exe):
 
     iters, num_samples, start_time = 0, 0, time.time()
     accuracy = fluid.average.WeightedAverage()
+    train_exe = fluid.ParallelExecutor(use_cuda=True, loss_name=avg_cost.name)
     for pass_id in range(args.pass_num):
         accuracy.reset()
         train_accs = []
@@ -184,14 +185,15 @@ def test(exe):
             y_data = np.array(map(lambda x: x[1], data)).astype("int64")
             y_data = y_data.reshape([-1, 1])
 
-            loss, acc, weight = exe.run(
-                fluid.default_main_program(),
+            loss, acc, weight = train_exe.run(
                 feed={"pixel": img_data,
                       "label": y_data},
-                fetch_list=[avg_cost, batch_acc, batch_size_tensor])
-            accuracy.add(value=acc, weight=weight)
+                fetch_list=[avg_cost.name, batch_acc.name, batch_size_tensor.name])
+            accuracy.add(value=np.array(np.mean(acc)), weight=np.mean(weight))
             iters += 1
             num_samples += len(y_data)
+            loss = np.mean(np.array(loss))
+            acc = np.mean(np.array(acc))
             print(
                 "Pass = %d, Iter = %d, Loss = %f, Accuracy = %f" %
                 (pass_id, iters, loss, acc)
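A note on the accuracy bookkeeping in resnet.py and vgg.py: fluid.average.WeightedAverage accumulates a weighted mean of the per-batch accuracy, using the batch size as the weight. Because ParallelExecutor now returns one value per card for both the accuracy and the batch size tensor, each is first reduced with np.mean before being added. The accumulation itself is an ordinary running weighted mean; the plain-numpy sketch below only illustrates that idea (it is not the internals of the fluid class, and the sample numbers are made up).

import numpy as np

class RunningWeightedAverage(object):
    """Plain-numpy stand-in illustrating the weighted-average bookkeeping."""

    def __init__(self):
        self.numerator = 0.0
        self.denominator = 0.0

    def add(self, value, weight):
        # value: this batch's accuracy, already averaged over the cards
        # weight: this batch's size, already averaged over the cards
        self.numerator += float(value) * float(weight)
        self.denominator += float(weight)

    def eval(self):
        return self.numerator / self.denominator

avg = RunningWeightedAverage()
# Two batches; each per-card fetch is reduced with np.mean first, as in the diff.
avg.add(value=np.mean([0.50, 0.54]), weight=np.mean([32, 32]))
avg.add(value=np.mean([0.62, 0.58]), weight=np.mean([32, 32]))
print(avg.eval())  # 0.56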
