[FixUnitTest] Fix a Batch Size Random Unit Test Failure #24718

Merged: 7 commits on Jun 1, 2020
Found a precision problem: a small batch size can cause the CPU test to fail.

test=develop
zhhsplendid committed Jun 1, 2020
commit 0e051aa2c55bc9bbb8a45e2015987ed9c9c36a14
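
In short: the SE-ResNeXt parallel-executor tests compare the losses of two runs against a small tolerance, and the old batch size of 4 left the CPU comparison close enough to that tolerance to fail intermittently. This commit therefore makes the batch size device-dependent: 8 on GPU (memory-limited) and 12 on CPU. Below is a minimal sketch of the kind of tolerance check involved; the helper name and values are illustrative, not the actual test code:

    import numpy as np

    def losses_close(loss_a, loss_b, delta=1e-5):
        # Illustrative check: the seresnext tests compare mean losses from two
        # runs; a very small batch leaves more room for the two means to drift
        # apart (e.g. through different floating-point reduction orders) than
        # a delta this tight allows.
        return abs(np.mean(loss_a) - np.mean(loss_b)) < delta
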
11 changes: 7 additions & 4 deletions python/paddle/fluid/tests/unittests/seresnext_net.py
@@ -172,8 +172,11 @@ def optimizer(learning_rate=0.01):
model = SE_ResNeXt50Small


-def batch_size():
-    return 4
+def batch_size(use_cuda):
+    if use_cuda:
+        # Paddle uses 8GB P4 GPU for unittest so we decreased the batch size.
+        return 8
+    return 12


def iter(use_cuda):
@@ -183,9 +186,9 @@ def iter(use_cuda):


gpu_img, gpu_label = init_data(
-    batch_size=batch_size(), img_shape=img_shape, label_range=999)
+    batch_size=batch_size(use_cuda=True), img_shape=img_shape, label_range=999)
cpu_img, cpu_label = init_data(
-    batch_size=batch_size(), img_shape=img_shape, label_range=999)
+    batch_size=batch_size(use_cuda=False), img_shape=img_shape, label_range=999)
feed_dict_gpu = {"image": gpu_img, "label": gpu_label}
feed_dict_cpu = {"image": cpu_img, "label": cpu_label}

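
With this change every caller has to say which device it is preparing data for. A small usage sketch of the updated helper, with the values from the diff above (assuming seresnext_net is importable from the unittests directory):

    import seresnext_net

    gpu_bs = seresnext_net.batch_size(use_cuda=True)   # 8: limited by the 8GB P4 GPU used in CI
    cpu_bs = seresnext_net.batch_size(use_cuda=False)  # 12: a larger batch to stabilize the CPU comparison
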
4 changes: 2 additions & 2 deletions python/paddle/fluid/tests/unittests/seresnext_test_base.py
@@ -32,7 +32,7 @@ def _compare_result_with_origin_model(self,
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_cuda),
iter=seresnext_net.iter(use_cuda),
-batch_size=seresnext_net.batch_size(),
+batch_size=seresnext_net.batch_size(use_cuda),
use_cuda=use_cuda,
use_reduce=False,
optimizer=seresnext_net.optimizer)
@@ -41,7 +41,7 @@ def _compare_result_with_origin_model(self,
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_cuda),
iter=seresnext_net.iter(use_cuda),
-batch_size=seresnext_net.batch_size(),
+batch_size=seresnext_net.batch_size(use_cuda),
use_cuda=use_cuda)

if compare_seperately:
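
The point of passing use_cuda through here is consistency: the feed dicts in seresnext_net are built with batch_size(use_cuda=True/False), so the batch_size reported to check_network_convergence must be the same per-device value. A hypothetical illustration of that invariant (the input shape is only an assumption for the sketch):

    import numpy as np
    import seresnext_net

    use_cuda = False
    bs = seresnext_net.batch_size(use_cuda)        # 12 on CPU after this change
    img_shape = [3, 224, 224]                      # assumed input shape, for the sketch only
    images = np.random.random([bs] + img_shape).astype('float32')

    # The batch size handed to check_network_convergence must match the feed data:
    assert images.shape[0] == bs
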
(Changes to a third test file; the filename is not shown in this view.)
@@ -28,15 +28,15 @@ def _compare_reduce_and_allreduce(self, use_cuda, delta2=1e-5):
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_cuda),
iter=seresnext_net.iter(use_cuda),
-batch_size=seresnext_net.batch_size(),
+batch_size=seresnext_net.batch_size(use_cuda),
use_cuda=use_cuda,
use_reduce=False,
optimizer=seresnext_net.optimizer)
reduce_first_loss, reduce_last_loss = self.check_network_convergence(
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_cuda),
iter=seresnext_net.iter(use_cuda),
-batch_size=seresnext_net.batch_size(),
+batch_size=seresnext_net.batch_size(use_cuda),
use_cuda=use_cuda,
use_reduce=True,
optimizer=seresnext_net.optimizer)
@@ -53,7 +53,7 @@ def _compare_reduce_and_allreduce(self, use_cuda, delta2=1e-5):
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_cuda),
iter=seresnext_net.iter(use_cuda),
-batch_size=seresnext_net.batch_size(),
+batch_size=seresnext_net.batch_size(use_cuda),
use_cuda=use_cuda,
use_reduce=False,
optimizer=seresnext_net.optimizer,
Expand All @@ -63,7 +63,7 @@ def _compare_reduce_and_allreduce(self, use_cuda, delta2=1e-5):
seresnext_net.model,
feed_dict=seresnext_net.feed_dict(use_cuda),
iter=seresnext_net.iter(use_cuda),
-batch_size=seresnext_net.batch_size(),
+batch_size=seresnext_net.batch_size(use_cuda),
use_cuda=use_cuda,
use_reduce=True,
optimizer=seresnext_net.optimizer,
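
The comparison body of _compare_reduce_and_allreduce is collapsed in this view. For orientation, here is a hedged sketch of what such a check plausibly looks like, given the delta2=1e-5 default in the signature; this is an assumption, not the actual implementation:

    def _assert_losses_close(self, all_reduce_loss, reduce_loss, delta):
        # Hypothetical helper: the losses from the all-reduce strategy and the
        # reduce strategy should agree element-wise within the given tolerance.
        for a, r in zip(all_reduce_loss, reduce_loss):
            self.assertAlmostEqual(a, r, delta=delta)

Raising the CPU batch size makes this kind of element-wise comparison less likely to trip a 1e-5 tolerance by chance.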