Skip to content

Commit c454e00

Browse files
committed
modify unit tests (ut)
1 parent 36acffc commit c454e00

File tree

2 files changed

+5
-12
lines changed

2 files changed

+5
-12
lines changed

python/paddle/fluid/tests/unittests/hybrid_parallel_pp_fp16.py

Lines changed: 1 addition & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -125,16 +125,13 @@ def test_pp_model(self):
125125
scaler_a.minimize(optimizer_a, loss_a)
126126
optimizer_a.clear_grad()
127127
scheduler_a.step()
128-
print("\n loss_a : ", loss_a, "\n")
129128

130-
with paddle.amp.auto_cast(enable=True, level='O2'):
131129
loss_b = model_b.train_batch(
132130
[img, label], optimizer_b, scheduler_b, scaler=scaler_b)
133-
print("\n loss_b : ", loss_b, "\n")
134131

135132
print("loss: ", loss_a.numpy(), loss_b.numpy())
136133
np.testing.assert_allclose(
137-
loss_a.numpy(), loss_b.numpy(), rtol=1e-4)
134+
loss_a.numpy(), loss_b.numpy(), rtol=5e-3)
138135

139136

140137
if __name__ == "__main__":

python/paddle/fluid/tests/unittests/test_dygraph_recompute.py

Lines changed: 4 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -121,14 +121,10 @@ def run_model(recompute_block=[],
121121
x_data = np.random.randn(batch_size, input_size).astype(np.float32)
122122
x = paddle.to_tensor(x_data)
123123
# x.stop_gradient = False
124-
if not pure_fp16:
125-
with paddle.amp.auto_cast(True):
126-
y_pred = model(x)
127-
loss = y_pred.mean()
128-
else:
129-
with paddle.amp.auto_cast(True, level='O2'):
130-
y_pred = model(x)
131-
loss = y_pred.mean()
124+
level = 'O2' if pure_fp16 else 'O1'
125+
with paddle.amp.auto_cast(True, level=level):
126+
y_pred = model(x)
127+
loss = y_pred.mean()
132128
if enable_autocast:
133129
scaler.scale(loss).backward()
134130
scaler.minimize(optimizer, loss)

0 commit comments

Comments (0)