[CodeStyle][UP031] fix /test/legacy_test/* - part 10 (PaddlePaddle#…
gouzil authored Jun 29, 2024
1 parent: 7a353b3 · commit: 2b4a472
Showing 19 changed files with 43 additions and 56 deletions.
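For context, the changes below all follow two related cleanup patterns: printf-style `%` formatting (flagged by ruff rule UP031) is rewritten as an f-string, and redundant str() calls inside existing f-strings are dropped, since the interpolation already formats the value (equivalent to str() for the objects used here). A minimal illustrative sketch with hypothetical values, not code taken from the diff:

# Hypothetical example of the two patterns applied throughout this commit.
place = "CPUPlace"

# Before: printf-style formatting (UP031) and a redundant str() inside an f-string.
msg_percent = "Gradient Check On %s" % str(place)
msg_redundant = f"Gradient Check On {str(place)}"

# After: plain f-string interpolation; the value is converted to its string form anyway.
msg_fstring = f"Gradient Check On {place}"

assert msg_percent == msg_redundant == msg_fstring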
16 changes: 4 additions & 12 deletions test/legacy_test/auto_parallel_autoconvert.py
@@ -150,12 +150,8 @@ def setUp(self):
np.random.seed(2021)

def tearDown(self):
os.remove(
f"./model_state_rank{str(paddle.distributed.get_rank())}.pdmodel"
)
os.remove(
f"./dist_attr_rank{str(paddle.distributed.get_rank())}.pdattr"
)
os.remove(f"./model_state_rank{paddle.distributed.get_rank()}.pdmodel")
os.remove(f"./dist_attr_rank{paddle.distributed.get_rank()}.pdattr")

def test_mlp_mp2pp(self):
set_default_distributed_context(None)
@@ -245,12 +241,8 @@ def setUp(self):
np.random.seed(2021)

def tearDown(self):
os.remove(
f"./model_state_rank{str(paddle.distributed.get_rank())}.pdmodel"
)
os.remove(
f"./dist_attr_rank{str(paddle.distributed.get_rank())}.pdattr"
)
os.remove(f"./model_state_rank{paddle.distributed.get_rank()}.pdmodel")
os.remove(f"./dist_attr_rank{paddle.distributed.get_rank()}.pdattr")

def test_mlp_pp2mp(self):
set_default_distributed_context(None)
6 changes: 3 additions & 3 deletions test/legacy_test/auto_parallel_op_test.py
@@ -494,7 +494,7 @@ def check_eager_auto_parallel(self):
# check eager auto parallel forward
if len(actual_ret) != len(self.eager_forward_desire):
msg = (
f"The eager auto parallel out tensor nums is different with eager out tensor nums on {str(self.place)}."
f"The eager auto parallel out tensor nums is different with eager out tensor nums on {self.place}."
f'eager auto parallel out tensor nums = {len(actual_ret)}, eager out tensor nums = {len(self.eager_forward_desire)}. \n'
)
raise RuntimeError(msg)
@@ -713,7 +713,7 @@ def check_eager_auto_parallel(self):
# check eager auto parallel forward
if len(actual_forward_res) != len(self.eager_forward_desire):
msg = (
f"The eager auto parallel out tensor nums is different with eager out tensor nums on {str(self.place)}."
f"The eager auto parallel out tensor nums is different with eager out tensor nums on {self.place}."
f'eager auto parallel out tensor nums = {len(actual_forward_res)}, eager out tensor nums = {len(self.eager_forward_desire)}. \n'
)
raise RuntimeError(msg)
@@ -739,7 +739,7 @@ def check_eager_auto_parallel(self):
# check eager auto parallel grad
if len(actual_grad_res) != len(self.eager_grad_desire):
msg = (
f"The eager auto parallel grad out tensor nums is different with eager grad out tensor nums on {str(self.place)}."
f"The eager auto parallel grad out tensor nums is different with eager grad out tensor nums on {self.place}."
f'eager auto parallel grad out tensor nums = {len(actual_grad_res)}, eager grad out tensor nums = {len(self.eager_grad_desire)}. \n'
)
raise RuntimeError(msg)
6 changes: 2 additions & 4 deletions test/legacy_test/benchmark.py
@@ -85,9 +85,7 @@ def timeit_output(self, iters=100):
for place in places:
elapses.append(self.timeit_output_with_place(place, iters))
for place, elapse in zip(places, elapses):
print(
f"One pass of ({self.op_type}_op) at {str(place)} cost {elapse}"
)
print(f"One pass of ({self.op_type}_op) at {place} cost {elapse}")

def timeit_grad_with_place(self, place, iters=100):
inputs_to_check = self._get_input_names()
@@ -108,5 +106,5 @@ def timeit_grad(self, iters=100):
elapses.append(self.timeit_grad_with_place(place, iters))
for place, elapse in zip(places, elapses):
print(
f"One pass of ({self.op_type}_grad_op) at {str(place)} cost {elapse}"
f"One pass of ({self.op_type}_grad_op) at {place} cost {elapse}"
)
4 changes: 2 additions & 2 deletions test/legacy_test/dist_fleet_sync_batch_norm.py
@@ -94,7 +94,7 @@ def train(args):
rank = paddle.distributed.get_rank()
filepath = os.path.join(
args.data_dir,
f'input_{rank}_{args.only_forward}_{str(args.dtype)}_{args.layout}.npy',
f'input_{rank}_{args.only_forward}_{args.dtype}_{args.layout}.npy',
)
data = np.load(filepath)

@@ -108,7 +108,7 @@ def train(args):
for i in range(0, len(sync_bn_fetches)):
file_path = os.path.join(
args.data_dir,
f'output_{rank}_{args.only_forward}_{str(args.dtype)}_{i}.npy',
f'output_{rank}_{args.only_forward}_{args.dtype}_{i}.npy',
)
np.save(file_path, sync_bn_fetches[i])

2 changes: 1 addition & 1 deletion test/legacy_test/gradient_checker.py
@@ -447,7 +447,7 @@ def fail_test(msg):
if not np.allclose(a, n, rtol, atol):
msg = (
f'Jacobian mismatch for output {y_idx} in y '
f'with respect to input {x_idx} in x on {str(place)},\n'
f'with respect to input {x_idx} in x on {place},\n'
f'numerical:{n}\nanalytical:{a}\n'
)
return fail_test(msg)
4 changes: 2 additions & 2 deletions test/legacy_test/op.py
@@ -163,7 +163,7 @@ def __call__(self, *args, **kwargs):
new_attr.scalars.MergeFrom(item)
else:
raise NotImplementedError(
f"A not supported attribute type: {str(attr.type)}."
f"A not supported attribute type: {attr.type}."
)
for attr_name, defalut_val in self.__extra_attrs__.items():
user_defined_attr = kwargs.get(attr_name, None)
@@ -212,7 +212,7 @@ def __call__(self, *args, **kwargs):
new_attr.scalars.MergeFrom(item)
else:
raise NotImplementedError(
f"A not supported attribute type: {str(attr_type)}."
f"A not supported attribute type: {attr_type}."
)

return op_desc
8 changes: 4 additions & 4 deletions test/legacy_test/op_test.py
@@ -2936,7 +2936,7 @@ def _assert_is_close(
atol=atol,
equal_nan=False,
err_msg=(
f"Operator {self.op_type} error, {msg_prefix} variable {name} (shape: {str(a.shape)}, dtype: {self.dtype}) max gradient diff over limit"
f"Operator {self.op_type} error, {msg_prefix} variable {name} (shape: {a.shape}, dtype: {self.dtype}) max gradient diff over limit"
),
)
else:
@@ -3150,7 +3150,7 @@ def check_grad_with_place_for_static(
analytic_grads,
inputs_to_check,
max_relative_error,
f"Gradient Check On {str(place)}",
f"Gradient Check On {place}",
atol=atol,
)

@@ -3425,7 +3425,7 @@ def check_grad_with_place(
dygraph_dygraph_grad,
inputs_to_check,
max_relative_error,
f"Gradient Check On {str(place)}",
f"Gradient Check On {place}",
atol=atol,
)

@@ -3465,7 +3465,7 @@ def check_grad_with_place(
pir_grad,
inputs_to_check,
max_relative_error,
f"Gradient Check On {str(place)}",
f"Gradient Check On {place}",
atol=atol,
)

14 changes: 7 additions & 7 deletions test/legacy_test/prim_op_test.py
@@ -689,7 +689,7 @@ def check_static_comp(self):
# check static forward
if len(ret) != len(self.eager_desire):
msg = (
f"The static comp forward api out tensor nums is different with eager forward api out tensor nums on {str(self.place)}."
f"The static comp forward api out tensor nums is different with eager forward api out tensor nums on {self.place}."
f'when enable_fw_comp is {self.enable_fw_comp}, static comp forward api out tensor nums = {len(ret)}, eager forward api out tensor nums = {len(self.eager_desire)}. \n'
)
raise RuntimeError(msg)
@@ -772,7 +772,7 @@ def check_jit_comp(self):
# check jit comp forward
if len(ret) != len(self.eager_desire):
msg = (
f"The jit comp forward api out tensor nums is different with eager forward api out tensor nums on {str(self.place)}."
f"The jit comp forward api out tensor nums is different with eager forward api out tensor nums on {self.place}."
f'when enable_fw_comp is {self.enable_fw_comp}, jit comp forward api out tensor nums = {len(ret)}, eager forward api out tensor nums = {len(self.eager_desire)}. \n'
)
raise RuntimeError(msg)
@@ -865,7 +865,7 @@ def check_jit_comp_with_cinn(self):
# check jit comp forward
if len(ret) != len(self.eager_desire):
msg = (
f"The jit comp with cinn forward api out tensor nums is different with eager forward api out tensor nums on {str(self.place)}."
f"The jit comp with cinn forward api out tensor nums is different with eager forward api out tensor nums on {self.place}."
f'when enable_fw_comp is {self.enable_fw_comp}, enable_cinn is {core.is_compiled_with_cinn() and self.enable_cinn}, jit comp forward api out tensor nums = {len(ret)}, eager forward api out tensor nums = {len(self.eager_desire)}. \n'
)
raise RuntimeError(msg)
@@ -1062,7 +1062,7 @@ def check_eager_comp(self):
# check static forward
if len(actual_ret) != len(self.eager_desire):
msg = (
f"The eager comp grad out tensor nums is different with eager grad out tensor nums on {str(self.place)}."
f"The eager comp grad out tensor nums is different with eager grad out tensor nums on {self.place}."
f'when enable_rev_comp is {self.enable_rev_comp}, eager comp grad api out tensor nums = {len(actual_ret)}, eager grad out tensor nums = {len(self.eager_desire)}. \n'
)
raise RuntimeError(msg)
@@ -1180,7 +1180,7 @@ def check_static_comp(self):
# check static grad out
if len(actual_ret) != len(self.eager_desire):
msg = (
f"The static comp grad out tensor nums is different with eager grad out tensor nums on {str(self.place)}."
f"The static comp grad out tensor nums is different with eager grad out tensor nums on {self.place}."
f'when enable_fw_comp is {self.enable_fw_comp},enable_rev_comp is {self.enable_rev_comp}, static comp grad out tensor nums = {len(actual_ret)}, eager grad out tensor nums = {len(self.eager_desire)}. \n'
)
raise RuntimeError(msg)
@@ -1294,7 +1294,7 @@ def check_jit_comp(self):
# check jit comp grad out
if len(ret) != len(self.eager_desire):
msg = (
f"The jit comp grad out tensor nums is different with eager grad out tensor nums on {str(self.place)}."
f"The jit comp grad out tensor nums is different with eager grad out tensor nums on {self.place}."
f'when enable_fw_comp is {self.enable_fw_comp}, enable_rev_comp is {self.enable_rev_comp}, jit comp grad out tensor nums = {len(ret)}, eager grad out tensor nums = {len(self.eager_desire)}. \n'
)
raise RuntimeError(msg)
@@ -1421,7 +1421,7 @@ def check_jit_comp_with_cinn(self):
# check jit comp grad out
if len(ret) != len(self.eager_desire):
msg = (
f"The jit comp with cinn grad out tensor nums is different with eager grad out tensor nums on {str(self.place)}."
f"The jit comp with cinn grad out tensor nums is different with eager grad out tensor nums on {self.place}."
f'when enable_fw_comp is {self.enable_fw_comp}, enable_rev_comp is {self.enable_rev_comp}, enable_cinn is {self.enable_cinn and core.is_compiled_with_cinn()}, jit comp grad out tensor nums = {len(ret)}, eager grad out tensor nums = {len(self.eager_desire)}. \n'
)
raise RuntimeError(msg)
4 changes: 2 additions & 2 deletions test/legacy_test/test_conv2d_op.py
@@ -34,13 +34,13 @@ def conv2d_forward_naive(
):
if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
raise ValueError(
f"Unknown Attr(padding_algorithm): '{str(padding_algorithm)}'. "
f"Unknown Attr(padding_algorithm): '{padding_algorithm}'. "
"It can only be 'SAME' or 'VALID'."
)

if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
f"Unknown Attr(data_format): '{str(data_format)}' ."
f"Unknown Attr(data_format): '{data_format}' ."
"It can only be 'NCHW' or 'NHWC'."
)

2 changes: 1 addition & 1 deletion test/legacy_test/test_conv2d_transpose_op.py
@@ -37,7 +37,7 @@ def conv2dtranspose_forward_naive(input_, filter_, attrs):
padding_algorithm = attrs['padding_algorithm']
if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
raise ValueError(
f"Unknown Attr(padding_algorithm): '{str(padding_algorithm)}'. "
f"Unknown Attr(padding_algorithm): '{padding_algorithm}'. "
"It can only be 'SAME' or 'VALID'."
)

4 changes: 2 additions & 2 deletions test/legacy_test/test_conv3d_op.py
@@ -37,13 +37,13 @@ def conv3d_forward_naive(
):
if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
raise ValueError(
f"Unknown Attr(padding_algorithm): '{str(padding_algorithm)}'. "
f"Unknown Attr(padding_algorithm): '{padding_algorithm}'. "
"It can only be 'SAME' or 'VALID'."
)

if data_format not in ["NCDHW", "NDHWC"]:
raise ValueError(
f"Unknown Attr(data_format): '{str(data_format)}' ."
f"Unknown Attr(data_format): '{data_format}' ."
"It can only be 'NCDHW' or 'NDHWC'."
)

2 changes: 1 addition & 1 deletion test/legacy_test/test_conv3d_transpose_op.py
@@ -42,7 +42,7 @@ def conv3dtranspose_forward_naive(input_, filter_, attrs):
padding_algorithm = attrs['padding_algorithm']
if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
raise ValueError(
f"Unknown Attr(padding_algorithm): '{str(padding_algorithm)}'. "
f"Unknown Attr(padding_algorithm): '{padding_algorithm}'. "
"It can only be 'SAME' or 'VALID'."
)

4 changes: 1 addition & 3 deletions test/legacy_test/test_fused_dropout_add_op.py
@@ -101,9 +101,7 @@ def setUp(self):
self.mode = mode
self.seed = seed

cls_name = (
f"{parent.__name__}_{dtype}_{mode}_{str(training)}_{str(p)}_{str(seed)}"
)
cls_name = f"{parent.__name__}_{dtype}_{mode}_{training}_{p}_{seed}"
TestFusedDropoutAddCase.__name__ = cls_name
globals()[cls_name] = TestFusedDropoutAddCase

2 changes: 1 addition & 1 deletion test/legacy_test/test_group_norm_op.py
@@ -209,7 +209,7 @@ def do_compare_between_place(self):
gpu_grads,
inputs_to_check,
0.005,
f"Gradient Check On {str(place)}",
f"Gradient Check On {place}",
)

def test_check_grad(self):
5 changes: 2 additions & 3 deletions test/legacy_test/test_poisson_nll_loss.py
@@ -34,14 +34,13 @@ def ref_poisson_nll_loss(
):
if epsilon <= 0:
raise ValueError(
"The value of `epsilon` in PoissonNLLLoss should be positive, but received %f, which is not allowed"
% epsilon
f"The value of `epsilon` in PoissonNLLLoss should be positive, but received {epsilon:f}, which is not allowed"
)

if reduction not in ['sum', 'mean', 'none']:
raise ValueError(
"The value of 'reduction' in SoftMarginLoss should be 'sum', 'mean' or 'none', but "
"received %s, which is not allowed." % reduction
f"received {reduction}, which is not allowed."
)
loss_out = 0
if log_input:
4 changes: 2 additions & 2 deletions test/legacy_test/test_pool2d_op.py
@@ -179,8 +179,8 @@ def _get_padding_with_SAME(input_shape, pool_size, pool_stride):
padding_algorithm = padding_algorithm.upper()
if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
raise ValueError(
"Unknown Attr(padding_algorithm): '%s'. "
"It can only be 'SAME' or 'VALID'." % str(padding_algorithm)
f"Unknown Attr(padding_algorithm): '{padding_algorithm}'. "
"It can only be 'SAME' or 'VALID'."
)

if padding_algorithm == "VALID":
4 changes: 2 additions & 2 deletions test/legacy_test/test_pool3d_op.py
@@ -63,8 +63,8 @@ def _get_padding_with_SAME(input_shape, pool_size, pool_stride):
padding_algorithm = padding_algorithm.upper()
if padding_algorithm not in ["SAME", "VALID", "EXPLICIT"]:
raise ValueError(
"Unknown Attr(padding_algorithm): '%s'. "
"It can only be 'SAME' or 'VALID'." % str(padding_algorithm)
f"Unknown Attr(padding_algorithm): '{padding_algorithm}'. "
"It can only be 'SAME' or 'VALID'."
)

if padding_algorithm == "VALID":
2 changes: 1 addition & 1 deletion test/legacy_test/test_sync_batch_norm_op.py
@@ -217,7 +217,7 @@ def _compare_impl(self, place, layout, only_forward):
for id in range(core.get_cuda_device_count()):
filepath = os.path.join(
self.data_dir.name,
f'input_{id}_{only_forward}_{str(self.dtype.__name__)}_{layout}.npy',
f'input_{id}_{only_forward}_{self.dtype.__name__}_{layout}.npy',
)
np.save(filepath, data[id * stride : (id + 1) * stride])
data = create_or_get_tensor(
6 changes: 3 additions & 3 deletions test/legacy_test/test_zero_dim_binary_api.py
@@ -168,7 +168,7 @@ def test_dygraph_binary(self):
# 1) x is 0D, y is 0D
x_np = np.random.randint(-10, 10, [])
y_np = np.random.randint(-10, 10, [])
out_np = eval('np.%s(x_np, y_np)' % api.__name__)
out_np = eval(f'np.{api.__name__}(x_np, y_np)')

x = paddle.to_tensor(x_np)
y = paddle.to_tensor(y_np)
@@ -180,7 +180,7 @@ def test_dygraph_binary(self):
# 2) x is ND, y is 0D
x_np = np.random.randint(-10, 10, [3, 5])
y_np = np.random.randint(-10, 10, [])
out_np = eval('np.%s(x_np, y_np)' % api.__name__)
out_np = eval(f'np.{api.__name__}(x_np, y_np)')

x = paddle.to_tensor(x_np)
y = paddle.to_tensor(y_np)
@@ -192,7 +192,7 @@ def test_dygraph_binary(self):
# 3) x is 0D , y is ND
x_np = np.random.randint(-10, 10, [])
y_np = np.random.randint(-10, 10, [3, 5])
out_np = eval('np.%s(x_np, y_np)' % api.__name__)
out_np = eval(f'np.{api.__name__}(x_np, y_np)')

x = paddle.to_tensor(x_np)
y = paddle.to_tensor(y_np)
