[CodeStyle][UP031] fix some python/paddle/ - part 8 (PaddlePaddle#65552)


---------

Co-authored-by: SigureMo <sigure.qaq@gmail.com>
gouzil and SigureMo authored Jun 28, 2024
1 parent 3402921 commit 99b35c1
Showing 16 changed files with 124 additions and 156 deletions.
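For context: UP031 is the Ruff/pyupgrade lint rule that flags printf-style `%` string formatting and rewrites it to f-strings. A minimal sketch of the pattern applied throughout this commit, using made-up values rather than Paddle code:

```python
# Illustrative only: the before/after shape of a typical UP031 fix.
data_format = "NLC"  # hypothetical attribute value

# Before: printf-style interpolation, often with a redundant str() wrapper.
old = "Attr(data_format) should be 'NCL' or 'NLC'. Received Attr(data_format): %s." % str(data_format)

# After: an f-string; the replacement field renders via format()/str() anyway.
new = f"Attr(data_format) should be 'NCL' or 'NLC'. Received Attr(data_format): {data_format}."

assert old == new  # the rendered message is unchanged
```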
21 changes: 10 additions & 11 deletions python/paddle/distributed/auto_parallel/static/converter.py
@@ -54,7 +54,7 @@ def _check_tensor_dict(self, tensors_dict):
)
if not isinstance(tensors_dict, dict):
raise TypeError(
f"The type of 'tensors_dict' should be 'dict', but got '{str(type(tensors_dict))}'."
f"The type of 'tensors_dict' should be 'dict', but got '{type(tensors_dict)}'."
)
return tensors_dict

@@ -67,7 +67,7 @@ def _check_pre_strategy(self, pre_strategy):
if not isinstance(pre_strategy, dict):
raise TypeError(
"The type of 'pre_strategy' should be 'dict', "
f"but got '{str(type(pre_strategy))}'."
f"but got '{type(pre_strategy)}'."
)
return pre_strategy

@@ -80,7 +80,7 @@ def _check_cur_strategy(self, cur_strategy):
if not isinstance(cur_strategy, dict):
raise TypeError(
"The type of 'cur_strategy' should be 'dict', "
f"but got '{str(type(cur_strategy))}'."
f"but got '{type(cur_strategy)}'."
)
return cur_strategy

@@ -150,7 +150,7 @@ def convert(self, strict=True):
)
except ValueError as err:
raise ValueError(
f"Fail to convert tensor '{str(tensor_name)}'. " + str(err)
f"Fail to convert tensor '{tensor_name}'. {err}"
)

for tensor_name in self._pre_strategy:
@@ -176,15 +176,15 @@ def convert(self, strict=True):
tensor_not_in_cur = set(tensor_not_in_cur) - set(tensor_match_with_cur)
if tensor_not_in_pre:
warnings.warn(
f"tensors [{str(tensor_not_in_pre)}] are not found in last training strategy."
f"tensors [{tensor_not_in_pre}] are not found in last training strategy."
)
if tensor_not_in_cur:
warnings.warn(
f"tensors [{str(tensor_not_in_cur)}] are not found in current training strategy."
f"tensors [{tensor_not_in_cur}] are not found in current training strategy."
)
if tensor_not_in_ckpt:
warnings.warn(
f"tensors [{str(tensor_not_in_ckpt)}] are found in pre_strategy, but are not found"
f"tensors [{tensor_not_in_ckpt}] are found in pre_strategy, but are not found"
"in checkpoint files, please check your checkpoint files."
)

@@ -215,8 +215,7 @@ def convert_with_prefix_match(
)
except ValueError as err:
raise ValueError(
f"Fail to convert tensor '{str(cur_name)}' by '{str(pre_name)}'. "
+ str(err)
f"Fail to convert tensor '{cur_name}' by '{pre_name}'. {err}"
)
self._logger.info(
f"tensor [{cur_name}] is matched with tensor [{pre_name}]"
@@ -302,7 +301,7 @@ def merge_with_dist_attr(tensor_list, dist_attr):

if len(partition_tensor_list) != 1:
raise ValueError(
f"Fail to merge tensor with dist_attr '{str(dist_attr)}'."
f"Fail to merge tensor with dist_attr '{dist_attr}'."
)
complete_tensor = partition_tensor_list[0][0]
return complete_tensor
@@ -327,7 +326,7 @@ def slice_with_dist_attr(tensor, dist_attr):
)
if sliced_tensor_index not in range(len(sliced_tensor_list)):
raise ValueError(
f"Fail to slice tensor with dist_attr '{str(dist_attr)}'."
f"Fail to slice tensor with dist_attr '{dist_attr}'."
)
sliced_tensor = sliced_tensor_list[sliced_tensor_index]
return sliced_tensor
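A note on the converter.py hunks above: the explicit `str()` calls inside the replacement fields are dropped because `{x}` in an f-string already goes through `format()`, which falls back to `str()`. A quick check, independent of Paddle:

```python
# str() inside an f-string replacement field is redundant; both messages match.
tensors_dict = [1, 2, 3]  # hypothetical wrong-typed argument

with_str = f"The type of 'tensors_dict' should be 'dict', but got '{str(type(tensors_dict))}'."
without_str = f"The type of 'tensors_dict' should be 'dict', but got '{type(tensors_dict)}'."

assert with_str == without_str  # both embed "<class 'list'>"
```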
4 changes: 2 additions & 2 deletions python/paddle/nn/clip.py
@@ -532,7 +532,7 @@ def __init__(self, clip_norm):
self.clip_norm = float(clip_norm)

def __str__(self):
return "Gradient Clip By Norm, clip_norm=%f" % self.clip_norm
return f"Gradient Clip By Norm, clip_norm={self.clip_norm:f}"

def _clip_gradients(self, params_grads):
params_and_grads = []
@@ -676,7 +676,7 @@ def __init__(
self._async_add_n = None

def __str__(self):
return "Gradient Clip By GlobalNorm, global_norm=%f" % (self.clip_norm)
return f"Gradient Clip By GlobalNorm, global_norm={self.clip_norm:f}"

@imperative_base.no_grad()
def _dygraph_clip(self, params_grads):
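In clip.py the bare `%f` conversions become the `:f` format spec, which produces the same six-decimal rendering, so the `__str__` output is unchanged. A small sanity check with a made-up clip value:

```python
clip_norm = 1.0  # hypothetical value

old = "Gradient Clip By Norm, clip_norm=%f" % clip_norm
new = f"Gradient Clip By Norm, clip_norm={clip_norm:f}"

assert old == new == "Gradient Clip By Norm, clip_norm=1.000000"
```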
2 changes: 1 addition & 1 deletion python/paddle/nn/functional/activation.py
@@ -929,7 +929,7 @@ def maxout(
if axis not in [1, -1, 3]:
raise ValueError(
"Attr(axis) should be 1 when data format is NCHW, -1 or 3 when data format is NHWC. Received "
"Attr(axis): %s." % str(axis)
f"Attr(axis): {axis}."
)
if axis == -1:
axis = 3
4 changes: 2 additions & 2 deletions python/paddle/nn/functional/common.py
@@ -1462,7 +1462,7 @@ def dropout2d(
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format)
f"Attr(data_format): {data_format}."
)

return dropout(
@@ -1524,7 +1524,7 @@ def dropout3d(
if data_format not in ["NCDHW", "NDHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
"Attr(data_format): %s." % str(data_format)
f"Attr(data_format): {data_format}."
)

return dropout(
24 changes: 12 additions & 12 deletions python/paddle/nn/functional/pooling.py
@@ -102,23 +102,23 @@ def _channel_last(data_format, num_dims):
if data_format not in ['NCL', 'NLC']:
raise ValueError(
"Attr(data_format) should be 'NCL' or 'NLC'. Received "
"Attr(data_format): %s" % str(data_format)
f"Attr(data_format): {data_format}"
)
else:
return True if data_format == "NLC" else False
if num_dims == 2:
if data_format not in ['NCHW', 'NHWC']:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s" % str(data_format)
f"Attr(data_format): {data_format}"
)
else:
return True if data_format == "NHWC" else False
if num_dims == 3:
if data_format not in ['NCDHW', 'NDHWC']:
raise ValueError(
"Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
"Attr(data_format): %s" % str(data_format)
f"Attr(data_format): {data_format}"
)
else:
return True if data_format == "NDHWC" else False
@@ -702,9 +702,9 @@ def max_pool1d(


def _unpool_output_size(x, kernel_size, stride, padding, output_size):
assert output_size is None or isinstance(output_size, (list, tuple)), (
"Required output_size is None|list|tuple, but received %s" % output_size
)
assert output_size is None or isinstance(
output_size, (list, tuple)
), f"Required output_size is None|list|tuple, but received {output_size}"
input_size = x.shape
default_size = []
for d in range(len(kernel_size)):
@@ -818,7 +818,7 @@ def max_unpool1d(
if data_format not in ["NCL"]:
raise ValueError(
"Attr(data_format) should be 'NCL'. Received "
"Attr(data_format): %s." % str(data_format)
f"Attr(data_format): {data_format}."
)
data_format = "NCHW"
x = unsqueeze(x, [2])
@@ -963,7 +963,7 @@ def max_unpool2d(
if data_format not in ["NCHW"]:
raise ValueError(
"Attr(data_format) should be 'NCHW'. Received "
"Attr(data_format): %s." % str(data_format)
f"Attr(data_format): {data_format}."
)

output_size = _unpool_output_size(
@@ -1092,7 +1092,7 @@ def max_unpool3d(
if data_format not in ["NCDHW"]:
raise ValueError(
"Attr(data_format) should be 'NCDHW'. Received "
"Attr(data_format): %s." % str(data_format)
f"Attr(data_format): {data_format}."
)

output_size = _unpool_output_size(
@@ -1197,7 +1197,7 @@ def max_pool2d(
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format)
f"Attr(data_format): {data_format}."
)

channel_last = True if data_format == "NHWC" else False
@@ -1583,7 +1583,7 @@ def adaptive_avg_pool2d(
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'. Received "
"Attr(data_format): %s." % str(data_format)
f"Attr(data_format): {data_format}."
)

if data_format == "NCHW":
@@ -1723,7 +1723,7 @@ def adaptive_avg_pool3d(
if data_format not in ["NCDHW", "NDHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received "
"Attr(data_format): %s." % str(data_format)
f"Attr(data_format): {data_format}."
)

if data_format == "NCDHW":
3 changes: 1 addition & 2 deletions python/paddle/optimizer/adamw.py
@@ -235,8 +235,7 @@ def __init__(

if not isinstance(learning_rate, (float, LRScheduler)):
raise TypeError(
"learning rate should be float or LRScheduler, got %s here"
% type(learning_rate)
f"learning rate should be float or LRScheduler, got {type(learning_rate)} here"
)
if grad_clip is not None:
if not isinstance(grad_clip, GradientClipBase):
21 changes: 7 additions & 14 deletions python/paddle/optimizer/lr.py
@@ -1192,8 +1192,7 @@ def __init__(
):
if not isinstance(milestones, (tuple, list)):
raise TypeError(
"The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received %s."
% type(milestones)
f"The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received {type(milestones)}."
)

if not all(
@@ -1314,8 +1313,7 @@ def __init__(
) -> None:
if not isinstance(step_size, int):
raise TypeError(
"The type of 'step_size' must be 'int', but received %s."
% type(step_size)
f"The type of 'step_size' must be 'int', but received {type(step_size)}."
)
if gamma >= 1.0:
raise ValueError('gamma should be < 1.0.')
@@ -1425,8 +1423,7 @@ def __init__(
):
if not callable(lr_lambda):
raise TypeError(
"The type of 'lr_lambda' in 'LambdaDecay' must be 'function', but received %s."
% type(lr_lambda)
f"The type of 'lr_lambda' in 'LambdaDecay' must be 'function', but received {type(lr_lambda)}."
)

self.lr_lambda = lr_lambda
@@ -1571,8 +1568,7 @@ def __init__(
self.threshold_mode = threshold_mode
if not isinstance(learning_rate, (float, int)):
raise TypeError(
"The type of 'learning_rate' in 'ReduceOnPlateau' must be 'float', but received %s."
% type(learning_rate)
f"The type of 'learning_rate' in 'ReduceOnPlateau' must be 'float', but received {type(learning_rate)}."
)

self.patience = patience
@@ -1779,13 +1775,11 @@ def __init__(
) -> None:
if not isinstance(T_max, int):
raise TypeError(
"The type of 'T_max' in 'CosineAnnealingDecay' must be 'int', but received %s."
% type(T_max)
f"The type of 'T_max' in 'CosineAnnealingDecay' must be 'int', but received {type(T_max)}."
)
if not isinstance(eta_min, (float, int)):
raise TypeError(
"The type of 'eta_min' in 'CosineAnnealingDecay' must be 'float, int', but received %s."
% type(eta_min)
f"The type of 'eta_min' in 'CosineAnnealingDecay' must be 'float, int', but received {type(eta_min)}."
)
assert T_max > 0 and isinstance(
T_max, int
@@ -1876,8 +1870,7 @@ def __init__(
) -> None:
if not callable(lr_lambda):
raise TypeError(
"The type of 'lr_lambda' in 'MultiplicativeDecay' must be 'function', but received %s."
% type(lr_lambda)
f"The type of 'lr_lambda' in 'MultiplicativeDecay' must be 'function', but received {type(lr_lambda)}."
)

self.lr_lambda = lr_lambda
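The lr.py changes also fold two-line `%`-formatted messages into single f-strings; embedding `type(x)` directly keeps the familiar `<class '...'>` rendering. A sketch with a deliberately wrong argument type:

```python
milestones = 30  # wrong type on purpose, to trigger the message

old = (
    "The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received %s."
    % type(milestones)
)
new = f"The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received {type(milestones)}."

assert old == new  # both end with "received <class 'int'>."
```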
15 changes: 5 additions & 10 deletions python/paddle/optimizer/optimizer.py
@@ -245,15 +245,13 @@ def __init__(
):
logging.info(
"If regularizer of a Parameter has been set by 'paddle.ParamAttr' or 'static.WeightNormParamAttr' already. "
"The weight_decay[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!"
% weight_decay.__str__()
f"The weight_decay[{weight_decay}] in Optimizer will not take effect, and it will only be applied to other Parameters!"
)
break

if not isinstance(learning_rate, (float, LRScheduler)):
raise TypeError(
"learning rate should be float or LRScheduler, got %s here"
% type(learning_rate)
f"learning rate should be float or LRScheduler, got {type(learning_rate)} here"
)
if grad_clip is not None:
if not isinstance(grad_clip, paddle.nn.clip.GradientClipBase):
@@ -611,8 +609,7 @@ def set_lr(self, value: float) -> None:
"""
if not isinstance(value, (int, float)):
raise TypeError(
"The type of 'value' in optimizer.set_lr must be float, but received %s."
% (type(value))
f"The type of 'value' in optimizer.set_lr must be float, but received {type(value)}."
)
if isinstance(self._learning_rate, LRScheduler):
raise RuntimeError(
@@ -684,8 +681,7 @@ def set_lr_scheduler(self, scheduler: LRScheduler) -> None:

if not isinstance(scheduler, LRScheduler):
raise TypeError(
"The type of 'scheduler' in optimizer.set_lr_schduler must be LRScheduler, but received %s."
% (type(scheduler))
f"The type of 'scheduler' in optimizer.set_lr_schduler must be LRScheduler, but received {type(scheduler)}."
)
self._learning_rate = scheduler

@@ -1694,8 +1690,7 @@ def append_regularization_ops(
repeate_regularizer = True
logging.info(
"If regularizer of a Parameter has been set by 'base.ParamAttr' or 'base.WeightNormParamAttr' already. "
"The Regularization[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!"
% regularization.__str__()
f"The Regularization[{regularization}] in Optimizer will not take effect, and it will only be applied to other Parameters!"
)
with param.block.program._optimized_guard([param, grad]):
new_grad = self._create_regularization_of_grad(
18 changes: 10 additions & 8 deletions python/paddle/profiler/timer.py
@@ -261,10 +261,12 @@ def _print_summary(self, benchmark):
return
print('Perf Summary'.center(100, '='))
if summary['reader_ratio'] != 0:
print('Reader Ratio: ' + '%.3f' % (summary['reader_ratio']) + '%')
print(
'Time Unit: s, IPS Unit: %s' % (benchmark.current_event.speed_unit)
)
print(
'Reader Ratio: '
+ '{:.3f}'.format(summary['reader_ratio'])
+ '%'
)
print(f'Time Unit: s, IPS Unit: {benchmark.current_event.speed_unit}')
print(
'|',
''.center(15),
@@ -283,9 +285,9 @@ def _print_stats(self, item, message_dict):
self._print_stats('ips', summary['ips_summary'])

def _print_stats(self, item, message_dict):
avg_str = '%.5f' % (message_dict['avg'])
max_str = '%.5f' % (message_dict['max'])
min_str = '%.5f' % (message_dict['min'])
avg_str = '{:.5f}'.format(message_dict['avg'])
max_str = '{:.5f}'.format(message_dict['max'])
min_str = '{:.5f}'.format(message_dict['min'])
print(
'|',
item.center(15),
@@ -379,7 +381,7 @@ def step_info(self, unit):
reader_average = self.current_event.reader_average()
batch_average = self.current_event.batch_average()
if reader_average:
message += ' reader_cost: %.5f s' % (reader_average)
message += f' reader_cost: {reader_average:.5f} s'
if batch_average:
if self.current_event.speed_mode == 'steps/s':
self.current_event.speed_unit = 'steps/s'
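In timer.py, `%.5f` becomes either an f-string format spec or `str.format`; the `'{:.5f}'.format(...)` spelling is presumably kept where the expression itself contains quotes, avoiding nested quotes inside an f-string on older Python versions. All three spellings render the same value:

```python
message_dict = {"avg": 0.123456789}  # hypothetical stats entry

a = "%.5f" % message_dict["avg"]
b = "{:.5f}".format(message_dict["avg"])
c = f"{message_dict['avg']:.5f}"

assert a == b == c == "0.12346"
```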
