From 99b35c17c00df393f1bcce70a84c0f801318d19e Mon Sep 17 00:00:00 2001 From: gouzil <66515297+gouzil@users.noreply.github.com> Date: Sat, 29 Jun 2024 02:48:21 +0800 Subject: [PATCH] [CodeStyle][UP031] fix some `python/paddle/` - part 8 (#65552) --------- Co-authored-by: SigureMo --- .../auto_parallel/static/converter.py | 21 +++--- python/paddle/nn/clip.py | 4 +- python/paddle/nn/functional/activation.py | 2 +- python/paddle/nn/functional/common.py | 4 +- python/paddle/nn/functional/pooling.py | 24 +++---- python/paddle/optimizer/adamw.py | 3 +- python/paddle/optimizer/lr.py | 21 ++---- python/paddle/optimizer/optimizer.py | 15 ++--- python/paddle/profiler/timer.py | 18 +++--- python/paddle/static/io.py | 22 +++---- python/paddle/static/io_utils.py | 3 +- python/paddle/static/pir_io.py | 12 ++-- .../static/quantization/quantization_pass.py | 64 +++++++++---------- python/paddle/tensor/creation.py | 2 +- python/paddle/tensor/manipulation.py | 33 ++++------ python/paddle/tensor/math.py | 32 +++++----- 16 files changed, 124 insertions(+), 156 deletions(-) diff --git a/python/paddle/distributed/auto_parallel/static/converter.py b/python/paddle/distributed/auto_parallel/static/converter.py index 7f1dcbb696e77..43ee062602c47 100644 --- a/python/paddle/distributed/auto_parallel/static/converter.py +++ b/python/paddle/distributed/auto_parallel/static/converter.py @@ -54,7 +54,7 @@ def _check_tensor_dict(self, tensors_dict): ) if not isinstance(tensors_dict, dict): raise TypeError( - f"The type of 'tensors_dict' should be 'dict', but got '{str(type(tensors_dict))}'." + f"The type of 'tensors_dict' should be 'dict', but got '{type(tensors_dict)}'." ) return tensors_dict @@ -67,7 +67,7 @@ def _check_pre_strategy(self, pre_strategy): if not isinstance(pre_strategy, dict): raise TypeError( "The type of 'pre_strategy' should be 'dict', " - f"but got '{str(type(pre_strategy))}'." + f"but got '{type(pre_strategy)}'." ) return pre_strategy @@ -80,7 +80,7 @@ def _check_cur_strategy(self, cur_strategy): if not isinstance(cur_strategy, dict): raise TypeError( "The type of 'cur_strategy' should be 'dict', " - f"but got '{str(type(cur_strategy))}'." + f"but got '{type(cur_strategy)}'." ) return cur_strategy @@ -150,7 +150,7 @@ def convert(self, strict=True): ) except ValueError as err: raise ValueError( - f"Fail to convert tensor '{str(tensor_name)}'. " + str(err) + f"Fail to convert tensor '{tensor_name}'. {err}" ) for tensor_name in self._pre_strategy: @@ -176,15 +176,15 @@ def convert(self, strict=True): tensor_not_in_cur = set(tensor_not_in_cur) - set(tensor_match_with_cur) if tensor_not_in_pre: warnings.warn( - f"tensors [{str(tensor_not_in_pre)}] are not found in last training strategy." + f"tensors [{tensor_not_in_pre}] are not found in last training strategy." ) if tensor_not_in_cur: warnings.warn( - f"tensors [{str(tensor_not_in_cur)}] are not found in current training strategy." + f"tensors [{tensor_not_in_cur}] are not found in current training strategy." ) if tensor_not_in_ckpt: warnings.warn( - f"tensors [{str(tensor_not_in_ckpt)}] are found in pre_strategy, but are not found" + f"tensors [{tensor_not_in_ckpt}] are found in pre_strategy, but are not found" "in checkpoint files, please check your checkpoint files." ) @@ -215,8 +215,7 @@ def convert_with_prefix_match( ) except ValueError as err: raise ValueError( - f"Fail to convert tensor '{str(cur_name)}' by '{str(pre_name)}'. " - + str(err) + f"Fail to convert tensor '{cur_name}' by '{pre_name}'. 
{err}" ) self._logger.info( f"tensor [{cur_name}] is matched with tensor [{pre_name}]" @@ -302,7 +301,7 @@ def merge_with_dist_attr(tensor_list, dist_attr): if len(partition_tensor_list) != 1: raise ValueError( - f"Fail to merge tensor with dist_attr '{str(dist_attr)}'." + f"Fail to merge tensor with dist_attr '{dist_attr}'." ) complete_tensor = partition_tensor_list[0][0] return complete_tensor @@ -327,7 +326,7 @@ def slice_with_dist_attr(tensor, dist_attr): ) if sliced_tensor_index not in range(len(sliced_tensor_list)): raise ValueError( - f"Fail to slice tensor with dist_attr '{str(dist_attr)}'." + f"Fail to slice tensor with dist_attr '{dist_attr}'." ) sliced_tensor = sliced_tensor_list[sliced_tensor_index] return sliced_tensor diff --git a/python/paddle/nn/clip.py b/python/paddle/nn/clip.py index c7ef5f64a5ea7..4702751e28305 100644 --- a/python/paddle/nn/clip.py +++ b/python/paddle/nn/clip.py @@ -532,7 +532,7 @@ def __init__(self, clip_norm): self.clip_norm = float(clip_norm) def __str__(self): - return "Gradient Clip By Norm, clip_norm=%f" % self.clip_norm + return f"Gradient Clip By Norm, clip_norm={self.clip_norm:f}" def _clip_gradients(self, params_grads): params_and_grads = [] @@ -676,7 +676,7 @@ def __init__( self._async_add_n = None def __str__(self): - return "Gradient Clip By GlobalNorm, global_norm=%f" % (self.clip_norm) + return f"Gradient Clip By GlobalNorm, global_norm={self.clip_norm:f}" @imperative_base.no_grad() def _dygraph_clip(self, params_grads): diff --git a/python/paddle/nn/functional/activation.py b/python/paddle/nn/functional/activation.py index 8456b67e6adb2..766d122fe8cae 100644 --- a/python/paddle/nn/functional/activation.py +++ b/python/paddle/nn/functional/activation.py @@ -929,7 +929,7 @@ def maxout( if axis not in [1, -1, 3]: raise ValueError( "Attr(axis) should be 1 when data format is NCHW, -1 or 3 when data format is NHWC. Received " - "Attr(axis): %s." % str(axis) + f"Attr(axis): {axis}." ) if axis == -1: axis = 3 diff --git a/python/paddle/nn/functional/common.py b/python/paddle/nn/functional/common.py index 99e8c6b4354c5..9fa8904a3611a 100644 --- a/python/paddle/nn/functional/common.py +++ b/python/paddle/nn/functional/common.py @@ -1462,7 +1462,7 @@ def dropout2d( if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. Received " - "Attr(data_format): %s." % str(data_format) + f"Attr(data_format): {data_format}." ) return dropout( @@ -1524,7 +1524,7 @@ def dropout3d( if data_format not in ["NCDHW", "NDHWC"]: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received " - "Attr(data_format): %s." % str(data_format) + f"Attr(data_format): {data_format}." ) return dropout( diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py index 688ef69cb61cf..d5f7abe36b924 100755 --- a/python/paddle/nn/functional/pooling.py +++ b/python/paddle/nn/functional/pooling.py @@ -102,7 +102,7 @@ def _channel_last(data_format, num_dims): if data_format not in ['NCL', 'NLC']: raise ValueError( "Attr(data_format) should be 'NCL' or 'NLC'. Received " - "Attr(data_format): %s" % str(data_format) + f"Attr(data_format): {data_format}" ) else: return True if data_format == "NLC" else False @@ -110,7 +110,7 @@ def _channel_last(data_format, num_dims): if data_format not in ['NCHW', 'NHWC']: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. 
Received " - "Attr(data_format): %s" % str(data_format) + f"Attr(data_format): {data_format}" ) else: return True if data_format == "NHWC" else False @@ -118,7 +118,7 @@ def _channel_last(data_format, num_dims): if data_format not in ['NCDHW', 'NDHWC']: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received " - "Attr(data_format): %s" % str(data_format) + f"Attr(data_format): {data_format}" ) else: return True if data_format == "NDHWC" else False @@ -702,9 +702,9 @@ def max_pool1d( def _unpool_output_size(x, kernel_size, stride, padding, output_size): - assert output_size is None or isinstance(output_size, (list, tuple)), ( - "Required output_size is None|list|tuple, but received %s" % output_size - ) + assert output_size is None or isinstance( + output_size, (list, tuple) + ), f"Required output_size is None|list|tuple, but received {output_size}" input_size = x.shape default_size = [] for d in range(len(kernel_size)): @@ -818,7 +818,7 @@ def max_unpool1d( if data_format not in ["NCL"]: raise ValueError( "Attr(data_format) should be 'NCL'. Received " - "Attr(data_format): %s." % str(data_format) + f"Attr(data_format): {data_format}." ) data_format = "NCHW" x = unsqueeze(x, [2]) @@ -963,7 +963,7 @@ def max_unpool2d( if data_format not in ["NCHW"]: raise ValueError( "Attr(data_format) should be 'NCHW'. Received " - "Attr(data_format): %s." % str(data_format) + f"Attr(data_format): {data_format}." ) output_size = _unpool_output_size( @@ -1092,7 +1092,7 @@ def max_unpool3d( if data_format not in ["NCDHW"]: raise ValueError( "Attr(data_format) should be 'NCDHW'. Received " - "Attr(data_format): %s." % str(data_format) + f"Attr(data_format): {data_format}." ) output_size = _unpool_output_size( @@ -1197,7 +1197,7 @@ def max_pool2d( if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. Received " - "Attr(data_format): %s." % str(data_format) + f"Attr(data_format): {data_format}." ) channel_last = True if data_format == "NHWC" else False @@ -1583,7 +1583,7 @@ def adaptive_avg_pool2d( if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'. Received " - "Attr(data_format): %s." % str(data_format) + f"Attr(data_format): {data_format}." ) if data_format == "NCHW": @@ -1723,7 +1723,7 @@ def adaptive_avg_pool3d( if data_format not in ["NCDHW", "NDHWC"]: raise ValueError( "Attr(data_format) should be 'NCDHW' or 'NDHWC'. Received " - "Attr(data_format): %s." % str(data_format) + f"Attr(data_format): {data_format}." ) if data_format == "NCDHW": diff --git a/python/paddle/optimizer/adamw.py b/python/paddle/optimizer/adamw.py index 8f244af82d680..fbbd943fd5fbc 100644 --- a/python/paddle/optimizer/adamw.py +++ b/python/paddle/optimizer/adamw.py @@ -235,8 +235,7 @@ def __init__( if not isinstance(learning_rate, (float, LRScheduler)): raise TypeError( - "learning rate should be float or LRScheduler, got %s here" - % type(learning_rate) + f"learning rate should be float or LRScheduler, got {type(learning_rate)} here" ) if grad_clip is not None: if not isinstance(grad_clip, GradientClipBase): diff --git a/python/paddle/optimizer/lr.py b/python/paddle/optimizer/lr.py index 73b29f87474f4..98a834388f203 100644 --- a/python/paddle/optimizer/lr.py +++ b/python/paddle/optimizer/lr.py @@ -1192,8 +1192,7 @@ def __init__( ): if not isinstance(milestones, (tuple, list)): raise TypeError( - "The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received %s." 
- % type(milestones) + f"The type of 'milestones' in 'MultiStepDecay' must be 'tuple, list', but received {type(milestones)}." ) if not all( @@ -1314,8 +1313,7 @@ def __init__( ) -> None: if not isinstance(step_size, int): raise TypeError( - "The type of 'step_size' must be 'int', but received %s." - % type(step_size) + f"The type of 'step_size' must be 'int', but received {type(step_size)}." ) if gamma >= 1.0: raise ValueError('gamma should be < 1.0.') @@ -1425,8 +1423,7 @@ def __init__( ): if not callable(lr_lambda): raise TypeError( - "The type of 'lr_lambda' in 'LambdaDecay' must be 'function', but received %s." - % type(lr_lambda) + f"The type of 'lr_lambda' in 'LambdaDecay' must be 'function', but received {type(lr_lambda)}." ) self.lr_lambda = lr_lambda @@ -1571,8 +1568,7 @@ def __init__( self.threshold_mode = threshold_mode if not isinstance(learning_rate, (float, int)): raise TypeError( - "The type of 'learning_rate' in 'ReduceOnPlateau' must be 'float', but received %s." - % type(learning_rate) + f"The type of 'learning_rate' in 'ReduceOnPlateau' must be 'float', but received {type(learning_rate)}." ) self.patience = patience @@ -1779,13 +1775,11 @@ def __init__( ) -> None: if not isinstance(T_max, int): raise TypeError( - "The type of 'T_max' in 'CosineAnnealingDecay' must be 'int', but received %s." - % type(T_max) + f"The type of 'T_max' in 'CosineAnnealingDecay' must be 'int', but received {type(T_max)}." ) if not isinstance(eta_min, (float, int)): raise TypeError( - "The type of 'eta_min' in 'CosineAnnealingDecay' must be 'float, int', but received %s." - % type(eta_min) + f"The type of 'eta_min' in 'CosineAnnealingDecay' must be 'float, int', but received {type(eta_min)}." ) assert T_max > 0 and isinstance( T_max, int @@ -1876,8 +1870,7 @@ def __init__( ) -> None: if not callable(lr_lambda): raise TypeError( - "The type of 'lr_lambda' in 'MultiplicativeDecay' must be 'function', but received %s." - % type(lr_lambda) + f"The type of 'lr_lambda' in 'MultiplicativeDecay' must be 'function', but received {type(lr_lambda)}." ) self.lr_lambda = lr_lambda diff --git a/python/paddle/optimizer/optimizer.py b/python/paddle/optimizer/optimizer.py index eebde60fdea5c..0c8ed9541d204 100644 --- a/python/paddle/optimizer/optimizer.py +++ b/python/paddle/optimizer/optimizer.py @@ -245,15 +245,13 @@ def __init__( ): logging.info( "If regularizer of a Parameter has been set by 'paddle.ParamAttr' or 'static.WeightNormParamAttr' already. " - "The weight_decay[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!" - % weight_decay.__str__() + f"The weight_decay[{weight_decay}] in Optimizer will not take effect, and it will only be applied to other Parameters!" ) break if not isinstance(learning_rate, (float, LRScheduler)): raise TypeError( - "learning rate should be float or LRScheduler, got %s here" - % type(learning_rate) + f"learning rate should be float or LRScheduler, got {type(learning_rate)} here" ) if grad_clip is not None: if not isinstance(grad_clip, paddle.nn.clip.GradientClipBase): @@ -611,8 +609,7 @@ def set_lr(self, value: float) -> None: """ if not isinstance(value, (int, float)): raise TypeError( - "The type of 'value' in optimizer.set_lr must be float, but received %s." - % (type(value)) + f"The type of 'value' in optimizer.set_lr must be float, but received {type(value)}." 
) if isinstance(self._learning_rate, LRScheduler): raise RuntimeError( @@ -684,8 +681,7 @@ def set_lr_scheduler(self, scheduler: LRScheduler) -> None: if not isinstance(scheduler, LRScheduler): raise TypeError( - "The type of 'scheduler' in optimizer.set_lr_schduler must be LRScheduler, but received %s." - % (type(scheduler)) + f"The type of 'scheduler' in optimizer.set_lr_schduler must be LRScheduler, but received {type(scheduler)}." ) self._learning_rate = scheduler @@ -1694,8 +1690,7 @@ def append_regularization_ops( repeate_regularizer = True logging.info( "If regularizer of a Parameter has been set by 'base.ParamAttr' or 'base.WeightNormParamAttr' already. " - "The Regularization[%s] in Optimizer will not take effect, and it will only be applied to other Parameters!" - % regularization.__str__() + f"The Regularization[{regularization}] in Optimizer will not take effect, and it will only be applied to other Parameters!" ) with param.block.program._optimized_guard([param, grad]): new_grad = self._create_regularization_of_grad( diff --git a/python/paddle/profiler/timer.py b/python/paddle/profiler/timer.py index 8e2effe74ff4c..e50a7b01868b9 100644 --- a/python/paddle/profiler/timer.py +++ b/python/paddle/profiler/timer.py @@ -261,10 +261,12 @@ def _print_summary(self, benchmark): return print('Perf Summary'.center(100, '=')) if summary['reader_ratio'] != 0: - print('Reader Ratio: ' + '%.3f' % (summary['reader_ratio']) + '%') - print( - 'Time Unit: s, IPS Unit: %s' % (benchmark.current_event.speed_unit) - ) + print( + 'Reader Ratio: ' + + '{:.3f}'.format(summary['reader_ratio']) + + '%' + ) + print(f'Time Unit: s, IPS Unit: {benchmark.current_event.speed_unit}') print( '|', ''.center(15), @@ -283,9 +285,9 @@ def _print_summary(self, benchmark): self._print_stats('ips', summary['ips_summary']) def _print_stats(self, item, message_dict): - avg_str = '%.5f' % (message_dict['avg']) - max_str = '%.5f' % (message_dict['max']) - min_str = '%.5f' % (message_dict['min']) + avg_str = '{:.5f}'.format(message_dict['avg']) + max_str = '{:.5f}'.format(message_dict['max']) + min_str = '{:.5f}'.format(message_dict['min']) print( '|', item.center(15), @@ -379,7 +381,7 @@ def step_info(self, unit): reader_average = self.current_event.reader_average() batch_average = self.current_event.batch_average() if reader_average: - message += ' reader_cost: %.5f s' % (reader_average) + message += f' reader_cost: {reader_average:.5f} s' if batch_average: if self.current_event.speed_mode == 'steps/s': self.current_event.speed_unit = 'steps/s' diff --git a/python/paddle/static/io.py b/python/paddle/static/io.py index 7e2dd81f09dce..4aaa380b0ae24 100644 --- a/python/paddle/static/io.py +++ b/python/paddle/static/io.py @@ -188,8 +188,7 @@ def normalize_program(program, feed_vars, fetch_vars, **kwargs): return normalize_pir_program(program, feed_vars, fetch_vars, **kwargs) if not isinstance(program, Program): raise TypeError( - "program type must be `base.Program`, but received `%s`" - % type(program) + f"program type must be `base.Program`, but received `{type(program)}`" ) if not isinstance(feed_vars, list): feed_vars = [feed_vars] @@ -1225,8 +1224,7 @@ def load_vars( main_program = default_main_program() if not isinstance(main_program, Program): raise TypeError( - "The type of input main_program is invalid, expected type is base.Program, but received %s" - % type(main_program) + f"The type of input main_program is invalid, expected type is base.Program, but received {type(main_program)}" ) load_vars( @@ -1245,8 +1243,7 
@@ def load_vars( if not isinstance(main_program, Program): raise TypeError( - "The type of input main_program is invalid, expected type is base.Program, but received %s" - % type(main_program) + f"The type of input main_program is invalid, expected type is base.Program, but received {type(main_program)}" ) # save origin param shape @@ -1580,8 +1577,9 @@ def load(program, model_path, executor=None, var_list=None): if len(binary_file_set) > 0: unused_var_list = " ".join(list(binary_file_set)) _logger.warning( - "variable file [ %s ] not used" - % (" ".join(list(binary_file_set))) + "variable file [ {} ] not used".format( + " ".join(list(binary_file_set)) + ) ) try: load_vars( @@ -1895,9 +1893,11 @@ def clone_var_to_block(block, var): shape=var.shape, dtype=var.dtype, type=var.type, - lod_level=var.lod_level - if var.desc.type() == core.VarDesc.VarType.LOD_TENSOR - else None, + lod_level=( + var.lod_level + if var.desc.type() == core.VarDesc.VarType.LOD_TENSOR + else None + ), persistable=True, ) diff --git a/python/paddle/static/io_utils.py b/python/paddle/static/io_utils.py index 5c77b032e7c19..132e796204177 100644 --- a/python/paddle/static/io_utils.py +++ b/python/paddle/static/io_utils.py @@ -77,8 +77,7 @@ def _get_valid_program(program=None): ) if not isinstance(program, paddle.static.Program): raise TypeError( - "The type of input program is invalid, expected type is base.Program, but received %s" - % type(program) + f"The type of input program is invalid, expected type is base.Program, but received {type(program)}" ) return program diff --git a/python/paddle/static/pir_io.py b/python/paddle/static/pir_io.py index 73ab886e99998..86f51446ff950 100644 --- a/python/paddle/static/pir_io.py +++ b/python/paddle/static/pir_io.py @@ -163,8 +163,7 @@ def pir_prune_with_input(program, feed_vars, target_vars): """ if not isinstance(program, paddle.static.Program): raise TypeError( - "program type must be `paddle.static.Program`, but received `%s`" - % type(program) + f"program type must be `paddle.static.Program`, but received `{type(program)}`" ) total_ops = program.global_block().ops @@ -266,8 +265,7 @@ def normalize_pir_program(program, feed_vars, fetch_vars, **kwargs): """ if not isinstance(program, paddle.static.Program): raise TypeError( - "program type must be `paddle.static.Program`, but received `%s`" - % type(program) + f"program type must be `paddle.static.Program`, but received `{type(program)}`" ) if not isinstance(feed_vars, list): feed_vars = [feed_vars] @@ -482,8 +480,7 @@ def load_vars_pir( if not isinstance(main_program, paddle.static.Program): raise TypeError( - "The type of input main_program is invalid, expected type is paddle.static.Program, but received %s" - % type(main_program) + f"The type of input main_program is invalid, expected type is paddle.static.Program, but received {type(main_program)}" ) param, opt = get_pir_parameters(main_program) vars = param + opt @@ -503,8 +500,7 @@ def load_vars_pir( if not isinstance(main_program, paddle.static.Program): raise TypeError( - "The type of input main_program is invalid, expected type is paddle.static.Program, but received %s" - % type(main_program) + f"The type of input main_program is invalid, expected type is paddle.static.Program, but received {type(main_program)}" ) # TODO(chenzhiyang):save origin param shape, check vars diff --git a/python/paddle/static/quantization/quantization_pass.py b/python/paddle/static/quantization/quantization_pass.py index 9d8a70ffcdaee..70535ea816b44 100644 --- 
a/python/paddle/static/quantization/quantization_pass.py +++ b/python/paddle/static/quantization/quantization_pass.py @@ -209,15 +209,14 @@ def __init__( ), "The activation quantization type does not support 'channel_wise_abs_max'." if activation_quantize_type not in quant_type: raise ValueError( - "Unknown activation_quantize_type : '%s'. It can only be " + f"Unknown activation_quantize_type : '{activation_quantize_type}'. It can only be " "'abs_max' or 'range_abs_max' or 'moving_average_abs_max'." - % (str(activation_quantize_type)) ) if weight_quantize_type not in quant_type: raise ValueError( - "Unknown weight_quantize_type: '%s'. It can only be " + f"Unknown weight_quantize_type: '{weight_quantize_type}'. It can only be " "'abs_max' or 'channel_wise_abs_max' or 'range_abs_max' " - "or 'moving_average_abs_max'." % (str(weight_quantize_type)) + "or 'moving_average_abs_max'." ) self._activation_quantize_type = activation_quantize_type @@ -231,7 +230,7 @@ def __init__( op + " is not supported for quantization." ) self._quantizable_grad_ops = [ - '%s_grad' % (op) for op in self._quantizable_ops + f'{op}_grad' for op in self._quantizable_ops ] self._is_test = is_test self._global_step = None @@ -1037,19 +1036,19 @@ def _quantized_var_name(self, var_name): """ Return quantized variable name for the input `var_name`. """ - return "%s.quantized" % (var_name) + return f"{var_name}.quantized" def _dequantized_var_name(self, var_name): """ Return dequantized variable name for the input `var_name`. """ - return "%s.dequantized" % (var_name) + return f"{var_name}.dequantized" def _quantized_scale_name(self, var_name): """ Return the scale name of quantized variable for the input `var_name`. """ - return "%s@scale" % (var_name) + return f"{var_name}@scale" def _is_skip_quant(self, graph, op_node): """ @@ -1267,9 +1266,7 @@ def _insert_post_channel_dequant_op(self, graph, op_node, quant_axis): if original_var_name in persistable_vars: assert isinstance( scale_v, list - ), 'The scale of parameter %s is not a list.' % ( - original_var_name - ) + ), f'The scale of parameter {original_var_name} is not a list.' channel_scale = np.array(scale_v) else: assert isinstance(scale_v, IrNode) @@ -1277,8 +1274,8 @@ def _insert_post_channel_dequant_op(self, graph, op_node, quant_axis): if len(op_node.output_arg_names()) != 1: raise ValueError( - "Only support one output, but op %s has" - " more than one output." % (op_node.name()) + f"Only support one output, but op {op_node.name()} has" + " more than one output." ) output_var_node = graph._find_node_by_name( @@ -1354,9 +1351,7 @@ def _insert_post_dequant_op(self, graph, op_node): if original_var_name in persistable_vars: assert self._is_float( scale_v - ), 'The scale of parameter %s is not a float.' % ( - original_var_name - ) + ), f'The scale of parameter {original_var_name} is not a float.' scale_v = 1e-8 if scale_v == 0.0 else scale_v max_range *= param_range / scale_v else: @@ -1366,8 +1361,8 @@ def _insert_post_dequant_op(self, graph, op_node): if len(op_node.output_arg_names()) != 1: raise ValueError( - "Only support one output, but op %s has" - " more than one output." % (op_node.name()) + f"Only support one output, but op {op_node.name()} has" + " more than one output." ) output_var_node = graph._find_node_by_name( @@ -1438,7 +1433,7 @@ def _dequantized_var_name(self, var_name): """ Return dequantized variable name for the input `var_name`. 
""" - return "%s.dequantized" % (var_name) + return f"{var_name}.dequantized" def _is_float(self, v): return isinstance(v, (float, np.float16, np.float32, np.float64)) @@ -1748,7 +1743,7 @@ def _scale_name(self, var_name): """ Return the scale name for the var named `var_name`. """ - return "%s@scale" % (var_name) + return f"{var_name}@scale" class OutScaleForInferencePass: @@ -1820,7 +1815,7 @@ def _scale_name(self, var_name): """ Return the scale name for the var named `var_name`. """ - return "%s@scale" % (var_name) + return f"{var_name}@scale" class AddQuantDequantPass: @@ -1875,7 +1870,7 @@ def __init__( op_type + " is not supported for quantization." ) self._quantizable_grad_op_type = [ - '%s_grad' % (op) for op in self._quantizable_op_type + f'{op}_grad' for op in self._quantizable_op_type ] assert self._scope is not None, "scope must not be None." @@ -2335,25 +2330,25 @@ def _quantized_var_name(self, var_name): """ Return quantized variable name for the input `var_name`. """ - return "%s.quantized" % (var_name) + return f"{var_name}.quantized" def _dequantized_var_name(self, var_name): """ Return dequantized variable name for the input `var_name`. """ - return "%s.dequantized" % (var_name) + return f"{var_name}.dequantized" def _quantized_scale_name(self, var_name): """ Return the scale name of quantized variable for the input `var_name`. """ - return "%s@scale" % (var_name) + return f"{var_name}@scale" def _zero_point_name(self, var_name): """ Return the scale name for the var named `var_name`. """ - return "%s@zero_point" % (var_name) + return f"{var_name}@zero_point" class QuantizationTransformPassV2(QuantizationTransformPass): @@ -2479,15 +2474,14 @@ def __init__( ), "The activation quantization type does not support 'channel_wise_abs_max'." if activation_quantize_type not in quant_type: raise ValueError( - "Unknown activation_quantize_type : '%s'. It can only be " + f"Unknown activation_quantize_type : '{activation_quantize_type}'. It can only be " "'abs_max' or 'range_abs_max' or 'moving_average_abs_max'." - % (str(activation_quantize_type)) ) if weight_quantize_type not in quant_type: raise ValueError( - "Unknown weight_quantize_type: '%s'. It can only be " + f"Unknown weight_quantize_type: '{weight_quantize_type}'. It can only be " "'abs_max' or 'channel_wise_abs_max' or 'range_abs_max' " - "or 'moving_average_abs_max'." % (str(weight_quantize_type)) + "or 'moving_average_abs_max'." ) self._activation_quantize_type = activation_quantize_type @@ -2501,7 +2495,7 @@ def __init__( op + " is not supported for quantization." ) self._quantizable_grad_ops = [ - '%s_grad' % (op) for op in self._quantizable_ops + f'{op}_grad' for op in self._quantizable_ops ] self._is_test = is_test self._global_step = None @@ -2863,7 +2857,7 @@ def __init__( op_type + " is not supported for quantization." ) self._quantizable_grad_op_type = [ - '%s_grad' % (op) for op in self._quantizable_op_type + f'{op}_grad' for op in self._quantizable_op_type ] assert self._scope is not None, "scope must not be None." @@ -3147,13 +3141,13 @@ def _quantized_var_name(self, var_name): """ Return quantized variable name for the input `var_name`. """ - return "%s.quantized" % (var_name) + return f"{var_name}.quantized" def _zero_point_name(self, var_name): """ Return the scale name for the var named `var_name`. """ - return "%s@zero_point" % (var_name) + return f"{var_name}@zero_point" class QuantWeightPass: @@ -3386,7 +3380,7 @@ def _scale_name(self, var_name): """ Return the scale name for the var named `var_name`. 
""" - return "%s@scale" % (var_name) + return f"{var_name}@scale" def _insert_quant_dequant_op(self, graph, var_node): assert var_node.is_var(), f'{var_node.name()} is not a var' diff --git a/python/paddle/tensor/creation.py b/python/paddle/tensor/creation.py index 5a8d1ab85ba3f..93de942addc16 100644 --- a/python/paddle/tensor/creation.py +++ b/python/paddle/tensor/creation.py @@ -1913,7 +1913,7 @@ def __check_input(input, offset, dim1, dim2): input_shape = list(input.shape) assert len(input_shape) >= 1, ( "Input must be at least 1-dimensional, " - "But received Input's dimensional: %s.\n" % len(input_shape) + f"But received Input's dimensional: {len(input_shape)}.\n" ) assert np.abs(dim1) <= len(input_shape), ( diff --git a/python/paddle/tensor/manipulation.py b/python/paddle/tensor/manipulation.py index 3fb3b3105ddda..5e853e2c20795 100644 --- a/python/paddle/tensor/manipulation.py +++ b/python/paddle/tensor/manipulation.py @@ -863,30 +863,25 @@ def crop( def _attr_shape_check(shape_val): if not isinstance(shape_val, int): raise TypeError( - "Attr(shape)'s dtype of Op(crop_tensor) should be int32, but received: %s." - % type(shape_val) + f"Attr(shape)'s dtype of Op(crop_tensor) should be int32, but received: {type(shape_val)}." ) if shape_val == 0: raise ValueError( - "Attr(shape) of Op(crop_tensor) should not be zero, but received: %s." - % str(shape_val) + f"Attr(shape) of Op(crop_tensor) should not be zero, but received: {shape_val}." ) if shape_val < -1: raise ValueError( - "When the element in Attr(shape) of Op(crop_tensor) is negative, only -1 is supported, but received: %s." - % str(shape_val) + f"When the element in Attr(shape) of Op(crop_tensor) is negative, only -1 is supported, but received: {shape_val}." ) def _attr_offsets_check(offset_val): if not isinstance(offset_val, int): raise TypeError( - "Attr(offsets)'s dtype of Op(crop_tensor) should be int32, but received: %s." - % type(offset_val) + f"Attr(offsets)'s dtype of Op(crop_tensor) should be int32, but received: {type(offset_val)}." ) if offset_val < 0: raise ValueError( - "Attr(offsets) of Op(crop_tensor) should be greater or equal to zero, but received: %s." - % str(offset_val) + f"Attr(offsets) of Op(crop_tensor) should be greater or equal to zero, but received: {offset_val}." ) if in_pir_mode(): @@ -1020,8 +1015,7 @@ def fill_(x: Tensor, value: float) -> Tensor: """ if not isinstance(value, (float, int)): raise TypeError( - "The type of 'value' must be int or float, but received %s." - % (type(value)) + f"The type of 'value' must be int or float, but received {type(value)}." ) return _C_ops.fill_(x, value) @@ -1431,8 +1425,7 @@ def concat( assert len(input) == 1, ( "If the elements of 'input' in concat are Variable(LoDTensorArray), " - "number of the elements must be 1, but received %s." - % len(input) + f"number of the elements must be 1, but received {len(input)}." ) out_index = helper.create_variable_for_type_inference(dtype="int32") helper.append_op( @@ -2212,7 +2205,7 @@ def stack( if x[0].is_dense_tensor_array_type(): assert len(x) == 1, ( "If the elements of 'x' in stack are Variable(LoDTensorArray), " - "number of the elements must be 1, but received %s." % len(x) + f"number of the elements must be 1, but received {len(x)}." 
) out, _ = _C_ops.array_to_tensor(x, axis, True) return out @@ -2225,7 +2218,7 @@ def stack( if x[0].desc.type() == core.VarDesc.VarType.LOD_TENSOR_ARRAY: assert len(x) == 1, ( "If the elements of 'x' in stack are Variable(LoDTensorArray), " - "number of the elements must be 1, but received %s." % len(x) + f"number of the elements must be 1, but received {len(x)}." ) out_index = helper.create_variable_for_type_inference(dtype="int32") @@ -2638,7 +2631,7 @@ def split( elif not isinstance(num_or_sections, int): raise TypeError( "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but " - "received %s." % (type(num_or_sections)) + f"received {type(num_or_sections)}." ) if isinstance(num_or_sections, int): @@ -2657,7 +2650,7 @@ def split( if not isinstance(num_or_sections, (int, list, tuple)): raise TypeError( "The type of 'num_or_sections' in split must be int, list or tuple in imperative mode, but " - "received %s." % (type(num_or_sections)) + f"received {type(num_or_sections)}." ) if isinstance(num_or_sections, int): assert num_or_sections > 0, 'num_or_sections must be than 0.' @@ -3929,7 +3922,7 @@ def unbind(input: Tensor, axis: int = 0) -> list[Tensor]: """ if not isinstance(axis, (int)): raise TypeError( - "The type of 'axis' must be int, but received %s." % (type(axis)) + f"The type of 'axis' must be int, but received {type(axis)}." ) if axis not in range(-input.ndim, input.ndim): @@ -6391,7 +6384,7 @@ def put_along_axis( if in_dynamic_or_pir_mode(): if convert_dtype(indices.dtype) not in ['int32', 'int64']: raise TypeError( - f"The data type of indices should be one of ['int32', 'int64'], but got {str(convert_dtype(indices.dtype))}" + f"The data type of indices should be one of ['int32', 'int64'], but got {convert_dtype(indices.dtype)}" ) return _C_ops.put_along_axis( arr, indices, values, axis, reduce, include_self diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py index b838ca66f5bbc..2441b3aae4c2b 100644 --- a/python/paddle/tensor/math.py +++ b/python/paddle/tensor/math.py @@ -585,7 +585,7 @@ def pow_(x: Tensor, y: float | Tensor, name: str | None = None) -> Tensor: if isinstance(y, (int, float)): return _C_ops.pow_(x, y) else: - raise TypeError('y must be scalar type, but received: %s ' % (type(y))) + raise TypeError(f'y must be scalar type, but received: {type(y)} ') OP_NAMEMAPPING = { @@ -1153,10 +1153,12 @@ def _elementwise_op_with_axis(x, y, axis=-1, name=None, op_type="Undefined"): assert ( in_dynamic_or_pir_mode() ), "You can only call `_elementwise_op_with_axis` function within in_dynamic_or_pir_mode" - assert op_type in ["add", "subtract", "multiply", "divide"], ( - "op_name input error! _elementwise_op_with_axis is an inner function to replace elementwise_add/sub/mul/div. Input op_name=%s, Expect op_name=[add|subtract|multiply|divide]\n" - % op_type - ) + assert op_type in [ + "add", + "subtract", + "multiply", + "divide", + ], f"op_name input error! _elementwise_op_with_axis is an inner function to replace elementwise_add/sub/mul/div. 
Input op_name={op_type}, Expect op_name=[add|subtract|multiply|divide]\n" op = getattr(_C_ops, op_type) x_shape = list(x.shape) y_shape = list(y.shape) @@ -3901,7 +3903,7 @@ def __check_input(x, offset, axis1, axis2): input_shape = list(x.shape) assert len(input_shape) >= 2, ( "The x must be at least 2-dimensional, " - "But received Input x's dimensional: %s.\n" % len(input_shape) + f"But received Input x's dimensional: {len(input_shape)}.\n" ) axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1 @@ -4033,7 +4035,7 @@ def __check_input(x, offset, axis1, axis2): input_shape = list(x.shape) assert len(input_shape) >= 2, ( "The x must be at least 2-dimensional, " - "But received Input x's dimensional: %s.\n" % len(input_shape) + f"But received Input x's dimensional: {len(input_shape)}.\n" ) axis1_ = axis1 if axis1 >= 0 else len(input_shape) + axis1 @@ -5559,8 +5561,7 @@ def multigammaln(x: Tensor, p: int, name: str | None = None) -> Tensor: 26.09257698 , 170.68318176]) """ assert p >= 1, ( - "The p must be greater than or equal to 1, " - "But received p is %s.\n" % p + "The p must be greater than or equal to 1, " f"But received p is {p}.\n" ) c = 0.25 * p * (p - 1) * math.log(math.pi) b = 0.5 * paddle.arange(start=(1 - p), end=1, step=1, dtype=x.dtype) @@ -5574,8 +5575,7 @@ def multigammaln_(x: Tensor, p: int, name: str | None = None) -> Tensor: Please refer to :ref:`api_paddle_multigammaln`. """ assert p >= 1, ( - "The p must be greater than or equal to 1, " - "But received p is %s.\n" % p + "The p must be greater than or equal to 1, " f"But received p is {p}.\n" ) c = 0.25 * p * (p - 1) * math.log(math.pi) c = paddle.to_tensor(c, dtype=x.dtype) @@ -7409,12 +7409,11 @@ def polygamma(x: Tensor, n: int, name: str | None = None) -> Tensor: """ if not isinstance(n, int): raise TypeError( - "The input of n must be int type, but received: %s " % (type(n)) + f"The input of n must be int type, but received: {type(n)} " ) if n < 0: raise ValueError( - "The input of n must be greater than or equal to 0. But received n = %s" - % (n) + f"The input of n must be greater than or equal to 0. But received n = {n}" ) if n == 0: return digamma(x) @@ -7445,12 +7444,11 @@ def polygamma_(x: Tensor, n: int, name: str | None = None) -> Tensor: """ if not isinstance(n, int): raise TypeError( - "The input of n must be int type, but received: %s " % (type(n)) + f"The input of n must be int type, but received: {type(n)} " ) if n < 0: raise ValueError( - "The input of n must be greater than or equal to 0. But received n = %s" - % (n) + f"The input of n must be greater than or equal to 0. But received n = {n}" ) if n == 0: return digamma_(x)
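
Note on the conversion pattern shown above: UP031 is the pyupgrade/ruff rule that flags printf-style "%" string formatting, and every hunk in this patch applies the same mechanical rewrite from "%"-interpolation to an f-string. The snippet below is a minimal standalone sketch of that pattern using hypothetical names, not code taken from the patch:

    # Before: printf-style formatting, flagged by UP031.
    # "value" is a hypothetical variable used only for illustration.
    value = [1, 2, 3]
    msg = "unexpected type: %s, value: %s" % (type(value), str(value))
    print(msg)

    # After: the equivalent f-string. The explicit str(...) wrapper can be
    # dropped because f-string interpolation already renders each expression
    # with its default string conversion (equivalent to str() for typical
    # objects), and a format spec such as "%f" maps to "{...:f}".
    msg = f"unexpected type: {type(value)}, value: {value}"
    print(msg)

This is why wrappers like str(...) disappear in the hunks above and why numeric specs such as "%f" and "%.5f" become "{...:f}" and "{...:.5f}" rather than plain "{...}".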