
Commit

[CodeStyle][UP031] fix /{python, test, tools}/* - part 19 (PaddlePa…
gouzil authored Jul 1, 2024
1 parent 8957bfa commit d5803cb
Showing 18 changed files with 139 additions and 166 deletions.
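Note: UP031 is the Ruff/pyupgrade rule that flags printf-style "%" string interpolation and suggests f-strings. A minimal, runnable sketch of the pattern every hunk below applies (the value here is illustrative, not taken from the diff):

func_name = "add"  # illustrative value

# Before: printf-style interpolation, flagged by UP031
old_message = "Error! Api %s has no Tensor inputs" % func_name

# After: the equivalent f-string
new_message = f"Error! Api {func_name} has no Tensor inputs"

assert old_message == new_message  # both render "Error! Api add has no Tensor inputs"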
10 changes: 5 additions & 5 deletions paddle/fluid/operators/generator/templates/operator_utils.c.j2
@@ -423,7 +423,7 @@ class {{op["op_name"] | to_pascal_case}}Op : public framework::OperatorWithKerne
{% set kernel = op["kernel"] %}
{% if kernel["data_type"] is not none or kernel["backend"] is not none
or kernel["force_backend"] is not none
or "complex_promote" in op or "data_transform" in op
or "complex_promote" in op or "data_transform" in op
or "get_expected_kernel_type" in op%}
protected:
{% if kernel["data_type"] is not none or kernel["backend"] is not none
@@ -629,7 +629,7 @@ class {{name | to_pascal_case}}OpMaker : public framework::SingleGradOpMaker<T>
grad_op->SetInput("{{attr | to_int_array_tensors_name}}", this->Input("{{attr | to_int_array_tensors_name}}"));
}
{% endif %}
{% elif attr["typename"] is scalar and
{% elif attr["typename"] is scalar and
("is_support_tensor" not in attr or attr["is_support_tensor"] is false)%}
if (this->HasInput("{{attr | to_scalar_tensor_name}}")) {
grad_op->SetInput("{{attr | to_scalar_tensor_name}}", this->Input("{{attr | to_scalar_tensor_name}}"));
@@ -848,7 +848,7 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
auto {{outputs[i] + "_t"}} = this->GetSingleInputGrad({{name_in_forward_orig | to_opmaker_name}});
{% elif output_typename == "Tensor[]" %}
auto {{outputs[i] + "_t"}} = this->GetMultiInputGrad({{name_in_forward_orig | to_opmaker_name}});
{%- endif %}
{%- endif %}
{%- endfor %}
{%- endmacro %}

@@ -864,7 +864,7 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
{{outputs[i]}}[i] = &{{outputs[i] + "_t"}}[i];
}
{{outputs[i]}} = this->GetOutputPtr({{outputs[i]}});
{%- endif %}
{%- endif %}
{%- endfor %}
{%- endmacro %}

@@ -876,7 +876,7 @@ class {{op_name | to_composite_grad_opmaker_name}} : public prim::CompositeGradO
auto {{outputs[i] + "_name"}} = this->GetOutputName({{outputs[i] + "_t"}});
{% elif output_typename == "Tensor[]" %}
auto {{outputs[i] + "_name"}} = this->GetOutputName({{outputs[i] + "_t"}});
{%- endif %}
{%- endif %}
{%- endfor %}
{%- endmacro %}

18 changes: 9 additions & 9 deletions paddle/phi/api/generator/tensor_operants_gen.py
@@ -489,9 +489,9 @@ def gene_operants_base(self):
def get_declare_args_without_first_tensor(self, inplace_flag=False):
func_name = self.get_api_func_name()
declare_args = self.get_input_tensor_args(inplace_flag)
assert len(declare_args) >= 1, (
"Error! Api %s has no Tensor inputs" % func_name
)
assert (
len(declare_args) >= 1
), f"Error! Api {func_name} has no Tensor inputs"
first_input_type = " ".join(declare_args[0].split(" ")[:-1])
# NOTE(HongyuJia): Do not consider "const paddle::optional<Tensor>&"
assert (
@@ -510,9 +510,9 @@ def get_declare_args_without_first_tensor(self, inplace_flag=False):
def get_define_args_without_first_tensor(self, inplace_flag=False):
func_name = self.get_api_func_name()
define_args = self.get_input_tensor_args(inplace_flag)
assert len(define_args) >= 1, (
"Error! Api %s has no Tensor inputs" % func_name
)
assert (
len(define_args) >= 1
), f"Error! Api {func_name} has no Tensor inputs"
first_input_type = " ".join(define_args[0].split(" ")[:-1])
# NOTE(HongyuJia): Do not consider "const paddle::optional<Tensor>&"
assert (
@@ -525,9 +525,9 @@

def gene_tensor_api_implementation(self):
func_name = self.get_api_func_name()
assert len(self.inputs['names']) >= 1, (
"Error! Api %s has no Tensor inputs" % func_name
)
assert (
len(self.inputs['names']) >= 1
), f"Error! Api {func_name} has no Tensor inputs"
# remove first Tensor argument
func_args = self.inputs['names'][1:] + self.attrs['names']
if len(func_args) > 0:
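Note: beyond the UP031 rewrite, the formatter re-wraps each assert so the condition sits inside parentheses once the message becomes an f-string; the two shapes are semantically identical. A self-contained sketch (function and arguments are illustrative, not Paddle code):

def check_has_tensor_inputs(declare_args, func_name):
    assert (
        len(declare_args) >= 1
    ), f"Error! Api {func_name} has no Tensor inputs"

check_has_tensor_inputs(["const Tensor& x"], "add")  # passes silently
# an empty list would raise AssertionError with the formatted message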
2 changes: 1 addition & 1 deletion python/paddle/autograd/backward_utils.py
@@ -450,7 +450,7 @@ def remove_op(block, op, state):

if value in state.sumvaluegrad_to_value:
raise ValueError(
'input_grad in [%s] is value which need to sum ', op.name()
f'input_grad in [%s] is value which need to sum {op.name()}'
)
# NOTE(SigureMo): Ensure access to the op's results before removing it.
# Otherwise, the op will be deconstructed and access the num_results
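Note: this hunk also repairs a latent bug. The old call passed the message and op.name() as two separate arguments to ValueError, so the "%s" placeholder was never interpolated; the f-string version appends the name, although the literal "[%s]" still remains in the message text. A sketch of how both calls render (op_name is an illustrative stand-in for op.name()):

op_name = "pd_op.add"  # illustrative stand-in for op.name()

# Before: two arguments, so "%s" is never filled in
print(ValueError('input_grad in [%s] is value which need to sum ', op_name))
# -> ('input_grad in [%s] is value which need to sum ', 'pd_op.add')

# After: the name is interpolated, though the literal "[%s]" is kept as-is
print(ValueError(f'input_grad in [%s] is value which need to sum {op_name}'))
# -> input_grad in [%s] is value which need to sum pd_op.add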
2 changes: 1 addition & 1 deletion python/paddle/autograd/ir_backward.py
@@ -1216,7 +1216,7 @@ def append_backward(loss, parameter_list=None, no_grad_set=None):
for i, param in enumerate(parameter_list):
check_type(
param,
'parameter_list[%s]' % i,
f'parameter_list[{i}]',
paddle.pir.Value,
'base.backward.append_backward',
)
26 changes: 13 additions & 13 deletions python/paddle/base/backward.py
@@ -160,8 +160,7 @@ def sort_checkpoints(self, checkpoints_name):
for name in checkpoints_name:
if name not in self.var_op_deps:
_logger.info(
"Recompute Optimizer: deleted %s from checkpoints, because it is not used in paddle program."
% name
f"Recompute Optimizer: deleted {name} from checkpoints, because it is not used in paddle program."
)
elif self.var_op_deps[name]["var_as_output_ops"] == []:
# input nodes
@@ -1079,8 +1078,9 @@ def _append_backward_ops_with_checkpoints_(
if op.has_attr("sub_block"):
raise Exception(
"Recompute don't support ops with sub_block"
"invoke op: %s"
% _pretty_op_desc_(op.desc, "with_sub_block")
"invoke op: {}".format(
_pretty_op_desc_(op.desc, "with_sub_block")
)
)
grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
op.desc, no_grad_dict[block.idx], []
@@ -1109,8 +1109,9 @@ def _append_backward_ops_with_checkpoints_(
if op.has_attr("sub_block"):
raise Exception(
"Recompute don't support ops with sub_block"
"invoke op: %s"
% _pretty_op_desc_(op.desc, "with_sub_block")
"invoke op: {}".format(
_pretty_op_desc_(op.desc, "with_sub_block")
)
)
grad_op_desc, op_grad_to_var = core.get_grad_op_desc(
op.desc, no_grad_dict[block.idx], []
@@ -1139,8 +1140,9 @@ def _append_backward_ops_with_checkpoints_(
if op.has_attr("sub_block"):
raise Exception(
"Recompute don't support ops with sub_block"
"invoke op: %s"
% _pretty_op_desc_(op.desc, "with_sub_block")
"invoke op: {}".format(
_pretty_op_desc_(op.desc, "with_sub_block")
)
)
input_and_output_names = []
input_and_output_names.extend(op.desc.input_arg_names())
@@ -1922,8 +1924,7 @@ def _get_no_grad_set_name(no_grad_set):
no_grad_set_name.add(no_grad_var)
else:
raise TypeError(
"The type of no_grad_set's member must be paddle.base.Variable or str, but received %s."
% (type(no_grad_var))
f"The type of no_grad_set's member must be paddle.base.Variable or str, but received {type(no_grad_var)}."
)
else:
raise TypeError(
@@ -1941,8 +1942,7 @@ def _get_no_grad_set_value(no_grad_set):
no_grad_set_value.add(no_grad_value)
else:
raise TypeError(
"The type of no_grad_set's member must be paddle.pir.Value, but received %s."
% (type(no_grad_value))
f"The type of no_grad_set's member must be paddle.pir.Value, but received {type(no_grad_value)}."
)
else:
raise TypeError(
@@ -2250,7 +2250,7 @@ def append_backward(
for i, param in enumerate(parameter_list):
check_type(
param,
'parameter_list[%s]' % i,
f'parameter_list[{i}]',
(framework.Variable, str),
'base.backward.append_backward',
)
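Note: the sub_block hunks use str.format() rather than an f-string because the message is assembled from two adjacent string literals plus a helper call; Python concatenates the adjacent literals first, then .format() fills the placeholder. (The missing space between "sub_block" and "invoke" is pre-existing.) A sketch with a hypothetical stand-in for _pretty_op_desc_:

def _pretty_op_desc(op_type, title):  # hypothetical stand-in, not the Paddle helper
    return f"[{title}] {op_type}"

message = (
    "Recompute don't support ops with sub_block"
    "invoke op: {}".format(_pretty_op_desc("while", "with_sub_block"))
)
print(message)
# -> Recompute don't support ops with sub_blockinvoke op: [with_sub_block] while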
9 changes: 3 additions & 6 deletions python/paddle/base/compiler.py
@@ -143,8 +143,7 @@ def __init__(self, program_or_graph, build_strategy=None):
self._program = program_or_graph
else:
raise TypeError(
"The type of program_to_graph parameter is wrong, expected Graph or Program, but received %s"
% type(program_or_graph)
f"The type of program_to_graph parameter is wrong, expected Graph or Program, but received {type(program_or_graph)}"
)

self._scope = None
@@ -481,8 +480,7 @@ def patch_program_cache(ipu_strategy):
def patch_getter(self, item):
if not isinstance(item, CacheKey):
raise ValueError(
'type(item) should be CacheKey, but received %s'
% type(item).__name__
f'type(item) should be CacheKey, but received {type(item).__name__}'
)
item_id = hash(item)
self._recent_key = item_id
@@ -1061,8 +1059,7 @@ def __init__(self, program=None, scope=None, ipu_strategy=None):

if not isinstance(program, framework.Program):
raise TypeError(
"The type of program is wrong, expected Program, but got %s"
% type(program)
f"The type of program is wrong, expected Program, but got {type(program)}"
)

self._program = program
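Note: these hunks interpolate either type(...) or type(...).__name__, and the two render differently; each rewrite keeps whichever spelling the original message used. A quick sketch:

item = 3.14  # illustrative value
print(f"but received {type(item)}")           # -> but received <class 'float'>
print(f"but received {type(item).__name__}")  # -> but received float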
