[CodeStyle][F821] fix remaining F821 issues (#47968)
* [CodeStyle][F821] fix remaining F821 issues

* refine comment

* fix _set_item
SigureMo authored Nov 15, 2022
1 parent aa08b76 commit 4e09b08
Showing 10 changed files with 41 additions and 19 deletions.
6 changes: 5 additions & 1 deletion .flake8
@@ -23,7 +23,7 @@ ignore =

# F, see https://flake8.pycqa.org/en/latest/user/error-codes.html
F405,
- F811,F821,F841,
+ F811,F841,

# W, see https://pycodestyle.pycqa.org/en/latest/intro.html#error-codes
W503
@@ -33,3 +33,7 @@ per-file-ignores =
python/paddle/fluid/tests/unittests/collective/fleet/test_hdfs1.py:E101,W191
# Ignore unused imports in __init__.py
__init__.py: F401
+ # Ignore undefined variables in CMake config and some dygraph_to_static tests
+ .cmake-format.py: F821
+ python/paddle/fluid/tests/unittests/dygraph_to_static/test_loop.py: F821
+ python/paddle/fluid/tests/unittests/dygraph_to_static/test_closure_analysis.py: F821
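
For context: F821 is flake8's "undefined name" check. This change drops F821 from the global ignore list, so the check now runs repository-wide, and instead lists the few files that legitimately reference names flake8 cannot see (the cmake-format config and two dygraph_to_static tests) under per-file-ignores. The other suppression mechanism used throughout this commit is a line-level noqa comment; a minimal, hypothetical sketch (module and names below are invented for illustration):

# hypothetical_module.py -- not part of this commit, illustration only
def read_lod_setting():
    # `SETTINGS` is injected into this module's globals at runtime, so a
    # static checker sees no definition; without the trailing comment,
    # flake8 would now report "F821 undefined name 'SETTINGS'" here.
    return SETTINGS["lod_level"]  # noqa: F821
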
8 changes: 6 additions & 2 deletions python/paddle/distributed/auto_parallel/completion.py
@@ -1510,8 +1510,12 @@ def _get_op_by_id(ops, id):
self._dist_context.set_op_dist_attr_for_program(
grad_op, grad_op_dist_attr
)
- grad_op_dist_attr.impl_type = fwd_op_dist_attr.impl_type
- grad_op_dist_attr.impl_idx = fwd_op_dist_attr.impl_idx
+ grad_op_dist_attr.impl_type = (
+     fwd_op_dist_attr.impl_type  # noqa: F821
+ )
+ grad_op_dist_attr.impl_idx = (
+     fwd_op_dist_attr.impl_idx  # noqa: F821
+ )

continue

4 changes: 2 additions & 2 deletions python/paddle/distributed/auto_parallel/cost/tensor_cost.py
@@ -100,9 +100,9 @@ def calc_cost(self):

if dtype == paddle.float32 or dtype == paddle.int32:
dtype_factor = 4
- elif node.dtype == paddle.int64:
+ elif dtype == paddle.int64:
dtype_factor = 8
- elif node.dtype == paddle.uint8:
+ elif dtype == paddle.uint8:
dtype_factor = 1
else:
dtype_factor = 2
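
The two changed lines above were genuine F821 hits: the later branches referred to an undefined `node` where the local `dtype` was intended. As a hedged aside (an illustrative sketch, not the actual Paddle implementation), the same dtype-to-byte-width logic can be written as a lookup table, which makes this kind of stale-variable slip harder to reintroduce:

import paddle

# Byte width per element for the dtypes handled above; anything unlisted
# (e.g. float16) falls back to 2 bytes, matching the final else branch.
_DTYPE_FACTOR = {
    paddle.float32: 4,
    paddle.int32: 4,
    paddle.int64: 8,
    paddle.uint8: 1,
}

def dtype_factor(dtype):
    return _DTYPE_FACTOR.get(dtype, 2)
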
@@ -141,7 +141,7 @@ def forward(ctx, *args, **kwargs):
def backward(ctx, *args, **kwargs):
raise RuntimeError(
"primitive operator does NOT have backward function, op type: {}".format(
- str(op.type)
+ str(op.type)  # noqa: F821
)
)

@@ -299,6 +299,8 @@ def vlist_of_dict(x):
print(paddle.jit.to_static(vlist_of_dict)(x))

def test4(self):
+ import numpy as np
+
def vlist_of_dict(x):
a = np.array([1, 2, 3])
for i in range(3):
@@ -310,6 +312,8 @@ def vlist_of_dict(x):
print(paddle.jit.to_static(vlist_of_dict)(x))

def test5(self):
+ import numpy as np
+
def vlist_of_dict(x):
a = np.array([1, 2, 3])
for i in range(3):
@@ -296,7 +296,9 @@ def forward(
blocks = self.block(inputs)
for i, block in enumerate(blocks):
if i > 0:
- block = fluid.layers.concat(input=[route, block], axis=1)
+ block = fluid.layers.concat(
+     input=[route, block], axis=1  # noqa: F821
+ )
route, tip = self.yolo_blocks[i](block)
block_out = self.block_outputs[i](tip)
self.outputs.append(block_out)
12 changes: 6 additions & 6 deletions python/paddle/fluid/tests/unittests/gradient_checker.py
@@ -59,7 +59,7 @@ def _get_item(t, i, np_dtype):
raise ValueError("Not supported data type " + str(np_dtype))


- def _set_item(t, i, e, np_dtype):
+ def _set_item(t, i, e, np_dtype, place):
if np_dtype == np.float16:
np_t = np.array(t).astype(np.float16)
shape = np_t.shape
@@ -145,14 +145,14 @@ def run():
for i in range(x_size):
orig = _get_item(x_t, i, np_type)
x_pos = orig + delta
- _set_item(x_t, i, x_pos, np_type)
+ _set_item(x_t, i, x_pos, np_type, place)
y_pos = run()

x_neg = orig - delta
- _set_item(x_t, i, x_neg, np_type)
+ _set_item(x_t, i, x_neg, np_type, place)
y_neg = run()

- _set_item(x_t, i, orig, np_type)
+ _set_item(x_t, i, orig, np_type, place)

for j in range(len(y)):
jacobian[j][i, :] = (y_pos[j] - y_neg[j]) / delta / 2.0
@@ -207,7 +207,7 @@ def _compute_analytical_jacobian(program, x, y, place, scope):
filted_idx, filted_dx = zip(*filted)

for i in range(y_size):
- _set_item(dy_t, i, 1, np_type)
+ _set_item(dy_t, i, 1, np_type, place)

dx_res = exe.run(program, scope=scope, fetch_list=filted_dx)

@@ -220,7 +220,7 @@ def _compute_analytical_jacobian(program, x, y, place, scope):
dx[dx_idx].shape, dtype=np_type
).flatten()

- _set_item(dy_t, i, 0, np_type)
+ _set_item(dy_t, i, 0, np_type, place)

return jacobian

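
In this file `_set_item` gains a `place` argument and every call site passes it through, presumably so the helper can write the perturbed value back to a tensor on the correct device. The surrounding loop is a standard central-difference numerical Jacobian; a self-contained NumPy sketch of what it computes (an illustration only, not Paddle's gradient_checker):

import numpy as np

def numeric_jacobian(f, x, delta=1e-3):
    # Perturb one input element at a time and estimate each Jacobian column
    # from the centered difference (f(x + d) - f(x - d)) / (2 * d).
    x = np.array(x, dtype=np.float64)
    y = np.asarray(f(x)).ravel()
    jac = np.zeros((y.size, x.size))
    for i in range(x.size):
        orig = x.flat[i]
        x.flat[i] = orig + delta
        y_pos = np.asarray(f(x)).ravel()
        x.flat[i] = orig - delta
        y_neg = np.asarray(f(x)).ravel()
        x.flat[i] = orig  # restore the original value before moving on
        jac[:, i] = (y_pos - y_neg) / (2.0 * delta)
    return jac

# For f(x) = x ** 2 the exact Jacobian is diag(2 * x); the estimate agrees
# to roughly delta ** 2.
print(numeric_jacobian(lambda v: v ** 2, [1.0, 2.0, 3.0]))
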
@@ -60,8 +60,12 @@ def __init__(

def forward(self, input):
if _global_parallel_strategy == "pp":
- auto.shard_tensor(self.linear0.weight, PP_MESH_0, [None, None])
- auto.shard_tensor(self.linear1.weight, PP_MESH_1, [None, None])
+ auto.shard_tensor(
+     self.linear0.weight, PP_MESH_0, [None, None]  # noqa: F821
+ )
+ auto.shard_tensor(
+     self.linear1.weight, PP_MESH_1, [None, None]  # noqa: F821
+ )
else:
auto.shard_tensor(
self.linear0.weight, _global_process_mesh, [None, None]
@@ -93,8 +97,8 @@ def mlp_forward(train_program, start_program):
)

if _global_parallel_strategy == "pp":
- auto.shard_tensor(input, PP_MESH_0, [None, None])
- auto.shard_tensor(label, PP_MESH_1, [None, None])
+ auto.shard_tensor(input, PP_MESH_0, [None, None])  # noqa: F821
+ auto.shard_tensor(label, PP_MESH_1, [None, None])  # noqa: F821
elif _global_parallel_strategy == "dp":
auto.shard_tensor(input, _global_process_mesh, ["x", None])
else:
@@ -86,7 +86,9 @@ def set_data(self):
lod_level_i = np.random.randint(
low=1,
high=5,
- size=self.num_seq if i == 0 else sum(lod_level_i),
+ size=self.num_seq
+ if i == 0
+ else sum(lod_level_i),  # noqa: F821
).tolist()
data_lod.append(lod_level_i)
data_value = np.random.random(
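
The noqa comments in this hunk (and in the nearly identical hunk below, as well as the `route` case in the YOLO model earlier) all cover the same pattern: a loop-carried variable is read in the statement that also assigns it, guarded by an `i == 0` check. That is valid at runtime, but flake8 is not flow-sensitive, so it reports F821 on the apparent use-before-definition. A hedged sketch of the pattern and of a rewrite that avoids the warning without noqa (names are illustrative, not taken from the test):

import numpy as np

num_seq = 4

# Pattern flagged by F821: on the first iteration the else branch is never
# evaluated, yet flake8 sees `lod_level_i` used before any assignment.
data_lod = []
for i in range(3):
    lod_level_i = np.random.randint(
        low=1,
        high=5,
        size=num_seq if i == 0 else sum(lod_level_i),  # noqa: F821
    ).tolist()
    data_lod.append(lod_level_i)

# Equivalent rewrite that needs no suppression: seed the loop-carried size.
prev_size = num_seq
data_lod = []
for i in range(3):
    lod_level_i = np.random.randint(low=1, high=5, size=prev_size).tolist()
    data_lod.append(lod_level_i)
    prev_size = sum(lod_level_i)
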
@@ -83,7 +83,9 @@ def set_data(self):
lod_level_i = np.random.randint(
low=1,
high=5,
- size=self.batch_size if i == 0 else sum(lod_level_i),
+ size=self.batch_size
+ if i == 0
+ else sum(lod_level_i),  # noqa: F821
).tolist()
data_lod.append(lod_level_i)
data_value = np.random.random(
