
Commit 080d704

Fix Pylint warnings (#41644)
* Fix pylint warnings
* More fixes
* Raise with an exception

Signed-off-by: Yuanyuan Chen <cyyever@outlook.com>
1 parent c01ceff commit 080d704

22 files changed (+21 / -28 lines)

examples/legacy/seq2seq/run_distributed_eval.py

Lines changed: 1 addition & 2 deletions
@@ -252,8 +252,7 @@ def gather_results_from_each_node(num_replicas, save_dir, timeout) -> list[dict[
             return json_data
         except JSONDecodeError:
             continue
-    else:
-        raise TimeoutError("Rank 0 gave up on waiting for other processes")
+    raise TimeoutError("Rank 0 gave up on waiting for other processes")
     # Unreachable
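This removes an `else:` clause attached to the surrounding loop, which Pylint flags when the loop contains no `break` (presumably W0120, useless-else-on-loop); since the loop only exits by returning or by running out the timeout, raising directly after the loop is equivalent. A minimal sketch of the pattern, with a hypothetical `poll()` standing in for reading the rank_*.json files:

    import time

    def poll():
        return None  # simulate "other ranks not finished yet"

    def wait_for_results(timeout=0.01):
        start = time.time()
        while time.time() - start < timeout:
            results = poll()
            if results is not None:
                return results
        # an `else:` clause on the loop would run in exactly this case (no `break` anywhere),
        # so raising right after the loop keeps the behavior and silences the warning
        raise TimeoutError("Rank 0 gave up on waiting for other processes")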

src/transformers/models/edgetam/configuration_edgetam.py

Lines changed: 0 additions & 2 deletions
@@ -93,8 +93,6 @@ def __init__(
         if isinstance(backbone_config, dict):
             backbone_config["model_type"] = backbone_config.get("model_type", "timm_wrapper")
             backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config)
-        elif isinstance(backbone_config, AutoConfig):
-            backbone_config = backbone_config
         elif backbone_config is None:
             backbone_config = AutoConfig.from_pretrained(
                 "timm/repvit_m1.dist_in1k",

src/transformers/models/edgetam/modular_edgetam.py

Lines changed: 0 additions & 2 deletions
@@ -116,8 +116,6 @@ def __init__(
         if isinstance(backbone_config, dict):
             backbone_config["model_type"] = backbone_config.get("model_type", "timm_wrapper")
             backbone_config = CONFIG_MAPPING[backbone_config["model_type"]](**backbone_config)
-        elif isinstance(backbone_config, AutoConfig):
-            backbone_config = backbone_config
         elif backbone_config is None:
             backbone_config = AutoConfig.from_pretrained(
                 "timm/repvit_m1.dist_in1k",

src/transformers/models/qwen2_vl/video_processing_qwen2_vl.py

Lines changed: 1 addition & 1 deletion
@@ -162,7 +162,7 @@ def sample_frames(
         )
         max_frames = math.floor(min(max_frames, total_num_frames) / temporal_patch_size) * temporal_patch_size
         num_frames = total_num_frames / metadata.fps * fps
-        num_frames = min(min(max(num_frames, min_frames), max_frames), total_num_frames)
+        num_frames = min(max(num_frames, min_frames), max_frames, total_num_frames)
         num_frames = math.floor(num_frames / temporal_patch_size) * temporal_patch_size
 
         if num_frames > total_num_frames:
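Because `min()` accepts any number of arguments, the nested call can be flattened into one call with the same result; Pylint reports the nested form as nested-min-max (W3301). The same flattening appears again in the qwen3_vl and video_llama_3 processors below. A quick check with made-up frame counts:

    num_frames, min_frames, max_frames, total_num_frames = 37.5, 4, 64, 48
    nested = min(min(max(num_frames, min_frames), max_frames), total_num_frames)
    flat = min(max(num_frames, min_frames), max_frames, total_num_frames)
    assert nested == flat == 37.5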

src/transformers/models/qwen3_vl/video_processing_qwen3_vl.py

Lines changed: 1 addition & 1 deletion
@@ -164,7 +164,7 @@ def sample_frames(
                 "Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
             )
             num_frames = int(total_num_frames / metadata.fps * fps)
-            num_frames = min(min(max(num_frames, self.min_frames), self.max_frames), total_num_frames)
+            num_frames = min(max(num_frames, self.min_frames), self.max_frames, total_num_frames)
 
         if num_frames is None:
             num_frames = min(max(total_num_frames, self.min_frames), self.max_frames)

src/transformers/models/switch_transformers/modeling_switch_transformers.py

Lines changed: 0 additions & 1 deletion
@@ -230,7 +230,6 @@ def __init__(self, config: SwitchTransformersConfig, is_sparse=False):
     def forward(self, hidden_states, **kwargs):
         forwarded_states = self.layer_norm(hidden_states)
         forwarded_states = self.mlp(forwarded_states)
-        forwarded_states = forwarded_states
         output = hidden_states + self.dropout(forwarded_states)
         return output

src/transformers/models/switch_transformers/modular_switch_transformers.py

Lines changed: 0 additions & 1 deletion
@@ -250,7 +250,6 @@ def __init__(self, config: SwitchTransformersConfig, is_sparse=False):
     def forward(self, hidden_states, **kwargs):
         forwarded_states = self.layer_norm(hidden_states)
         forwarded_states = self.mlp(forwarded_states)
-        forwarded_states = forwarded_states
         output = hidden_states + self.dropout(forwarded_states)
         return output
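Both Switch Transformers variants delete the same no-op: assigning `forwarded_states` to itself (another self-assignment in Pylint's terms) changes nothing, so the feed-forward path is simply layer norm, MLP, dropout, and the residual add. A minimal sketch of that data flow with a hypothetical stand-in module, not the real SwitchTransformers layer:

    import torch
    from torch import nn

    class TinyFeedForward(nn.Module):  # hypothetical stand-in
        def __init__(self, d_model=8):
            super().__init__()
            self.layer_norm = nn.LayerNorm(d_model)
            self.mlp = nn.Linear(d_model, d_model)
            self.dropout = nn.Dropout(0.0)

        def forward(self, hidden_states):
            forwarded_states = self.layer_norm(hidden_states)
            forwarded_states = self.mlp(forwarded_states)
            # the removed `forwarded_states = forwarded_states` line sat here and did nothing
            return hidden_states + self.dropout(forwarded_states)

    out = TinyFeedForward()(torch.randn(2, 8))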

src/transformers/models/video_llama_3/video_processing_video_llama_3.py

Lines changed: 1 addition & 1 deletion
@@ -163,7 +163,7 @@ def sample_frames(
         )
         max_frames = math.floor(min(max_frames, total_num_frames) / temporal_patch_size) * temporal_patch_size
         num_frames = total_num_frames / metadata.fps * fps
-        num_frames = min(min(max(num_frames, min_frames), max_frames), total_num_frames)
+        num_frames = min(max(num_frames, min_frames), max_frames, total_num_frames)
         num_frames = math.floor(num_frames / temporal_patch_size) * temporal_patch_size
 
         if num_frames > total_num_frames:

src/transformers/pipelines/question_answering.py

Lines changed: 4 additions & 4 deletions
@@ -86,10 +86,10 @@ def decode_spans(
 
 
 def select_starts_ends(
-    start,
-    end,
-    p_mask,
-    attention_mask,
+    start: np.ndarray,
+    end: np.ndarray,
+    p_mask: np.ndarray,
+    attention_mask: np.ndarray,
     min_null_score=1000000,
     top_k=1,
     handle_impossible_answer=False,

tests/models/bart/test_modeling_bart.py

Lines changed: 1 addition & 1 deletion
@@ -514,7 +514,7 @@ def assert_tensors_close(a, b, atol=1e-12, prefix=""):
     try:
         if torch.allclose(a, b, atol=atol):
             return True
-        raise
+        raise Exception
     except Exception:
         pct_different = (torch.gt((a - b).abs(), atol)).float().mean().item()
         if a.numel() > 100:
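Here the bare `raise` sits inside a `try:` block rather than an `except` handler, so there is no active exception to re-raise, which Pylint flags (presumably E0704, misplaced-bare-raise). At runtime the bare `raise` degenerates into a `RuntimeError` that the surrounding `except Exception:` happened to catch anyway, so `raise Exception` preserves the behavior while making the intent explicit. A small sketch of the difference:

    try:
        raise  # no active exception: Python raises RuntimeError instead
    except RuntimeError as err:
        print(err)  # "No active exception to re-raise" (exact wording varies by version)

    try:
        raise Exception  # explicit: jump straight into the handler below
    except Exception:
        print("mismatch handled")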
