6 changes: 3 additions & 3 deletions examples/legacy/pytorch-lightning/run_ner.py
@@ -72,12 +72,12 @@ def prepare_data(self):
self.labels,
args.max_seq_length,
self.tokenizer,
- cls_token_at_end=bool(self.config.model_type in ["xlnet"]),
+ cls_token_at_end=bool(self.config.model_type == "xlnet"),
cls_token=self.tokenizer.cls_token,
- cls_token_segment_id=2 if self.config.model_type in ["xlnet"] else 0,
+ cls_token_segment_id=2 if self.config.model_type == "xlnet" else 0,
sep_token=self.tokenizer.sep_token,
sep_token_extra=False,
- pad_on_left=bool(self.config.model_type in ["xlnet"]),
+ pad_on_left=bool(self.config.model_type == "xlnet"),
pad_token=self.tokenizer.pad_token_id,
pad_token_segment_id=self.tokenizer.pad_token_type_id,
pad_token_label_id=self.pad_token_label_id,
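For context, the pattern applied throughout this PR replaces a membership test against a one-element container with a plain equality check. A minimal sketch of the equivalence, with an illustrative variable name not taken from the diff:

```python
# Illustrative sketch of the refactor: a one-element membership test and a
# direct comparison give the same result, but the comparison states the
# intent more clearly.
model_type = "xlnet"  # hypothetical value

before = bool(model_type in ["xlnet"])  # membership test against a one-element list
after = model_type == "xlnet"           # direct equality; returns a bool for strings

assert before == after
```

The `bool(...)` wrapper around the equality is kept in the diff; for string operands the comparison already returns a `bool`, so the wrapper does not change the result.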
4 changes: 2 additions & 2 deletions examples/legacy/token-classification/utils_ner.py
@@ -246,10 +246,10 @@ def __init__(
labels,
max_seq_length,
tokenizer,
- cls_token_at_end=bool(model_type in ["xlnet"]),
+ cls_token_at_end=bool(model_type == "xlnet"),
# xlnet has a cls token at the end
cls_token=tokenizer.cls_token,
- cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
+ cls_token_segment_id=2 if model_type == "xlnet" else 0,
sep_token=tokenizer.sep_token,
sep_token_extra=False,
# roberta uses an extra separator b/w pairs of sentences, cf. github.com/pytorch/fairseq/commit/1684e166e3da03f5b600dbb7855cb98ddfcd0805
2 changes: 1 addition & 1 deletion src/transformers/audio_utils.py
@@ -587,7 +587,7 @@ def window_function(
window = np.hamming(length)
elif name in ["hann", "hann_window"]:
window = np.hanning(length)
- elif name in ["povey"]:
+ elif name == "povey":
window = np.power(np.hanning(length), 0.85)
else:
raise ValueError(f"Unknown window function '{name}'")
2 changes: 1 addition & 1 deletion src/transformers/generation/utils.py
@@ -883,7 +883,7 @@ def _prepare_decoder_input_ids_for_generation(
self.config.model_type == "vision-encoder-decoder" and "donut" in self.config.encoder.model_type.lower()
):
pass
- elif self.config.model_type in ["whisper"]:
+ elif self.config.model_type == "whisper":
pass
# user input but doesn't start with decoder_start_token_id -> prepend decoder_start_token_id (and adjust
# decoder_attention_mask if provided)
6 changes: 3 additions & 3 deletions src/transformers/integrations/ggml.py
@@ -329,11 +329,11 @@ def _gguf_parse_value(_value, data_type):
_value = int(_value[0])
elif data_type in [6, 12]:
_value = float(_value[0])
- elif data_type in [7]:
+ elif data_type == 7:
_value = bool(_value[0])
- elif data_type in [8]:
+ elif data_type == 8:
_value = array("B", list(_value)).tobytes().decode()
- elif data_type in [9]:
+ elif data_type == 9:
_value = _gguf_parse_value(_value, array_data_type)
return _value
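The GGUF change above also shows where `in` still earns its keep: it stays for branches that genuinely accept several type codes, while single-code branches become equality checks. A simplified sketch of that dispatch style (not the actual `_gguf_parse_value` implementation; only the codes visible in the hunk are mirrored):

```python
# Simplified sketch: keep `in` for multi-valued branches, use `==` where only
# one type code can match. Behaviour for other codes is omitted.
def parse_scalar(value, data_type):
    if data_type in [6, 12]:   # two float codes share one branch
        return float(value)
    elif data_type == 7:       # a single bool code
        return bool(value)
    elif data_type == 8:       # a single string code (bytes -> str)
        return bytes(value).decode()
    return value
```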
2 changes: 1 addition & 1 deletion src/transformers/modeling_utils.py
@@ -4001,7 +4001,7 @@ def save_pretrained(
if _is_dtensor_available and isinstance(state_dict[tensor], DTensor):
full_tensor = state_dict[tensor].full_tensor()
# to get the correctly ordered tensor we need to repack if packed
- if _get_parameter_tp_plan(tensor, self._tp_plan) in ("local_packed_rowwise",):
+ if _get_parameter_tp_plan(tensor, self._tp_plan) == "local_packed_rowwise":
full_tensor = repack_weights(full_tensor, -1, self._tp_size, 2)
shard[tensor] = full_tensor.contiguous()  # only do contiguous after it's permuted correctly
else:
4 changes: 2 additions & 2 deletions src/transformers/models/altclip/configuration_altclip.py
@@ -303,7 +303,7 @@ def __init__(

# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
- if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
+ if key in text_config and value != text_config[key] and key != "transformers_version":
# If specified in `text_config_dict`
if key in text_config_dict:
message = (
@@ -335,7 +335,7 @@ def __init__(

# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
- if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
+ if key in vision_config and value != vision_config[key] and key != "transformers_version":
# If specified in `vision_config_dict`
if key in vision_config_dict:
message = (
@@ -306,7 +306,7 @@ def __init__(

# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
- if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
+ if key in text_config and value != text_config[key] and key != "transformers_version":
# If specified in `text_config_dict`
if key in text_config_dict:
message = (
@@ -338,7 +338,7 @@ def __init__(

# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
- if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
+ if key in vision_config and value != vision_config[key] and key != "transformers_version":
# If specified in `vision_config_dict`
if key in vision_config_dict:
message = (
4 changes: 2 additions & 2 deletions src/transformers/models/clip/configuration_clip.py
@@ -295,7 +295,7 @@ def __init__(

# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
- if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
+ if key in text_config and value != text_config[key] and key != "transformers_version":
# If specified in `text_config_dict`
if key in text_config_dict:
message = (
@@ -327,7 +327,7 @@ def __init__(

# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
- if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
+ if key in vision_config and value != vision_config[key] and key != "transformers_version":
# If specified in `vision_config_dict`
if key in vision_config_dict:
message = (
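The config files touched in this PR (altclip, clip, clipseg, flava, groupvit, and others) all share the same merge-and-warn loop, so the change is mechanical in each of them. A hedged sketch of the loop's shape, with the warning message and config update elided (`warn_on_conflicts` is a hypothetical name used only for illustration):

```python
# Sketch of the shared pattern: warn when a key is set differently in the two
# sources, except for the bookkeeping `transformers_version` field. The real
# code builds a detailed message and updates the config; both are omitted.
def warn_on_conflicts(config_dict, config):
    for key, value in config_dict.items():
        if key in config and value != config[key] and key != "transformers_version":
            print(f"`{key}` is overridden: {config[key]!r} -> {value!r}")
```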
4 changes: 2 additions & 2 deletions src/transformers/models/clipseg/configuration_clipseg.py
@@ -307,7 +307,7 @@ def __init__(

# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
- if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
+ if key in text_config and value != text_config[key] and key != "transformers_version":
# If specified in `text_config_dict`
if key in text_config_dict:
message = (
@@ -339,7 +339,7 @@ def __init__(

# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
- if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
+ if key in vision_config and value != vision_config[key] and key != "transformers_version":
# If specified in `vision_config_dict`
if key in vision_config_dict:
message = (
2 changes: 1 addition & 1 deletion src/transformers/models/dpt/modeling_dpt.py
@@ -864,7 +864,7 @@ def __init__(self, config: DPTConfig):
self.config = config

# postprocessing: only required in case of a non-hierarchical backbone (e.g. ViT, BEiT)
- if config.backbone_config is not None and config.backbone_config.model_type in ["swinv2"]:
+ if config.backbone_config is not None and config.backbone_config.model_type == "swinv2":
self.reassemble_stage = None
else:
self.reassemble_stage = DPTReassembleStage(config)
12 changes: 4 additions & 8 deletions src/transformers/models/flava/configuration_flava.py
@@ -516,7 +516,7 @@ def __init__(

# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
- if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
+ if key in text_config and value != text_config[key] and key != "transformers_version":
# If specified in `text_config_dict`
if key in text_config_dict:
message = (
@@ -548,7 +548,7 @@ def __init__(

# Give a warning if the values exist in both `_image_config_dict` and `image_config` but being different.
for key, value in _image_config_dict.items():
- if key in image_config and value != image_config[key] and key not in ["transformers_version"]:
+ if key in image_config and value != image_config[key] and key != "transformers_version":
# If specified in `image_config_dict`
if key in image_config_dict:
message = (
@@ -576,11 +576,7 @@ def __init__(
# Give a warning if the values exist in both `_multimodal_config_dict` and `multimodal_config` but being
# different.
for key, value in _multimodal_config_dict.items():
- if (
-     key in multimodal_config
-     and value != multimodal_config[key]
-     and key not in ["transformers_version"]
- ):
+ if key in multimodal_config and value != multimodal_config[key] and key != "transformers_version":
# If specified in `multimodal_config_dict`
if key in multimodal_config_dict:
message = (
@@ -611,7 +607,7 @@ def __init__(
if (
key in image_codebook_config
and value != image_codebook_config[key]
- and key not in ["transformers_version"]
+ and key != "transformers_version"
):
# If specified in `image_codebook_config_dict`
if key in image_codebook_config_dict:
4 changes: 2 additions & 2 deletions src/transformers/models/groupvit/configuration_groupvit.py
@@ -288,7 +288,7 @@ def __init__(

# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
- if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
+ if key in text_config and value != text_config[key] and key != "transformers_version":
# If specified in `text_config_dict`
if key in text_config_dict:
message = (
@@ -320,7 +320,7 @@ def __init__(

# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
- if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
+ if key in vision_config and value != vision_config[key] and key != "transformers_version":
# If specified in `vision_config_dict`
if key in vision_config_dict:
message = (
@@ -60,15 +60,18 @@ def load_tf_weights_in_imagegpt(model, config, imagegpt_checkpoint_path):

# adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculated m and v
# which are not required for using pretrained model
- if any(
-     n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
-     for n in name
- ) or name[-1] in ["_step"]:
+ if (
+     any(
+         n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
+         for n in name
+     )
+     or name[-1] == "_step"
+ ):
logger.info("Skipping {}".format("/".join(name)))
continue

pointer = model
- if name[-1] not in ["wtet"]:
+ if name[-1] != "wtet":
pointer = getattr(pointer, "transformer")

for m_name in name:
8 changes: 2 additions & 6 deletions src/transformers/models/kosmos2_5/modeling_kosmos2_5.py
@@ -273,9 +273,7 @@ class Kosmos2_5ModelOutput(ModelOutput):
vision_model_output: BaseModelOutputWithPooling = None

def to_tuple(self) -> tuple[Any]:
- return tuple(
-     (self[k] if k not in ["vision_model_output"] else getattr(self, k).to_tuple()) for k in self.keys()
- )
+ return tuple((self[k] if k != "vision_model_output" else getattr(self, k).to_tuple()) for k in self.keys())


@dataclass
@@ -333,9 +331,7 @@ class Kosmos2_5ForConditionalGenerationModelOutput(ModelOutput):
vision_model_output: BaseModelOutputWithPooling = None

def to_tuple(self) -> tuple[Any]:
- return tuple(
-     (self[k] if k not in ["vision_model_output"] else getattr(self, k).to_tuple()) for k in self.keys()
- )
+ return tuple((self[k] if k != "vision_model_output" else getattr(self, k).to_tuple()) for k in self.keys())


# Copied from transformers.models.pix2struct.modeling_pix2struct.Pix2StructLayerNorm with Pix2Struct->Kosmos2_5
4 changes: 2 additions & 2 deletions src/transformers/models/llama/convert_llama_weights_to_hf.py
@@ -398,7 +398,7 @@ def permute(w, n_heads, dim1=dim, dim2=dim):
max_position_embeddings=max_position_embeddings,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
- tie_word_embeddings=llama_version in ["3.2"],
+ tie_word_embeddings=llama_version == "3.2",
)

config.save_pretrained(tmp_model_path)
@@ -451,7 +451,7 @@ def __init__(self, vocab_file, special_tokens=None, instruct=False, llama_versio
# Prevents a null chat_template, which triggers
# a parsing warning in the Hub.
additional_kwargs = {}
- if instruct or llama_version in ["Guard-3"]:
+ if instruct or llama_version == "Guard-3":
model_id, revision = templates_for_version.get(llama_version, (None, None))
if model_id is not None:
from transformers import AutoTokenizer
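One readability note on the keyword-argument form above: the comparison binds tighter than the keyword assignment, so `tie_word_embeddings=llama_version == "3.2"` passes a plain bool, exactly as the previous one-element membership test did. A toy sketch with made-up values and a hypothetical helper:

```python
# Toy sketch: the equality is evaluated first, so the keyword argument
# receives True or False, same as the old `in ["3.2"]` form.
def build_config(tie_word_embeddings=False):
    return {"tie_word_embeddings": tie_word_embeddings}

llama_version = "3.2"  # hypothetical value
assert build_config(tie_word_embeddings=llama_version == "3.2") == {"tie_word_embeddings": True}
assert build_config(tie_word_embeddings=llama_version in ["3.2"]) == {"tie_word_embeddings": True}
```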
@@ -277,7 +277,7 @@ def __init__(

# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
- if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
+ if key in text_config and value != text_config[key] and key != "transformers_version":
# If specified in `text_config_dict`
if key in text_config_dict:
message = (
@@ -309,7 +309,7 @@ def __init__(

# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
- if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
+ if key in vision_config and value != vision_config[key] and key != "transformers_version":
# If specified in `vision_config_dict`
if key in vision_config_dict:
message = (
4 changes: 2 additions & 2 deletions src/transformers/models/perceiver/modeling_perceiver.py
@@ -2804,7 +2804,7 @@ class PerceiverAudioPostprocessor(nn.Module):
def __init__(self, config: PerceiverConfig, in_channels: int, postproc_type: str = "patches") -> None:
super().__init__()

- if postproc_type not in ("patches",):  # to be supported: 'conv', 'patches', 'pixels'
+ if postproc_type != "patches":  # to be supported: 'conv', 'patches', 'pixels'
raise ValueError("Invalid postproc_type!")

# Architecture parameters:
@@ -3137,7 +3137,7 @@ def __init__(
super().__init__()
self.config = config

- if prep_type not in ("patches",):
+ if prep_type != "patches":
raise ValueError(f"Prep_type {prep_type} is invalid, can only be 'patches'.")

if concat_or_add_pos not in ["concat", "add"]:
2 changes: 1 addition & 1 deletion src/transformers/models/phi3/configuration_phi3.py
@@ -210,7 +210,7 @@ def _rope_scaling_validation(self):
rope_scaling_type = self.rope_scaling.get("type", None)
rope_scaling_short_factor = self.rope_scaling.get("short_factor", None)
rope_scaling_long_factor = self.rope_scaling.get("long_factor", None)
- if rope_scaling_type is None or rope_scaling_type not in ["longrope"]:
+ if rope_scaling_type is None or rope_scaling_type != "longrope":
raise ValueError(f"`rope_scaling`'s type field must be one of ['longrope'], got {rope_scaling_type}")
if not (
isinstance(rope_scaling_short_factor, list)
@@ -452,7 +452,7 @@ def _rope_scaling_validation(self):
rope_scaling_type = self.rope_scaling.get("type", None)
rope_scaling_short_factor = self.rope_scaling.get("short_factor", None)
rope_scaling_long_factor = self.rope_scaling.get("long_factor", None)
- if rope_scaling_type is None or rope_scaling_type not in ["longrope"]:
+ if rope_scaling_type is None or rope_scaling_type != "longrope":
raise ValueError(f"`rope_scaling`'s type field must be one of ['longrope'], got {rope_scaling_type}")
if not (
isinstance(rope_scaling_short_factor, list)
@@ -3848,7 +3848,7 @@ def generate(
self.config.talker_config.text_config.vocab_size - 1024,
self.config.talker_config.text_config.vocab_size,
)
- if i not in (self.config.talker_config.codec_eos_token_id,)
+ if i != self.config.talker_config.codec_eos_token_id
]  # Suppress additional special tokens, should not be predicted
talker_kwargs = {
"max_new_tokens": talker_max_new_tokens,
@@ -2425,7 +2425,7 @@ def generate(
self.config.talker_config.text_config.vocab_size - 1024,
self.config.talker_config.text_config.vocab_size,
)
- if i not in (self.config.talker_config.codec_eos_token_id,)
+ if i != self.config.talker_config.codec_eos_token_id
]  # Suppress additional special tokens, should not be predicted
talker_kwargs = {
"max_new_tokens": talker_max_new_tokens,
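The two generate() hunks above build the same suppress-token list: every id in the trailing codec range except the codec EOS id. A self-contained sketch with made-up numbers, kept deliberately small:

```python
# Illustrative sketch: collect the trailing 1024 codec ids, skipping only the
# codec EOS id. vocab_size and codec_eos_token_id are hypothetical values.
vocab_size = 8192
codec_eos_token_id = 8191

suppress_tokens = [
    i
    for i in range(vocab_size - 1024, vocab_size)
    if i != codec_eos_token_id
]

assert len(suppress_tokens) == 1023 and codec_eos_token_id not in suppress_tokens
```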
@@ -96,7 +96,7 @@ def load_tf_weights_in_tapas(model, config, tf_checkpoint_path):
continue
# in case the model is TapasForMaskedLM, we skip the pooler
if isinstance(model, TapasForMaskedLM):
- if any(n in ["pooler"] for n in name):
+ if any(n == "pooler" for n in name):
logger.info(f"Skipping {'/'.join(name)}")
continue
# if first scope name starts with "bert", change it to "tapas"
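In the tapas loader the single-element check sits inside a generator expression; after the change, `any(n == "pooler" for n in name)` is equivalent to a direct membership test on the split checkpoint name. A small sketch with an invented name path:

```python
# Sketch: for a list of string components, the generator form and the direct
# membership test agree. The `name` value here is invented for illustration.
name = "bert/pooler/dense/kernel".split("/")

assert any(n == "pooler" for n in name) == ("pooler" in name)
```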