2 changes: 1 addition & 1 deletion examples/pytorch/language-modeling/run_clm_no_trainer.py
@@ -127,7 +127,7 @@ def parse_args():
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
- help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
+ help="If passed, will use a slow tokenizer (not backed by the Hugging Face Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
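Note on the flag itself: in these no_trainer example scripts, `--use_slow_tokenizer` is typically consumed by negating it into `use_fast` when the tokenizer is loaded. A minimal sketch under that assumption (the function and argument names below are illustrative, not taken from the diff):

from transformers import AutoTokenizer

def load_tokenizer(model_name_or_path: str, use_slow_tokenizer: bool = False):
    # use_fast=True selects the Rust-backed Hugging Face Tokenizers implementation
    # when one exists for the checkpoint; passing --use_slow_tokenizer falls back
    # to the pure-Python ("slow") tokenizer.
    return AutoTokenizer.from_pretrained(model_name_or_path, use_fast=not use_slow_tokenizer)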
2 changes: 1 addition & 1 deletion examples/pytorch/language-modeling/run_fim_no_trainer.py
@@ -132,7 +132,7 @@ def parse_args():
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
- help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
+ help="If passed, will use a slow tokenizer (not backed by the Hugging Face Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
2 changes: 1 addition & 1 deletion examples/pytorch/language-modeling/run_mlm_no_trainer.py
@@ -130,7 +130,7 @@ def parse_args():
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
- help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
+ help="If passed, will use a slow tokenizer (not backed by the Hugging Face Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
2 changes: 1 addition & 1 deletion examples/pytorch/multiple-choice/run_swag_no_trainer.py
@@ -128,7 +128,7 @@ def parse_args():
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
- help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
+ help="If passed, will use a slow tokenizer (not backed by the Hugging Face Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
2 changes: 1 addition & 1 deletion examples/pytorch/question-answering/run_qa_no_trainer.py
@@ -151,7 +151,7 @@ def parse_args():
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
- help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
+ help="If passed, will use a slow tokenizer (not backed by the Hugging Face Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
@@ -223,7 +223,7 @@ def parse_args():
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
- help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
+ help="If passed, will use a slow tokenizer (not backed by the Hugging Face Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
@@ -120,7 +120,7 @@ def parse_args():
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
- help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
+ help="If passed, will use a slow tokenizer (not backed by the Hugging Face Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
2 changes: 1 addition & 1 deletion examples/pytorch/translation/run_translation_no_trainer.py
@@ -212,7 +212,7 @@ def parse_args():
parser.add_argument(
"--use_slow_tokenizer",
action="store_true",
- help="If passed, will use a slow tokenizer (not backed by the 🤗 Tokenizers library).",
+ help="If passed, will use a slow tokenizer (not backed by the Hugging Face Tokenizers library).",
)
parser.add_argument(
"--per_device_train_batch_size",
2 changes: 1 addition & 1 deletion src/transformers/data/datasets/glue.py
@@ -82,7 +82,7 @@ def __init__(
cache_dir: Optional[str] = None,
):
warnings.warn(
- "This dataset will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
+ "This dataset will be removed from the library soon, preprocessing should be handled with the Hugging Face Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py",
FutureWarning,
2 changes: 1 addition & 1 deletion src/transformers/data/metrics/__init__.py
@@ -21,7 +21,7 @@


DEPRECATION_WARNING = (
- "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
+ "This metric will be removed from the library soon, metrics should be handled with the Hugging Face Evaluate "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
2 changes: 1 addition & 1 deletion src/transformers/data/processors/glue.py
@@ -28,7 +28,7 @@
logger = logging.get_logger(__name__)

DEPRECATION_WARNING = (
- "This {0} will be removed from the library soon, preprocessing should be handled with the 🤗 Datasets "
+ "This {0} will be removed from the library soon, preprocessing should be handled with the Hugging Face Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
2 changes: 1 addition & 1 deletion src/transformers/integrations/integration_utils.py
@@ -38,7 +38,7 @@


if os.getenv("WANDB_MODE") == "offline":
- print("⚙️ Running in WANDB offline mode")
+ print("[INFO] Running in WANDB offline mode")

from .. import PreTrainedModel, TrainingArguments
from .. import __version__ as version
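WANDB_MODE is an environment variable read by the wandb client, and the message above is printed at import time of this module, so the variable has to be set before the module is imported. A small illustrative sketch (not part of the diff) of forcing offline logging from a training script:

import os

# Assumption for illustration: set the variable before transformers (and therefore
# the wandb integration) is imported, so the check above sees it.
os.environ["WANDB_MODE"] = "offline"

import transformers  # imported after setting the env var on purpose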
@@ -272,7 +272,9 @@ def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_fo
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
- "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model to the Hugging Face hub.",
)

args = parser.parse_args()
2 changes: 1 addition & 1 deletion src/transformers/models/bros/convert_bros_to_pytorch.py
@@ -138,7 +138,7 @@ def convert_bros_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_h
parser.add_argument(
"--push_to_hub",
action="store_true",
- help="Whether or not to push the converted model and processor to the 🤗 hub.",
+ help="Whether or not to push the converted model and processor to the Hugging Face hub.",
)

args = parser.parse_args()
@@ -257,7 +257,9 @@ def convert_clipseg_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
- "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model to the Hugging Face hub.",
)

args = parser.parse_args()
2 changes: 1 addition & 1 deletion src/transformers/models/dac/convert_dac_checkpoint.py
@@ -274,7 +274,7 @@ def convert_checkpoint(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
- "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the Hugging Face hub."
)
parser.add_argument("--sample_rate", default=None, type=str, help="Sample rate used by DacFeatureExtractor")
args = parser.parse_args()
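Unlike most of the conversion scripts above, `--push_to_hub` here takes a string (a destination on the hub) with `default=None` instead of acting as a boolean `store_true` switch. A hedged sketch of how such a value is commonly used after conversion (the repo id and object names below are assumptions, not from the diff):

def maybe_push(model, feature_extractor, push_to_hub):
    # push_to_hub is expected to be a repo id such as "username/dac-16khz" (assumed example);
    # None means "do not upload".
    if push_to_hub is not None:
        model.push_to_hub(push_to_hub)
        feature_extractor.push_to_hub(push_to_hub)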
@@ -258,7 +258,7 @@ def load_data2vec(path):
max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
success = torch.allclose(our_output, their_output, atol=1e-3)
- print("Do both models output the same tensors?", "🔥" if success else "💩")
+ print("Do both models output the same tensors?", "[PASS]" if success else "[FAIL]")
if not success:
raise Exception("Something went wRoNg")

@@ -180,7 +180,7 @@ def convert_data2vec_checkpoint_to_pytorch(
max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-7
success = torch.allclose(our_output, their_output, atol=1e-3)
- print("Do both models output the same tensors?", "🔥" if success else "💩")
+ print("Do both models output the same tensors?", "[PASS]" if success else "[FAIL]")
if not success:
raise Exception("Something went wRoNg")

@@ -341,7 +341,7 @@ def main():

print(f"max_absolute_diff = {max_absolute_diff}")
success = torch.allclose(hf_output, orig_model_output, atol=1e-3)
- print("Do both models output the same tensors?", "🔥" if success else "💩")
+ print("Do both models output the same tensors?", "[PASS]" if success else "[FAIL]")
if not success:
raise Exception("Something went wRoNg")

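The conversion scripts above verify the ported model against the original numerically with `torch.allclose` and an absolute tolerance. A tiny self-contained illustration of the same check, with the new [PASS]/[FAIL] markers (the tensor values are invented for the example):

import torch

our_output = torch.tensor([1.0000, 2.0000])
their_output = torch.tensor([1.0002, 1.9999])

# Largest element-wise deviation between the two outputs (about 2e-4 here).
max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
print(f"max_absolute_diff = {max_absolute_diff}")
print("Do both models output the same tensors?", "[PASS]" if torch.allclose(our_output, their_output, atol=1e-3) else "[FAIL]")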
@@ -222,7 +222,9 @@ def convert_deformable_detr_checkpoint(
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
- "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model to the Hugging Face hub.",
)
args = parser.parse_args()
convert_deformable_detr_checkpoint(
@@ -299,9 +299,9 @@ def check_and_map_params(hf_param, gluon_param):
success = np.allclose(gluon_layer, hf_layer, atol=1e-3)

if success:
- print("✔️ Both model do output the same tensors")
+ print("[SUCCESS] Both models do output the same tensors")
else:
- print(" Both model do **NOT** output the same tensors")
+ print("[FAIL] Both models do **NOT** output the same tensors")
print("Absolute difference is:", max_absolute_diff)


@@ -313,7 +313,9 @@ def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
- "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model to the Hugging Face hub.",
)
args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
@@ -320,7 +320,9 @@ def convert_deta_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub):
help="Path to the folder to output PyTorch model.",
)
parser.add_argument(
- "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model to the Hugging Face hub.",
)
args = parser.parse_args()
convert_deta_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
4 changes: 3 additions & 1 deletion src/transformers/models/dinov2/convert_dinov2_to_hf.py
@@ -278,7 +278,9 @@ def convert_dinov2_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
- "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model to the Hugging Face hub.",
)

args = parser.parse_args()
@@ -284,7 +284,9 @@ def convert_dinov2_with_registers_checkpoint(model_name, pytorch_dump_folder_pat
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
- "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model to the Hugging Face hub.",
)

args = parser.parse_args()
2 changes: 1 addition & 1 deletion src/transformers/models/donut/convert_donut_to_pytorch.py
@@ -227,7 +227,7 @@ def convert_donut_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_
parser.add_argument(
"--push_to_hub",
action="store_true",
- help="Whether or not to push the converted model and processor to the 🤗 hub.",
+ help="Whether or not to push the converted model and processor to the Hugging Face hub.",
)

args = parser.parse_args()
@@ -352,7 +352,7 @@ def convert_checkpoint(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
- "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the Hugging Face hub."
)

args = parser.parse_args()
6 changes: 3 additions & 3 deletions src/transformers/models/esm/convert_esm.py
@@ -316,7 +316,7 @@ def convert_esm_checkpoint_to_pytorch(
hf_tokens = hf_tokenizer([row[1] for row in sample_data], return_tensors="pt", padding=True)
success = torch.all(hf_tokens["input_ids"] == batch_tokens)

- print("Do both models tokenizers output the same tokens?", "🔥" if success else "💩")
+ print("Do both models tokenizers output the same tokens?", "[PASS]" if success else "[FAIL]")
if not success:
raise Exception("Tokenization does not match!")

@@ -348,7 +348,7 @@ def convert_esm_checkpoint_to_pytorch(
success = torch.allclose(our_output, their_output, atol=1e-5)

print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-5
- print("Do both models output the same tensors?", "🔥" if success else "💩")
+ print("Do both models output the same tensors?", "[PASS]" if success else "[FAIL]")

if not success:
raise Exception("Something went wRoNg")
@@ -362,7 +362,7 @@ def convert_esm_checkpoint_to_pytorch(

print("Contact prediction testing:")
print(f"max_absolute_diff = {max_absolute_diff}") # ~ 1e-5
- print("Do both models output the same tensors?", "🔥" if success else "💩")
+ print("Do both models output the same tensors?", "[PASS]" if success else "[FAIL]")

if not success:
raise Exception("Something went wRoNg")
@@ -198,7 +198,7 @@ def convert_FastSpeech2ConformerModel_checkpoint(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
- "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the Hugging Face hub."
)

args = parser.parse_args()
@@ -122,7 +122,7 @@ def convert_hifigan_checkpoint(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
- "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the Hugging Face hub."
)

args = parser.parse_args()
@@ -89,7 +89,7 @@ def convert_FastSpeech2ConformerWithHifiGan_checkpoint(
help="Path to the output `FastSpeech2ConformerModel` PyTorch model.",
)
parser.add_argument(
- "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
+ "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the Hugging Face hub."
)

args = parser.parse_args()
@@ -250,7 +250,9 @@ def main():
)

parser.add_argument(
- "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model to the Hugging Face hub.",
)
args = parser.parse_args()
write_tokenizer(
@@ -481,7 +481,9 @@ def convert_grounding_dino_checkpoint(args):
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
- "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model to the Hugging Face hub.",
)
parser.add_argument(
"--verify_logits", action="store_false", help="Whether or not to verify logits after conversion."
@@ -210,7 +210,7 @@ def convert_groupvit_checkpoint(
parser.add_argument(
"--push_to_hub",
action="store_true",
- help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
+ help="Whether or not to push the converted model and processor to the Hugging Face hub using the provided `model_name`.",
)
args = parser.parse_args()

4 changes: 3 additions & 1 deletion src/transformers/models/hiera/convert_hiera_to_hf.py
@@ -353,7 +353,9 @@ def convert_hiera_checkpoint(args):
help="Whether or not to verify the logits against the original implementation.",
)
parser.add_argument(
- "--push-to-hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ "--push-to-hub",
+ action="store_true",
+ help="Whether or not to push the converted model to the Hugging Face hub.",
)
parser.add_argument(
"--base-model",
2 changes: 1 addition & 1 deletion src/transformers/models/ijepa/convert_ijepa_to_hf.py
@@ -253,7 +253,7 @@ def main():
parser.add_argument(
"--push_to_hub",
action="store_true",
- help="Whether or not to push the model to the 🤗 Hub.",
+ help="Whether or not to push the model to the Hugging Face Hub.",
)
parser.add_argument(
"--verify_logits", action="store_false", help="Whether or not to verify logits after conversion."
@@ -433,7 +433,9 @@ def main():
)

parser.add_argument(
- "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model to the Hugging Face hub.",
)
args = parser.parse_args()
write_tokenizer(
@@ -387,7 +387,9 @@ def convert_llava_to_hf(model_id, pytorch_dump_folder_path, push_to_hub=False):
"--pytorch_dump_folder_path", type=str, required=True, help="Path to the output PyTorch model directory."
)
parser.add_argument(
- "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model to the Hugging Face hub.",
)
args = parser.parse_args()

@@ -267,7 +267,9 @@ def convert_llava_to_hf(model_id, pytorch_dump_folder_path, push_to_hub=False):
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
- "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
+ "--push_to_hub",
+ action="store_true",
+ help="Whether or not to push the converted model to the Hugging Face hub.",
)
args = parser.parse_args()
