diff --git a/docs/source/en/model_doc/t5.mdx b/docs/source/en/model_doc/t5.mdx
index c312b3df8157..8034fc010a1c 100644
--- a/docs/source/en/model_doc/t5.mdx
+++ b/docs/source/en/model_doc/t5.mdx
@@ -252,10 +252,9 @@ The example above only shows a single example. You can also do batched inference
 >>> model = T5ForConditionalGeneration.from_pretrained("t5-small")
 
 >>> task_prefix = "translate English to German: "
->>> sentences = [
-...     "The house is wonderful.",
-...     "I like to work in NYC.",
->>> ]  # use different length sentences to test batching
+>>> # use different length sentences to test batching
+>>> sentences = ["The house is wonderful.", "I like to work in NYC."]
+
 >>> inputs = tokenizer([task_prefix + sentence for sentence in sentences], return_tensors="pt", padding=True)
 
 >>> output_sequences = model.generate(
diff --git a/src/transformers/models/beit/modeling_beit.py b/src/transformers/models/beit/modeling_beit.py
index 73eaf26b7ee3..1f5fc44dc120 100755
--- a/src/transformers/models/beit/modeling_beit.py
+++ b/src/transformers/models/beit/modeling_beit.py
@@ -1210,14 +1210,14 @@ def forward(
         Examples:
 
         ```python
-        >>> from transformers import BeitFeatureExtractor, BeitForSemanticSegmentation
+        >>> from transformers import AutoFeatureExtractor, BeitForSemanticSegmentation
         >>> from PIL import Image
         >>> import requests
 
         >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
         >>> image = Image.open(requests.get(url, stream=True).raw)
 
-        >>> feature_extractor = BeitFeatureExtractor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
+        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
         >>> model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
 
         >>> inputs = feature_extractor(images=image, return_tensors="pt")
diff --git a/src/transformers/models/data2vec/modeling_data2vec_vision.py b/src/transformers/models/data2vec/modeling_data2vec_vision.py
index 3e3d4cc4f341..0e286a773d31 100644
--- a/src/transformers/models/data2vec/modeling_data2vec_vision.py
+++ b/src/transformers/models/data2vec/modeling_data2vec_vision.py
@@ -1140,14 +1140,14 @@ def forward(
         Examples:
 
         ```python
-        >>> from transformers import Data2VecVisionFeatureExtractor, Data2VecVisionForSemanticSegmentation
+        >>> from transformers import AutoFeatureExtractor, Data2VecVisionForSemanticSegmentation
         >>> from PIL import Image
         >>> import requests
 
         >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
         >>> image = Image.open(requests.get(url, stream=True).raw)
 
-        >>> feature_extractor = Data2VecVisionFeatureExtractor.from_pretrained("facebook/data2vec-vision-base")
+        >>> feature_extractor = AutoFeatureExtractor.from_pretrained("facebook/data2vec-vision-base")
         >>> model = Data2VecVisionForSemanticSegmentation.from_pretrained("facebook/data2vec-vision-base")
 
         >>> inputs = feature_extractor(images=image, return_tensors="pt")
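
The `t5.mdx` hunk above cuts off at `model.generate(`. For reference, a runnable sketch of the full batched-inference snippet the hunk belongs to; the `generate` keyword arguments and the final decode step are assumptions drawn from typical usage, not shown in the hunk itself:

```python
from transformers import T5Tokenizer, T5ForConditionalGeneration

tokenizer = T5Tokenizer.from_pretrained("t5-small")
model = T5ForConditionalGeneration.from_pretrained("t5-small")

task_prefix = "translate English to German: "
# use different length sentences to test batching
sentences = ["The house is wonderful.", "I like to work in NYC."]

# padding=True pads the shorter sentence so both fit in one batch tensor
inputs = tokenizer([task_prefix + sentence for sentence in sentences], return_tensors="pt", padding=True)

# kwargs below are assumed for illustration; the hunk ends before them
output_sequences = model.generate(
    input_ids=inputs["input_ids"],
    attention_mask=inputs["attention_mask"],
    do_sample=False,  # greedy decoding, to check batching does not change outputs
)

print(tokenizer.batch_decode(output_sequences, skip_special_tokens=True))
```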
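
Both modeling hunks make the same swap: a model-specific class (`BeitFeatureExtractor`, `Data2VecVisionFeatureExtractor`) is replaced by `AutoFeatureExtractor`, which resolves the correct preprocessor from the checkpoint's configuration. A minimal end-to-end sketch of the resulting usage follows; the forward pass and `logits` lines after `inputs` are assumed from the rest of the docstring, not part of the hunks shown:

```python
from transformers import AutoFeatureExtractor, BeitForSemanticSegmentation
from PIL import Image
import requests

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# AutoFeatureExtractor reads the checkpoint's preprocessor config, so the same
# call works for BEiT, Data2Vec-Vision, and other supported vision checkpoints.
feature_extractor = AutoFeatureExtractor.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")

inputs = feature_extractor(images=image, return_tensors="pt")
outputs = model(**inputs)

# per-pixel class logits, shape (batch_size, num_labels, height, width)
logits = outputs.logits
```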