@@ -1916,14 +1916,14 @@ def test_large_generation_multilingual(self):
             input_features, do_sample=False, max_length=20, language="<|de|>", task="transcribe"
         )
         transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
-        EXPECTED_TRANSCRIPT = " Mein sechster Sohn scheint, wenigstens auf den ersten Blick, "
+        EXPECTED_TRANSCRIPT = " Denken Sie, soeben walten meine Gedanken bei Ihnen in Adela "
         self.assertEqual(transcript, EXPECTED_TRANSCRIPT)
 
         generated_ids = model.generate(
             input_features, do_sample=False, max_length=20, language="<|de|>", task="translate"
         )
         transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
-        EXPECTED_TRANSCRIPT = " My sixth son seems, at least at first glance, the most deeply-minded "
+        EXPECTED_TRANSCRIPT = " Think, my thoughts were just rolling with you in Adelaide, and I "
         self.assertEqual(transcript, EXPECTED_TRANSCRIPT)
 
     @slow
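For context, the call pattern exercised by the updated expectations above: the same checkpoint transcribes German with task="transcribe" and emits English with task="translate". A minimal sketch, assuming the small openai/whisper-tiny checkpoint and dummy input features (the real test feeds a German speech sample through the processor):

    import torch
    from transformers import WhisperForConditionalGeneration, WhisperProcessor

    processor = WhisperProcessor.from_pretrained("openai/whisper-tiny")  # assumed checkpoint
    model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-tiny")

    # Dummy log-mel features in Whisper's expected shape (batch, 80 mel bins, 3000 frames);
    # the test builds these with processor(audio, sampling_rate=16_000, return_tensors="pt").
    input_features = torch.zeros(1, 80, 3000)

    generated_ids = model.generate(
        input_features, do_sample=False, max_length=20, language="<|de|>", task="transcribe"
    )
    transcript = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
    # Swapping in task="translate" reuses the same features but decodes into English.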
@@ -2238,7 +2238,7 @@ def test_tiny_token_timestamp_generation(self):
             input_features, max_length=448, return_timestamps=True, return_token_timestamps=True
         )
 
-        self.assertEqual(generate_outputs.sequences.shape, generate_outputs.token_timestamps.shape)
+        self.assertEqual(generate_outputs["sequences"].shape, generate_outputs["token_timestamps"].shape)
 
         # fmt: off
         EXPECTED_OUTPUT = torch.tensor([
@@ -2249,7 +2249,7 @@ def test_tiny_token_timestamp_generation(self):
         ])
         # fmt: on
 
-        self.assertTrue(torch.allclose(generate_outputs.token_timestamps.to("cpu"), EXPECTED_OUTPUT))
+        self.assertTrue(torch.allclose(generate_outputs["token_timestamps"].to("cpu"), EXPECTED_OUTPUT))
 
     @slow
     def test_large_token_timestamp_generation(self):
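The hunks above and below switch the token-timestamp tests from attribute access to key access on the generation output. A minimal sketch of the contract being asserted, continuing from the setup sketch earlier (token-level timestamps also require cross-attention alignment heads on the generation config, which the checkpoint or test is assumed to provide):

    generate_outputs = model.generate(
        input_features, max_length=448, return_timestamps=True, return_token_timestamps=True
    )
    # The output behaves like a dict; one timestamp is produced per decoded token,
    # so the two tensors must share a shape.
    assert generate_outputs["sequences"].shape == generate_outputs["token_timestamps"].shape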
@@ -2268,7 +2268,7 @@ def test_large_token_timestamp_generation(self):
             **input_features, max_length=448, return_timestamps=True, return_token_timestamps=True
         )
 
-        self.assertEqual(generate_outputs.sequences.shape, generate_outputs.token_timestamps.shape)
+        self.assertEqual(generate_outputs["sequences"].shape, generate_outputs["token_timestamps"].shape)
 
         # fmt: off
         EXPECTED_OUTPUT = torch.tensor([
@@ -2279,7 +2279,7 @@ def test_large_token_timestamp_generation(self):
         ])
         # fmt: on
 
-        self.assertTrue(torch.allclose(generate_outputs.token_timestamps.to("cpu"), EXPECTED_OUTPUT))
+        self.assertTrue(torch.allclose(generate_outputs["token_timestamps"].to("cpu"), EXPECTED_OUTPUT))
 
     @slow
     def test_tiny_token_timestamp_batch_generation(self):
@@ -2306,9 +2306,9 @@ def test_tiny_token_timestamp_batch_generation(self):
         )
 
         # task id and lang id prompts should not have timestamp tokens
-        self.assertEqual(generate_outputs.sequences.shape[-1] - 2, generate_outputs.token_timestamps.shape[-1])
+        self.assertEqual(generate_outputs["sequences"].shape[-1] - 2, generate_outputs["token_timestamps"].shape[-1])
 
-        self.assertEqual(len(generate_outputs.sequences), num_return_sequences * num_samples)
+        self.assertEqual(len(generate_outputs["sequences"]), num_return_sequences * num_samples)
 
     @slow
     def test_tiny_token_timestamp_generation_longform(self):
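The batched test keeps the same key-access change; per the comment in the hunk, the language-id and task-id prompt tokens carry no timestamps, which explains the off-by-two in the trailing dimensions. A hypothetical batched sketch (num_return_sequences requires sampling or beam search; the test's exact decoding settings are not shown in this diff):

    gen = model.generate(
        input_features,  # a batch of samples in this test
        max_length=448,
        return_timestamps=True,
        return_token_timestamps=True,
        do_sample=True,
        num_return_sequences=2,
    )
    # Two prompt tokens (language id, task id) have no timestamps.
    assert gen["sequences"].shape[-1] - 2 == gen["token_timestamps"].shape[-1]
    assert len(gen["sequences"]) == 2 * input_features.shape[0]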
@@ -2799,7 +2799,7 @@ def test_whisper_shortform_single_batch_prev_cond(self):
 
         torch.manual_seed(0)
         result = model.generate(input_features, **gen_kwargs)
-        decoded = processor.batch_decode(result.sequences, skip_special_tokens=True)
+        decoded = processor.batch_decode(result, skip_special_tokens=True)
 
         assert decoded == EXPECTED_TEXT
 
@@ -2814,7 +2814,7 @@ def test_whisper_shortform_single_batch_prev_cond(self):
 
         torch.manual_seed(0)
         result = model.generate(input_features, **gen_kwargs)
-        decoded = processor.batch_decode(result.sequences, skip_special_tokens=True)
+        decoded = processor.batch_decode(result, skip_special_tokens=True)
 
         assert decoded == EXPECTED_TEXT1
 
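The remaining shortform hunks all make the same adjustment: with these gen_kwargs, generate() is now expected to return the token ids directly rather than an output object, so batch_decode takes the result as-is instead of reaching through .sequences:

    result = model.generate(input_features, **gen_kwargs)  # plain token-id tensor here
    decoded = processor.batch_decode(result, skip_special_tokens=True)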
@@ -3114,7 +3114,7 @@ def test_whisper_shortform_multi_batch_hard_prev_cond(self):
         }
 
         result = model.generate(**inputs, **gen_kwargs)
-        decoded_all = processor.batch_decode(result.sequences, skip_special_tokens=True)
+        decoded_all = processor.batch_decode(result, skip_special_tokens=True)
 
         for i in range(num_samples):
             if isinstance(EXPECTED_TEXT[i], str):