@@ -1367,37 +1367,51 @@ async def test_long_seed(client: openai.AsyncOpenAI):
1367
1367
or "less_than_equal" in exc_info .value .message )
1368
1368
1369
1369
1370
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME],
)
async def test_tokenize(server, client: openai.AsyncOpenAI, model_name: str):
    """Check that the server's tokenize endpoint agrees with the HF tokenizer.

    Exercises both values of ``add_special_tokens`` and verifies the full
    response payload: token ids, token count, and the model's context length.
    """
    # client.base_url ends in ".../v1/"; dropping the last 3 chars ("v1/")
    # leaves the server root WITH a trailing slash, so endpoint paths must be
    # appended without a leading slash.  (Previously "/tokenize" was used,
    # which produced a double slash and was inconsistent with
    # test_detokenize below.)
    base_url = str(client.base_url)[:-3]
    tokenizer = get_tokenizer(tokenizer_name=MODEL_NAME, tokenizer_mode="fast")

    for add_special in [False, True]:
        prompt = "This is a test prompt."
        # Reference encoding computed locally with the same tokenizer.
        tokens = tokenizer.encode(prompt, add_special_tokens=add_special)

        response = requests.post(base_url + "tokenize",
                                 json={
                                     "add_special_tokens": add_special,
                                     "model": model_name,
                                     "prompt": prompt
                                 })
        # Fail loudly on HTTP errors instead of asserting on an error body.
        response.raise_for_status()
        # max_model_len is pinned to the test model's context size.
        # NOTE(review): 8192 is hard-coded to MODEL_NAME's config — confirm
        # if the test model ever changes.
        assert response.json() == {
            "tokens": tokens,
            "count": len(tokens),
            "max_model_len": 8192
        }
1397
@pytest.mark.asyncio
@pytest.mark.parametrize(
    "model_name",
    [MODEL_NAME],
)
async def test_detokenize(server, client: openai.AsyncOpenAI, model_name: str):
    """Round-trip check: encode a prompt locally with the HF tokenizer, then
    verify the server's detokenize endpoint reconstructs the original text.
    """
    # Strip the trailing "v1/" from the OpenAI client's base URL to reach the
    # server root, where the detokenize route is mounted.
    base_url = str(client.base_url)[:-3]
    tokenizer = get_tokenizer(tokenizer_name=MODEL_NAME, tokenizer_mode="fast")

    sample_text = "This is a test prompt."
    token_ids = tokenizer.encode(sample_text, add_special_tokens=False)

    resp = requests.post(base_url + "detokenize",
                         json={
                             "model": model_name,
                             "tokens": token_ids
                         })
    # Surface HTTP-level failures immediately.
    resp.raise_for_status()
    assert resp.json() == {"prompt": sample_text}
0 commit comments