@@ -355,24 +355,6 @@ static json oaicompat_completion_params_parse(
     llama_params["__oaicompat"] = true;

-    // Map OpenAI parameters to llama.cpp parameters
-    //
-    // For parameters that are defined by the OpenAI documentation (e.g.
-    // temperature), we explicitly specify OpenAI's intended default; we
-    // need to do that because sometimes OpenAI disagrees with llama.cpp
-    //
-    // https://platform.openai.com/docs/api-reference/chat/create
-    llama_sampling_params default_sparams;
-    llama_params["model"]             = json_value(body, "model", std::string("unknown"));
-    llama_params["frequency_penalty"] = json_value(body, "frequency_penalty", 0.0);
-    llama_params["logit_bias"]        = json_value(body, "logit_bias", json::object());
-    llama_params["n_predict"]         = json_value(body, "max_tokens", -1);
-    llama_params["presence_penalty"]  = json_value(body, "presence_penalty", 0.0);
-    llama_params["seed"]              = json_value(body, "seed", LLAMA_DEFAULT_SEED);
-    llama_params["stream"]            = json_value(body, "stream", false);
-    llama_params["temperature"]       = json_value(body, "temperature", 1.0);
-    llama_params["top_p"]             = json_value(body, "top_p", 1.0);
-
     // Apply chat template to the list of messages
     llama_params["prompt"] = format_chat(model, chat_template, body.at("messages"));
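For context, the removed block relies on a json_value helper that reads a field from the request body and falls back to a caller-supplied default (here, OpenAI's documented defaults) when the field is absent. A minimal sketch of that pattern, assuming nlohmann::json; this is an illustrative re-implementation, not necessarily the actual helper in llama.cpp's server utils:

#include <nlohmann/json.hpp>
#include <string>

using json = nlohmann::json;

// Sketch of the json_value pattern: return body[key] if the field is
// present and non-null, otherwise return the caller's default. The
// null check means an explicit `"temperature": null` also falls back.
template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    if (body.contains(key) && !body.at(key).is_null()) {
        return body.at(key).get<T>();
    }
    return default_value;
}

int main() {
    json body = json::parse(R"({"temperature": 0.7})");
    double temperature = json_value(body, "temperature", 1.0); // 0.7, taken from the request
    double top_p       = json_value(body, "top_p",       1.0); // 1.0, OpenAI's documented default
    (void)temperature; (void)top_p;
    return 0;
}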