@@ -11,7 +11,6 @@ use async_openai::types::{ChatCompletionRequestUserMessageArgs, CreateChatComple
 use colored::Colorize;
 
 use crate::profile;
-// use crate::config::format_prompt; // Temporarily comment out
 use crate::config::AppConfig;
 
 // Cached tokenizer for performance
@@ -22,8 +21,7 @@ const MODEL_GPT4: &str = "gpt-4";
 const MODEL_GPT4_OPTIMIZED: &str = "gpt-4o";
 const MODEL_GPT4_MINI: &str = "gpt-4o-mini";
 const MODEL_GPT4_1: &str = "gpt-4.1";
-// TODO: Get this from config.rs or a shared constants module
-const DEFAULT_MODEL_NAME: &str = "gpt-4.1";
+const DEFAULT_MODEL_NAME: &str = crate::config::DEFAULT_MODEL;
 
 /// Represents the available AI models for commit message generation.
 /// Each model has different capabilities and token limits.
@@ -211,17 +209,19 @@ impl From<String> for Model {
     }
 }
 
-fn get_tokenizer(_model_str: &str) -> CoreBPE {
-    // TODO: This should be based on the model string, but for now we'll just use cl100k_base
-    // which is used by gpt-3.5-turbo and gpt-4
-    tiktoken_rs::cl100k_base().expect("Failed to create tokenizer")
+fn get_tokenizer(model_str: &str) -> CoreBPE {
+    match model_str {
+        "gpt-4" | "gpt-4o" | "gpt-4o-mini" | "gpt-4.1" => {
+            tiktoken_rs::cl100k_base()
+        }
+        _ => tiktoken_rs::cl100k_base(), // fallback
+    }.expect("Failed to create tokenizer")
 }
 
 pub async fn run(settings: AppConfig, content: String) -> Result<String> {
     let model_str = settings.model.as_deref().unwrap_or(DEFAULT_MODEL_NAME);
 
     let client = async_openai::Client::new();
-    // let prompt = format_prompt(&content, &settings.prompt(), settings.template())?; // Temporarily comment out
     let prompt = content; // Use raw content as prompt for now
     let model: Model = settings
         .model
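
Every arm of the new match resolves to cl100k_base, so the routing is effectively a no-op; the gpt-4o family (and gpt-4.1) actually use the o200k_base encoding. A minimal sketch of a model-aware lookup, assuming the pinned tiktoken-rs version ships the get_bpe_from_model helper:

    use tiktoken_rs::{cl100k_base, get_bpe_from_model, CoreBPE};

    // Resolve the encoding from the model name (o200k_base for the
    // gpt-4o family, cl100k_base for gpt-4), falling back to
    // cl100k_base for names the library does not recognize.
    fn get_tokenizer(model_str: &str) -> CoreBPE {
        get_bpe_from_model(model_str)
            .or_else(|_| cl100k_base())
            .expect("Failed to create tokenizer")
    }
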
@@ -239,15 +239,13 @@ pub async fn run(settings: AppConfig, content: String) -> Result<String> {
         );
     }
 
-    // TODO: Make temperature configurable
-    let temperature_value = 0.7;
+    let temperature_value = settings.temperature.unwrap_or(crate::config::DEFAULT_TEMPERATURE);
 
     log::info!(
         "Using model: {}, Tokens: {}, Max tokens: {}, Temperature: {}",
         model_str.yellow(),
         tokens.to_string().green(),
-        // TODO: Make max_tokens configurable
-        (model.context_size() - tokens).to_string().green(),
+        (settings.max_tokens.unwrap_or(model.context_size() - tokens)).to_string().green(),
         temperature_value.to_string().blue() // Use temperature_value
     );
 
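
These reads assume config.rs exposes shared defaults plus optional per-run overrides. A sketch of that side, with types assumed (only DEFAULT_MODEL, DEFAULT_TEMPERATURE, and the three field names appear in the diff):

    // config.rs (sketch): defaults mirror the hard-coded values the
    // diff removes from generate.rs.
    pub const DEFAULT_MODEL: &str = "gpt-4.1";
    pub const DEFAULT_TEMPERATURE: f64 = 0.7;

    pub struct AppConfig {
        pub model: Option<String>,     // falls back to DEFAULT_MODEL
        pub temperature: Option<f64>,  // falls back to DEFAULT_TEMPERATURE
        pub max_tokens: Option<usize>, // falls back to remaining context
    }
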
@@ -257,9 +255,8 @@ pub async fn run(settings: AppConfig, content: String) -> Result<String> {
         .content(prompt)
         .build()?
         .into()])
-        .temperature(temperature_value) // Use temperature_value
-        // TODO: Make max_tokens configurable
-        .max_tokens((model.context_size() - tokens) as u16)
+        .temperature(temperature_value as f32) // Use temperature_value
+        .max_tokens(settings.max_tokens.unwrap_or(model.context_size() - tokens) as u16)
         .build()?;
 
     profile!("OpenAI API call");
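
One caveat survives the change: the `as u16` cast wraps silently, and on large-context models `model.context_size() - tokens` can exceed u16::MAX (65,535). A defensive variant, assuming the usize types sketched above:

    // Clamp the completion budget before narrowing to u16 so an
    // oversized remaining context cannot wrap around.
    let budget = settings
        .max_tokens
        .unwrap_or(model.context_size() - tokens)
        .min(u16::MAX as usize) as u16;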