@@ -17,26 +17,256 @@ pub mod candidates;
 pub mod local;
 
 // Re-export commonly used types and functions
-pub use analysis::{FileAnalysis, analyze_file, analyze_file_via_api, ParsedFile};
+pub use analysis::{FileAnalysis, analyze_file, analyze_file_via_api};
 pub use scoring::{calculate_impact_scores, ImpactScore};
 pub use candidates::{generate_candidates, select_best_candidate};
 
+/// Represents a parsed file from the git diff
+#[derive(Debug)]
+pub struct ParsedFile {
+  pub path: String,
+  pub operation: String,
+  pub diff_content: String,
+}
+
+/// Parse git diff into individual files
+pub fn parse_diff(diff_content: &str) -> Result<Vec<ParsedFile>> {
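+  // Delegate to the shared parser in multi_step_integration and re-wrap its
+  // results in this module's ParsedFile type.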
+  let old_files = crate::multi_step_integration::parse_diff(diff_content)?;
+  Ok(old_files.into_iter().map(|f| ParsedFile {
+    path: f.path,
+    operation: f.operation,
+    diff_content: f.diff_content,
+  }).collect())
+}
+
+/// Call the analyze function via OpenAI
+async fn call_analyze_function(client: &Client<OpenAIConfig>, model: &str, file: &ParsedFile) -> Result<serde_json::Value> {
+  // Convert our ParsedFile to the old format
+  let old_file = crate::multi_step_integration::ParsedFile {
+    path: file.path.clone(),
+    operation: file.operation.clone(),
+    diff_content: file.diff_content.clone(),
+  };
+  crate::multi_step_integration::call_analyze_function(client, model, &old_file).await
+}
+
+/// Call the score function via OpenAI
+async fn call_score_function(
+  client: &Client<OpenAIConfig>,
+  model: &str,
+  files_data: Vec<crate::multi_step_analysis::FileDataForScoring>
+) -> Result<Vec<crate::multi_step_analysis::FileWithScore>> {
+  crate::multi_step_integration::call_score_function(client, model, files_data).await
+}
+
+/// Call the generate function via OpenAI
+async fn call_generate_function(
+  client: &Client<OpenAIConfig>,
+  model: &str,
+  scored_files: Vec<crate::multi_step_analysis::FileWithScore>,
+  max_length: usize
+) -> Result<serde_json::Value> {
+  crate::multi_step_integration::call_generate_function(client, model, scored_files, max_length).await
+}
+
 /// Main entry point for multi-step generation with API
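+/// Runs three steps against the OpenAI API: analyze each changed file in
+/// parallel, score the files by impact, then generate candidate commit
+/// messages and return the first one.
+///
+/// Illustrative call (the client setup and model name below are assumptions,
+/// not part of this module):
+///
+/// ```ignore
+/// let client = Client::with_config(OpenAIConfig::new());
+/// let message = generate_with_api(&client, "gpt-4o-mini", &diff, Some(72)).await?;
+/// ```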
 pub async fn generate_with_api(
   client: &Client<OpenAIConfig>,
   model: &str,
   diff: &str,
   max_length: Option<usize>,
 ) -> Result<String> {
-  // This will be moved from multi_step_integration.rs generate_commit_message_multi_step
-  crate::multi_step_integration::generate_commit_message_multi_step(client, model, diff, max_length).await
+  use futures::future::join_all;
+  use crate::multi_step_analysis::FileDataForScoring;
+  use crate::debug_output;
+
+  log::info!("Starting multi-step commit message generation");
+
+  // Initialize multi-step debug session
+  if let Some(session) = debug_output::debug_session() {
+    session.init_multi_step_debug();
+  }
+
+  // Parse the diff to extract individual files
+  let parsed_files = parse_diff(diff)?;
+  log::info!("Parsed {} files from diff", parsed_files.len());
+
+  // Track files parsed in debug session
+  if let Some(session) = debug_output::debug_session() {
+    session.set_total_files_parsed(parsed_files.len());
+  }
+
+  // Step 1: Analyze each file individually in parallel
+  log::debug!("Analyzing {} files in parallel", parsed_files.len());
+
+  // Create futures for all file analyses
+  let analysis_futures: Vec<_> = parsed_files
+    .iter()
+    .map(|file| {
+      let file_path = file.path.clone();
+      let operation = file.operation.clone();
+      async move {
+        log::debug!("Analyzing file: {file_path}");
+        let start_time = std::time::Instant::now();
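+        // Compact JSON summary of the request, recorded for debug output;
+        // the actual diff body is elided.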
+        let payload = format!("{{\"file_path\": \"{file_path}\", \"operation_type\": \"{operation}\", \"diff_content\": \"...\"}}");
+
+        let result = call_analyze_function(client, model, file).await;
+        let duration = start_time.elapsed();
+        (file, result, duration, payload)
+      }
+    })
+    .collect();
+
+  // Execute all analyses in parallel
+  let analysis_results = join_all(analysis_futures).await;
+
+  // Process results and handle errors
+  let mut file_analyses = Vec::new();
+
+  for (i, (file, result, duration, payload)) in analysis_results.into_iter().enumerate() {
+    match result {
+      Ok(analysis) => {
+        log::debug!("Successfully analyzed file {}: {}", i, file.path);
+
+        // Extract structured analysis data for debug
+        let analysis_result = crate::multi_step_analysis::FileAnalysisResult {
+          lines_added:   analysis["lines_added"].as_u64().unwrap_or(0) as u32,
+          lines_removed: analysis["lines_removed"].as_u64().unwrap_or(0) as u32,
+          file_category: analysis["file_category"]
+            .as_str()
+            .unwrap_or("source")
+            .to_string(),
+          summary:       analysis["summary"].as_str().unwrap_or("").to_string()
+        };
+
+        // Record in debug session
+        if let Some(session) = debug_output::debug_session() {
+          session.add_file_analysis_debug(file.path.clone(), file.operation.clone(), analysis_result.clone(), duration, payload);
+        }
+
+        file_analyses.push((file, analysis));
+      }
+      Err(e) => {
+        // Check if it's an API key error - if so, propagate it immediately
+        let error_str = e.to_string();
+        if error_str.contains("invalid_api_key") || error_str.contains("Incorrect API key") || error_str.contains("Invalid API key") {
+          return Err(e);
+        }
+        log::warn!("Failed to analyze file {}: {}", file.path, e);
+        // Continue with other files even if one fails
+      }
+    }
+  }
+
+  if file_analyses.is_empty() {
+    anyhow::bail!("Failed to analyze any files");
+  }
+
+  // Step 2: Calculate impact scores
+  let files_data: Vec<FileDataForScoring> = file_analyses
+    .iter()
+    .map(|(file, analysis)| {
+      FileDataForScoring {
+        file_path:      file.path.clone(),
+        operation_type: file.operation.clone(),
+        lines_added:    analysis["lines_added"].as_u64().unwrap_or(0) as u32,
+        lines_removed:  analysis["lines_removed"].as_u64().unwrap_or(0) as u32,
+        file_category:  analysis["file_category"]
+          .as_str()
+          .unwrap_or("source")
+          .to_string(),
+        summary:        analysis["summary"].as_str().unwrap_or("").to_string()
+      }
+    })
+    .collect();
+
+  log::debug!("Calculating impact scores for {} files", files_data.len());
+  let start_time = std::time::Instant::now();
+  let scored_files = call_score_function(client, model, files_data).await?;
+  let duration = start_time.elapsed();
+
+  // Record scoring debug info
+  if let Some(session) = debug_output::debug_session() {
+    let payload = format!("{{\"files_count\": {}, \"scoring_method\": \"api\"}}", scored_files.len());
+    session.set_score_debug(scored_files.clone(), duration, payload);
+  }
+
+  log::debug!("Successfully scored {} files", scored_files.len());
+
+  // Step 3: Generate commit message using the scored files
+  log::debug!("Generating commit message from scored files");
+  let start_time = std::time::Instant::now();
+  let commit_result = call_generate_function(client, model, scored_files, max_length.unwrap_or(72)).await?;
+  let duration = start_time.elapsed();
+
+  // Record generate debug info
+  if let Some(session) = debug_output::debug_session() {
+    session.record_timing("generate", duration);
+  }
+
+  // Extract the commit message from the JSON response
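+  // The generate step is expected to return a JSON object with a "candidates"
+  // array of strings; the first entry is used as the commit message.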
+  let message = commit_result["candidates"]
+    .as_array()
+    .and_then(|arr| arr.first())
+    .and_then(|v| v.as_str())
+    .ok_or_else(|| anyhow::anyhow!("No commit message candidates in response"))?;
+
+  log::info!("Multi-step generation completed successfully");
+  Ok(message.to_string())
+}
+
+/// Simplified multi-step commit message generation using OpenAI directly
+pub async fn generate_simple(
+  client: &Client<OpenAIConfig>,
+  model: &str,
+  diff_content: &str,
+  max_length: Option<usize>,
+) -> Result<String> {
+  // Delegate to the existing simple multi-step implementation
+  crate::simple_multi_step::generate_commit_message_simple(client, model, diff_content, max_length).await
 }
 
 /// Main entry point for local multi-step generation (no API)
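+/// Performs the same three steps as `generate_with_api` (analyze, score,
+/// generate) but uses the local functions in `multi_step_analysis`, so no
+/// API calls are made.
+///
+/// Illustrative call (caller-side names below are assumptions):
+///
+/// ```ignore
+/// let diff = std::fs::read_to_string("changes.diff")?;
+/// let message = generate_local(&diff, Some(72))?;
+/// ```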
 pub fn generate_local(
   diff: &str,
   max_length: Option<usize>,
 ) -> Result<String> {
-  // This will be moved from multi_step_integration.rs generate_commit_message_local
-  crate::multi_step_integration::generate_commit_message_local(diff, max_length)
+  use crate::multi_step_analysis::{analyze_file, calculate_impact_scores, generate_commit_messages, FileDataForScoring};
+  use crate::debug_output;
+
+  log::info!("Starting local multi-step commit message generation");
+
+  // Parse the diff
+  let parsed_files = parse_diff(diff)?;
+
+  // Track files parsed in debug session
+  if let Some(session) = debug_output::debug_session() {
+    session.set_total_files_parsed(parsed_files.len());
+  }
+
+  // Step 1: Analyze each file
+  let mut files_data = Vec::new();
+  for file in parsed_files {
+    let analysis = analyze_file(&file.path, &file.diff_content, &file.operation);
+    files_data.push(FileDataForScoring {
+      file_path:      file.path,
+      operation_type: file.operation,
+      lines_added:    analysis.lines_added,
+      lines_removed:  analysis.lines_removed,
+      file_category:  analysis.file_category,
+      summary:        analysis.summary
+    });
+  }
+
+  // Step 2: Calculate impact scores
+  let score_result = calculate_impact_scores(files_data);
+
+  // Step 3: Generate commit messages
+  let generate_result = generate_commit_messages(score_result.files_with_scores, max_length.unwrap_or(72));
+
+  // Return the first candidate
+  generate_result.candidates
+    .into_iter()
+    .next()
+    .ok_or_else(|| anyhow::anyhow!("No commit message candidates generated"))
 }