@@ -21,6 +21,7 @@ use anyhow::Result;
2121use serde:: Deserialize ;
2222use serde_json:: json;
2323use std:: collections:: HashMap ;
24+ use std:: time:: Duration ;
2425
2526pub struct AIEnhancements {
2627 config : Config ,
@@ -355,6 +356,9 @@ impl AIEnhancements {
355356 sample
356357 }
357358
/// Upper bound on *additional* attempts for a batch AI operation.
///
/// The retry budget covers the whole round trip — the LLM call *and* the
/// parsing of its structured response — so a batch is attempted at most
/// `MAX_BATCH_RETRIES + 1` times in total.
const MAX_BATCH_RETRIES: u32 = 3;
358362 // Extract AI-powered descriptions for multiple files in a single batch call
359363 pub async fn extract_ai_descriptions_batch (
360364 & self ,
@@ -364,33 +368,54 @@
364368 return Ok ( HashMap :: new ( ) ) ;
365369 }
366370
367- // Build batch user message with all files
368- let user_message = self . build_batch_user_message ( files) ;
369-
370- // Create JSON schema for structured response
371371 let json_schema = self . create_batch_response_schema ( ) ;
372+ let mut last_error = None ;
372373
373- // Single API call for multiple files
374- // LLM call includes retry with exponential backoff (in LlmClient).
375- // If it still fails after retries, propagate error to stop indexing.
376- let response = self
377- . call_llm (
378- & self . config . graphrag . llm . description_model ,
379- self . config . graphrag . llm . description_system_prompt . clone ( ) ,
380- user_message,
381- Some ( json_schema) ,
382- )
383- . await
384- . map_err ( |e| {
385- anyhow:: anyhow!(
386- "GraphRAG AI description failed for {} files after retries: {}. \
387- Stopping indexing to prevent storing data without LLM descriptions.",
388- files. len( ) ,
389- e
// NOTE(review): the deleted comment above says LlmClient already retries with
// exponential backoff internally; this new outer loop retries the same call up
// to MAX_BATCH_RETRIES more times, multiplying those inner attempts (~4x).
// Confirm the inner retry was removed, or that the amplification is intended.
// Unlike the old code, parse failures now also consume the retry budget.
374+ for attempt in 0 ..=Self :: MAX_BATCH_RETRIES {
375+ if attempt > 0 {
// Exponential backoff before every attempt after the first; non-blocking
// (tokio::time::sleep), so the executor is not stalled while waiting.
376+ let delay = Duration :: from_secs ( 5 * ( 1 << ( attempt - 1 ) ) ) ; // 5s, 10s, 20s
377+ if !self . quiet {
378+ eprintln ! (
379+ "⚠️ AI batch attempt {}/{} failed, retrying in {:?}..." ,
380+ attempt,
381+ Self :: MAX_BATCH_RETRIES + 1 ,
382+ delay
383+ ) ;
384+ }
385+ tokio:: time:: sleep ( delay) . await ;
386+ }
387+
388+ // Build fresh message each attempt
389+ let user_message = self . build_batch_user_message ( files) ;
// NOTE(review): files is unchanged across attempts, so this rebuilds an
// identical message each time — presumably cheap; hoisting it would also work.
391+ match self
392+ . call_llm (
393+ & self . config . graphrag . llm . description_model ,
394+ self . config . graphrag . llm . description_system_prompt . clone ( ) ,
395+ user_message,
// Schema is cloned because each loop iteration consumes its copy.
396+ Some ( json_schema. clone ( ) ) ,
390397 )
391- } ) ?;
398+ . await
399+ {
// Success requires both the LLM call AND response parsing to succeed;
// either failure records the error and falls through to the next attempt.
400+ Ok ( response) => match self . parse_batch_response ( & response, files) {
401+ Ok ( results) => return Ok ( results) ,
402+ Err ( e) => {
403+ last_error = Some ( e) ;
404+ }
405+ } ,
406+ Err ( e) => {
407+ last_error = Some ( e) ;
408+ }
409+ }
410+ }
392411

393- self . parse_batch_response ( & response, files)
// last_error is always Some here (every iteration either returned Ok above or
// stored an error), so the unwrap_or_else fallback message is purely defensive.
// The old "Stopping indexing..." wording was dropped from the error; confirm
// callers still treat this error as fatal to the indexing run.
412+ Err ( last_error. unwrap_or_else ( || {
413+ anyhow:: anyhow!(
414+ "AI batch description failed for {} files after {} retries" ,
415+ files. len( ) ,
416+ Self :: MAX_BATCH_RETRIES
417+ )
418+ } ) )
394419 }
395420
396421 // Build user message for batch processing
0 commit comments