 """
 LangGraph nodes for the Rule Feasibility Agent.
 """
 
-import json
 import logging
 
 from langchain_openai import ChatOpenAI
 
 from src.core.config import config
 
-from .models import FeasibilityState
+from .models import FeasibilityAnalysis, FeasibilityState, YamlGeneration
 from .prompts import RULE_FEASIBILITY_PROMPT, YAML_GENERATION_PROMPT
 
 logger = logging.getLogger(__name__)
 
 
-def analyze_rule_feasibility(state: FeasibilityState) -> FeasibilityState:
+async def analyze_rule_feasibility(state: FeasibilityState) -> FeasibilityState:
     """
-    Analyze whether a rule description is feasible to implement.
+    Analyze whether a rule description is feasible to implement using structured output.
     """
     try:
-        # Create LLM client directly using centralized config
+        # Create LLM client with structured output
         llm = ChatOpenAI(
             api_key=config.ai.api_key,
             model=config.ai.model,
             max_tokens=config.ai.max_tokens,
             temperature=config.ai.temperature,
         )
 
+        # Use structured output instead of manual JSON parsing
+        structured_llm = llm.with_structured_output(FeasibilityAnalysis)
+
         # Analyze rule feasibility
         prompt = RULE_FEASIBILITY_PROMPT.format(rule_description=state.rule_description)
 
-        response = llm.invoke(prompt)
-
-        # Log the raw response for debugging
-        logger.info(f"Raw LLM response: {response.content}")
-
-        # Check if response is empty
-        if not response.content or response.content.strip() == "":
-            logger.error("LLM returned empty response")
-            state.is_feasible = False
-            state.feedback = "Analysis failed: LLM returned empty response"
-            return state
-
-        # Try to parse JSON with better error handling
-        try:
-            result = json.loads(response.content.strip())
-        except json.JSONDecodeError as json_error:
-            logger.error(f"Failed to parse JSON response: {json_error}")
-            logger.error(f"Response content: {response.content}")
-
-            # Try to extract JSON from markdown code blocks if present
-            content = response.content.strip()
-            if content.startswith("```json"):
-                content = content[7:]  # Remove ```json
-            elif content.startswith("```"):
-                content = content[3:]  # Remove ```
-            if content.endswith("```"):
-                content = content[:-3]  # Remove trailing ```
-
-            try:
-                result = json.loads(content.strip())
-                logger.info("Successfully extracted JSON from markdown code blocks")
-            except json.JSONDecodeError:
-                # If all parsing attempts fail, set default values
-                logger.error("All JSON parsing attempts failed")
-                state.is_feasible = False
-                state.feedback = (
-                    f"Analysis failed: Could not parse LLM response as JSON. Raw response: {response.content[:200]}..."
-                )
-                return state
-
-        # Update state with analysis results
-        state.is_feasible = result.get("is_feasible", False)
-        state.rule_type = result.get("rule_type", "")
-        state.confidence_score = result.get("confidence_score", 0.0)
-        state.yaml_content = result.get("yaml_content", "")
-        state.feedback = result.get("feedback", "")
-        state.analysis_steps = result.get("analysis_steps", [])
-
-        logger.info(f"Rule feasibility analysis completed: {state.is_feasible}")
+        # Get structured response - no more JSON parsing needed!
+        result = await structured_llm.ainvoke(prompt)
+
+        # Update state with analysis results - now type-safe!
+        state.is_feasible = result.is_feasible
+        state.rule_type = result.rule_type
+        state.confidence_score = result.confidence_score
+        state.feedback = result.feedback
+        state.analysis_steps = result.analysis_steps
+
+        logger.info(f"🔍 Rule feasibility analysis completed: {state.is_feasible}")
+        logger.info(f"🔍 Rule type identified: {state.rule_type}")
+        logger.info(f"🔍 Confidence score: {state.confidence_score}")
 
     except Exception as e:
-        logger.error(f"Error in rule feasibility analysis: {e}")
+        logger.error(f"❌ Error in rule feasibility analysis: {e}")
         state.is_feasible = False
         state.feedback = f"Analysis failed: {str(e)}"
+        state.confidence_score = 0.0
 
     return state
 
 
-def generate_yaml_config(state: FeasibilityState) -> FeasibilityState:
+async def generate_yaml_config(state: FeasibilityState) -> FeasibilityState:
     """
-    Generate YAML configuration for feasible rules.
+    Generate YAML configuration for feasible rules using structured output.
+    This node only runs if the rule is feasible.
     """
     if not state.is_feasible or not state.rule_type:
+        logger.info("🔧 Skipping YAML generation - rule not feasible or no rule type")
         return state
 
     try:
-        # Create LLM client directly using centralized config
+        # Create LLM client with structured output
         llm = ChatOpenAI(
             api_key=config.ai.api_key,
             model=config.ai.model,
             max_tokens=config.ai.max_tokens,
             temperature=config.ai.temperature,
         )
 
+        # Use structured output for YAML generation
+        structured_llm = llm.with_structured_output(YamlGeneration)
+
         prompt = YAML_GENERATION_PROMPT.format(rule_type=state.rule_type, rule_description=state.rule_description)
 
-        response = llm.invoke(prompt)
-        state.yaml_content = response.content.strip()
+        # Get structured response
+        result = await structured_llm.ainvoke(prompt)
+
+        # Update state with generated YAML
+        state.yaml_content = result.yaml_content.strip()
 
-        logger.info(f"YAML configuration generated for rule type: {state.rule_type}")
+        logger.info(f"🔧 YAML configuration generated for rule type: {state.rule_type}")
+        logger.info(f"🔧 Generated YAML length: {len(state.yaml_content)} characters")
 
     except Exception as e:
-        logger.error(f"Error generating YAML configuration: {e}")
+        logger.error(f"❌ Error generating YAML configuration: {e}")
         state.feedback += f"\nYAML generation failed: {str(e)}"
 
     return state
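
Note on the schemas: `FeasibilityAnalysis` and `YamlGeneration` are imported from `.models`, but the models module is not part of this diff. For `with_structured_output` to work, they need to be schemas (typically Pydantic models) whose fields match the attributes the nodes read. A minimal sketch consistent with the code above (field names come from this diff; types, defaults, and descriptions are assumptions):

```python
# Hypothetical sketch of the .models schemas -- not part of this diff.
from pydantic import BaseModel, Field


class FeasibilityAnalysis(BaseModel):
    """Structured verdict returned by the feasibility analysis LLM call."""

    is_feasible: bool = Field(description="Whether the rule can be implemented")
    rule_type: str = Field(default="", description="Category of the rule")
    confidence_score: float = Field(default=0.0, description="Confidence between 0 and 1")
    feedback: str = Field(default="", description="Explanation of the verdict")
    analysis_steps: list[str] = Field(default_factory=list, description="Reasoning steps taken")


class YamlGeneration(BaseModel):
    """Structured payload returned by the YAML generation LLM call."""

    yaml_content: str = Field(description="Generated YAML rule configuration")
```

`with_structured_output` binds the schema to the request and parses the reply directly into a model instance, which is why the manual JSON parsing and markdown-fence stripping removed above are no longer needed.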
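
Both nodes are now coroutines, so the graph that mounts them has to be compiled and driven asynchronously. The graph wiring is also not in this diff; the sketch below shows one plausible layout, assuming this file is the package's `nodes` module, `FeasibilityState` serves as the LangGraph state schema, and its fields other than `rule_description` have defaults:

```python
# Hypothetical wiring -- the actual graph module is not in this diff.
from langgraph.graph import END, StateGraph

from .models import FeasibilityState
from .nodes import analyze_rule_feasibility, generate_yaml_config

workflow = StateGraph(FeasibilityState)
workflow.add_node("analyze", analyze_rule_feasibility)
workflow.add_node("generate_yaml", generate_yaml_config)
workflow.set_entry_point("analyze")

# Skip the second LLM call entirely when the rule is not feasible; this
# mirrors the guard at the top of generate_yaml_config.
workflow.add_conditional_edges(
    "analyze",
    lambda state: "generate_yaml" if state.is_feasible else END,
)
workflow.add_edge("generate_yaml", END)

graph = workflow.compile()


async def run_feasibility_check(rule_description: str) -> dict:
    # Coroutine nodes must go through the async entry point (ainvoke/astream).
    return await graph.ainvoke({"rule_description": rule_description})
```

With this layout, an infeasible rule short-circuits from `analyze` straight to `END`, and the guard inside `generate_yaml_config` becomes a second line of defense rather than the primary routing mechanism.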