| | """ |
| | LLM Helper for visualization agents |
| | Handles OpenAI API calls with retry logic and error handling |
| | """ |
| | import os |
| | import json |
| | from typing import Dict, Any, Optional |
| | from openai import OpenAI |
| | from dotenv import load_dotenv |
| | import time |
| |
|
| | |
# Resolve the project root relative to this file: the script lives in
# <root>/visualization/<this_dir>/, so the root is two directory levels up.
SCRIPT_DIR = os.path.abspath(os.path.dirname(__file__))
VISUALIZATION_DIR = os.path.dirname(SCRIPT_DIR)
ROOT_DIR = os.path.dirname(VISUALIZATION_DIR)

# Pull environment variables (e.g. OPENAI_API_KEY) from the root .env file.
load_dotenv(os.path.join(ROOT_DIR, '.env'))
| |
|
| |
|
class LLMHelper:
    """
    Helper class for LLM interactions.

    Wraps the OpenAI chat-completions client with retry logic (exponential
    backoff) and returns uniform success/failure result dictionaries so
    callers never have to handle raw API exceptions.
    """

    def __init__(self, model: str = "gpt-5-nano", temperature: float = 1):
        """
        Initialize LLM helper.

        Args:
            model: Model name to use.
            temperature: Temperature for generation.

        Raises:
            ValueError: If OPENAI_API_KEY is not set in the environment.
        """
        self.model = model
        self.temperature = temperature
        self.api_key = os.getenv('OPENAI_API_KEY')

        if not self.api_key:
            raise ValueError("OPENAI_API_KEY not found in environment variables")

        self.client = OpenAI(api_key=self.api_key)

    def get_completion(
        self,
        prompt: str,
        system_message: Optional[str] = None,
        max_retries: int = 3,
        json_mode: bool = False
    ) -> Dict[str, Any]:
        """
        Get completion from LLM with retry logic.

        Retries with exponential backoff (1s, 2s, 4s, ...) on API errors
        and — fixing the previous behavior — also on malformed JSON when
        ``json_mode`` is enabled, since an unparseable payload is usually a
        transient model failure that a fresh attempt can recover from.

        Args:
            prompt: User prompt.
            system_message: Optional system message prepended to the chat.
            max_retries: Maximum number of attempts.
            json_mode: Whether to force a JSON-object response.

        Returns:
            On success: ``{'success': True, 'content': ..., 'model': ...,
            'usage': {...}}`` where ``content`` is a dict when ``json_mode``
            is on, else a string.
            On failure: ``{'success': False, 'error': ...}`` plus, depending
            on the failure mode, ``'raw_content'`` (unparseable JSON) or
            ``'error_type'`` (exception class name).
        """
        messages = []
        if system_message:
            messages.append({"role": "system", "content": system_message})
        messages.append({"role": "user", "content": prompt})

        # Request parameters are attempt-invariant; build them once.
        api_params = {
            "model": self.model,
            "messages": messages,
            "temperature": self.temperature,
            # NOTE(review): 'reasoning_effort' is only accepted by reasoning
            # models — confirm self.model supports it, or the API will reject
            # the request on every attempt.
            "reasoning_effort": "low",
            "n": 1
        }
        if json_mode:
            api_params["response_format"] = {"type": "json_object"}

        # Holds the most recent failure so that exhausting retries reports
        # the actual last error rather than a generic message.
        last_error: Dict[str, Any] = {
            'success': False,
            'error': f"Failed after {max_retries} attempts"
        }

        for attempt in range(max_retries):
            # Keep the try body minimal: only the lines that can raise
            # API-level exceptions.
            try:
                response = self.client.chat.completions.create(**api_params)
                content = response.choices[0].message.content
            except Exception as e:
                last_error = {
                    'success': False,
                    'error': str(e),
                    'error_type': type(e).__name__
                }
                if attempt < max_retries - 1:
                    time.sleep(2 ** attempt)  # exponential backoff
                continue

            if json_mode:
                try:
                    content = json.loads(content)
                except json.JSONDecodeError as e:
                    # Bug fix: previously a malformed JSON payload aborted
                    # immediately, bypassing the retry loop. Treat it like
                    # any other transient failure and retry.
                    last_error = {
                        'success': False,
                        'error': f"Failed to parse JSON response: {str(e)}",
                        'raw_content': content
                    }
                    if attempt < max_retries - 1:
                        time.sleep(2 ** attempt)
                    continue

            return {
                'success': True,
                'content': content,
                'model': response.model,
                'usage': {
                    'prompt_tokens': response.usage.prompt_tokens,
                    'completion_tokens': response.usage.completion_tokens,
                    'total_tokens': response.usage.total_tokens
                }
            }

        # All attempts exhausted: surface the last concrete failure.
        return last_error

    def get_structured_completion(
        self,
        prompt: str,
        system_message: str,
        max_retries: int = 3
    ) -> Dict[str, Any]:
        """
        Get structured JSON completion.

        Thin convenience wrapper around :meth:`get_completion` with
        ``json_mode=True``.

        Args:
            prompt: User prompt.
            system_message: System message.
            max_retries: Maximum retries.

        Returns:
            Structured response dictionary (see :meth:`get_completion`).
        """
        return self.get_completion(
            prompt=prompt,
            system_message=system_message,
            max_retries=max_retries,
            json_mode=True
        )