"""
LLM Helper for visualization agents
Handles OpenAI API calls with retry logic and error handling
"""
import os
import json
from typing import Dict, Any, Optional
from openai import OpenAI
from dotenv import load_dotenv
import time
# Load environment variables from root directory (parent of visualization)
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
VISUALIZATION_DIR = os.path.dirname(SCRIPT_DIR)
ROOT_DIR = os.path.dirname(VISUALIZATION_DIR)
load_dotenv(os.path.join(ROOT_DIR, '.env'))
class LLMHelper:
    """
    Helper class for OpenAI chat-completion interactions.

    Wraps the OpenAI client with retry logic (exponential backoff) and a
    uniform success/failure dictionary result shape.
    """

    def __init__(self, model: str = "gpt-5-nano", temperature: float = 1):
        """
        Initialize LLM helper.

        Args:
            model: Model name to use
            temperature: Temperature for generation

        Raises:
            ValueError: If OPENAI_API_KEY is not set in the environment.
        """
        self.model = model
        self.temperature = temperature
        self.api_key = os.getenv('OPENAI_API_KEY')
        if not self.api_key:
            raise ValueError("OPENAI_API_KEY not found in environment variables")
        self.client = OpenAI(api_key=self.api_key)

    def get_completion(
        self,
        prompt: str,
        system_message: Optional[str] = None,
        max_retries: int = 3,
        json_mode: bool = False
    ) -> Dict[str, Any]:
        """
        Get completion from LLM with retry logic.

        A malformed JSON response (when ``json_mode`` is True) is treated as a
        retryable failure: remaining attempts are consumed before giving up,
        and the last failure (including ``raw_content``) is returned.

        Args:
            prompt: User prompt
            system_message: Optional system message
            max_retries: Maximum number of attempts
            json_mode: Whether to force JSON response

        Returns:
            On success: {'success': True, 'content', 'model', 'usage': {...}}.
            On failure: {'success': False, 'error', ...} with optional
            'error_type' (API/client errors) or 'raw_content' (JSON parse
            errors) keys.
        """
        messages = []
        if system_message:
            messages.append({"role": "system", "content": system_message})
        messages.append({"role": "user", "content": prompt})

        # Last observed failure; returned if every attempt is exhausted.
        last_error: Dict[str, Any] = {
            'success': False,
            'error': f"Failed after {max_retries} attempts"
        }

        for attempt in range(max_retries):
            try:
                # Prepare API call parameters
                api_params = {
                    "model": self.model,
                    "messages": messages,
                    "temperature": self.temperature,
                    # NOTE(review): reasoning_effort is only accepted by
                    # reasoning-capable models — confirm self.model supports it.
                    "reasoning_effort": "low",
                    "n": 1
                }
                # Add response format if JSON mode requested
                if json_mode:
                    api_params["response_format"] = {"type": "json_object"}

                # Make API call
                response = self.client.chat.completions.create(**api_params)

                # Extract response
                content = response.choices[0].message.content

                # Parse JSON if requested; a bad payload counts as a
                # retryable failure rather than an immediate bail-out.
                if json_mode:
                    try:
                        content = json.loads(content)
                    except json.JSONDecodeError as e:
                        last_error = {
                            'success': False,
                            'error': f"Failed to parse JSON response: {str(e)}",
                            'raw_content': content
                        }
                        if attempt < max_retries - 1:
                            time.sleep(2 ** attempt)
                        continue

                return {
                    'success': True,
                    'content': content,
                    'model': response.model,
                    'usage': {
                        'prompt_tokens': response.usage.prompt_tokens,
                        'completion_tokens': response.usage.completion_tokens,
                        'total_tokens': response.usage.total_tokens
                    }
                }
            except Exception as e:
                last_error = {
                    'success': False,
                    'error': str(e),
                    'error_type': type(e).__name__
                }
                if attempt < max_retries - 1:
                    # Wait before retry (exponential backoff)
                    time.sleep(2 ** attempt)

        return last_error

    def get_structured_completion(
        self,
        prompt: str,
        system_message: str,
        max_retries: int = 3
    ) -> Dict[str, Any]:
        """
        Get structured JSON completion.

        Convenience wrapper around :meth:`get_completion` with
        ``json_mode=True``.

        Args:
            prompt: User prompt
            system_message: System message
            max_retries: Maximum retries

        Returns:
            Structured response dictionary (see :meth:`get_completion`).
        """
        return self.get_completion(
            prompt=prompt,
            system_message=system_message,
            max_retries=max_retries,
            json_mode=True
        )