"""Minimal OpenAI LLM service for ad copy generation."""
import os
import sys
# Add parent directory to path for imports
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from typing import Optional, Dict, Any, List, Union
from openai import AsyncOpenAI
import json
import base64
from config import settings
class LLMService:
"""Simple OpenAI wrapper for generating ad copy."""
def __init__(self):
"""Initialize OpenAI client."""
self.client = AsyncOpenAI(api_key=settings.openai_api_key)
self.model = settings.llm_model
self.temperature = settings.llm_temperature
self.vision_model = getattr(settings, 'vision_model', 'gpt-4o')

    async def generate(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        temperature: Optional[float] = None,
        response_format: Optional[Dict[str, Any]] = None,
    ) -> str:
        """
        Generate text using OpenAI.

        Args:
            prompt: User prompt
            system_prompt: System prompt for context
            temperature: Override the default temperature (e.g. 0.95 for variety)
            response_format: JSON schema for structured output

        Returns:
            Generated text
        """
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": prompt})

        kwargs = {
            "model": self.model,
            "messages": messages,
            # Explicit None check so a caller-supplied 0.0 is not silently ignored
            "temperature": temperature if temperature is not None else self.temperature,
        }
        # Use gpt-4o when a response_format is requested (structured output requires it)
        if response_format:
            kwargs["model"] = "gpt-4o"
            kwargs["response_format"] = response_format

        response = await self.client.chat.completions.create(**kwargs)
        content = response.choices[0].message.content
        if content is None:
            raise ValueError("OpenAI returned empty response")
        return content
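
    # Illustrative structured-output call (a sketch only; the response_format
    # value {"type": "json_object"} is an assumption about how callers use this
    # service and must match the OpenAI mode actually required):
    #
    #     data = await llm_service.generate(
    #         prompt="Return JSON with a 'headline' key for a coffee brand.",
    #         response_format={"type": "json_object"},
    #     )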

    async def generate_json(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        temperature: Optional[float] = None,
    ) -> Dict[str, Any]:
        """
        Generate JSON output using OpenAI.

        Args:
            prompt: User prompt (should request JSON output)
            system_prompt: System prompt for context
            temperature: Override default temperature

        Returns:
            Parsed JSON dictionary
        """
        # Add JSON instruction to prompt
        json_prompt = f"{prompt}\n\nRespond with valid JSON only."

        response = await self.generate(
            prompt=json_prompt,
            system_prompt=system_prompt,
            temperature=temperature,
        )

        # Parse JSON from response
        try:
            # Strip markdown code fences if the model wrapped the JSON in them
            response = response.strip()
            if response.startswith("```json"):
                response = response[7:]
            if response.startswith("```"):
                response = response[3:]
            if response.endswith("```"):
                response = response[:-3]
            return json.loads(response.strip())
        except json.JSONDecodeError as e:
            raise ValueError(f"Failed to parse JSON response: {e}\nResponse: {response}")

    async def analyze_image_with_vision(
        self,
        image_bytes: bytes,
        analysis_prompt: str,
        system_prompt: Optional[str] = None,
    ) -> str:
        """
        Analyze an image using GPT-4 Vision API.

        Args:
            image_bytes: Image file bytes
            analysis_prompt: Prompt describing what to analyze
            system_prompt: Optional system prompt for context

        Returns:
            Analysis text from vision model
        """
        # Convert image bytes to base64 and embed as a data URL
        image_base64 = base64.b64encode(image_bytes).decode('utf-8')
        image_data_url = f"data:image/png;base64,{image_base64}"

        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": analysis_prompt
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": image_data_url
                    }
                }
            ]
        })

        response = await self.client.chat.completions.create(
            model=self.vision_model,
            messages=messages,
            temperature=0.3,  # Lower temperature for more consistent analysis
        )
        content = response.choices[0].message.content
        if content is None:
            raise ValueError("Vision API returned empty response")
        return content


# Global instance
llm_service = LLMService()
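

# Example usage (illustrative sketch only, not invoked by the service; it
# assumes `settings.openai_api_key` and the configured models are valid):
#
#     import asyncio
#
#     async def _demo() -> None:
#         headline = await llm_service.generate(
#             prompt="Write a one-line ad headline for a reusable water bottle.",
#             system_prompt="You are a concise copywriter.",
#         )
#         details = await llm_service.generate_json(
#             prompt="Give JSON with 'headline' and 'body' for the same product.",
#         )
#         print(headline, details)
#
#     asyncio.run(_demo())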