Initial commit of the Ad Generator Lite project, including backend services, frontend components, and configuration files. Adds core functionality for ad generation, user management, and image processing, along with a structured matrix system for ad testing.
f201243
| """Minimal OpenAI LLM service for ad copy generation.""" | |
| import os | |
| import sys | |
| # Add parent directory to path for imports | |
| sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__)))) | |
| from typing import Optional, Dict, Any, List, Union | |
| from openai import AsyncOpenAI | |
| import json | |
| import base64 | |
| from config import settings | |
| class LLMService: | |
| """Simple OpenAI wrapper for generating ad copy.""" | |
| def __init__(self): | |
| """Initialize OpenAI client.""" | |
| self.client = AsyncOpenAI(api_key=settings.openai_api_key) | |
| self.model = settings.llm_model | |
| self.temperature = settings.llm_temperature | |
| self.vision_model = getattr(settings, 'vision_model', 'gpt-4o') | |
    async def generate(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        temperature: Optional[float] = None,
        response_format: Optional[Dict[str, Any]] = None,
    ) -> str:
        """
        Generate text using OpenAI.

        Args:
            prompt: User prompt
            system_prompt: System prompt for context
            temperature: Override default temperature (0.95 for variety)
            response_format: JSON schema for structured output

        Returns:
            Generated text
        """
        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})
        messages.append({"role": "user", "content": prompt})

        kwargs = {
            "model": self.model,
            "messages": messages,
            # Explicit check so a caller-supplied 0.0 is not replaced by the default
            "temperature": temperature if temperature is not None else self.temperature,
        }

        # JSON schema structured output requires gpt-4o, so override the model
        if response_format:
            kwargs["model"] = "gpt-4o"
            kwargs["response_format"] = response_format

        response = await self.client.chat.completions.create(**kwargs)

        content = response.choices[0].message.content
        if content is None:
            raise ValueError("OpenAI returned empty response")

        return content

    async def generate_json(
        self,
        prompt: str,
        system_prompt: Optional[str] = None,
        temperature: Optional[float] = None,
    ) -> Dict[str, Any]:
        """
        Generate JSON output using OpenAI.

        Args:
            prompt: User prompt (should request JSON output)
            system_prompt: System prompt for context
            temperature: Override default temperature

        Returns:
            Parsed JSON dictionary
        """
        # Add JSON instruction to prompt
        json_prompt = f"{prompt}\n\nRespond with valid JSON only."

        response = await self.generate(
            prompt=json_prompt,
            system_prompt=system_prompt,
            temperature=temperature,
        )

        # Parse JSON from the response, stripping Markdown code fences if present
        try:
            response = response.strip()
            if response.startswith("```json"):
                response = response[7:]
            if response.startswith("```"):
                response = response[3:]
            if response.endswith("```"):
                response = response[:-3]
            return json.loads(response.strip())
        except json.JSONDecodeError as e:
            raise ValueError(f"Failed to parse JSON response: {e}\nResponse: {response}") from e

    async def analyze_image_with_vision(
        self,
        image_bytes: bytes,
        analysis_prompt: str,
        system_prompt: Optional[str] = None,
    ) -> str:
        """
        Analyze an image using the GPT-4 Vision API.

        Args:
            image_bytes: Image file bytes
            analysis_prompt: Prompt describing what to analyze
            system_prompt: Optional system prompt for context

        Returns:
            Analysis text from vision model
        """
        # Convert image bytes to a base64 data URL (the URL labels the image as PNG)
        image_base64 = base64.b64encode(image_bytes).decode('utf-8')
        image_data_url = f"data:image/png;base64,{image_base64}"

        messages = []
        if system_prompt:
            messages.append({"role": "system", "content": system_prompt})

        messages.append({
            "role": "user",
            "content": [
                {
                    "type": "text",
                    "text": analysis_prompt
                },
                {
                    "type": "image_url",
                    "image_url": {
                        "url": image_data_url
                    }
                }
            ]
        })

        response = await self.client.chat.completions.create(
            model=self.vision_model,
            messages=messages,
            temperature=0.3,  # Lower temperature for more consistent analysis
        )

        content = response.choices[0].message.content
        if content is None:
            raise ValueError("Vision API returned empty response")

        return content


# Global instance
llm_service = LLMService()
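
For context, a minimal usage sketch (not part of the commit) of how the global llm_service instance might be exercised from an async entry point. The import path services.llm_service and the fields on config.settings (openai_api_key, llm_model, llm_temperature) are assumptions inferred from the code above, not confirmed project details.

# Usage sketch (hypothetical, not part of this commit).
# Assumes the service lives at services/llm_service.py and that config.settings
# provides openai_api_key, llm_model, and llm_temperature as read in __init__.
import asyncio

from services.llm_service import llm_service  # assumed module path


async def main() -> None:
    # Plain-text generation
    headline = await llm_service.generate(
        prompt="Write a one-sentence ad headline for a reusable water bottle.",
        system_prompt="You are a concise direct-response copywriter.",
    )
    print(headline)

    # JSON generation; the prompt describes the expected shape
    variants = await llm_service.generate_json(
        prompt='Return a JSON object {"headlines": [...]} with three short ad headlines for a reusable water bottle.',
    )
    print(variants["headlines"])


if __name__ == "__main__":
    asyncio.run(main())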