# podcaster/api_clients.py
# API clients for podcast generation: OpenRouter (LLM script writing) and
# ElevenLabs (text-to-speech). Both clients log through the shared module logger.
import time
from contextlib import asynccontextmanager
from functools import lru_cache
from typing import List, Optional, Tuple

import aiohttp
import elevenlabs

from logger import setup_logger, log_execution_time, log_async_execution_time
# Module-wide logger; every client class in this file logs through it.
logger = setup_logger("api_clients")
class OpenRouterClient:
    """Async client for the OpenRouter chat-completions API.

    Handles model discovery and podcast-script generation with
    comprehensive logging and error tracking.
    """

    def __init__(self, api_key: str):
        """Validate the API key and prepare default request headers.

        Args:
            api_key: OpenRouter API key (bearer token).

        Raises:
            ValueError: If the key is missing or obviously malformed.
        """
        logger.info("Initializing OpenRouter client")
        # Reject obviously malformed keys early, before any network call.
        if not api_key or len(api_key) < 32:
            logger.error("Invalid API key format")
            raise ValueError("Invalid OpenRouter API key")
        self.api_key = api_key
        self.base_url = "https://openrouter.ai/api/v1"
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }
        # Per-instance cache for get_models(). The original used
        # @lru_cache on the async method, which caches the coroutine
        # object itself: awaiting it a second time raises RuntimeError.
        self._models_cache: Optional[List[Tuple[str, str]]] = None
        logger.debug("OpenRouter client initialized successfully")

    @asynccontextmanager
    async def get_session(self):
        """Yield a fresh aiohttp session pre-configured with auth headers."""
        logger.debug("Creating new aiohttp session")
        async with aiohttp.ClientSession(headers=self.headers) as session:
            yield session

    async def get_models(self) -> List[Tuple[str, str]]:
        """
        Fetch available models from OpenRouter API.

        The result is cached on the instance after the first successful
        call, so repeated awaits do not re-hit the network.

        Returns:
            List of tuples containing (model_id, model_description)

        Raises:
            ValueError: If API request fails
        """
        if self._models_cache is not None:
            return self._models_cache
        logger.info("Fetching available models from OpenRouter")
        async with self.get_session() as session:
            async with session.get(f"{self.base_url}/models") as response:
                if response.status != 200:
                    error_msg = await response.text()
                    logger.error(f"Failed to fetch models: {error_msg}")
                    raise ValueError(f"Failed to fetch models: {error_msg}")
                payload = await response.json()
                # OpenRouter wraps the model list in a {"data": [...]}
                # envelope; the original iterated the dict (i.e. its keys).
                # Fall back to the raw payload for forward compatibility.
                models = payload.get("data", payload) if isinstance(payload, dict) else payload
                logger.info(f"Successfully fetched {len(models)} models")
                logger.debug(f"Available models: {[model['name'] for model in models]}")
                self._models_cache = [
                    (model['id'], f"{model['name']} ({model['context_length']} tokens)")
                    for model in models
                ]
                return self._models_cache

    @log_async_execution_time(logger)
    async def generate_script(self, content: str, prompt: str, model_id: str) -> str:
        """
        Generate a podcast script with detailed progress tracking and validation.

        Performance metrics and content analysis are logged at each step.

        Args:
            content: Source material the script is based on (>= 100 chars).
            prompt: User's focus/instructions for the script (>= 10 chars).
            model_id: OpenRouter model identifier to use.

        Returns:
            The generated script text.

        Raises:
            ValueError: If inputs are too short or the API call fails.
        """
        logger.info(f"Starting script generation with model: {model_id}")
        logger.debug(f"Input metrics - Content: {len(content)} chars, Prompt: {len(prompt)} chars")
        # Validate inputs before spending an API call.
        if not content or len(content) < 100:
            logger.error("Content too short for meaningful script generation")
            raise ValueError("Insufficient content for script generation")
        if not prompt or len(prompt) < 10:
            logger.error("Prompt too short or missing")
            raise ValueError("Please provide a more detailed prompt")
        try:
            async with self.get_session() as session:
                logger.debug("Preparing script generation request")
                response = await self._make_script_request(session, content, prompt, model_id)
                script = response['choices'][0]['message']['content']
                logger.info(f"Script generated successfully: {len(script)} chars")
                logger.debug(f"Script preview: {script[:200]}...")
                return script
        except Exception:
            # Log with traceback, then propagate to the caller unchanged.
            logger.error("Script generation failed", exc_info=True)
            raise

    async def _make_script_request(self, session, content, prompt, model_id):
        """POST the chat-completions request and return the decoded JSON body."""
        async with session.post(
            f"{self.base_url}/chat/completions",
            json={
                "model": model_id,
                "messages": [
                    {
                        "role": "system",
                        "content": "You are an expert podcast script writer. Create engaging, conversational content."
                    },
                    {
                        "role": "user",
                        "content": f"""Based on this content: {content}
Create a 3-minute podcast script focusing on: {prompt}
Format as a natural conversation with clear speaker parts.
Include [HOST] and [GUEST] markers for different voices."""
                    }
                ]
            }
        ) as response:
            logger.debug("Sending script generation request")
            if response.status != 200:
                error_msg = await response.text()
                logger.error(f"Script generation failed: {error_msg}")
                raise ValueError(f"Script generation failed: {error_msg}")
            return await response.json()
class ElevenLabsClient:
    """Handles ElevenLabs API interactions with detailed performance tracking."""

    def __init__(self, api_key: str):
        """Store the key and register it with the elevenlabs SDK.

        Args:
            api_key: ElevenLabs API key.
        """
        logger.info("Initializing ElevenLabs client")
        self.api_key = api_key
        elevenlabs.set_api_key(api_key)
        # Per-instance voice cache. The original used @lru_cache on the
        # bound method, which keys on `self` and keeps the instance alive
        # for the cache's lifetime (ruff B019).
        self._voices_cache: Optional[List[Tuple[str, str]]] = None

    def get_voices(self) -> List[Tuple[str, str]]:
        """
        Fetch available voices from ElevenLabs.

        The voice list is fetched once per instance and cached thereafter.

        Returns:
            List of tuples containing (voice_id, voice_name)
        """
        if self._voices_cache is None:
            logger.info("Fetching available voices from ElevenLabs")
            voices = elevenlabs.voices()
            logger.info(f"Successfully fetched {len(voices)} voices")
            logger.debug(f"Available voices: {[voice.name for voice in voices]}")
            self._voices_cache = [(voice.voice_id, voice.name) for voice in voices]
        return self._voices_cache

    @log_execution_time(logger)
    def generate_audio(self, text: str, voice_id: str) -> bytes:
        """
        Generate audio with comprehensive error handling and quality checks.

        Logs detailed metrics about the input text and resulting audio.

        Args:
            text: Script text to synthesize.
            voice_id: ElevenLabs voice identifier.

        Returns:
            Raw audio bytes produced by the API.
        """
        logger.info(f"Starting audio generation with voice: {voice_id}")
        logger.debug(f"Input text length: {len(text)} chars")
        if len(text) > 5000:
            logger.warning(f"Long text detected ({len(text)} chars), may impact performance")
        try:
            # `time` was used here but never imported in the original file;
            # the module-level `import time` fixes the NameError.
            start_time = time.time()
            audio = elevenlabs.generate(
                text=text,
                voice=voice_id,
                model="eleven_monolingual_v1"
            )
            duration = time.time() - start_time
            audio_size = len(audio)
            logger.info(f"Audio generated: {audio_size} bytes in {duration:.2f} seconds")
            logger.debug(f"Audio generation rate: {len(text)/duration:.2f} chars/second")
            return audio
        except Exception:
            # Log with traceback, then propagate unchanged.
            logger.error("Audio generation failed", exc_info=True)
            raise