File size: 7,123 Bytes
1778e91
 
 
 
 
1a75585
1778e91
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
import time
from contextlib import asynccontextmanager
from functools import lru_cache
from typing import List, Optional, Tuple

import aiohttp
import elevenlabs

from logger import setup_logger, log_execution_time, log_async_execution_time

logger = setup_logger("api_clients")

class OpenRouterClient:
    """Handles OpenRouter API interactions with comprehensive logging and error tracking"""

    def __init__(self, api_key: str):
        """
        Args:
            api_key: OpenRouter API key; must be at least 32 characters long.

        Raises:
            ValueError: If the key is missing or implausibly short.
        """
        logger.info("Initializing OpenRouter client")
        if not api_key or len(api_key) < 32:
            logger.error("Invalid API key format")
            raise ValueError("Invalid OpenRouter API key")

        self.api_key = api_key
        self.base_url = "https://openrouter.ai/api/v1"
        self.headers = {
            "Authorization": f"Bearer {api_key}",
            "Content-Type": "application/json",
        }
        # Result cache for get_models(). NOTE: lru_cache cannot be used on a
        # coroutine function — it caches the coroutine object, which can only
        # be awaited once, so any second call would raise RuntimeError.
        self._models_cache: Optional[List[Tuple[str, str]]] = None
        logger.debug("OpenRouter client initialized successfully")

    @asynccontextmanager
    async def get_session(self):
        """Yield a fresh aiohttp session pre-configured with auth headers."""
        logger.debug("Creating new aiohttp session")
        async with aiohttp.ClientSession(headers=self.headers) as session:
            yield session

    async def get_models(self) -> List[Tuple[str, str]]:
        """
        Fetch available models from OpenRouter API

        The result is cached on the instance after the first successful call
        (replaces the original's broken @lru_cache on an async method).

        Returns:
            List of tuples containing (model_id, model_description)

        Raises:
            ValueError: If API request fails
        """
        if self._models_cache is not None:
            return self._models_cache

        logger.info("Fetching available models from OpenRouter")
        async with self.get_session() as session:
            async with session.get(f"{self.base_url}/models") as response:
                if response.status != 200:
                    error_msg = await response.text()
                    logger.error(f"Failed to fetch models: {error_msg}")
                    raise ValueError(f"Failed to fetch models: {error_msg}")

                payload = await response.json()
                # OpenRouter wraps the model list in a "data" key; the
                # original iterated the top-level dict (yielding string keys).
                # Accept a bare list too, for robustness.
                models = payload.get("data", []) if isinstance(payload, dict) else payload
                logger.info(f"Successfully fetched {len(models)} models")
                logger.debug(f"Available models: {[model['name'] for model in models]}")
                self._models_cache = [
                    (model['id'], f"{model['name']} ({model['context_length']} tokens)")
                    for model in models
                ]
                return self._models_cache

    @log_async_execution_time(logger)
    async def generate_script(self, content: str, prompt: str, model_id: str) -> str:
        """
        Generate a podcast script with detailed progress tracking and validation

        Performance metrics and content analysis are logged at each step.

        Args:
            content: Source material the script is based on (>= 100 chars).
            prompt: Focus/topic instructions for the script (>= 10 chars).
            model_id: OpenRouter model identifier to use.

        Returns:
            The generated script text.

        Raises:
            ValueError: If the inputs are too short or the API call fails.
        """
        logger.info(f"Starting script generation with model: {model_id}")
        logger.debug(f"Input metrics - Content: {len(content)} chars, Prompt: {len(prompt)} chars")

        # Validate inputs before spending an API call.
        if not content or len(content) < 100:
            logger.error("Content too short for meaningful script generation")
            raise ValueError("Insufficient content for script generation")

        if not prompt or len(prompt) < 10:
            logger.error("Prompt too short or missing")
            raise ValueError("Please provide a more detailed prompt")

        try:
            async with self.get_session() as session:
                logger.debug("Preparing script generation request")
                response = await self._make_script_request(session, content, prompt, model_id)

                script = response['choices'][0]['message']['content']
                logger.info(f"Script generated successfully: {len(script)} chars")
                logger.debug(f"Script preview: {script[:200]}...")

                return script
        except Exception:
            # Log with traceback, then let the caller decide how to surface it.
            logger.error("Script generation failed", exc_info=True)
            raise

    async def _make_script_request(self, session, content, prompt, model_id):
        """POST the chat-completion request and return the decoded JSON body.

        Raises:
            ValueError: If the API responds with a non-200 status.
        """
        async with session.post(
            f"{self.base_url}/chat/completions",
            json={
                "model": model_id,
                "messages": [
                    {
                        "role": "system",
                        "content": "You are an expert podcast script writer. Create engaging, conversational content."
                    },
                    {
                        "role": "user",
                        "content": f"""Based on this content: {content}
                        Create a 3-minute podcast script focusing on: {prompt}
                        Format as a natural conversation with clear speaker parts.
                        Include [HOST] and [GUEST] markers for different voices."""
                    }
                ]
            }
        ) as response:
            logger.debug("Sending script generation request")

            if response.status != 200:
                error_msg = await response.text()
                logger.error(f"Script generation failed: {error_msg}")
                raise ValueError(f"Script generation failed: {error_msg}")

            return await response.json()

class ElevenLabsClient:
    """Handles ElevenLabs API interactions with detailed performance tracking"""

    def __init__(self, api_key: str):
        """
        Args:
            api_key: ElevenLabs API key; registered globally with the SDK.
        """
        logger.info("Initializing ElevenLabs client")
        self.api_key = api_key
        # Per-instance cache for get_voices(). Replaces @lru_cache on a bound
        # method, which keys the cache on `self` and keeps the instance alive
        # for the lifetime of the cache (flake8-bugbear B019).
        self._voices_cache: Optional[List[Tuple[str, str]]] = None
        elevenlabs.set_api_key(api_key)

    def get_voices(self) -> List[Tuple[str, str]]:
        """
        Fetch available voices from ElevenLabs

        The voice list is cached on the instance after the first call.

        Returns:
            List of tuples containing (voice_id, voice_name)
        """
        if self._voices_cache is not None:
            return self._voices_cache

        logger.info("Fetching available voices from ElevenLabs")
        voices = elevenlabs.voices()
        logger.info(f"Successfully fetched {len(voices)} voices")
        logger.debug(f"Available voices: {[voice.name for voice in voices]}")
        self._voices_cache = [(voice.voice_id, voice.name) for voice in voices]
        return self._voices_cache

    @log_execution_time(logger)
    def generate_audio(self, text: str, voice_id: str) -> bytes:
        """
        Generate audio with comprehensive error handling and quality checks

        Logs detailed metrics about the input text and resulting audio.

        Args:
            text: The script text to synthesize.
            voice_id: ElevenLabs voice identifier to synthesize with.

        Returns:
            Raw audio bytes produced by the ElevenLabs SDK.
        """
        logger.info(f"Starting audio generation with voice: {voice_id}")
        logger.debug(f"Input text length: {len(text)} chars")

        if len(text) > 5000:
            logger.warning(f"Long text detected ({len(text)} chars), may impact performance")

        try:
            # BUG FIX: the original used time.time() without importing `time`
            # anywhere in the module, so this method always raised NameError.
            start_time = time.time()
            audio = elevenlabs.generate(
                text=text,
                voice=voice_id,
                model="eleven_monolingual_v1"
            )

            duration = time.time() - start_time
            audio_size = len(audio)
            logger.info(f"Audio generated: {audio_size} bytes in {duration:.2f} seconds")
            logger.debug(f"Audio generation rate: {len(text)/duration:.2f} chars/second")

            return audio
        except Exception:
            logger.error("Audio generation failed", exc_info=True)
            raise