from dataclasses import dataclass
from typing import List, Tuple, Dict, Optional
import os
import json
import httpx
from openai import OpenAI
import edge_tts
import tempfile
from pydub import AudioSegment
import base64
from pathlib import Path

@dataclass
class ConversationConfig:
    max_words: int = 3000
    prefix_url: str = "https://r.jina.ai/"
    model_name: str = "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo"

class URLToAudioConverter:
    def __init__(self, config: ConversationConfig, llm_api_key: str):
        self.config = config
        # Together AI exposes an OpenAI-compatible endpoint, so the OpenAI client works here.
        self.llm_client = OpenAI(api_key=llm_api_key, base_url="https://api.together.xyz/v1")
        self.llm_out = None

    def fetch_text(self, url: str) -> str:
        if not url:
            raise ValueError("URL cannot be empty")
        # The r.jina.ai prefix returns the target page as LLM-friendly plain text.
        full_url = f"{self.config.prefix_url}{url}"
        try:
            response = httpx.get(full_url, timeout=60.0)
            response.raise_for_status()
            return response.text
        except httpx.HTTPError as e:
            raise RuntimeError(f"Failed to fetch URL: {e}")

    def extract_conversation(self, text: str) -> Dict:
        if not text:
            raise ValueError("Input text cannot be empty")
        try:
            prompt = (
                f"{text}\nConvert the provided text into a short informative podcast conversation "
                f"between two experts. Return ONLY a JSON object with the following structure:\n"
                '{"conversation": [{"speaker": "Speaker1", "text": "..."}, {"speaker": "Speaker2", "text": "..."}]}'
            )
            chat_completion = self.llm_client.chat.completions.create(
                messages=[{"role": "user", "content": prompt}],
                model=self.config.model_name,
                response_format={"type": "json_object"}
            )
            response_content = chat_completion.choices[0].message.content
            # Strip any stray text the model may emit around the JSON object.
            json_str = response_content.strip()
            if not json_str.startswith('{'):
                json_str = json_str[json_str.find('{'):]
            if not json_str.endswith('}'):
                json_str = json_str[:json_str.rfind('}') + 1]
            return json.loads(json_str)
        except Exception as e:
            raise RuntimeError(f"Failed to extract conversation: {e}")

    async def text_to_speech(self, conversation_json: Dict, voice_1: str, voice_2: str) -> Tuple[List[str], str]:
        output_dir = Path(self._create_output_directory())
        filenames = []
        try:
            for i, turn in enumerate(conversation_json["conversation"]):
                filename = output_dir / f"output_{i}.mp3"
                # Alternate between the two voices, one per turn.
                voice = voice_1 if i % 2 == 0 else voice_2
                tmp_path, error = await self._generate_audio(turn["text"], voice)
                if error:
                    raise RuntimeError(f"Text-to-speech failed: {error}")
                os.rename(tmp_path, filename)
                filenames.append(str(filename))
            return filenames, str(output_dir)
        except Exception as e:
            raise RuntimeError(f"Failed to convert text to speech: {e}")

    async def _generate_audio(self, text: str, voice: str, rate: int = 0, pitch: int = 0) -> Tuple[Optional[str], Optional[str]]:
        if not text.strip():
            return None, "Text cannot be empty"
        # Dropdown-style labels like "en-US-GuyNeural - en-US (Male)" are trimmed to the short name.
        voice_short_name = voice.split(" - ")[0]
        communicate = edge_tts.Communicate(
            text,
            voice_short_name,
            rate=f"{rate:+d}%",
            pitch=f"{pitch:+d}Hz"
        )
        # Reserve a temp path, then let edge-tts write the MP3 to it.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as tmp_file:
            tmp_path = tmp_file.name
        await communicate.save(tmp_path)
        return tmp_path, None

    def _create_output_directory(self) -> str:
        # A random URL-safe folder name keeps concurrent runs from colliding.
        folder_name = base64.urlsafe_b64encode(os.urandom(8)).decode("utf-8")
        os.makedirs(folder_name, exist_ok=True)
        return folder_name

    def combine_audio_files(self, filenames: List[str]) -> AudioSegment:
        if not filenames:
            raise ValueError("No input files provided")
        combined = AudioSegment.empty()
        for filename in filenames:
            combined += AudioSegment.from_file(filename, format="mp3")
        return combined

    def add_background_music_and_tags(
        self,
        speech_audio: AudioSegment,
        music_file: str,
        tags_files: List[str]
    ) -> AudioSegment:
        music = AudioSegment.from_file(music_file)
        # Loop the music until it covers the speech, trim to length, and duck it by 20 dB.
        if len(music) < len(speech_audio):
            music = music * (len(speech_audio) // len(music) + 1)
        music = music[:len(speech_audio)] - 20
        mixed = speech_audio.overlay(music)
        # Prepend intro tags, each lowered by 5 dB; iterate in reverse so they play in list order.
        for tag_path in reversed(tags_files):
            tag_audio = AudioSegment.from_file(tag_path) - 5
            mixed = tag_audio + mixed
        return mixed

    async def url_to_audio(self, url: str, voice_1: str, voice_2: str) -> Tuple[str, str]:
        text = self.fetch_text(url)
        # Truncate to the configured word budget before prompting the LLM.
        if len(words := text.split()) > self.config.max_words:
            text = " ".join(words[:self.config.max_words])
        conversation_json = self.extract_conversation(text)
        conversation_text = "\n".join(
            f"{turn['speaker']}: {turn['text']}"
            for turn in conversation_json["conversation"]
        )
        return await self._process_audio(conversation_json, voice_1, voice_2, conversation_text)

    async def text_to_audio(self, structured_text: str, voice_1: str, voice_2: str) -> Tuple[str, str]:
        """For text ALREADY structured as conversation JSON."""
        conversation_json = self.extract_conversation(structured_text)
        conversation_text = "\n".join(
            f"{turn['speaker']}: {turn['text']}"
            for turn in conversation_json["conversation"]
        )
        return await self._process_audio(conversation_json, voice_1, voice_2, conversation_text)

    async def raw_text_to_audio(self, raw_text: str, voice_1: str, voice_2: str) -> Tuple[str, str]:
        """For direct plain text (no dialogue structure)."""
        fake_conversation = {"conversation": [{"speaker": "Narrator", "text": raw_text}]}
        return await self._process_audio(fake_conversation, voice_1, voice_2, raw_text)

    async def _process_audio(
        self,
        conversation_json: Dict,
        voice_1: str,
        voice_2: str,
        text: str
    ) -> Tuple[str, str]:
        """Internal method shared by all entry points."""
        audio_files, folder_name = await self.text_to_speech(conversation_json, voice_1, voice_2)
        combined_audio = self.combine_audio_files(audio_files)
        final_audio = self.add_background_music_and_tags(
            combined_audio,
            "musica.mp3",
            ["tag.mp3", "tag2.mp3"]
        )
        output_file = os.path.join(folder_name, "output.mp3")
        final_audio.export(output_file, format="mp3")
        # Remove the per-turn clips once the final mix is exported.
        for f in audio_files:
            os.remove(f)
        return output_file, text
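
# Minimal usage sketch (an assumption, not part of the original file): it presumes
# a Together API key in the TOGETHER_API_KEY environment variable, and that
# musica.mp3, tag.mp3, and tag2.mp3 exist in the working directory, since
# _process_audio loads them by name. The voice labels follow the
# "ShortName - locale (gender)" format that _generate_audio splits on; bare
# edge-tts short names such as "en-US-GuyNeural" also work.
if __name__ == "__main__":
    import asyncio

    converter = URLToAudioConverter(
        config=ConversationConfig(),
        llm_api_key=os.environ["TOGETHER_API_KEY"],
    )
    output_file, transcript = asyncio.run(
        converter.url_to_audio(
            "https://en.wikipedia.org/wiki/Podcast",
            voice_1="en-US-GuyNeural - en-US (Male)",
            voice_2="en-US-JennyNeural - en-US (Female)",
        )
    )
    print(f"Final mix written to {output_file}")
    print(transcript)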