"""Gradio app: browse Hugging Face text-to-speech models and synthesize audio.

Lists the most-downloaded TTS models from the Hub and lets the user run
any of them through the serverless Inference API.
"""

import os
import tempfile

import gradio as gr
from huggingface_hub import HfApi, InferenceClient

# Token comes from the "htoken" secret; may be None (anonymous access,
# subject to stricter rate limits).
API_TOKEN = os.environ.get("htoken")
client = InferenceClient(token=API_TOKEN)


def recupera_modelli_reali():
    """Return the ids of the 30 most-downloaded text-to-speech models.

    Degrades to an empty list if the Hub API is unreachable, so the UI
    can still start instead of crashing at import time.
    """
    try:
        api = HfApi()
        modelli = api.list_models(
            pipeline_tag="text-to-speech",
            sort="downloads",
            limit=30,
        )
        return [m.id for m in modelli]
    except Exception as e:  # network/API failure: best-effort, keep the app alive
        print(f"Model listing failed: {e}")
        return []


LISTA_MODELLI_DINAMICA = recupera_modelli_reali()


def genera_audio(testo, model_id):
    """Synthesize *testo* with the TTS model *model_id*.

    Returns a ``(status_message, audio_path_or_None)`` tuple suitable for
    the (Textbox, Audio) output components.
    """
    if not testo.strip():
        return "⚠️ Inserisci del testo.", None
    try:
        audio_bytes = client.text_to_speech(testo, model=model_id)
        # delete=False so the file outlives the context manager and Gradio
        # can serve it from disk.
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
            f.write(audio_bytes)
        return f"✅ Generato con: {model_id}", f.name
    except Exception as e:
        # Map the most common Inference API failures to friendlier messages.
        err = str(e)
        if "loading" in err.lower():
            return "⏳ Modello in caricamento, riprova tra 20s", None
        elif "401" in err or "unauthorized" in err.lower():
            return "❌ Token non valido — controlla il secret 'htoken'", None
        else:
            return f"❌ Errore: {err}", None


with gr.Blocks(theme=gr.themes.Soft()) as interfaccia:
    gr.Markdown("# 🎙️ Browser Modelli Hugging Face TTS")
    gr.Markdown(f"✅ Lista popolata live da Hugging Face — **{len(LISTA_MODELLI_DINAMICA)} modelli trovati**")

    with gr.Row():
        testo_input = gr.Textbox(label="Testo", placeholder="Scrivi qualcosa...", value="Hello, this is a test.")
        modello_dropdown = gr.Dropdown(
            choices=LISTA_MODELLI_DINAMICA,
            # Guard against an empty listing: indexing [0] would raise
            # IndexError at import time and kill the app.
            value=LISTA_MODELLI_DINAMICA[0] if LISTA_MODELLI_DINAMICA else None,
            label="Seleziona Modello",
        )

    pulsante = gr.Button("🎵 Genera Audio", variant="primary")
    status = gr.Textbox(label="Console", interactive=False)
    audio = gr.Audio(label="Player")

    pulsante.click(fn=genera_audio, inputs=[testo_input, modello_dropdown], outputs=[status, audio])


if __name__ == "__main__":
    interfaccia.launch()