ayscript committed on
Commit
d663b53
·
verified ·
1 Parent(s): 83a259c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -12
app.py CHANGED
@@ -3,7 +3,6 @@ import requests
3
  import os
4
 
5
  # Define the API URLs for both languages
6
- # We use Meta's MMS models which are excellent for both
7
  MODELS = {
8
  "English": "https://api-inference.huggingface.co/models/facebook/mms-tts-eng",
9
  "Yoruba": "https://api-inference.huggingface.co/models/facebook/mms-tts-yor"
@@ -14,40 +13,38 @@ hf_token = os.getenv("HF_TOKEN")
14
  headers = {"Authorization": f"Bearer {hf_token}"}
15
 
16
  def text_to_speech(text, language):
17
- # Select the correct URL based on user choice
 
 
18
  api_url = MODELS[language]
19
 
20
- # Send request to Hugging Face
21
  try:
22
  response = requests.post(api_url, headers=headers, json={"inputs": text})
23
  response.raise_for_status() # Check for errors
24
  except Exception as e:
25
- return None, f"Error: {str(e)}"
 
26
 
27
  # Save audio to file
28
  output_file = "output.wav"
29
  with open(output_file, "wb") as f:
30
  f.write(response.content)
31
 
32
- return output_file, None
 
33
 
34
  # Create the Interface
35
  with gr.Blocks() as demo:
36
  gr.Markdown("# πŸ‡³πŸ‡¬ English & Yoruba Text-to-Speech")
37
 
38
  with gr.Row():
39
- # Input Text
40
  txt_input = gr.Textbox(label="Enter Text", placeholder="Type something here...")
41
- # Language Dropdown
42
  lang_dropdown = gr.Dropdown(choices=["English", "Yoruba"], value="English", label="Select Language")
43
 
44
- # Run Button
45
  btn = gr.Button("Generate Audio")
46
-
47
- # Output Audio
48
  audio_out = gr.Audio(label="Output Audio")
49
 
50
- # Connect the button to the function
51
- btn.click(fn=text_to_speech, inputs=[txt_input, lang_dropdown], outputs=[audio_out])
52
 
53
  demo.launch()
 
3
  import os
4
 
5
  # Define the API URLs for both languages
 
6
  MODELS = {
7
  "English": "https://api-inference.huggingface.co/models/facebook/mms-tts-eng",
8
  "Yoruba": "https://api-inference.huggingface.co/models/facebook/mms-tts-yor"
 
13
  headers = {"Authorization": f"Bearer {hf_token}"}
14
 
15
def text_to_speech(text, language):
    """Synthesize speech for *text* via the Hugging Face Inference API.

    Parameters:
        text: Text to speak. ``None`` or whitespace-only input is a no-op.
        language: Key into the module-level ``MODELS`` dict
            ("English" or "Yoruba").

    Returns:
        Path to a generated WAV file, or ``None`` when there is nothing
        to synthesize.

    Raises:
        gr.Error: When the API request fails, so Gradio shows a red
            error popup instead of crashing the app.
    """
    # Gradio can deliver None (cleared textbox) as well as "" — guard both;
    # the bare text.strip() would raise AttributeError on None.
    if not text or not text.strip():
        return None  # Do nothing if text is empty

    api_url = MODELS[language]

    try:
        # timeout= keeps a stalled API from hanging the worker forever;
        # 60s is generous for short TTS payloads.
        response = requests.post(
            api_url, headers=headers, json={"inputs": text}, timeout=60
        )
        response.raise_for_status()  # Check for HTTP errors
    except Exception as e:
        # Surface the failure in the UI instead of crashing
        raise gr.Error(f"API Error: {str(e)}")

    # Write to a unique temp file: the previous fixed "output.wav" path was
    # a race condition when two users generated audio concurrently.
    import tempfile

    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as f:
        f.write(response.content)
        output_file = f.name

    # Return only the file path (single value), matching the gr.Audio output
    return output_file
35
 
36
# Build the Gradio interface: one text box + language picker in a row,
# a trigger button, and an audio player for the synthesized result.
with gr.Blocks() as demo:
    gr.Markdown("# πŸ‡³πŸ‡¬ English & Yoruba Text-to-Speech")

    with gr.Row():
        text_box = gr.Textbox(label="Enter Text", placeholder="Type something here...")
        language_choice = gr.Dropdown(choices=["English", "Yoruba"], value="English", label="Select Language")

    generate_button = gr.Button("Generate Audio")
    result_audio = gr.Audio(label="Output Audio")

    # Wire the button: two inputs feed text_to_speech, its single return
    # value (a file path or None) lands in the audio component.
    generate_button.click(fn=text_to_speech, inputs=[text_box, language_choice], outputs=result_audio)

demo.launch()