Sayiqa7 commited on
Commit
23e2b8a
·
verified ·
1 Parent(s): 0752c8e

Create app.py

Browse files
Files changed (1):
  1. app.py (+28, −0)
app.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import torch
3
+ import whisper
4
+ # **Combine Interfaces in Tabs**
5
+ with gr.Blocks() as demo:
6
+ gr.Markdown("## Unified Gradio App for Text-to-Image and Speech Processing")
7
+
8
+ with gr.Tab("Text-to-Image"):
9
+ gr.Markdown("### Generate Images from Text")
10
+ text_to_image_prompt = gr.Textbox(label="Enter your prompt", placeholder="Describe the image you want...")
11
+ text_to_image_output = gr.Image(label="Generated Image", type="pil")
12
+ text_to_image_button = gr.Button("Generate Image")
13
+ text_to_image_button.click(generate_image, inputs=text_to_image_prompt, outputs=text_to_image_output)
14
+
15
+ with gr.Tab("Speech Processing"):
16
+ gr.Markdown("### Speech-to-Text and Text-to-Speech")
17
+ audio_input = gr.Audio(type="filepath", label="Upload voice sample (WAV file)")
18
+ tts_text_input = gr.Textbox(label="Text to speak (optional if transcribing)")
19
+ transcribe_checkbox = gr.Checkbox(label="Transcribe input audio")
20
+ audio_output = gr.Audio(label="Synthesized Voice Output")
21
+ transcription_output = gr.Textbox(label="Transcription/Status")
22
+ process_button = gr.Button("Process Voice")
23
+ process_button.click(process_voice,
24
+ inputs=[audio_input, tts_text_input, transcribe_checkbox],
25
+ outputs=[audio_output, transcription_output])
26
+
27
+ # Launch the app
28
+ demo.launch(share=True)