Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -5,7 +5,7 @@ from transformers import pipeline
|
|
| 5 |
sentiment_model = pipeline("sentiment-analysis")
|
| 6 |
chatbot_model = pipeline("text-generation", model="microsoft/DialoGPT-medium")
|
| 7 |
summarization_model = pipeline("summarization")
|
| 8 |
-
|
| 9 |
|
| 10 |
# Sentiment Analysis Function
|
| 11 |
def analyze_sentiment(text):
|
|
@@ -26,15 +26,22 @@ def summarize_text(text):
|
|
| 26 |
summary = summarization_model(text, max_length=150, min_length=50, do_sample=False)
|
| 27 |
return summary[0]["summary_text"]
|
| 28 |
|
| 29 |
-
#
|
| 30 |
-
def
|
| 31 |
-
|
| 32 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
|
| 34 |
# Create Gradio Interface with Tabs
|
| 35 |
with gr.Blocks(theme="dark") as app:
|
| 36 |
gr.Markdown("# 🚀 Multi-Tab Language Application")
|
| 37 |
-
|
| 38 |
with gr.Tabs():
|
| 39 |
# Sentiment Analysis Tab
|
| 40 |
with gr.Tab("Sentiment Analysis"):
|
|
@@ -43,7 +50,7 @@ with gr.Blocks(theme="dark") as app:
|
|
| 43 |
sentiment_output = gr.Textbox(label="Sentiment")
|
| 44 |
confidence_output = gr.Textbox(label="Confidence Score")
|
| 45 |
analyze_button = gr.Button("Analyze")
|
| 46 |
-
|
| 47 |
analyze_button.click(analyze_sentiment, inputs=text_input, outputs=[sentiment_output, confidence_output])
|
| 48 |
|
| 49 |
# Chatbot Tab
|
|
@@ -52,7 +59,7 @@ with gr.Blocks(theme="dark") as app:
|
|
| 52 |
chatbot_input = gr.Textbox(label="Chat with AI")
|
| 53 |
chatbot_output = gr.Textbox(label="Response", interactive=False)
|
| 54 |
chat_button = gr.Button("Send")
|
| 55 |
-
|
| 56 |
chat_button.click(chatbot_response, inputs=chatbot_input, outputs=chatbot_output)
|
| 57 |
|
| 58 |
# Summarization Tab
|
|
@@ -61,17 +68,23 @@ with gr.Blocks(theme="dark") as app:
|
|
| 61 |
summary_input = gr.Textbox(label="Enter long text:", lines=5)
|
| 62 |
summary_output = gr.Textbox(label="Summary", interactive=False)
|
| 63 |
summarize_button = gr.Button("Summarize")
|
| 64 |
-
|
| 65 |
summarize_button.click(summarize_text, inputs=summary_input, outputs=summary_output)
|
| 66 |
|
| 67 |
-
#
|
| 68 |
-
with gr.Tab("
|
| 69 |
-
gr.Markdown("##
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
|
| 74 |
-
|
| 75 |
|
| 76 |
# Launch the application
|
| 77 |
app.launch()
|
|
|
|
# Hugging Face pipelines, loaded once at module import time.
# NOTE(review): no model checkpoint is pinned for the sentiment, summarization
# or ASR pipelines, so the transformers default models are downloaded on the
# first run — confirm this is acceptable for the deployment.
sentiment_model = pipeline("sentiment-analysis")
chatbot_model = pipeline("text-generation", model="microsoft/DialoGPT-medium")
summarization_model = pipeline("summarization")
speech_to_text_model = pipeline("automatic-speech-recognition")
|
| 9 |
|
| 10 |
# Sentiment Analysis Function
|
| 11 |
def analyze_sentiment(text):
|
|
|
|
| 26 |
summary = summarization_model(text, max_length=150, min_length=50, do_sample=False)
|
| 27 |
return summary[0]["summary_text"]
|
| 28 |
|
| 29 |
+
# 4️⃣ Speech-to-Text (ASR) Function
def transcribe_audio(mic=None, file=None):
    """Transcribe audio from the microphone or an uploaded file.

    The microphone recording takes precedence when both inputs are
    supplied. Returns the transcribed text, or a user-facing prompt
    string when neither input was provided.
    """
    # Pick the first truthy source; fall through to the upload when
    # there is no microphone recording.
    audio = mic if mic else file
    if not audio:
        return "Please record or upload an audio file."
    # The transformers ASR pipeline returns a dict with a "text" key.
    return speech_to_text_model(audio)["text"]
|
| 40 |
|
| 41 |
# Create Gradio Interface with Tabs
|
| 42 |
with gr.Blocks(theme="dark") as app:
|
| 43 |
gr.Markdown("# 🚀 Multi-Tab Language Application")
|
| 44 |
+
|
| 45 |
with gr.Tabs():
|
| 46 |
# Sentiment Analysis Tab
|
| 47 |
with gr.Tab("Sentiment Analysis"):
|
|
|
|
| 50 |
sentiment_output = gr.Textbox(label="Sentiment")
|
| 51 |
confidence_output = gr.Textbox(label="Confidence Score")
|
| 52 |
analyze_button = gr.Button("Analyze")
|
| 53 |
+
|
| 54 |
analyze_button.click(analyze_sentiment, inputs=text_input, outputs=[sentiment_output, confidence_output])
|
| 55 |
|
| 56 |
# Chatbot Tab
|
|
|
|
| 59 |
chatbot_input = gr.Textbox(label="Chat with AI")
|
| 60 |
chatbot_output = gr.Textbox(label="Response", interactive=False)
|
| 61 |
chat_button = gr.Button("Send")
|
| 62 |
+
|
| 63 |
chat_button.click(chatbot_response, inputs=chatbot_input, outputs=chatbot_output)
|
| 64 |
|
| 65 |
# Summarization Tab
|
|
|
|
| 68 |
summary_input = gr.Textbox(label="Enter long text:", lines=5)
|
| 69 |
summary_output = gr.Textbox(label="Summary", interactive=False)
|
| 70 |
summarize_button = gr.Button("Summarize")
|
| 71 |
+
|
| 72 |
summarize_button.click(summarize_text, inputs=summary_input, outputs=summary_output)
|
| 73 |
|
| 74 |
+
# Speech-to-Text Tab (Modified)
|
| 75 |
+
with gr.Tab("Speech-to-Text"):
|
| 76 |
+
gr.Markdown("## 🎤 Speech-to-Text (ASR)")
|
| 77 |
+
|
| 78 |
+
mic_input = gr.Audio(sources=["microphone"], type="filepath", label="Record Audio")
|
| 79 |
+
mic_input.optional = True # Mark as optional
|
| 80 |
+
|
| 81 |
+
file_input = gr.Audio(sources=["upload"], type="filepath", label="Upload Audio")
|
| 82 |
+
file_input.optional = True # Mark as optional
|
| 83 |
+
|
| 84 |
+
transcript_output = gr.Textbox(label="Transcription")
|
| 85 |
+
transcribe_button = gr.Button("Transcribe")
|
| 86 |
|
| 87 |
+
transcribe_button.click(transcribe_audio, inputs=[mic_input, file_input], outputs=transcript_output)
|
| 88 |
|
| 89 |
# Launch the application
|
| 90 |
app.launch()
|