# Multi-tab Gradio language app: sentiment analysis, chatbot, summarization, and speech-to-text.
import gradio as gr
from transformers import pipeline

# Load pre-trained models once at startup (each call downloads/caches weights).
sentiment_model = pipeline("sentiment-analysis")
chatbot_model = pipeline("text-generation", model="microsoft/DialoGPT-medium")
summarization_model = pipeline("summarization")
speech_to_text_model = pipeline("automatic-speech-recognition")
# Sentiment Analysis Function
def analyze_sentiment(text):
    """Classify *text* with the sentiment pipeline.

    Returns a (label, score) tuple, where label is the predicted class
    (e.g. "POSITIVE"/"NEGATIVE") and score is the model confidence
    rounded to 4 decimal places.
    """
    result = sentiment_model(text)[0]
    return result["label"], round(result["score"], 4)
# Chatbot Function
chat_history = []  # module-level transcript, shared across calls


def chatbot_response(user_input):
    """Generate a chatbot reply and return the full running transcript.

    Appends the user's message and the bot's reply to the global
    ``chat_history`` and returns the history joined with newlines.
    """
    global chat_history
    prompt = f"User: {user_input} Chatbot:"
    response = chatbot_model(prompt, max_length=100, num_return_sequences=1)
    generated = response[0]["generated_text"]
    # text-generation pipelines return the prompt followed by the
    # continuation; strip the prompt so the log shows only the reply
    # (otherwise every bot line starts with "User: ... Chatbot:").
    reply = generated[len(prompt):].strip() if generated.startswith(prompt) else generated
    chat_history.append(f"User: {user_input}")
    chat_history.append(f"Bot: {reply}")
    return "\n".join(chat_history)
# Summarization Function
def summarize_text(text):
    """Summarize *text* with the summarization pipeline.

    Uses deterministic (greedy) decoding with the summary bounded to
    50-150 tokens, and returns the summary string.
    """
    summary = summarization_model(text, max_length=150, min_length=50, do_sample=False)
    return summary[0]["summary_text"]
# 4. Speech-to-Text (ASR) Function
def transcribe_audio(mic=None, file=None):
    """Transcribe recorded or uploaded audio to text.

    *mic* and *file* are audio file paths from the Gradio Audio
    components; the microphone recording takes precedence when both are
    supplied. Returns the transcription, or a prompt string when no
    audio was provided.
    """
    if mic:
        audio = mic
    elif file:
        audio = file
    else:
        return "Please record or upload an audio file."
    transcription = speech_to_text_model(audio)["text"]
    return transcription
# Create the Gradio interface: one tab per task, wired to the functions above.
with gr.Blocks(theme="dark") as app:
    gr.Markdown("# 🚀 Multi-Tab Language Application")
    with gr.Tabs():
        # Sentiment Analysis Tab
        with gr.Tab("Sentiment Analysis"):
            gr.Markdown("## 😊 Sentiment Analysis")
            text_input = gr.Textbox(label="Enter text:")
            sentiment_output = gr.Textbox(label="Sentiment")
            confidence_output = gr.Textbox(label="Confidence Score")
            analyze_button = gr.Button("Analyze")
            analyze_button.click(analyze_sentiment, inputs=text_input, outputs=[sentiment_output, confidence_output])

        # Chatbot Tab
        with gr.Tab("Chatbot"):
            gr.Markdown("## 🤖 Chatbot")
            chatbot_input = gr.Textbox(label="Chat with AI")
            chatbot_output = gr.Textbox(label="Response", interactive=False)
            chat_button = gr.Button("Send")
            chat_button.click(chatbot_response, inputs=chatbot_input, outputs=chatbot_output)

        # Summarization Tab
        with gr.Tab("Summarization"):
            gr.Markdown("## ✍️ Text Summarization")
            summary_input = gr.Textbox(label="Enter long text:", lines=5)
            summary_output = gr.Textbox(label="Summary", interactive=False)
            summarize_button = gr.Button("Summarize")
            summarize_button.click(summarize_text, inputs=summary_input, outputs=summary_output)

        # Speech-to-Text Tab
        with gr.Tab("Speech-to-Text"):
            gr.Markdown("## 🎤 Speech-to-Text (ASR)")
            # Both audio inputs are optional by default: an unset component is
            # passed to transcribe_audio as None. (The original code assigned a
            # nonexistent `.optional` attribute on the components — a no-op —
            # which has been removed.)
            mic_input = gr.Audio(sources=["microphone"], type="filepath", label="Record Audio")
            file_input = gr.Audio(sources=["upload"], type="filepath", label="Upload Audio")
            transcript_output = gr.Textbox(label="Transcription")
            transcribe_button = gr.Button("Transcribe")
            transcribe_button.click(transcribe_audio, inputs=[mic_input, file_input], outputs=transcript_output)

# Launch the application
app.launch()