File size: 3,456 Bytes
be98f23
 
 
 
 
 
 
ee5b656
be98f23
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
ee5b656
 
 
 
 
 
 
 
 
 
 
be98f23
 
 
 
ee5b656
be98f23
 
 
 
 
 
 
 
ee5b656
be98f23
 
 
 
 
 
 
 
ee5b656
be98f23
 
 
 
 
 
 
 
ee5b656
be98f23
 
ee5b656
 
 
 
 
 
 
 
 
 
 
 
be98f23
ee5b656
be98f23
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import gradio as gr
from transformers import pipeline

# Load pre-trained models
# NOTE: each pipeline() call downloads model weights on first use and loads
# them into memory at import time, so startup can be slow and memory-heavy.
# Where no model is named, the transformers default checkpoint for that task
# is used.
sentiment_model = pipeline("sentiment-analysis")
# Conversational text-generation model (Microsoft DialoGPT, medium size).
chatbot_model = pipeline("text-generation", model="microsoft/DialoGPT-medium")
summarization_model = pipeline("summarization")
speech_to_text_model = pipeline("automatic-speech-recognition")

# Sentiment Analysis Function
def analyze_sentiment(text):
    """Classify *text* with the sentiment pipeline.

    Returns a (label, confidence) pair, confidence rounded to 4 decimals.
    """
    prediction = sentiment_model(text)[0]
    label = prediction["label"]
    confidence = round(prediction["score"], 4)
    return label, confidence

# Chatbot Function
chat_history = []  # module-level transcript; grows for the process lifetime


def chatbot_response(user_input):
    """Generate a chatbot reply and return the full running transcript.

    Appends "User: ..." and "Bot: ..." lines to the module-level
    ``chat_history`` and returns the whole history joined with newlines.
    """
    global chat_history
    prompt = f"User: {user_input} Chatbot:"
    response = chatbot_model(prompt, max_length=100, num_return_sequences=1)
    generated = response[0]["generated_text"]
    # BUG FIX: text-generation pipelines include the prompt at the start of
    # generated_text, so the transcript previously showed
    # "Bot: User: ... Chatbot: ...". Strip the echoed prompt so only the
    # model's actual reply is recorded.
    if generated.startswith(prompt):
        reply = generated[len(prompt):].strip()
    else:
        reply = generated
    chat_history.append(f"User: {user_input}")
    chat_history.append(f"Bot: {reply}")
    return "\n".join(chat_history)

# Summarization Function
def summarize_text(text):
    """Return a deterministic (greedy, do_sample=False) summary of *text*.

    The summary length is bounded to 50-150 tokens by the pipeline arguments.
    """
    result = summarization_model(
        text, max_length=150, min_length=50, do_sample=False
    )
    return result[0]["summary_text"]

# 4️⃣ Speech-to-Text (ASR) Function
def transcribe_audio(mic=None, file=None):
    """Transcribe a recorded or uploaded audio file.

    The microphone recording takes precedence over the uploaded file; if
    neither is provided, a prompt message is returned instead.
    """
    audio = mic or file
    if not audio:
        return "Please record or upload an audio file."
    return speech_to_text_model(audio)["text"]

# Create Gradio Interface with Tabs
with gr.Blocks(theme="dark") as app:
    gr.Markdown("# 🚀 Multi-Tab Language Application")

    with gr.Tabs():
        # Sentiment Analysis Tab: single textbox in, label + score out.
        with gr.Tab("Sentiment Analysis"):
            gr.Markdown("## 📊 Sentiment Analysis")
            text_input = gr.Textbox(label="Enter text:")
            sentiment_output = gr.Textbox(label="Sentiment")
            confidence_output = gr.Textbox(label="Confidence Score")
            analyze_button = gr.Button("Analyze")

            # analyze_sentiment returns (label, score); map to the two outputs.
            analyze_button.click(analyze_sentiment, inputs=text_input, outputs=[sentiment_output, confidence_output])

        # Chatbot Tab: output shows the full running transcript.
        with gr.Tab("Chatbot"):
            gr.Markdown("## 🤖 Chatbot")
            chatbot_input = gr.Textbox(label="Chat with AI")
            chatbot_output = gr.Textbox(label="Response", interactive=False)
            chat_button = gr.Button("Send")

            chat_button.click(chatbot_response, inputs=chatbot_input, outputs=chatbot_output)

        # Summarization Tab
        with gr.Tab("Summarization"):
            gr.Markdown("## ✍️ Text Summarization")
            summary_input = gr.Textbox(label="Enter long text:", lines=5)
            summary_output = gr.Textbox(label="Summary", interactive=False)
            summarize_button = gr.Button("Summarize")

            summarize_button.click(summarize_text, inputs=summary_input, outputs=summary_output)

        # Speech-to-Text Tab: either source may be left empty;
        # transcribe_audio handles the "neither provided" case itself.
        with gr.Tab("Speech-to-Text"):
            gr.Markdown("## 🎤 Speech-to-Text (ASR)")

            mic_input = gr.Audio(sources=["microphone"], type="filepath", label="Record Audio")
            # Removed dead code: `mic_input.optional = True` / `file_input.optional = True`
            # set an attribute that is not part of the Gradio Component API and
            # has no effect — Audio inputs are already allowed to be empty.
            file_input = gr.Audio(sources=["upload"], type="filepath", label="Upload Audio")

            transcript_output = gr.Textbox(label="Transcription")
            transcribe_button = gr.Button("Transcribe")

            transcribe_button.click(transcribe_audio, inputs=[mic_input, file_input], outputs=transcript_output)

# Launch the application
app.launch()