Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -1,32 +1,45 @@
|
|
| 1 |
import gradio as gr
|
| 2 |
-
from transformers import pipeline
|
| 3 |
from gtts import gTTS
|
| 4 |
import tempfile
|
|
|
|
| 5 |
|
| 6 |
-
#
|
| 7 |
sentiment_model = pipeline("sentiment-analysis")
|
| 8 |
-
summarizer_model = pipeline("summarization")
|
| 9 |
|
| 10 |
-
#
|
| 11 |
-
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
score = round(result['score'], 2)
|
| 15 |
-
return f"Sentiment: {label}, Confidence: {score}"
|
| 16 |
|
| 17 |
-
# Summarization function
|
| 18 |
def summarize_text(text):
|
| 19 |
-
|
| 20 |
-
|
|
|
|
| 21 |
|
| 22 |
-
# Text
|
| 23 |
def text_to_speech(text):
|
| 24 |
tts = gTTS(text)
|
| 25 |
with tempfile.NamedTemporaryFile(delete=False, suffix=".mp3") as fp:
|
| 26 |
tts.save(fp.name)
|
| 27 |
return fp.name
|
| 28 |
|
| 29 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 30 |
with gr.Blocks() as demo:
|
| 31 |
gr.Markdown("## π Homework - Tuwaiq Academy")
|
| 32 |
|
|
@@ -35,7 +48,8 @@ with gr.Blocks() as demo:
|
|
| 35 |
input_sent = gr.Textbox(label="Enter your text", lines=6, placeholder="Type something...")
|
| 36 |
output_sent = gr.Textbox(label="Sentiment Result")
|
| 37 |
btn_sent = gr.Button("Analyze")
|
| 38 |
-
btn_sent.click(
|
|
|
|
| 39 |
|
| 40 |
with gr.Tab("π Summarization"):
|
| 41 |
gr.Markdown("### Summarize your text")
|
|
@@ -51,4 +65,11 @@ with gr.Blocks() as demo:
|
|
| 51 |
btn_tts = gr.Button("Convert")
|
| 52 |
btn_tts.click(text_to_speech, inputs=input_tts, outputs=output_audio)
|
| 53 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 54 |
demo.launch()
|
|
|
|
| 1 |
import gradio as gr
|
| 2 |
+
from transformers import pipeline, AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM
|
| 3 |
from gtts import gTTS
|
| 4 |
import tempfile
|
| 5 |
+
import torch
|
| 6 |
|
| 7 |
# ======= Sentiment Analysis =======
# Hugging Face pipeline with its default sentiment checkpoint
# (downloaded on first run; loaded once at import time).
sentiment_model = pipeline("sentiment-analysis")
|
|
|
|
| 9 |
|
| 10 |
# ======= Summarization with tokenizer/model =======
# Distilled BART checkpoint (trained on XSum, per the model name); tokenizer and
# seq2seq model are loaded explicitly instead of via pipeline() so that
# generate() parameters can be controlled directly in summarize_text().
sum_model_name = "sshleifer/distilbart-xsum-12-6"
sum_tokenizer = AutoTokenizer.from_pretrained(sum_model_name)
sum_model = AutoModelForSeq2SeqLM.from_pretrained(sum_model_name)
|
|
|
|
|
|
|
| 14 |
|
|
|
|
| 15 |
def summarize_text(text):
    """Summarize *text* with the distilbart-xsum model.

    Returns an empty string for blank input instead of feeding the model
    meaningless text. The input is truncated to the model's 1024-token limit.
    """
    if not text or not text.strip():
        return ""
    inputs = sum_tokenizer(text, return_tensors="pt", max_length=1024, truncation=True)
    # Pass the attention mask explicitly; generate() warns (and may misbehave
    # with padding) when it has to infer the mask from the input ids.
    summary_ids = sum_model.generate(
        inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_length=60,
        min_length=15,
        do_sample=False,
    )
    return sum_tokenizer.decode(summary_ids[0], skip_special_tokens=True)
|
| 19 |
|
| 20 |
+
# ======= Text to Speech =======
|
| 21 |
def text_to_speech(text):
    """Render *text* as speech with gTTS and return the path of a temporary MP3.

    The file is created with delete=False so it outlives this function for the
    caller (the Gradio audio component) to read.
    """
    speech = gTTS(text)
    audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
    try:
        speech.save(audio_file.name)
    finally:
        audio_file.close()
    return audio_file.name
|
| 26 |
|
| 27 |
# ======= Chatbot Tab =======
# DialoGPT conversational model; tokenizer and model are loaded once at import time.
chat_model_name = "microsoft/DialoGPT-medium"
chat_tokenizer = AutoTokenizer.from_pretrained(chat_model_name)
chat_model = AutoModelForCausalLM.from_pretrained(chat_model_name)
# Running conversation context (list of token-id tensors) shared across calls
# to chat_with_bot. NOTE(review): module-level state is shared by every user of
# the app and never reset — confirm this is acceptable for the deployment.
chat_history = []
|
| 32 |
+
|
| 33 |
+
def chat_with_bot(user_input):
    """Generate a DialoGPT reply to *user_input*, using the running conversation as context.

    Both the user turn and the bot's reply are appended to the module-level
    ``chat_history`` (the original version kept only user turns, so the model
    never saw its own previous replies — contrary to DialoGPT's intended
    usage). History is trimmed so the context cannot grow without bound.
    """
    global chat_history
    # DialoGPT expects each turn terminated by the EOS token.
    new_input_ids = chat_tokenizer.encode(user_input + chat_tokenizer.eos_token, return_tensors='pt')
    bot_input_ids = torch.cat(chat_history + [new_input_ids], dim=-1) if chat_history else new_input_ids
    response_ids = chat_model.generate(bot_input_ids, max_length=1000, pad_token_id=chat_tokenizer.eos_token_id)
    # Only the tokens generated past the prompt form the reply.
    reply_ids = response_ids[:, bot_input_ids.shape[-1]:]
    response = chat_tokenizer.decode(reply_ids[0], skip_special_tokens=True)
    # Remember both turns so the next call sees the full dialogue.
    chat_history.append(new_input_ids)
    chat_history.append(reply_ids)
    # Keep only the most recent turns so the context stays under max_length.
    if len(chat_history) > 10:
        chat_history = chat_history[-10:]
    return response
|
| 41 |
+
|
| 42 |
+
# ======= Gradio UI =======
|
| 43 |
with gr.Blocks() as demo:
|
| 44 |
gr.Markdown("## π Homework - Tuwaiq Academy")
|
| 45 |
|
|
|
|
| 48 |
input_sent = gr.Textbox(label="Enter your text", lines=6, placeholder="Type something...")
|
| 49 |
output_sent = gr.Textbox(label="Sentiment Result")
|
| 50 |
btn_sent = gr.Button("Analyze")
|
| 51 |
+
btn_sent.click(lambda text: sentiment_model(text)[0]['label'] + ", Confidence: " + str(round(sentiment_model(text)[0]['score'], 2)),
|
| 52 |
+
inputs=input_sent, outputs=output_sent)
|
| 53 |
|
| 54 |
with gr.Tab("π Summarization"):
|
| 55 |
gr.Markdown("### Summarize your text")
|
|
|
|
| 65 |
        btn_tts = gr.Button("Convert")
        # text_to_speech returns a temp MP3 path; output_audio (defined above,
        # presumably an audio component — confirm against the hidden lines) plays it.
        btn_tts.click(text_to_speech, inputs=input_tts, outputs=output_audio)
|
| 67 |
|
| 68 |
    with gr.Tab("π€ Chatbot"):
        gr.Markdown("### Chat with an AI Bot")
        chat_input = gr.Textbox(label="You:", placeholder="Ask me anything...", lines=2)
        chat_output = gr.Textbox(label="Bot:", lines=4)
        btn_chat = gr.Button("Send")
        # chat_with_bot keeps module-level history, so successive clicks
        # continue a single shared conversation.
        btn_chat.click(chat_with_bot, inputs=chat_input, outputs=chat_output)
|
| 74 |
+
|
| 75 |
# Start the Gradio server (blocking call).
demo.launch()
|