# app.py — Sentiment-&-Chat Bot Space (commit 13f0358)
import os
import gradio as gr
import torch
from transformers import pipeline
# ─── 1) SET UP PIPELINES ──────────────────────────────────────────────────────

# Run on GPU when one is available; HF pipelines use -1 to mean CPU.
device = 0 if torch.cuda.is_available() else -1

# Sentiment analysis (local).
# Fix: the original `pipeline("sentiment-analysis")` ran on CPU regardless of
# `device` and relied on the library's unpinned default model (which logs a
# warning and could change between transformers releases). Pin the documented
# default SST-2 DistilBERT model and place it on the same device as the chat
# model.
sentiment_pipe = pipeline(
    "sentiment-analysis",
    model="distilbert-base-uncased-finetuned-sst-2-english",
    device=device,
)

# Text-generation chat (local GPT-2 distilled variant).
chat_pipe = pipeline(
    "text-generation",
    model="distilgpt2",
    tokenizer="distilgpt2",
    device=device,
    max_new_tokens=100,   # cap length of the generated continuation
    do_sample=True,       # sample rather than greedy-decode
    temperature=0.7,      # mildly conservative randomness
)
def respond(message, chat_history):
    """Handle one chat turn and return the updated history.

    - If the message starts with "Sentiment:" (case-insensitive), run
      sentiment analysis on the remainder and report label + score.
    - Otherwise fall back to GPT-2 chat continuation.

    Args:
        message: raw user input string.
        chat_history: list of (user_text, bot_text) pairs kept by
            gr.Chatbot; may be None on the very first turn.

    Returns:
        The updated chat history (list of (user, bot) pairs).
    """
    if chat_history is None:  # Gradio may pass None before any turn exists
        chat_history = []

    if message.lower().startswith("sentiment:"):
        text = message[len("sentiment:"):].strip()
        result = sentiment_pipe(text)[0]
        label = result["label"]
        score = result["score"]
        # Fix: original emoji was mojibake ("πŸ”" — UTF-8 decoded as Latin-1).
        reply = f"🔍 Sentiment: **{label}** (score: {score:.3f})"
    else:
        # GPT-2 continuation; the pipeline returns
        # [{'generated_text': "<prompt + continuation>"}].
        out = chat_pipe(message)
        generated = out[0]["generated_text"]
        # Fix: strip the echoed prompt so the bot reply is only the new
        # continuation instead of repeating the user's message.
        if generated.startswith(message):
            generated = generated[len(message):]
        reply = generated.strip()

    # Fix: gr.Chatbot expects (user_message, bot_reply) PAIRS. The original
    # appended ("You", message) and ("Bot", reply), which rendered the literal
    # strings "You"/"Bot" as user messages and the real text as replies.
    chat_history.append((message, reply))
    return chat_history
# ─── 2) BUILD THE UI ───────────────────────────────────────────────────────────
with gr.Blocks() as demo:
    # Header and usage hint shown above the chat widget.
    gr.Markdown(
        "## 😊 Sentiment-&-Chat Bot\n"
        "_Type `Sentiment: <your text>` to analyze sentiment, or just chat!_"
    )

    chatbox = gr.Chatbot()
    user_input = gr.Textbox(placeholder="Type here…", show_label=False)

    # Pressing Enter feeds (text, current history) into respond() and
    # writes the updated history back into the chatbot widget.
    user_input.submit(respond, [user_input, chatbox], [chatbox])

# queue() creates the `/api/predict` endpoint Spaces needs
demo = demo.queue()
if __name__ == "__main__":
    # Spaces may inject a PORT env var; fall back to Gradio's default 7860.
    server_port = int(os.environ.get("PORT", "7860"))
    # Bind to all interfaces so the container's port mapping works.
    demo.launch(server_name="0.0.0.0", server_port=server_port)