# EUONIA / app.py — Streamlit chat front-end for kshitij230/Eunoia
# (Hugging Face Space by kshitij230, commit 30ace8f)
import streamlit as st
from transformers import pipeline
# Cache the Hugging Face text-generation pipeline across Streamlit reruns
# so the model weights are loaded only once per process.
@st.cache_resource
def load_pipeline():
    """Return the (cached) Eunoia text-generation pipeline."""
    return pipeline("text-generation", model="kshitij230/Eunoia")


pipe = load_pipeline()
# Streamlit page config and header.
# NOTE: the emoji literals were mojibake in the scraped copy (UTF-8 bytes
# decoded as cp1252, e.g. "πŸ’œ"); restored to the intended 💜 / 🫂 / 💖.
st.set_page_config(page_title="Eunoia 💜", layout="centered")
st.title("🫂 Eunoia - Your Emotional Support Bot")
st.markdown("Speak your heart. I'm here to listen and help 💖")

# Initialise the chat history on the first run of the session.
if "messages" not in st.session_state:
    st.session_state.messages = []
# Replay the conversation so far so the transcript survives reruns.
for past in st.session_state.messages:
    with st.chat_message(past["role"]):
        st.markdown(past["content"])
# Input box — one chat turn per rerun.
user_input = st.chat_input("What's on your mind?")
if user_input:
    # Echo the user's message and record it in the session history.
    st.chat_message("user").markdown(user_input)
    st.session_state.messages.append({"role": "user", "content": user_input})

    # Build a plain-text transcript prompt:  "User: ...\nBot: ...\n...Bot:"
    lines = [
        f"{'User' if msg['role'] == 'user' else 'Bot'}: {msg['content']}"
        for msg in st.session_state.messages
    ]
    prompt = "\n".join(lines) + "\nBot:"

    # Generate the model response.
    with st.spinner("Eunoia is listening..."):
        response = pipe(
            prompt,
            # FIX: was max_length=200, which caps prompt + completion
            # together — once the chat history grew past ~200 tokens the
            # model could generate nothing. max_new_tokens caps only the
            # completion, regardless of prompt length.
            max_new_tokens=200,
            do_sample=True,
            temperature=0.7,
            top_k=50,
            top_p=0.95,
            repetition_penalty=1.2,
            pad_token_id=pipe.tokenizer.eos_token_id,
        )[0]["generated_text"]

    # Strip the echoed prompt, and cut off any hallucinated "User:" turn the
    # model may have continued the transcript with.
    reply = response[len(prompt):].split("User:", 1)[0].strip()

    # Display and record the bot reply.
    st.chat_message("assistant").markdown(reply)
    st.session_state.messages.append({"role": "assistant", "content": reply})