# app.py — Streamlit chatbot with sentiment analysis (Hugging Face Space)
# Source revision: commit 1e22a05 (verified), "Update app.py" by saikiranmansa
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from textblob import TextBlob
import os
from huggingface_hub import login
# --- Hugging Face authentication ---
# Read the access token from the environment; strip() removes any stray
# trailing newline that can sneak into injected secrets.
hf_token = os.getenv("HUGGINGFACE_TOKEN", "").strip()
if hf_token:
    login(token=hf_token)
else:
    st.error("HUGGINGFACE_TOKEN not found. Please set your Hugging Face token.")
    st.stop()
# --- Model & tokenizer ---
model_name = "meta-llama/Meta-Llama-3-8B"


@st.cache_resource
def load_model():
    """Load the Llama-3 tokenizer and model once per Streamlit server process.

    Returns a ``(tokenizer, model)`` pair. ``st.cache_resource`` ensures the
    weights are not re-downloaded/re-loaded on every script rerun.
    """
    tok = AutoTokenizer.from_pretrained(model_name, token=hf_token)
    # Loaded without a BitsAndBytesConfig: plain fp16 weights, with
    # accelerate's device_map deciding placement automatically.
    lm = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16,  # switch to torch.float32 if fp16 is unsuitable
        device_map="auto",
    )
    return tok, lm


tokenizer, model = load_model()
# Function to generate response & detect sentiment
def chatbot_with_sentiment(user_input: str) -> tuple[str, str]:
    """Classify the sentiment of *user_input* and generate a model reply.

    Parameters
    ----------
    user_input : str
        Raw text typed by the user.

    Returns
    -------
    tuple[str, str]
        ``(emotion, response)`` — an emoji-labelled sentiment string and the
        newly generated model text (without the echoed prompt).
    """
    # Sentiment analysis: TextBlob polarity lies in [-1, 1]; its sign
    # selects the label.
    sentiment = TextBlob(user_input).sentiment.polarity
    emotion = "πŸ˜ƒ Positive" if sentiment > 0 else "😞 Negative" if sentiment < 0 else "😐 Neutral"

    # Generate response. Inputs are built on CPU (no explicit .to("cuda"));
    # accelerate's device_map hooks handle placement during generate().
    inputs = tokenizer(user_input, return_tensors="pt")
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=100)

    # BUG FIX: for causal LMs, generate() returns the prompt tokens followed
    # by the new tokens, so decoding output[0] in full would prepend the
    # user's own message to every reply. Slice off the prompt first.
    prompt_len = inputs["input_ids"].shape[1]
    response = tokenizer.decode(output[0][prompt_len:], skip_special_tokens=True).strip()
    return emotion, response
# --- Streamlit UI ---
st.title("πŸ€– AI Chatbot with Sentiment Analysis")
st.write("Powered by LLaMA 3 & TextBlob")

# Replay the conversation history kept in session state so it survives reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Handle a new user turn, if one was submitted this rerun.
if prompt := st.chat_input("Type your message here..."):
    # Record and display the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Run sentiment + generation, then show the labelled reply.
    mood, reply = chatbot_with_sentiment(prompt)
    assistant_text = f"{mood}\n\n{reply}"
    st.session_state.messages.append({"role": "assistant", "content": assistant_text})
    with st.chat_message("assistant"):
        st.markdown(assistant_text)