File size: 2,407 Bytes
b99b99e
 
1e22a05
b99b99e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1e22a05
 
b99b99e
 
1e22a05
b99b99e
 
 
 
 
 
 
 
 
 
 
 
 
8364f69
b99b99e
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
import streamlit as st
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from textblob import TextBlob
import os
from huggingface_hub import login

# Hugging Face Authentication
# strip() removes any trailing newline the secret store may have appended
hf_token = os.getenv("HUGGINGFACE_TOKEN", "").strip()

if hf_token:
    login(token=hf_token)
else:
    st.error("HUGGINGFACE_TOKEN not found. Please set your Hugging Face token.")
    st.stop()

# Load Model & Tokenizer
model_name = "meta-llama/Meta-Llama-3-8B"

@st.cache_resource
def load_model():
    """Load and cache the LLaMA 3 tokenizer and model.

    Cached with st.cache_resource so the (multi-GB) weights are loaded
    once per server process, not on every Streamlit rerun.

    Returns:
        tuple: (tokenizer, model) ready for text generation.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)

    # Pass the token here as well: Meta-Llama is a gated repository, and
    # the original authenticated only the tokenizer download. login() above
    # usually covers this implicitly, but being explicit keeps both calls
    # consistent and independent of cached credentials.
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        token=hf_token,
        torch_dtype=torch.float16,  # halves memory; switch to torch.float32 if fp16 is unsupported
        device_map="auto",          # let accelerate place layers on available devices
    )
    return tokenizer, model

tokenizer, model = load_model()

# Function to generate response & detect sentiment
def chatbot_with_sentiment(user_input):
    """Label the sentiment of *user_input* and generate a model reply.

    Args:
        user_input (str): raw text from the chat box.

    Returns:
        tuple[str, str]: (emoji sentiment label, generated response text).
    """
    # Sentiment Analysis: TextBlob polarity is in [-1.0, 1.0]; the sign picks the label
    polarity = TextBlob(user_input).sentiment.polarity
    if polarity > 0:
        emotion = "😃 Positive"
    elif polarity < 0:
        emotion = "😞 Negative"
    else:
        emotion = "😐 Neutral"

    # Generate Response. Move the encoded inputs onto the model's device:
    # with device_map="auto" the model may sit on a GPU while the tokenizer
    # returns CPU tensors, which would make generate() raise a device-mismatch
    # error (the original dropped the .to(...) call entirely).
    inputs = tokenizer(user_input, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=100)

    # Decode only the newly generated tokens: decoding output[0] in full
    # would echo the user's prompt back at the start of the reply, since
    # generate() returns prompt + continuation.
    prompt_len = inputs["input_ids"].shape[1]
    response = tokenizer.decode(output[0][prompt_len:], skip_special_tokens=True).strip()

    return emotion, response

# Streamlit UI
st.title("🤖 AI Chatbot with Sentiment Analysis")
st.write("Powered by LLaMA 3 & TextBlob")

# Chat History: persist prior turns in session state so each rerun redraws them
if "messages" not in st.session_state:
    st.session_state["messages"] = []

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# User Input
prompt = st.chat_input("Type your message here...")
if prompt:
    # Record and render the user's turn
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Get AI response
    emotion, ai_response = chatbot_with_sentiment(prompt)

    # Record and render the assistant's turn, prefixed with the sentiment label
    reply = f"{emotion}\n\n{ai_response}"
    st.session_state.messages.append({"role": "assistant", "content": reply})
    with st.chat_message("assistant"):
        st.markdown(reply)