| | import streamlit as st |
| | import torch |
| | from transformers import AutoModelForCausalLM, AutoTokenizer |
| | from textblob import TextBlob |
| | import os |
| | from huggingface_hub import login |
| |
|
| | |
# --- Hugging Face authentication -------------------------------------------
# Read the access token from the environment; strip() guards against stray
# whitespace or a trailing newline pasted into the env var.
hf_token = os.environ.get("HUGGINGFACE_TOKEN", "").strip()

# The gated Llama 3 weights cannot be downloaded without a token, so halt
# the Streamlit script early with a visible error message.
if not hf_token:
    st.error("HUGGINGFACE_TOKEN not found. Please set your Hugging Face token.")
    st.stop()

login(token=hf_token)

# Gated checkpoint — requires an approved access request on the Hub.
model_name = "meta-llama/Meta-Llama-3-8B"
| |
|
@st.cache_resource
def load_model():
    """Download and cache the Llama 3 tokenizer and model.

    Decorated with ``st.cache_resource`` so the weights are materialized once
    per Streamlit server process instead of on every script rerun.

    Returns:
        tuple: ``(tokenizer, model)`` for ``model_name``.
    """
    tok = AutoTokenizer.from_pretrained(model_name, token=hf_token)
    # fp16 halves the memory footprint; device_map="auto" lets accelerate
    # place layers on the available GPU(s)/CPU automatically.
    mdl = AutoModelForCausalLM.from_pretrained(
        model_name,
        device_map="auto",
        torch_dtype=torch.float16,
    )
    return tok, mdl
| |
|
| | tokenizer, model = load_model() |
| |
|
| | |
def chatbot_with_sentiment(user_input):
    """Classify the sentiment of *user_input* and generate a model reply.

    Args:
        user_input: Raw text typed by the user.

    Returns:
        tuple[str, str]: ``(emotion_label, response_text)`` where the label
        reflects the sign of TextBlob's polarity score.
    """
    # TextBlob polarity is a float in [-1.0, 1.0]; map its sign to a label.
    # NOTE(review): the original labels were mojibake ("π Positive", ...);
    # restored as emoji — confirm against the intended copy.
    polarity = TextBlob(user_input).sentiment.polarity
    if polarity > 0:
        emotion = "😊 Positive"
    elif polarity < 0:
        emotion = "😞 Negative"
    else:
        emotion = "😐 Neutral"

    inputs = tokenizer(user_input, return_tensors="pt")
    # Bug fix: with device_map="auto" the model may sit on GPU while the
    # tokenizer returns CPU tensors — move the inputs to the model's device
    # to avoid a device-mismatch error at generate() time.
    inputs = {k: v.to(model.device) for k, v in inputs.items()}

    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=100)

    # Bug fix: generate() returns prompt + completion; decode only the newly
    # generated tokens so the reply does not echo the user's message back.
    prompt_len = inputs["input_ids"].shape[1]
    response = tokenizer.decode(
        output[0][prompt_len:], skip_special_tokens=True
    ).strip()

    return emotion, response
| |
|
| | |
# --- Streamlit chat UI ------------------------------------------------------
# NOTE(review): the original title text was mojibake ("π€"); restored as the
# robot emoji — confirm against the intended copy.
st.title("🤖 AI Chatbot with Sentiment Analysis")
st.write("Powered by LLaMA 3 & TextBlob")

# Persist the conversation across Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []

# Replay the stored history so earlier turns remain visible after each rerun.
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])

user_input = st.chat_input("Type your message here...")
if user_input:
    # Record and render the user's turn.
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.chat_message("user"):
        st.markdown(user_input)

    emotion, ai_response = chatbot_with_sentiment(user_input)

    # Build the assistant text once so the stored history and the live
    # display can never diverge.
    reply = f"{emotion}\n\n{ai_response}"
    st.session_state.messages.append({"role": "assistant", "content": reply})
    with st.chat_message("assistant"):
        st.markdown(reply)
| |
|