# LLaMA 3 Streamlit chatbot with sentiment analysis (Hugging Face Space).
# Standard library
import os

# Third-party
import streamlit as st
import torch
from huggingface_hub import login
from textblob import TextBlob
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face authentication: the token is read from the H_API_KEY
# environment variable (set as a Space secret).
hf_token = os.getenv("H_API_KEY")
if not hf_token:
    # Name the variable the code actually reads (the previous message said
    # HUGGINGFACE_TOKEN, which does not match the os.getenv call above).
    st.error("H_API_KEY not found. Please set your Hugging Face token.")
    st.stop()
login(token=hf_token)
# Model & tokenizer setup.
model_name = "meta-llama/Meta-Llama-3-8B"


# st.cache_resource keeps the multi-GB model in memory across Streamlit
# reruns; without it the model would be reloaded on every user interaction.
@st.cache_resource
def load_model():
    """Load the tokenizer and fp16 causal LM for ``model_name``.

    Returns:
        tuple: ``(tokenizer, model)`` — the model is sharded across the
        available devices via ``device_map="auto"``.
    """
    # ``token=`` replaces the deprecated ``use_auth_token=`` argument.
    tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype=torch.float16,
        device_map="auto",
        token=hf_token,
    )
    return tokenizer, model


tokenizer, model = load_model()
# Chatbot with sentiment
def chatbot_with_sentiment(user_input):
    """Classify the sentiment of *user_input* and generate a model reply.

    Args:
        user_input (str): The raw chat message typed by the user.

    Returns:
        tuple[str, str]: ``(emotion_label, response_text)``.
    """
    # TextBlob polarity is a float in [-1.0, 1.0]; bucket it into three labels.
    # NOTE(review): the original labels were mojibake-corrupted emoji
    # ("π ..."); restored to plausible emoji — confirm the intended glyphs.
    sentiment = TextBlob(user_input).sentiment.polarity
    if sentiment > 0:
        emotion = "😊 Positive"
    elif sentiment < 0:
        emotion = "😠 Negative"
    else:
        emotion = "😐 Neutral"

    inputs = tokenizer(user_input, return_tensors="pt").to(model.device)
    with torch.no_grad():
        output = model.generate(**inputs, max_new_tokens=150)
    # Strip the echoed prompt: keep only the tokens generated after the input.
    generated_tokens = output[0][inputs["input_ids"].shape[-1]:]
    response = tokenizer.decode(generated_tokens, skip_special_tokens=True).strip()
    return emotion, response
# Streamlit UI (emoji in the title/button were mojibake-corrupted; restored).
st.title("🤖 LLaMA 3 Chatbot with Sentiment Analysis")
st.write("Ask anything, and see the sentiment of your input.")

# Clear-chat button: resets the stored conversation. Safe to run before the
# init below because it assigns the same empty-list value.
if st.button("🧹 Clear Chat"):
    st.session_state.messages = []

# Initialize the chat history on first run, then replay it so the
# conversation persists across Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []
for msg in st.session_state.messages:
    with st.chat_message(msg["role"]):
        st.markdown(msg["content"])
# User turn: st.chat_input returns None until the user submits a message,
# so the walrus guard skips the whole turn on an empty rerun.
if prompt := st.chat_input("Type your message here..."):
    # Record and echo the user's message.
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Run the model and prepend the detected sentiment to its reply.
    emotion, ai_response = chatbot_with_sentiment(prompt)
    final_response = f"{emotion}\n\n{ai_response}"

    # Persist and render the assistant's turn.
    st.session_state.messages.append(
        {"role": "assistant", "content": final_response}
    )
    with st.chat_message("assistant"):
        st.markdown(final_response)