# NOTE(review): the lines "Spaces: / Sleeping / Sleeping" were Hugging Face
# Spaces page-status residue from the copy-paste, not part of the program.
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

# Page chrome must be configured before any other Streamlit call.
st.set_page_config(
    page_title="AI Agent Chatbot",
    page_icon="🤖",  # restored from mojibake ("π€")
    layout="wide",
)

# Base checkpoint and the LoRA adapter applied on top of it.
BASE_MODEL = "Qwen/Qwen2.5-1.5B-Instruct"
LORA_REPO = "Redfire-1234/AI-agent-v2"  # Change to AI-agent if using old model

# Chat transcript lives in session state so it survives Streamlit reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []
@st.cache_resource(show_spinner=False)
def load_model():
    """Load the tokenizer, base model, and LoRA adapter once per process.

    Streamlit re-executes the whole script on every user interaction;
    without `st.cache_resource` the ~1.5B-parameter model (and adapter)
    would be reloaded from disk on every message. The decorator memoizes
    the (tokenizer, model) pair across reruns and sessions.

    Returns:
        tuple: (tokenizer, model) ready for inference.
    """
    tokenizer = AutoTokenizer.from_pretrained(LORA_REPO)
    base_model = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL,
        torch_dtype=torch.float16,
        device_map="auto",
        low_cpu_mem_usage=True,
    )
    # Attach the fine-tuned LoRA weights on top of the frozen base model.
    model = PeftModel.from_pretrained(base_model, LORA_REPO)
    model.eval()  # inference only: disables dropout etc.
    return tokenizer, model
def generate_response(tokenizer, model, user_input):
    """Generate a single assistant reply for *user_input*.

    NOTE: only the current message is sent to the model — earlier turns in
    st.session_state.messages are not included, so replies have no memory
    of the conversation.

    Args:
        tokenizer: Hugging Face tokenizer matching *model*.
        model: causal LM (base + LoRA) in eval mode.
        user_input: the user's latest message text.

    Returns:
        str: the decoded, whitespace-stripped model reply.
    """
    messages = [{"role": "user", "content": user_input}]
    try:
        prompt = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )
    # Narrowed from a bare `except:`, which also swallowed SystemExit /
    # KeyboardInterrupt. These are the errors raised when a tokenizer has
    # no usable chat template.
    except (AttributeError, ValueError):
        prompt = f"User: {user_input}\nAssistant:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            temperature=0.7,
            do_sample=True,
            top_p=0.9,
            repetition_penalty=1.1,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Keep only the tokens generated after the prompt.
    new_tokens = outputs[0][inputs["input_ids"].shape[1]:]
    reply = tokenizer.decode(new_tokens, skip_special_tokens=True)
    return reply.strip()
def clear_chat():
    """Reset the stored chat transcript to an empty conversation."""
    st.session_state["messages"] = []
# Header: title/caption on the left, a chat-reset button on the right.
col1, col2 = st.columns([6, 1])
with col1:
    st.title("🤖 AI Agent Chatbot")  # emoji restored from mojibake
    st.caption("Powered by Qwen 2.5 + LoRA Fine-tuning")
with col2:
    if st.button("🗑️ New Chat", use_container_width=True, type="secondary"):
        clear_chat()
        st.rerun()
# Load model (slow only on the first run; cached thereafter).
with st.spinner("Loading model... (first time takes 2-3 minutes)"):
    tokenizer, model = load_model()

# Display chat history (emoji restored from mojibake: 👋 💬 ⚠️ 👤 🤖).
chat_container = st.container()
with chat_container:
    if not st.session_state.messages:
        st.info(
            "👋 **Welcome to AI Agent Chatbot!**\n\n"
            "💬 Ask me anything and keep the conversation going.\n\n"
            "⚠️ **To end the conversation, simply type:** `q`"
        )
    for message in st.session_state.messages:
        if message["role"] == "user":
            with st.chat_message("user", avatar="👤"):
                st.write(message["content"])
        else:
            with st.chat_message("assistant", avatar="🤖"):
                st.write(message["content"])
# Chat input
user_input = st.chat_input("Type your message here... (Type 'q' to end conversation)")
if user_input:
    # Quit sentinel: append a farewell and rerun. st.rerun() raises a
    # RerunException, so the normal message flow below never executes.
    if user_input.strip().lower() == "q":
        st.session_state.messages.append({
            "role": "assistant",
            "content": "👋 Goodbye! Click 'New Chat' to start a fresh conversation.",
        })
        st.rerun()
    # Record the user's message before generating a reply.
    st.session_state.messages.append({
        "role": "user",
        "content": user_input,
    })
    # Generate response (single-turn: only this message is sent to the model).
    with st.spinner("Thinking..."):
        reply = generate_response(tokenizer, model, user_input)
    # Store the bot reply, then rerun so the transcript re-renders above.
    st.session_state.messages.append({
        "role": "assistant",
        "content": reply,
    })
    st.rerun()
# Sidebar: app info, transcript stats, usage tips, and a history reset.
with st.sidebar:
    st.header("ℹ️ About")
    # Plain strings: the originals were f-strings with no placeholders.
    st.write("**Base Model:** Qwen 2.5 1.5B")
    st.write("**LoRA Adapter:** AI-agent-v2")
    st.divider()
    st.header("📊 Chat Stats")
    st.metric("Messages", len(st.session_state.messages))
    st.metric("User Messages", len([m for m in st.session_state.messages if m["role"] == "user"]))
    st.metric("Bot Messages", len([m for m in st.session_state.messages if m["role"] == "assistant"]))
    st.divider()
    st.header("💡 Tips")
    st.info("""
    - Type your question and press Enter
    - Type 'q' to end the conversation
    - Click 'New Chat' to start fresh
    - All messages are saved in this session
    """)
    st.divider()
    if st.button("🗑️ Clear History", use_container_width=True, type="primary"):
        clear_chat()
        st.rerun()
    st.divider()
    st.caption("Made with ❤️ using Streamlit")