import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel
# Page configuration — must be the first Streamlit call in the script.
st.set_page_config(
    page_title="AI Agent Chatbot",
    page_icon="π€",  # NOTE(review): icon string looks mojibake-garbled — likely an emoji originally; confirm
    layout="wide"
)
# Hugging Face model IDs: frozen base checkpoint + LoRA adapter repo.
BASE_MODEL = "Qwen/Qwen2.5-1.5B-Instruct"
LORA_REPO = "Redfire-1234/AI-agent-v2" # Change to AI-agent if using old model
# Initialize session state
# Chat history survives Streamlit reruns; each entry is {"role": ..., "content": ...}.
if "messages" not in st.session_state:
    st.session_state.messages = []
@st.cache_resource
def load_model():
    """Build and cache the tokenizer plus the LoRA-adapted causal LM.

    Returns:
        tuple: ``(tokenizer, model)`` where ``model`` is the base checkpoint
        with the LoRA adapter applied, switched to eval mode. Cached by
        ``st.cache_resource`` so the heavy download/load happens once.
    """
    tok = AutoTokenizer.from_pretrained(LORA_REPO)
    # Load the frozen backbone in fp16 and let accelerate place it on devices.
    backbone = AutoModelForCausalLM.from_pretrained(
        BASE_MODEL,
        torch_dtype=torch.float16,
        device_map="auto",
        low_cpu_mem_usage=True,
    )
    # Attach the fine-tuned LoRA weights on top of the backbone.
    adapted = PeftModel.from_pretrained(backbone, LORA_REPO)
    adapted.eval()
    return tok, adapted
def generate_response(tokenizer, model, user_input):
    """Generate a single assistant reply for ``user_input``.

    Args:
        tokenizer: HF tokenizer loaded from the LoRA repo.
        model: Causal LM (base + LoRA adapter), already in eval mode.
        user_input: Raw text typed by the user.

    Returns:
        str: The decoded reply with surrounding whitespace stripped.
    """
    messages = [{"role": "user", "content": user_input}]
    try:
        prompt = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )
    except Exception:
        # Fix: was a bare ``except:``, which would also swallow
        # KeyboardInterrupt/SystemExit. Fall back to a plain prompt only
        # when the tokenizer lacks a usable chat template.
        prompt = f"User: {user_input}\nAssistant:"
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=200,
            temperature=0.7,
            do_sample=True,
            top_p=0.9,
            repetition_penalty=1.1,
            pad_token_id=tokenizer.eos_token_id
        )
    # Slice off the prompt tokens so only newly generated text is decoded.
    new_tokens = outputs[0][inputs['input_ids'].shape[1]:]
    reply = tokenizer.decode(new_tokens, skip_special_tokens=True)
    return reply.strip()
def clear_chat():
    """Reset the conversation by emptying the session's message history."""
    # Item assignment on session_state is equivalent to attribute assignment.
    st.session_state["messages"] = []
# Header
# Header row: wide title column on the left, narrow reset button on the right.
col1, col2 = st.columns([6, 1])
with col1:
    st.title("π€ AI Agent Chatbot")
    st.caption("Powered by Qwen 2.5 + LoRA Fine-tuning")
with col2:
    # Resetting history then rerunning redraws the page with an empty chat.
    if st.button("ποΈ New Chat", use_container_width=True, type="secondary"):
        clear_chat()
        st.rerun()
# Load model
# Cached by @st.cache_resource, so the spinner is only slow on the first run.
with st.spinner("Loading model... (first time takes 2-3 minutes)"):
    tokenizer, model = load_model()
# --- Conversation history -------------------------------------------------
chat_container = st.container()
with chat_container:
    # Show a welcome banner until the first message arrives.
    if not st.session_state.messages:
        st.info("π **Welcome to AI Agent Chatbot!**\n\nπ¬ Ask me anything and keep the conversation going.\n\nβ οΈ **To end the conversation, simply type:** `q`")
    # One loop handles both speakers; the avatar is chosen from the role.
    for entry in st.session_state.messages:
        role = entry["role"]
        avatar = "π€" if role == "user" else "π€"
        with st.chat_message(role, avatar=avatar):
            st.write(entry["content"])
# --- Input handling -------------------------------------------------------
user_input = st.chat_input("Type your message here... (Type 'q' to end conversation)")
if user_input:
    # 'q' is a quit sentinel: post a farewell instead of querying the model.
    if user_input.strip().lower() == 'q':
        st.session_state.messages.append({
            "role": "assistant",
            "content": "π Goodbye! Click 'New Chat' to start a fresh conversation."
        })
        st.rerun()
    # Record the user's turn, generate the reply, record it, then redraw.
    st.session_state.messages.append({"role": "user", "content": user_input})
    with st.spinner("Thinking..."):
        reply = generate_response(tokenizer, model, user_input)
    st.session_state.messages.append({"role": "assistant", "content": reply})
    st.rerun()
# Sidebar: model info, live chat statistics, usage tips, and history reset.
with st.sidebar:
    st.header("βΉοΈ About")
    # Fix: these carried an f-prefix with no placeholders (useless f-string).
    st.write("**Base Model:** Qwen 2.5 1.5B")
    st.write("**LoRA Adapter:** AI-agent-v2")
    st.divider()
    st.header("π Chat Stats")
    history = st.session_state.messages
    st.metric("Messages", len(history))
    # Count per role without materializing throwaway lists.
    st.metric("User Messages", sum(1 for m in history if m["role"] == "user"))
    st.metric("Bot Messages", sum(1 for m in history if m["role"] == "assistant"))
    st.divider()
    st.header("π‘ Tips")
    st.info("""
- Type your question and press Enter
- Type 'q' to end the conversation
- Click 'New Chat' to start fresh
- All messages are saved in this session
""")
    st.divider()
    # Same reset action as the header's "New Chat" button.
    if st.button("ποΈ Clear History", use_container_width=True, type="primary"):
        clear_chat()
        st.rerun()
    st.divider()
    st.caption("Made with β€οΈ using Streamlit")