Spaces: Sleeping
Update src/streamlit_app.py
Browse files — src/streamlit_app.py (+61 −26)
src/streamlit_app.py
CHANGED
|
@@ -1,17 +1,18 @@
|
|
| 1 |
-
|
| 2 |
import torch
|
| 3 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
| 4 |
from peft import PeftModel
|
| 5 |
|
| 6 |
st.set_page_config(
|
| 7 |
-
page_title="AI Agent Chatbot
|
| 8 |
page_icon="π€",
|
| 9 |
layout="wide"
|
| 10 |
)
|
| 11 |
|
| 12 |
BASE_MODEL = "Qwen/Qwen2.5-1.5B-Instruct"
|
| 13 |
-
LORA_REPO = "Redfire-1234/AI-agent-v2" #
|
| 14 |
|
|
|
|
| 15 |
if "messages" not in st.session_state:
|
| 16 |
st.session_state.messages = []
|
| 17 |
|
|
@@ -38,7 +39,7 @@ def generate_response(tokenizer, model, user_input):
|
|
| 38 |
add_generation_prompt=True
|
| 39 |
)
|
| 40 |
except:
|
| 41 |
-
prompt = f"User: {user_input}\
|
| 42 |
|
| 43 |
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
|
| 44 |
|
|
@@ -57,61 +58,95 @@ def generate_response(tokenizer, model, user_input):
|
|
| 57 |
reply = tokenizer.decode(new_tokens, skip_special_tokens=True)
|
| 58 |
return reply.strip()
|
| 59 |
|
| 60 |
-
|
|
|
|
|
|
|
|
|
|
| 61 |
col1, col2 = st.columns([6, 1])
|
| 62 |
with col1:
|
| 63 |
-
st.title("π€ AI Agent Chatbot
|
| 64 |
-
st.caption("
|
| 65 |
with col2:
|
| 66 |
-
if st.button("ποΈ New Chat", use_container_width=True):
|
| 67 |
-
|
| 68 |
st.rerun()
|
| 69 |
|
| 70 |
-
|
|
|
|
| 71 |
tokenizer, model = load_model()
|
| 72 |
|
|
|
|
| 73 |
chat_container = st.container()
|
| 74 |
with chat_container:
|
| 75 |
if len(st.session_state.messages) == 0:
|
| 76 |
-
st.info("π **Welcome!** Ask me anything.\
|
| 77 |
|
| 78 |
for message in st.session_state.messages:
|
| 79 |
-
|
| 80 |
-
st.
|
|
|
|
|
|
|
|
|
|
|
|
|
| 81 |
|
| 82 |
-
|
|
|
|
| 83 |
|
| 84 |
if user_input:
|
|
|
|
| 85 |
if user_input.strip().lower() == 'q':
|
| 86 |
st.session_state.messages.append({
|
| 87 |
"role": "assistant",
|
| 88 |
-
"content": "π Goodbye! Click 'New Chat' to start fresh."
|
| 89 |
})
|
| 90 |
st.rerun()
|
| 91 |
|
| 92 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
| 93 |
|
|
|
|
| 94 |
with st.spinner("Thinking..."):
|
| 95 |
reply = generate_response(tokenizer, model, user_input)
|
| 96 |
|
| 97 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 98 |
st.rerun()
|
| 99 |
|
|
|
|
| 100 |
with st.sidebar:
|
| 101 |
st.header("βΉοΈ About")
|
| 102 |
-
st.write("**
|
| 103 |
-
st.write("**
|
| 104 |
-
st.write("**Fine-tuned:** Yes (LoRA)")
|
| 105 |
|
| 106 |
st.divider()
|
| 107 |
|
| 108 |
-
st.header("π Stats")
|
| 109 |
st.metric("Messages", len(st.session_state.messages))
|
|
|
|
|
|
|
| 110 |
|
| 111 |
st.divider()
|
| 112 |
|
| 113 |
-
st.
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
|
| 117 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Reconstructed from a corrupted diff paste: gutter markers ("| 1 |", "+", "|")
# stripped and conventional 4-space indentation restored.
import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from peft import PeftModel

# st.set_page_config must be the first Streamlit call in the script.
st.set_page_config(
    page_title="AI Agent Chatbot",
    page_icon="π€",
    layout="wide"
)

# Base checkpoint and the LoRA adapter repo applied on top of it.
BASE_MODEL = "Qwen/Qwen2.5-1.5B-Instruct"
LORA_REPO = "Redfire-1234/AI-agent-v2"  # Change to AI-agent if using old model

# Initialize session state: full chat transcript as a list of
# {"role": ..., "content": ...} dicts, persisted across reruns.
if "messages" not in st.session_state:
    st.session_state.messages = []
|
|
|
|
| 39 |
add_generation_prompt=True
|
| 40 |
)
|
| 41 |
except:
|
| 42 |
+
prompt = f"User: {user_input}\nAssistant:"
|
| 43 |
|
| 44 |
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
|
| 45 |
|
|
|
|
| 58 |
reply = tokenizer.decode(new_tokens, skip_special_tokens=True)
|
| 59 |
return reply.strip()
|
| 60 |
|
def clear_chat():
    """Reset the conversation by emptying the session's message history.

    Rebinds ``st.session_state.messages`` to a fresh list; the caller is
    responsible for triggering a rerun so the UI reflects the cleared state.
    """
    st.session_state.messages = []
# Header: wide title column plus a narrow reset-button column.
col1, col2 = st.columns([6, 1])
with col1:
    st.title("π€ AI Agent Chatbot")
    st.caption("Powered by Qwen 2.5 + LoRA Fine-tuning")
with col2:
    # "New Chat" wipes the transcript and reruns so the welcome banner shows.
    if st.button("ποΈ New Chat", use_container_width=True, type="secondary"):
        clear_chat()
        st.rerun()
# Load model (presumably cached by load_model — defined above, outside this
# hunk — so the spinner only reflects real work on the first run; confirm).
with st.spinner("Loading model... (first time takes 2-3 minutes)"):
    tokenizer, model = load_model()

# Display chat history.
chat_container = st.container()
with chat_container:
    # Empty transcript → show the welcome/usage banner instead.
    if not st.session_state.messages:
        st.info("π **Welcome to AI Agent Chatbot!**\n\nπ¬ Ask me anything and keep the conversation going.\n\nβ οΈ **To end the conversation, simply type:** `q`")

    # Re-render every stored turn on each rerun; avatar differs by role.
    for message in st.session_state.messages:
        if message["role"] == "user":
            with st.chat_message("user", avatar="π€"):
                st.write(message["content"])
        else:
            with st.chat_message("assistant", avatar="π€"):
                st.write(message["content"])
# Chat input
user_input = st.chat_input("Type your message here... (Type 'q' to end conversation)")

if user_input:
    # 'q' is the quit sentinel: post a goodbye and rerun immediately.
    if user_input.strip().lower() == 'q':
        st.session_state.messages.append({
            "role": "assistant",
            "content": "π Goodbye! Click 'New Chat' to start a fresh conversation."
        })
        # st.rerun() interrupts the script here, so the generation path
        # below never runs for the sentinel.
        st.rerun()

    # Record the user's turn before generating, so it survives the rerun.
    st.session_state.messages.append({
        "role": "user",
        "content": user_input
    })

    # Generate the assistant's reply (blocking; spinner shown meanwhile).
    with st.spinner("Thinking..."):
        reply = generate_response(tokenizer, model, user_input)

    # Record the assistant's turn, then rerun so the history view above
    # redraws with both new messages.
    st.session_state.messages.append({
        "role": "assistant",
        "content": reply
    })

    st.rerun()
# Sidebar: model info, live chat statistics, usage tips, and a history reset.
with st.sidebar:
    st.header("βΉοΈ About")
    # Plain literals — the original used f-strings with no placeholders (F541).
    st.write("**Base Model:** Qwen 2.5 1.5B")
    st.write("**LoRA Adapter:** AI-agent-v2")

    st.divider()

    st.header("π Chat Stats")
    # Count without materializing throwaway lists.
    st.metric("Messages", len(st.session_state.messages))
    st.metric("User Messages", sum(1 for m in st.session_state.messages if m["role"] == "user"))
    st.metric("Bot Messages", sum(1 for m in st.session_state.messages if m["role"] == "assistant"))

    st.divider()

    st.header("π‘ Tips")
    st.info("""
    - Type your question and press Enter
    - Type 'q' to end the conversation
    - Click 'New Chat' to start fresh
    - All messages are saved in this session
    """)

    st.divider()

    # Same reset action as the header button, duplicated for convenience.
    if st.button("ποΈ Clear History", use_container_width=True, type="primary"):
        clear_chat()
        st.rerun()

    st.divider()

    st.caption("Made with β€οΈ using Streamlit")