Spaces:
Sleeping
Sleeping
Samarth4023 committed on
Commit ·
5d0c611
1
Parent(s): 3a1d273
Added requirements.txt and history JSON file
Browse files- chat_history.json +1 -0
- chatbot.py +56 -48
- requirements.txt +12 -0
chat_history.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
["[2025-03-14 19:35:09] You: Hi", "[2025-03-14 19:35:09] Bot: Hey", "[2025-03-14 19:35:39] You: I need help.", "[2025-03-14 19:35:39] Bot: Sure, what do you need help with?", "[2025-03-14 19:37:41] You: How old are You ?", "[2025-03-14 19:37:41] Bot: Age is just a number for me.", "[2025-03-14 19:38:21] You: hi", "[2025-03-14 19:38:21] Bot: Nothing much", "[2025-03-14 19:38:46] You: Budget", "[2025-03-14 19:38:46] Bot: To make a budget, start by tracking your income and expenses. Then, allocate your income towards essential expenses like rent, food, and bills. Next, allocate some of your income towards savings and debt repayment. Finally, allocate the remainder of your income towards discretionary expenses like entertainment and hobbies.", "[2025-03-14 19:47:18] You: I need help.", "[2025-03-14 19:47:18] Bot: I'm here to help. What's the problem?", "[2025-03-14 19:51:35] You: Hi", "[2025-03-14 19:51:35] Bot: Nothing much", "[2025-03-14 19:54:12] You: Recommend some excercises.", "[2025-03-14 19:54:12] Bot: Setting boundaries with digital devices can improve sleep, productivity, and overall well-being."]
|
chatbot.py
CHANGED
|
@@ -2,6 +2,7 @@ import streamlit as st
|
|
| 2 |
import torch
|
| 3 |
from transformers import BertTokenizer, BertForSequenceClassification
|
| 4 |
import json
|
|
|
|
| 5 |
import time
|
| 6 |
import datetime
|
| 7 |
import random
|
|
@@ -21,8 +22,6 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
|
| 21 |
# Load model & tokenizer
|
| 22 |
tokenizer = BertTokenizer.from_pretrained(MODEL_PATH)
|
| 23 |
model = BertForSequenceClassification.from_pretrained(MODEL_PATH)
|
| 24 |
-
|
| 25 |
-
# Move model to device
|
| 26 |
model.to(device)
|
| 27 |
model.eval()
|
| 28 |
|
|
@@ -30,10 +29,32 @@ model.eval()
|
|
| 30 |
with open("intents.json", "r") as file:
|
| 31 |
intents = json.load(file)
|
| 32 |
|
| 33 |
-
# Create a mapping between index and intent tag
|
| 34 |
intent_mapping = {i: intent['tag'] for i, intent in enumerate(intents)}
|
| 35 |
|
| 36 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 37 |
|
| 38 |
def predict_intent(text):
|
| 39 |
inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True).to(device)
|
|
@@ -41,95 +62,82 @@ def predict_intent(text):
|
|
| 41 |
outputs = model(**inputs)
|
| 42 |
|
| 43 |
predicted_index = torch.argmax(outputs.logits, dim=1).item()
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
if DEBUG: # Only print if debugging is enabled
|
| 47 |
-
print(f"Predicted Index: {predicted_index}, Mapped Intent: {predicted_tag}")
|
| 48 |
-
|
| 49 |
-
return predicted_tag
|
| 50 |
|
| 51 |
def get_response(intent_label):
|
| 52 |
-
if DEBUG:
|
| 53 |
-
print(f"Intent Label: {intent_label}")
|
| 54 |
-
|
| 55 |
for intent in intents:
|
| 56 |
-
if DEBUG:
|
| 57 |
-
print(f"Checking: {intent['tag']}")
|
| 58 |
if intent_label == intent['tag']:
|
| 59 |
return random.choice(intent['responses'])
|
| 60 |
-
|
| 61 |
return "I'm not sure how to respond to that."
|
| 62 |
|
| 63 |
-
# Initialize session state for chat history
|
| 64 |
-
if "chat_history" not in st.session_state:
|
| 65 |
-
st.session_state.chat_history = []
|
| 66 |
-
if "chat_input" not in st.session_state:
|
| 67 |
-
st.session_state.chat_input = ""
|
| 68 |
-
|
| 69 |
# Sidebar menu
|
| 70 |
with st.sidebar:
|
|
|
|
| 71 |
st.title("💬 Chatbot Menu")
|
| 72 |
add_vertical_space(1)
|
|
|
|
| 73 |
if st.button("🆕 New Chat"):
|
| 74 |
-
st.session_state.chat_history = []
|
| 75 |
st.session_state.chat_input = ""
|
| 76 |
-
|
|
|
|
| 77 |
if st.button("📜 Chat History"):
|
| 78 |
st.session_state.show_history = True
|
|
|
|
| 79 |
if st.button("ℹ️ About"):
|
| 80 |
st.session_state.show_about = True
|
| 81 |
|
| 82 |
-
#
|
| 83 |
history_modal = Modal("Chat History", key="history_modal")
|
| 84 |
about_modal = Modal("About", key="about_modal")
|
| 85 |
|
| 86 |
-
|
|
|
|
| 87 |
with history_modal.container():
|
| 88 |
st.subheader("🕰 Chat History")
|
| 89 |
-
for chat in st.session_state.chat_history:
|
| 90 |
st.write(chat)
|
| 91 |
if st.button("Close", key="close_history"):
|
| 92 |
-
st.session_state.show_history = False
|
| 93 |
st.rerun()
|
| 94 |
|
| 95 |
-
if
|
| 96 |
with about_modal.container():
|
| 97 |
st.subheader("ℹ️ About")
|
| 98 |
-
st.info("This is an intent-based chatbot powered by BERT, built using Streamlit
|
| 99 |
if st.button("Close", key="close_about"):
|
| 100 |
-
st.session_state.show_about = False
|
| 101 |
st.rerun()
|
| 102 |
|
| 103 |
# Chat UI
|
| 104 |
st.title("🤖 AI Chatbot")
|
| 105 |
st.markdown("### Talk to me, I'm listening...")
|
| 106 |
|
| 107 |
-
rain(
|
| 108 |
-
|
| 109 |
-
|
| 110 |
-
|
| 111 |
-
|
| 112 |
-
)
|
| 113 |
-
|
| 114 |
-
|
| 115 |
-
|
| 116 |
-
if st.session_state.chat_input:
|
| 117 |
-
intent_label = predict_intent(st.session_state.chat_input)
|
| 118 |
response = get_response(intent_label)
|
| 119 |
chat_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
| 120 |
-
|
|
|
|
|
|
|
| 121 |
st.session_state.chat_history.append(f"[{chat_time}] Bot: {response}")
|
| 122 |
|
| 123 |
-
#
|
|
|
|
|
|
|
|
|
|
| 124 |
with st.chat_message("user"):
|
| 125 |
-
st.markdown(f"**You:** {
|
| 126 |
time.sleep(0.5)
|
| 127 |
with st.chat_message("assistant"):
|
| 128 |
st.markdown(f"**Bot:** {response}")
|
| 129 |
|
| 130 |
-
# Clear input box after sending
|
| 131 |
-
st.session_state.chat_input = ""
|
| 132 |
-
|
| 133 |
# Style customization
|
| 134 |
st.markdown("""
|
| 135 |
<style>
|
|
|
|
| 2 |
import torch
|
| 3 |
from transformers import BertTokenizer, BertForSequenceClassification
|
| 4 |
import json
|
| 5 |
+
import os
|
| 6 |
import time
|
| 7 |
import datetime
|
| 8 |
import random
|
|
|
|
| 22 |
# Load model & tokenizer
|
| 23 |
tokenizer = BertTokenizer.from_pretrained(MODEL_PATH)
|
| 24 |
model = BertForSequenceClassification.from_pretrained(MODEL_PATH)
|
|
|
|
|
|
|
| 25 |
model.to(device)
|
| 26 |
model.eval()
|
| 27 |
|
|
|
|
| 29 |
with open("intents.json", "r") as file:
|
| 30 |
intents = json.load(file)
|
| 31 |
|
|
|
|
| 32 |
intent_mapping = {i: intent['tag'] for i, intent in enumerate(intents)}
|
| 33 |
|
| 34 |
+
# Chat history file
CHAT_HISTORY_FILE = "chat_history.json"


# Load Chat History (Ensuring Persistence)
def load_chat_history():
    """Return the persisted chat history list, or [] when unavailable.

    Reads CHAT_HISTORY_FILE from the working directory. A missing,
    empty, or corrupted file yields an empty history instead of
    crashing the app at import time (json.load raises on bad content).
    """
    if os.path.exists(CHAT_HISTORY_FILE):
        try:
            with open(CHAT_HISTORY_FILE, "r") as file:
                return json.load(file)
        except (json.JSONDecodeError, OSError):
            # A corrupt or unreadable history file should not break startup.
            return []
    return []
|
| 43 |
+
|
| 44 |
+
# Save Chat History
def save_chat_history():
    """Write the current session's chat history to CHAT_HISTORY_FILE as JSON."""
    history = st.session_state.chat_history
    with open(CHAT_HISTORY_FILE, "w") as out_file:
        json.dump(history, out_file)
|
| 48 |
+
|
| 49 |
+
# Initialize session state variables
|
| 50 |
+
if "chat_history" not in st.session_state:
|
| 51 |
+
st.session_state.chat_history = load_chat_history()
|
| 52 |
+
if "show_history" not in st.session_state:
|
| 53 |
+
st.session_state.show_history = False
|
| 54 |
+
if "show_about" not in st.session_state:
|
| 55 |
+
st.session_state.show_about = False
|
| 56 |
+
if "chat_input" not in st.session_state:
|
| 57 |
+
st.session_state.chat_input = ""
|
| 58 |
|
| 59 |
def predict_intent(text):
    """Classify `text` into an intent tag with the fine-tuned BERT model.

    Tokenizes the input, runs a forward pass on `device`, and maps the
    argmax logit index through `intent_mapping`. Returns "unknown" when
    the predicted class index has no mapping entry.
    """
    inputs = tokenizer(text, return_tensors="pt", padding=True, truncation=True).to(device)
    # Inference only: disable autograd to avoid tracking gradients
    # (saves memory and time on every request).
    with torch.no_grad():
        outputs = model(**inputs)
    predicted_index = torch.argmax(outputs.logits, dim=1).item()
    return intent_mapping.get(predicted_index, "unknown")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 66 |
|
| 67 |
def get_response(intent_label):
    """Return a random canned response for `intent_label`.

    Scans the module-level `intents` list for a matching 'tag' and picks
    one of its 'responses' at random; falls back to a fixed message when
    no intent matches.
    """
    matching = next(
        (entry for entry in intents if entry['tag'] == intent_label),
        None,
    )
    if matching is None:
        return "I'm not sure how to respond to that."
    return random.choice(matching['responses'])
|
| 72 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 73 |
# Sidebar menu
|
| 74 |
with st.sidebar:
|
| 75 |
+
st.image("Images/Bot Image.jpeg", use_container_width=True)
|
| 76 |
st.title("💬 Chatbot Menu")
|
| 77 |
add_vertical_space(1)
|
| 78 |
+
|
| 79 |
if st.button("🆕 New Chat"):
|
|
|
|
| 80 |
st.session_state.chat_input = ""
|
| 81 |
+
save_chat_history()
|
| 82 |
+
|
| 83 |
if st.button("📜 Chat History"):
|
| 84 |
st.session_state.show_history = True
|
| 85 |
+
|
| 86 |
if st.button("ℹ️ About"):
|
| 87 |
st.session_state.show_about = True
|
| 88 |
|
| 89 |
+
# Modal Windows (Chat History & About)
|
| 90 |
history_modal = Modal("Chat History", key="history_modal")
|
| 91 |
about_modal = Modal("About", key="about_modal")
|
| 92 |
|
| 93 |
+
# Display chat history without affecting the chat window
|
| 94 |
+
if st.session_state.show_history:
|
| 95 |
with history_modal.container():
|
| 96 |
st.subheader("🕰 Chat History")
|
| 97 |
+
for chat in st.session_state.chat_history[-10:]: # Show last 10 messages
|
| 98 |
st.write(chat)
|
| 99 |
if st.button("Close", key="close_history"):
|
| 100 |
+
st.session_state.show_history = False # Close modal without resetting UI
|
| 101 |
st.rerun()
|
| 102 |
|
| 103 |
+
if st.session_state.show_about:
|
| 104 |
with about_modal.container():
|
| 105 |
st.subheader("ℹ️ About")
|
| 106 |
+
st.info("This is an intent-based chatbot powered by BERT, built using Streamlit.")
|
| 107 |
if st.button("Close", key="close_about"):
|
| 108 |
+
st.session_state.show_about = False # Close modal without resetting UI
|
| 109 |
st.rerun()
|
| 110 |
|
| 111 |
# Chat UI
|
| 112 |
st.title("🤖 AI Chatbot")
|
| 113 |
st.markdown("### Talk to me, I'm listening...")
|
| 114 |
|
| 115 |
+
rain(emoji="💬", font_size=10, falling_speed=5, animation_length="infinite")
|
| 116 |
+
|
| 117 |
+
# Chat Input with Enter Button
|
| 118 |
+
with st.form("chat_form", clear_on_submit=True):
|
| 119 |
+
chat_input = st.text_input("You:", key="chat_input")
|
| 120 |
+
submit_button = st.form_submit_button("Enter")
|
| 121 |
+
|
| 122 |
+
if submit_button and chat_input:
|
| 123 |
+
intent_label = predict_intent(chat_input)
|
|
|
|
|
|
|
| 124 |
response = get_response(intent_label)
|
| 125 |
chat_time = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
|
| 126 |
+
|
| 127 |
+
# Append messages to chat history (User & Bot)
|
| 128 |
+
st.session_state.chat_history.append(f"[{chat_time}] You: {chat_input}")
|
| 129 |
st.session_state.chat_history.append(f"[{chat_time}] Bot: {response}")
|
| 130 |
|
| 131 |
+
# Save chat history persistently
|
| 132 |
+
save_chat_history()
|
| 133 |
+
|
| 134 |
+
# Display conversation immediately
|
| 135 |
with st.chat_message("user"):
|
| 136 |
+
st.markdown(f"**You:** {chat_input}")
|
| 137 |
time.sleep(0.5)
|
| 138 |
with st.chat_message("assistant"):
|
| 139 |
st.markdown(f"**Bot:** {response}")
|
| 140 |
|
|
|
|
|
|
|
|
|
|
| 141 |
# Style customization
|
| 142 |
st.markdown("""
|
| 143 |
<style>
|
requirements.txt
ADDED
|
@@ -0,0 +1,12 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
streamlit
torch
transformers
streamlit_extras
streamlit_modal
|