# SalesChatBot / app.py
# Source: Hugging Face Space (user VladB46), commit 8f9b0f3 ("Update app.py")
import streamlit as st
import os
import re
from query_chat import GeminiQanA
from pathlib import Path
def extract_text_from_txt(file_path):
    """Read a UTF-8 text file and collapse its non-blank lines into one string.

    Args:
        file_path: Path to the text file.

    Returns:
        The stripped, non-empty lines joined by single spaces. If the file
        does not exist, shows a Streamlit warning and returns placeholder
        text instead of raising.
    """
    try:
        with open(file_path, "r", encoding="utf-8") as file:
            # Iterate the file lazily instead of materializing readlines().
            return " ".join(line.strip() for line in file if line.strip())
    except FileNotFoundError:
        st.warning(f"File not found: {file_path}. Using placeholder text.")
        return f"Placeholder text for {os.path.basename(file_path)}"
def load_css(file_name):
    """Read a CSS file located next to app.py and inject it into the page."""
    try:
        css_file = Path(__file__).parent / file_name
        st.markdown(f'<style>{css_file.read_text()}</style>', unsafe_allow_html=True)
        # st.info(f"Loaded CSS: {file_name}") # Optional: uncomment for debugging
    except FileNotFoundError:
        st.error(f"CSS file not found: {file_name}. Make sure it's in the same directory as app.py.")
    except Exception as e:
        st.error(f"Error loading CSS file {file_name}: {e}")
@st.cache_resource()
def load_chatbot():
    """Build the GeminiQanA chatbot from the two brochure files (cached once per session)."""
    print("Attempting to load chatbot...")  # visible in server logs
    with st.spinner("Loading project information..."):
        # extract_text_from_txt falls back to placeholder text when a file is missing.
        brochure_one = extract_text_from_txt("Files/brochure_1.txt")
        brochure_two = extract_text_from_txt("Files/brochure_2.txt")
        bot = GeminiQanA(brochure_one, brochure_two)
        print("Chatbot loaded.")  # visible in server logs
        return bot
# --- Streamlit App Configuration ---
st.set_page_config(page_title="Zega AI Sales Agent", page_icon="🤖", layout="centered")
# Preload the Google fonts (Inter Tight, Space Grotesk) referenced by the custom CSS.
st.markdown("""
<link rel="preconnect" href="https://fonts.googleapis.com">
<link rel="preconnect" href="https://fonts.gstatic.com" crossorigin>
<link href="https://fonts.googleapis.com/css2?family=Inter+Tight:ital,wght@0,100..900;1,100..900&family=Space+Grotesk:wght@300..700&display=swap" rel="stylesheet">
""", unsafe_allow_html=True)
load_css("style.css")
st.sidebar.markdown("### Welcome to Zega AI Sales Agent!")
st.sidebar.markdown("Ask anything about our team's capabilities and projects.")
# Load API key safely (using dummy key for this example if not set)
if "GOOGLE_API_KEY" in st.secrets:
    os.environ["GOOGLE_API_KEY"] = st.secrets["GOOGLE_API_KEY"]
else:
    # For local testing without secrets, you might uncomment the next line
    # os.environ["GOOGLE_API_KEY"] = "YOUR_DUMMY_OR_REAL_API_KEY"
    st.sidebar.warning("Google API key not found in Streamlit secrets. Using demo mode.")
# --- Initialize Chatbot ---
# Ensure chatbot is loaded before defining callbacks that use it
try:
    chatbot = load_chatbot()
except Exception as e:
    st.error(f"Failed to load chatbot: {e}")
    st.stop()  # Stop execution if chatbot fails to load
# --- Initialize Session State ---
# Chat transcript: a list of {"role": ..., "content": ...} dicts.
if "messages" not in st.session_state:
    st.session_state.messages = []
# Initialize the input field state (important!) — read by the legacy button callbacks below.
if "question_input" not in st.session_state:
    st.session_state.question_input = ""
# NOTE(review): "asked_ai" is initialized here but never read anywhere visible
# in this file — confirm it is still needed before removing.
if "asked_ai" not in st.session_state:
    st.session_state.asked_ai = False
def handle_ask_ai():
    """Callback function for the 'Ask AI' button."""
    # Pull the pending question out of session state.
    question = st.session_state.question_input
    if not question:
        return
    st.session_state.messages.append({"role": "user", "content": question})
    try:
        answer = chatbot.answer_question(question)
        st.session_state.messages.append({"role": "assistant", "content": answer})
    except Exception as e:
        st.error(f"Error getting answer from chatbot: {e}")
    # Clear the input field once the question has been processed.
    st.session_state.question_input = ""
def handle_clear_chat():
    """Callback function for the 'Clear Chat' button."""
    # Wipe the input field state, the UI transcript, and the model-side history.
    st.session_state.question_input = ""
    st.session_state.messages = []
    chatbot.clear_conv_history()
    # on_click already triggers a rerun, so no explicit st.rerun() is needed.
# --- Chat UI ---
st.title("📄 Zega AI Sales Agent")
# Function to display messages and replace image tags with actual images
def display_message(role, content):
    """Render one chat message, replacing [xxx.png] tags with inline images.

    Args:
        role: Chat role ("user" or "assistant") passed to st.chat_message.
        content: Message text. Substrings of the form "[name.png]" are
            rendered as images loaded from the ZegaPos/ folder when present;
            everything else is rendered as markdown.
    """
    with st.chat_message(role):
        # Split the response on image tags, keeping the tags as list items.
        parts = re.split(r"(\[.*?\.png\])", content)
        for part in parts:
            if not part:  # re.split can yield empty strings — skip them
                continue
            match = re.match(r"\[(.*?\.png)\]", part)  # is this part an image tag?
            if not match:
                # Plain text segment.
                st.markdown(part)
                continue
            image_filename = match.group(1)
            # Images are expected in a subfolder relative to the script.
            image_folder = "ZegaPos"
            if not os.path.isdir(image_folder):
                st.warning(f"Image folder '{image_folder}' not found.")
                st.markdown(f"_{image_filename}_")  # filename-as-text fallback
                continue  # Skip trying to display the image
            image_path = os.path.join(image_folder, image_filename)
            if os.path.exists(image_path):
                st.image(image_path, use_container_width=True)
            else:
                st.markdown(f"⚠️ Image `{image_filename}` not found at `{image_path}`.")
# --- Display Chat History ---
# This loop runs on every rerun, redrawing the full transcript from session state.
for message in st.session_state.messages:
    display_message(message["role"], message["content"])
# --- Chat Input using st.chat_input (Handles Enter automatically) ---
prompt = st.chat_input("Ask a question about Zega AI:")
if prompt:
    # 1. Add user message to chat history immediately
    st.session_state.messages.append({"role": "user", "content": prompt})
    # 2. Display the user message (using the existing renderer)
    display_message("user", prompt)
    # 3. Get and display AI response
    try:
        with st.spinner("Thinking..."):  # Optional: spinner for feedback
            answer = chatbot.answer_question(prompt)
        st.session_state.messages.append({"role": "assistant", "content": answer})
        # Display the assistant message immediately after getting it
        display_message("assistant", answer)
    except Exception as e:
        st.error(f"Error getting answer from chatbot: {e}")
        # Optionally add an error message to the chat history:
        # st.session_state.messages.append({"role": "assistant", "content": f"Sorry, an error occurred: {e}"})
        # display_message("assistant", f"Sorry, an error occurred: {e}")
# No need to manually clear st.session_state.question_input —
# st.chat_input handles its state internally upon submission, and
# Streamlit automatically reruns after processing the input block.
# --- Clear Chat Button (kept separate from the chat flow) ---
# Force white text (and a red background) on Streamlit buttons, including the trash button.
st.markdown("""
<style>
.stButton > button {
    color: white !important;
    background-color: #ff4b4b; /* optional: red background for the trash button */
}
</style>
""", unsafe_allow_html=True)
if st.sidebar.button("🗑️ Clear Chat"):
    st.session_state.messages = []
    # Defensive: only clear model-side history if the chatbot actually loaded.
    if 'chatbot' in globals() and hasattr(chatbot, 'clear_conv_history'):
        chatbot.clear_conv_history()
    # st.session_state.question_input is no longer used, so no need to clear it
    st.rerun()  # Rerun to reflect the cleared chat visually
# NOTE(review): mid-file import kept to match the original layout; the
# component is only used for the height-reporting hack below.
import streamlit.components.v1 as components

# Invisible helper component: measures the rendered app height and posts it
# to the embedding page (window.parent.parent), presumably so an outer
# iframe can resize itself to fit — confirm against the host page.
components.html(
    """
    <script>
    function sendHeightWhenReady() {
        const el = window.parent.document.getElementsByClassName('stMain')[0];
        if (el) {
            const height = el.scrollHeight;
            console.log("Sending height to parent:", height);
            window.parent.parent.postMessage({ type: 'setHeight', height: height }, '*');
        } else {
            // Retry in 1000ms until the element appears
            setTimeout(sendHeightWhenReady, 1000);
        }
    }
    window.onload = sendHeightWhenReady;
    window.addEventListener('resize', sendHeightWhenReady);
    setInterval(sendHeightWhenReady, 1000);
    </script>
    """
)