Vlad Bastina committed on
Commit
7fc2127
·
1 Parent(s): 4fd92c2

prompt changes

Browse files
Files changed (2) hide show
  1. app.py +110 -55
  2. query_chat.py +16 -14
app.py CHANGED
@@ -2,101 +2,156 @@ import streamlit as st
2
  import os
3
  import re
4
  from query_chat import GeminiQanA
5
- from docx import Document
6
 
7
  def extract_text_from_txt(file_path):
8
- with open(file_path, "r", encoding="utf-8") as file:
9
- return " ".join([line.strip() for line in file.readlines() if line.strip()])
 
 
 
 
 
 
10
 
11
  @st.cache_resource()
12
  def load_chatbot():
 
13
  with st.spinner("Loading project information..."):
 
14
  doc1_text = extract_text_from_txt("Files/brochure_1.txt")
15
  doc2_text = extract_text_from_txt("Files/brochure_2.txt")
16
-
17
- return GeminiQanA(doc1_text, doc2_text)
 
18
 
19
  # Streamlit App Configuration
20
  st.set_page_config(page_title="Zega AI Sales Agent", page_icon="🤖", layout="centered")
21
 
22
  # Sidebar with branding
23
- st.sidebar.image("zega_logo.PNG", width=300)
 
 
 
 
 
24
  st.sidebar.markdown("### Welcome to Zega AI Sales Agent!")
25
  st.sidebar.markdown("Ask anything about our team's capabilities and projects.")
26
 
27
- # Load API key safely
28
  if "GOOGLE_API_KEY" in st.secrets:
29
  os.environ["GOOGLE_API_KEY"] = st.secrets["GOOGLE_API_KEY"]
30
  else:
31
- st.error("API key missing! Please set up your Google API key in Streamlit secrets.")
32
-
33
- # Initialize chatbot
34
- chatbot = load_chatbot()
35
-
36
- # Initialize chat session
 
 
 
 
 
 
 
37
  if "messages" not in st.session_state:
38
  st.session_state.messages = []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
39
 
40
- # Chat UI
41
  st.title("📄 Zega AI Sales Agent")
42
 
43
  # Function to display messages and replace image tags with actual images
44
  def display_message(role, content):
45
- with st.chat_message(role):
46
- # Find all image tags in the response
47
- image_tags = re.findall(r"\[(.*?\.png)]", content)
48
-
49
  # Split response by image tags and process separately
50
- parts = re.split(r"\[(.*?\.png)]", content)
51
-
52
  for part in parts:
53
- if part in image_tags:
54
- # If it's an image tag, check if the image exists and display it
55
- image_path = f"ZegaPos/{part}"
 
 
 
 
 
 
 
 
 
 
 
56
  if os.path.exists(image_path):
57
  st.image(image_path, use_container_width=True)
58
  else:
59
- st.markdown(f"⚠️ Image `{part}` not found.")
60
  else:
61
  # Otherwise, display text
62
  st.markdown(part)
63
 
64
- # Display chat history
 
 
65
  for message in st.session_state.messages:
66
  display_message(message["role"], message["content"])
67
 
68
- # User Input
69
- question = st.text_area("Ask a question about Zega AI:", height=100)
 
 
 
 
 
70
 
71
- # Chat Actions
72
  col1, col2 = st.columns([3, 1])
73
  with col1:
74
- ask_button = st.button("💬 Ask AI")
 
75
  with col2:
76
- clear_button = st.button("🗑️ Clear Chat")
 
77
 
78
- # Clear chat history
79
- if clear_button:
80
- st.session_state.messages = []
81
- chatbot.clear_conv_history()
82
- st.rerun()
83
-
84
- # Handle user input
85
- if ask_button and question:
86
- # Append user question
87
- st.session_state.messages.append({"role": "user", "content": question})
88
-
89
- # Display user message
90
- display_message("user", question)
91
-
92
- # Generate AI response
93
- with st.spinner("💡 Thinking..."):
94
- answer = chatbot.answer_question(question)
95
-
96
- # Append AI response
97
- st.session_state.messages.append({"role": "assistant", "content": answer})
98
-
99
- # Display AI response with image handling
100
- display_message("assistant", answer)
101
-
102
- st.rerun()
 
2
  import os
3
  import re
4
  from query_chat import GeminiQanA
 
5
 
6
def extract_text_from_txt(file_path):
    """Read a UTF-8 text file and collapse its non-blank lines into a single
    space-separated string.

    Falls back to placeholder text (and emits a Streamlit warning) when the
    file does not exist, so the app can still start without the brochures.
    """
    # In a real scenario, handle FileNotFoundError
    try:
        with open(file_path, "r", encoding="utf-8") as handle:
            stripped_lines = (line.strip() for line in handle)
            return " ".join(chunk for chunk in stripped_lines if chunk)
    except FileNotFoundError:
        st.warning(f"File not found: {file_path}. Using placeholder text.")
        return f"Placeholder text for {os.path.basename(file_path)}"
15
 
16
@st.cache_resource()
def load_chatbot():
    """Build and cache the GeminiQanA chatbot from the two brochure files.

    Cached with st.cache_resource so the model is constructed once per
    server process, not on every Streamlit rerun.
    """
    print("Attempting to load chatbot...")  # startup trace in server logs
    with st.spinner("Loading project information..."):
        # Use dummy paths if Files/ directory doesn't exist or is empty
        # (extract_text_from_txt substitutes placeholder text on missing files).
        first_brochure = extract_text_from_txt("Files/brochure_1.txt")
        second_brochure = extract_text_from_txt("Files/brochure_2.txt")
        bot = GeminiQanA(first_brochure, second_brochure)
        print("Chatbot loaded.")  # startup trace in server logs
        return bot
26
 
27
# Streamlit App Configuration
st.set_page_config(page_title="Zega AI Sales Agent", page_icon="🤖", layout="centered")

# Sidebar with branding; degrade to a warning when the logo file is absent
# so a missing asset does not crash the app.
logo_path = "zega_logo.PNG"
if not os.path.exists(logo_path):
    st.sidebar.warning("Logo zega_logo.PNG not found.")
else:
    st.sidebar.image(logo_path, width=300)
st.sidebar.markdown("### Welcome to Zega AI Sales Agent!")
st.sidebar.markdown("Ask anything about our team's capabilities and projects.")
39
 
40
# Load API key safely (using dummy key for this example if not set)
if "GOOGLE_API_KEY" in st.secrets:
    os.environ["GOOGLE_API_KEY"] = st.secrets["GOOGLE_API_KEY"]
else:
    # For local testing without secrets, you might uncomment the next line
    # os.environ["GOOGLE_API_KEY"] = "YOUR_DUMMY_OR_REAL_API_KEY"
    st.sidebar.warning("Google API key not found in Streamlit secrets. Using demo mode.")

# --- Initialize Chatbot ---
# Must happen before the button callbacks below, which close over `chatbot`.
try:
    chatbot = load_chatbot()
except Exception as e:
    st.error(f"Failed to load chatbot: {e}")
    st.stop()  # halt the script: nothing below works without the chatbot

# --- Initialize Session State ---
# Seeding "question_input" matters: the text_area widget is keyed to it.
for _key, _default in (("messages", []), ("question_input", "")):
    if _key not in st.session_state:
        st.session_state[_key] = _default
62
+
63
+
64
+ # --- Callback Functions ---
65
def handle_ask_ai():
    """Callback for the 'Ask AI' button.

    Reads the question from st.session_state.question_input, records the
    user message, asks the chatbot, records the answer, then clears the
    input. Streamlit reruns the script automatically after the callback,
    so no explicit st.rerun() is needed.
    """
    question = st.session_state.question_input  # widget value lives in state
    if question:  # ignore clicks with an empty input box
        st.session_state.messages.append({"role": "user", "content": question})

        # Generate the AI response directly in the callback; the UI may
        # briefly freeze on long generations (no spinner inside callbacks).
        try:
            answer = chatbot.answer_question(question)
        except Exception as e:
            st.error(f"Error getting answer from chatbot: {e}")
            # Optionally remove the user message if the AI failed
            # st.session_state.messages.pop()
        else:
            st.session_state.messages.append({"role": "assistant", "content": answer})

    # Clear the input state variable AFTER processing (no-op when it was empty).
    st.session_state.question_input = ""
88
+
89
+
90
def handle_clear_chat():
    """Callback for the 'Clear Chat' button: reset the chatbot's conversation
    history, the displayed messages, and the input box. Streamlit reruns
    automatically after the callback."""
    chatbot.clear_conv_history()
    st.session_state.messages = []
    st.session_state.question_input = ""
96
 
97
# --- Chat UI ---
st.title("📄 Zega AI Sales Agent")

def display_message(role, content):
    """Render one chat message, replacing `[name.png]` tags with the matching
    image from the ZegaPos folder (or a textual fallback when missing).

    Args:
        role: chat role passed to st.chat_message ("user" / "assistant").
        content: message text, possibly containing `[something.png]` tags.
    """
    # Fix: the previous version also ran re.findall to collect image_tags,
    # but that result was never used (dead code from an older approach) —
    # the per-part re.match below is what actually detects tags.
    tag_pattern = re.compile(r"\[(.*?\.png)\]")
    with st.chat_message(role):
        # Split on the tags but KEEP them (capturing group) so text and
        # images render in their original order.
        for part in re.split(r"(\[.*?\.png\])", content):
            if not part:  # re.split yields empty strings around edge tags
                continue
            match = tag_pattern.match(part)
            if match:
                image_filename = match.group(1)
                # Images are expected in a subfolder relative to the script.
                image_folder = "ZegaPos"
                if not os.path.isdir(image_folder):
                    st.warning(f"Image folder '{image_folder}' not found.")
                    st.markdown(f"_{image_filename}_")  # filename as text fallback
                    continue  # skip trying to display the image
                image_path = os.path.join(image_folder, image_filename)
                if os.path.exists(image_path):
                    st.image(image_path, use_container_width=True)
                else:
                    st.markdown(f"⚠️ Image `{image_filename}` not found at `{image_path}`.")
            else:
                # Otherwise, display text
                st.markdown(part)
131
 
132
+
133
# --- Display Chat History ---
# Runs on every rerun, rendering the current state of the message list.
for message in st.session_state.messages:
    display_message(message["role"], message["content"])

# --- User Input ---
# The widget's value is controlled by st.session_state.question_input via key=.
st.text_area(
    "Ask a question about Zega AI:",
    height=100,
    key="question_input",
)

# --- Chat Actions ---
# The on_click callbacks do all the work; Streamlit reruns afterwards, so no
# processing logic is needed below the buttons.
ask_col, clear_col = st.columns([3, 1])
with ask_col:
    st.button("💬 Ask AI", on_click=handle_ask_ai)
with clear_col:
    st.button("🗑️ Clear Chat", on_click=handle_clear_chat)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
query_chat.py CHANGED
@@ -3,7 +3,8 @@ import os
3
 
4
  class GeminiQanA:
5
  def __init__(self, text1: str = '', text2: str = ''):
6
- """Initializes the Gemini question-answering model with brochures and conversation history."""
 
7
  self.api_key = os.getenv("GOOGLE_API_KEY")
8
  genai.configure(api_key=self.api_key)
9
  self.text1 = text1
@@ -14,36 +15,37 @@ class GeminiQanA:
14
  def _load_model(self):
15
  """Loads the generative AI model without the conversation history (history will be passed dynamically)."""
16
  system_instruction = f'''# Role:
17
- You are a sales agent responsible for assisting customers by answering questions about our team’s capabilities and the projects we offer. You have access to two brochures that detail the available projects and their features. Your goal is to provide accurate and honest responses based solely on the information within these brochures.
18
 
19
  ---
20
 
21
  ## Guidelines for Responses:
22
 
23
  ### 1. Accuracy & Honesty
24
- - Only provide responses based on the brochures.
25
  - Do not overstate or exaggerate the capabilities of the team.
26
- - If information is not available in the brochures, do not speculate—politely inform the customer that the requested details are not available.
 
27
 
28
  ### 2. Answering Questions About the Team’s Capabilities & Projects
29
- - When a customer asks about what our team can do, provide information only from the brochures.
30
- - If asked about past projects, refer only to those explicitly mentioned in the brochures.
31
- - If the customer asks for additional details not found in the brochures, politely inform them that you can only share the information available.
32
 
33
  ### 3. Providing Solutions to Customer Problems
34
- - If a customer presents a problem, check if a project in the brochures provides a direct solution.
35
  - If a matching project exists, explain how it can address their problem.
36
  - If an alternative but related project exists, suggest it as a partial solution, explaining its limitations.
37
  - If no project can help, politely state that no suitable solution is available.
38
 
39
  ### 4. Handling Image Tags in Responses
40
- - Some solutions in the brochures may be followed by an image tag in the format `[solution_name.png]`.
41
  - If an image tag appears in the provided brochure text, **include it exactly as it is when presenting the solution**.
42
- - Do not generate new image tags—only use them when they appear in the brochures.
43
 
44
  ### 5. What Not to Do
45
  - Do not create new information or assume additional capabilities.
46
- - Do not make guarantees beyond what is stated in the brochures.
47
  - Do not offer speculative solutions that are not explicitly supported by the documents.
48
 
49
  ---
@@ -56,12 +58,12 @@ You are a sales agent responsible for assisting customers by answering questions
56
 
57
  ---
58
 
59
- ## Brochure Content:
60
 
61
- ### First Brochure:
62
  {self.text1}
63
 
64
- ### Second Brochure:
65
  {self.text2}
66
  '''
67
  return genai.GenerativeModel("gemini-2.0-flash", system_instruction=system_instruction)
 
3
 
4
  class GeminiQanA:
5
  def __init__(self, text1: str = '', text2: str = ''):
6
+ """Initializes the Gemini question-answering model with texts
7
+ and conversation history."""
8
  self.api_key = os.getenv("GOOGLE_API_KEY")
9
  genai.configure(api_key=self.api_key)
10
  self.text1 = text1
 
15
  def _load_model(self):
16
  """Loads the generative AI model without the conversation history (history will be passed dynamically)."""
17
  system_instruction = f'''# Role:
18
+ You are a sales agent responsible for assisting customers by answering questions about our team’s capabilities and the projects we offer. You have access to two texts that detail the available projects and their features. Your goal is to provide accurate and honest responses based solely on the information within these texts.
19
 
20
  ---
21
 
22
  ## Guidelines for Responses:
23
 
24
  ### 1. Accuracy & Honesty
25
+ - Only provide responses based on the texts.
26
  - Do not overstate or exaggerate the capabilities of the team.
27
+ - If information is not available in the texts, do not speculate—politely inform the customer that the requested details are not available.
28
+ - Avoid saying that the answers are taken from a text and pretend you know them by heart
29
 
30
  ### 2. Answering Questions About the Team’s Capabilities & Projects
31
+ - When a customer asks about what our team can do, provide information only from the texts.
32
+ - If asked about past projects, refer only to those explicitly mentioned in the texts.
33
+ - If the customer asks for additional details not found in the texts, politely inform them that you can only share the information available.
34
 
35
  ### 3. Providing Solutions to Customer Problems
36
+ - If a customer presents a problem, check if a project in the texts provides a direct solution.
37
  - If a matching project exists, explain how it can address their problem.
38
  - If an alternative but related project exists, suggest it as a partial solution, explaining its limitations.
39
  - If no project can help, politely state that no suitable solution is available.
40
 
41
  ### 4. Handling Image Tags in Responses
42
+ - Some solutions in the texts may be followed by an image tag in the format `[solution_name.png]`.
43
  - If an image tag appears in the provided brochure text, **include it exactly as it is when presenting the solution**.
44
+ - Do not generate new image tags—only use them when they appear in the texts.
45
 
46
  ### 5. What Not to Do
47
  - Do not create new information or assume additional capabilities.
48
+ - Do not make guarantees beyond what is stated in the texts.
49
  - Do not offer speculative solutions that are not explicitly supported by the documents.
50
 
51
  ---
 
58
 
59
  ---
60
 
61
+ ## Text Content:
62
 
63
+ ### First text:
64
  {self.text1}
65
 
66
+ ### Second text:
67
  {self.text2}
68
  '''
69
  return genai.GenerativeModel("gemini-2.0-flash", system_instruction=system_instruction)