AlfredHarun committed on
Commit
f286be2
·
verified ·
1 Parent(s): 60c9c4a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +35 -13
app.py CHANGED
@@ -2,7 +2,7 @@ import os
2
  import json
3
  import random
4
  import logging
5
- from typing import List, Dict, Any
6
  import streamlit as st
7
  from dotenv import load_dotenv
8
  import autogen
@@ -49,7 +49,6 @@ def retrieve_docs(query: str) -> str:
49
  hits = retriever.get_relevant_documents(query)
50
  if not hits:
51
  return "No relevant documentation found."
52
-
53
  results = []
54
  for doc in hits:
55
  question = doc.metadata.get('question', 'FAQ')
@@ -62,7 +61,7 @@ def escalate_ticket(query: str, analysis: str = "") -> str:
62
  logger.info(f"Escalating issue with ticket {ticket_id}: {description}")
63
  return f"Escalated issue. Created ticket {ticket_id}. A support technician will contact you shortly."
64
 
65
- # LLM Configuration (for open-source model served locally or from Hugging Face)
66
  llm_config = {
67
  "config_list": [{
68
  "model": "llama3", # or mistral, gemma, etc.
@@ -166,8 +165,7 @@ Always include the ticket ID and expected follow-up timeframe.
166
  function_map={"escalate_ticket": escalate_ticket}
167
  )
168
 
169
-
170
- def handle_it_query(query: str) -> str:
171
  query = query.strip()
172
  if not query:
173
  return "Please enter an IT question or issue."
@@ -181,6 +179,7 @@ def handle_it_query(query: str) -> str:
181
  master_prompt = f"User query: '{query}'. First, determine if this is an IT-related issue."
182
  master_proxy.initiate_chat(master_agent, message=master_prompt, max_turns=1)
183
  initial_assessment = master_proxy.chat_messages[master_agent][-1]["content"]
 
184
  workflow_logs["initial_assessment"] = initial_assessment
185
 
186
  if "NOT IT-RELATED" in initial_assessment.upper():
@@ -191,6 +190,7 @@ def handle_it_query(query: str) -> str:
191
  )
192
  plan_proxy.initiate_chat(planning_agent, message=query, max_turns=1)
193
  planning_output = plan_proxy.chat_messages[planning_agent][-1]["content"]
 
194
  workflow_logs["planning"] = planning_output
195
 
196
  analysis_proxy = UserProxyAgent(
@@ -198,6 +198,7 @@ def handle_it_query(query: str) -> str:
198
  )
199
  analysis_proxy.initiate_chat(analysis_agent, message=planning_output, max_turns=1)
200
  analysis_output = analysis_proxy.chat_messages[analysis_agent][-1]["content"]
 
201
  workflow_logs["analysis"] = analysis_output
202
 
203
  res_proxy = UserProxyAgent(
@@ -206,6 +207,7 @@ def handle_it_query(query: str) -> str:
206
  resolution_input = f"User Query: {query}\n\nPlanning: {planning_output}\n\nAnalysis: {analysis_output}"
207
  res_proxy.initiate_chat(resolution_agent, message=resolution_input, max_turns=1)
208
  resolution_output = res_proxy.chat_messages[resolution_agent][-1]["content"]
 
209
  workflow_logs["resolution"] = resolution_output
210
 
211
  escalation_output = None
@@ -216,6 +218,7 @@ def handle_it_query(query: str) -> str:
216
  escalation_input = f"Original Query: {query}\n\nAnalysis: {analysis_output}\n\nResolution Attempt: {resolution_output}"
217
  esc_proxy.initiate_chat(escalation_agent, message=escalation_input, max_turns=1)
218
  escalation_output = esc_proxy.chat_messages[escalation_agent][-1]["content"]
 
219
  workflow_logs["escalation"] = escalation_output
220
 
221
  final_master_proxy = UserProxyAgent(
@@ -242,13 +245,21 @@ def handle_it_query(query: str) -> str:
242
 
243
  final_master_proxy.initiate_chat(master_agent, message=final_prompt, max_turns=1)
244
  final_response = final_master_proxy.chat_messages[master_agent][-1]["content"]
 
245
  workflow_logs["final_response"] = final_response
246
 
247
- st.session_state.chat_history.append({
248
- "user": query,
249
- "assistant": final_response,
250
- "workflow_logs": workflow_logs
251
- })
 
 
 
 
 
 
 
252
 
253
  return final_response
254
 
@@ -257,6 +268,7 @@ def handle_it_query(query: str) -> str:
257
  return f"An error occurred during processing: {str(e)}\n\nPlease try rephrasing your question."
258
 
259
 
 
260
  st.title("AI Help Desk")
261
  st.write("Ask any IT support question and our multi-agent system will assist you.")
262
 
@@ -270,6 +282,16 @@ with st.form(key="query_form", clear_on_submit=True):
270
  st.error("Please type a message before submitting.")
271
  else:
272
  with st.spinner("Processing your request through our agent workflow..."):
273
- response = handle_it_query(user_input)
274
-
275
- st.markdown("### Response")
 
 
 
 
 
 
 
 
 
 
 
2
  import json
3
  import random
4
  import logging
5
+ from typing import List
6
  import streamlit as st
7
  from dotenv import load_dotenv
8
  import autogen
 
49
  hits = retriever.get_relevant_documents(query)
50
  if not hits:
51
  return "No relevant documentation found."
 
52
  results = []
53
  for doc in hits:
54
  question = doc.metadata.get('question', 'FAQ')
 
61
  logger.info(f"Escalating issue with ticket {ticket_id}: {description}")
62
  return f"Escalated issue. Created ticket {ticket_id}. A support technician will contact you shortly."
63
 
64
+ # LLM Configuration (local/open-source LLM API)
65
  llm_config = {
66
  "config_list": [{
67
  "model": "llama3", # or mistral, gemma, etc.
 
165
  function_map={"escalate_ticket": escalate_ticket}
166
  )
167
 
168
+ def handle_it_query(query: str, show_logs: bool = False) -> str:
 
169
  query = query.strip()
170
  if not query:
171
  return "Please enter an IT question or issue."
 
179
  master_prompt = f"User query: '{query}'. First, determine if this is an IT-related issue."
180
  master_proxy.initiate_chat(master_agent, message=master_prompt, max_turns=1)
181
  initial_assessment = master_proxy.chat_messages[master_agent][-1]["content"]
182
+ logger.info(f"Master Agent Response: {initial_assessment}")
183
  workflow_logs["initial_assessment"] = initial_assessment
184
 
185
  if "NOT IT-RELATED" in initial_assessment.upper():
 
190
  )
191
  plan_proxy.initiate_chat(planning_agent, message=query, max_turns=1)
192
  planning_output = plan_proxy.chat_messages[planning_agent][-1]["content"]
193
+ logger.info(f"Planning Agent Response: {planning_output}")
194
  workflow_logs["planning"] = planning_output
195
 
196
  analysis_proxy = UserProxyAgent(
 
198
  )
199
  analysis_proxy.initiate_chat(analysis_agent, message=planning_output, max_turns=1)
200
  analysis_output = analysis_proxy.chat_messages[analysis_agent][-1]["content"]
201
+ logger.info(f"Analysis Agent Response: {analysis_output}")
202
  workflow_logs["analysis"] = analysis_output
203
 
204
  res_proxy = UserProxyAgent(
 
207
  resolution_input = f"User Query: {query}\n\nPlanning: {planning_output}\n\nAnalysis: {analysis_output}"
208
  res_proxy.initiate_chat(resolution_agent, message=resolution_input, max_turns=1)
209
  resolution_output = res_proxy.chat_messages[resolution_agent][-1]["content"]
210
+ logger.info(f"Resolution Agent Response: {resolution_output}")
211
  workflow_logs["resolution"] = resolution_output
212
 
213
  escalation_output = None
 
218
  escalation_input = f"Original Query: {query}\n\nAnalysis: {analysis_output}\n\nResolution Attempt: {resolution_output}"
219
  esc_proxy.initiate_chat(escalation_agent, message=escalation_input, max_turns=1)
220
  escalation_output = esc_proxy.chat_messages[escalation_agent][-1]["content"]
221
+ logger.info(f"Escalation Agent Response: {escalation_output}")
222
  workflow_logs["escalation"] = escalation_output
223
 
224
  final_master_proxy = UserProxyAgent(
 
245
 
246
  final_master_proxy.initiate_chat(master_agent, message=final_prompt, max_turns=1)
247
  final_response = final_master_proxy.chat_messages[master_agent][-1]["content"]
248
+ logger.info(f"Final Master Agent Response: {final_response}")
249
  workflow_logs["final_response"] = final_response
250
 
251
+ # Store chat history with or without logs based on user preference
252
+ if show_logs:
253
+ st.session_state.chat_history.append({
254
+ "user": query,
255
+ "assistant": final_response,
256
+ "workflow_logs": workflow_logs
257
+ })
258
+ else:
259
+ st.session_state.chat_history.append({
260
+ "user": query,
261
+ "assistant": final_response,
262
+ })
263
 
264
  return final_response
265
 
 
268
  return f"An error occurred during processing: {str(e)}\n\nPlease try rephrasing your question."
269
 
270
 
271
+
272
  st.title("AI Help Desk")
273
  st.write("Ask any IT support question and our multi-agent system will assist you.")
274
 
 
282
  st.error("Please type a message before submitting.")
283
  else:
284
  with st.spinner("Processing your request through our agent workflow..."):
285
+ response = handle_it_query(user_input, show_logs=show_logs)
286
+
287
+ st.markdown("### Response")
288
+ st.write(response)
289
+
290
+ if show_logs and st.session_state.chat_history:
291
+ st.markdown("---")
292
+ st.markdown("### Workflow Logs")
293
+ logs = st.session_state.chat_history[-1].get("workflow_logs", {})
294
+ for step, content in logs.items():
295
+ st.markdown(f"**{step.capitalize().replace('_', ' ')}:**")
296
+ st.text(content)
297
+ st.markdown("---")