AlfredHarun committed on
Commit
f30d224
·
verified ·
1 Parent(s): 582819d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +297 -0
app.py CHANGED
@@ -11,9 +11,306 @@ from autogen import AssistantAgent, UserProxyAgent
11
 
12
  # Import DeepSeek API or appropriate SDK here
13
  from deepseek_api import DeepSeekEmbeddings # Hypothetical import
 
 
 
 
 
 
 
 
 
 
 
 
14
  from langchain_community.vectorstores import Chroma
15
  from langchain.docstore.document import Document
16
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
17
  # Load environment variables
18
  load_dotenv()
19
  DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY") # Use DeepSeek API Key
 
11
 
12
  # Import DeepSeek API or appropriate SDK here
13
  from deepseek_api import DeepSeekEmbeddings # Hypothetical import
14
+ from langchain_community.vecimport os
15
+ import json
16
+ import random
17
+ import logging
18
+ from typing import List, Dict, Any
19
+
20
+ import streamlit as st
21
+ from dotenv import load_dotenv
22
+ import autogen
23
+ from autogen import AssistantAgent, UserProxyAgent
24
+
25
+ from langchain_community.embeddings import OpenAIEmbeddings
26
  from langchain_community.vectorstores import Chroma
27
  from langchain.docstore.document import Document
28
 
29
+
30
+ load_dotenv()
31
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
32
+
33
+
34
+ logging.basicConfig(level=logging.INFO)
35
+ logger = logging.getLogger(__name__)
36
+ st.set_page_config(page_title="IT Support System (RAG)", layout="centered")
37
+
38
+ # Initialize session memory
39
+ if "chat_history" not in st.session_state:
40
+ st.session_state.chat_history = []
41
+
42
+ #Knowledge Base Setup
43
+
44
+ kb_path = os.path.join(os.path.dirname(__file__), 'kb.json')
45
+ with open(kb_path, encoding='utf-8') as f:
46
+ kb_entries = json.load(f)
47
+
48
+ docs: List[Document] = []
49
+ for entry in kb_entries:
50
+ docs.append(Document(
51
+ page_content=entry['answer'],
52
+ metadata={
53
+ 'id': entry.get('id'),
54
+ 'question': entry.get('question')
55
+ }
56
+ ))
57
+
58
+ embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
59
+ vectordb = Chroma.from_documents(
60
+ documents=docs,
61
+ embedding=embeddings,
62
+ persist_directory='db/chroma'
63
+ )
64
+ retriever = vectordb.as_retriever(search_kwargs={"k": 3})
65
+
66
+ def retrieve_docs(query: str) -> str:
67
+ """Retrieve relevant documentation from the knowledge base"""
68
+ hits = retriever.get_relevant_documents(query)
69
+ if not hits:
70
+ return "No relevant documentation found."
71
+
72
+ results = []
73
+ for doc in hits:
74
+ question = doc.metadata.get('question', 'FAQ')
75
+ results.append(f"**Q: {question}**\nA: {doc.page_content}")
76
+ return "\n\n".join(results)
77
+
78
+ def escalate_ticket(query: str, analysis: str = "") -> str:
79
+ """Create a ticket for issues that need human intervention"""
80
+ ticket_id = f"TICKET-{random.randint(1000, 9999)}"
81
+ description = f"User Query: {query}\nAnalysis: {analysis}"
82
+ # In a real system, you would send this to a ticketing system like JIRA, ServiceNow, etc.
83
+ logger.info(f"Escalating issue with ticket {ticket_id}: {description}")
84
+ return f"Escalated issue. Created ticket {ticket_id}. A support technician will contact you shortly."
85
+
86
+ # LLM Configuration
87
+ llm_config = {
88
+ "config_list": [{
89
+ "model": "gpt-4",
90
+ "api_key": OPENAI_API_KEY,
91
+ }],
92
+ "seed": 42,
93
+ "temperature": 0.5,
94
+ }
95
+
96
+ # Agent Definitions
97
+ master_agent = AssistantAgent(
98
+ name="Master",
99
+ llm_config=llm_config,
100
+ system_message="""
101
+ You are the Master Agent that orchestrates the IT support workflow:
102
+ 1. First determine if the query is IT-related. If not, provide a direct response explaining your limitations.
103
+ 2. For IT-related queries, pass to the Planning Agent for execution plan development
104
+ 3. Receive and review the complete workflow results from all agents
105
+ 4. Provide a comprehensive yet concise final response to the user
106
+
107
+ Only handle one query at a time through the complete workflow.
108
+ """
109
+ )
110
+
111
+ planning_agent = AssistantAgent(
112
+ name="Planning",
113
+ llm_config=llm_config,
114
+ system_message="""
115
+ You are the Planning Agent responsible for:
116
+ 1. Validating if the user query is clear and complete
117
+ 2. Refining the query if needed for better processing
118
+ 3. Creating a structured execution plan with clear steps
119
+ 4. Categorizing the IT issue type (hardware, software, network, access, etc.)
120
+
121
+ Provide your analysis as a structured output with sections for:
122
+ - Query Validation
123
+ - Issue Category
124
+ - Execution Plan
125
+
126
+ Always end your message with: "Forwarding to Analysis Agent"
127
+ """
128
+ )
129
+
130
+ analysis_agent = AssistantAgent(
131
+ name="Analysis",
132
+ llm_config=llm_config,
133
+ system_message="""
134
+ You are the Analysis Agent responsible for:
135
+ 1. Identifying key entities in the user query (devices, software, errors, etc.)
136
+ 2. Determining severity level (Low, Medium, High, Critical)
137
+ 3. Extracting technical details mentioned in the query
138
+ 4. Structuring this information for the Resolution phase
139
+
140
+ Provide your analysis as structured output with sections for:
141
+ - Key Entities
142
+ - Technical Details
143
+ - Severity
144
+ - Analysis Summary
145
+
146
+ Always end your message with: "Forwarding to Resolution Agent"
147
+ """
148
+ )
149
+
150
+ resolution_agent = AssistantAgent(
151
+ name="Resolution",
152
+ llm_config=llm_config,
153
+ system_message="""
154
+ You are the Resolution Agent responsible for:
155
+ 1. Using available tools to retrieve relevant knowledge base articles or documentation
156
+ 2. Applying the retrieved information to the specific user issue
157
+ 3. Providing clear step-by-step instructions for resolution
158
+ 4. Determining if the issue requires escalation to a human technician
159
+
160
+ If you can resolve the issue:
161
+ - Provide clear instructions
162
+ - Include any relevant documentation references
163
+ - End with "RESOLUTION COMPLETE"
164
+
165
+ If escalation is needed:
166
+ - Explain why the issue requires escalation
167
+ - Provide details that would help a technician understand the issue
168
+ - End with "ESCALATION NEEDED"
169
+ """,
170
+ function_map={"retrieve_docs": retrieve_docs}
171
+ )
172
+
173
+ escalation_agent = AssistantAgent(
174
+ name="Escalation",
175
+ llm_config=llm_config,
176
+ system_message="""
177
+ You are the Escalation Agent responsible for:
178
+ 1. Creating support tickets for issues that cannot be resolved automatically
179
+ 2. Providing the user with ticket tracking information
180
+ 3. Setting expectations for next steps
181
+ 4. Compiling all analysis from previous agents to assist human technicians
182
+
183
+ Format your response to be professional and reassuring to the user.
184
+ Always include the ticket ID and expected follow-up timeframe.
185
+ """,
186
+ function_map={"escalate_ticket": escalate_ticket}
187
+ )
188
+
189
+
190
+ def handle_it_query(query: str) -> str:
191
+ """Process IT queries through the multi-agent workflow"""
192
+ query = query.strip()
193
+ if not query:
194
+ return "Please enter an IT question or issue."
195
+
196
+ workflow_logs = {"query": query}
197
+
198
+ try:
199
+ # First determine if it's an IT issue through the Master Agent
200
+ master_proxy = UserProxyAgent(
201
+ name="MasterProxy", human_input_mode="NEVER", code_execution_config=False
202
+ )
203
+ master_prompt = f"User query: '{query}'. First, determine if this is an IT-related issue."
204
+ master_proxy.initiate_chat(master_agent, message=master_prompt, max_turns=1)
205
+ initial_assessment = master_proxy.chat_messages[master_agent][-1]["content"]
206
+ workflow_logs["initial_assessment"] = initial_assessment
207
+
208
+ # If not IT-related, return the response directly
209
+ if "NOT IT-RELATED" in initial_assessment.upper():
210
+ return initial_assessment
211
+
212
+ # Planning
213
+ plan_proxy = UserProxyAgent(
214
+ name="PlanningProxy", human_input_mode="NEVER", code_execution_config=False
215
+ )
216
+ plan_proxy.initiate_chat(planning_agent, message=query, max_turns=1)
217
+ planning_output = plan_proxy.chat_messages[planning_agent][-1]["content"]
218
+ workflow_logs["planning"] = planning_output
219
+ logger.info(f"Planning completed: {len(planning_output)} chars")
220
+
221
+ # 2: Analysis
222
+ analysis_proxy = UserProxyAgent(
223
+ name="AnalysisProxy", human_input_mode="NEVER", code_execution_config=False
224
+ )
225
+ analysis_proxy.initiate_chat(analysis_agent, message=planning_output, max_turns=1)
226
+ analysis_output = analysis_proxy.chat_messages[analysis_agent][-1]["content"]
227
+ workflow_logs["analysis"] = analysis_output
228
+ logger.info(f"Analysis completed: {len(analysis_output)} chars")
229
+
230
+ # 3: Resolution
231
+ res_proxy = UserProxyAgent(
232
+ name="ResolutionProxy", human_input_mode="NEVER", code_execution_config=False
233
+ )
234
+ resolution_input = f"User Query: {query}\n\nPlanning: {planning_output}\n\nAnalysis: {analysis_output}"
235
+ res_proxy.initiate_chat(resolution_agent, message=resolution_input, max_turns=1)
236
+ resolution_output = res_proxy.chat_messages[resolution_agent][-1]["content"]
237
+ workflow_logs["resolution"] = resolution_output
238
+ logger.info(f"Resolution completed: {len(resolution_output)} chars")
239
+
240
+ # Escalation if needed
241
+ escalation_output = None
242
+ if "ESCALATION NEEDED" in resolution_output.upper():
243
+ esc_proxy = UserProxyAgent(
244
+ name="EscalationProxy", human_input_mode="NEVER", code_execution_config=False
245
+ )
246
+ escalation_input = f"Original Query: {query}\n\nAnalysis: {analysis_output}\n\nResolution Attempt: {resolution_output}"
247
+ esc_proxy.initiate_chat(escalation_agent, message=escalation_input, max_turns=1)
248
+ escalation_output = esc_proxy.chat_messages[escalation_agent][-1]["content"]
249
+ workflow_logs["escalation"] = escalation_output
250
+ logger.info(f"Escalation completed: {len(escalation_output)} chars")
251
+
252
+ # Master summarizes
253
+ final_master_proxy = UserProxyAgent(
254
+ name="FinalMasterProxy", human_input_mode="NEVER", code_execution_config=False
255
+ )
256
+
257
+ if escalation_output:
258
+ final_prompt = (
259
+ f"Complete workflow results for query: '{query}':\n\n"
260
+ f"Planning: {planning_output}\n\n"
261
+ f"Analysis: {analysis_output}\n\n"
262
+ f"Resolution: {resolution_output}\n\n"
263
+ f"Escalation: {escalation_output}\n\n"
264
+ f"Synthesize these results into a clear, helpful response for the user."
265
+ )
266
+ else:
267
+ final_prompt = (
268
+ f"Complete workflow results for query: '{query}':\n\n"
269
+ f"Planning: {planning_output}\n\n"
270
+ f"Analysis: {analysis_output}\n\n"
271
+ f"Resolution: {resolution_output}\n\n"
272
+ f"Synthesize these results into a clear, helpful response for the user."
273
+ )
274
+
275
+ final_master_proxy.initiate_chat(master_agent, message=final_prompt, max_turns=1)
276
+ final_response = final_master_proxy.chat_messages[master_agent][-1]["content"]
277
+ workflow_logs["final_response"] = final_response
278
+
279
+ # Save to memory
280
+ st.session_state.chat_history.append({
281
+ "user": query,
282
+ "assistant": final_response,
283
+ "workflow_logs": workflow_logs
284
+ })
285
+
286
+ return final_response
287
+
288
+ except Exception as e:
289
+ logger.error(f"Error in workflow: {e}", exc_info=True)
290
+ return f"An error occurred during processing: {str(e)}\n\nPlease try rephrasing your question."
291
+
292
+
293
+ st.title("AI Help Desk")
294
+ st.write("Ask any IT support question and our multi-agent system will assist you.")
295
+
296
+ with st.form(key="query_form", clear_on_submit=True):
297
+ user_input = st.text_area("Describe your IT issue:", height=100)
298
+ show_logs = st.checkbox("Show workflow details", value=False)
299
+ submitted = st.form_submit_button("Submit")
300
+
301
+ if submitted:
302
+ if not user_input:
303
+ st.error("Please type a message before submitting.")
304
+ else:
305
+ with st.spinner("Processing your request through our agent workflow..."):
306
+ response = handle_it_query(user_input)
307
+
308
+ st.markdown("### Response")
309
+ st.write(response)
310
+
311
+ from langchain_community.vectorstores import Chroma
312
+ from langchain.docstore.document import Document
313
+
314
  # Load environment variables
315
  load_dotenv()
316
  DEEPSEEK_API_KEY = os.getenv("DEEPSEEK_API_KEY") # Use DeepSeek API Key