Pulastya0 committed on
Commit e9a3932 · verified · 1 Parent(s): cd23e2f

Update agent_langchain.py

Files changed (1)
  1. agent_langchain.py +283 -240
agent_langchain.py CHANGED
@@ -1,282 +1,325 @@
  import os
-
- # SET CACHE PATHS BEFORE ANY IMPORTS
- os.environ["TOKENIZERS_PARALLELISM"] = "false"
- os.environ["TRANSFORMERS_CACHE"] = "/tmp/transformers"
- os.environ["HF_HOME"] = "/tmp/huggingface"
- os.environ["SENTENCE_TRANSFORMERS_HOME"] = "/tmp/sentence_transformers"
- os.environ["TORCH_HOME"] = "/tmp/torch"
-
- import requests
- import torch
- import time
- from transformers import AutoTokenizer, AutoModelForSequenceClassification
- import numpy as np
- from sentence_transformers import SentenceTransformer
  import chromadb
  from chromadb.config import Settings
- from langchain_google_genai import ChatGoogleGenerativeAI
- from langchain.agents import initialize_agent, Tool, AgentType
- from langchain.memory import ConversationBufferMemory
- import threading
-
- # ==============================================================
- # 🌎 ENVIRONMENT & GLOBAL SETTINGS
- # ==============================================================
- GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
- ROUTING_URL = os.environ.get("ROUTING_URL")  # Space 2 URL
- SPACE_URL = os.environ.get("SPACE_URL", "http://localhost:7860")

- # ==============================================================
- # 🏷️ LABEL DICTIONARY
- # ==============================================================
- LABEL_DICTIONARY = {
-     "I1": "Low Impact",
-     "I2": "Medium Impact",
-     "I3": "High Impact",
-     "I4": "Critical Impact",
-     "U1": "Low Urgency",
-     "U2": "Medium Urgency",
-     "U3": "High Urgency",
-     "U4": "Critical Urgency",
-     "T1": "Information",
-     "T2": "Incident",
-     "T3": "Problem",
-     "T4": "Request",
-     "T5": "Question"
- }

- # ==============================================================
- # 🤖 LOAD CLASSIFICATION MODEL
- # ==============================================================
- clf_model_name = "DavinciTech/BERT_Categorizer"
- clf_tokenizer = AutoTokenizer.from_pretrained(
-     clf_model_name,
-     cache_dir="/tmp/transformers"
- )
- clf_model = AutoModelForSequenceClassification.from_pretrained(
-     clf_model_name,
-     cache_dir="/tmp/transformers"
  )

- def classify_ticket(text):
-     """Classify the ticket into Impact, Urgency, and Type."""
-     inputs = clf_tokenizer(text, return_tensors="pt", truncation=True)
-     outputs = clf_model(**inputs)
-     logits = outputs.logits[0]
-
-     impact_idx = torch.argmax(logits[:4]).item() + 1
-     urgency_idx = torch.argmax(logits[4:8]).item() + 1
-     type_idx = torch.argmax(logits[8:]).item() + 1
-
-     return {
-         "impact": LABEL_DICTIONARY[f"I{impact_idx}"],
-         "urgency": LABEL_DICTIONARY[f"U{urgency_idx}"],
-         "type": LABEL_DICTIONARY[f"T{type_idx}"]
-     }
-
- # ==============================================================
- # 🧭 ROUTING FUNCTION (Space 2)
- # ==============================================================
- def call_routing(text, retries=3, delay=5):
-     """Call Space 2 routing endpoint and get department only."""
-     url = ROUTING_URL if ROUTING_URL else f"{SPACE_URL}/route"
-
-     for attempt in range(retries):
-         try:
-             resp = requests.post(url, json={"text": text}, timeout=30)
-             resp.raise_for_status()
-             data = resp.json()
-             return data.get("department", "General IT")
-         except Exception as e:
-             print(f"Routing attempt {attempt+1} failed: {e}")
-             if attempt < retries - 1:
-                 time.sleep(delay)
-             else:
-                 return "General IT"

- # ==============================================================
- # 📚 KNOWLEDGE BASE SETUP
- # ==============================================================
  CHROMA_PATH = "/tmp/chroma"
  COLLECTION_NAME = "knowledge_base"

- # Shared KB collection reference
- kb_collection = None
- kb_lock = threading.Lock()
-
- # Initialize encoder once
- encoder = SentenceTransformer("all-MiniLM-L6-v2", cache_folder="/tmp/sentence_transformers")

  def get_kb_collection():
-     """Get or create the KB collection."""
      global kb_collection
      if kb_collection is None:
-         with kb_lock:
-             if kb_collection is None:
-                 try:
-                     chroma_client = chromadb.PersistentClient(
-                         path=CHROMA_PATH,
-                         settings=Settings(
-                             anonymized_telemetry=False,
-                             allow_reset=True
-                         )
-                     )
-                     kb_collection = chroma_client.get_or_create_collection(COLLECTION_NAME)
-                 except Exception as e:
-                     print(f"⚠️ Could not get KB collection: {e}")
      return kb_collection

-
- def query_kb(text: str, top_k: int = 1):
-     """
-     Query the Chroma knowledge base using SentenceTransformer embeddings.
-     Returns answer + confidence.
-     """
      collection = get_kb_collection()

-     if not collection:
-         return {"answer": None, "confidence": 0.0, "metadata": {}}

      try:
-         # Check if collection has data
-         count = collection.count()
-         if count == 0:
-             return {"answer": None, "confidence": 0.0, "metadata": {}}
-
-         # Embed the query
-         query_embedding = encoder.encode([text])[0].tolist()
-
-         # Query Chroma by embeddings
-         results = collection.query(
              query_embeddings=[query_embedding],
-             n_results=top_k,
-             include=["documents", "distances", "metadatas", "embeddings"]
          )
-
-         if not results or not results.get("documents") or len(results["documents"][0]) == 0:
-             return {"answer": None, "confidence": 0.0, "metadata": {}}
-
-         # Extract top document and metadata
-         answer = results["documents"][0][0]
-         metadata = results["metadatas"][0][0] if results.get("metadatas") and results["metadatas"][0] else {}

-         # Calculate confidence from distance
-         # ChromaDB returns L2 (Euclidean) distance by default - lower is better
-         if results.get("distances") and len(results["distances"][0]) > 0:
-             distance = results["distances"][0][0]
-             # Convert L2 distance to similarity score (0-1 range)
-             # For normalized embeddings, max L2 distance is ~2.0
-             confidence = max(0.0, 1.0 - (distance / 2.0))
-         else:
-             # Fallback: compute cosine similarity if embeddings are available
-             if results.get("embeddings") and len(results["embeddings"][0]) > 0:
-                 stored_embedding = np.array(results["embeddings"][0][0])
-                 query_vec = np.array(query_embedding)
-                 confidence = float(np.dot(query_vec, stored_embedding) /
-                                    (np.linalg.norm(query_vec) * np.linalg.norm(stored_embedding) + 1e-8))
-             else:
-                 confidence = 0.5
-
          return {
-             "answer": answer,
-             "confidence": round(float(confidence), 3),
-             "metadata": metadata
          }

      except Exception as e:
-         print(f"⚠️ KB query failed: {e}")
-         import traceback
-         traceback.print_exc()
-         return {"answer": None, "confidence": 0.0, "metadata": {}}
-
-
- # ==============================================================
- # 🧠 GEMINI LLM (Official LangChain Integration)
- # ==============================================================
- llm = ChatGoogleGenerativeAI(
-     model="gemini-2.0-flash-exp",
-     temperature=0.3,
-     google_api_key=GEMINI_API_KEY
- )
-
- # ==============================================================
- # 🧰 DEFINE LANGCHAIN TOOLS
- # ==============================================================
- tools = [
-     Tool(
-         name="TicketClassifier",
-         func=lambda text: classify_ticket(text),
-         description="Classifies the ticket into impact, urgency, and type. Mandatory tool."
-     ),
-     Tool(
-         name="RoutingTool",
-         func=lambda text: call_routing(text),
-         description="Determines which department should handle the ticket (via Space 2). Mandatory tool."
-     ),
-     Tool(
-         name="KnowledgeBaseTool",
-         func=lambda text: query_kb(text)["answer"],
-         description="Searches the KB for relevant solutions. Returns a descriptive answer."
      )
- ]
-
- # ==============================================================
- # 💬 MEMORY & AGENT INITIALIZATION
- # ==============================================================
- memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
-
- agent_executor = initialize_agent(
-     tools=tools,
-     llm=llm,
-     agent_type=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
-     memory=memory,
-     verbose=False
- )

- # ==============================================================
- # 🧾 MAIN TICKET PROCESSOR
- # ==============================================================
- def process_ticket_langchain(ticket_text):
-     """Full pipeline: classify → route → query KB → decide KB vs Gemini."""
-     reasoning_trace = []

-     # Step 1: Classification
      classification = classify_ticket(ticket_text)
-     reasoning_trace.append(f"[Classifier] Impact: {classification['impact']}, Urgency: {classification['urgency']}, Type: {classification['type']}")
-
-     # Step 2: Routing
      department = call_routing(ticket_text)
-     reasoning_trace.append(f"[Routing] Assigned Department: {department}")
-
-     # Step 3: KB Search
      kb_result = query_kb(ticket_text)
-     reasoning_trace.append(f"[KB Search] Top Answer: '{kb_result['answer']}' (confidence: {kb_result['confidence']})")
-
-     # Step 4: KB vs LLM Decision
-     if kb_result["answer"] and kb_result["confidence"] >= 0.75:
-         final_answer = kb_result["answer"]
          status = "resolved"
-         reasoning_trace.append("[Decision] High KB confidence → ticket resolved via KB.")
      else:
-         llm_prompt = f"""
-         You are a professional IT helpdesk agent.
-         A user submitted the following ticket: "{ticket_text}"

-         Ticket classification: {classification}
-         Assigned department: {department}
-         Knowledge base result: {kb_result['answer']} (confidence: {kb_result['confidence']})

-         Please provide a clear, descriptive, and professional IT helpdesk response.
-         """
-         final_answer = llm.invoke(llm_prompt).content
          status = "escalated"
-         reasoning_trace.append("[Decision] Low KB confidence → fallback to Gemini LLM for escalation.")
-
      return {
-         "status": status,
          "classification": classification,
          "department": department,
-         "answer": final_answer,
-         "reasoning_trace": reasoning_trace
-     }
  import os
+ from typing import Optional, Dict, Any
+ from datetime import datetime
  import chromadb
  from chromadb.config import Settings
+ from sentence_transformers import SentenceTransformer
+ from langchain_groq import ChatGroq
+ from langchain_core.prompts import PromptTemplate

+ # Environment setup
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"

+ # Initialize models
+ encoder = SentenceTransformer('all-MiniLM-L6-v2')
+ llm = ChatGroq(
+     model="llama-3.3-70b-versatile",
+     temperature=0.3,
+     api_key=os.getenv("GROQ_API_KEY")
  )

+ # Global storage
+ conversations = {}
+ kb_collection = None

+ # Chroma settings
  CHROMA_PATH = "/tmp/chroma"
  COLLECTION_NAME = "knowledge_base"

+ # ===========================
+ # Knowledge Base Functions
+ # ===========================

  def get_kb_collection():
+     """Get or initialize KB collection."""
      global kb_collection
      if kb_collection is None:
+         try:
+             chroma_client = chromadb.PersistentClient(
+                 path=CHROMA_PATH,
+                 settings=Settings(anonymized_telemetry=False, allow_reset=True)
+             )
+             kb_collection = chroma_client.get_or_create_collection(COLLECTION_NAME)
+         except Exception as e:
+             print(f"Warning: Could not initialize KB collection: {e}")
      return kb_collection

+ def query_kb(query: str, n_results: int = 1) -> Dict[str, Any]:
+     """Query knowledge base for relevant information."""
      collection = get_kb_collection()

+     if not collection or collection.count() == 0:
+         return {"answer": None, "confidence": 0.0}

      try:
+         query_embedding = encoder.encode([query])[0].tolist()
+         result = collection.query(
              query_embeddings=[query_embedding],
+             n_results=n_results,
+             include=["documents", "distances", "metadatas"]
          )

+         if not result or not result.get('documents') or len(result['documents'][0]) == 0:
+             return {"answer": None, "confidence": 0.0}
+
+         best_doc = result['documents'][0][0]
+         best_distance = result['distances'][0][0] if result.get('distances') else 1.0
+         confidence = max(0.0, 1.0 - (best_distance / 2.0))
+
          return {
+             "answer": best_doc,
+             "confidence": float(confidence),
+             "metadata": result['metadatas'][0][0] if result.get('metadatas') else {}
          }
+     except Exception as e:
+         print(f"KB query error: {e}")
+         return {"answer": None, "confidence": 0.0}
+
+ # ===========================
+ # Classification & Routing
+ # ===========================
+
+ def classify_ticket(ticket_text: str) -> str:
+     """Classify ticket into priority/category."""
+     prompt = PromptTemplate(
+         input_variables=["ticket"],
+         template="""Classify this IT support ticket into ONE of these categories:
+ - password_reset
+ - software_issue
+ - hardware_problem
+ - network_issue
+ - access_request
+ - general_inquiry
+
+ Ticket: {ticket}
+
+ Category (one word only):"""
+     )
+
+     try:
+         response = llm.invoke(prompt.format(ticket=ticket_text))
+         classification = response.content.strip().lower()

+         valid_categories = ["password_reset", "software_issue", "hardware_problem",
+                             "network_issue", "access_request", "general_inquiry"]
+
+         return classification if classification in valid_categories else "general_inquiry"
      except Exception as e:
+         print(f"Classification error: {e}")
+         return "general_inquiry"
+
+ def call_routing(ticket_text: str) -> str:
+     """Route ticket to appropriate department."""
+     prompt = PromptTemplate(
+         input_variables=["ticket"],
+         template="""Route this IT ticket to the correct department. Choose ONE:
+ - IT Support
+ - Network Team
+ - Security Team
+ - Hardware Team
+ - Access Management
+
+ Ticket: {ticket}
+
+ Department (exact name only):"""
      )
+
+     try:
+         response = llm.invoke(prompt.format(ticket=ticket_text))
+         department = response.content.strip()
+
+         valid_depts = ["IT Support", "Network Team", "Security Team", "Hardware Team", "Access Management"]
+
+         return department if department in valid_depts else "IT Support"
+     except Exception as e:
+         print(f"Routing error: {e}")
+         return "IT Support"

+ # ===========================
+ # Main Processing Functions
+ # ===========================

+ def process_ticket_langchain(ticket_text: str) -> Dict[str, Any]:
+     """
+     Original single-turn ticket processing.
+     Used for initial ticket intake.
+     """
+     # Step 1: Classify
      classification = classify_ticket(ticket_text)
+
+     # Step 2: Route
      department = call_routing(ticket_text)
+
+     # Step 3: Query KB
      kb_result = query_kb(ticket_text)
+
+     # Step 4: Generate response
+     if kb_result["answer"] and kb_result["confidence"] >= 0.7:
+         answer = kb_result["answer"]
          status = "resolved"
      else:
+         # Generate ticket ID and escalate
+         ticket_id = f"TKT-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
+         answer = f"""I couldn't find a confident answer in our knowledge base.

+ **Ticket Created:** {ticket_id}
+ **Department:** {department}
+ **Classification:** {classification}

+ A specialist will review your ticket and respond within 2-4 business hours."""
          status = "escalated"
+
      return {
          "classification": classification,
          "department": department,
+         "answer": answer,
+         "confidence": kb_result["confidence"],
+         "status": status
+     }
+
+ def process_with_agent(user_message: str, conversation_id: Optional[str] = None) -> Dict[str, Any]:
+     """
+     Full agentic conversation handler with memory and escalation logic.
+     This is the main function used by the /orchestrate endpoint.
+     """
+     # Generate conversation ID if not provided
+     if not conversation_id:
+         conversation_id = f"conv_{datetime.now().strftime('%Y%m%d_%H%M%S')}_{hash(user_message) % 10000}"
+
+     # Initialize conversation if new
+     if conversation_id not in conversations:
+         conversations[conversation_id] = {
+             "messages": [],
+             "ticket_info": None,
+             "created_at": datetime.now().isoformat(),
+             "escalated": False
+         }
+
+     conv = conversations[conversation_id]
+
+     # Don't continue if already escalated
+     if conv.get("escalated", False):
+         return {
+             "conversation_id": conversation_id,
+             "response": "This ticket has been escalated to a human agent. They will contact you soon.",
+             "status": "escalated",
+             "message_count": len(conv["messages"]),
+             "can_continue": False
+         }
+
+     # Add user message
+     conv["messages"].append({
+         "role": "user",
+         "content": user_message,
+         "timestamp": datetime.now().isoformat()
+     })
+
+     # First message - full orchestration
+     if len(conv["messages"]) == 1:
+         result = process_ticket_langchain(user_message)
+         conv["ticket_info"] = {
+             "classification": result["classification"],
+             "department": result["department"],
+             "initial_query": user_message
+         }
+
+         response_text = result["answer"]
+         status = result["status"]
+
+         if status == "escalated":
+             conv["escalated"] = True
+
+     # Follow-up messages
+     else:
+         # Check for escalation keywords
+         escalation_keywords = ["not working", "didn't work", "still broken", "still not",
+                                "escalate", "human", "agent", "supervisor", "still having"]
+         wants_escalation = any(kw in user_message.lower() for kw in escalation_keywords)
+
+         if wants_escalation:
+             # Try KB one more time with refined query
+             kb_result = query_kb(user_message)
+
+             if kb_result["answer"] and kb_result["confidence"] >= 0.75:
+                 response_text = f"Let me try a different solution:\n\n{kb_result['answer']}\n\nPlease try this and let me know if it works."
+                 status = "in_progress"
+             else:
+                 # Escalate to human
+                 ticket_id = f"TKT-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
+                 conv["escalated"] = True
+                 response_text = f"""I understand the previous solutions haven't resolved your issue. I'm escalating this to a human specialist.
+
+ **Escalation Ticket:** {ticket_id}
+ **Department:** {conv['ticket_info']['department']}
+ **Priority:** High
+ **Issue:** {conv['ticket_info']['classification']}
+
+ A {conv['ticket_info']['department']} specialist will contact you within 2-4 business hours. They will have full access to our conversation history.
+
+ Your ticket reference: {ticket_id}"""
+                 status = "escalated"
+         else:
+             # Continue conversation with full context
+             context = f"""You are a helpful IT helpdesk AI agent. Provide clear, concise troubleshooting help.
+
+ **Conversation Context:**
+ - Initial Issue: {conv['ticket_info']['initial_query']}
+ - Classification: {conv['ticket_info']['classification']}
+ - Department: {conv['ticket_info']['department']}
+
+ **Recent Conversation:**
+ """
+             # Include last 6 messages for context
+             for msg in conv["messages"][-6:]:
+                 context += f"{msg['role'].upper()}: {msg['content']}\n"
+
+             context += f"""\n**Current User Message:** {user_message}
+
+ Instructions:
+ - Provide helpful, specific guidance
+ - If user confirms something worked, congratulate them
+ - If unclear, ask clarifying questions
+ - Keep responses concise (2-3 paragraphs max)
+ - Don't repeat previous solutions
+
+ Your response:"""
+
+             try:
+                 response_text = llm.invoke(context).content
+                 status = "in_progress"
+             except Exception as e:
+                 print(f"LLM error: {e}")
+                 response_text = "I'm having trouble processing that. Could you rephrase your question?"
+                 status = "in_progress"
+
+     # Add assistant response
+     conv["messages"].append({
+         "role": "assistant",
+         "content": response_text,
+         "status": status,
+         "timestamp": datetime.now().isoformat()
+     })
+
+     return {
+         "conversation_id": conversation_id,
+         "response": response_text,
+         "status": status,
+         "message_count": len(conv["messages"]),
+         "can_continue": not conv.get("escalated", False)
+     }
+
+ def get_conversation_history(conversation_id: str) -> Optional[Dict[str, Any]]:
+     """Retrieve conversation history by ID."""
+     return conversations.get(conversation_id)
+
+ # ===========================
+ # Initialization
+ # ===========================
+
+ # Initialize KB collection on module load
+ get_kb_collection()
+
+ print("✅ Agent LangChain module loaded successfully")
+ print(f"📊 KB Collection: {'initialized' if kb_collection else 'not initialized'}")
+ if kb_collection:
+     print(f"📚 KB Records: {kb_collection.count()}")
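For illustration only, not part of the commit: a minimal usage sketch of the new entry points introduced in this revision. It assumes the file is importable as agent_langchain, that GROQ_API_KEY is set, and that the Chroma store under /tmp/chroma has been populated; the ticket text below is hypothetical.

# Hypothetical usage sketch of the updated module (assumptions noted above).
from agent_langchain import process_with_agent, get_conversation_history

# First turn: classifies, routes, queries the KB, and either resolves or escalates.
first = process_with_agent("My VPN keeps disconnecting every few minutes.")
print(first["conversation_id"], first["status"])

# Follow-up turn in the same conversation: reuses the stored ticket_info and message history.
second = process_with_agent(
    "That didn't work, it's still broken.",
    conversation_id=first["conversation_id"],
)
print(second["status"], second["can_continue"])

# Inspect the in-memory history kept in the module-level conversations dict.
history = get_conversation_history(first["conversation_id"])
print(len(history["messages"]) if history else 0)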