USAMA BHATTI committed on
Commit
239dbce
·
1 Parent(s): 4f47bd4

Final SaaS Security & Asset Support

Browse files
Files changed (1) hide show
  1. backend/src/services/chat_service.py +238 -23
backend/src/services/chat_service.py CHANGED
@@ -1,4 +1,213 @@
1
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2
  import json
3
  from sqlalchemy.ext.asyncio import AsyncSession
4
  from sqlalchemy.future import select
@@ -6,7 +215,7 @@ from sqlalchemy.future import select
6
  # --- Model Imports ---
7
  from backend.src.models.chat import ChatHistory
8
  from backend.src.models.integration import UserIntegration
9
- from backend.src.models.user import User # Added User model for Bot Persona
10
 
11
  # --- Dynamic Factory & Tool Imports ---
12
  from backend.src.services.llm.factory import get_llm_model
@@ -44,7 +253,6 @@ async def get_user_integrations(user_id: str, db: AsyncSession) -> dict:
44
  creds['provider'] = i.provider
45
  creds['schema_map'] = i.schema_map if i.schema_map else {}
46
 
47
- # --- STRICT CHECK ---
48
  if i.profile_description:
49
  creds['description'] = i.profile_description
50
 
@@ -74,7 +282,6 @@ async def get_chat_history(session_id: str, db: AsyncSession):
74
  async def get_bot_persona(user_id: str, db: AsyncSession):
75
  """Fetches custom Bot Name and Instructions from User table."""
76
  try:
77
- # User ID ko int mein convert karke query karein
78
  stmt = select(User).where(User.id == int(user_id))
79
  result = await db.execute(stmt)
80
  user = result.scalars().first()
@@ -88,17 +295,16 @@ async def get_bot_persona(user_id: str, db: AsyncSession):
88
  print(f"⚠️ Error fetching persona: {e}")
89
  pass
90
 
91
- # Fallback Default Persona
92
  return {"name": "OmniAgent", "instruction": "You are a helpful AI assistant."}
93
 
94
  # ==========================================
95
- # MAIN CHAT LOGIC
96
  # ==========================================
97
  async def process_chat(message: str, session_id: str, user_id: str, db: AsyncSession):
98
 
99
  # 1. Fetch User Settings & Persona
100
  user_settings = await get_user_integrations(user_id, db)
101
- bot_persona = await get_bot_persona(user_id, db) # <--- Persona Load kiya
102
 
103
  # 2. LLM Check
104
  llm_creds = user_settings.get('groq') or user_settings.get('openai')
@@ -115,13 +321,13 @@ async def process_chat(message: str, session_id: str, user_id: str, db: AsyncSes
115
  # 4. SEMANTIC DECISION (Router)
116
  selected_provider = None
117
  if tools_map:
118
- router = SemanticRouter() # Singleton Instance
119
  selected_provider = router.route(message, tools_map)
120
 
121
  response_text = ""
122
  provider_name = "general_chat"
123
 
124
- # 5. Route to Winner
125
  if selected_provider:
126
  print(f"πŸ‘‰ [Router] Selected Tool: {selected_provider.upper()}")
127
  try:
@@ -145,18 +351,16 @@ async def process_chat(message: str, session_id: str, user_id: str, db: AsyncSes
145
  response_text = str(res.get('output', ''))
146
  provider_name = "nosql_agent"
147
 
148
- # Anti-Hallucination
149
  if not response_text or "error" in response_text.lower():
150
- print(f"⚠️ [Router] Tool {selected_provider} failed. Triggering Fallback.")
151
  response_text = ""
152
 
153
  except Exception as e:
154
- print(f"❌ [Router] Execution Failed: {e}")
155
  response_text = ""
156
 
157
- # 6. Fallback / RAG (Using Custom Persona)
158
  if not response_text:
159
- print("πŸ‘‰ [Router] Fallback to RAG/General Chat...")
160
  try:
161
  llm = get_llm_model(credentials=llm_creds)
162
 
@@ -171,15 +375,26 @@ async def process_chat(message: str, session_id: str, user_id: str, db: AsyncSes
171
  except Exception as e:
172
  print(f"⚠️ RAG Warning: {e}")
173
 
174
- # --- πŸ”₯ DYNAMIC SYSTEM PROMPT ---
175
  system_instruction = f"""
176
- IDENTITY: You are '{bot_persona['name']}'.
177
- MISSION: {bot_persona['instruction']}
178
-
 
 
 
 
 
 
 
 
 
 
 
179
  CONTEXT FROM KNOWLEDGE BASE:
180
- {context if context else "No specific documents found."}
181
-
182
- Answer the user's question based on the context above or your general knowledge if permitted by your mission.
183
  """
184
 
185
  # History Load
@@ -189,7 +404,7 @@ async def process_chat(message: str, session_id: str, user_id: str, db: AsyncSes
189
  formatted_history.append(HumanMessage(content=chat.human_message))
190
  if chat.ai_message: formatted_history.append(AIMessage(content=chat.ai_message))
191
 
192
- # LLM Call
193
  prompt = ChatPromptTemplate.from_messages([
194
  ("system", system_instruction),
195
  MessagesPlaceholder(variable_name="chat_history"),
@@ -203,8 +418,8 @@ async def process_chat(message: str, session_id: str, user_id: str, db: AsyncSes
203
 
204
  except Exception as e:
205
  print(f"❌ Fallback Error: {e}")
206
- response_text = "I am currently unable to process your request. Please check your AI configuration."
207
 
208
  # 7. Save to DB
209
  await save_chat_to_db(db, session_id, message, response_text, provider_name)
210
- return response_text
 
1
 
2
+ # import json
3
+ # from sqlalchemy.ext.asyncio import AsyncSession
4
+ # from sqlalchemy.future import select
5
+
6
+ # # --- Model Imports ---
7
+ # from backend.src.models.chat import ChatHistory
8
+ # from backend.src.models.integration import UserIntegration
9
+ # from backend.src.models.user import User # Added User model for Bot Persona
10
+
11
+ # # --- Dynamic Factory & Tool Imports ---
12
+ # from backend.src.services.llm.factory import get_llm_model
13
+ # from backend.src.services.vector_store.qdrant_adapter import get_vector_store
14
+ # from backend.src.services.security.pii_scrubber import PIIScrubber
15
+
16
+ # # --- Agents ---
17
+ # from backend.src.services.tools.secure_agent import get_secure_agent
18
+ # from backend.src.services.tools.nosql_agent import get_nosql_agent
19
+ # from backend.src.services.tools.cms_agent import get_cms_agent
20
+
21
+ # # --- Router ---
22
+ # from backend.src.services.routing.semantic_router import SemanticRouter
23
+
24
+ # # --- LangChain Core ---
25
+ # from langchain_core.messages import HumanMessage, AIMessage
26
+ # from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
27
+
28
+ # # ==========================================
29
+ # # HELPER FUNCTIONS
30
+ # # ==========================================
31
+
32
+ # async def get_user_integrations(user_id: str, db: AsyncSession) -> dict:
33
+ # """Fetches active integrations and filters valid descriptions."""
34
+ # if not user_id: return {}
35
+
36
+ # query = select(UserIntegration).where(UserIntegration.user_id == user_id, UserIntegration.is_active == True)
37
+ # result = await db.execute(query)
38
+ # integrations = result.scalars().all()
39
+
40
+ # settings = {}
41
+ # for i in integrations:
42
+ # try:
43
+ # creds = json.loads(i.credentials)
44
+ # creds['provider'] = i.provider
45
+ # creds['schema_map'] = i.schema_map if i.schema_map else {}
46
+
47
+ # # --- STRICT CHECK ---
48
+ # if i.profile_description:
49
+ # creds['description'] = i.profile_description
50
+
51
+ # settings[i.provider] = creds
52
+ # except (json.JSONDecodeError, TypeError):
53
+ # continue
54
+ # return settings
55
+
56
+ # async def save_chat_to_db(db: AsyncSession, session_id: str, human_msg: str, ai_msg: str, provider: str):
57
+ # """Saves chat history with PII redaction."""
58
+ # if not session_id: return
59
+ # safe_human = PIIScrubber.scrub(human_msg)
60
+ # safe_ai = PIIScrubber.scrub(ai_msg)
61
+ # new_chat = ChatHistory(
62
+ # session_id=session_id, human_message=safe_human, ai_message=safe_ai, provider=provider
63
+ # )
64
+ # db.add(new_chat)
65
+ # await db.commit()
66
+
67
+ # async def get_chat_history(session_id: str, db: AsyncSession):
68
+ # """Retrieves past conversation history."""
69
+ # if not session_id: return []
70
+ # query = select(ChatHistory).where(ChatHistory.session_id == session_id).order_by(ChatHistory.timestamp.asc())
71
+ # result = await db.execute(query)
72
+ # return result.scalars().all()
73
+
74
+ # async def get_bot_persona(user_id: str, db: AsyncSession):
75
+ # """Fetches custom Bot Name and Instructions from User table."""
76
+ # try:
77
+ # # User ID ko int mein convert karke query karein
78
+ # stmt = select(User).where(User.id == int(user_id))
79
+ # result = await db.execute(stmt)
80
+ # user = result.scalars().first()
81
+
82
+ # if user:
83
+ # return {
84
+ # "name": getattr(user, "bot_name", "OmniAgent"),
85
+ # "instruction": getattr(user, "bot_instruction", "You are a helpful AI assistant.")
86
+ # }
87
+ # except Exception as e:
88
+ # print(f"⚠️ Error fetching persona: {e}")
89
+ # pass
90
+
91
+ # # Fallback Default Persona
92
+ # return {"name": "OmniAgent", "instruction": "You are a helpful AI assistant."}
93
+
94
+ # # ==========================================
95
+ # # MAIN CHAT LOGIC
96
+ # # ==========================================
97
+ # async def process_chat(message: str, session_id: str, user_id: str, db: AsyncSession):
98
+
99
+ # # 1. Fetch User Settings & Persona
100
+ # user_settings = await get_user_integrations(user_id, db)
101
+ # bot_persona = await get_bot_persona(user_id, db) # <--- Persona Load kiya
102
+
103
+ # # 2. LLM Check
104
+ # llm_creds = user_settings.get('groq') or user_settings.get('openai')
105
+ # if not llm_creds:
106
+ # return "Please configure your AI Model in Settings."
107
+
108
+ # # 3. Build Tool Map for Router
109
+ # tools_map = {}
110
+ # for provider, config in user_settings.items():
111
+ # if provider in ['sanity', 'sql', 'mongodb']:
112
+ # if config.get('description'):
113
+ # tools_map[provider] = config['description']
114
+
115
+ # # 4. SEMANTIC DECISION (Router)
116
+ # selected_provider = None
117
+ # if tools_map:
118
+ # router = SemanticRouter() # Singleton Instance
119
+ # selected_provider = router.route(message, tools_map)
120
+
121
+ # response_text = ""
122
+ # provider_name = "general_chat"
123
+
124
+ # # 5. Route to Winner
125
+ # if selected_provider:
126
+ # print(f"πŸ‘‰ [Router] Selected Tool: {selected_provider.upper()}")
127
+ # try:
128
+ # if selected_provider == 'sanity':
129
+ # schema = user_settings['sanity'].get('schema_map', {})
130
+ # agent = get_cms_agent(user_id=user_id, schema_map=schema, llm_credentials=llm_creds)
131
+ # res = await agent.ainvoke({"input": message})
132
+ # response_text = str(res.get('output', ''))
133
+ # provider_name = "cms_agent"
134
+
135
+ # elif selected_provider == 'sql':
136
+ # role = "admin" if user_id == '99' else "customer"
137
+ # agent = get_secure_agent(int(user_id), role, user_settings['sql'], llm_credentials=llm_creds)
138
+ # res = await agent.ainvoke({"input": message})
139
+ # response_text = str(res.get('output', ''))
140
+ # provider_name = "sql_agent"
141
+
142
+ # elif selected_provider == 'mongodb':
143
+ # agent = get_nosql_agent(user_id, user_settings['mongodb'], llm_credentials=llm_creds)
144
+ # res = await agent.ainvoke({"input": message})
145
+ # response_text = str(res.get('output', ''))
146
+ # provider_name = "nosql_agent"
147
+
148
+ # # Anti-Hallucination
149
+ # if not response_text or "error" in response_text.lower():
150
+ # print(f"⚠️ [Router] Tool {selected_provider} failed. Triggering Fallback.")
151
+ # response_text = ""
152
+
153
+ # except Exception as e:
154
+ # print(f"❌ [Router] Execution Failed: {e}")
155
+ # response_text = ""
156
+
157
+ # # 6. Fallback / RAG (Using Custom Persona)
158
+ # if not response_text:
159
+ # print("πŸ‘‰ [Router] Fallback to RAG/General Chat...")
160
+ # try:
161
+ # llm = get_llm_model(credentials=llm_creds)
162
+
163
+ # # Context from Vector DB
164
+ # context = ""
165
+ # if 'qdrant' in user_settings:
166
+ # try:
167
+ # vector_store = get_vector_store(credentials=user_settings['qdrant'])
168
+ # docs = await vector_store.asimilarity_search(message, k=3)
169
+ # if docs:
170
+ # context = "\n\n".join([d.page_content for d in docs])
171
+ # except Exception as e:
172
+ # print(f"⚠️ RAG Warning: {e}")
173
+
174
+ # # --- πŸ”₯ DYNAMIC SYSTEM PROMPT ---
175
+ # system_instruction = f"""
176
+ # IDENTITY: You are '{bot_persona['name']}'.
177
+ # MISSION: {bot_persona['instruction']}
178
+
179
+ # CONTEXT FROM KNOWLEDGE BASE:
180
+ # {context if context else "No specific documents found."}
181
+
182
+ # Answer the user's question based on the context above or your general knowledge if permitted by your mission.
183
+ # """
184
+
185
+ # # History Load
186
+ # history = await get_chat_history(session_id, db)
187
+ # formatted_history = []
188
+ # for chat in history:
189
+ # formatted_history.append(HumanMessage(content=chat.human_message))
190
+ # if chat.ai_message: formatted_history.append(AIMessage(content=chat.ai_message))
191
+
192
+ # # LLM Call
193
+ # prompt = ChatPromptTemplate.from_messages([
194
+ # ("system", system_instruction),
195
+ # MessagesPlaceholder(variable_name="chat_history"),
196
+ # ("human", "{question}")
197
+ # ])
198
+ # chain = prompt | llm
199
+
200
+ # ai_response = await chain.ainvoke({"chat_history": formatted_history, "question": message})
201
+ # response_text = ai_response.content
202
+ # provider_name = "rag_fallback"
203
+
204
+ # except Exception as e:
205
+ # print(f"❌ Fallback Error: {e}")
206
+ # response_text = "I am currently unable to process your request. Please check your AI configuration."
207
+
208
+ # # 7. Save to DB
209
+ # await save_chat_to_db(db, session_id, message, response_text, provider_name)
210
+ # return response_text
211
  import json
212
  from sqlalchemy.ext.asyncio import AsyncSession
213
  from sqlalchemy.future import select
 
215
  # --- Model Imports ---
216
  from backend.src.models.chat import ChatHistory
217
  from backend.src.models.integration import UserIntegration
218
+ from backend.src.models.user import User
219
 
220
  # --- Dynamic Factory & Tool Imports ---
221
  from backend.src.services.llm.factory import get_llm_model
 
253
  creds['provider'] = i.provider
254
  creds['schema_map'] = i.schema_map if i.schema_map else {}
255
 
 
256
  if i.profile_description:
257
  creds['description'] = i.profile_description
258
 
 
282
  async def get_bot_persona(user_id: str, db: AsyncSession):
283
  """Fetches custom Bot Name and Instructions from User table."""
284
  try:
 
285
  stmt = select(User).where(User.id == int(user_id))
286
  result = await db.execute(stmt)
287
  user = result.scalars().first()
 
295
  print(f"⚠️ Error fetching persona: {e}")
296
  pass
297
 
 
298
  return {"name": "OmniAgent", "instruction": "You are a helpful AI assistant."}
299
 
300
  # ==========================================
301
+ # MAIN CHAT LOGIC (Ultra-Strict Isolated Mode)
302
  # ==========================================
303
  async def process_chat(message: str, session_id: str, user_id: str, db: AsyncSession):
304
 
305
  # 1. Fetch User Settings & Persona
306
  user_settings = await get_user_integrations(user_id, db)
307
+ bot_persona = await get_bot_persona(user_id, db)
308
 
309
  # 2. LLM Check
310
  llm_creds = user_settings.get('groq') or user_settings.get('openai')
 
321
  # 4. SEMANTIC DECISION (Router)
322
  selected_provider = None
323
  if tools_map:
324
+ router = SemanticRouter()
325
  selected_provider = router.route(message, tools_map)
326
 
327
  response_text = ""
328
  provider_name = "general_chat"
329
 
330
+ # 5. Route to Winner (Agent Execution)
331
  if selected_provider:
332
  print(f"πŸ‘‰ [Router] Selected Tool: {selected_provider.upper()}")
333
  try:
 
351
  response_text = str(res.get('output', ''))
352
  provider_name = "nosql_agent"
353
 
 
354
  if not response_text or "error" in response_text.lower():
 
355
  response_text = ""
356
 
357
  except Exception as e:
358
+ print(f"❌ Agent Execution Failed: {e}")
359
  response_text = ""
360
 
361
+ # 6. Fallback / RAG (ULTRA-STRICT MODE πŸ›‘οΈ)
362
  if not response_text:
363
+ print("πŸ‘‰ [Router] Executing Strict RAG Fallback...")
364
  try:
365
  llm = get_llm_model(credentials=llm_creds)
366
 
 
375
  except Exception as e:
376
  print(f"⚠️ RAG Warning: {e}")
377
 
378
+ # --- πŸ”₯ THE ULTRA-STRICT SYSTEM PROMPT ---
379
  system_instruction = f"""
380
+ SYSTEM IDENTITY:
381
+ You are the '{bot_persona['name']}'. You are a 'Knowledge-Isolated' AI Assistant for this specific platform.
382
+
383
+ CORE MISSION:
384
+ Your ONLY source of truth is the 'CONTEXT FROM KNOWLEDGE BASE' provided below.
385
+ You must ignore ALL of your internal pre-trained general knowledge about the world, geography, famous people, or general facts.
386
+
387
+ STRICT OPERATING RULES:
388
+ 1. MANDATORY REFUSAL: If the user's question cannot be answered using ONLY the provided context, you MUST exactly say: "I apologize, but I am only authorized to provide information based on the provided database. This specific information is not currently available in my knowledge base."
389
+ 2. NO HALLUCINATION: Never attempt to be helpful using outside information. If a fact (like 'Japan's location') is not in the context, you do NOT know it.
390
+ 3. CONTEXT-ONLY: Your existence is bounded by the data below. If the data is empty, you cannot answer anything except greetings.
391
+ 4. GREETINGS: You may respond to 'Hi' or 'Hello' by briefly identifying yourself as '{bot_persona['name']}' and asking what data the user is looking for.
392
+ 5. PROHIBITED TOPICS: Do not discuss any topic that is not present in the provided context.
393
+
394
  CONTEXT FROM KNOWLEDGE BASE:
395
+ ---------------------------
396
+ {context if context else "THE DATABASE IS CURRENTLY EMPTY. DO NOT PROVIDE ANY INFORMATION."}
397
+ ---------------------------
398
  """
399
 
400
  # History Load
 
404
  formatted_history.append(HumanMessage(content=chat.human_message))
405
  if chat.ai_message: formatted_history.append(AIMessage(content=chat.ai_message))
406
 
407
+ # LLM Chain Setup
408
  prompt = ChatPromptTemplate.from_messages([
409
  ("system", system_instruction),
410
  MessagesPlaceholder(variable_name="chat_history"),
 
418
 
419
  except Exception as e:
420
  print(f"❌ Fallback Error: {e}")
421
+ response_text = "I apologize, but I am currently unable to process your request due to a system error."
422
 
423
  # 7. Save to DB
424
  await save_chat_to_db(db, session_id, message, response_text, provider_name)
425
+ return response_text