srilakshu012456 committed on
Commit
903bedf
·
verified ·
1 Parent(s): f06288d

Update main.py

Browse files
Files changed (1) hide show
  1. main.py +9 -48
main.py CHANGED
@@ -44,8 +44,7 @@ GEMINI_URL = (
44
  os.environ["POSTHOG_DISABLED"] = "true"
45
 
46
  # ---------------------------------------------------------------------
47
- # Minimal server-side cache to remember last issue text shown to user
48
- # (Used when user says "issue resolved" but frontend doesn't send last_issue)
49
  # ---------------------------------------------------------------------
50
  LAST_ISSUE_HINT: str = ""
51
 
@@ -188,7 +187,6 @@ def _normalize_lines(text: str) -> List[str]:
188
  return [raw.strip()] if raw.strip() else []
189
 
190
 
191
- # ---------------- Action filters for steps (create/update/delete) ----------------
192
  def _filter_numbered_steps_by_actions(numbered_text: str, wanted: set[str], exclude: set[str]) -> str:
193
  ACTION_SYNONYMS = {
194
  "create": ("create", "creation", "add", "new", "generate"),
@@ -217,7 +215,6 @@ def _filter_numbered_steps_by_actions(numbered_text: str, wanted: set[str], excl
217
  return "\n".join(out_lines).strip() or (numbered_text or "").strip()
218
 
219
 
220
- # ---------------- Small utilities used by next-step & filtering ----------------
221
  def _dedupe_lines(text: str) -> str:
222
  seen, out = set(), []
223
  for ln in (text or "").splitlines():
@@ -233,7 +230,6 @@ def _split_sentences(block: str) -> list:
233
  return parts if parts else ([block.strip()] if (block or "").strip() else [])
234
 
235
 
236
- # ------------- Numbering + text normalization used elsewhere ----------
237
  def _ensure_numbering(text: str) -> str:
238
  text = re.sub(r"[\u2060\u200B]", "", text or "")
239
  lines = [ln.strip() for ln in (text or "").splitlines() if ln and ln.strip()]
@@ -242,7 +238,6 @@ def _ensure_numbering(text: str) -> str:
242
  para = " ".join(lines).strip()
243
  if not para:
244
  return ""
245
- # Hard breaks at step boundaries
246
  para_clean = re.sub(r"(?:\b\d+\s*[.\)])\s+", "\n\n\n", para) # 1. / 1)
247
  para_clean = re.sub(r"(?:[\u2460-\u2473]\s+)", "\n\n\n", para_clean) # circled digits
248
  para_clean = re.sub(r"(?i)\bstep\s*\d+\s*:\s*", "\n\n\n", para_clean) # Step 1:
@@ -307,7 +302,6 @@ def _split_sop_into_steps(numbered_text: str) -> list:
307
 
308
 
309
  def _format_steps_as_numbered(steps: list) -> str:
310
- """Render a list of steps with circled numbers for visual continuity."""
311
  circled = {
312
  1: "\u2460", 2: "\u2461", 3: "\u2462", 4: "\u2463", 5: "\u2464",
313
  6: "\u2465", 7: "\u2466", 8: "\u2467", 9: "\u2468", 10: "\u2469",
@@ -320,7 +314,6 @@ def _format_steps_as_numbered(steps: list) -> str:
320
  return "\n".join(out)
321
 
322
 
323
- # ---------------- Similarity for anchor-based next steps ----------------
324
  def _similarity(a: str, b: str) -> float:
325
  a_norm, b_norm = _norm_text(a), _norm_text(b)
326
  ta, tb = set(a_norm.split()), set(b_norm.split())
@@ -340,9 +333,6 @@ def _similarity(a: str, b: str) -> float:
340
 
341
 
342
  def _extract_anchor_from_query(msg: str) -> dict:
343
- """
344
- Pull the anchor clause out of the user's sentence and note if a follow-up cue exists.
345
- """
346
  raw = (msg or "").strip()
347
  low = _norm_text(raw)
348
  FOLLOWUP_CUES = ("what next", "what is next", "what to do", "then", "after that", "next")
@@ -363,10 +353,6 @@ def _extract_anchor_from_query(msg: str) -> dict:
363
 
364
 
365
  def _anchor_next_steps(user_message: str, numbered_text: str, max_next: int = 8) -> list | None:
366
- """
367
- Locate the best-matching line (or sentence inside it) for the user's anchor,
368
- then return ONLY subsequent steps. Returns None if no strong anchor is found.
369
- """
370
  steps = _split_sop_into_steps(numbered_text)
371
  if not steps:
372
  return None
@@ -413,7 +399,6 @@ def _anchor_next_steps(user_message: str, numbered_text: str, max_next: int = 8)
413
  return [ln for ln in _dedupe_lines("\n".join(next_steps)).splitlines() if ln.strip()]
414
 
415
 
416
- # ---------------- Context filtering (neutral/errors rendering) ----------------
417
  def _filter_context_for_query(context: str, query: str) -> Tuple[str, Dict[str, Any]]:
418
  STRICT_OVERLAP = 3
419
  MAX_SENTENCES_STRICT = 4
@@ -549,10 +534,8 @@ def _build_tracking_descriptions(issue_text: str, resolved_text: str) -> Tuple[s
549
  issue = (issue_text or "").strip()
550
  resolved = (resolved_text or "").strip()
551
 
552
- # Strict: only the original issue/query goes into ShortDescription
553
- short_desc = issue[:100] # If empty, stays empty—backend now tries to fill from cache
554
 
555
- # DetailedDescription documents both the original issue and the user's resolution confirmation
556
  long_desc = (
557
  f'User reported: "{issue}". '
558
  f'User confirmation: "{resolved}". '
@@ -635,6 +618,7 @@ async def health_check():
635
  # ---------------------------------------------------------------------
636
  @app.post("/chat")
637
  async def chat_with_ai(input_data: ChatInput):
 
638
  assist_followup: Optional[str] = None
639
  try:
640
  msg_norm = (input_data.user_message or "").lower().strip()
@@ -664,13 +648,10 @@ async def chat_with_ai(input_data: ChatInput):
664
  is_llm_resolved = False
665
  if (not _has_negation_resolved(msg_norm)) and (_is_resolution_ack_heuristic(msg_norm) or is_llm_resolved):
666
  try:
667
- # Prefer server-side cached hint if frontend didn't pass last_issue
668
  issue_hint = (input_data.last_issue or "").strip()
669
  if not issue_hint:
670
- try:
671
- issue_hint = LAST_ISSUE_HINT.strip()
672
- except Exception:
673
- issue_hint = ""
674
 
675
  short_desc, long_desc = _build_tracking_descriptions(issue_hint, input_data.user_message)
676
  result = create_incident(short_desc, long_desc)
@@ -915,7 +896,6 @@ async def chat_with_ai(input_data: ChatInput):
915
  steps_override_applied = False # for Gemini paraphrase gating
916
 
917
  if best_doc and detected_intent == "steps":
918
- # prefer exact section of the top hit; fallback to all steps
919
  sec = (top_meta or {}).get("section")
920
  if sec:
921
  full_steps = get_section_text(best_doc, sec)
@@ -925,11 +905,9 @@ async def chat_with_ai(input_data: ChatInput):
925
  if full_steps:
926
  numbered_full = _ensure_numbering(full_steps)
927
 
928
- # --- Section-aware action filtering (avoid over-trimming "update" sections) ---
929
  raw_actions = set((kb_results.get("actions") or []))
930
  msg_low2 = (input_data.user_message or "").lower()
931
 
932
- # infer action from user text if extractor missed it
933
  if not raw_actions and ("creation" in msg_low2 or "create" in msg_low2 or "set up" in msg_low2 or "setup" in msg_low2):
934
  raw_actions = {"create"}
935
  elif not raw_actions and ("update" in msg_low2 or "modify" in msg_low2 or "edit" in msg_low2 or "change" in msg_low2):
@@ -960,11 +938,9 @@ async def chat_with_ai(input_data: ChatInput):
960
  if (wanted or exclude) and not skip_action_filter:
961
  before = numbered_full
962
  numbered_full = _filter_numbered_steps_by_actions(numbered_full, wanted=wanted, exclude=exclude)
963
- # safety: if over-trimmed to <=1 line, revert
964
  if len([ln for ln in numbered_full.splitlines() if ln.strip()]) <= 1:
965
  numbered_full = before
966
 
967
- # --- Keyword-free anchor-based next-step resolver ---
968
  next_only = _anchor_next_steps(input_data.user_message, numbered_full, max_next=6)
969
 
970
  if next_only is not None:
@@ -983,12 +959,10 @@ async def chat_with_ai(input_data: ChatInput):
983
  context = numbered_full
984
  context_preformatted = True
985
 
986
- # clear filter info for debug clarity
987
  filt_info = {'mode': None, 'matched_count': None, 'all_sentences': None}
988
  context_found = True
989
 
990
  elif best_doc and detected_intent == "errors":
991
- # --- Detect explicit "not resolved" phrases ---
992
  said_not_resolved = (
993
  _has_negation_resolved(msg_norm) or
994
  bool(re.search(
@@ -999,9 +973,8 @@ async def chat_with_ai(input_data: ChatInput):
999
  )
1000
 
1001
  if said_not_resolved:
1002
- # Keep only the card; avoid duplicate text or dots in bubble.
1003
  return {
1004
- "bot_response": "Select an option below.", # short, non-duplicated helper line
1005
  "status": "OK",
1006
  "context_found": False,
1007
  "ask_resolved": False,
@@ -1017,17 +990,13 @@ async def chat_with_ai(input_data: ChatInput):
1017
  },
1018
  }
1019
 
1020
- # Build errors context
1021
  full_errors = get_best_errors_section_text(best_doc)
1022
  if full_errors:
1023
  ctx_err = _extract_errors_only(full_errors, max_lines=30)
1024
 
1025
- # If it's a permissions query, trim to permission lines
1026
  if is_perm_query:
1027
  context = _filter_permission_lines(ctx_err, max_lines=6)
1028
  else:
1029
- # Treat domain-only messages (e.g., "putaway error") as specific queries
1030
- # so we filter errors to the most relevant lines instead of dumping entire heading.
1031
  DOMAIN_TERMS = (
1032
  "trailer", "shipment", "order", "load", "wave",
1033
  "inventory", "putaway", "receiving", "appointment",
@@ -1038,9 +1007,7 @@ async def chat_with_ai(input_data: ChatInput):
1038
 
1039
  is_specific_error = (len(_detect_error_families(msg_low)) > 0) or mentions_domain_local
1040
  if is_specific_error:
1041
- # Keep only the top overlapping sentences/bullets with the user's query
1042
  context = _filter_context_for_query(ctx_err, input_data.user_message)[0]
1043
- # If filtering produced too little, keep a small focused set
1044
  if not context.strip():
1045
  all_lines = _normalize_lines(ctx_err)
1046
  error_bullets = [
@@ -1049,7 +1016,6 @@ async def chat_with_ai(input_data: ChatInput):
1049
  ]
1050
  context = "\n".join(error_bullets[:6]).strip()
1051
  else:
1052
- # Generic fallback — a compact list
1053
  all_lines = _normalize_lines(ctx_err)
1054
  error_bullets = [
1055
  ln for ln in all_lines
@@ -1063,10 +1029,8 @@ async def chat_with_ai(input_data: ChatInput):
1063
  )
1064
  escalation_line = _extract_escalation_line(full_errors)
1065
 
1066
- # --- Domain-aware next-step override from SOP steps (for messages like "getting putaway error")
1067
- # If we can anchor into steps, show only the subsequent steps for clear guidance.
1068
  try:
1069
- if mentions_domain: # computed earlier in the function
1070
  steps_src = get_best_steps_section_text(best_doc)
1071
  if steps_src:
1072
  numbered_steps = _ensure_numbering(steps_src)
@@ -1079,7 +1043,6 @@ async def chat_with_ai(input_data: ChatInput):
1079
  next_step_info = {"count": len(next_only), "source": "errors_domain_override"}
1080
  steps_override_applied = True
1081
  except Exception:
1082
- # Non-fatal; keep errors context
1083
  pass
1084
 
1085
  elif best_doc and detected_intent == "prereqs":
@@ -1167,9 +1130,7 @@ Return ONLY the rewritten guidance."""
1167
 
1168
  # ----- Cache last issue hint (used if user later says "issue resolved thanks")
1169
  try:
1170
- global LAST_ISSUE_HINT
1171
  if detected_intent == "steps":
1172
- # Prefer section heading if present; else user's query
1173
  section_heading = ((top_meta or {}).get("section") or "").strip()
1174
  LAST_ISSUE_HINT = (section_heading or input_data.user_message or "").strip()[:100]
1175
  elif detected_intent == "errors":
@@ -1310,7 +1271,7 @@ def _classify_resolution_llm(user_message: str) -> bool:
1310
  Return only 'true' or 'false'.
1311
  Message: {user_message}"""
1312
  headers = {"Content-Type": "application/json"}
1313
- payload = {"contents": [{"parts": [{"text": prompt}]}]} # noqa: E501
1314
  try:
1315
  resp = requests.post(GEMINI_URL, headers=headers, json=payload, timeout=12, verify=GEMINI_SSL_VERIFY)
1316
  data = resp.json()
@@ -1388,7 +1349,7 @@ def _set_incident_resolved(sys_id: str) -> bool:
1388
  notes_field = os.getenv("SERVICENOW_RESOLUTION_NOTES_FIELD", "close_notes")
1389
  payload_C = clean({
1390
  "state": "6",
1391
- code_field: close_notes_val, # adjust if custom fields are mapped differently
1392
  notes_field: close_notes_val,
1393
  "caller_id": caller_sysid,
1394
  "resolved_at": datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),
 
44
  os.environ["POSTHOG_DISABLED"] = "true"
45
 
46
  # ---------------------------------------------------------------------
47
+ # Minimal server-side cache (used to populate short description if frontend didn’t pass last_issue)
 
48
  # ---------------------------------------------------------------------
49
  LAST_ISSUE_HINT: str = ""
50
 
 
187
  return [raw.strip()] if raw.strip() else []
188
 
189
 
 
190
  def _filter_numbered_steps_by_actions(numbered_text: str, wanted: set[str], exclude: set[str]) -> str:
191
  ACTION_SYNONYMS = {
192
  "create": ("create", "creation", "add", "new", "generate"),
 
215
  return "\n".join(out_lines).strip() or (numbered_text or "").strip()
216
 
217
 
 
218
  def _dedupe_lines(text: str) -> str:
219
  seen, out = set(), []
220
  for ln in (text or "").splitlines():
 
230
  return parts if parts else ([block.strip()] if (block or "").strip() else [])
231
 
232
 
 
233
  def _ensure_numbering(text: str) -> str:
234
  text = re.sub(r"[\u2060\u200B]", "", text or "")
235
  lines = [ln.strip() for ln in (text or "").splitlines() if ln and ln.strip()]
 
238
  para = " ".join(lines).strip()
239
  if not para:
240
  return ""
 
241
  para_clean = re.sub(r"(?:\b\d+\s*[.\)])\s+", "\n\n\n", para) # 1. / 1)
242
  para_clean = re.sub(r"(?:[\u2460-\u2473]\s+)", "\n\n\n", para_clean) # circled digits
243
  para_clean = re.sub(r"(?i)\bstep\s*\d+\s*:\s*", "\n\n\n", para_clean) # Step 1:
 
302
 
303
 
304
  def _format_steps_as_numbered(steps: list) -> str:
 
305
  circled = {
306
  1: "\u2460", 2: "\u2461", 3: "\u2462", 4: "\u2463", 5: "\u2464",
307
  6: "\u2465", 7: "\u2466", 8: "\u2467", 9: "\u2468", 10: "\u2469",
 
314
  return "\n".join(out)
315
 
316
 
 
317
  def _similarity(a: str, b: str) -> float:
318
  a_norm, b_norm = _norm_text(a), _norm_text(b)
319
  ta, tb = set(a_norm.split()), set(b_norm.split())
 
333
 
334
 
335
  def _extract_anchor_from_query(msg: str) -> dict:
 
 
 
336
  raw = (msg or "").strip()
337
  low = _norm_text(raw)
338
  FOLLOWUP_CUES = ("what next", "what is next", "what to do", "then", "after that", "next")
 
353
 
354
 
355
  def _anchor_next_steps(user_message: str, numbered_text: str, max_next: int = 8) -> list | None:
 
 
 
 
356
  steps = _split_sop_into_steps(numbered_text)
357
  if not steps:
358
  return None
 
399
  return [ln for ln in _dedupe_lines("\n".join(next_steps)).splitlines() if ln.strip()]
400
 
401
 
 
402
  def _filter_context_for_query(context: str, query: str) -> Tuple[str, Dict[str, Any]]:
403
  STRICT_OVERLAP = 3
404
  MAX_SENTENCES_STRICT = 4
 
534
  issue = (issue_text or "").strip()
535
  resolved = (resolved_text or "").strip()
536
 
537
+ short_desc = issue[:100] # If empty, stays empty; backend will try to fill from cache
 
538
 
 
539
  long_desc = (
540
  f'User reported: "{issue}". '
541
  f'User confirmation: "{resolved}". '
 
618
  # ---------------------------------------------------------------------
619
  @app.post("/chat")
620
  async def chat_with_ai(input_data: ChatInput):
621
+ global LAST_ISSUE_HINT # <-- ensure global declared before any assignment/use
622
  assist_followup: Optional[str] = None
623
  try:
624
  msg_norm = (input_data.user_message or "").lower().strip()
 
648
  is_llm_resolved = False
649
  if (not _has_negation_resolved(msg_norm)) and (_is_resolution_ack_heuristic(msg_norm) or is_llm_resolved):
650
  try:
651
+ # Prefer cached issue hint if frontend didn't pass last_issue
652
  issue_hint = (input_data.last_issue or "").strip()
653
  if not issue_hint:
654
+ issue_hint = LAST_ISSUE_HINT.strip()
 
 
 
655
 
656
  short_desc, long_desc = _build_tracking_descriptions(issue_hint, input_data.user_message)
657
  result = create_incident(short_desc, long_desc)
 
896
  steps_override_applied = False # for Gemini paraphrase gating
897
 
898
  if best_doc and detected_intent == "steps":
 
899
  sec = (top_meta or {}).get("section")
900
  if sec:
901
  full_steps = get_section_text(best_doc, sec)
 
905
  if full_steps:
906
  numbered_full = _ensure_numbering(full_steps)
907
 
 
908
  raw_actions = set((kb_results.get("actions") or []))
909
  msg_low2 = (input_data.user_message or "").lower()
910
 
 
911
  if not raw_actions and ("creation" in msg_low2 or "create" in msg_low2 or "set up" in msg_low2 or "setup" in msg_low2):
912
  raw_actions = {"create"}
913
  elif not raw_actions and ("update" in msg_low2 or "modify" in msg_low2 or "edit" in msg_low2 or "change" in msg_low2):
 
938
  if (wanted or exclude) and not skip_action_filter:
939
  before = numbered_full
940
  numbered_full = _filter_numbered_steps_by_actions(numbered_full, wanted=wanted, exclude=exclude)
 
941
  if len([ln for ln in numbered_full.splitlines() if ln.strip()]) <= 1:
942
  numbered_full = before
943
 
 
944
  next_only = _anchor_next_steps(input_data.user_message, numbered_full, max_next=6)
945
 
946
  if next_only is not None:
 
959
  context = numbered_full
960
  context_preformatted = True
961
 
 
962
  filt_info = {'mode': None, 'matched_count': None, 'all_sentences': None}
963
  context_found = True
964
 
965
  elif best_doc and detected_intent == "errors":
 
966
  said_not_resolved = (
967
  _has_negation_resolved(msg_norm) or
968
  bool(re.search(
 
973
  )
974
 
975
  if said_not_resolved:
 
976
  return {
977
+ "bot_response": "Select an option below.", # concise helper; avoids duplicate long sentence/dot
978
  "status": "OK",
979
  "context_found": False,
980
  "ask_resolved": False,
 
990
  },
991
  }
992
 
 
993
  full_errors = get_best_errors_section_text(best_doc)
994
  if full_errors:
995
  ctx_err = _extract_errors_only(full_errors, max_lines=30)
996
 
 
997
  if is_perm_query:
998
  context = _filter_permission_lines(ctx_err, max_lines=6)
999
  else:
 
 
1000
  DOMAIN_TERMS = (
1001
  "trailer", "shipment", "order", "load", "wave",
1002
  "inventory", "putaway", "receiving", "appointment",
 
1007
 
1008
  is_specific_error = (len(_detect_error_families(msg_low)) > 0) or mentions_domain_local
1009
  if is_specific_error:
 
1010
  context = _filter_context_for_query(ctx_err, input_data.user_message)[0]
 
1011
  if not context.strip():
1012
  all_lines = _normalize_lines(ctx_err)
1013
  error_bullets = [
 
1016
  ]
1017
  context = "\n".join(error_bullets[:6]).strip()
1018
  else:
 
1019
  all_lines = _normalize_lines(ctx_err)
1020
  error_bullets = [
1021
  ln for ln in all_lines
 
1029
  )
1030
  escalation_line = _extract_escalation_line(full_errors)
1031
 
 
 
1032
  try:
1033
+ if mentions_domain: # computed earlier
1034
  steps_src = get_best_steps_section_text(best_doc)
1035
  if steps_src:
1036
  numbered_steps = _ensure_numbering(steps_src)
 
1043
  next_step_info = {"count": len(next_only), "source": "errors_domain_override"}
1044
  steps_override_applied = True
1045
  except Exception:
 
1046
  pass
1047
 
1048
  elif best_doc and detected_intent == "prereqs":
 
1130
 
1131
  # ----- Cache last issue hint (used if user later says "issue resolved thanks")
1132
  try:
 
1133
  if detected_intent == "steps":
 
1134
  section_heading = ((top_meta or {}).get("section") or "").strip()
1135
  LAST_ISSUE_HINT = (section_heading or input_data.user_message or "").strip()[:100]
1136
  elif detected_intent == "errors":
 
1271
  Return only 'true' or 'false'.
1272
  Message: {user_message}"""
1273
  headers = {"Content-Type": "application/json"}
1274
+ payload = {"contents": [{"parts": [{"text": prompt}]}]}
1275
  try:
1276
  resp = requests.post(GEMINI_URL, headers=headers, json=payload, timeout=12, verify=GEMINI_SSL_VERIFY)
1277
  data = resp.json()
 
1349
  notes_field = os.getenv("SERVICENOW_RESOLUTION_NOTES_FIELD", "close_notes")
1350
  payload_C = clean({
1351
  "state": "6",
1352
+ code_field: close_notes_val,
1353
  notes_field: close_notes_val,
1354
  "caller_id": caller_sysid,
1355
  "resolved_at": datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S"),