Pawan Mane committed on
Commit
f919e71
·
1 Parent(s): ceb563c

Code optimization

Browse files
app/frontend/gradio_app.py CHANGED
@@ -3,8 +3,14 @@ app/frontend/gradio_app.py — Full page warm gray UI
3
  """
4
  import os
5
  import gradio as gr
 
6
  from langchain_core.messages import HumanMessage
7
 
 
 
 
 
 
8
  os.environ["GRADIO_MODE"] = "true"
9
  os.environ["HITL_ENABLED"] = os.getenv("HITL_ENABLED", "true")
10
 
@@ -21,9 +27,18 @@ _pending_hitl_state: AgentState | None = None
21
 
22
 
23
  def run_graph(query: str) -> AgentState:
24
- # Just pass the query — graph manages its own message history via state
 
 
 
 
 
 
 
 
 
25
  initial_state: AgentState = {
26
- "messages": [], # MemorySaver restores history; safety_node adds HumanMessage
27
  "query": query,
28
  "route": "", "rag_context": "", "tool_calls": [], "tool_results": [],
29
  "response": "", "retry_count": 0, "hitl_approved": False,
@@ -75,6 +90,10 @@ def handle_submit(user_message, chat_history):
75
  return chat_history, "", "*Waiting for a query...*", "", gr.update(visible=False), gr.update(value="")
76
 
77
  chat_history = chat_history + [user_msg(user_message)]
 
 
 
 
78
  try:
79
  fs = run_graph(user_message)
80
  route = fs.get("route", "")
 
3
  """
4
  import os
5
  import gradio as gr
6
+ from datetime import datetime, timezone, timedelta
7
  from langchain_core.messages import HumanMessage
8
 
9
+ IST = timezone(timedelta(hours=5, minutes=30))
10
+
11
+ def _now_ist() -> str:
12
+ return datetime.now(IST).strftime("%d %b %Y %I:%M:%S %p IST")
13
+
14
  os.environ["GRADIO_MODE"] = "true"
15
  os.environ["HITL_ENABLED"] = os.getenv("HITL_ENABLED", "true")
16
 
 
27
 
28
 
29
  def run_graph(query: str) -> AgentState:
30
+ # Restore persisted messages from MemorySaver checkpoint
31
+ # This gives the LLM full clean conversation history across turns
32
+ prior_messages = []
33
+ try:
34
+ checkpoint = _graph.get_state(_thread_config)
35
+ if checkpoint and checkpoint.values:
36
+ prior_messages = checkpoint.values.get("messages", [])
37
+ except Exception:
38
+ pass # First turn — no checkpoint yet
39
+
40
  initial_state: AgentState = {
41
+ "messages": prior_messages, # Prior safe history; safety_node adds current HumanMessage
42
  "query": query,
43
  "route": "", "rag_context": "", "tool_calls": [], "tool_results": [],
44
  "response": "", "retry_count": 0, "hitl_approved": False,
 
90
  return chat_history, "", "*Waiting for a query...*", "", gr.update(visible=False), gr.update(value="")
91
 
92
  chat_history = chat_history + [user_msg(user_message)]
93
+ print(f"\n{'─'*60}")
94
+ print(f"[QUERY] {user_message}")
95
+ print(f"[TIME] {_now_ist()}")
96
+ print(f"{'─'*60}")
97
  try:
98
  fs = run_graph(user_message)
99
  route = fs.get("route", "")
app/frontend/gradio_app_hf.py CHANGED
@@ -25,6 +25,7 @@ if "dotenv" not in sys.modules:
25
  sys.modules["dotenv"] = MagicMock()
26
 
27
  import gradio as gr
 
28
  from langchain_core.messages import HumanMessage
29
 
30
  from app.graph.builder import build_graph
@@ -32,6 +33,10 @@ from app.state import AgentState
32
  from app.nodes.hitl import HITLPauseException
33
  from app.frontend.css import CSS
34
 
 
 
 
 
35
 
36
  # ── Graph singleton ────────────────────────────────────────────────────────
37
  _graph = build_graph()
@@ -43,9 +48,17 @@ _pending_hitl_state: AgentState | None = None
43
  # ── Core runner ────────────────────────────────────────────────────────────
44
 
45
  def run_graph(query: str) -> AgentState:
46
- # messages=[] — MemorySaver restores prior history; safety_node adds HumanMessage
 
 
 
 
 
 
 
 
47
  initial_state: AgentState = {
48
- "messages": [],
49
  "query": query,
50
  "route": "",
51
  "rag_context": "",
@@ -109,6 +122,10 @@ def handle_submit(user_message, chat_history):
109
  return chat_history, "", "*Waiting for a query...*", "", gr.update(visible=False), gr.update(value="")
110
 
111
  chat_history = chat_history + [user_msg(user_message)]
 
 
 
 
112
  try:
113
  fs = run_graph(user_message)
114
  route = fs.get("route", "")
 
25
  sys.modules["dotenv"] = MagicMock()
26
 
27
  import gradio as gr
28
+ from datetime import datetime, timezone, timedelta
29
  from langchain_core.messages import HumanMessage
30
 
31
  from app.graph.builder import build_graph
 
33
  from app.nodes.hitl import HITLPauseException
34
  from app.frontend.css import CSS
35
 
36
+ IST = timezone(timedelta(hours=5, minutes=30))
37
+
38
+ def _now_ist() -> str:
39
+ return datetime.now(IST).strftime("%d %b %Y %I:%M:%S %p IST")
40
 
41
  # ── Graph singleton ────────────────────────────────────────────────────────
42
  _graph = build_graph()
 
48
  # ── Core runner ────────────────────────────────────────────────────────────
49
 
50
  def run_graph(query: str) -> AgentState:
51
+ # Restore persisted messages from MemorySaver checkpoint
52
+ prior_messages = []
53
+ try:
54
+ checkpoint = _graph.get_state(_thread_config)
55
+ if checkpoint and checkpoint.values:
56
+ prior_messages = checkpoint.values.get("messages", [])
57
+ except Exception:
58
+ pass # First turn — no checkpoint yet
59
+
60
  initial_state: AgentState = {
61
+ "messages": prior_messages, # Prior safe history; safety_node adds current HumanMessage
62
  "query": query,
63
  "route": "",
64
  "rag_context": "",
 
122
  return chat_history, "", "*Waiting for a query...*", "", gr.update(visible=False), gr.update(value="")
123
 
124
  chat_history = chat_history + [user_msg(user_message)]
125
+ print(f"\n{'─'*60}")
126
+ print(f"[QUERY] {user_message}")
127
+ print(f"[TIME] {_now_ist()}")
128
+ print(f"{'─'*60}")
129
  try:
130
  fs = run_graph(user_message)
131
  route = fs.get("route", "")