Spaces:
Sleeping
Sleeping
Charles Grandjean committed on
Commit Β·
ee2e9b4
1
Parent(s): ded54d3
Add logging
Browse files- langraph_agent.py +8 -8
langraph_agent.py
CHANGED
|
@@ -81,11 +81,9 @@ class CyberLegalAgent:
|
|
| 81 |
|
| 82 |
# Log LightRAG response
|
| 83 |
lightrag_context = lightrag_response.get("response", "")
|
| 84 |
-
logger.info(f"π LightRAG response received:")
|
| 85 |
-
logger.info(f"π Context length: {len(lightrag_context)} characters")
|
| 86 |
-
logger.info(f"π References found: {len(state.get('relevant_documents', []))}")
|
| 87 |
if len(lightrag_context) > 1500:
|
| 88 |
-
logger.info(f"π Context preview: {lightrag_context[:1500]}...")
|
| 89 |
else:
|
| 90 |
logger.info(f"π Full context: {lightrag_context}")
|
| 91 |
|
|
@@ -126,9 +124,9 @@ class CyberLegalAgent:
|
|
| 126 |
response = await self.llm.ainvoke(messages)
|
| 127 |
answer = response.content
|
| 128 |
state["final_response"] = answer
|
| 129 |
-
|
| 130 |
except Exception as e:
|
| 131 |
-
|
| 132 |
|
| 133 |
answer_generation_duration = self.performance_monitor.end_timer("answer_generation")
|
| 134 |
logger.info(f"β±οΈ Answer generation processing time: {answer_generation_duration:.3f}s")
|
|
@@ -190,9 +188,11 @@ class CyberLegalAgent:
|
|
| 190 |
|
| 191 |
# Run the workflow
|
| 192 |
final_state = await self.workflow.ainvoke(initial_state)
|
| 193 |
-
|
|
|
|
|
|
|
| 194 |
result = {
|
| 195 |
-
"response": final_state
|
| 196 |
"processing_time": final_state.get("processing_time", 0.0),
|
| 197 |
"references": final_state.get("relevant_documents", []),
|
| 198 |
"timestamp": final_state.get("query_timestamp")
|
|
|
|
| 81 |
|
| 82 |
# Log LightRAG response
|
| 83 |
lightrag_context = lightrag_response.get("response", "")
|
| 84 |
+
logger.info(f"π LightRAG response received\nπ Context length: {len(lightrag_context)} characters\nπ References found: {len(state.get('relevant_documents', []))}")
|
|
|
|
|
|
|
| 85 |
if len(lightrag_context) > 1500:
|
| 86 |
+
logger.info(f"π Context preview: {lightrag_context[:1500]}...")
|
| 87 |
else:
|
| 88 |
logger.info(f"π Full context: {lightrag_context}")
|
| 89 |
|
|
|
|
| 124 |
response = await self.llm.ainvoke(messages)
|
| 125 |
answer = response.content
|
| 126 |
state["final_response"] = answer
|
| 127 |
+
logger.info(f"π€ Answer generated with success")
|
| 128 |
except Exception as e:
|
| 129 |
+
raise e
|
| 130 |
|
| 131 |
answer_generation_duration = self.performance_monitor.end_timer("answer_generation")
|
| 132 |
logger.info(f"β±οΈ Answer generation processing time: {answer_generation_duration:.3f}s")
|
|
|
|
| 188 |
|
| 189 |
# Run the workflow
|
| 190 |
final_state = await self.workflow.ainvoke(initial_state)
|
| 191 |
+
logger.info("=" * 80)
|
| 192 |
+
logger.info(f"final state: {final_state}")
|
| 193 |
+
logger.info("=" * 80)
|
| 194 |
result = {
|
| 195 |
+
"response": final_state["final_response"],
|
| 196 |
"processing_time": final_state.get("processing_time", 0.0),
|
| 197 |
"references": final_state.get("relevant_documents", []),
|
| 198 |
"timestamp": final_state.get("query_timestamp")
|