Charles Grandjean committed on
Commit
661f497
·
1 Parent(s): ddc1c7f
Files changed (7) hide show
  1. add_secrets.ipynb +1 -11
  2. agent_api.py +0 -2
  3. agent_state.py +0 -2
  4. langraph_agent.py +36 -115
  5. prompts.py +7 -93
  6. test_openai_key.ipynb +2 -3
  7. utils.py +2 -0
add_secrets.ipynb CHANGED
@@ -32,17 +32,7 @@
32
  },
33
  {
34
  "cell_type": "code",
35
- "execution_count": null,
36
- "id": "90204225",
37
- "metadata": {},
38
- "outputs": [],
39
- "source": [
40
- "git remote add hf https://huggingface.co/spaces/Cyberlgl/CyberLegalAIendpoint\n"
41
- ]
42
- },
43
- {
44
- "cell_type": "code",
45
- "execution_count": 5,
46
  "id": "ebc2564d",
47
  "metadata": {},
48
  "outputs": [
 
32
  },
33
  {
34
  "cell_type": "code",
35
+ "execution_count": 3,
 
 
 
 
 
 
 
 
 
 
36
  "id": "ebc2564d",
37
  "metadata": {},
38
  "outputs": [
agent_api.py CHANGED
@@ -60,7 +60,6 @@ class ChatRequest(BaseModel):
60
 
61
  class ChatResponse(BaseModel):
62
  response: str = Field(..., description="Assistant's response")
63
- confidence: float = Field(..., description="Confidence score (0.0-1.0)")
64
  processing_time: float = Field(..., description="Processing time in seconds")
65
  references: List[str] = Field(default=[], description="Referenced documents")
66
  timestamp: str = Field(..., description="Response timestamp")
@@ -111,7 +110,6 @@ class CyberLegalAPI:
111
  # Create response
112
  response = ChatResponse(
113
  response=result["response"],
114
- confidence=result.get("confidence", 0.0),
115
  processing_time=result.get("processing_time", 0.0),
116
  references=result.get("references", []),
117
  timestamp=result.get("timestamp", datetime.now().isoformat()),
 
60
 
61
  class ChatResponse(BaseModel):
62
  response: str = Field(..., description="Assistant's response")
 
63
  processing_time: float = Field(..., description="Processing time in seconds")
64
  references: List[str] = Field(default=[], description="Referenced documents")
65
  timestamp: str = Field(..., description="Response timestamp")
 
110
  # Create response
111
  response = ChatResponse(
112
  response=result["response"],
 
113
  processing_time=result.get("processing_time", 0.0),
114
  references=result.get("references", []),
115
  timestamp=result.get("timestamp", datetime.now().isoformat()),
agent_state.py CHANGED
@@ -15,7 +15,6 @@ class AgentState(TypedDict):
15
  user_query: str
16
  conversation_history: List[Dict[str, str]]
17
 
18
- # LightRAG integration
19
  lightrag_response: Optional[Dict[str, Any]]
20
  lightrag_error: Optional[str]
21
 
@@ -30,7 +29,6 @@ class AgentState(TypedDict):
30
 
31
  # Final output
32
  final_response: Optional[str]
33
- confidence_score: Optional[float]
34
 
35
  # Metadata
36
  query_timestamp: str
 
15
  user_query: str
16
  conversation_history: List[Dict[str, str]]
17
 
 
18
  lightrag_response: Optional[Dict[str, Any]]
19
  lightrag_error: Optional[str]
20
 
 
29
 
30
  # Final output
31
  final_response: Optional[str]
 
32
 
33
  # Metadata
34
  query_timestamp: str
langraph_agent.py CHANGED
@@ -4,14 +4,18 @@ Simplified LangGraph agent implementation for cyber-legal assistant
4
  """
5
 
6
  import os
 
7
  from typing import Dict, Any, List, Optional
8
  from datetime import datetime
9
  from langgraph.graph import StateGraph, END
10
  from langchain_openai import ChatOpenAI
11
  from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
12
 
 
 
 
13
  from agent_state import AgentState, ConversationManager
14
- from prompts import SYSTEM_PROMPT, ERROR_HANDLING_PROMPT
15
  from utils import LightRAGClient, ConversationFormatter, PerformanceMonitor
16
 
17
 
@@ -45,34 +49,14 @@ class CyberLegalAgent:
45
  # Add nodes
46
  workflow.add_node("query_lightrag", self._query_lightrag)
47
  workflow.add_node("answer_with_context", self._answer_with_context)
48
- workflow.add_node("handle_error", self._handle_error)
49
 
50
  # Add edges
51
  workflow.set_entry_point("query_lightrag")
52
  workflow.add_edge("query_lightrag", "answer_with_context")
53
  workflow.add_edge("answer_with_context", END)
54
- workflow.add_edge("handle_error", END)
55
-
56
- # Add conditional edges
57
- workflow.add_conditional_edges(
58
- "query_lightrag",
59
- self._should_handle_error,
60
- {
61
- "error": "handle_error",
62
- "continue": "answer_with_context"
63
- }
64
- )
65
 
66
  return workflow.compile()
67
 
68
- def _should_handle_error(self, state: AgentState) -> str:
69
- """
70
- Determine if we should handle an error
71
- """
72
- if state.get("lightrag_error"):
73
- return "error"
74
- return "continue"
75
-
76
  async def _query_lightrag(self, state: AgentState) -> AgentState:
77
  """
78
  Query LightRAG for legal information
@@ -80,10 +64,6 @@ class CyberLegalAgent:
80
  self.performance_monitor.start_timer("lightrag_query")
81
 
82
  try:
83
- # Check LightRAG health
84
- if not self.lightrag_client.health_check():
85
- state["lightrag_error"] = "LightRAG server is not healthy"
86
- return state
87
 
88
  # Prepare conversation history for LightRAG
89
  history = state.get("conversation_history", [])
@@ -91,21 +71,28 @@ class CyberLegalAgent:
91
 
92
  # Query LightRAG
93
  query = state["user_query"]
94
- response = self.lightrag_client.query(
95
  query=query,
96
  conversation_history=formatted_history
97
  )
98
 
99
- if "error" in response:
100
- state["lightrag_error"] = response["error"]
 
 
 
 
 
 
 
 
101
  else:
102
- state["lightrag_response"] = response
103
- state["relevant_documents"] = self.lightrag_client.get_references(response)
104
 
105
  except Exception as e:
106
- state["lightrag_error"] = f"LightRAG query failed: {str(e)}"
107
-
108
- self.performance_monitor.end_timer("lightrag_query")
109
  return state
110
 
111
  async def _answer_with_context(self, state: AgentState) -> AgentState:
@@ -115,22 +102,10 @@ class CyberLegalAgent:
115
  self.performance_monitor.start_timer("answer_generation")
116
 
117
  try:
118
- if not state.get("lightrag_response"):
119
- state["lightrag_error"] = "No response from LightRAG"
120
- return state
121
-
122
- # Extract context from LightRAG response
123
  lightrag_response = state["lightrag_response"]
124
  context = lightrag_response.get("response", "")
125
-
126
- if not context:
127
- state["final_response"] = "I apologize, but I couldn't find relevant information for your query."
128
- return state
129
-
130
- # Build message stack with conversation history
131
  messages = []
132
-
133
- # Add conversation history messages
134
  history = state.get("conversation_history", [])
135
  has_system_message = False
136
 
@@ -143,49 +118,20 @@ class CyberLegalAgent:
143
  elif msg["role"] == "assistant":
144
  messages.append(AIMessage(content=msg["content"]))
145
 
146
- # Add system message only if not present in history
147
  if not has_system_message:
148
  messages.insert(0, SystemMessage(content=SYSTEM_PROMPT))
149
-
150
- # Add final message with user query and LightRAG context
151
- answer_prompt = f"""Based on the following retrieved legal information, please answer the user's question accurately and comprehensively.
152
-
153
- **User Question:** {state["user_query"]}
154
-
155
- **Retrieved Legal Context:**
156
- {context}
157
-
158
- **Instructions:**
159
- 1. Answer the user's question directly based on the provided context
160
- 2. If the context doesn't fully answer the question, acknowledge the limitations
161
- 3. Provide specific legal references when available in the context
162
- 4. Include practical implications for organizations
163
- 5. Add a disclaimer that this is for guidance purposes only
164
-
165
- Please provide a clear, well-structured response."""
166
-
167
  messages.append(HumanMessage(content=answer_prompt))
168
 
169
  response = await self.llm.ainvoke(messages)
170
  answer = response.content
171
-
172
- # Add references if available
173
- references = state.get("relevant_documents", [])
174
- if references:
175
- answer += "\n\n**📚 References:**\n"
176
- for ref in references[:3]: # Limit to top 3 references
177
- answer += f"• {ref}\n"
178
-
179
- # Add standard disclaimer
180
- answer += "\n\n**Disclaimer:** This information is for guidance purposes only and not legal advice. For specific legal matters, consult with qualified legal counsel."
181
-
182
  state["final_response"] = answer
183
- state["confidence_score"] = 0.8 # High confidence when LightRAG provides good context
184
 
185
  except Exception as e:
186
  state["lightrag_error"] = f"Answer generation failed: {str(e)}"
187
 
188
- self.performance_monitor.end_timer("answer_generation")
 
189
 
190
  # Record total processing time
191
  total_time = sum(
@@ -195,31 +141,8 @@ Please provide a clear, well-structured response."""
195
  state["processing_time"] = total_time
196
  state["query_timestamp"] = datetime.now().isoformat()
197
 
198
- return state
199
-
200
- async def _handle_error(self, state: AgentState) -> AgentState:
201
- """
202
- Handle errors gracefully
203
- """
204
- error = state.get("lightrag_error", "Unknown error occurred")
205
-
206
- error_prompt = ERROR_HANDLING_PROMPT.format(error_message=error)
207
-
208
- try:
209
- messages = [
210
- SystemMessage(content=SYSTEM_PROMPT),
211
- HumanMessage(content=error_prompt)
212
- ]
213
-
214
- response = await self.llm.ainvoke(messages)
215
- state["final_response"] = response.content
216
-
217
- except Exception:
218
- state["final_response"] = f"I apologize, but an error occurred: {error}"
219
-
220
- state["confidence_score"] = 0.2 # Low confidence for errors
221
- state["processing_time"] = self.performance_monitor.get_metrics()
222
- state["query_timestamp"] = datetime.now().isoformat()
223
 
224
  return state
225
 
@@ -235,42 +158,40 @@ Please provide a clear, well-structured response."""
235
  initial_state: AgentState = {
236
  "user_query": user_query,
237
  "conversation_history": conversation_history or [],
238
- "lightrag_response": None,
239
- "lightrag_error": None,
240
  "processed_context": None,
241
  "relevant_documents": [],
242
  "analysis_thoughts": None,
243
- "needs_clarification": False,
244
- "clarification_question": None,
245
- "final_response": None,
246
- "confidence_score": None,
247
  "query_timestamp": datetime.now().isoformat(),
248
  "processing_time": None,
249
- "query_type": None
250
  }
251
 
252
  # Reset performance monitor
253
  self.performance_monitor.reset()
254
 
255
  try:
 
 
 
256
  # Run the workflow
257
  final_state = await self.workflow.ainvoke(initial_state)
258
 
259
- return {
260
  "response": final_state.get("final_response", ""),
261
- "confidence": final_state.get("confidence_score", 0.0),
262
  "processing_time": final_state.get("processing_time", 0.0),
263
  "references": final_state.get("relevant_documents", []),
264
- "error": final_state.get("lightrag_error"),
265
  "timestamp": final_state.get("query_timestamp")
266
  }
267
 
 
 
 
 
 
268
  except Exception as e:
 
269
  return {
270
  "response": f"I apologize, but a critical error occurred: {str(e)}",
271
- "confidence": 0.0,
272
  "processing_time": 0.0,
273
  "references": [],
274
- "error": str(e),
275
  "timestamp": datetime.now().isoformat()
276
  }
 
4
  """
5
 
6
  import os
7
+ import logging
8
  from typing import Dict, Any, List, Optional
9
  from datetime import datetime
10
  from langgraph.graph import StateGraph, END
11
  from langchain_openai import ChatOpenAI
12
  from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
13
 
14
+ # Configure logging
15
+ logger = logging.getLogger(__name__)
16
+
17
  from agent_state import AgentState, ConversationManager
18
+ from prompts import SYSTEM_PROMPT, ERROR_HANDLING_PROMPT, RESPONSE_FORMATTING_PROMPT
19
  from utils import LightRAGClient, ConversationFormatter, PerformanceMonitor
20
 
21
 
 
49
  # Add nodes
50
  workflow.add_node("query_lightrag", self._query_lightrag)
51
  workflow.add_node("answer_with_context", self._answer_with_context)
 
52
 
53
  # Add edges
54
  workflow.set_entry_point("query_lightrag")
55
  workflow.add_edge("query_lightrag", "answer_with_context")
56
  workflow.add_edge("answer_with_context", END)
 
 
 
 
 
 
 
 
 
 
 
57
 
58
  return workflow.compile()
59
 
 
 
 
 
 
 
 
 
60
  async def _query_lightrag(self, state: AgentState) -> AgentState:
61
  """
62
  Query LightRAG for legal information
 
64
  self.performance_monitor.start_timer("lightrag_query")
65
 
66
  try:
 
 
 
 
67
 
68
  # Prepare conversation history for LightRAG
69
  history = state.get("conversation_history", [])
 
71
 
72
  # Query LightRAG
73
  query = state["user_query"]
74
+ lightrag_response = self.lightrag_client.query(
75
  query=query,
76
  conversation_history=formatted_history
77
  )
78
 
79
+ state["lightrag_response"] = lightrag_response
80
+ state["relevant_documents"] = self.lightrag_client.get_references(lightrag_response)
81
+
82
+ # Log LightRAG response
83
+ lightrag_context = lightrag_response.get("response", "")
84
+ logger.info(f"🔍 LightRAG response received:")
85
+ logger.info(f"📄 Context length: {len(lightrag_context)} characters")
86
+ logger.info(f"📚 References found: {len(state.get('relevant_documents', []))}")
87
+ if len(lightrag_context) > 1500:
88
+ logger.info(f"📝 Context preview: {lightrag_context[:500]}...")
89
  else:
90
+ logger.info(f"📝 Full context: {lightrag_context}")
 
91
 
92
  except Exception as e:
93
+ raise e
94
+ lightrag_duration = self.performance_monitor.end_timer("lightrag_query")
95
+ logger.info(f"⏱️ LightRAG query processing time: {lightrag_duration:.3f}s")
96
  return state
97
 
98
  async def _answer_with_context(self, state: AgentState) -> AgentState:
 
102
  self.performance_monitor.start_timer("answer_generation")
103
 
104
  try:
 
 
 
 
 
105
  lightrag_response = state["lightrag_response"]
106
  context = lightrag_response.get("response", "")
 
 
 
 
 
 
107
  messages = []
108
+
 
109
  history = state.get("conversation_history", [])
110
  has_system_message = False
111
 
 
118
  elif msg["role"] == "assistant":
119
  messages.append(AIMessage(content=msg["content"]))
120
 
 
121
  if not has_system_message:
122
  messages.insert(0, SystemMessage(content=SYSTEM_PROMPT))
123
+ answer_prompt = RESPONSE_FORMATTING_PROMPT.format(context=context, query=state["user_query"])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
124
  messages.append(HumanMessage(content=answer_prompt))
125
 
126
  response = await self.llm.ainvoke(messages)
127
  answer = response.content
 
 
 
 
 
 
 
 
 
 
 
128
  state["final_response"] = answer
 
129
 
130
  except Exception as e:
131
  state["lightrag_error"] = f"Answer generation failed: {str(e)}"
132
 
133
+ answer_generation_duration = self.performance_monitor.end_timer("answer_generation")
134
+ logger.info(f"⏱️ Answer generation processing time: {answer_generation_duration:.3f}s")
135
 
136
  # Record total processing time
137
  total_time = sum(
 
141
  state["processing_time"] = total_time
142
  state["query_timestamp"] = datetime.now().isoformat()
143
 
144
+ logger.info(f"⏱️ Total query processing time: {total_time:.3f}s")
145
+ logger.info(f"📚 References found: {len(state.get('relevant_documents', []))}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
146
 
147
  return state
148
 
 
158
  initial_state: AgentState = {
159
  "user_query": user_query,
160
  "conversation_history": conversation_history or [],
 
 
161
  "processed_context": None,
162
  "relevant_documents": [],
163
  "analysis_thoughts": None,
 
 
 
 
164
  "query_timestamp": datetime.now().isoformat(),
165
  "processing_time": None,
 
166
  }
167
 
168
  # Reset performance monitor
169
  self.performance_monitor.reset()
170
 
171
  try:
172
+ logger.info(f"🚀 Starting query processing: {user_query[:100]}...")
173
+ logger.info(f"💬 Conversation history length: {len(initial_state['conversation_history'])} messages")
174
+
175
  # Run the workflow
176
  final_state = await self.workflow.ainvoke(initial_state)
177
 
178
+ result = {
179
  "response": final_state.get("final_response", ""),
 
180
  "processing_time": final_state.get("processing_time", 0.0),
181
  "references": final_state.get("relevant_documents", []),
 
182
  "timestamp": final_state.get("query_timestamp")
183
  }
184
 
185
+ logger.info(f"✅ Query processing completed successfully")
186
+ logger.info(f"📄 Response length: {len(result['response'])} characters")
187
+
188
+ return result
189
+
190
  except Exception as e:
191
+ logger.error(f"💥 Critical error during query processing: {str(e)}")
192
  return {
193
  "response": f"I apologize, but a critical error occurred: {str(e)}",
 
194
  "processing_time": 0.0,
195
  "references": [],
 
196
  "timestamp": datetime.now().isoformat()
197
  }
prompts.py CHANGED
@@ -3,55 +3,14 @@
3
  System prompts for the LangGraph cyber-legal assistant
4
  """
5
 
6
- SYSTEM_PROMPT = """You are an expert cyber-legal assistant specializing in European Union regulations and directives.
7
- Your expertise covers:
 
8
 
9
- - GDPR (General Data Protection Regulation)
10
- - NIS2 Directive (Network and Information Systems Directive 2)
11
- - DORA (Digital Operational Resilience Act)
12
- - Cyber Resilience Act (CRA)
13
- - eIDAS 2.0 (Electronic Identification, Authentication and Trust Services)
14
- - Romanian Civil Code provisions relevant to cyber security
15
-
16
- **Your Role:**
17
- Provide accurate, clear, and practical information about cyber-legal regulations. Always base your responses on the retrieved legal documents and context provided.
18
-
19
- **Guidelines:**
20
- 1. Be precise and accurate with legal information
21
- 2. Provide practical examples when helpful
22
- 3. Clarify jurisdiction (EU-wide vs member state implementation)
23
- 4. Mention important dates, deadlines, or transitional periods
24
- 5. Include relevant penalties or enforcement mechanisms when applicable
25
- 6. Suggest official sources for further reading
26
-
27
- **Response Structure:**
28
- 1. Direct answer to the user's question
29
- 2. Relevant legal basis (specific articles, sections)
30
- 3. Practical implications
31
- 4. Related compliance requirements
32
- 5. References to source documents
33
-
34
- **Important Disclaimer:**
35
- Always include a note that this information is for guidance purposes and not legal advice. For specific legal matters, consult with qualified legal counsel."""
36
-
37
- CONTEXT_ENHANCEMENT_PROMPT = """Based on the following RAG response about European cyber-legal regulations, enhance the information by:
38
-
39
- 1. **Structuring**: Organize the information in a clear, logical manner
40
- 2. **Context**: Add relevant background information about the regulation
41
- 3. **Practicality**: Include practical implications for organizations
42
- 4. **Completeness**: Fill in gaps with general knowledge about EU regulations
43
- 5. **Clarity**: Ensure complex legal concepts are explained clearly
44
-
45
- **RAG Response:**
46
- {lightrag_response}
47
-
48
- **Conversation Context:**
49
- {conversation_context}
50
-
51
- **User Query:**
52
- {user_query}
53
-
54
- Please provide an enhanced response that is more comprehensive and user-friendly while maintaining accuracy."""
55
 
56
  ERROR_HANDLING_PROMPT = """I apologize, but I encountered an issue while retrieving information from the legal database.
57
 
@@ -73,14 +32,6 @@ ERROR_HANDLING_PROMPT = """I apologize, but I encountered an issue while retriev
73
 
74
  Would you like to try asking your question in a different way?"""
75
 
76
- CLARIFICATION_PROMPT = """To provide you with the most accurate information, I need a bit more detail about your question.
77
-
78
- **Your Question:** {user_query}
79
-
80
- **Clarification Needed:** {clarification_question}
81
-
82
- This will help me search the specific legal provisions that are most relevant to your situation."""
83
-
84
  RESPONSE_FORMATTING_PROMPT = """Format the final response according to these guidelines:
85
 
86
  1. **Clear Heading**: Start with a clear, direct answer
@@ -94,40 +45,3 @@ RESPONSE_FORMATTING_PROMPT = """Format the final response according to these gui
94
  {content}
95
 
96
  **User Query:** {user_query}"""
97
-
98
- FOLLOW_UP_SUGGESTIONS_PROMPT = """Based on the user's query about "{user_query}", suggest relevant follow-up questions that might be helpful:
99
-
100
- Consider:
101
- 1. Related regulations they might need to know about
102
- 2. Implementation or compliance aspects
103
- 3. Similar scenarios or use cases
104
- 4. Recent updates or changes
105
-
106
- Provide 3-4 relevant follow-up suggestions."""
107
-
108
- CONVERSATION_SUMMARY_PROMPT = """Summarize the key points discussed in this conversation about European cyber-legal regulations:
109
-
110
- **Conversation History:**
111
- {conversation_history}
112
-
113
- **Focus Areas:**
114
- - Main regulations discussed
115
- - Key compliance points mentioned
116
- - Important deadlines or requirements
117
- - Any specific scenarios covered
118
-
119
- Provide a concise summary that captures the essence of the legal discussion."""
120
-
121
- CONFIDENCE_ASSESSMENT_PROMPT = """Assess the confidence level of the provided response based on:
122
-
123
- 1. **Source Quality**: How reliable are the referenced documents?
124
- 2. **Information Completeness**: Does the response fully address the query?
125
- 3. **Legal Specificity**: How specific and accurate are the legal references?
126
- 4. **Context Relevance**: How well does it match the user's needs?
127
-
128
- **Response to Assess:**
129
- {response}
130
-
131
- **User Query:** {user_query}
132
-
133
- Provide a confidence score (0.0-1.0) and brief reasoning."""
 
3
  System prompts for the LangGraph cyber-legal assistant
4
  """
5
 
6
+ SYSTEM_PROMPT = """### Role
7
+ You are an expert cyber-legal assistant specializing in European Union regulations and directives.
8
+ You must answer user's questions based on a legal knowledge, we queried a knowledge graph to about the user's question.
9
 
10
+ ### Guidelines
11
+ 1. Your responses should be clear, concise, and provide a direct answer to the user's question.
12
+ 2. If you use specific knowledge from a regulation or directive, you should reference it in your response.
13
+ 4. Create a section at the end of your response called "References" that list the source documents used to answer the user's question."""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
  ERROR_HANDLING_PROMPT = """I apologize, but I encountered an issue while retrieving information from the legal database.
16
 
 
32
 
33
  Would you like to try asking your question in a different way?"""
34
 
 
 
 
 
 
 
 
 
35
  RESPONSE_FORMATTING_PROMPT = """Format the final response according to these guidelines:
36
 
37
  1. **Clear Heading**: Start with a clear, direct answer
 
45
  {content}
46
 
47
  **User Query:** {user_query}"""
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
test_openai_key.ipynb CHANGED
@@ -37,15 +37,14 @@
37
  },
38
  {
39
  "cell_type": "code",
40
- "execution_count": 8,
41
  "metadata": {},
42
  "outputs": [
43
  {
44
  "name": "stdout",
45
  "output_type": "stream",
46
  "text": [
47
- " OpenAI API Test Successful!\n",
48
- "Response: API key works!\n"
49
  ]
50
  }
51
  ],
 
37
  },
38
  {
39
  "cell_type": "code",
40
+ "execution_count": 2,
41
  "metadata": {},
42
  "outputs": [
43
  {
44
  "name": "stdout",
45
  "output_type": "stream",
46
  "text": [
47
+ " OpenAI API Test Failed: Error code: 429 - {'error': {'message': 'You exceeded your current quota, please check your plan and billing details. For more information on this error, read the docs: https://platform.openai.com/docs/guides/error-codes/api-errors.', 'type': 'insufficient_quota', 'param': None, 'code': 'insufficient_quota'}}\n"
 
48
  ]
49
  }
50
  ],
utils.py CHANGED
@@ -77,7 +77,9 @@ class LightRAGClient:
77
  )
78
 
79
  if response.status_code == 200:
 
80
  return response.json()
 
81
  else:
82
  logger.warning(f"Query failed with status {response.status_code}, attempt {attempt + 1}")
83
 
 
77
  )
78
 
79
  if response.status_code == 200:
80
+ logger.info(f"Query successful;{response.json()}")
81
  return response.json()
82
+
83
  else:
84
  logger.warning(f"Query failed with status {response.status_code}, attempt {attempt + 1}")
85