Charles Grandjean committed on
Commit
0e8eff5
·
1 Parent(s): 8a6f47d

update format of answers

Browse files
Files changed (2) hide show
  1. agent_api.py +3 -40
  2. langraph_agent.py +24 -8
agent_api.py CHANGED
@@ -56,8 +56,6 @@ class Message(BaseModel):
56
 
57
  class ChatRequest(BaseModel):
58
  message: str = Field(..., description="User's question")
59
- role: str = Field(..., description="User role: 'client' or 'lawyer'")
60
- jurisdiction: str = Field(..., description="Selected jurisdiction")
61
  conversationHistory: Optional[List[Message]] = Field(default=[], description="Previous conversation messages")
62
 
63
  class ChatResponse(BaseModel):
@@ -104,12 +102,9 @@ class CyberLegalAPI:
104
  })
105
 
106
  try:
107
- # Create enhanced query with context
108
- enhanced_query = self._create_enhanced_query(request)
109
-
110
- # Process through agent
111
  result = await self.agent.process_query(
112
- user_query=enhanced_query,
113
  conversation_history=conversation_history
114
  )
115
 
@@ -131,34 +126,6 @@ class CyberLegalAPI:
131
  detail=f"Processing failed: {str(e)}"
132
  )
133
 
134
- def _create_enhanced_query(self, request: ChatRequest) -> str:
135
- """
136
- Create enhanced query with role and jurisdiction context
137
- """
138
- base_query = request.message
139
-
140
- # Add role context
141
- role_context = ""
142
- if request.role == "client":
143
- role_context = "Answer from the perspective of advising a client who needs practical guidance."
144
- elif request.role == "lawyer":
145
- role_context = "Answer from the perspective of providing legal analysis for a legal professional."
146
-
147
- # Add jurisdiction context
148
- jurisdiction_context = f"Focus on the legal framework in {request.jurisdiction}."
149
-
150
- # Combine into enhanced query
151
- enhanced_query = f"""{base_query}
152
-
153
- Context:
154
- - User Role: {request.role}
155
- - Jurisdiction: {request.jurisdiction}
156
- - Special Instructions: {role_context} {jurisdiction_context}
157
-
158
- Please provide a response tailored to this context."""
159
-
160
- return enhanced_query
161
-
162
  async def health_check(self) -> HealthResponse:
163
  """
164
  Check health status of the API and dependencies
@@ -234,10 +201,6 @@ async def root():
234
  "chat": "POST /chat - Chat with the assistant",
235
  "health": "GET /health - Health check"
236
  },
237
- "supported_jurisdictions": [
238
- "EU", "France", "Germany", "Italy", "Spain", "Romania", "Netherlands", "Belgium"
239
- ],
240
- "user_roles": ["client", "lawyer"],
241
  "expertise": [
242
  "GDPR", "NIS2", "DORA", "Cyber Resilience Act", "eIDAS 2.0"
243
  ]
@@ -266,4 +229,4 @@ if __name__ == "__main__":
266
  port=port,
267
  reload=False,
268
  log_level="info"
269
- )
 
56
 
57
  class ChatRequest(BaseModel):
58
  message: str = Field(..., description="User's question")
 
 
59
  conversationHistory: Optional[List[Message]] = Field(default=[], description="Previous conversation messages")
60
 
61
  class ChatResponse(BaseModel):
 
102
  })
103
 
104
  try:
105
+ # Process through agent with raw message and conversation history
 
 
 
106
  result = await self.agent.process_query(
107
+ user_query=request.message,
108
  conversation_history=conversation_history
109
  )
110
 
 
126
  detail=f"Processing failed: {str(e)}"
127
  )
128
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
129
  async def health_check(self) -> HealthResponse:
130
  """
131
  Check health status of the API and dependencies
 
201
  "chat": "POST /chat - Chat with the assistant",
202
  "health": "GET /health - Health check"
203
  },
 
 
 
 
204
  "expertise": [
205
  "GDPR", "NIS2", "DORA", "Cyber Resilience Act", "eIDAS 2.0"
206
  ]
 
229
  port=port,
230
  reload=False,
231
  log_level="info"
232
+ )
langraph_agent.py CHANGED
@@ -8,7 +8,7 @@ from typing import Dict, Any, List, Optional
8
  from datetime import datetime
9
  from langgraph.graph import StateGraph, END
10
  from langchain_openai import ChatOpenAI
11
- from langchain_core.messages import HumanMessage, SystemMessage
12
 
13
  from agent_state import AgentState, ConversationManager
14
  from prompts import SYSTEM_PROMPT, ERROR_HANDLING_PROMPT
@@ -127,7 +127,27 @@ class CyberLegalAgent:
127
  state["final_response"] = "I apologize, but I couldn't find relevant information for your query."
128
  return state
129
 
130
- # Create prompt for LLM to answer based on retrieved context
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
131
  answer_prompt = f"""Based on the following retrieved legal information, please answer the user's question accurately and comprehensively.
132
 
133
  **User Question:** {state["user_query"]}
@@ -143,12 +163,8 @@ class CyberLegalAgent:
143
  5. Add a disclaimer that this is for guidance purposes only
144
 
145
  Please provide a clear, well-structured response."""
146
-
147
- # Get answer from LLM
148
- messages = [
149
- SystemMessage(content=SYSTEM_PROMPT),
150
- HumanMessage(content=answer_prompt)
151
- ]
152
 
153
  response = await self.llm.ainvoke(messages)
154
  answer = response.content
 
8
  from datetime import datetime
9
  from langgraph.graph import StateGraph, END
10
  from langchain_openai import ChatOpenAI
11
+ from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
12
 
13
  from agent_state import AgentState, ConversationManager
14
  from prompts import SYSTEM_PROMPT, ERROR_HANDLING_PROMPT
 
127
  state["final_response"] = "I apologize, but I couldn't find relevant information for your query."
128
  return state
129
 
130
+ # Build message stack with conversation history
131
+ messages = []
132
+
133
+ # Add conversation history messages
134
+ history = state.get("conversation_history", [])
135
+ has_system_message = False
136
+
137
+ for msg in history:
138
+ if msg["role"] == "system":
139
+ messages.append(SystemMessage(content=msg["content"]))
140
+ has_system_message = True
141
+ elif msg["role"] == "user":
142
+ messages.append(HumanMessage(content=msg["content"]))
143
+ elif msg["role"] == "assistant":
144
+ messages.append(AIMessage(content=msg["content"]))
145
+
146
+ # Add system message only if not present in history
147
+ if not has_system_message:
148
+ messages.insert(0, SystemMessage(content=SYSTEM_PROMPT))
149
+
150
+ # Add final message with user query and LightRAG context
151
  answer_prompt = f"""Based on the following retrieved legal information, please answer the user's question accurately and comprehensively.
152
 
153
  **User Question:** {state["user_query"]}
 
163
  5. Add a disclaimer that this is for guidance purposes only
164
 
165
  Please provide a clear, well-structured response."""
166
+
167
+ messages.append(HumanMessage(content=answer_prompt))
 
 
 
 
168
 
169
  response = await self.llm.ainvoke(messages)
170
  answer = response.content