Charles Grandjean committed on
Commit
db8e436
·
1 Parent(s): 56880e2

fix graph by jurisdiction

Browse files
agent_api.py CHANGED
@@ -19,7 +19,8 @@ import secrets
19
 
20
  from structured_outputs.api_models import (
21
  Message, DocumentAnalysis, ChatRequest, ChatResponse,
22
- HealthResponse, AnalyzePDFRequest, AnalyzePDFResponse
 
23
  )
24
  from langraph_agent import CyberLegalAgent
25
  from utils.conversation_manager import ConversationManager
@@ -125,21 +126,45 @@ class CyberLegalAPI:
125
  self.conversation_manager = ConversationManager()
126
  logger.info(f"🔧 CyberLegalAPI initialized with {llm_provider.upper()} provider")
127
 
128
- def _build_lawyer_prompt(self, document_analyses: Optional[List[DocumentAnalysis]], jurisdiction: str) -> str:
129
- """Build lawyer prompt with optional document context"""
130
- if not document_analyses:
131
- return SYSTEM_PROMPT_LAWYER.format(jurisdiction=jurisdiction)
132
 
133
- docs_text = "\n\n### Documents parsed in the lawyer profile\n"
134
- for i, doc in enumerate(document_analyses, 1):
135
- docs_text += f"[Doc {i}] {doc.file_name}\n"
136
- if doc.summary: docs_text += f"Summary: {doc.summary}\n"
137
- if doc.actors: docs_text += f"Actors: {doc.actors}\n"
138
- if doc.key_details: docs_text += f"Key Details: {doc.key_details}\n"
139
- docs_text += "\n"
 
 
 
 
 
 
 
 
 
 
140
 
141
- docs_text += "Use these documents if the user's question is related to their content.\n"
142
- return SYSTEM_PROMPT_LAWYER.format(jurisdiction=jurisdiction) + docs_text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
143
 
144
  async def process_request(self, request: ChatRequest) -> ChatResponse:
145
  """
@@ -173,13 +198,22 @@ class CyberLegalAPI:
173
  logger.info(f"💬 User query: {request.message}")
174
 
175
  try:
176
- # Build dynamic system prompt for lawyers with document analyses
177
- if request.userType == "lawyer" and request.documentAnalyses:
178
- system_prompt = self._build_lawyer_prompt(request.documentAnalyses, request.jurisdiction)
179
- logger.info(f"📚 Using lawyer prompt with {len(request.documentAnalyses)} document analyses")
180
- elif request.userType == "lawyer":
181
- system_prompt = SYSTEM_PROMPT_LAWYER.format(jurisdiction=request.jurisdiction)
182
- logger.info(f"📝 Using default lawyer prompt with jurisdiction: {request.jurisdiction}")
 
 
 
 
 
 
 
 
 
183
  else:
184
  system_prompt = SYSTEM_PROMPT_CLIENT.format(jurisdiction=request.jurisdiction)
185
  logger.info(f"👤 Using client prompt with jurisdiction: {request.jurisdiction}")
 
19
 
20
  from structured_outputs.api_models import (
21
  Message, DocumentAnalysis, ChatRequest, ChatResponse,
22
+ HealthResponse, AnalyzePDFRequest, AnalyzePDFResponse,
23
+ LawyerProfile
24
  )
25
  from langraph_agent import CyberLegalAgent
26
  from utils.conversation_manager import ConversationManager
 
126
  self.conversation_manager = ConversationManager()
127
  logger.info(f"🔧 CyberLegalAPI initialized with {llm_provider.upper()} provider")
128
 
129
    def _build_lawyer_prompt(self, document_analyses: Optional[List[DocumentAnalysis]], jurisdiction: str, lawyer_profile: Optional[LawyerProfile] = None) -> str:
        """Build lawyer prompt with optional document context and lawyer profile.

        Args:
            document_analyses: Parsed document analyses to expose to the model,
                or None/empty when no documents were uploaded.
            jurisdiction: Substituted into SYSTEM_PROMPT_LAWYER's
                ``{jurisdiction}`` placeholder.
            lawyer_profile: Optional professional profile used to tailor the
                assistant's tone and depth to the lawyer's seniority.

        Returns:
            The base lawyer system prompt, with a profile section and/or a
            documents section appended when the corresponding input is present.
        """
        prompt_parts = []

        # Add lawyer profile context if available.
        # Every profile field is optional; only truthy values are rendered.
        if lawyer_profile:
            profile_text = "\n\n### Lawyer Profile Context\n"
            if lawyer_profile.full_name:
                profile_text += f"Name: {lawyer_profile.full_name}\n"
            if lawyer_profile.primary_specialty:
                profile_text += f"Primary Specialty: {lawyer_profile.primary_specialty}\n"
            if lawyer_profile.legal_specialties:
                profile_text += f"Specialties: {', '.join(lawyer_profile.legal_specialties)}\n"
            if lawyer_profile.experience_level:
                profile_text += f"Experience Level: {lawyer_profile.experience_level}\n"
            if lawyer_profile.languages:
                profile_text += f"Languages: {', '.join(lawyer_profile.languages)}\n"
            if lawyer_profile.lawyer_description:
                profile_text += f"Description: {lawyer_profile.lawyer_description}\n"
            profile_text += "\nWhen answering, consider this lawyer's expertise and experience level. Tailor your responses to be appropriate for their seniority and specialization.\n"
            prompt_parts.append(profile_text)

        # Add document analyses if available. Each document gets a numbered
        # entry; optional attributes are included only when present.
        if document_analyses:
            docs_text = "\n### Documents parsed in the lawyer profile\n"
            for i, doc in enumerate(document_analyses, 1):
                docs_text += f"[Doc {i}] {doc.file_name}\n"
                if doc.summary: docs_text += f"Summary: {doc.summary}\n"
                if doc.actors: docs_text += f"Actors: {doc.actors}\n"
                if doc.key_details: docs_text += f"Key Details: {doc.key_details}\n"
                docs_text += "\n"
            docs_text += "Use these documents if the user's question is related to their content.\n"
            prompt_parts.append(docs_text)

        # Combine base prompt with context; with no context the base prompt is
        # returned unchanged.
        base_prompt = SYSTEM_PROMPT_LAWYER.format(jurisdiction=jurisdiction)
        if prompt_parts:
            return base_prompt + "\n".join(prompt_parts)
        return base_prompt
168
 
169
  async def process_request(self, request: ChatRequest) -> ChatResponse:
170
  """
 
198
  logger.info(f"💬 User query: {request.message}")
199
 
200
  try:
201
+ # Build dynamic system prompt for lawyers with document analyses and/or lawyer profile
202
+ if request.userType == "lawyer":
203
+ system_prompt = self._build_lawyer_prompt(
204
+ request.documentAnalyses,
205
+ request.jurisdiction,
206
+ request.lawyerProfile
207
+ )
208
+ context_parts = []
209
+ if request.lawyerProfile:
210
+ context_parts.append("lawyer profile")
211
+ if request.documentAnalyses:
212
+ context_parts.append(f"{len(request.documentAnalyses)} document analyses")
213
+ if context_parts:
214
+ logger.info(f"📚 Using lawyer prompt with {', '.join(context_parts)}")
215
+ else:
216
+ logger.info(f"📝 Using default lawyer prompt with jurisdiction: {request.jurisdiction}")
217
  else:
218
  system_prompt = SYSTEM_PROMPT_CLIENT.format(jurisdiction=request.jurisdiction)
219
  logger.info(f"👤 Using client prompt with jurisdiction: {request.jurisdiction}")
langraph_agent.py CHANGED
@@ -92,8 +92,11 @@ class CyberLegalAgent:
92
  args["conversation_history"] = state.get("conversation_history", [])
93
  logger.info(f"📝 Passing conversation_history to {tool_call['name']}: {len(args['conversation_history'])} messages")
94
 
 
 
 
95
  result = await tool_func.ainvoke(args)
96
- logger.info(f"🔧 Tool {tool_call['name']} returned: {result}")
97
  intermediate_steps.append(ToolMessage(content=str(result), tool_call_id=tool_call['id'], name=tool_call['name']))
98
 
99
  state["intermediate_steps"] = intermediate_steps
 
92
  args["conversation_history"] = state.get("conversation_history", [])
93
  logger.info(f"📝 Passing conversation_history to {tool_call['name']}: {len(args['conversation_history'])} messages")
94
 
95
+ # Pass jurisdiction to query_knowledge_graph tool
96
+ if tool_call['name'] == "query_knowledge_graph":
97
+ args["jurisdiction"] = state.get("jurisdiction")
98
  result = await tool_func.ainvoke(args)
99
+ logger.info(f"🔧 Tool {tool_call} returned: {result}")
100
  intermediate_steps.append(ToolMessage(content=str(result), tool_call_id=tool_call['id'], name=tool_call['name']))
101
 
102
  state["intermediate_steps"] = intermediate_steps
structured_outputs/api_models.py CHANGED
@@ -13,6 +13,17 @@ class Message(BaseModel):
13
  content: str = Field(..., description="Message content")
14
 
15
 
 
 
 
 
 
 
 
 
 
 
 
16
  class DocumentAnalysis(BaseModel):
17
  """Document analysis result"""
18
  file_name: str
@@ -28,6 +39,7 @@ class ChatRequest(BaseModel):
28
  userType: Optional[str] = Field(default="client", description="User type: 'client' for general users or 'lawyer' for legal professionals")
29
  jurisdiction: Optional[str] = Field(default="Romania", description="Jurisdiction of the user")
30
  documentAnalyses: Optional[List[DocumentAnalysis]] = Field(default=None, description="Lawyer's document analyses")
 
31
 
32
 
33
  class ChatResponse(BaseModel):
 
13
  content: str = Field(..., description="Message content")
14
 
15
 
16
class LawyerProfile(BaseModel):
    """Lawyer's professional profile.

    All fields are optional: the profile arrives from an external API and may
    be partially populated. Missing fields default to None.
    """
    full_name: Optional[str] = Field(None, description="Lawyer's full name")
    primary_specialty: Optional[str] = Field(None, description="Primary legal specialty")
    legal_specialties: Optional[List[str]] = Field(None, description="List of legal specialties")
    jurisdiction: Optional[str] = Field(None, description="Primary jurisdiction of practice")
    experience_level: Optional[str] = Field(None, description="Experience level (e.g., Junior, Mid-level, Senior, Partner)")
    languages: Optional[List[str]] = Field(None, description="Languages spoken")
    lawyer_description: Optional[str] = Field(None, description="Professional description or bio")
25
+
26
+
27
  class DocumentAnalysis(BaseModel):
28
  """Document analysis result"""
29
  file_name: str
 
39
  userType: Optional[str] = Field(default="client", description="User type: 'client' for general users or 'lawyer' for legal professionals")
40
  jurisdiction: Optional[str] = Field(default="Romania", description="Jurisdiction of the user")
41
  documentAnalyses: Optional[List[DocumentAnalysis]] = Field(default=None, description="Lawyer's document analyses")
42
+ lawyerProfile: Optional[LawyerProfile] = Field(default=None, description="Lawyer's professional profile")
43
 
44
 
45
  class ChatResponse(BaseModel):
utils/lawyer_profile_formatter.py ADDED
@@ -0,0 +1,111 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Utility functions to transform external API lawyer data into our LawyerProfile format
4
+ """
5
+
6
+ import logging
7
+ from typing import List, Optional, Dict, Any
8
+ from structured_outputs.api_models import LawyerProfile
9
+
10
+ logger = logging.getLogger(__name__)
11
+
12
+
13
def format_lawyer_profile_from_api(api_lawyer_data: Dict[str, Any]) -> Optional[LawyerProfile]:
    """Transform a raw lawyer dict from the external API into a LawyerProfile.

    List-valued and free-text fields are normalized via the module's private
    helpers; any validation failure is logged and None is returned.
    """
    raw = api_lawyer_data.get
    try:
        return LawyerProfile(
            full_name=raw('full_name'),
            primary_specialty=raw('primary_specialty'),
            legal_specialties=_process_list_field(raw('legal_specialties')),
            jurisdiction=raw('jurisdiction'),
            experience_level=raw('experience_level'),
            languages=_process_list_field(raw('languages')),
            lawyer_description=_process_string_field(raw('lawyer_description'))
        )
    except Exception as e:
        # Best-effort: a single malformed lawyer record must not abort callers.
        logger.error(f"Error formatting lawyer profile: {e}")
        return None
28
+
29
+
30
def format_lawyer_profiles_from_response(api_response: Dict[str, Any]) -> List[LawyerProfile]:
    """Extract and format all lawyers from an external API response.

    Args:
        api_response: Parsed JSON body; expected shape is
            ``{"success": bool, "data": [lawyer_dict, ...]}``.

    Returns:
        List of successfully formatted LawyerProfile objects (possibly empty).
        Returns [] for unsuccessful, malformed, or non-dict responses.
    """
    if not isinstance(api_response, dict) or not api_response.get('success'):
        return []

    # 'data' may be present but None; normalize to an empty list so iteration
    # below cannot raise TypeError.
    lawyers_data = api_response.get('data') or []
    if not isinstance(lawyers_data, list):
        logger.warning(f"Expected 'data' to be a list, got {type(lawyers_data)}")
        return []

    profiles = []
    # (Previously used enumerate(); the index was never used.)
    for lawyer_data in lawyers_data:
        if isinstance(lawyer_data, dict):
            profile = format_lawyer_profile_from_api(lawyer_data)
            if profile:
                profiles.append(profile)

    logger.info(f"Formatted {len(profiles)} lawyer profiles out of {len(lawyers_data)}")
    return profiles
46
+
47
+
48
def _process_list_field(value: Any) -> Optional[List[str]]:
    """Process a list-valued API field, returning None for missing/empty values.

    Elements are coerced to str because consumers render these lists with
    ``', '.join(...)``, which raises TypeError on non-string items (the
    external API occasionally sends numbers).

    Args:
        value: Raw field value from the API payload.

    Returns:
        A non-empty list of strings, or None (missing, empty, or not a list).
    """
    if value is None:
        return None
    if isinstance(value, list):
        if not value:
            return None
        # Keep strings as-is; coerce anything else so joins can't blow up.
        return [item if isinstance(item, str) else str(item) for item in value]
    logger.warning(f"Expected list, got {type(value)}: {value}")
    return None
56
+
57
+
58
def _process_string_field(value: Any) -> Optional[str]:
    """Normalize a free-text API field: strip whitespace, map empty to None.

    Non-string values are stringified first; a failed conversion is logged
    and yields None.
    """
    if value is None:
        return None
    if not isinstance(value, str):
        try:
            value = str(value)
        except Exception as e:
            logger.warning(f"Could not convert to string: {e}")
            return None
    cleaned = value.strip()
    return cleaned if cleaned else None
71
+
72
+
73
def get_lawyer_profile_summary(profile: LawyerProfile) -> str:
    """Generate a one-line, human-readable summary of a lawyer profile."""
    # Assemble "Name - Specialty (Level)", dropping any missing pieces.
    candidates = (
        profile.full_name,
        f"- {profile.primary_specialty}" if profile.primary_specialty else "",
        f"({profile.experience_level})" if profile.experience_level else "",
    )
    pieces = [piece for piece in candidates if piece]
    return " ".join(pieces) if pieces else "Unnamed Lawyer"
83
+
84
+
85
def format_lawyers_as_string(profiles: List[LawyerProfile]) -> str:
    """Format multiple lawyer profiles as a single human-readable string.

    Args:
        profiles: Profiles to render; an empty list yields a placeholder line.

    Returns:
        A multi-line report: a header, an 80-char rule, then one numbered
        entry per profile with its optional fields, separated by blank lines.
    """
    if not profiles:
        return "No lawyer profiles available."

    result = ["Lawyer Profiles:", "-" * 80]

    for idx, profile in enumerate(profiles, 1):
        # Numbered headline reuses the one-line summary helper.
        result.append(f"\n{idx}. {get_lawyer_profile_summary(profile)}")

        # Optional detail lines, one per populated field.
        # NOTE(review): the exact leading-space indent inside these literals
        # could not be recovered from the rendered diff — confirm against the
        # committed file.
        if profile.primary_specialty:
            result.append(f" Specialty: {profile.primary_specialty}")
        if profile.legal_specialties:
            result.append(f" Specialties: {', '.join(profile.legal_specialties)}")
        if profile.experience_level:
            result.append(f" Experience Level: {profile.experience_level}")
        if profile.jurisdiction:
            result.append(f" Jurisdiction: {profile.jurisdiction}")
        if profile.languages:
            result.append(f" Languages: {', '.join(profile.languages)}")
        if profile.lawyer_description:
            result.append(f" Description: {profile.lawyer_description}")

        # Blank separator between entries, but not after the last one.
        if idx < len(profiles):
            result.append("")

    return "\n".join(result)
utils/lightrag_client.py CHANGED
@@ -18,19 +18,78 @@ logging.basicConfig(level=logging.INFO)
18
  logger = logging.getLogger(__name__)
19
 
20
  # LightRAG configuration
21
- LIGHTRAG_PORT = int(os.getenv("LIGHTRAG_PORT", "9621"))
22
  LIGHTRAG_HOST = os.getenv("LIGHTRAG_HOST", "127.0.0.1")
23
- SERVER_URL = f"http://{LIGHTRAG_HOST}:{LIGHTRAG_PORT}"
24
  API_KEY = os.getenv("LIGHTRAG_API_KEY")
25
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
  class LightRAGClient:
28
  """
29
  Client for interacting with LightRAG server
30
  """
31
 
32
- def __init__(self, server_url: str = SERVER_URL, api_key: Optional[str] = API_KEY):
33
- self.server_url = server_url
34
  self.api_key = api_key
35
  self.timeout = 300
36
 
@@ -149,3 +208,59 @@ class ResponseProcessor:
149
  legal_entities.append(reg)
150
 
151
  return list(set(legal_entities)) # Remove duplicates
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  logger = logging.getLogger(__name__)
19
 
20
  # LightRAG configuration
 
21
  LIGHTRAG_HOST = os.getenv("LIGHTRAG_HOST", "127.0.0.1")
22
+ DEFAULT_LIGHTRAG_PORT = int(os.getenv("LIGHTRAG_PORT", "9621"))
23
  API_KEY = os.getenv("LIGHTRAG_API_KEY")
24
 
25
def parse_jurisdiction_graphs() -> Dict[str, int]:
    """
    Parse LIGHTRAG_GRAPHS environment variable to get jurisdiction-to-port mappings.

    Expected format: "jurisdiction1:port1,jurisdiction2:port2,..."
    Example: "romania:9621,bahrain:9622"

    Malformed entries are skipped individually (with a warning) so that one
    bad entry no longer discards the valid mappings that follow it — the
    previous single try/except around the whole loop returned a partial map
    on the first non-integer port.

    Returns:
        Dictionary mapping lowercase jurisdiction names to their respective ports
    """
    graphs_config = os.getenv("LIGHTRAG_GRAPHS", "")
    jurisdiction_map: Dict[str, int] = {}

    if not graphs_config:
        logger.info("No LIGHTRAG_GRAPHS configured, using default port")
        return jurisdiction_map

    # Parse the comma-separated list, one entry at a time.
    for mapping in graphs_config.split(","):
        mapping = mapping.strip()
        if not mapping:
            continue
        if ":" not in mapping:
            logger.warning(f"Skipping malformed LIGHTRAG_GRAPHS entry (no ':'): {mapping!r}")
            continue
        jurisdiction, port_str = mapping.split(":", 1)
        jurisdiction = jurisdiction.strip().lower()
        try:
            port = int(port_str.strip())
        except ValueError:
            logger.warning(f"Skipping LIGHTRAG_GRAPHS entry with non-integer port: {mapping!r}")
            continue
        jurisdiction_map[jurisdiction] = port
        logger.info(f"Loaded jurisdiction mapping: {jurisdiction} → port {port}")

    logger.info(f"Total jurisdictions loaded: {len(jurisdiction_map)}")
    return jurisdiction_map
59
+
60
+
61
+ # Parse jurisdiction mappings at module load time
62
+ JURISDICTION_PORTS = parse_jurisdiction_graphs()
63
+
64
def get_server_url_for_jurisdiction(jurisdiction: Optional[str] = None) -> str:
    """
    Resolve the LightRAG server URL for a jurisdiction.

    Args:
        jurisdiction: The jurisdiction name (e.g., "romania", "bahrain");
            None or an unknown name falls back to the default port.

    Returns:
        Server URL string
    """
    key = jurisdiction.lower() if jurisdiction else None
    if key is not None and key in JURISDICTION_PORTS:
        port = JURISDICTION_PORTS[key]
        logger.info(f"Using jurisdiction-specific server: {jurisdiction} → port {port}")
        return f"http://{LIGHTRAG_HOST}:{port}"

    # Fallback path: either no jurisdiction was given, or it isn't mapped.
    if jurisdiction:
        logger.warning(f"Jurisdiction '{jurisdiction}' not found in mappings, using default port {DEFAULT_LIGHTRAG_PORT}")
    else:
        logger.info(f"No jurisdiction specified, using default port {DEFAULT_LIGHTRAG_PORT}")
    return f"http://{LIGHTRAG_HOST}:{DEFAULT_LIGHTRAG_PORT}"
84
+
85
 
86
  class LightRAGClient:
87
  """
88
  Client for interacting with LightRAG server
89
  """
90
 
91
+ def __init__(self, server_url: Optional[str] = None, api_key: Optional[str] = API_KEY):
92
+ self.server_url = server_url or get_server_url_for_jurisdiction(None)
93
  self.api_key = api_key
94
  self.timeout = 300
95
 
 
208
  legal_entities.append(reg)
209
 
210
  return list(set(legal_entities)) # Remove duplicates
211
+
212
+
213
+ _lightrag_client_cache: Dict[str, LightRAGClient] = {}
214
+
215
+
216
def get_lightrag_client(jurisdiction: Optional[str] = None) -> LightRAGClient:
    """
    Get or create a LightRAG client for the specified jurisdiction.

    Clients are cached per jurisdiction so connections are reused.

    Args:
        jurisdiction: The jurisdiction name (e.g., "romania", "bahrain");
            None selects the default-port client.

    Returns:
        LightRAGClient instance configured for the jurisdiction
    """
    cache_key = jurisdiction.lower() if jurisdiction else "default"

    cached = _lightrag_client_cache.get(cache_key)
    if cached is not None:
        logger.debug(f"Using cached LightRAG client for jurisdiction: {cache_key}")
        return cached

    # Cache miss: build a client pointed at the jurisdiction's server and store it.
    server_url = get_server_url_for_jurisdiction(jurisdiction)
    client = LightRAGClient(server_url=server_url, api_key=API_KEY)
    _lightrag_client_cache[cache_key] = client
    logger.info(f"Created and cached LightRAG client for jurisdiction: {cache_key} → {server_url}")
    return client
244
+
245
+
246
def validate_jurisdiction(jurisdiction: str) -> bool:
    """
    Check whether a jurisdiction has a configured knowledge-graph mapping.

    Args:
        jurisdiction: The jurisdiction name to validate (case-insensitive)

    Returns:
        True if jurisdiction is configured, False otherwise
    """
    normalized = jurisdiction.lower()
    return normalized in JURISDICTION_PORTS
257
+
258
+
259
def get_available_jurisdictions() -> List[str]:
    """
    List the jurisdictions configured via LIGHTRAG_GRAPHS.

    Returns:
        List of configured jurisdiction names (lowercase)
    """
    return [name for name in JURISDICTION_PORTS]
utils/tools.py CHANGED
@@ -8,7 +8,7 @@ from typing import List, Dict, Any, Optional
8
  from langchain_core.tools import tool
9
  from langchain_tavily import TavilySearch
10
  from subagents.lawyer_selector import LawyerSelectorAgent
11
- from utils.lightrag_client import LightRAGClient
12
  import resend
13
 
14
  # Global instances - will be initialized in agent_api.py
@@ -18,34 +18,54 @@ tavily_search = None
18
  resend_api_key: Optional[str] = None
19
 
20
  @tool
21
- async def query_knowledge_graph(query: str, conversation_history: List[Dict[str, str]]) -> str:
 
 
 
 
22
  """
23
- Query the legal knowledge graph for relevant information about EU cyber regulations and directives.
24
 
25
  This tool searches through a comprehensive knowledge graph containing legal documents,
26
- regulations, and directives related to cyber law, GDPR, NIS2, DORA, Cyber Resilience Act, eIDAS 2.0, etc.
 
 
 
 
 
27
 
28
  Use this tool when answering legal questions to provide accurate, up-to-date information
29
- from official EU legal sources.
30
 
31
  Args:
32
  query: The legal question or topic to search for in the knowledge graph
33
  conversation_history: Optional conversation history for context (automatically provided by the agent)
 
34
 
35
  Returns:
36
  Relevant legal information from the knowledge graph with context and references
37
  """
38
  try:
39
- # Use the globally initialized LightRAG client
40
- if lightrag_client is None:
41
- raise ValueError("LightRAGClient not initialized. Please initialize it in agent_api.py")
 
 
 
 
 
 
42
 
43
  # Query the knowledge graph
44
- result = lightrag_client.query(
45
  query=query,
46
  conversation_history=conversation_history
47
  )
48
 
 
 
 
 
49
  # Extract the response content
50
  response = result.get("response", "")
51
 
 
8
  from langchain_core.tools import tool
9
  from langchain_tavily import TavilySearch
10
  from subagents.lawyer_selector import LawyerSelectorAgent
11
+ from utils.lightrag_client import LightRAGClient, get_lightrag_client, validate_jurisdiction, get_available_jurisdictions
12
  import resend
13
 
14
  # Global instances - will be initialized in agent_api.py
 
18
  resend_api_key: Optional[str] = None
19
 
20
  @tool
21
+ async def query_knowledge_graph(
22
+ query: str,
23
+ conversation_history: List[Dict[str, str]],
24
+ jurisdiction: Optional[str] = None
25
+ ) -> str:
26
  """
27
+ Query the legal knowledge graph for relevant information about cyber regulations and directives.
28
 
29
  This tool searches through a comprehensive knowledge graph containing legal documents,
30
+ regulations, and directives related to law
31
+
32
+ The knowledge graph is dynamically selected based on jurisdiction:
33
+ - Romania: Romanian law documents
34
+ - Bahrain: Bahraini law documents
35
+ - Default: Falls back to default port if jurisdiction not specified
36
 
37
  Use this tool when answering legal questions to provide accurate, up-to-date information
38
+ from official legal sources specific to the user's jurisdiction.
39
 
40
  Args:
41
  query: The legal question or topic to search for in the knowledge graph
42
  conversation_history: Optional conversation history for context (automatically provided by the agent)
43
+ jurisdiction: The jurisdiction name (e.g., "romania", "bahrain") to query the appropriate graph
44
 
45
  Returns:
46
  Relevant legal information from the knowledge graph with context and references
47
  """
48
  try:
49
+ # Validate jurisdiction if provided
50
+ if jurisdiction:
51
+ jurisdiction = jurisdiction.strip().lower()
52
+ if not validate_jurisdiction(jurisdiction):
53
+ available = ", ".join(get_available_jurisdictions())
54
+ return f"Error: Jurisdiction '{jurisdiction}' is not supported. Available jurisdictions: {available}"
55
+
56
+ # Get the appropriate LightRAG client for the jurisdiction
57
+ client = get_lightrag_client(jurisdiction)
58
 
59
  # Query the knowledge graph
60
+ result = client.query(
61
  query=query,
62
  conversation_history=conversation_history
63
  )
64
 
65
+ # Check for errors
66
+ if "error" in result:
67
+ return f"Error querying knowledge graph: {result['error']}"
68
+
69
  # Extract the response content
70
  response = result.get("response", "")
71