# AI_Personas/src/pipeline/query_engine.py
"""Main query engine for persona-based responses"""
from typing import Optional, Dict, Any
from pydantic import BaseModel
from datetime import datetime
from ..personas.database import PersonaDatabase
from ..context.database import ContextDatabase
from ..llm.anthropic_client import AnthropicClient
from ..llm.prompt_builder import PromptBuilder
class QueryResponse(BaseModel):
"""Structured response from a persona query"""
persona_id: str
persona_name: str
persona_role: str
question: str
response: str
context_id: Optional[str] = None
timestamp: str
model_used: str
metadata: Dict[str, Any] = {}
class Config:
"""Pydantic config"""
json_schema_extra = {
"example": {
"persona_id": "sarah_chen",
"persona_name": "Sarah Chen",
"persona_role": "Urban Planner",
"question": "What do you think about the bike lane proposal?",
"response": "I strongly support this bike lane proposal...",
"context_id": "downtown_district",
"timestamp": "2024-03-15T10:30:00",
"model_used": "claude-3-5-sonnet-20241022",
}
}
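
# The model above is plain Pydantic, so responses can be serialized for logging or an
# API layer. A minimal sketch with hypothetical values mirroring the example schema
# (model_dump_json is the Pydantic v2 serializer):
#
#     resp = QueryResponse(
#         persona_id="sarah_chen", persona_name="Sarah Chen", persona_role="Urban Planner",
#         question="What do you think about the bike lane proposal?",
#         response="I strongly support this bike lane proposal...",
#         timestamp=datetime.now().isoformat(), model_used="claude-3-5-sonnet-20241022",
#     )
#     payload = resp.model_dump_json()  # JSON string for storage or transport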
class QueryEngine:
"""Main engine for querying personas and generating responses"""
def __init__(
self,
persona_db: Optional[PersonaDatabase] = None,
context_db: Optional[ContextDatabase] = None,
llm_client: Optional[AnthropicClient] = None,
):
"""
Initialize query engine
Args:
persona_db: Persona database instance (creates default if None)
context_db: Context database instance (creates default if None)
llm_client: LLM client instance (creates default if None)
"""
self.persona_db = persona_db or PersonaDatabase()
self.context_db = context_db or ContextDatabase()
self.llm_client = llm_client or AnthropicClient()
self.prompt_builder = PromptBuilder()
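
    # All three collaborators are injectable, so tests can swap in fakes. A minimal
    # sketch (FakeAnthropicClient is hypothetical; it would need the same
    # generate_response/model/temperature/max_tokens surface as AnthropicClient):
    #
    #     engine = QueryEngine(llm_client=FakeAnthropicClient())
    #     engine = QueryEngine()  # or rely on the default databases and client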
def query(
self,
persona_id: str,
question: str,
context_id: Optional[str] = None,
scenario_description: Optional[str] = None,
temperature: Optional[float] = None,
max_tokens: Optional[int] = None,
) -> QueryResponse:
"""
Query a persona with a question
Args:
persona_id: ID of persona to query
question: Question to ask the persona
context_id: Optional environmental context ID
scenario_description: Optional scenario description
temperature: Optional temperature override
max_tokens: Optional max_tokens override
Returns:
QueryResponse object with the persona's response
Raises:
ValueError: If persona not found
"""
# Get persona
persona = self.persona_db.get_persona(persona_id)
if persona is None:
available = ", ".join(self.persona_db.list_persona_ids())
raise ValueError(
f"Persona '{persona_id}' not found. "
f"Available personas: {available}"
)
# Get context if specified
context = None
if context_id:
context = self.context_db.get_context(context_id)
if context is None:
print(f"Warning: Context '{context_id}' not found, proceeding without context")
# Build prompts
system_prompt = self.prompt_builder.build_persona_system_prompt(
persona=persona,
context=context,
)
user_message = self.prompt_builder.build_contextual_query(
question=question,
scenario_description=scenario_description,
)
# Generate response
response_text = self.llm_client.generate_response(
system_prompt=system_prompt,
user_message=user_message,
temperature=temperature,
max_tokens=max_tokens,
)
# Build response object
return QueryResponse(
persona_id=persona.persona_id,
persona_name=persona.name,
persona_role=persona.role,
question=question,
response=response_text,
context_id=context_id,
timestamp=datetime.now().isoformat(),
model_used=self.llm_client.model,
metadata={
"scenario_description": scenario_description,
"temperature": temperature or self.llm_client.temperature,
"max_tokens": max_tokens or self.llm_client.max_tokens,
},
)
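
    # Example call (hypothetical IDs; assumes the bundled persona/context data is
    # loaded and an Anthropic API key is configured for the client):
    #
    #     resp = engine.query(
    #         persona_id="sarah_chen",
    #         question="What do you think about the bike lane proposal?",
    #         context_id="downtown_district",
    #         temperature=0.7,
    #     )
    #     print(resp.response)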
def query_multiple(
self,
persona_ids: list[str],
question: str,
context_id: Optional[str] = None,
scenario_description: Optional[str] = None,
) -> list[QueryResponse]:
"""
Query multiple personas with the same question
Args:
persona_ids: List of persona IDs to query
question: Question to ask all personas
context_id: Optional environmental context ID
scenario_description: Optional scenario description
Returns:
            List of QueryResponse objects (personas whose queries fail are skipped,
            with the error printed)
"""
responses = []
for persona_id in persona_ids:
try:
response = self.query(
persona_id=persona_id,
question=question,
context_id=context_id,
scenario_description=scenario_description,
)
responses.append(response)
except Exception as e:
print(f"Error querying persona {persona_id}: {e}")
return responses
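
    # Example fan-out (hypothetical persona IDs): ask several personas the same
    # question and collect whichever queries succeed.
    #
    #     responses = engine.query_multiple(
    #         ["sarah_chen", "marcus_rivera"], "Should we widen Main Street?"
    #     )
    #     for r in responses:
    #         print(f"{r.persona_name}: {r.response[:80]}")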
def list_available_personas(self) -> list[tuple[str, str, str]]:
"""
List all available personas
Returns:
List of (persona_id, name, role) tuples
"""
personas = self.persona_db.get_all_personas()
return [
(p.persona_id, p.name, p.role)
for p in personas
]
def list_available_contexts(self) -> list[str]:
"""
List all available contexts
Returns:
List of context IDs
"""
return self.context_db.list_context_ids()
def test_system(self) -> bool:
"""
Test that all system components are working
Returns:
True if system is operational
"""
try:
# Check personas loaded
personas = self.persona_db.get_all_personas()
if not personas:
print("Error: No personas loaded")
return False
print(f"✓ Loaded {len(personas)} personas")
# Check contexts loaded (optional)
contexts = self.context_db.get_all_contexts()
print(f"✓ Loaded {len(contexts)} contexts")
# Check LLM connection
if self.llm_client.test_connection():
print(f"✓ LLM client connected ({self.llm_client.model})")
else:
print("Error: LLM client connection failed")
return False
return True
except Exception as e:
print(f"System test failed: {e}")
return False
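

# Minimal smoke test for running this module directly. This is a sketch: it assumes
# an Anthropic API key is configured for AnthropicClient and that at least one persona
# is loaded; the persona queried below is simply the first one the database lists.
if __name__ == "__main__":
    engine = QueryEngine()
    if engine.test_system():
        personas = engine.list_available_personas()
        print("Available personas:")
        for pid, name, role in personas:
            print(f"  {pid}: {name} ({role})")
        # Query the first available persona with a simple question
        first_id = personas[0][0]
        result = engine.query(first_id, "What is your top priority for this city?")
        print(f"\n{result.persona_name} ({result.persona_role}):\n{result.response}")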