File size: 7,352 Bytes
514b626
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
"""Main query engine for persona-based responses"""

from typing import Optional, Dict, Any
from pydantic import BaseModel
from datetime import datetime

from ..personas.database import PersonaDatabase
from ..context.database import ContextDatabase
from ..llm.anthropic_client import AnthropicClient
from ..llm.prompt_builder import PromptBuilder


class QueryResponse(BaseModel):
    """Structured record of a single persona query.

    Captures who was asked, what was asked, and what came back, plus the
    model/settings used so a response can be traced and reproduced.
    """

    persona_id: str  # ID of the persona that answered
    persona_name: str  # display name of the persona
    persona_role: str  # role/occupation of the persona
    question: str  # the question that was asked
    response: str  # raw text generated by the LLM
    context_id: Optional[str] = None  # environmental context used, if any
    timestamp: str  # ISO-8601 string (naive local time — see QueryEngine.query)
    model_used: str  # model identifier reported by the LLM client
    # Extra generation details (scenario, temperature, max_tokens).
    # NOTE: a mutable {} default is safe here — pydantic copies field
    # defaults per instance, unlike plain Python default arguments.
    metadata: Dict[str, Any] = {}

    class Config:
        """Pydantic config"""
        # Example payload surfaced in generated JSON schemas / API docs.
        json_schema_extra = {
            "example": {
                "persona_id": "sarah_chen",
                "persona_name": "Sarah Chen",
                "persona_role": "Urban Planner",
                "question": "What do you think about the bike lane proposal?",
                "response": "I strongly support this bike lane proposal...",
                "context_id": "downtown_district",
                "timestamp": "2024-03-15T10:30:00",
                "model_used": "claude-3-5-sonnet-20241022",
            }
        }


class QueryEngine:
    """Main engine for querying personas and generating responses.

    Wires together the persona database, the optional environmental-context
    database, the LLM client, and the prompt builder. All collaborators are
    injectable for testing; sensible defaults are created when omitted.
    """

    def __init__(
        self,
        persona_db: Optional[PersonaDatabase] = None,
        context_db: Optional[ContextDatabase] = None,
        llm_client: Optional[AnthropicClient] = None,
    ):
        """
        Initialize query engine

        Args:
            persona_db: Persona database instance (creates default if None)
            context_db: Context database instance (creates default if None)
            llm_client: LLM client instance (creates default if None)
        """
        self.persona_db = persona_db or PersonaDatabase()
        self.context_db = context_db or ContextDatabase()
        self.llm_client = llm_client or AnthropicClient()
        self.prompt_builder = PromptBuilder()

    def query(
        self,
        persona_id: str,
        question: str,
        context_id: Optional[str] = None,
        scenario_description: Optional[str] = None,
        temperature: Optional[float] = None,
        max_tokens: Optional[int] = None,
    ) -> QueryResponse:
        """
        Query a persona with a question

        Args:
            persona_id: ID of persona to query
            question: Question to ask the persona
            context_id: Optional environmental context ID
            scenario_description: Optional scenario description
            temperature: Optional temperature override (0.0 is a valid value)
            max_tokens: Optional max_tokens override

        Returns:
            QueryResponse object with the persona's response

        Raises:
            ValueError: If persona not found
        """
        # Fail fast with a helpful message listing valid persona IDs.
        persona = self.persona_db.get_persona(persona_id)
        if persona is None:
            available = ", ".join(self.persona_db.list_persona_ids())
            raise ValueError(
                f"Persona '{persona_id}' not found. "
                f"Available personas: {available}"
            )

        # A missing context is non-fatal by design: warn and answer without it.
        context = None
        if context_id:
            context = self.context_db.get_context(context_id)
            if context is None:
                print(f"Warning: Context '{context_id}' not found, proceeding without context")

        # Build prompts: the persona (plus optional context) becomes the
        # system prompt; the question (plus optional scenario) the user turn.
        system_prompt = self.prompt_builder.build_persona_system_prompt(
            persona=persona,
            context=context,
        )

        user_message = self.prompt_builder.build_contextual_query(
            question=question,
            scenario_description=scenario_description,
        )

        # Generate response (None overrides are resolved by the client).
        response_text = self.llm_client.generate_response(
            system_prompt=system_prompt,
            user_message=user_message,
            temperature=temperature,
            max_tokens=max_tokens,
        )

        # Record the settings actually in effect. Use explicit None checks:
        # the previous `temperature or default` form silently replaced the
        # valid-but-falsy overrides temperature=0.0 and max_tokens=0 with
        # the client defaults in the metadata.
        effective_temperature = (
            temperature if temperature is not None else self.llm_client.temperature
        )
        effective_max_tokens = (
            max_tokens if max_tokens is not None else self.llm_client.max_tokens
        )

        return QueryResponse(
            persona_id=persona.persona_id,
            persona_name=persona.name,
            persona_role=persona.role,
            question=question,
            response=response_text,
            context_id=context_id,
            timestamp=datetime.now().isoformat(),
            model_used=self.llm_client.model,
            metadata={
                "scenario_description": scenario_description,
                "temperature": effective_temperature,
                "max_tokens": effective_max_tokens,
            },
        )

    def query_multiple(
        self,
        persona_ids: list[str],
        question: str,
        context_id: Optional[str] = None,
        scenario_description: Optional[str] = None,
    ) -> list[QueryResponse]:
        """
        Query multiple personas with the same question

        Best-effort: a failure for one persona is reported and skipped so the
        remaining personas are still queried.

        Args:
            persona_ids: List of persona IDs to query
            question: Question to ask all personas
            context_id: Optional environmental context ID
            scenario_description: Optional scenario description

        Returns:
            List of QueryResponse objects (may be shorter than persona_ids
            when individual queries fail)
        """
        responses = []
        for persona_id in persona_ids:
            try:
                response = self.query(
                    persona_id=persona_id,
                    question=question,
                    context_id=context_id,
                    scenario_description=scenario_description,
                )
                responses.append(response)
            except Exception as e:
                # Deliberately broad: one bad persona (or transient LLM error)
                # must not abort the whole batch.
                print(f"Error querying persona {persona_id}: {e}")

        return responses

    def list_available_personas(self) -> list[tuple[str, str, str]]:
        """
        List all available personas

        Returns:
            List of (persona_id, name, role) tuples
        """
        personas = self.persona_db.get_all_personas()
        return [
            (p.persona_id, p.name, p.role)
            for p in personas
        ]

    def list_available_contexts(self) -> list[str]:
        """
        List all available contexts

        Returns:
            List of context IDs
        """
        return self.context_db.list_context_ids()

    def test_system(self) -> bool:
        """
        Test that all system components are working

        Prints a human-readable status line for each component.

        Returns:
            True if system is operational
        """
        try:
            # Personas are mandatory — an empty database is a hard failure.
            personas = self.persona_db.get_all_personas()
            if not personas:
                print("Error: No personas loaded")
                return False
            print(f"✓ Loaded {len(personas)} personas")

            # Contexts are optional, so any count (including 0) is fine.
            contexts = self.context_db.get_all_contexts()
            print(f"✓ Loaded {len(contexts)} contexts")

            # The LLM connection is mandatory.
            if self.llm_client.test_connection():
                print(f"✓ LLM client connected ({self.llm_client.model})")
            else:
                print("Error: LLM client connection failed")
                return False

            return True

        except Exception as e:
            # Broad catch is intentional: this is a diagnostic entry point
            # and must report failure rather than propagate.
            print(f"System test failed: {e}")
            return False