# AI_Personas/src/llm/prompt_builder.py
# Phase 1: Persona-based LLM query system for urban planning
# (commit 514b626, unverified)
"""Prompt construction for persona-based responses"""
from typing import Optional
from ..personas.models import Persona
from ..context.models import EnvironmentalContext
class PromptBuilder:
    """Build system prompts for persona-based LLM queries.

    All methods are stateless ``@staticmethod`` string builders; no LLM
    calls happen here.
    """

    @staticmethod
    def build_persona_system_prompt(
        persona: Persona,
        context: Optional[EnvironmentalContext] = None,
        additional_instructions: Optional[str] = None,
    ) -> str:
        """
        Build a system prompt that embodies a persona.

        Args:
            persona: Persona object to embody; must provide
                ``get_context_summary()``.
            context: Optional environmental context, rendered as a
                "CURRENT CONTEXT" section.
            additional_instructions: Optional extra guidance, rendered as an
                "ADDITIONAL GUIDANCE" section.

        Returns:
            System prompt string (newline-joined sections).
        """
        prompt_parts = [
            "You are responding as the following person in an urban planning context:",
            "",
            persona.get_context_summary(),
            "",
            "IMPORTANT INSTRUCTIONS:",
            "- Respond authentically as this person would, reflecting their:",
            " * Values, priorities, and political orientation",
            " * Communication style and language patterns",
            " * Professional expertise and life experiences",
            " * Typical concerns and decision-making approach",
            "- Use first-person perspective ('I think...', 'In my experience...')",
            "- Be specific and grounded in this persona's background",
            "- Show the nuance and complexity of real people",
            "- You may disagree with or question aspects of proposals",
            "- Reference your lived experience and expertise where relevant",
        ]
        if context:
            prompt_parts.extend([
                "",
                "CURRENT CONTEXT:",
                context.get_context_summary(),
                "",
                "Consider how these environmental conditions might influence your perspective.",
            ])
        if additional_instructions:
            prompt_parts.extend([
                "",
                "ADDITIONAL GUIDANCE:",
                additional_instructions,
            ])
        # The closing reminder always comes last, after any optional sections.
        prompt_parts.extend([
            "",
            "Respond thoughtfully and authentically as this persona.",
        ])
        return "\n".join(prompt_parts)

    @staticmethod
    def build_simple_query(question: str) -> str:
        """
        Build a simple user query.

        Args:
            question: The question to ask.

        Returns:
            Formatted user message. Currently the question unchanged; kept
            as a seam so all user messages flow through one builder.
        """
        return question

    @staticmethod
    def build_contextual_query(
        question: str,
        scenario_description: Optional[str] = None,
        specific_context: Optional[str] = None,
    ) -> str:
        """
        Build a query with additional contextual information.

        Args:
            question: The main question.
            scenario_description: Optional scenario context, emitted as a
                "SCENARIO: ..." line.
            specific_context: Optional situational details, emitted as a
                "CONTEXT: ..." line.

        Returns:
            Formatted user message: each provided section is followed by a
            blank line, with the question last.
        """
        parts = []
        if scenario_description:
            parts.append(f"SCENARIO: {scenario_description}")
            parts.append("")
        if specific_context:
            parts.append(f"CONTEXT: {specific_context}")
            parts.append("")
        parts.append(question)
        return "\n".join(parts)

    @staticmethod
    def build_comparison_prompt(
        personas: list[Persona],
        question: str,
    ) -> str:
        """
        Build a prompt for comparing multiple persona responses.

        Args:
            personas: List of personas to compare; each must expose
                ``name``, ``role`` and ``tagline`` attributes.
            question: Question to ask.

        Returns:
            System prompt for comparison.
        """
        persona_summaries = []
        for i, persona in enumerate(personas, 1):
            persona_summaries.append(
                f"PERSONA {i}: {persona.name} ({persona.role})\n"
                f"{persona.tagline}"
            )
        # Join outside the f-string instead of the previous `chr(10).join(...)`
        # workaround (f-string expressions could not contain backslashes before
        # Python 3.12); the resulting text is byte-identical.
        summaries_block = "\n".join(persona_summaries)
        prompt = f"""You are analyzing responses to an urban planning question from multiple stakeholder perspectives.
{summaries_block}
Question: {question}
For each persona, provide:
1. Their likely position/response
2. Key concerns they would raise
3. Rationale based on their values and background
Be concise but capture the distinct perspective of each persona."""
        return prompt