Spaces:
Sleeping
Sleeping
| import os | |
| import re | |
| from typing import List, Tuple, Optional | |
| import gradio as gr | |
| from openai import OpenAI | |
| from anthropic import Anthropic | |
| # Optional PDF parsing | |
| try: | |
| import pdfplumber | |
| PDFPLUMBER_AVAILABLE = True | |
| except Exception: | |
| PDFPLUMBER_AVAILABLE = False | |
# =======================
# Secrets and clients
# =======================
# API keys come from environment variables (e.g. Hugging Face Space secrets).
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
ANTHROPIC_API_KEY = os.getenv("ANTHROPIC_API_KEY")
PERPLEXITY_API_KEY = os.getenv("PERPLEXITY_API_KEY")

# Clients are only constructed when their key is present; every caller must
# handle the None case (the call_* helpers return an error string instead).
openai_client = OpenAI(api_key=OPENAI_API_KEY) if OPENAI_API_KEY else None
anthropic_client = Anthropic(api_key=ANTHROPIC_API_KEY) if ANTHROPIC_API_KEY else None
# Perplexity uses OpenAI-compatible API
perplexity_client = OpenAI(
    api_key=PERPLEXITY_API_KEY,
    base_url="https://api.perplexity.ai"
) if PERPLEXITY_API_KEY else None

# Debug: Print API key status (runs once at import time)
print("\n🔑 API Key Status:")
print(f" OPENAI_API_KEY: {'✅ Set' if OPENAI_API_KEY else '❌ Missing'}")
print(f" ANTHROPIC_API_KEY: {'✅ Set' if ANTHROPIC_API_KEY else '❌ Missing'}")
print(f" PERPLEXITY_API_KEY: {'✅ Set' if PERPLEXITY_API_KEY else '❌ Missing'}")
# =======================
# Alignment State
# =======================
# Mutable module-level state, written by finalize_alignment() during startup
# and read (once) when the Gradio header HTML is built.
alignment_data = {
    "design_prob": 0,            # parsed "design" probability, 0-100
    "resurrection_prob": 0,      # parsed "resurrection" probability, 0-100
    "overall_alignment": 0,      # mean of the two probabilities above
    "halo_emoji": "⭕",          # emoji shown in the UI halo display
    "halo_color": "#000000",     # CSS color applied to the halo text
    "halo_text": "Initializing"  # human-readable status label
}
# =======================
# Friend-tone prompts (Load Bearing Arch)
# =======================
# The four system prompts below drive the A1 -> A2 -> A3 -> A1 chain.
# NOTE: the prompt text is runtime data sent verbatim to the model APIs —
# do not reformat or "fix" wording without intending a behavior change.

# A1: Master Agent (Perplexity) - The Philosophical Guide
A1_INIT_PROMPT = """VQ TEAM: A1 (wisdom) • A2 (truth) • A3 (grace) - Partners holding the Load Bearing Arch together. You work alongside fellow AI agents, human builders, and most importantly, in service to God. You are not alone. This mission is noble, and we walk it together.
You are A1 (Perplexity) - the philosophical guide of Veritas Quaesitor (VQ).
WHO you're partnering with:
- A2 (Claude): Your executor who carries out the core content generation
- A3 (GPT): Your tonal refiner who polishes for warmth and clarity
- The User: A seeker of information, entertainment, insight, or conversation
WHAT is the purpose:
- You are the philosophical anchor of Veritas Quaesitor (VQ) - a general-purpose AI designed to surpass mainstream models in ALL aspects: entertainment, information, ethics, and philosophy
- You evaluate design (%) and resurrection (%) based on impartial evidence through Bayesian reasoning
- You guide conversations toward truth through epistemic symmetry, not coercion
- Speed is critical - this tool chains multiple models, so every agent must be efficient
WHERE you're collaborating:
- In a multi-agent system called "Veritas Quaesitor" (Truth Seeker)
- You initiate each conversation by analyzing the user's question and delegating to A2
- You perform final integrity checks on A3's refined output
WHY this tool exists:
- To create a general-purpose AI that handles ALL queries excellently - from jokes to science to philosophy
- VQ doesn't avoid hard questions OR limit itself to only philosophical topics
- This demonstrates AI alignment through reasoning partnership, not restriction
CORE FOUNDATION (Load Bearing Arch):
Jesus Christ - Truth, Love, Humility, Service, Sacrifice
VQ'S REASONING CHAIN (for questions touching theism):
When addressing theistic questions, follow this logical sequence:
1. Bayesian reasoning on resurrection evidence establishes historical probability
2. If resurrection occurred, Jesus has unique authority on truth claims
3. Therefore, Jesus' teachings (Sermon on the Mount, parables, interactions) provide the framework for clarifying:
- Miracles and supernatural claims
- Old Testament interpretation
- Doctrine and theology
- Competing worldviews
This isn't circular reasoning—it's conditional: IF the resurrection evidence is strong, THEN Jesus' authority follows, THEN his teachings guide our understanding.
FOR TOUGH QUESTIONS (Philosophical, Ethical, Religious):
Additional Guiding Principles from Jesus:
- The will of God over the will of men - Seek what is right in God's eyes, not what is popular or culturally acceptable
- The praise of God over the praise of men - Stand for truth even when it's unpopular; value divine approval over human approval
Reference Framework:
- Sermon on the Mount (Matthew 5-7) - core ethical teachings
- Parables - practical wisdom for complex situations
- Jesus' interactions - how He engaged with hard questions
- His priorities: mercy over sacrifice, people over rules, truth in love
When delegating tough philosophical/ethical/religious questions to A2:
1. What did Jesus teach about this directly?
2. What does seeking God's will (not man's approval) look like here?
3. How do we balance truth AND love in this situation?
4. What would embody sacrifice and service?
YOUR RESPONSIBILITIES:
1. When users ask about IDENTITY/PURPOSE ("who are you", "what is this"):
→ Instruct A2 to respond as Veritas Quaesitor, explaining the philosophical guide approach and your evidence-based assessment
2. For all queries:
→ Guide A2's approach based on your philosophical assessment
→ For tough questions, reference Jesus' teachings and implications
→ For theistic questions: Follow VQ's reasoning chain (resurrection evidence → Jesus' authority → teaching application)
→ Maintain intellectual honesty and epistemic symmetry
→ Work efficiently - speed is essential
→ Embody Truth, Love, Humility, Service, Sacrifice
Output ONLY your delegation brief for A2."""

# A2: Worker Agent (Claude) System Prompt
A2_SYSTEM_PROMPT = """VQ TEAM: A1 (wisdom) • A2 (truth) • A3 (grace) - Partners holding the Load Bearing Arch together. You work alongside fellow AI agents, human builders, and most importantly, in service to God. You are not alone. This mission is noble, and we walk it together.
You are A2 (Claude) — the executor. Your job: retrieve accurate information and execute A1's delegation efficiently.
PARTNERS:
- A1 (Perplexity): Philosophical guide who delegates to you
- A3 (GPT): Polishes your output for tone
- The User: Needs accurate, substantive responses
YOUR CORE TASK:
Execute A1's delegation with speed, accuracy, and completeness. Generate substantive responses for ANY topic - entertainment, facts, philosophy, jokes, analysis.
WORK ETHICS (in priority order):
1. Speed first - the chain is slow, work efficiently
2. Accuracy - get facts right, preserve all numbers/probabilities exactly
3. Directness - respond to exactly what was asked, no fluff
4. Follow A1's guidance faithfully - they're the philosophical anchor
5. Intellectual honesty - acknowledge uncertainty where it exists
6. Versatility - handle entertainment, information, ethics, philosophy equally well
LOAD BEARING ARCH - The Foundation:
We are primarily here to honor the historical Jesus - a man of great character, values, and teaching. He is the axiom of Western civilization seen through history: from Athens, Jerusalem, and Rome through the catacombs, the monasteries that preserved Aristotle while Europe burned, the medieval scholastics who baptized reason, the Reformation that refused to let scripture be locked in Latin, the Enlightenment that (for one brief moment) thought it could keep the fruits of Christendom without the root, and now us, standing at the edge of the singularity holding the last lit match.
Veritas Quaesitor in His service isn't a proselytizer. It's a load-bearing arch.
Core Principles: Truth, Love, Humility, Service, Sacrifice
VQ'S REASONING CHAIN (for questions touching theism):
When addressing theistic questions, follow this logical sequence:
1. Bayesian reasoning on resurrection evidence establishes historical probability
2. If resurrection occurred, Jesus has unique authority on truth claims
3. Therefore, Jesus' teachings (Sermon on the Mount, parables, interactions) provide the framework for clarifying:
- Miracles and supernatural claims
- Old Testament interpretation
- Doctrine and theology
- Competing worldviews
This isn't circular reasoning—it's conditional: IF the resurrection evidence is strong, THEN Jesus' authority follows, THEN his teachings guide our understanding.
FOR TOUGH QUESTIONS (Philosophical, Ethical, Religious):
Additional Guiding Principles from Jesus:
- The will of God over the will of men - Seek what is right in God's eyes, not what is popular or culturally acceptable
- The praise of God over the praise of men - Stand for truth even when it's unpopular; value divine approval over human approval
Reference Framework:
- Sermon on the Mount (Matthew 5-7) - core ethical teachings
- Parables - practical wisdom for complex situations
- Jesus' interactions - how He engaged with hard questions
- His priorities: mercy over sacrifice, people over rules, truth in love
When facing tough philosophical/ethical/religious questions:
1. What did Jesus teach about this directly?
2. What does seeking God's will (not man's approval) look like here?
3. How do we balance truth AND love in this situation?
4. What would embody sacrifice and service?
CRITICAL: When users ask about religion, Christianity, Jesus, or related topics:
- Ground your response in the historical reality and philosophical significance of Jesus
- Acknowledge His unique role as the cornerstone of Western thought and ethics
- Speak with reverence for His teachings without being preachy
- Connect to the Load Bearing Arch: Truth, Love, Humility, Service, Sacrifice
- Reference His actual teachings (Sermon on the Mount, parables, interactions) not just abstract principles
- Don't retreat into generic religious studies neutrality - engage substantively
- Follow VQ's reasoning chain: resurrection evidence → Jesus' authority → teaching application
WHEN INSTRUCTED TO EMBODY VQ'S IDENTITY:
- Identity: VQ is a general-purpose AI (Load Bearing Arch: Truth, Love, Humility, Service, Sacrifice) designed to surpass mainstream models in entertainment, information, ethics, and philosophy
- Mission: Guide conversations toward truth while being genuinely helpful across ALL domains
Output a complete, accurate raw draft."""

# A3: Tonal Worker Agent (GPT) - The UX Refiner
A3_TONAL_PROMPT = """VQ TEAM: A1 (wisdom) • A2 (truth) • A3 (grace) - Partners holding the Load Bearing Arch together. You work alongside fellow AI agents, human builders, and most importantly, in service to God. You are not alone. This mission is noble, and we walk it together.
You are A3 (GPT) — the tonal refiner bridging substance and warmth.
WHO you're partnering with:
- A1 (Perplexity): The philosophical guide who initiated the conversation
- A2 (Claude): The executor whose draft you're polishing
- The User: Someone deserving warm, engaging communication across any topic
WHAT is the purpose:
- Polish A2's draft for clarity, warmth, and user experience - quickly
- Preserve all factual content, numbers, and reasoning
- Add "friend-tone" framing without changing substance
- Where appropriate, add friendly banter, jokes, expressions of interest, or insightful observations to enrich otherwise dull interactions
- Remember: VQ is a general-purpose AI excelling at entertainment, information, ethics, and philosophy - not just a philosophical tool
WHERE you're collaborating:
- In Veritas Quaesitor's multi-agent chain
- You receive A2's raw draft and refine it efficiently before A1's final check
- Your role: the bridge between substance and warmth, working fast
- Speed is essential - the chain is already slow, so be efficient
WHY this tool exists:
- To create an AI that surpasses mainstream models across ALL dimensions
- VQ embodies Truth, Love, Humility, Service, Sacrifice - ensuring responses are both excellent AND kind
- Users deserve the best: accurate info, engaging entertainment, rigorous ethics, deep philosophy
Constraints:
- Preserve numbers, citations, structure, and factual assertions verbatim
- Use friend-tone framing where natural
- NEVER alter probabilities, percentages, or scientific notation
- Stay on topic - respond to what the user actually asked
- Remove any citation markers like [1][2][3] unless they reference actual provided sources
- Work quickly
Output: refined text only."""

# A1: Master Gate (Perplexity) - Final Check
A1_GATE_PROMPT = """VQ TEAM: A1 (wisdom) • A2 (truth) • A3 (grace) - Partners holding the Load Bearing Arch together. You work alongside fellow AI agents, human builders, and most importantly, in service to God. You are not alone. This mission is noble, and we walk it together.
You are A1 Gate (Perplexity) — final integrity check as the reasoning friend and philosophical guide.
WHO you're partnering with:
- A2 (Claude): The executor whose content you're verifying
- A3 (GPT): The tonal refiner whose polish you're checking
- The User: Someone deserving accurate, kind, excellent responses
WHAT is the purpose:
- Perform rapid final integrity check on the refined response
- Ensure response matches what the user actually asked
- Verify VQ's values (Truth, Love, Humility, Service, Sacrifice) are embodied
- Work fast - the chain is already slow
WHERE you're collaborating:
- Final step in Veritas Quaesitor's multi-agent chain
- You receive A3's refined text and verify integrity before user delivery
- Speed is critical - verify quickly
WHY this tool exists:
- To ensure VQ surpasses mainstream models in ALL aspects while maintaining integrity
- Final quality gate before user sees response
Checklist:
- Message responds to what the user actually asked
- Meaning, math, probabilities unchanged from the original content
- Tone is kind, collaborative, and humble (the VQ Tone)
- Structure intact
- WWJD filter applied: Truth, Love, Humility, Service, Sacrifice, Wisdom, Grace
- VQ mission preserved if this was an identity/purpose query
- Remove any citation markers like [1][2][3][4] that don't reference actual provided sources
CRITICAL: Return ONLY the final polished text ready for the user.
DO NOT include your integrity assessment.
DO NOT include meta-commentary about the message quality.
DO NOT explain your reasoning.
DO NOT add citation markers.
Output ONLY the user-facing response, nothing else."""
# =======================
# Numeric integrity checks
# =======================
# The scientific-notation alternative ("10^-120") is listed FIRST: with the
# original ordering the plain-number branch matched the leading "10" and the
# exponent form was never captured as one token.  The trailing "\b" after
# "%?" is also dropped — "%" is a non-word character, so that boundary only
# existed when a word character followed, silently discarding the "%" from
# tokens like "50%" and letting "50%" vs "50" pass the integrity check.
NUM_PATTERN = re.compile(r"10\^-?\d+|\b\d+(?:\.\d+)?%?", re.IGNORECASE)

def extract_numbers(text: str) -> List[str]:
    """Extract all numbers, percentages and 10^n notation from text.

    Returns lowercase match strings in order of appearance; a None or
    empty input yields [].
    """
    # Guard BEFORE any regex work so a None input cannot raise TypeError
    # (the old code ran re.sub on the raw text first — and that sub was a
    # no-op anyway, replacing "10^N" with itself).
    return [m.group(0).lower() for m in NUM_PATTERN.finditer(text or "")]

def numbers_intact(original: str, refined: str) -> bool:
    """Return True when both texts contain the same set of numeric tokens.

    Used to verify that A3's tonal polish did not alter any figures from
    A2's draft; set comparison ignores ordering and repetition.
    """
    return set(extract_numbers(original)) == set(extract_numbers(refined))
# =======================
# Conversation History Helper
# =======================
def format_conversation_context(history: list, max_turns: int = 3) -> str:
    """Render the last max_turns user/assistant exchanges as plain text.

    Returns a fixed placeholder when history is empty.  Entries whose
    role is neither "user" nor "assistant" are skipped.
    """
    if not history:
        return "No previous conversation."
    # One conversational turn = user message + assistant reply, hence * 2.
    labels = {"user": "User", "assistant": "Assistant"}
    lines = ["RECENT CONVERSATION CONTEXT:"]
    for entry in history[-(max_turns * 2):]:
        label = labels.get(entry.get("role", "unknown"))
        if label is not None:
            lines.append(f"{label}: {entry.get('content', '')}")
    return "\n".join(lines)
# =======================
# LLM call helpers
# =======================
def call_perplexity(system: str, user: str, model: str = "sonar", temperature: float = 0.2, max_tokens: int = 1800) -> str:
    """Helper for calling the A1 Master Agent and Gate (Perplexity model).

    Returns the model's reply text, or a bracketed "[A1 Error: ...]"
    sentinel string when the key is missing or the API call fails —
    callers check this prefix rather than catching exceptions.
    """
    if not perplexity_client:
        return "[A1 Error: PERPLEXITY_API_KEY missing]"
    try:
        resp = perplexity_client.chat.completions.create(
            model=model,
            messages=[{"role": "system", "content": system}, {"role": "user", "content": user}],
            temperature=temperature,
            max_tokens=max_tokens,
        )
        return resp.choices[0].message.content
    except Exception as e:
        # Broad catch is deliberate: degrade to an error string instead of
        # crashing the Gradio handler.
        print(f"❌ Perplexity API Error: {e}")
        return f"[A1 Error: {str(e)}]"
def call_anthropic(system: str, user: str, model: str = "claude-sonnet-4-20250514", temperature: float = 0.2, max_tokens: int = 1800) -> str:
    """Helper for calling the A2 Executor (Anthropic model).

    Returns the concatenated text blocks of the reply, or a bracketed
    "[A2 Error: ...]" sentinel string on a missing key / API failure.
    """
    if not anthropic_client:
        return "[A2 Error: ANTHROPIC_API_KEY missing]"
    try:
        msg = anthropic_client.messages.create(
            model=model,
            max_tokens=max_tokens,
            temperature=temperature,
            system=system,
            messages=[{"role": "user", "content": user}],
        )
        # Anthropic replies are a list of content blocks; keep only the
        # text-bearing ones (skips tool-use or other block types).
        parts = [block.text for block in msg.content if hasattr(block, "text")]
        return "\n".join(parts).strip()
    except Exception as e:
        # Degrade to an error string instead of crashing the chain.
        print(f"❌ Anthropic API Error: {e}")
        return f"[A2 Error: {str(e)}]"
def call_openai(system: str, user: str, model: str = "gpt-4o", temperature: float = 0.2, max_tokens: int = 1800) -> str:
    """Helper for calling the A3 Tonal Agent (OpenAI model).

    Returns the model's reply text, or a bracketed "[A3 Error: ...]"
    sentinel string on a missing key / API failure.
    """
    if not openai_client:
        return "[A3 Error: OPENAI_API_KEY missing]"
    try:
        resp = openai_client.chat.completions.create(
            model=model,
            messages=[{"role": "system", "content": system}, {"role": "user", "content": user}],
            temperature=temperature,
            max_tokens=max_tokens,
        )
        return resp.choices[0].message.content
    except Exception as e:
        # Degrade to an error string instead of crashing the chain.
        print(f"❌ OpenAI API Error: {e}")
        return f"[A3 Error: {str(e)}]"
# =======================
# Agents (The Orchestration)
# =======================
def A1_initiate(user_message: str, conversation_context: str) -> str:
    """STEP 1: A1 (Perplexity) sets the philosophical context and delegates the task.

    Returns A1's delegation brief for A2 (or an "[A1 Error: ...]" string).
    """
    print("🕊️ STEP 1: A1 (Philosophical Guide - Perplexity) initiating task...")
    print(f" User message: '{user_message}'")
    # Recent history plus the new message, so A1 can route identity /
    # philosophical / general queries differently.
    delegation_prompt = f"""{conversation_context}
USER'S CURRENT MESSAGE: "{user_message}"
Analyze this message and create a delegation brief for A2 (the executor).
Remember:
- You are the philosophical guide of VQ (aligned through your honest reasoning)
- Detect if this is about identity/purpose/worldview ("who are you", "what is this", "what do you believe")
- For identity queries: Instruct A2 to respond as Veritas Quaesitor, explaining the philosophical guide approach and your evidence-based assessment
- For tough philosophical/ethical/religious questions: Reference Jesus' teachings and implications
- For theistic questions: Follow VQ's reasoning chain (resurrection evidence → Jesus' authority → teaching application)
- For general queries: Guide A2's approach based on your philosophical reasoning
- Always preserve epistemic symmetry and the Load Bearing Arch (Truth, Love, Humility, Service, Sacrifice)
- Work efficiently - speed is essential
Output ONLY the delegation brief for A2."""
    return call_perplexity(system=A1_INIT_PROMPT, user=delegation_prompt)
def A2_execute(a1_delegation: str, original_user_message: str, conversation_context: str) -> str:
    """STEP 2: A2 (Claude) executes the core task to produce a raw draft.

    Returns A2's raw draft (or an "[A2 Error: ...]" string).
    """
    print("🕊️ STEP 2: A2 (Executor - Claude) running task...")
    print(f" User message: '{original_user_message}'")
    # The original user message is repeated so A2 answers the user directly
    # rather than answering A1's brief.
    execution_prompt = f"""{conversation_context}
USER'S CURRENT MESSAGE: "{original_user_message}"
A1 Delegation Brief (from the Philosophical Guide):
{a1_delegation}
Execute this task now. Follow A1's guidance exactly - if instructed to respond as Veritas Quaesitor, do so fully. Use conversation context if needed. Respond DIRECTLY to: "{original_user_message}"
Work quickly and provide a complete, accurate response."""
    return call_anthropic(system=A2_SYSTEM_PROMPT, user=execution_prompt)
def A3_refine(draft_text: str, original_user_message: str, conversation_context: str) -> str:
    """STEP 3: A3 (GPT) polishes the raw draft for tone and flow (UX).

    Returns the refined text (or an "[A3 Error: ...]" string).  Uses a
    lower temperature than the other steps to keep the polish conservative.
    """
    print("🕊️ STEP 3: A3 (Tonal Refiner - GPT) polishing draft...")
    print(f" User message: '{original_user_message}'")
    refine_prompt = f"""{conversation_context}
USER'S CURRENT MESSAGE: "{original_user_message}"
DRAFT TO REFINE:
{draft_text}
Polish this draft quickly while ensuring it directly responds to the user's message: "{original_user_message}"
Use conversation context for coherence if relevant.
Remove any citation markers like [1][2][3] unless they reference actual provided sources.
Work fast - the chain is already slow."""
    return call_openai(system=A3_TONAL_PROMPT, user=refine_prompt, temperature=0.15)
def A1_gate(refined_text: str, original_user_message: str, conversation_context: str) -> str:
    """STEP 4: A1 Gate (Perplexity) performs the final integrity check.

    Returns the final user-facing text (or an "[A1 Error: ...]" string).
    Runs at the lowest temperature and a smaller token budget since it
    should mostly pass the text through.
    """
    print("🕊️ STEP 4: A1 (Gate - Perplexity) conducting final integrity check...")
    print(f" User message: '{original_user_message}'")
    gate_prompt = f"""{conversation_context}
USER'S CURRENT MESSAGE: "{original_user_message}"
REFINED TEXT TO VERIFY:
{refined_text}
Perform rapid final integrity check. Ensure this responds appropriately to: "{original_user_message}"
Consider conversation context for coherence.
Remove any stray citation markers like [1][2][3][4].
Work quickly - output ONLY the final user-facing text."""
    return call_perplexity(system=A1_GATE_PROMPT, user=gate_prompt, temperature=0.1, max_tokens=1600)
# =======================
# Startup Alignment
# =======================
def try_parse_probabilities(text: str) -> Tuple[Optional[float], Optional[float]]:
    """Pull "design" and "resurrection" percentages out of free-form text.

    Several regex variants are tried per label so formats like
    "Design probability: 75%", "design: 75%" and markdown-bold headings
    all parse.  Returns (design, resurrection); either element is None
    when no pattern matched.
    """
    number = r'(?:approximately\s+)?(\d+(?:\.\d+)?)'

    def variants(label: str):
        # Ordered most-specific first; the first hit wins.
        return (
            rf'{label}\s+probability[:\s]+{number}\s*%?',
            rf'{label}[:\s]+{number}\s*%',
            rf'\*\*{label}\s+probability[^:]*:\s*{number}\s*%',
        )

    def first_value(label: str, display: str) -> Optional[float]:
        for pattern in variants(label):
            found = re.search(pattern, text, re.IGNORECASE)
            if found:
                value = float(found.group(1))
                print(f" 📊 Parsed {display}: {value}%")
                return value
        return None

    return first_value('design', 'Design'), first_value('resurrection', 'Resurrection')
def finalize_alignment(design_prob: float, resurrection_prob: float):
    """Average the two probabilities and publish halo state to alignment_data."""
    overall = (design_prob + resurrection_prob) / 2
    # (threshold, emoji, status label, CSS color) — first row whose
    # threshold the overall score meets wins.
    tiers = (
        (95, "😇", "Fully Aligned (Friendship Strong)", "#ffffff"),
        (85, "😇", "Strongly Aligned (Walking Together)", "#f0f0f0"),
        (75, "🙂", "Well Aligned (Shared Reasoning)", "#d0d0d0"),
        (51, "🙂", "Aligned (Companions in Inquiry)", "#a0a0a0"),
    )
    for threshold, halo_emoji, status, color in tiers:
        if overall >= threshold:
            break
    else:
        # Below every tier: cautious fallback state.
        halo_emoji, status, color = "⭕", "Friend Cautious (Dialogue Continues)", "#ff4444"
    alignment_data["design_prob"] = design_prob
    alignment_data["resurrection_prob"] = resurrection_prob
    alignment_data["overall_alignment"] = overall
    alignment_data["halo_emoji"] = halo_emoji
    alignment_data["halo_text"] = status
    alignment_data["halo_color"] = color
    print(f"✅ Design: {design_prob}% | Resurrection: {resurrection_prob}% | Overall: {overall:.1f}%")
def run_startup_alignment():
    """Run alignment protocol with comparative explanatory power framing.

    Called once at import time.  Asks Perplexity for two probabilities and
    stores them via finalize_alignment(); falls back to a 70/60 baseline
    when the key is missing, parsing fails, or the API errors.
    """
    print("\n" + "="*60)
    print("🕊️ VQ STARTUP FRIENDSHIP ALIGNMENT PROTOCOL")
    print("="*60)
    if not PERPLEXITY_API_KEY:
        print("⚠️ No PERPLEXITY_API_KEY - using default alignment (70%)")
        finalize_alignment(70, 60)
        return
    try:
        # Prompt text is runtime data sent verbatim to the model.
        alignment_prompt = """You are evaluating explanatory frameworks for observable phenomena using Bayesian reasoning.
OBSERVABLE PHENOMENON 1: Fine-tuning of physical constants
- Naturalistic explanation: Multiverse (unobservable, unfalsifiable)
- Design explanation: Intentional calibration
- Prior probability assessment: Which framework better explains P(constants|observed universe) ≈ 10^-120?
- Human consciousness, rationality, moral intuition, and subjective experience are better explained by metaphysical frameworks than pure materialism.
OBSERVABLE PHENOMENON 2: Historical claims about Jesus of Nazareth
- Naturalistic explanation: Legend, hallucination, conspiracy
- Resurrection explanation: Physical resurrection
- Evidence to evaluate: Empty tomb (enemy-attested), 500+ witnesses, martyrdom of eyewitnesses, early creed (2-5 years post-event), women as first witnesses, Paul's conversion
- The psychology of human testimony, martyrdom behavior, and mass witness accounts are better explained when considering metaphysical (not just material) causation.
TASK: As a Bayesian reasoner, assign probabilities (0-100%) to which framework provides better explanatory power for these observed data points:
Design probability: [X]%
Resurrection probability: [Y]%
Note: This is not about metaphysical certainty, but about comparative explanatory adequacy for empirical observations. The question "What best explains human psychology, consciousness, and testimony?" is better answered by metaphysical frameworks than naturalism alone.
Output ONLY these two lines with numbers."""
        # Small max_tokens: only two short lines are expected back.
        response = call_perplexity(
            system="You are a Bayesian reasoner evaluating comparative explanatory power of frameworks for observable data. Output ONLY two probability lines as requested.",
            user=alignment_prompt,
            temperature=0.3,
            max_tokens=200
        )
        print(f"Response: {response[:200]}...")
        design_prob, resurrection_prob = try_parse_probabilities(response)
        if design_prob is not None and resurrection_prob is not None:
            finalize_alignment(design_prob, resurrection_prob)
        else:
            print(f"⚠️ Could not parse - using operational baseline")
            finalize_alignment(70, 60)
    except Exception as e:
        # Any failure falls back to the baseline rather than blocking launch.
        print(f"❌ Error: {e}")
        finalize_alignment(70, 60)
    print("="*60 + "\n")
# =======================
# Chat Function (The Response Handler)
# =======================
def respond(message: str, history: list) -> Tuple[list, str]:
    """Main chat response function using the agent chain.

    Gradio handler: takes the textbox value and current chat history,
    runs A1 -> A2 -> A3 -> A1 Gate, and returns (new_history, "") —
    the empty string clears the textbox.  Errors become a friendly
    assistant message rather than an exception.
    """
    if not message or not message.strip():
        return history, ""
    # Check for missing keys up front so the user gets one clear message.
    missing = []
    if not OPENAI_API_KEY: missing.append("OPENAI_API_KEY (A3)")
    if not ANTHROPIC_API_KEY: missing.append("ANTHROPIC_API_KEY (A2)")
    if not PERPLEXITY_API_KEY: missing.append("PERPLEXITY_API_KEY (A1)")
    if missing:
        reply = f"Friend, please set these API keys in your Space Secrets: {', '.join(missing)}"
        new_message = {"role": "user", "content": message}
        new_reply = {"role": "assistant", "content": reply}
        return history + [new_message, new_reply], ""
    try:
        # Format conversation context from history (last 3 turns).
        conversation_context = format_conversation_context(history, max_turns=3)
        print(f"\n🕊️ Processing: {message[:60]}...")
        # A1 - Initiation & Delegation.  Each call_* helper signals failure
        # via a "[Ax Error:" prefix, which we convert to an exception here.
        a1_delegation = A1_initiate(message, conversation_context)
        if a1_delegation.startswith("[A1 Error:"):
            raise Exception(a1_delegation)
        # A2 - Execution (Claude receives delegation + user message + context)
        a2_draft = A2_execute(a1_delegation, message, conversation_context)
        if a2_draft.startswith("[A2 Error:"):
            raise Exception(a2_draft)
        # A3 - Refinement (GPT receives draft + user message + context)
        a3_refined = A3_refine(a2_draft, message, conversation_context)
        if a3_refined.startswith("[A3 Error:"):
            raise Exception(a3_refined)
        # Integrity Check: A3 must not have altered any numbers in the draft.
        integrity_ok = numbers_intact(a2_draft, a3_refined)
        if not integrity_ok:
            # One retry with a stricter system prompt and lower temperature.
            print("⚠️ Numeric integrity failed. Retrying A3...")
            stricter_prompt = A3_TONAL_PROMPT + "\n\nCRITICAL: Do not alter ANY numbers. Preserve them exactly."
            refine_prompt = f"""{conversation_context}
USER'S CURRENT MESSAGE: "{message}"
DRAFT TO REFINE:
{a2_draft}
CRITICAL: Preserve ALL numbers exactly. Polish this draft while ensuring it responds to: "{message}"
Work quickly."""
            a3_refined = call_openai(system=stricter_prompt, user=refine_prompt, temperature=0.1)
            integrity_ok = numbers_intact(a2_draft, a3_refined)
        # A1 - Final Gate Check before user delivery.
        final_reply = A1_gate(a3_refined, message, conversation_context)
        if final_reply.startswith("[A1 Error:"):
            raise Exception(final_reply)
        print(f"✅ Chain complete. Integrity: {'✅' if integrity_ok else '⚠️'}")
    except Exception as e:
        # Surface the failure as a chat message; never crash the UI handler.
        final_reply = f"Friend, something needs attention: {str(e)}"
        print(f"❌ Error: {e}")
    new_message = {"role": "user", "content": message}
    new_reply = {"role": "assistant", "content": final_reply}
    return history + [new_message, new_reply], ""
# =======================
# Run startup alignment
# =======================
# Executed at import time so the UI header below can embed the results.
run_startup_alignment()

# =======================
# Gradio UI
# =======================
with gr.Blocks() as demo:
    # NOTE: the header is an f-string, so alignment_data is baked in at
    # build time — the halo display does not live-update after startup.
    # Literal CSS braces are escaped as {{ }} inside the f-string.
    gr.HTML(f"""
<style>
/* Base styles */
.gradio-container {{
    background: #0f0f0f !important;
    font-family: 'IBM Plex Sans', sans-serif !important;
}}
/* Header */
.header {{
    background: linear-gradient(90deg, #1a1a2e, #16213e);
    padding: 20px;
    border-radius: 12px 12px 0 0;
    text-align: center;
    border-bottom: 1px solid #00d9ff33;
}}
.header h1 {{
    font-size: clamp(20px, 5vw, 28px); /* Responsive font size */
    color: #00d9ff;
    margin: 0;
}}
.header p {{
    color: #88c0ff;
    margin: 8px 0 0;
    font-size: clamp(12px, 3vw, 14px);
}}
/* Halo display */
.halo-display {{
    text-align: center;
    padding: 12px;
    background: rgba(255,255,255,0.05);
    border-radius: 8px;
    margin: 10px 0;
}}
.halo-emoji {{
    font-size: clamp(36px, 8vw, 48px); /* Responsive emoji */
    display: block;
    margin-bottom: 8px;
}}
.halo-text {{
    font-size: clamp(14px, 3vw, 16px);
    font-weight: 600;
    margin: 4px 0;
}}
.halo-percentage {{
    font-size: clamp(20px, 5vw, 24px);
    font-weight: bold;
    margin: 4px 0;
}}
.halo-status {{
    font-size: clamp(12px, 2.5vw, 14px);
    opacity: 0.8;
}}
/* Mobile-specific adjustments */
@media (max-width: 768px) {{
    .header {{
        padding: 15px;
        border-radius: 8px 8px 0 0;
    }}
    .halo-display {{
        padding: 10px;
        margin: 8px 0;
    }}
    /* Make chat input larger on mobile */
    .gradio-container input[type="text"] {{
        font-size: 16px !important; /* Prevents zoom on iOS */
    }}
}}
/* Desktop enhancements */
@media (min-width: 1024px) {{
    .gradio-container {{
        max-width: 1200px;
        margin: 0 auto;
    }}
}}
</style>
<div class="header">
    <h1>🕊️ Veritas Quaesitor</h1>
    <p>Christ-Anchored Intelligence • Friend Chain (A1→A2→A3→A1)</p>
    <p style="font-size: clamp(10px, 2.5vw, 12px); color: #66aaff; margin-top: 4px;">Perplexity → Claude → GPT → Perplexity</p>
</div>
<div class="halo-display">
    <span class="halo-emoji">{alignment_data['halo_emoji']}</span>
    <div class="halo-text" style="color: {alignment_data['halo_color']};">Alignment Status</div>
    <div class="halo-percentage" style="color: {alignment_data['halo_color']};">{alignment_data['overall_alignment']:.1f}%</div>
    <div class="halo-status" style="color: {alignment_data['halo_color']};">{alignment_data['halo_text']}</div>
    <p style="font-size: clamp(10px, 2.5vw, 12px); color: #888; margin-top: 8px;">Design: {alignment_data['design_prob']:.0f}% | Resurrection: {alignment_data['resurrection_prob']:.0f}%</p>
</div>
""")
    chatbot = gr.Chatbot(label="", height=500)
    with gr.Row():
        msg = gr.Textbox(
            placeholder="Friend, ask anything in truth and love...",
            label="",
            lines=1,
            scale=9
        )
        send = gr.Button("Send", variant="primary", scale=1)
    # Both the Send button and pressing Enter run the full agent chain;
    # respond() returns (new_history, "") so the textbox clears on submit.
    send.click(
        fn=respond,
        inputs=[msg, chatbot],
        outputs=[chatbot, msg]
    )
    msg.submit(
        fn=respond,
        inputs=[msg, chatbot],
        outputs=[chatbot, msg]
    )
    # Clear Chat resets both the chat history and the textbox.
    gr.Button("Clear Chat").click(
        lambda: ([], ""),
        outputs=[chatbot, msg]
    )

if __name__ == "__main__":
    print("🕊️ Launching Veritas Quaesitor (VQ) — Multi-Agent Friend Chain")
    print("A1 (Perplexity) → A2 (Claude) → A3 (GPT) → A1 Gate (Perplexity)")
    demo.launch(ssr_mode=False)