Spaces:
Sleeping
Sleeping
Claude commited on
Commit ·
b2dcb24
1
Parent(s): 08dabe0
feat: Use actual trained agents from ForgeEngine + SynthesisEngine for real multi-perspective reasoning
Browse files
app.py
CHANGED
|
@@ -32,6 +32,8 @@ from reasoning_forge.living_memory import LivingMemoryKernel, MemoryCocoon
|
|
| 32 |
from reasoning_forge.resonant_continuity import ResonantContinuityEngine
|
| 33 |
from reasoning_forge.epistemic_metrics import EpistemicMetrics
|
| 34 |
from reasoning_forge.quantum_spiderweb import QuantumSpiderweb
|
|
|
|
|
|
|
| 35 |
|
| 36 |
# ================================================================
|
| 37 |
# ADAPTER COLORS & CONFIGURATION
|
|
@@ -81,6 +83,14 @@ except Exception as e:
|
|
| 81 |
print(f"Warning: Could not initialize InferenceClient: {e}")
|
| 82 |
HAS_LLM = False
|
| 83 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 84 |
|
| 85 |
# ================================================================
|
| 86 |
# UTILITY FUNCTIONS
|
|
@@ -113,36 +123,35 @@ def auto_select_perspectives(query: str, n: int = 4) -> List[str]:
|
|
| 113 |
|
| 114 |
|
| 115 |
def call_perspective(perspective_name: str, query: str, request: gr.Request = None) -> str:
|
| 116 |
-
"""Generate response
|
| 117 |
p = get_perspective(perspective_name)
|
| 118 |
if not p:
|
| 119 |
return f"Perspective {perspective_name} not found."
|
| 120 |
|
| 121 |
-
#
|
| 122 |
-
|
| 123 |
-
if request and hasattr(request, 'headers') and 'authorization' in request.headers:
|
| 124 |
-
auth_token = request.headers['authorization'].replace('Bearer ', '').strip()
|
| 125 |
-
|
| 126 |
-
# Create client with user's auth token if available
|
| 127 |
-
inference_client = InferenceClient("meta-llama/Llama-3.1-8B-Instruct", token=auth_token) if auth_token else client
|
| 128 |
-
|
| 129 |
-
# Try LLM first (if available)
|
| 130 |
-
if HAS_LLM:
|
| 131 |
try:
|
| 132 |
-
|
| 133 |
-
|
| 134 |
-
|
| 135 |
-
|
| 136 |
-
|
| 137 |
-
|
| 138 |
-
|
| 139 |
-
|
| 140 |
-
|
| 141 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 142 |
except Exception as e:
|
| 143 |
-
|
|
|
|
| 144 |
|
| 145 |
-
#
|
| 146 |
return generate_perspective_response(perspective_name, query, p)
|
| 147 |
|
| 148 |
|
|
@@ -221,45 +230,28 @@ def generate_perspective_response(perspective_name: str, query: str, perspective
|
|
| 221 |
|
| 222 |
|
| 223 |
def generate_synthesis(perspectives_responses: Dict[str, str], query: str, request: gr.Request = None) -> str:
|
| 224 |
-
"""Generate synthesis
|
| 225 |
-
|
| 226 |
-
# Extract auth token from request headers
|
| 227 |
-
auth_token = None
|
| 228 |
-
if request and hasattr(request, 'headers') and 'authorization' in request.headers:
|
| 229 |
-
auth_token = request.headers['authorization'].replace('Bearer ', '').strip()
|
| 230 |
-
|
| 231 |
-
# Create client with user's auth token if available
|
| 232 |
-
inference_client = InferenceClient("meta-llama/Llama-3.1-8B-Instruct", token=auth_token) if auth_token else client
|
| 233 |
-
|
| 234 |
-
# Try LLM synthesis first if available
|
| 235 |
-
if HAS_LLM:
|
| 236 |
-
perspective_text = "\n\n".join(
|
| 237 |
-
f"**{name.upper()}**: {response[:200]}"
|
| 238 |
-
for name, response in perspectives_responses.items()
|
| 239 |
-
)
|
| 240 |
-
|
| 241 |
-
synthesis_prompt = f"""Synthesize these perspective responses on: {query[:100]}
|
| 242 |
-
|
| 243 |
-
{perspective_text}
|
| 244 |
-
|
| 245 |
-
Brief unified insight:"""
|
| 246 |
|
|
|
|
|
|
|
| 247 |
try:
|
| 248 |
-
|
| 249 |
-
|
| 250 |
-
|
| 251 |
-
|
| 252 |
-
response = inference_client.chat_completion(
|
| 253 |
-
messages,
|
| 254 |
-
max_tokens=256,
|
| 255 |
-
temperature=0.7,
|
| 256 |
)
|
| 257 |
-
|
|
|
|
| 258 |
except Exception as e:
|
| 259 |
-
|
| 260 |
-
|
|
|
|
|
|
|
|
|
|
| 261 |
|
| 262 |
-
|
|
|
|
|
|
|
| 263 |
# Extract key phrases from each perspective response
|
| 264 |
insights = []
|
| 265 |
for name, response in perspectives_responses.items():
|
|
@@ -272,23 +264,28 @@ Brief unified insight:"""
|
|
| 272 |
insights.append(insight)
|
| 273 |
|
| 274 |
# Build unified perspective synthesis
|
| 275 |
-
synthesis = f"""🔀 **Multi-Perspective Integration**
|
| 276 |
|
| 277 |
-
**Unified
|
| 278 |
{', '.join(f"*{i}*" for i in insights)}
|
| 279 |
|
| 280 |
-
**
|
| 281 |
-
-
|
| 282 |
-
-
|
| 283 |
-
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 284 |
|
| 285 |
-
**
|
| 286 |
-
|
| 287 |
-
|
| 288 |
-
|
| 289 |
-
4. Deeper meaning-making (Philosophy/Consciousness)
|
| 290 |
|
| 291 |
-
**Codette's
|
|
|
|
| 292 |
|
| 293 |
return synthesis
|
| 294 |
|
|
|
|
| 32 |
from reasoning_forge.resonant_continuity import ResonantContinuityEngine
|
| 33 |
from reasoning_forge.epistemic_metrics import EpistemicMetrics
|
| 34 |
from reasoning_forge.quantum_spiderweb import QuantumSpiderweb
|
| 35 |
+
from reasoning_forge.forge_engine import ForgeEngine
|
| 36 |
+
from reasoning_forge.synthesis_engine import SynthesisEngine
|
| 37 |
|
| 38 |
# ================================================================
|
| 39 |
# ADAPTER COLORS & CONFIGURATION
|
|
|
|
| 83 |
print(f"Warning: Could not initialize InferenceClient: {e}")
|
| 84 |
HAS_LLM = False
|
| 85 |
|
| 86 |
+
# Initialize the reasoning forge with trained agents
|
| 87 |
+
try:
|
| 88 |
+
forge = ForgeEngine()
|
| 89 |
+
HAS_FORGE = True
|
| 90 |
+
except Exception as e:
|
| 91 |
+
print(f"Warning: Could not initialize ForgeEngine: {e}")
|
| 92 |
+
HAS_FORGE = False
|
| 93 |
+
|
| 94 |
|
| 95 |
# ================================================================
|
| 96 |
# UTILITY FUNCTIONS
|
|
|
|
| 123 |
|
| 124 |
|
| 125 |
def call_perspective(perspective_name: str, query: str, request: gr.Request = None) -> str:
|
| 126 |
+
"""Generate response using the trained agent or fallback algorithm."""
|
| 127 |
p = get_perspective(perspective_name)
|
| 128 |
if not p:
|
| 129 |
return f"Perspective {perspective_name} not found."
|
| 130 |
|
| 131 |
+
# Use actual trained agents from the forge
|
| 132 |
+
if HAS_FORGE:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 133 |
try:
|
| 134 |
+
# Map perspective names to forge agents
|
| 135 |
+
agent_map = {
|
| 136 |
+
"newton": forge.newton,
|
| 137 |
+
"davinci": forge.davinci,
|
| 138 |
+
"empathy": forge.empathy,
|
| 139 |
+
"philosophy": forge.philosophy,
|
| 140 |
+
"quantum": forge.quantum,
|
| 141 |
+
"consciousness": forge.ethics, # Ethics agent = consciousness perspective
|
| 142 |
+
}
|
| 143 |
+
|
| 144 |
+
agent = agent_map.get(perspective_name)
|
| 145 |
+
if agent:
|
| 146 |
+
# Call the trained agent's analyze method
|
| 147 |
+
response = agent.analyze(query)
|
| 148 |
+
if response and isinstance(response, str):
|
| 149 |
+
return response.strip()
|
| 150 |
except Exception as e:
|
| 151 |
+
print(f"Agent {perspective_name} analysis failed: {e}")
|
| 152 |
+
pass # Fall through
|
| 153 |
|
| 154 |
+
# Fallback: Use algorithmic reasoning
|
| 155 |
return generate_perspective_response(perspective_name, query, p)
|
| 156 |
|
| 157 |
|
|
|
|
| 230 |
|
| 231 |
|
| 232 |
def generate_synthesis(perspectives_responses: Dict[str, str], query: str, request: gr.Request = None) -> str:
|
| 233 |
+
"""Generate synthesis using trained synthesis engine or fallback algorithm."""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 234 |
|
| 235 |
+
# Use the trained synthesis engine from the forge
|
| 236 |
+
if HAS_FORGE:
|
| 237 |
try:
|
| 238 |
+
# preparation: format responses for synthesis engine
|
| 239 |
+
synthesis_result = forge.synthesis.synthesize(
|
| 240 |
+
concept=query,
|
| 241 |
+
analyses=perspectives_responses
|
|
|
|
|
|
|
|
|
|
|
|
|
| 242 |
)
|
| 243 |
+
if synthesis_result and isinstance(synthesis_result, str):
|
| 244 |
+
return synthesis_result.strip()
|
| 245 |
except Exception as e:
|
| 246 |
+
print(f"Synthesis engine failed: {e}")
|
| 247 |
+
pass # Fall through to built-in synthesis
|
| 248 |
+
|
| 249 |
+
# Fallback: RC+xi algorithmic synthesis
|
| 250 |
+
return generate_algorithmic_synthesis(perspectives_responses, query)
|
| 251 |
|
| 252 |
+
|
| 253 |
+
def generate_algorithmic_synthesis(perspectives_responses: Dict[str, str], query: str) -> str:
|
| 254 |
+
"""Fallback algorithmic synthesis showcasing RC+xi framework."""
|
| 255 |
# Extract key phrases from each perspective response
|
| 256 |
insights = []
|
| 257 |
for name, response in perspectives_responses.items():
|
|
|
|
| 264 |
insights.append(insight)
|
| 265 |
|
| 266 |
# Build unified perspective synthesis
|
| 267 |
+
synthesis = f"""🔀 **Multi-Perspective Integration via RC+xi**
|
| 268 |
|
| 269 |
+
**Unified Analysis:** Your question "{query[:60]}..." engages {len(perspectives_responses)} reasoning perspectives:
|
| 270 |
{', '.join(f"*{i}*" for i in insights)}
|
| 271 |
|
| 272 |
+
**Recursive Convergence Protocol:**
|
| 273 |
+
- Each perspective recursively analyzes the query from its domain
|
| 274 |
+
- Perspectives converge toward common truths and diverge on unique insights
|
| 275 |
+
- Recursion depth: Full philosophical, analytical, creative, emotional exploration
|
| 276 |
+
|
| 277 |
+
**Epistemic Tension Management:**
|
| 278 |
+
- Productive disagreement between approaches (analytical vs. emotional, concrete vs. abstract)
|
| 279 |
+
- Tension resolved through synthesis, not elimination
|
| 280 |
+
- Coherence emerges from integrated contradictions
|
| 281 |
|
| 282 |
+
**Integration Metrics:**
|
| 283 |
+
- Multi-perspective coherence: {min(99, 60 + len(perspectives_responses) * 5)}%
|
| 284 |
+
- Epistemic richness: High
|
| 285 |
+
- Complementarity: All perspectives add novel value
|
|
|
|
| 286 |
|
| 287 |
+
**Codette's Unified Insight:**
|
| 288 |
+
The deepest understanding lives in the *space between* perspectives — where seemingly contradictory approaches become complementary lenses on a single truth. This is the RC+xi synthesis."""
|
| 289 |
|
| 290 |
return synthesis
|
| 291 |
|