# supernova/reasoning_engine.py
"""
Enhanced Reasoning Engine for Supernova AI
Provides sophisticated problem-solving capabilities through structured reasoning,
multi-tool coordination, and knowledge synthesis.
"""
import torch
import numpy as np
try:
import sympy as sp
except ImportError:
sp = None
import re
import json
from typing import List, Dict, Any, Optional, Tuple
from dataclasses import dataclass
from enum import Enum
from .tools import ToolOrchestrator, ToolCall
class ReasoningType(Enum):
    """Reasoning strategy assigned to a step.

    Each value selects a guidance sentence in
    EnhancedReasoningEngine._build_enhanced_context when prompting the model.
    """
    ANALYTICAL = "analytical"    # systematic, evidence-based analysis
    CREATIVE = "creative"        # innovative / unconventional exploration
    COMPARATIVE = "comparative"  # weighing perspectives, pros and cons
    CAUSAL = "causal"            # cause-and-effect tracing
    SEQUENTIAL = "sequential"    # ordered step-by-step breakdown
    EVALUATIVE = "evaluative"    # criteria-based judgment and recommendations
@dataclass
class ReasoningStep:
    """One step in a reasoning chain: what to do, how, and its outcome."""
    step_number: int                       # 1-based position in the chain
    description: str                       # human-readable purpose of the step
    reasoning_type: ReasoningType          # strategy used to prompt the model
    tool_needed: Optional[str] = None      # tool name (e.g. "serper"), None = model generation
    query: Optional[str] = None            # query sent to the tool or model
    result: Optional[str] = None           # filled in after execution
    confidence: float = 0.8                # 0..1; lowered on tool/generation failure
@dataclass
class KnowledgeDomain:
    """A knowledge domain with supporting evidence.

    NOTE(review): not referenced anywhere in this module — presumably consumed
    by other modules; confirm before removing.
    """
    domain: str            # domain name, e.g. "science"
    confidence: float      # 0..1 confidence in the domain attribution
    sources: List[str]     # provenance of the gathered facts
    key_facts: List[str]   # salient facts collected for this domain
class EnhancedReasoningEngine:
    """Advanced reasoning engine that mimics sophisticated AI reasoning patterns.

    Pipeline: analyze a query's complexity, decompose complex queries into a
    chain of ReasoningStep objects, execute each step with either a tool (via
    the ToolOrchestrator) or model generation, then synthesize the step
    results into a single response.
    """

    def __init__(self, tool_orchestrator: ToolOrchestrator):
        """Store the tool orchestrator and set up domain keyword tables."""
        self.tools = tool_orchestrator
        # NOTE(review): never written in this module; presumably populated by
        # callers — confirm.
        self.conversation_context: List[str] = []
        # Keyword lists used to detect which knowledge domains a query touches.
        self.domain_expertise = {
            'science': ['physics', 'chemistry', 'biology', 'mathematics', 'astronomy'],
            'technology': ['programming', 'ai', 'computing', 'engineering', 'electronics'],
            'humanities': ['history', 'literature', 'philosophy', 'psychology', 'sociology'],
            'medicine': ['anatomy', 'pharmacology', 'diagnosis', 'treatment', 'research'],
            'business': ['finance', 'management', 'economics', 'marketing', 'strategy'],
            'arts': ['music', 'visual arts', 'design', 'architecture', 'performance']
        }

    def analyze_query_complexity(self, query: str) -> Dict[str, Any]:
        """Analyze the complexity and requirements of a user query.

        Returns:
            dict with keys:
                'complexity': 'simple' | 'moderate' | 'complex'
                'domains': list of detected knowledge domains
                'multi_step_needed': bool, query asks for multi-part work
                'estimated_steps': int, rough plan size capped at 5
        """
        query_lower = query.lower()  # hoisted: original lowercased per check
        # Ordered least -> most complex so the most complex matching level
        # wins (explicit version of the original dict-iteration last-match-wins,
        # which silently depended on dict insertion order).
        complexity_indicators = [
            ('simple', ['what is', 'define', 'who is', 'when did']),
            ('moderate', ['how does', 'why does', 'explain', 'compare', 'analyze']),
            ('complex', ['evaluate', 'synthesize', 'create', 'design',
                         'solve for multiple', 'consider all factors']),
        ]
        domains_detected = [
            domain
            for domain, keywords in self.domain_expertise.items()
            if any(keyword in query_lower for keyword in keywords)
        ]
        complexity_level = 'simple'
        for level, indicators in complexity_indicators:
            if any(indicator in query_lower for indicator in indicators):
                complexity_level = level
        requires_multi_step = any(phrase in query_lower for phrase in [
            'step by step', 'first...then', 'multiple', 'several', 'both',
            'compare and contrast'
        ])
        return {
            'complexity': complexity_level,
            'domains': domains_detected,
            'multi_step_needed': requires_multi_step,
            'estimated_steps': min(5, len(domains_detected)
                                   + (2 if requires_multi_step else 1))
        }

    def decompose_complex_query(self, query: str, analysis: Dict[str, Any]) -> List[ReasoningStep]:
        """Break down complex queries into manageable reasoning steps.

        Builds, in order: optional web-search / math-tool steps, one
        model-analysis step per detected domain, and a final synthesis step
        for 'complex' queries. Falls back to a single direct-response step
        when nothing else applies.
        """
        query_lower = query.lower()
        steps: List[ReasoningStep] = []
        step_num = 1
        # Step 1: information gathering (only for non-trivial queries).
        if analysis['complexity'] in ['moderate', 'complex']:
            # Current/recent facts need a web search rather than model recall.
            if any(term in query_lower for term in ['current', 'latest', 'recent', 'today', '2024', '2025']):
                steps.append(ReasoningStep(
                    step_number=step_num,
                    description="Gather current information from web sources",
                    reasoning_type=ReasoningType.ANALYTICAL,
                    tool_needed="serper",
                    query=query
                ))
                step_num += 1
            # Math-looking queries get routed to the symbolic math tool.
            if any(term in query_lower for term in ['calculate', 'compute', 'solve', 'derivative', 'integral']):
                steps.append(ReasoningStep(
                    step_number=step_num,
                    description="Perform mathematical computation",
                    reasoning_type=ReasoningType.ANALYTICAL,
                    tool_needed="math_engine",
                    query=query
                ))
                step_num += 1
        # Step 2: one model-generation step per detected domain.
        for domain in analysis['domains']:
            steps.append(ReasoningStep(
                step_number=step_num,
                description=f"Analyze from {domain} perspective",
                reasoning_type=ReasoningType.ANALYTICAL,
                tool_needed=None,  # will use model generation with domain context
                query=f"From a {domain} perspective: {query}"
            ))
            step_num += 1
        # Step 3: synthesis and evaluation for genuinely complex queries.
        if analysis['complexity'] == 'complex':
            steps.append(ReasoningStep(
                step_number=step_num,
                description="Synthesize information and provide comprehensive analysis",
                reasoning_type=ReasoningType.EVALUATIVE,
                tool_needed=None,
                query=query
            ))
        return steps if steps else [ReasoningStep(1, "Direct response", ReasoningType.ANALYTICAL, query=query)]

    def execute_reasoning_chain(self, steps: List[ReasoningStep], model, tokenizer) -> List[ReasoningStep]:
        """Execute a chain of reasoning steps in order.

        Tool-backed steps go through the ToolOrchestrator; the rest use model
        generation with context accumulated from earlier successful steps.
        Each step is mutated in place (result, confidence) and returned.
        """
        results: List[ReasoningStep] = []
        context_info: List[str] = []  # summaries of earlier steps, fed to later prompts
        for step in steps:
            if step.tool_needed:
                # Use the appropriate tool.
                tool_call = ToolCall(tool=step.tool_needed, query=step.query)
                executed_call = self.tools.execute_tool_call(tool_call)
                if executed_call.result:
                    step.result = executed_call.result
                    step.confidence = 0.9  # tool output trusted more than generation
                    context_info.append(f"{step.description}: {executed_call.result}")
                else:
                    step.result = f"Tool execution failed: {executed_call.error}"
                    step.confidence = 0.3
            else:
                # Use model generation with enhanced context.
                enhanced_context = self._build_enhanced_context(step, context_info)
                try:
                    response = self._generate_with_context(model, tokenizer, enhanced_context, step.query)
                    step.result = response
                    step.confidence = 0.7
                    context_info.append(f"{step.description}: {response}")
                except Exception as e:
                    # Best-effort: record the failure and keep the chain going.
                    step.result = f"Generation failed: {str(e)}"
                    step.confidence = 0.2
            results.append(step)
        return results

    def _build_enhanced_context(self, step: ReasoningStep, context_info: List[str]) -> str:
        """Build the system-style prompt preamble for a model-generated step."""
        context_parts = [
            "You are Supernova, an advanced AI assistant with deep expertise across multiple domains.",
            "Apply sophisticated reasoning and provide comprehensive, nuanced responses.",
            ""
        ]
        if context_info:
            context_parts.extend([
                "Previous analysis steps:",
                *[f"- {info}" for info in context_info],
                ""
            ])
        # Per-reasoning-type guidance injected into the prompt.
        reasoning_guidance = {
            ReasoningType.ANALYTICAL: "Analyze systematically, consider multiple factors, and provide evidence-based insights.",
            ReasoningType.CREATIVE: "Think creatively, explore innovative solutions, and consider unconventional approaches.",
            ReasoningType.COMPARATIVE: "Compare different perspectives, weigh pros and cons, and identify key differences.",
            ReasoningType.CAUSAL: "Identify cause-and-effect relationships, trace underlying mechanisms, and explain why things happen.",
            ReasoningType.SEQUENTIAL: "Break down into logical steps, show progression, and maintain clear sequencing.",
            ReasoningType.EVALUATIVE: "Make judgments based on criteria, assess quality and effectiveness, and provide recommendations."
        }
        context_parts.extend([
            f"Reasoning approach: {reasoning_guidance.get(step.reasoning_type, 'Provide thorough analysis.')}",
            f"Focus area: {step.description}",
            ""
        ])
        return "\n".join(context_parts)

    def _generate_with_context(self, model, tokenizer, context: str, query: str,
                               max_tokens: int = 400, *,
                               temperature: float = 0.8, top_k: int = 50) -> str:
        """Generate a response with the model using top-k sampling.

        Args:
            context: prompt preamble built by _build_enhanced_context.
            query: user query appended after the context.
            max_tokens: maximum number of new tokens to sample.
            temperature: logit temperature (was a hard-coded 0.8).
            top_k: sample only from the top-k logits (was a hard-coded 50).

        Returns:
            The decoded text after the "Detailed Response:" marker, or the
            full decoded sequence if the marker is not present.
        """
        full_prompt = f"{context}\nUser Query: {query}\n\nDetailed Response:"
        model.eval()
        device = next(model.parameters()).device
        input_ids = tokenizer.encode(full_prompt, return_tensors="pt").to(device)
        # Stop early at end-of-sequence when the tokenizer defines one; the
        # original always sampled the full max_tokens.
        eos_id = getattr(tokenizer, "eos_token_id", None)
        with torch.no_grad():
            for _ in range(max_tokens):
                # Crop the conditioning window to the model's context size.
                if input_ids.size(1) >= model.cfg.n_positions:
                    input_cond = input_ids[:, -model.cfg.n_positions:]
                else:
                    input_cond = input_ids
                logits, _ = model(input_cond)
                logits = logits[:, -1, :] / temperature
                # Top-k filtering: mask everything below the k-th best logit.
                v, _ = torch.topk(logits, min(top_k, logits.size(-1)))
                logits[logits < v[:, [-1]]] = -float("Inf")
                probs = torch.softmax(logits, dim=-1)
                next_id = torch.multinomial(probs, num_samples=1)
                input_ids = torch.cat([input_ids, next_id], dim=1)
                if eos_id is not None and next_id.item() == eos_id:
                    break
        response = tokenizer.decode(input_ids[0].tolist())
        # Return only the text after the response marker.
        if "Detailed Response:" in response:
            response = response.split("Detailed Response:", 1)[1].strip()
        return response

    def synthesize_final_response(self, steps: List[ReasoningStep], original_query: str) -> str:
        """Combine successful step results into one comprehensive response.

        Steps with confidence <= 0.5 (failed tool calls / generation) are
        dropped. Multi-step answers get a header and a short synthesis footer
        with an averaged confidence figure.

        Note: original_query is currently unused; it is kept for interface
        stability with existing callers.
        """
        successful_steps = [step for step in steps if step.result and step.confidence > 0.5]
        if not successful_steps:
            return ("I apologize, but I encountered difficulties processing your request. "
                    "Could you please rephrase or provide more specific details?")
        response_parts = []
        # Add an executive summary header for multi-step analyses.
        if len(successful_steps) > 2:
            response_parts.append("Here's my comprehensive analysis:")
            response_parts.append("")
        # The original branched on tool vs. model steps here, but both arms
        # were identical — results are appended uniformly.
        for step in successful_steps:
            response_parts.append(step.result)
            response_parts.append("")
        # Add a synthesis footer for multi-step responses.
        if len(successful_steps) > 2:
            confidence_score = sum(step.confidence for step in successful_steps) / len(successful_steps)
            response_parts.extend([
                "**Key Insights:**",
                "• Multiple perspectives have been considered",
                f"• Analysis confidence: {confidence_score:.1%}",
                "• Both current information and domain expertise were utilized"
            ])
        return "\n".join(response_parts).strip()

    def process_complex_query(self, query: str, model, tokenizer) -> str:
        """Main entry point: answer a query with the appropriate strategy.

        Simple single-step queries try direct tool routing first, then fall
        back to one enhanced generation pass. Everything else goes through
        decomposition -> chain execution -> synthesis.
        """
        analysis = self.analyze_query_complexity(query)
        if analysis['complexity'] == 'simple' and not analysis['multi_step_needed']:
            tool_call = self.tools.route_query(query)
            if tool_call:
                executed_call = self.tools.execute_tool_call(tool_call)
                if executed_call.result:
                    return executed_call.result
            # Fall back to enhanced model generation.
            context = self._build_enhanced_context(
                ReasoningStep(1, "Direct response", ReasoningType.ANALYTICAL),
                []
            )
            return self._generate_with_context(model, tokenizer, context, query)
        # Multi-step reasoning for complex queries.
        reasoning_steps = self.decompose_complex_query(query, analysis)
        executed_steps = self.execute_reasoning_chain(reasoning_steps, model, tokenizer)
        return self.synthesize_final_response(executed_steps, query)
# Import torch and other needed modules here to avoid import issues
# NOTE(review): torch, sympy and numpy are already imported at the top of the
# module, so these re-imports are redundant no-ops (Python imports are
# idempotent) — confirm and remove.
import torch
try:
    import sympy as sp
    import numpy as np
except ImportError:
    pass