# mvi-ai-engine / core/response_engine.py
# Updated by Musombi -- commit b408081 (verified)
import torch
from typing import Optional, Dict
from reasoning.scraper import scrape_social_knowledge
# Preferred torch device for this module: CUDA GPU when available, else CPU.
# NOTE(review): DEVICE is not referenced anywhere in this chunk -- presumably
# consumed by other modules importing it; confirm before removing.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
class ResponseEngine:
    """
    Cognitive response generator for MVI-AI.

    Produces clean user-facing responses while accepting
    full cognitive inputs from the AI core. All knowledge
    gathering (memory, model registry, social scraping) is
    best-effort: a failure in any source never breaks the
    response, it just yields fewer reasoning blocks.
    """

    def __init__(self, ltm=None, registry=None):
        # ltm: optional long-term-memory store exposing retrieve_text(text, k).
        # registry: optional model registry -- stored for API compatibility;
        #           not read directly here (model outputs arrive via generate()).
        self.ltm = ltm
        self.registry = registry

    # ================= MEMORY =================
    def _memory_reasoning(self, text: str) -> list:
        """Retrieve up to 5 memories relevant to *text* from the LTM.

        Returns [] when no LTM is attached, retrieval raises, or the
        store returns a falsy result (e.g. None).
        """
        if not self.ltm:
            return []
        try:
            memories = self.ltm.retrieve_text(text, k=5)
        except Exception:
            # Memory retrieval is best-effort; never let it break a response.
            memories = []
        # Normalize falsy results (None, []) to a plain empty list.
        return memories or []

    # ================= MODEL INSIGHTS =================
    def _registry_reasoning(self, registry_outputs) -> list:
        """Summarize per-model tensor outputs as human-readable insight lines.

        Non-tensor values are skipped; weak signals (|mean| <= 0.01) are
        filtered out. Returns [] for a falsy *registry_outputs* (incl. None).
        """
        if not registry_outputs:
            return []
        insights = []
        for name, tensor in registry_outputs.items():
            if not isinstance(tensor, torch.Tensor):
                continue
            # Keep the try minimal: only the reduction can plausibly raise
            # (e.g. on an empty or non-numeric tensor); skip rather than crash.
            try:
                score = torch.mean(tensor).item()
            except Exception:
                continue
            # Filter weak signals.
            if abs(score) > 0.01:
                insights.append(f"{name} suggests relevance {score:.3f}")
        return insights

    # ================= SOCIAL KNOWLEDGE =================
    def _social_learning(self, text: str) -> list:
        """Best-effort social-knowledge scrape for *text*, capped at 5 items."""
        try:
            scraped = scrape_social_knowledge(text)
        except Exception:
            # External scraping must never break response generation.
            scraped = []
        if not scraped:
            return []
        return scraped[:5]

    # ================= RESPONSE GENERATION =================
    def generate(
        self,
        text: str,
        intent: Optional[str] = None,
        emotion: Optional[str] = None,
        model_outputs: Optional[Dict[str, torch.Tensor]] = None,
        cognitive_state: Optional[torch.Tensor] = None,  # <-- keep compatibility
        system_prompt: Optional[str] = None
    ) -> str:
        """Build the final user-facing response string.

        Combines memory hits and model-registry insights; falls back to
        social scraping only when fewer than 2 reasoning blocks were found.
        *cognitive_state* is accepted but unused (interface compatibility).
        Returns a single space-joined string.
        """
        reasoning_blocks = []

        # MEMORY
        reasoning_blocks.extend(self._memory_reasoning(text))

        # MODEL REGISTRY
        reasoning_blocks.extend(self._registry_reasoning(model_outputs))

        # SOCIAL FALLBACK -- only when local reasoning produced little.
        if len(reasoning_blocks) < 2:
            reasoning_blocks.extend(self._social_learning(text))

        # USER RESPONSE
        response_parts = []
        if system_prompt:
            response_parts.append(system_prompt)
        # Clean query start
        response_parts.append(f"Answering: {text}")
        if intent:
            response_parts.append(f"(Intent: {intent})")
        if emotion:
            response_parts.append(f"(Detected tone: {emotion})")
        for block in reasoning_blocks:
            if isinstance(block, dict):
                # Structured knowledge blocks carry their payload under "text".
                # (The "source" key, if present, is intentionally not rendered.)
                response_parts.append(str(block.get("text", "")))
            else:
                response_parts.append(str(block))
        return " ".join(response_parts)