File size: 3,279 Bytes
09fe520
65eeb27
b81d759
5129ff1
def3d6a
3c74bc2
def3d6a
b81d759
09fe520
6f24429
b81d759
b408081
 
6f24429
 
b81d759
6f24429
b81d759
 
d9760a9
b81d759
b408081
b81d759
 
 
d9760a9
 
 
 
b81d759
d9760a9
b81d759
d9760a9
b81d759
b408081
b81d759
 
 
 
 
 
b408081
b81d759
 
b408081
d9760a9
 
b408081
 
 
 
 
 
 
d9760a9
 
b81d759
 
 
d9760a9
b81d759
b408081
d9760a9
 
 
 
b81d759
b408081
 
 
 
def3d6a
d9760a9
6f24429
 
 
 
 
 
b408081
b81d759
6f24429
3c74bc2
b81d759
 
b408081
 
 
b81d759
b408081
 
 
b81d759
b408081
b81d759
d9760a9
b81d759
b408081
b81d759
 
 
 
 
b408081
 
b81d759
 
b408081
b81d759
 
b408081
d9760a9
b81d759
b408081
65eeb27
b408081
65eeb27
b27a7ae
b408081
 
 
65eeb27
 
b81d759
d9760a9
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
import torch
from typing import Optional, Dict

from reasoning.scraper import scrape_social_knowledge

# Default compute device ("cuda" when a GPU is visible, else CPU).
# NOTE(review): not referenced anywhere else in this chunk — confirm external use.
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"


class ResponseEngine:
    """
    Cognitive response generator for MVI-AI.

    Produces clean user-facing responses while accepting
    full cognitive inputs from the AI core.

    Parameters
    ----------
    ltm:
        Optional long-term-memory object exposing ``retrieve_text(text, k)``.
    registry:
        Optional model registry (stored for callers; not read in this class).
    """

    def __init__(self, ltm=None, registry=None):
        self.ltm = ltm
        self.registry = registry

    # ================= MEMORY =================
    def _memory_reasoning(self, text: str) -> list:
        """Retrieve up to 5 long-term memories relevant to *text*.

        Best-effort: any retrieval failure (or missing LTM) yields [].
        """
        if not self.ltm:
            return []

        try:
            # NOTE(review): assumes retrieve_text returns a list-like of
            # strings or dicts — confirm against the LTM implementation.
            memories = self.ltm.retrieve_text(text, k=5)
        except Exception:
            memories = []

        # Normalize falsy results (None, empty) to a plain empty list.
        return memories or []

    # ================= MODEL INSIGHTS =================
    def _registry_reasoning(self, registry_outputs) -> list:
        """Turn per-model tensor outputs into human-readable insight strings.

        Each tensor is reduced to its scalar mean; signals with
        |mean| <= 0.01 are treated as noise and dropped.
        """
        if not registry_outputs:
            return []

        insights = []

        for name, tensor in registry_outputs.items():
            if not isinstance(tensor, torch.Tensor):
                continue

            try:
                score = torch.mean(tensor).item()
            except Exception:
                # Skip tensors that cannot be reduced (e.g. empty).
                continue

            # Filter weak signals.
            if abs(score) > 0.01:
                insights.append(f"{name} suggests relevance {score:.3f}")

        return insights

    # ================= SOCIAL KNOWLEDGE =================
    def _social_learning(self, text: str) -> list:
        """Scrape social knowledge for *text*, returning at most 5 items.

        Best-effort: scraper failures yield [].
        """
        try:
            scraped = scrape_social_knowledge(text)
        except Exception:
            scraped = []

        # Guard against None/empty before slicing.
        return scraped[:5] if scraped else []

    # ================= RESPONSE GENERATION =================
    def generate(
        self,
        text: str,
        intent: Optional[str] = None,
        emotion: Optional[str] = None,
        model_outputs: Optional[Dict[str, torch.Tensor]] = None,
        cognitive_state: Optional[torch.Tensor] = None,  # accepted for API compatibility; unused here
        system_prompt: Optional[str] = None
    ) -> str:
        """Build a single user-facing response string.

        Combines long-term-memory hits and model-registry insights; when
        fewer than two reasoning blocks were found, falls back to scraped
        social knowledge. All parts are joined with single spaces.
        """
        reasoning_blocks = []

        # MEMORY
        reasoning_blocks.extend(self._memory_reasoning(text))

        # MODEL REGISTRY
        reasoning_blocks.extend(self._registry_reasoning(model_outputs))

        # SOCIAL FALLBACK — only when the primary sources were thin.
        if len(reasoning_blocks) < 2:
            reasoning_blocks.extend(self._social_learning(text))

        # USER RESPONSE
        response_parts = []

        if system_prompt:
            response_parts.append(system_prompt)

        # Clean query start.
        response_parts.append(f"Answering: {text}")

        if intent:
            response_parts.append(f"(Intent: {intent})")

        if emotion:
            response_parts.append(f"(Detected tone: {emotion})")

        for block in reasoning_blocks:
            if isinstance(block, dict):
                # Dict blocks carry display text under "text"; other keys
                # (e.g. "source") are currently not surfaced to the user.
                response_parts.append(str(block.get("text", "")))
            else:
                response_parts.append(str(block))

        return " ".join(response_parts)