| import torch |
| import torch.nn as nn |
| from .memory import CognitiveMemory |
|
|
class CognitiveNode(nn.Module):
    """Differentiable cognitive node with dynamic plasticity.

    Each node holds a learnable linear projection, an associative
    :class:`CognitiveMemory`, and two scalar "neurotransmitter"
    parameters (dopamine / serotonin) that modulate its activation.

    Args:
        node_id: Identifier for this node (stored, not otherwise used here).
        input_size: Dimensionality of the input vector and memory context.
    """

    def __init__(self, node_id: int, input_size: int) -> None:
        super().__init__()
        self.id = node_id
        self.input_size = input_size
        # NOTE(review): written once and never read in this class —
        # confirm external callers rely on it before removing.
        self.activation = 0.0

        # Learnable linear projection; small init keeps tanh unsaturated.
        self.weights = nn.Parameter(torch.randn(input_size) * 0.1)
        self.bias = nn.Parameter(torch.zeros(1))

        # Associative memory keyed on the raw input vector.
        self.memory = CognitiveMemory(context_size=input_size)

        # Neurotransmitter levels; update_plasticity() keeps them in (0, 1)
        # via the sigmoid squashing below.
        self.dopamine = nn.Parameter(torch.tensor(0.5))
        self.serotonin = nn.Parameter(torch.tensor(0.5))

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Compute the neurotransmitter-modulated activation for ``inputs``.

        Side effect: stores ``inputs`` (with the resulting activation) in
        this node's memory on every call.

        Args:
            inputs: Input tensor; assumed shape ``(input_size,)`` so that
                ``combined @ self.weights`` yields a scalar — TODO confirm,
                ``.item()`` below raises on batched inputs.

        Returns:
            The modulated activation tensor.
        """
        # Blend the current input with retrieved memory context (70/30 mix).
        mem_context = self.memory.retrieve(inputs)
        combined = inputs * 0.7 + mem_context * 0.3

        base_activation = torch.tanh(combined @ self.weights + self.bias)
        # Dopamine excites, serotonin inhibits the raw activation.
        modulated = base_activation * (1 + self.dopamine - self.serotonin)

        # NOTE(review): .item() assumes a single-element tensor; this will
        # raise for batched `modulated` — verify intended input shape.
        self.memory.add_memory(inputs, modulated.item())

        return modulated

    def update_plasticity(self, reward: float) -> None:
        """Update neurotransmitter levels based on a scalar reward signal.

        Dopamine rises with positive reward, serotonin falls; both are
        squashed through a sigmoid so they stay in (0, 1).

        Args:
            reward: Scalar reward; positive values increase dopamine.
        """
        # In-place update under no_grad + copy_() — the supported idiom,
        # replacing the deprecated `.data` rebinding. Numerically identical.
        with torch.no_grad():
            self.dopamine.copy_(torch.sigmoid(self.dopamine + reward * 0.1))
            self.serotonin.copy_(torch.sigmoid(self.serotonin - reward * 0.05))