import torch
import torch.nn as nn
from collections import deque
from .memory import CognitiveMemory

|
class CognitiveNode(nn.Module):
    """Autonomous neural unit with neuromodulatory dynamics.

    Each node owns a small linear response (``weights``/``bias``), a
    :class:`CognitiveMemory` used to blend retrieved context into the
    input, and two learned neuromodulator levels (dopamine excites,
    serotonin inhibits) that scale the tanh activation.
    """

    def __init__(self, node_id: int, input_size: int):
        """Create a node.

        Args:
            node_id: Identifier stored on the instance (not used internally).
            input_size: Dimensionality of the input vector; also the
                memory context size.
        """
        super().__init__()
        self.id = node_id
        self.input_size = input_size

        # Small random init keeps early pre-activations in tanh's
        # near-linear regime.
        self.weights = nn.Parameter(torch.randn(input_size) * 0.1)
        self.bias = nn.Parameter(torch.zeros(1))
        self.memory = CognitiveMemory(context_size=input_size)

        # Raw neuromodulator levels; forward() squashes them through
        # sigmoid, and update_plasticity() nudges them directly.
        self.dopamine = nn.Parameter(torch.tensor(0.5))
        self.serotonin = nn.Parameter(torch.tensor(0.5))

        # Rolling window of the last 100 scalar activations.
        self.recent_activations = deque(maxlen=100)

    def forward(self, inputs: torch.Tensor) -> torch.Tensor:
        """Return the node's modulated activation for ``inputs``.

        Side effects: stores the scalar activation into ``self.memory``
        and appends it to ``recent_activations``.
        """
        # Blend current input with retrieved memory context (70/30 mix).
        mem_context = self.memory.retrieve(inputs)
        combined = inputs * 0.7 + mem_context * 0.3

        base_activation = torch.tanh(combined @ self.weights + self.bias)
        # Modulation factor is 1 + sigmoid(dopamine) - sigmoid(serotonin):
        # dopamine amplifies, serotonin dampens, each bounded in (0, 1).
        modulated = base_activation * (
            1 + torch.sigmoid(self.dopamine) - torch.sigmoid(self.serotonin)
        )

        # Extract the scalar once (original called .item() twice, paying
        # two graph/device syncs for the same value).
        # NOTE(review): .item() assumes a size-1 activation, i.e. unbatched
        # input — confirm callers never pass a batch dimension.
        activation = modulated.item()
        self.memory.add_memory(inputs, activation)
        self.recent_activations.append(activation)

        return modulated

    def update_plasticity(self, reward: float):
        """Adaptive neuromodulation based on performance.

        Positive reward raises dopamine (x0.1) and lowers serotonin
        (x0.05); both raw levels are clamped back into [0, 1].
        Runs under no_grad since this is a direct rule-based update,
        not a gradient step.
        """
        with torch.no_grad():
            self.dopamine += reward * 0.1
            self.serotonin -= reward * 0.05

            self.dopamine.clamp_(0, 1)
            self.serotonin.clamp_(0, 1)