// Source: Felladrin — "Add the simulator files" (commit 17fd0c2, verified)
/**
* Lightweight feedforward neural network for entity decision-making.
* Supports arbitrary topology, weight serialization, mutation, and
* activation visualization for the HUD neural activity overlay.
*/
export class NeuralNetwork {
  /**
   * @param {number[]} topology - Layer sizes from input to output, e.g. [12, 10, 6]
   */
  constructor(topology) {
    this.topology = topology;
    this.weights = []; // weights[l][j][k]: layer l, neuron j, incoming weight k
    this.biases = []; // biases[l][j]: layer l, neuron j
    this.activations = []; // per-layer activations captured by the last forward()
    this._initWeights();
  }

  /** Xavier-style initialization for stable gradient flow */
  _initWeights() {
    for (let i = 1; i < this.topology.length; i++) {
      const fanIn = this.topology[i - 1];
      const fanOut = this.topology[i];
      // Xavier/Glorot scaling keeps initial pre-activations inside tanh's
      // near-linear region so early outputs are not saturated.
      const scale = Math.sqrt(2 / (fanIn + fanOut));
      const layerW = new Array(fanOut);
      const layerB = new Array(fanOut);
      for (let j = 0; j < fanOut; j++) {
        layerW[j] = new Array(fanIn);
        for (let k = 0; k < fanIn; k++) {
          layerW[j][k] = (Math.random() * 2 - 1) * scale;
        }
        layerB[j] = (Math.random() * 2 - 1) * 0.1;
      }
      this.weights.push(layerW);
      this.biases.push(layerB);
    }
  }

  /**
   * Run a forward pass through the network. tanh is applied on every layer,
   * including the output layer, so all activations lie in [-1, 1].
   * Side effect: records each layer's activations in this.activations for
   * the HUD visualization (see renderMini / getMaxActivation).
   * @param {number[]} inputs - Input activations
   * @returns {number[]} Output activations
   */
  forward(inputs) {
    let current = inputs;
    this.activations = [inputs.slice()];
    for (let layer = 0; layer < this.weights.length; layer++) {
      const w = this.weights[layer];
      const b = this.biases[layer];
      const numNeurons = w.length;
      const next = new Array(numNeurons);
      for (let j = 0; j < numNeurons; j++) {
        let sum = b[j];
        const wj = w[j];
        for (let k = 0; k < current.length; k++) {
          sum += wj[k] * current[k];
        }
        next[j] = Math.tanh(sum);
      }
      current = next;
      this.activations.push(current.slice());
    }
    return current;
  }

  /** Count total trainable parameters (all weights plus all biases). */
  get totalWeights() {
    let count = 0;
    for (let i = 0; i < this.weights.length; i++) {
      count += this.weights[i].length * this.weights[i][0].length;
      count += this.biases[i].length;
    }
    return count;
  }

  /**
   * Flatten all weights + biases into a single array.
   * Layout per layer: row-major weights first, then that layer's biases.
   * @returns {number[]} Flat parameter vector of length totalWeights
   */
  serialize() {
    const flat = new Array(this.totalWeights);
    let idx = 0;
    for (let i = 0; i < this.weights.length; i++) {
      const w = this.weights[i];
      for (let j = 0; j < w.length; j++) {
        for (let k = 0; k < w[j].length; k++) {
          flat[idx++] = w[j][k];
        }
      }
      const b = this.biases[i];
      for (let j = 0; j < b.length; j++) {
        flat[idx++] = b[j];
      }
    }
    return flat;
  }

  /**
   * Restore weights from a flat array produced by serialize().
   * @param {number[]} flat - Flat parameter vector (same layout as serialize)
   * @throws {Error} If flat.length does not match this network's parameter
   *   count — previously a mismatch silently wrote undefined into weights.
   */
  deserialize(flat) {
    if (flat.length !== this.totalWeights) {
      throw new Error(
        `deserialize: expected ${this.totalWeights} parameters, got ${flat.length}`,
      );
    }
    let idx = 0;
    for (let i = 0; i < this.weights.length; i++) {
      const w = this.weights[i];
      for (let j = 0; j < w.length; j++) {
        for (let k = 0; k < w[j].length; k++) {
          w[j][k] = flat[idx++];
        }
      }
      const b = this.biases[i];
      for (let j = 0; j < b.length; j++) {
        b[j] = flat[idx++];
      }
    }
  }

  /** Create an exact copy of this network (topology and all parameters). */
  clone() {
    const nn = new NeuralNetwork(this.topology.slice());
    nn.deserialize(this.serialize());
    return nn;
  }

  /**
   * Apply Gaussian mutations to a fraction of weights (in place).
   * @param {number} rate - Probability of mutating each weight (0-1)
   * @param {number} magnitude - Standard deviation of mutation noise
   */
  mutate(rate, magnitude) {
    for (let i = 0; i < this.weights.length; i++) {
      const w = this.weights[i];
      for (let j = 0; j < w.length; j++) {
        for (let k = 0; k < w[j].length; k++) {
          if (Math.random() < rate) {
            w[j][k] += gaussianNoise() * magnitude;
          }
        }
      }
      const b = this.biases[i];
      for (let j = 0; j < b.length; j++) {
        if (Math.random() < rate) {
          b[j] += gaussianNoise() * magnitude;
        }
      }
    }
  }

  /**
   * Single-point crossover between two networks of the same topology:
   * the child takes all genes before a random cut point from this network
   * and the remainder from the other parent. (The previous doc comment
   * said "uniform crossover", which did not match the implementation.)
   * @param {NeuralNetwork} other - Second parent; must share this topology
   * @returns {NeuralNetwork} child network
   */
  crossover(other) {
    const child = new NeuralNetwork(this.topology.slice());
    const w1 = this.serialize();
    const w2 = other.serialize();
    const childW = new Array(w1.length);
    const crossPoint = Math.floor(Math.random() * w1.length);
    for (let i = 0; i < w1.length; i++) {
      childW[i] = i < crossPoint ? w1[i] : w2[i];
    }
    child.deserialize(childW);
    return child;
  }

  /**
   * Get the maximum absolute activation across all layers of the last
   * forward() pass. Useful for visualization intensity scaling.
   * @returns {number} 0 when forward() has not been called yet
   */
  getMaxActivation() {
    let max = 0;
    for (const layer of this.activations) {
      for (const v of layer) {
        const abs = Math.abs(v);
        if (abs > max) max = abs;
      }
    }
    return max;
  }

  /**
   * Render a mini neural network diagram to a canvas context.
   * Connections are colored by weight sign (cyan positive / red negative)
   * with opacity scaled by magnitude; neurons by last activation.
   * @param {CanvasRenderingContext2D} ctx
   * @param {number} x - Top-left x
   * @param {number} y - Top-left y
   * @param {number} w - Width
   * @param {number} h - Height
   */
  renderMini(ctx, x, y, w, h) {
    const layers = this.topology.length;
    // Guard the degenerate single-layer case to avoid division by zero.
    const layerSpacing = layers > 1 ? w / (layers - 1) : 0;
    ctx.lineWidth = 0.5;
    for (let l = 0; l < this.weights.length; l++) {
      const fromCount = this.topology[l];
      const toCount = this.topology[l + 1];
      const fromX = x + l * layerSpacing;
      const toX = x + (l + 1) * layerSpacing;
      for (let j = 0; j < toCount; j++) {
        const toY = y + (j + 0.5) * (h / toCount);
        for (let k = 0; k < fromCount; k++) {
          const fromY = y + (k + 0.5) * (h / fromCount);
          const weight = this.weights[l][j][k];
          const alpha = Math.min(Math.abs(weight) * 0.5, 0.6);
          ctx.strokeStyle =
            weight > 0
              ? `rgba(0, 240, 255, ${alpha})`
              : `rgba(255, 51, 102, ${alpha})`;
          ctx.beginPath();
          ctx.moveTo(fromX, fromY);
          ctx.lineTo(toX, toY);
          ctx.stroke();
        }
      }
    }
    for (let l = 0; l < layers; l++) {
      const count = this.topology[l];
      const lx = x + l * layerSpacing;
      const act = this.activations[l] || [];
      for (let n = 0; n < count; n++) {
        const ny = y + (n + 0.5) * (h / count);
        const val = act[n] || 0;
        const intensity = Math.abs(val);
        const r = Math.max(2, 4 - layers * 0.3);
        ctx.beginPath();
        ctx.arc(lx, ny, r, 0, Math.PI * 2);
        ctx.fillStyle =
          val > 0
            ? `rgba(0, 240, 255, ${0.3 + intensity * 0.7})`
            : `rgba(255, 51, 102, ${0.3 + intensity * 0.7})`;
        ctx.fill();
      }
    }
  }
}
/** Gaussian noise using Box-Muller transform */
/**
 * Draw one standard-normal sample (mean 0, variance 1) via the
 * Box-Muller transform. Zeros are rejected so Math.log never sees 0.
 * @returns {number}
 */
function gaussianNoise() {
  let u;
  let v;
  do {
    u = Math.random();
  } while (u === 0);
  do {
    v = Math.random();
  } while (v === 0);
  return Math.cos(2.0 * Math.PI * v) * Math.sqrt(-2.0 * Math.log(u));
}
/**
* QuantumDecisionLayer - probabilistic branching for elite entities.
* Samples from a probability distribution over actions rather than
* taking the deterministic argmax.
*/
/**
 * QuantumDecisionLayer - probabilistic branching for elite entities.
 * Samples an action from a temperature-scaled softmax over the raw
 * network outputs instead of taking the deterministic argmax.
 */
export class QuantumDecisionLayer {
  /**
   * @param {number} temperature - Controls exploration vs exploitation.
   *   Higher = more random, lower = more greedy.
   */
  constructor(temperature = 1.0) {
    this.temperature = temperature;
  }

  /**
   * Apply softmax with temperature to raw outputs and sample one action.
   * The max logit is subtracted before exponentiating for numerical
   * stability (result is mathematically unchanged).
   * @param {number[]} logits - Raw network outputs
   * @returns {{ action: number, probabilities: number[] }}
   */
  sample(logits) {
    const peak = logits.reduce((m, l) => (l > m ? l : m), -Infinity);
    const scaled = logits.map((l) => Math.exp((l - peak) / this.temperature));
    const total = scaled.reduce((acc, e) => acc + e, 0);
    const probabilities = scaled.map((e) => e / total);
    // Roulette-wheel selection: walk the distribution until the random
    // threshold is exhausted.
    let threshold = Math.random();
    for (let i = 0; i < probabilities.length; i++) {
      threshold -= probabilities[i];
      if (threshold <= 0) {
        return { action: i, probabilities };
      }
    }
    // Floating-point slack can leave a sliver unassigned; pick the last.
    return { action: probabilities.length - 1, probabilities };
  }
}