import numpy as np


def cost_function(y_true, y_pred):
    # Mean squared error between targets and predictions.
    return np.mean((y_true - y_pred) ** 2)


def gradient_descent(X, y, learning_rate=0.01, epochs=1000):
    m, n = X.shape
    theta = np.zeros((n, 1))  # column vector so predictions match y's (m, 1) shape
    cost_history = []

    for epoch in range(epochs):
        predictions = np.dot(X, theta)
        errors = predictions - y
        gradient = (1 / m) * np.dot(X.T, errors)
        theta -= learning_rate * gradient
        cost = cost_function(y, predictions)
        cost_history.append(cost)

    return theta, cost_history

# Synthetic data: y = 4 + 3x + Gaussian noise
X = 2 * np.random.rand(100, 1)
y = 4 + 3 * X + np.random.randn(100, 1)

# Prepend a bias column of ones so theta[0] learns the intercept
X_b = np.c_[np.ones((100, 1)), X]

theta, cost_history = gradient_descent(X_b, y, learning_rate=0.1, epochs=1000)

print(f'Learned parameters: {theta.ravel()}')
print(f'Cost history: {cost_history}')
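
# As a quick sanity check (an added sketch, not part of the original script), the
# gradient-descent estimate can be compared with the closed-form least-squares
# solution; both should land near the true coefficients (4, 3) used to generate y.
theta_lstsq, *_ = np.linalg.lstsq(X_b, y, rcond=None)
print(f'Closed-form parameters: {theta_lstsq.ravel()}')
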
"""
matrix_world.py

Matrix World — programmable laws, managed by "Ananthu Sajeev".

Save as: matrix_world.py
Run: python matrix_world.py

Author: Generated by ChatGPT (GPT-5 Thinking mini)
Date: 2025-10-27
"""

import os
import json
import math
import random
from dataclasses import dataclass, field
from typing import Callable, Dict, Any, List, Tuple
import numpy as np

# Matplotlib is optional; visual snapshots are simply skipped when it is missing.
try:
    import matplotlib.pyplot as plt
    HAS_MPL = True
except Exception:
    HAS_MPL = False


DEFAULT_GRID = 64
OUT_DIR = "matrix_out"
os.makedirs(OUT_DIR, exist_ok=True)
RANDOM_SEED = 2025
random.seed(RANDOM_SEED)
np.random.seed(RANDOM_SEED)

@dataclass
class Agent:
    id: int
    y: int
    x: int
    energy: float
    genome: np.ndarray = field(default_factory=lambda: np.array([]))
    age: int = 0
    metadata: dict = field(default_factory=dict)

    def to_dict(self):
        return {
            "id": self.id,
            "y": int(self.y),
            "x": int(self.x),
            "energy": float(self.energy),
            "age": int(self.age),
            "genome": self.genome.tolist() if self.genome is not None else [],
            "metadata": self.metadata,
        }

    @staticmethod
    def from_dict(d):
        return Agent(id=d["id"], y=d["y"], x=d["x"], energy=d["energy"],
                     genome=np.array(d.get("genome", [])), age=d.get("age", 0),
                     metadata=d.get("metadata", {}))

class LawEngine:
    """
    Holds the world's laws. Each law is a callable that the World calls at specific hooks.
    The manager (Ananthu Sajeev) can replace laws on the fly.
    """

    def __init__(self):
        # Hook name -> callable implementing that law.
        self.laws: Dict[str, Callable] = {
            "resource_regeneration": self.default_resource_regeneration,
            "movement_cost": self.default_movement_cost,
            "reproduction_condition": self.default_reproduction_condition,
            "reproduction_effect": self.default_reproduction_effect,
            "mutate_genome": self.default_mutate_genome,
            "agent_behavior": self.default_agent_behavior,
            "aging": self.default_aging,
            "death_condition": self.default_death_condition,
            "environment_tick": self.default_environment_tick,
        }
        # Shared, tweakable parameters read by the default laws.
        self.params: Dict[str, Any] = {
            "resource_regen_count": 20,
            "movement_cost_base": 0.5,
            "reproduce_energy_threshold": 40.0,
            "reproduce_energy_cost": 20.0,
            "mutation_rate": 0.05,
            "mutation_strength": 0.2,
            "max_energy": 100.0,
            "max_age": 500,
            "resource_energy": 7.0,
        }

    def set_law(self, name: str, func: Callable):
        if name not in self.laws:
            raise KeyError(f"Unknown law: {name}")
        self.laws[name] = func

    def get_law(self, name: str) -> Callable:
        return self.laws.get(name)

    def set_param(self, name: str, value: Any):
        self.params[name] = value

    def get_param(self, name: str) -> Any:
        return self.params.get(name)

    # Default law implementations.

    def default_resource_regeneration(self, world, params):
        count = params.get("resource_regen_count", 20)
        free = list(zip(*np.where(world.resources == 0)))
        if not free:
            return
        picks = random.sample(free, min(count, len(free)))
        for (y, x) in picks:
            world.resources[y, x] = 1

    def default_movement_cost(self, agent: Agent, world, params):
        return params.get("movement_cost_base", 0.5)

    def default_reproduction_condition(self, agent: Agent, world, params):
        return agent.energy >= params.get("reproduce_energy_threshold", 40.0)

    def default_reproduction_effect(self, parent: Agent, child: Agent, world, params):
        cost = params.get("reproduce_energy_cost", 20.0)
        parent.energy -= cost
        child.energy = parent.energy / 2.0 if parent.energy > 0 else 5.0

    def default_mutate_genome(self, genome: np.ndarray, world, params):
        # An empty or missing genome is seeded with small random values.
        if genome is None or genome.size == 0:
            size = params.get("genome_size", 8)
            return (np.random.randn(size) * 0.5).astype(float)
        mask = np.random.rand(genome.size) < params.get("mutation_rate", 0.05)
        perturb = np.random.randn(genome.size) * params.get("mutation_strength", 0.2)
        new = genome.copy()
        new[mask] += perturb[mask]
        return new

    def default_agent_behavior(self, agent: Agent, world, params):
        """
        Basic behavior: look for the nearest resource within radius and move towards it;
        otherwise random walk. Uses the genome as a simple bias vector if present.
        Returns (dy, dx) with each component in {-1, 0, 1}.
        """
        radius = params.get("sense_radius", 3)
        sy, sx = world.find_nearest_resource(agent.y, agent.x, radius)
        if sy is not None:
            dy = int(math.copysign(1, sy - agent.y)) if sy != agent.y else 0
            dx = int(math.copysign(1, sx - agent.x)) if sx != agent.x else 0
            return dy, dx
        # No resource in sight: use the first two genome entries to bias the walk.
        if agent.genome is not None and agent.genome.size >= 2:
            g0 = math.tanh(agent.genome[0])
            g1 = math.tanh(agent.genome[1])
            r = random.random()
            if r < 0.25 + 0.25 * g0:
                return -1, 0
            elif r < 0.5 + 0.25 * g1:
                return 1, 0
            elif r < 0.75:
                return 0, -1
            else:
                return 0, 1
        return random.choice([(-1, 0), (1, 0), (0, -1), (0, 1), (0, 0)])
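
    # A replacement behavior obeying the same contract can be dropped in via
    # set_law("agent_behavior", ...). For example (an illustrative sketch, not a
    # default shipped with the engine): a "lazy" agent that only moves when its
    # energy is above a threshold.
    #
    #     def lazy_behavior(agent, world, params):
    #         if agent.energy < params.get("laziness_threshold", 10.0):
    #             return 0, 0  # stay put and conserve energy
    #         return random.choice([(-1, 0), (1, 0), (0, -1), (0, 1)])
    #
    #     world.set_law("agent_behavior", lazy_behavior)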

    def default_aging(self, agent: Agent, world, params):
        agent.age += 1
        # Small passive energy drain per step.
        agent.energy -= 0.02

    def default_death_condition(self, agent: Agent, world, params):
        if agent.energy <= 0:
            return True
        if agent.age > params.get("max_age", 500):
            return True
        return False

    def default_environment_tick(self, world, params):
        # No global environmental effect by default; override via set_law.
        return

class MatrixWorld:
    def __init__(self, manager_name: str, size: int = DEFAULT_GRID, seed: int = RANDOM_SEED):
        self.manager = manager_name
        self.size = size
        self.resources = np.zeros((size, size), dtype=np.int32)
        self.agents: List[Agent] = []
        self.next_agent_id = 1
        self.step_counter = 0
        self.log: List[dict] = []
        self.laws = LawEngine()
        # Seed before spawning so the initial resource layout follows `seed`.
        random.seed(seed)
        np.random.seed(seed)
        self.spawn_resources(count=int(size * size * 0.05))

    def spawn_resources(self, count: int):
        free = list(zip(*np.where(self.resources == 0)))
        picks = random.sample(free, min(len(free), count))
        for (y, x) in picks:
            self.resources[y, x] = 1

    def add_agent(self, y: int, x: int, energy: float = 20.0, genome: np.ndarray = None, metadata: dict = None):
        metadata = metadata or {}
        if genome is None:
            # Seed a fresh genome via the default mutation law.
            genome = self.laws.default_mutate_genome(None, self, self.laws.params)
        agent = Agent(id=self.next_agent_id, y=y % self.size, x=x % self.size,
                      energy=energy, genome=genome, metadata=metadata)
        self.agents.append(agent)
        self.next_agent_id += 1
        return agent

    def find_nearest_resource(self, y: int, x: int, radius: int = 5):
        # Scan outward in diamond-shaped rings (Manhattan distance 1..radius),
        # wrapping around the toroidal grid; return the first resource found.
        for r in range(1, radius + 1):
            for dy in range(-r, r + 1):
                dx = r - abs(dy)
                for ddx in (-dx, dx) if dx != 0 else (0,):
                    yy = (y + dy) % self.size
                    xx = (x + ddx) % self.size
                    if self.resources[yy, xx] > 0:
                        return yy, xx
        return None, None

    # Manager-facing controls.

    def set_law(self, law_name: str, func: Callable):
        print(f"[Manager:{self.manager}] Setting law '{law_name}'")
        self.laws.set_law(law_name, func)

    def set_param(self, param_name: str, value: Any):
        print(f"[Manager:{self.manager}] Setting param '{param_name}' = {value}")
        self.laws.set_param(param_name, value)

    def get_law(self, law_name: str):
        return self.laws.get_law(law_name)

    def run_step(self):
        self.step_counter += 1
        # Global hooks first: environment tick, then resource regeneration.
        self.laws.laws["environment_tick"](self, self.laws.params)
        self.laws.laws["resource_regeneration"](self, self.laws.params)

        random.shuffle(self.agents)
        new_agents: List[Agent] = []
        dead_agents: List[Agent] = []
        for agent in list(self.agents):
            # Aging / passive energy drain.
            self.laws.laws["aging"](agent, self, self.laws.params)

            # Behavior: decide a move and apply it on the toroidal grid.
            dy, dx = self.laws.laws["agent_behavior"](agent, self, self.laws.params)
            agent.y = (agent.y + dy) % self.size
            agent.x = (agent.x + dx) % self.size

            # Pay the movement cost.
            cost = self.laws.laws["movement_cost"](agent, self, self.laws.params)
            agent.energy -= cost

            # Eat a resource if standing on one.
            if self.resources[agent.y, agent.x] > 0:
                gain = self.laws.params.get("resource_energy", 7.0)
                agent.energy += gain
                self.resources[agent.y, agent.x] = 0
                agent.metadata.setdefault("food_eaten", 0)
                agent.metadata["food_eaten"] += 1

            # Reproduction.
            cond = self.laws.laws["reproduction_condition"](agent, self, self.laws.params)
            if cond:
                child_genome = self.laws.laws["mutate_genome"](agent.genome, self, self.laws.params)
                child = Agent(id=self.next_agent_id, y=(agent.y + 1) % self.size, x=(agent.x + 1) % self.size,
                              energy=0.0, genome=child_genome, metadata={"parent": agent.id})
                self.next_agent_id += 1
                self.laws.laws["reproduction_effect"](agent, child, self, self.laws.params)
                new_agents.append(child)

            # Death check.
            if self.laws.laws["death_condition"](agent, self, self.laws.params):
                dead_agents.append(agent)

        # Apply removals and births after the pass over all agents.
        for d in dead_agents:
            if d in self.agents:
                self.agents.remove(d)
        self.agents.extend(new_agents)

        # Record a per-step summary.
        self.log.append({
            "step": self.step_counter,
            "num_agents": len(self.agents),
            "resources": int(self.resources.sum()),
            "avg_energy": float(np.mean([a.energy for a in self.agents]) if self.agents else 0.0),
        })

    def run_steps(self, n: int):
        for _ in range(n):
            self.run_step()

    def snapshot(self, path: str):
        # Serialise the full world state as JSON.
        data = {
            "manager": self.manager,
            "size": self.size,
            "step": self.step_counter,
            "resources": self.resources.tolist(),
            "agents": [a.to_dict() for a in self.agents],
            "laws_params": self.laws.params,
        }
        with open(path, "w") as f:
            json.dump(data, f)
        print(f"[Manager:{self.manager}] Snapshot saved to {path}")

    def save_state(self, prefix: str = None):
        prefix = prefix or os.path.join(OUT_DIR, f"matrix_state_step{self.step_counter}")
        self.snapshot(prefix + ".json")
        # PNG rendering only when matplotlib is available.
        if HAS_MPL:
            fig_path = prefix + ".png"
            self._save_visual(fig_path)
            print(f"[Manager:{self.manager}] Visual saved to {fig_path}")

    def load_state(self, path: str):
        with open(path, "r") as f:
            data = json.load(f)
        self.manager = data.get("manager", self.manager)
        self.size = data.get("size", self.size)
        self.step_counter = data.get("step", 0)
        self.resources = np.array(data.get("resources", self.resources.tolist()))
        self.agents = [Agent.from_dict(ad) for ad in data.get("agents", [])]
        self.next_agent_id = max([a.id for a in self.agents], default=0) + 1
        # Note: laws and params are not restored; they live in code, not in the snapshot.
        print(f"[Manager:{self.manager}] Loaded state from {path}")

    def _save_visual(self, path: str):
        if not HAS_MPL:
            return
        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(figsize=(6, 6))
        ax.imshow(np.zeros((self.size, self.size)), cmap='gray', alpha=0.2)
        ry, rx = np.where(self.resources > 0)
        ax.scatter(rx, ry, s=6, marker='s', label='resources', alpha=0.9)
        if self.agents:
            ax.scatter([a.x for a in self.agents], [a.y for a in self.agents],
                       s=18, c='red', alpha=0.8, label='agents')
        ax.set_title(f"Matrix (step {self.step_counter}) managed by {self.manager}")
        ax.set_xticks([])
        ax.set_yticks([])
        plt.tight_layout()
        fig.savefig(path, dpi=150)
        plt.close(fig)
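
# Snapshots are plain JSON, so a world can be rebuilt later. A small round-trip
# sketch (paths are illustrative):
#
#     w.save_state(prefix="matrix_out/checkpoint")      # writes checkpoint.json (+ .png)
#     w2 = MatrixWorld(manager_name="Ananthu Sajeev")
#     w2.load_state("matrix_out/checkpoint.json")       # agents, grid and step restored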

def demo():
    print("Matrix World demo — manager: Ananthu Sajeev")
    w = MatrixWorld(manager_name="Ananthu Sajeev", size=48)

    # Seed the world with a dozen agents carrying small random genomes.
    for _ in range(12):
        y = random.randrange(w.size)
        x = random.randrange(w.size)
        genome = (np.random.randn(6) * 0.5).astype(float)
        w.add_agent(y, x, energy=25.0, genome=genome)

    # Manager tunes a few parameters before the run.
    w.set_param("resource_regen_count", 40)
    w.set_param("movement_cost_base", 0.2)
    w.set_param("reproduce_energy_threshold", 30.0)
    w.set_param("mutation_rate", 0.08)
    w.set_param("mutation_strength", 0.15)
    w.set_param("genome_size", 6)

    # Custom environment law: every 100 steps a "winter" clears 30% of resources.
    def seasons(world, params):
        if world.step_counter > 0 and world.step_counter % 100 == 0:
            total = int(world.resources.sum())
            to_clear = int(total * 0.3)
            if to_clear <= 0:
                return
            cells = list(zip(*np.where(world.resources > 0)))
            picks = random.sample(cells, min(len(cells), to_clear))
            for (y, x) in picks:
                world.resources[y, x] = 0
            print(f"[Seasons] Winter at step {world.step_counter}: cleared {len(picks)} resources")

    w.set_law("environment_tick", seasons)

    # Run the simulation, saving snapshots and printing periodic summaries.
    steps = 300
    for s in range(steps):
        w.run_step()
        if s % 50 == 0:
            w.save_state(prefix=os.path.join(OUT_DIR, f"matrix_snapshot_step{s:04d}"))
        if s % 30 == 0:
            summary = w.log[-1]
            print(f"Step {summary['step']}: agents={summary['num_agents']} resources={summary['resources']} avg_energy={summary['avg_energy']:.2f}")

    w.save_state(prefix=os.path.join(OUT_DIR, "matrix_final"))

    print("Demo complete. Outputs (JSON, optional PNG) saved to:", OUT_DIR)


if __name__ == "__main__":
    demo()