Update app.py
app.py
CHANGED
@@ -5,7 +5,7 @@ import matplotlib.pyplot as plt
 from scipy.spatial import distance
 from sklearn.cluster import KMeans
 import networkx as nx
-from collections import deque
+from collections import deque, Counter
 from scipy.signal import convolve2d

 # Constants
@@ -27,25 +27,61 @@ env_graph = nx.grid_2d_graph(GRID_SIZE, GRID_SIZE)
 for obstacle in OBSTACLES:
     env_graph.remove_node(obstacle)

-
+class HiveMind:
+    def __init__(self):
+        self.collective_memory = {}
+        self.global_strategy = {}
+        self.task_allocation = {}
+        self.pheromone_importance = {'food': 0.5, 'danger': 0.3, 'exploration': 0.2}
+
+    def update_collective_memory(self, ant_memories):
+        for memory in ant_memories:
+            for position, info in memory:
+                if position not in self.collective_memory:
+                    self.collective_memory[position] = info
+                else:
+                    self.collective_memory[position] = (self.collective_memory[position] + info) / 2
+
+    def update_global_strategy(self, ant_performances):
+        best_ants = sorted(ant_performances, key=lambda x: x[1], reverse=True)[:5]
+        self.global_strategy = {
+            'exploration_rate': np.mean([ant.genome['exploration_rate'] for ant, _ in best_ants]),
+            'learning_rate': np.mean([ant.genome['learning_rate'] for ant, _ in best_ants]),
+            'discount_factor': np.mean([ant.genome['discount_factor'] for ant, _ in best_ants])
+        }
+
+    def allocate_tasks(self, ants):
+        ant_positions = [ant.position for ant in ants]
+        clusters = KMeans(n_clusters=min(5, len(ants))).fit(ant_positions)
+        for i, ant in enumerate(ants):
+            cluster = clusters.labels_[i]
+            if cluster not in self.task_allocation:
+                self.task_allocation[cluster] = []
+            self.task_allocation[cluster].append(ant)
+
+    def get_swarm_decision(self, decisions):
+        return Counter(decisions).most_common(1)[0][0]
+
 class Ant:
-    def __init__(self, position, genome):
+    def __init__(self, position, genome, hivemind):
         self.position = position
         self.genome = genome
+        self.hivemind = hivemind
         self.carrying_food = False
         self.energy = 100
         self.memory = deque(maxlen=20)
         self.path_home = []
         self.role = "explorer"
         self.communication_range = 10
-        self.q_table = {}
+        self.q_table = {}
+        self.performance = 0
+        self.cluster = None

     def perceive_environment(self, pheromone_grid, ants):
         self.food_pheromone = pheromone_grid[self.position[0], self.position[1], 0]
         self.danger_pheromone = pheromone_grid[self.position[0], self.position[1], 1]
         self.exploration_pheromone = pheromone_grid[self.position[0], self.position[1], 2]

-        # Perceive nearby ants
         self.nearby_ants = [ant for ant in ants if distance.euclidean(self.position, ant.position) <= self.communication_range]

     def act(self, pheromone_grid):
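The swarm vote used by `HiveMind.get_swarm_decision` above is why `Counter` is now imported in the first hunk: `most_common(1)` returns a one-element list of `(value, count)` pairs, so indexing `[0][0]` yields the most frequent decision. A minimal, self-contained illustration with made-up action tuples (not part of the commit):

```python
from collections import Counter

# Illustrative decisions only; in the app each decision is whatever Ant.decide returns.
decisions = [(0, 1), (0, 1), (1, 0), (0, 1), (1, 1)]

# most_common(1) -> [((0, 1), 3)]; [0][0] extracts the winning decision.
winner = Counter(decisions).most_common(1)[0][0]
print(winner)  # (0, 1)
```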
@@ -54,14 +90,26 @@ class Ant:
         if random.random() < self.genome['exploration_rate']:
             action = random.choice(possible_actions)
         else:
-            q_values = [self.get_q_value(action) for action in possible_actions]
-            action = possible_actions[np.argmax(q_values)]
+            nearby_ants = [ant for ant in self.hivemind.task_allocation.get(self.cluster, [])
+                           if distance.euclidean(self.position, ant.position) <= self.communication_range]
+            if nearby_ants:
+                swarm_decisions = [ant.decide(pheromone_grid) for ant in nearby_ants]
+                action = self.hivemind.get_swarm_decision(swarm_decisions)
+            else:
+                q_values = [self.get_q_value(action) for action in possible_actions]
+                action = possible_actions[np.argmax(q_values)]

         reward = self.calculate_reward()
         self.update_q_table(action, reward)
+        self.performance += reward

         return action

+    def decide(self, pheromone_grid):
+        possible_actions = self.get_possible_actions()
+        q_values = [self.get_q_value(action) for action in possible_actions]
+        return possible_actions[np.argmax(q_values)]
+
     def get_q_value(self, action):
         return self.q_table.get((self.position, action), 0)

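Both the fallback branch of `act` and the new `decide` helper follow the same epsilon-greedy pattern: unseen `(position, action)` pairs default to a Q-value of 0 through `dict.get`, and `np.argmax` resolves ties toward the first action in the list. A standalone sketch with illustrative values (not the app's actual state):

```python
import random
import numpy as np

q_table = {((2, 3), 'north'): 0.4, ((2, 3), 'east'): 0.1}  # toy entries
state = (2, 3)
possible_actions = ['north', 'south', 'east', 'west']
exploration_rate = 0.2

if random.random() < exploration_rate:
    action = random.choice(possible_actions)                # explore
else:
    q_values = [q_table.get((state, a), 0) for a in possible_actions]
    action = possible_actions[int(np.argmax(q_values))]     # exploit; ties go to the first maximum
```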
@@ -75,14 +123,20 @@ class Ant:
         self.q_table[(self.position, action)] = new_q

     def calculate_reward(self):
+        base_reward = -1  # Cost of living
         if self.carrying_food:
-
-
-
-
-
-
-
+            base_reward += 10
+        if self.position in FOOD_SOURCES:
+            base_reward += 20
+        if self.position in OBSTACLES:
+            base_reward -= 10
+
+        pheromone_reward = (
+            self.hivemind.pheromone_importance['food'] * self.food_pheromone +
+            self.hivemind.pheromone_importance['danger'] * -self.danger_pheromone +
+            self.hivemind.pheromone_importance['exploration'] * self.exploration_pheromone
+        )
+        return base_reward + pheromone_reward

     def get_possible_actions(self):
         x, y = self.position
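To make the new reward function concrete, with the default `pheromone_importance` weights (0.5, 0.3, 0.2): an ant carrying food while standing on a food source, reading pheromone levels food=3, danger=1, exploration=2, would receive about 30.6 (illustrative numbers, not app output):

```python
base_reward = -1 + 10 + 20                       # cost of living + carrying bonus + food-source bonus = 29
pheromone_reward = 0.5 * 3 - 0.3 * 1 + 0.2 * 2   # weighted pheromone term = 1.6
total_reward = base_reward + pheromone_reward    # 30.6
```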
@@ -118,16 +172,11 @@ class Ant:

         pheromone_grid[self.position[0], self.position[1], 2] += 1  # Exploration pheromone

-        self.memory.append(self.position)
+        self.memory.append((self.position, (self.food_pheromone, self.danger_pheromone, self.exploration_pheromone)))
+
+        # Update role based on cluster task
+        self.role = self.hivemind.task_allocation.get(self.cluster, ["explorer"])[0]

-        # Update role based on situation
-        if self.carrying_food:
-            self.role = "carrier"
-        elif self.food_pheromone > 5:
-            self.role = "follower"
-        else:
-            self.role = "explorer"
-
         # Path planning
         if self.carrying_food and not self.path_home:
             self.path_home = nx.shortest_path(env_graph, self.position, (0, 0))
@@ -160,9 +209,12 @@ def mutate(genome):
     return genome

 # Simulation Loop
-def simulate(ants):
+def simulate(ants, hivemind):
     global pheromone_grid
     food_collected = 0
+
+    hivemind.allocate_tasks(ants)
+
     for ant in ants:
         if ant.update(pheromone_grid, ants):
             if ant.position == (0, 0) and not ant.carrying_food:
@@ -171,28 +223,25 @@ def simulate(ants):
     pheromone_grid *= (1 - PHEROMONE_DECAY_RATE)
     diffuse_pheromones(pheromone_grid)

-
+    hivemind.update_collective_memory([ant.memory for ant in ants])
+    hivemind.update_global_strategy([(ant, ant.performance) for ant in ants])
+
+    # Genetic Algorithm and Swarm Adaptation
     if len(ants) > MAX_ANTS:
-        ants.sort(key=lambda x: x.
+        ants.sort(key=lambda x: x.performance, reverse=True)
         survivors = ants[:MAX_ANTS//2]
         new_ants = []
         while len(new_ants) < MAX_ANTS//2:
             parent1, parent2 = random.sample(survivors, 2)
             child_genome = crossover(parent1.genome, parent2.genome)
             child_genome = mutate(child_genome)
-            new_ant = Ant((random.randint(0, GRID_SIZE-1), random.randint(0, GRID_SIZE-1)), child_genome)
+            child_genome = {**child_genome, **hivemind.global_strategy}  # Incorporate global strategy
+            new_ant = Ant((random.randint(0, GRID_SIZE-1), random.randint(0, GRID_SIZE-1)), child_genome, hivemind)
             new_ants.append(new_ant)
         ants = survivors + new_ants

     return ants, food_collected

-# Clustering for strategic analysis
-def analyze_ant_clusters(ants):
-    positions = np.array([ant.position for ant in ants])
-    kmeans = KMeans(n_clusters=3)
-    kmeans.fit(positions)
-    return kmeans.cluster_centers_
-
 # Visualization Functions
 def plot_environment(pheromone_grid, ants, cluster_centers):
     fig, ax = plt.subplots(figsize=(10, 10))
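`crossover` and `mutate` are defined elsewhere in app.py and are not shown in this diff; the loop above only assumes they take and return genome dicts before the child genome is blended with `hivemind.global_strategy`. A rough, hypothetical sketch of operators with that shape (not the repository's actual implementations):

```python
import random

def crossover_sketch(genome_a, genome_b):
    # Hypothetical uniform crossover: each gene is copied from one parent at random.
    return {key: random.choice([genome_a[key], genome_b[key]]) for key in genome_a}

def mutate_sketch(genome, rate=0.1, scale=0.05):
    # Hypothetical Gaussian perturbation, clipped to [0, 1] since the genes are rates/factors.
    mutated = {}
    for key, value in genome.items():
        if random.random() < rate:
            value = min(1.0, max(0.0, value + random.gauss(0, scale)))
        mutated[key] = value
    return mutated
```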
@@ -225,11 +274,13 @@ exploration_rate = st.sidebar.slider("Exploration Rate", 0.0, 1.0, 0.2)
 learning_rate = st.sidebar.slider("Learning Rate", 0.0, 1.0, 0.1)
 discount_factor = st.sidebar.slider("Discount Factor", 0.0, 1.0, 0.9)

-# Initialize ants
+# Initialize hivemind and ants
+hivemind = HiveMind()
 ants = [Ant((random.randint(0, GRID_SIZE-1), random.randint(0, GRID_SIZE-1)),
             {'exploration_rate': exploration_rate,
              'learning_rate': learning_rate,
-             'discount_factor': discount_factor}
+             'discount_factor': discount_factor},
+            hivemind)
         for _ in range(num_ants)]

 # Simulation control
@@ -250,12 +301,12 @@ if start_simulation:
     plot_placeholder = st.empty()

     while not stop_simulation:
-        ants, food_collected = simulate(ants)
+        ants, food_collected = simulate(ants, hivemind)
         total_food_collected += food_collected
         iterations += 1

         if iterations % 10 == 0:
-            cluster_centers = analyze_ant_clusters(ants)
+            cluster_centers = hivemind.allocate_tasks(ants)

         if iterations % 5 == 0:
             progress_bar.progress(min(iterations / 1000, 1.0))
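The loop above assigns the return value of `hivemind.allocate_tasks(ants)` to `cluster_centers` (a role previously filled by the removed `analyze_ant_clusters`), yet `allocate_tasks` as added earlier in this diff has no return statement. A hedged sketch of a helper that exposes the KMeans centres for plotting (hypothetical, not the committed code):

```python
from sklearn.cluster import KMeans

def cluster_positions(positions, max_clusters=5):
    # Cluster ant positions and return (labels, centers); centers could feed plot_environment.
    kmeans = KMeans(n_clusters=min(max_clusters, len(positions))).fit(positions)
    return kmeans.labels_, kmeans.cluster_centers_

# Example with illustrative grid coordinates:
labels, centers = cluster_positions([(0, 0), (1, 2), (9, 9), (8, 7), (5, 5)])
```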
@@ -266,10 +317,12 @@ if start_simulation:

 if reset_simulation:
     pheromone_grid = np.zeros((GRID_SIZE, GRID_SIZE, 3))
+    hivemind = HiveMind()
     ants = [Ant((random.randint(0, GRID_SIZE-1), random.randint(0, GRID_SIZE-1)),
                 {'exploration_rate': exploration_rate,
                  'learning_rate': learning_rate,
-                 'discount_factor': discount_factor}
+                 'discount_factor': discount_factor},
+                hivemind)
             for _ in range(num_ants)]
     total_food_collected = 0
     iterations = 0
@@ -304,4 +357,51 @@ if iterations > 0:
     fig, ax = plt.subplots(figsize=(10, 10))
     pos = nx.spring_layout(G)
     nx.draw(G, pos, with_labels=False, node_size=30, node_color='skyblue', edge_color='gray', ax=ax)
-    st.pyplot(fig)
+    st.pyplot(fig)
+
+    # Display hivemind collective memory heatmap
+    st.write("## Hivemind Collective Memory")
+    memory_grid = np.zeros((GRID_SIZE, GRID_SIZE))
+    for pos, info in hivemind.collective_memory.items():
+        memory_grid[pos[0], pos[1]] = np.mean(info)
+
+    fig, ax = plt.subplots(figsize=(10, 10))
+    heatmap = ax.imshow(memory_grid, cmap='viridis', interpolation='nearest')
+    plt.colorbar(heatmap)
+    st.pyplot(fig)
+
+    # Display global strategy evolution
+    st.write("## Global Strategy Evolution")
+    strategy_df = pd.DataFrame(hivemind.global_strategy, index=[0])
+    st.line_chart(strategy_df)
+
+    # Display performance distribution of ants
+    st.write("## Ant Performance Distribution")
+    performances = [ant.performance for ant in ants]
+    fig, ax = plt.subplots()
+    ax.hist(performances, bins=20)
+    ax.set_xlabel('Performance')
+    ax.set_ylabel('Number of Ants')
+    st.pyplot(fig)
+
+    # Display task allocation
+    st.write("## Task Allocation")
+    task_df = pd.DataFrame.from_dict(hivemind.task_allocation, orient='index')
+    task_df = task_df.applymap(lambda x: len(x) if x else 0)
+    st.bar_chart(task_df)
+
+    # Add some final notes about the simulation
+    st.write("""
+    ## About this Simulation
+
+    This advanced ant hivemind simulation demonstrates several key concepts in swarm intelligence and collective behavior:
+
+    1. **Collective Decision Making**: Ants make decisions based on both individual and swarm intelligence.
+    2. **Adaptive Strategies**: The hivemind evolves its strategy based on the performance of the best ants.
+    3. **Distributed Task Allocation**: Ants are dynamically assigned to different tasks based on their location and the colony's needs.
+    4. **Emergent Behavior**: Complex colony-level behaviors emerge from simple individual ant rules.
+    5. **Information Sharing**: Ants share information through pheromones and direct communication.
+    6. **Collective Memory**: The hivemind maintains a collective memory of the environment.
+
+    This simulation showcases how simple agents, when working together with the right rules, can exhibit complex and intelligent behavior at the group level.
+    """)