Anoozh-Akileswaran committed
Commit a258060 · 1 Parent(s): 3d433ba

A2c version on HF

Files changed (2)
  1. A2C_combo/a2c_helpers.py +417 -0
  2. A2C_combo/a2c_main.py +348 -0
A2C_combo/a2c_helpers.py ADDED
@@ -0,0 +1,417 @@
import numpy as np
import torch as T
import torch.nn as nn
import torch.optim as optim
from torch.distributions import Categorical


class Agent:
    def __init__(
        self,
        obs_space,
        action_space,
        hidden,
        gamma,
        lr,
        value_coef,
        entropy_coef,
        seed,
        lam
    ):
        EPSILON = 1e-8

        # Initialize seed for reproducibility
        if seed is not None:
            np.random.seed(seed)
            T.manual_seed(seed)

        # Use GPU if available
        self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
        self.action_dim = int(getattr(action_space, "n", action_space))

        # Initialize the policy and the critic networks
        self.policy = Policy(obs_space.shape, self.action_dim, hidden).to(self.device)
        self.critic = Critic(obs_space.shape, hidden).to(self.device)

        # Set optimizer for policy and critic networks
        self.opt = optim.Adam(
            list(self.policy.parameters()) + list(self.critic.parameters()),
            lr=lr
        )

        self.gamma = gamma
        self.value_coef = value_coef
        self.entropy_coef = entropy_coef
        self.sigma_history = []
        self.loss_history = []
        self.policy_loss_history = []
        self.value_loss_history = []
        self.entropy_history = []
        self.lam = lam
        self.EPSILON = EPSILON
        self.observeNorm = ObservationNorm()
        self.advantageNorm = AdvantageNorm()
        self.returnNorm = ReturnNorm()

        self.memory = Memory()

    # Choose an action by sampling from the current policy
    # Returns: sampled action, critic's value estimate for the state
    def choose_action(self, observation):
        state = T.as_tensor(observation, dtype=T.float32, device=self.device)
        with T.no_grad():
            dist = self.policy.next_action(state)
            action = dist.sample()
            value = self.critic.evaluated_state(state)
        return int(action.item()), float(value.item())

    # Store reward, state, action in memory
    def remember(self, state, action, reward, done, value, next_state):
        with T.no_grad():
            ns = T.as_tensor(next_state, dtype=T.float32, device=self.device)
            next_value = self.critic.evaluated_state(ns).item()
        self.memory.store(state, action, reward, done, value, next_value)

    def _prepare_batch_data(self):
        """Convert memory to tensors."""
        states = T.as_tensor(np.array(self.memory.states), dtype=T.float32, device=self.device)
        actions = T.as_tensor(self.memory.actions, dtype=T.long, device=self.device)
        rewards = T.as_tensor(self.memory.rewards, dtype=T.float32, device=self.device)
        dones = T.as_tensor(self.memory.dones, dtype=T.float32, device=self.device)
        values = T.as_tensor(self.memory.values, dtype=T.float32, device=self.device)
        return states, actions, rewards, dones, values

    def _compute_gae(self, rewards, values, dones):
        """Compute Generalized Advantage Estimation."""
        with T.no_grad():
            next_values = T.cat([values[1:], values[-1:].clone()])
            deltas = rewards + self.gamma * next_values * (1 - dones) - values

            adv = T.zeros_like(rewards)
            gae = 0.0
            for t in reversed(range(len(rewards))):
                gae = deltas[t] + self.gamma * self.lam * (1 - dones[t]) * gae
                adv[t] = gae

            returns = adv + values
        return adv, returns
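
    # Note on the recursion above: the loop computes
    #   delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t)
    #   A_t     = delta_t + gamma * lam * (1 - done_t) * A_{t+1}
    # and the critic targets are returns_t = A_t + V(s_t).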

    def _compute_a2c_loss(self, states, actions, returns, advantages):
        """Compute A2C loss components."""
        dist = self.policy.next_action(states)
        new_logp = dist.log_prob(actions)
        entropy = dist.entropy().mean()

        # Simple policy gradient (no clipping)
        policy_loss = -(new_logp * advantages).mean()

        # Critic loss
        value_pred = self.critic.evaluated_state(states)
        value_loss = 0.5 * (returns - value_pred).pow(2).mean()

        # Total loss
        total_loss = (
            policy_loss +
            self.value_coef * value_loss -
            self.entropy_coef * entropy
        )

        return total_loss, policy_loss, value_loss
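
    # The total objective above is the standard A2C loss,
    #   L = -E[log pi(a|s) * A] + value_coef * 0.5 * E[(returns - V(s))^2] - entropy_coef * H[pi],
    # where the advantages A are computed under no_grad in the update methods and are
    # therefore treated as constants during backpropagation.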

    def _a2c_update(self, states, actions, returns, adv, use_grad_clip=False):
        """Run a single A2C update (no multiple epochs)."""
        total_loss, policy_loss, value_loss = self._compute_a2c_loss(
            states, actions, returns, adv
        )

        self.policy_loss_history.append(policy_loss.item())
        self.value_loss_history.append(value_loss.item())

        self.opt.zero_grad(set_to_none=True)
        total_loss.backward()

        if use_grad_clip:
            T.nn.utils.clip_grad_norm_(
                list(self.policy.parameters()) + list(self.critic.parameters()),
                0.5
            )

        self.opt.step()

        return total_loss.item()

    def vanilla_a2c_update(self):
        if len(self.memory.states) == 0:
            return 0.0

        states, actions, rewards, dones, values = self._prepare_batch_data()
        adv, returns = self._compute_gae(rewards, values, dones)

        with T.no_grad():
            adv = (adv - adv.mean()) / (adv.std(unbiased=False) + self.EPSILON)

        avg_loss = self._a2c_update(states, actions, returns, adv)
        self.memory.clear()
        return avg_loss

    def update_rbs(self):
        if len(self.memory.states) == 0:
            return 0.0

        states, actions, rewards, dones, values = self._prepare_batch_data()
        adv, returns = self._compute_gae(rewards, values, dones)

        with T.no_grad():
            # Return-based scaling: divide returns and advantages by the batch return std
            sigma_t = returns.std(unbiased=False) + 1e-8
            returns = returns / sigma_t
            self.sigma_history.append(sigma_t.item())
            adv = adv / sigma_t
            adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)

        avg_loss = self._a2c_update(states, actions, returns, adv)
        self.memory.clear()
        return avg_loss

    def update_gradient_clipping(self):
        if len(self.memory.states) == 0:
            return 0.0

        states, actions, rewards, dones, values = self._prepare_batch_data()
        adv, returns = self._compute_gae(rewards, values, dones)

        with T.no_grad():
            adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)

        avg_loss = self._a2c_update(states, actions, returns, adv, use_grad_clip=True)
        self.memory.clear()
        return avg_loss

    def update_obs_norm(self):
        if len(self.memory.states) == 0:
            return 0.0

        states, actions, rewards, dones, values = self._prepare_batch_data()
        adv, returns = self._compute_gae(rewards, values, dones)

        with T.no_grad():
            # Observation normalization
            states = self.observeNorm.normalize(states)
            # Advantage normalization
            adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)

        avg_loss = self._a2c_update(states, actions, returns, adv)
        self.memory.clear()
        return avg_loss

    def update_adv_norm(self):
        if len(self.memory.states) == 0:
            return 0.0

        states, actions, rewards, dones, values = self._prepare_batch_data()
        adv, returns = self._compute_gae(rewards, values, dones)

        with T.no_grad():
            # Advantage normalization
            adv = self.advantageNorm.normalize(adv)

        avg_loss = self._a2c_update(states, actions, returns, adv)
        self.memory.clear()
        return avg_loss

    def update_return_norm(self):
        if len(self.memory.states) == 0:
            return 0.0

        states, actions, rewards, dones, values = self._prepare_batch_data()

        with T.no_grad():
            # One-step bootstrapped returns and advantages
            next_values = T.cat([values[1:], values[-1:].clone()])
            returns = rewards + self.gamma * next_values * (1 - dones)
            adv = returns - values
            # Per-batch return normalization, then advantage normalization
            returns = self.returnNorm.normalize(returns)
            adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)

        avg_loss = self._a2c_update(states, actions, returns, adv)
        self.memory.clear()
        return avg_loss

    def update_reward_norm(self):
        if len(self.memory.states) == 0:
            return 0.0

        states, actions, rewards, dones, values = self._prepare_batch_data()

        with T.no_grad():
            # Per-batch reward normalization, then one-step returns and advantages
            rewards = (rewards - rewards.mean()) / (rewards.std(unbiased=False) + 1e-8)
            next_values = T.cat([values[1:], values[-1:].clone()])
            returns = rewards + self.gamma * next_values * (1 - dones)
            adv = returns - values
            adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)

        avg_loss = self._a2c_update(states, actions, returns, adv)
        self.memory.clear()
        return avg_loss


# Policy network (CNN)
class Policy(nn.Module):
    def __init__(self, obs_shape: tuple, action_dim: int, hidden: int):
        super().__init__()
        c, h, w = obs_shape
        # Suggested architecture for Atari: https://arxiv.org/pdf/1312.5602
        self.cnn = nn.Sequential(
            nn.Conv2d(c, 16, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(16, 32, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Flatten(),
            nn.Linear(32 * 9 * 9, 256),  # 2592 → 256
            nn.ReLU(),
        )

        # Final output layer: one logit per action
        self.net = nn.Linear(256, action_dim)

    def next_action(self, state: T.Tensor) -> Categorical:
        # state shape should be (B, C, H, W)
        if state.dim() == 3:
            state = state.unsqueeze(0)

        cnn_out = self.cnn(state)   # [B, 256]
        logits = self.net(cnn_out)  # [B, action_dim]
        return Categorical(logits=logits)


# Critic network (CNN)
class Critic(nn.Module):
    def __init__(self, obs_shape: tuple, hidden: int):
        super().__init__()
        c, h, w = obs_shape
        # Suggested architecture for Atari: https://arxiv.org/pdf/1312.5602
        self.cnn = nn.Sequential(
            nn.Conv2d(c, 16, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(16, 32, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Flatten()
        )

        with T.no_grad():
            cnn_output_dim = self.cnn(T.zeros(1, c, h, w)).shape[1]

        self.net = nn.Sequential(
            nn.Linear(cnn_output_dim, hidden),
            nn.ReLU(),
            nn.Linear(hidden, 1)
        )

    def evaluated_state(self, x: T.Tensor) -> T.Tensor:
        if x.dim() == 3:
            x = x.unsqueeze(0)
        cnn_out = self.cnn(x)
        return self.net(cnn_out).squeeze(-1)


class Memory:
    def __init__(self):
        self.states = []
        self.actions = []
        self.rewards = []
        self.dones = []
        self.values = []
        self.next_values = []

    def store(self, state, action, reward, done, value, next_value):
        self.states.append(np.asarray(state, dtype=np.float32))
        self.actions.append(int(action))
        self.rewards.append(float(reward))
        self.dones.append(float(done))
        self.values.append(float(value))
        self.next_values.append(float(next_value))

    def clear(self):
        self.states = []
        self.actions = []
        self.rewards = []
        self.dones = []
        self.values = []
        self.next_values = []


class ObservationNorm:

    def normalize(self, x):
        # Epsilon avoids division by zero
        return (x - x.mean()) / (x.std(unbiased=False) + 1e-8)


class AdvantageNorm:
    '''
    Advantage normalization: advantages are z-scored within the current batch.
    '''

    def normalize(self, x):
        # Epsilon avoids division by zero
        return (x - x.mean()) / (x.std(unbiased=False) + 1e-8)


class ReturnNorm:
    '''
    Return normalization: returns are z-scored within the current batch.
    '''

    def normalize(self, x):
        return (x - x.mean()) / (x.std(unbiased=False) + 1e-8)
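
For context, here is a minimal driver sketch (not part of the commit, purely illustrative, with arbitrary hyperparameters) showing how the Agent API above is meant to be exercised: collect a rollout with choose_action/remember, then run one of the update methods. It feeds random synthetic (1, 84, 84) observations instead of a real Atari environment; the actual training loop lives in a2c_main.py below.

    import numpy as np
    from gymnasium.spaces import Box, Discrete
    from a2c_helpers import Agent

    # Dummy (C, H, W) observation space matching what preprocess() in a2c_main.py produces
    obs_space = Box(low=0.0, high=1.0, shape=(1, 84, 84))
    agent = Agent(obs_space=obs_space, action_space=Discrete(5), hidden=64,
                  gamma=0.99, lr=1e-4, value_coef=0.5, entropy_coef=0.01, seed=0, lam=0.95)

    state = np.random.rand(1, 84, 84).astype(np.float32)
    for t in range(16):
        action, value = agent.choose_action(state)
        next_state = np.random.rand(1, 84, 84).astype(np.float32)
        reward, done = float(np.random.rand()), (t == 15)  # synthetic reward / episode end
        agent.remember(state, action, reward, done, value, next_state)
        state = next_state

    # One A2C update over the stored batch: GAE, advantage normalization, single gradient step
    loss = agent.vanilla_a2c_update()
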
A2C_combo/a2c_main.py ADDED
@@ -0,0 +1,348 @@
import argparse
import gymnasium as gym
import sys
import matplotlib.pyplot as plt
import ale_py
from a2c_helpers import *
from gymnasium.spaces import Box
import cv2
import logging
import numpy as np
import pandas as pd

# Set up logging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
logger = logging.getLogger(__name__)


# Preprocess an observation: grayscale, resize to 84x84, scale to [0, 1], shape (1, 84, 84)
def preprocess(obs):
    # Convert to grayscale
    obs = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
    # Resize
    obs = cv2.resize(obs, (84, 84), interpolation=cv2.INTER_AREA)

    return np.expand_dims(obs, axis=0).astype(np.float32) / 255.0


# Add per-update Avg/High/Low columns (across seeds) to each dataframe
def df_ops(lst_df, seeds):
    for df in lst_df:
        seed_data = df[seeds]
        df['Avg'] = seed_data.mean(axis=1)
        df['High'] = seed_data.max(axis=1)
        df['Low'] = seed_data.min(axis=1)

    return lst_df


# Main loop
def main() -> int:
    # Initialize variables
    batches = 1000
    steps = 5
    clip_interval = 2
    seeds = [10, 20, 30, 40, 50]
    ep_per_batch = 5

    # batches = 5
    # steps = 5
    # clip_interval = 2
    # seeds = [10, 20]
    # ep_per_batch = 2

    # Arguments
    """
    usage examples:
    python3 a2c_main.py --method vanilla

    python3 a2c_main.py --method grad_clip

    python3 a2c_main.py --method rbs
    """
    parser = argparse.ArgumentParser(description='A2C Training')

    parser.add_argument('--method', type=str, choices=['vanilla', 'reward_clip', 'rbs', 'grad_clip',
                                                       'obs_norm', 'adv_norm', 'return_norm', 'reward_norm'],
                        default='vanilla', help='A2C update method')
    parser.add_argument('--env', type=str, default='ALE/Pacman-v5',
                        help='Gym environment name (e.g., ALE/Pacman-v5, ALE/SpaceInvaders-v5)')
    parser.add_argument('--render', action='store_true', help='Enable rendering')
    parser.add_argument('--clip_window', type=int, default=clip_interval,
                        help='Number of batches to collect rewards for clipping range update')

    args = parser.parse_args()

    # Set up environment
    if args.render:
        env = gym.make(args.env, render_mode="human")
    else:
        env = gym.make(args.env)

    logger.info(f"Observation space: {env.observation_space}")
    logger.info(f"Action space: {env.action_space}")
    logger.info(f'Method: {args.method}')

    # Initialize the CNNs with a dummy observation to get the correct input shape
    obs, _ = env.reset()
    dummy_obs_space = Box(low=0.0, high=1.0, shape=preprocess(obs).shape)

    # Initialize A2C agent
    agent = Agent(obs_space=dummy_obs_space, action_space=env.action_space,
                  hidden=64, lr=0.00001, gamma=0.997,
                  entropy_coef=0.01, value_coef=0.5, seed=70, lam=0.95)

    # === Return-Based Scaling stats (for RBS method) ===
    r_mean, r_var = 0.0, 1e-8
    g2_mean = 1.0
    agent.r_var = r_var
    agent.g2_mean = g2_mean

    all_reward_histories = pd.DataFrame(columns=seeds, index=range(1, batches + 1))
    all_loss_histories = pd.DataFrame(columns=seeds, index=range(1, batches + 1))
    all_policy_loss = pd.DataFrame(columns=seeds)
    all_value_loss = pd.DataFrame(columns=seeds)

    step = 0
    episode = 0
    total_return = 0
    # Main update loop
    try:
        for seed in seeds:
            obs, info = env.reset(seed=seed)
            state = preprocess(obs)

            loss_history = []
            reward_history = []
            episode = 0
            total_return = 0

            # Update loop: reward clipping
            if args.method == 'reward_clip':
                alpha = np.random.uniform(1, 2)
                logger.info(f"α sampled = {alpha:.3f} seed = {seed}")

                clip_low, clip_high = None, None
                ep_reward_history = []

                obs, info = env.reset()
                state = preprocess(obs)

                for update in range(1, batches + 1):

                    batch_episode_returns = []  # used for μ, σ

                    for _ in range(ep_per_batch):
                        ep_rewards = []
                        done = False

                        while not done:
                            action, value = agent.choose_action(state)
                            next_obs, reward, terminated, truncated, info = env.step(action)
                            done = terminated or truncated
                            next_state = preprocess(next_obs)

                            ep_rewards.append(reward)

                            agent.remember(state, action, reward, done, value, next_state)

                            state = next_state

                            if done:
                                ep_return = sum(ep_rewards)
                                if clip_low is not None:
                                    clipped_return = np.clip(ep_return, clip_low, clip_high)
                                else:
                                    clipped_return = ep_return
                                ep_reward_history.append(clipped_return)
                                batch_episode_returns.append(clipped_return)

                                episode += 1
                                total_return += clipped_return

                                logger.info(f"Episode {episode} return: {clipped_return:.2f}")

                                obs, info = env.reset()
                                state = preprocess(obs)

                    # === Compute new clipping bounds from this batch's episode returns ===
                    mu = np.mean(batch_episode_returns)
                    sigma = np.std(batch_episode_returns)
                    sigma = sigma + 1e-8 if sigma != 0 else 1

                    clip_low = mu - alpha * sigma
                    clip_high = mu + alpha * sigma

                    logger.info(
                        f"[UPDATE {update}] New Reward Clip Range: "
                        f"[{clip_low:.4f}, {clip_high:.4f}]"
                    )

                    # === A2C UPDATE ===
                    avg_loss = agent.vanilla_a2c_update()
                    loss_history.append(avg_loss)

                    avg_ret = np.mean(batch_episode_returns)
                    reward_history.append(avg_ret)

                    logger.info(
                        f"Update {update}: batch_mean={avg_ret:.4f}, "
                        f"batch_std={np.std(batch_episode_returns):.4f}, "
                        f"episodes={episode}, avg_loss={avg_loss:.4f}"
                    )

            # Update loop: other normalization methods
            else:
                for update in range(1, batches + 1):
                    batch_episode_rewards = []

                    for _ in range(ep_per_batch):
                        ep_rewards = []

                        done = False

                        while not done:
                            action, value = agent.choose_action(state)
                            next_obs, reward, terminated, truncated, info = env.step(action)
                            done = terminated or truncated
                            next_state = preprocess(next_obs)

                            ep_rewards.append(reward)
                            agent.remember(state, action, reward, done, value, next_state)

                            state = next_state

                            if done:
                                ep_return = sum(ep_rewards)
                                episode += 1
                                total_return += ep_return
                                batch_episode_rewards.append(ep_return)
                                logger.info(f"Episode {episode} return: {ep_return:.2f}")

                                obs, info = env.reset()
                                state = preprocess(obs)

                    # Choose normalization method
                    if args.method == 'vanilla':
                        avg_loss = agent.vanilla_a2c_update()
                    elif args.method == 'grad_clip':
                        avg_loss = agent.update_gradient_clipping()
                    elif args.method == 'obs_norm':
                        avg_loss = agent.update_obs_norm()
                    elif args.method == 'adv_norm':
                        avg_loss = agent.update_adv_norm()
                    elif args.method == 'reward_norm':
                        avg_loss = agent.update_reward_norm()
                    else:  # rbs
                        avg_loss = agent.update_rbs()

                    loss_history.append(avg_loss)

                    avg_ret = (total_return / episode) if episode else 0
                    reward_history.append(avg_ret)
                    logger.info(
                        f"Update {update}: episodes={episode}, avg_return={avg_ret:.2f}, avg_loss={avg_loss:.4f}")

            all_reward_histories[seed] = reward_history
            all_loss_histories[seed] = loss_history
            all_value_loss[seed] = agent.value_loss_history[step:step + batches]
            all_policy_loss[seed] = agent.policy_loss_history[step:step + batches]

            step += batches

        [all_reward_histories, all_loss_histories, all_value_loss, all_policy_loss] = df_ops(
            [all_reward_histories, all_loss_histories, all_value_loss, all_policy_loss],
            seeds
        )
        # [all_reward_histories, all_loss_histories] = df_ops([all_reward_histories, all_loss_histories], seeds)

        # all_policy_loss.to_csv(args.method + '_policy_loss.csv')
        # all_value_loss.to_csv(args.method + '_value_loss.csv')
        # all_reward_histories.to_csv(args.method + '_a2c_reward_history.csv')
        # all_loss_histories.to_csv(args.method + '_a2c_loss_history.csv')

        fig = plt.figure(figsize=(15, 10))

        # --- Subplot 1: Average A2C Loss ---
        ax2 = plt.subplot(221)
        # Plot the shaded High-Low Range
        ax2.fill_between(
            all_loss_histories.index,
            all_loss_histories['Low'],
            all_loss_histories['High'],
            color='#A8DADC',  # Light blue for aesthetic shading
            alpha=0.5,
            label="High-Low Range"
        )
        # Plot the Average Line
        ax2.plot(all_loss_histories['Avg'], label="Avg Loss", color='#1D3557', linewidth=2)
        ax2.set_ylabel("Average A2C Loss")
        ax2.set_xlabel("A2C Update")
        ax2.legend()

        # --- Subplot 2: Reward ---
        ax3 = plt.subplot(222)
        # Plot the shaded High-Low Range
        ax3.fill_between(
            all_reward_histories.index,
            all_reward_histories['Low'],
            all_reward_histories['High'],
            color='#FEDCC8',  # Light orange/peach
            alpha=0.5,
            label="High-Low Range"
        )
        # Plot the Average Line
        ax3.plot(all_reward_histories['Avg'], label="Avg Reward", color='#E63946', linewidth=2)
        ax3.set_ylabel("Average Reward")
        ax3.set_xlabel("A2C Update")
        ax3.legend()

        # --- Subplot 3: Policy Loss ---
        ax4 = plt.subplot(223)
        # Plot the shaded High-Low Range
        ax4.fill_between(
            all_policy_loss.index,
            all_policy_loss['Low'],
            all_policy_loss['High'],
            color='#B0E0A0',  # Light green
            alpha=0.5,
            label="High-Low Range"
        )
        # Plot the Average Line
        ax4.plot(all_policy_loss['Avg'], label="Policy Loss", color='#38B000', linewidth=2)
        ax4.set_ylabel("Average Policy Loss")
        ax4.set_xlabel("A2C Update")
        ax4.legend()

        # --- Subplot 4: Value Loss ---
        ax5 = plt.subplot(224)
        # Plot the shaded High-Low Range
        ax5.fill_between(
            all_value_loss.index,
            all_value_loss['Low'],
            all_value_loss['High'],
            color='#D7BDE2',  # Light purple
            alpha=0.5,
            label="High-Low Range"
        )
        # Plot the Average Line
        ax5.plot(all_value_loss['Avg'], label="Value Loss", color='#8E44AD', linewidth=2)
        ax5.set_ylabel("Average Value Loss")
        ax5.set_xlabel("A2C Update")
        ax5.legend()

        # --- Figure Settings ---
        fig.suptitle(f"A2C Training Stability - {args.method}", fontsize=16, fontweight='bold')
        # fig.tight_layout()  # Adjust layout to make room for suptitle
        plt.show()

    except Exception as e:
        logger.error(f"Error: {e}", exc_info=True)
        return 1
    finally:
        avg = total_return / episode if episode else 0
        logger.info(f"\nEpisodes: {episode}, Avg return: {avg:.3f}")
        env.close()

    return 0


if __name__ == "__main__":
    raise SystemExit(main())