ShuvamGanguli committed on
Commit 3d433ba · verified · 1 Parent(s): fc2ab64

Upload 4 files


Uploading final 4 files to run for graphs.

Files changed (4)
  1. a2c_helpers.py +417 -0
  2. a2c_main.py +348 -0
  3. ppo_helpers_cnn.py +673 -0
  4. ppo_main.py +383 -0
a2c_helpers.py ADDED
@@ -0,0 +1,417 @@
1
+ import numpy as np
2
+ import torch as T
3
+ import torch.nn as nn
4
+ import torch.optim as optim
5
+ from torch.distributions import Categorical
6
+
7
+
8
+ class Agent:
9
+ def __init__(
10
+ self,
11
+ obs_space,
12
+ action_space,
13
+ hidden,
14
+ gamma,
15
+ lr,
16
+ value_coef,
17
+ entropy_coef,
18
+ seed,
19
+ lam
20
+
21
+ ):
22
+ EPSILON = 1e-8
23
+
24
+ # Initialize seed for reproducibility
25
+ if seed is not None:
26
+ np.random.seed(seed)
27
+ T.manual_seed(seed)
28
+
29
+ # Use GPU if available
30
+ self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
31
+ self.action_dim = int(getattr(action_space, "n", action_space))
32
+
33
+ # Initialize the policy and the critic networks
34
+ self.policy = Policy(obs_space.shape, self.action_dim, hidden).to(self.device)
35
+ self.critic = Critic(obs_space.shape, hidden).to(self.device)
36
+
37
+ # Set optimizer for policy and critic networks
38
+ self.opt = optim.Adam(
39
+ list(self.policy.parameters()) + list(self.critic.parameters()),
40
+ lr=lr
41
+ )
42
+
43
+ self.gamma = gamma
44
+ self.value_coef = value_coef
45
+ self.entropy_coef = entropy_coef
46
+ self.sigma_history = []
47
+ self.loss_history = []
48
+ self.policy_loss_history = []
49
+ self.value_loss_history = []
50
+ self.entropy_history = []
51
+ self.lam = lam
52
+ self.EPSILON = EPSILON
53
+ self.observeNorm = ObservationNorm()
54
+ self.advantageNorm = AdvantageNorm()
55
+ self.returnNorm = ReturnNorm()
56
+
57
+ self.memory = Memory()
58
+
59
+ # Function to choose action based on current policy
60
+ # Returns: action, value of the state
61
+ def choose_action(self, observation):
62
+ state = T.as_tensor(observation, dtype=T.float32, device=self.device)
63
+ with T.no_grad():
64
+ dist = self.policy.next_action(state)
65
+ action = dist.sample()
66
+ value = self.critic.evaluated_state(state)
67
+ return int(action.item()), float(value.item())
68
+
69
+ # Store reward, state, action in memory
70
+ def remember(self, state, action, reward, done, value, next_state):
71
+ with T.no_grad():
72
+ ns = T.as_tensor(next_state, dtype=T.float32, device=self.device)
73
+ next_value = self.critic.evaluated_state(ns).item()
74
+ self.memory.store(state, action, reward, done, value, next_value)
75
+
76
+ def _prepare_batch_data(self):
77
+ """Convert memory to tensors."""
78
+ states = T.as_tensor(np.array(self.memory.states), dtype=T.float32, device=self.device)
79
+ actions = T.as_tensor(self.memory.actions, dtype=T.long, device=self.device)
80
+ rewards = T.as_tensor(self.memory.rewards, dtype=T.float32, device=self.device)
81
+ dones = T.as_tensor(self.memory.dones, dtype=T.float32, device=self.device)
82
+ values = T.as_tensor(self.memory.values, dtype=T.float32, device=self.device)
83
+ return states, actions, rewards, dones, values
84
+
85
+ def _compute_gae(self, rewards, values, dones):
86
+ """Compute Generalized Advantage Estimation."""
87
+ with T.no_grad():
88
+ next_values = T.cat([values[1:], values[-1:].clone()])
89
+ deltas = rewards + self.gamma * next_values * (1 - dones) - values
90
+
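+ # GAE recursion over TD errors: A_t = delta_t + gamma * lam * (1 - done_t) * A_{t+1},
+ # with delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t) computed above.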
91
+ adv = T.zeros_like(rewards)
92
+ gae = 0.0
93
+ for t in reversed(range(len(rewards))):
94
+ gae = deltas[t] + self.gamma * self.lam * (1 - dones[t]) * gae
95
+ adv[t] = gae
96
+
97
+ returns = adv + values
98
+ return adv, returns
99
+
100
+ def _compute_a2c_loss(self, states, actions, returns, advantages):
101
+ """Compute A2C loss components."""
102
+ dist = self.policy.next_action(states)
103
+ new_logp = dist.log_prob(actions)
104
+ entropy = dist.entropy().mean()
105
+
106
+ # Simple policy gradient (no clipping)
107
+ policy_loss = -(new_logp * advantages).mean()
108
+
109
+ # Critic loss
110
+ value_pred = self.critic.evaluated_state(states)
111
+ value_loss = 0.5 * (returns - value_pred).pow(2).mean()
112
+
113
+ # Total loss
114
+ total_loss = (
115
+ policy_loss +
116
+ self.value_coef * value_loss -
117
+ self.entropy_coef * entropy
118
+ )
119
+
120
+ return total_loss, policy_loss, value_loss
121
+
122
+ def _a2c_update(self, states, actions, returns, adv, use_grad_clip=False):
123
+ """Run single A2C update (no multiple epochs)."""
124
+ total_loss, policy_loss, value_loss = self._compute_a2c_loss(
125
+ states, actions, returns, adv
126
+ )
127
+
128
+ self.policy_loss_history.append(policy_loss.item())
129
+ self.value_loss_history.append(value_loss.item())
130
+
131
+ self.opt.zero_grad(set_to_none=True)
132
+ total_loss.backward()
133
+
134
+ if use_grad_clip:
135
+ T.nn.utils.clip_grad_norm_(
136
+ list(self.policy.parameters()) + list(self.critic.parameters()),
137
+ 0.5
138
+ )
139
+
140
+ self.opt.step()
141
+
142
+ return total_loss.item()
143
+
144
+ def vanilla_a2c_update(self):
145
+ if len(self.memory.states) == 0:
146
+ return 0.0
147
+
148
+ states, actions, rewards, dones, values = self._prepare_batch_data()
149
+ adv, returns = self._compute_gae(rewards, values, dones)
150
+
151
+ with T.no_grad():
152
+ adv = (adv - adv.mean()) / (adv.std(unbiased=False) + self.EPSILON)
153
+
154
+ avg_loss = self._a2c_update(states, actions, returns, adv) # changed
155
+ self.memory.clear()
156
+ return avg_loss
157
+
158
+ def update_rbs(self):
159
+ if len(self.memory.states) == 0:
160
+ return 0.0
161
+
162
+ states, actions, rewards, dones, values = self._prepare_batch_data()
163
+ adv, returns = self._compute_gae(rewards, values, dones)
164
+
165
+ with T.no_grad():
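+ # Return-based scaling: divide both returns and advantages by the std of the
+ # batch returns so the value targets stay on a comparable scale across updates.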
166
+ sigma_t = returns.std(unbiased=False) + 1e-8
167
+ returns = returns / sigma_t
168
+ self.sigma_history.append(sigma_t.item())
169
+ adv = adv / sigma_t
170
+ adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)
171
+
172
+ avg_loss = self._a2c_update(states, actions, returns, adv) # changed
173
+ self.memory.clear()
174
+ return avg_loss
175
+
176
+ def update_gradient_clipping(self):
177
+ if len(self.memory.states) == 0:
178
+ return 0.0
179
+
180
+ states, actions, rewards, dones, values = self._prepare_batch_data()
181
+ adv, returns = self._compute_gae(rewards, values, dones)
182
+
183
+ with T.no_grad():
184
+ adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)
185
+
186
+ avg_loss = self._a2c_update(states, actions, returns, adv, use_grad_clip=True) # changed
187
+ self.memory.clear()
188
+ return avg_loss
189
+
190
+ def update_obs_norm(self):
191
+ if len(self.memory.states) == 0:
192
+ return 0.0
193
+
194
+ states, actions, rewards, dones, values = self._prepare_batch_data()
195
+ adv, returns = self._compute_gae(rewards, values, dones)
196
+
197
+ with T.no_grad():
198
+ # --- observation normalization ---
199
+ states = self.observeNorm.normalize(states)
200
+ # Advantage normalization
201
+ adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)
202
+
203
+ avg_loss = self._a2c_update(states, actions, returns, adv)
204
+ self.memory.clear()
205
+ return avg_loss
206
+
207
+ def update_adv_norm(self):
208
+ if len(self.memory.states) == 0:
209
+ return 0.0
210
+
211
+ states, actions, rewards, dones, values = self._prepare_batch_data()
212
+ adv, returns = self._compute_gae(rewards, values, dones)
213
+
214
+ with T.no_grad():
215
+ # --- Advantage normalization ---
216
+ adv = self.advantageNorm.normalize(adv)
217
+
218
+ avg_loss = self._a2c_update(states, actions, returns, adv)
219
+ self.memory.clear()
220
+ return avg_loss
221
+
222
+ def update_return_norm(self):
223
+ if len(self.memory.states) == 0:
224
+ return 0.0
225
+
226
+ states = T.as_tensor(np.array(self.memory.states), dtype=T.float32, device=self.device)
227
+ actions = T.as_tensor(self.memory.actions, dtype=T.long, device=self.device)
228
+ rewards = T.as_tensor(self.memory.rewards, dtype=T.float32, device=self.device)
229
+ dones = T.as_tensor(self.memory.dones, dtype=T.float32, device=self.device)
230
+ values = T.as_tensor(self.memory.values, dtype=T.float32, device=self.device)
231
+
232
+ with T.no_grad():
233
+ next_values = T.cat([values[1:], values[-1:].clone()])
234
+ returns = rewards + self.gamma * next_values * (1 - dones)
235
+ adv = returns - values
236
+ returns = self.returnNorm.normalize(returns)
237
+ adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)
238
+
239
258
+ avg_loss = self._a2c_update(states, actions, returns, adv)
259
+ self.memory.clear()
260
+ return avg_loss
261
+
262
+ def update_reward_norm(self):
263
+ if len(self.memory.states) == 0:
264
+ return 0.0
265
+
266
+ states = T.as_tensor(np.array(self.memory.states), dtype=T.float32, device=self.device)
267
+ actions = T.as_tensor(self.memory.actions, dtype=T.long, device=self.device)
268
+ rewards = T.as_tensor(self.memory.rewards, dtype=T.float32, device=self.device)
269
+ dones = T.as_tensor(self.memory.dones, dtype=T.float32, device=self.device)
270
+ values = T.as_tensor(self.memory.values, dtype=T.float32, device=self.device)
271
+
272
+ rewards = (rewards - rewards.mean()) / (rewards.std(unbiased=False) + 1e-8)
273
+
274
+ with T.no_grad():
275
+ next_values = T.cat([values[1:], values[-1:].clone()])
276
+
277
+ returns = rewards + self.gamma * next_values * (1 - dones)
278
+
279
+ adv = returns - values
280
+
281
+ adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)
282
+
283
303
+ avg_loss = self._a2c_update(states, actions, returns, adv)
304
+ self.memory.clear()
305
+ return avg_loss
306
+
307
+ # Policy network (CNN)
308
+ class Policy(nn.Module):
309
+ def __init__(self, obs_shape: tuple, action_dim: int, hidden: int):
310
+ super().__init__()
311
+ c, h, w = obs_shape
312
+ # Suggested architecture for Atari: https://arxiv.org/pdf/1312.5602
313
+ self.cnn = nn.Sequential(
314
+ nn.Conv2d(c, 16, kernel_size=8, stride=4),
315
+ nn.ReLU(),
316
+ nn.Conv2d(16, 32, kernel_size=4, stride=2),
317
+ nn.ReLU(),
318
+ nn.Flatten(),
319
+ nn.Linear(32 * 9 * 9, 256), # 2592 → 256
320
+ nn.ReLU(),
321
+ )
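+ # The 32 * 9 * 9 flatten size assumes 84x84 inputs:
+ # 84 -> 20 after the 8x8/stride-4 conv, 20 -> 9 after the 4x4/stride-2 conv.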
322
+
323
+ # Final output layer: one logit per action
324
+ self.net = nn.Linear(256, action_dim)
325
+
326
+ def next_action(self, state: T.Tensor) -> Categorical:
327
+ # state shape should be (B, C, H, W)
328
+ if state.dim() == 3:
329
+ state = state.unsqueeze(0)
330
+
331
+ cnn_out = self.cnn(state) # [B, 256]
332
+ logits = self.net(cnn_out) # [B, action_dim]
333
+ return Categorical(logits=logits)
334
+
335
+
336
+ # Critic network (CNN)
337
+ class Critic(nn.Module):
338
+ def __init__(self, obs_shape: tuple, hidden: int):
339
+ super().__init__()
340
+ c, h, w = obs_shape
341
+ # Suggested architecture for Atari: https://arxiv.org/pdf/1312.5602
342
+ self.cnn = nn.Sequential(
343
+ nn.Conv2d(c, 16, kernel_size=8, stride=4),
344
+ nn.ReLU(),
345
+ nn.Conv2d(16, 32, kernel_size=4, stride=2),
346
+ nn.ReLU(),
347
+ nn.Flatten()
348
+ )
349
+
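+ # Infer the flattened conv output size with a dummy forward pass.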
350
+ with T.no_grad():
351
+ cnn_output_dim = self.cnn(T.zeros(1, c, h, w)).shape[1]
352
+
353
+ self.net = nn.Sequential(
354
+ nn.Linear(cnn_output_dim, hidden),
355
+ nn.ReLU(),
356
+ nn.Linear(hidden, 1)
357
+ )
358
+
359
+ def evaluated_state(self, x: T.Tensor) -> T.Tensor:
360
+ if x.dim() == 3:
361
+ x = x.unsqueeze(0)
362
+ cnn_out = self.cnn(x)
363
+ return self.net(cnn_out).squeeze(-1)
364
+
365
+
366
+ class Memory():
367
+ def __init__(self):
368
+ self.states = []
369
+ self.actions = []
370
+ self.rewards = []
371
+ self.dones = []
372
+ self.values = []
373
+ self.next_values = []
374
+
375
+ def store(self, state, action, reward, done, value, next_value):
376
+ self.states.append(np.asarray(state, dtype=np.float32))
377
+ self.actions.append(int(action))
378
+ self.rewards.append(float(reward))
379
+ self.dones.append(float(done))
380
+ self.values.append(float(value))
381
+ self.next_values.append(float(next_value))
382
+
383
+ def clear(self):
384
+ self.states = []
385
+ self.actions = []
386
+ self.rewards = []
387
+ self.dones = []
388
+ self.values = []
389
+ self.next_values = []
390
+
391
+
392
+ class ObservationNorm:
393
+
394
+ def normalize(self, x):
395
+ return (x - x.mean()) / (x.std(unbiased=False) + 1e-8) # We add epsilon to make sure that we don't
396
+ # divide by zero.
397
+
398
+
399
+ class AdvantageNorm:
400
+ '''
401
+ This class implements the Advantage Normalization. The purpose is to normalize either across batches or
402
+ only within the same batch.
403
+ '''
404
+
405
+ def normalize(self, x):
406
+ return (x - x.mean()) / (x.std(unbiased=False) + 1e-8) # We add epsilon to make sure that we don't
407
+ # divide by zero.
408
+
409
+
410
+ class ReturnNorm:
411
+ '''
412
+ This class implements the Return Normalization. The purpose is to normalize either across batches or
413
+ only within the same batch.
414
+ '''
415
+
416
+ def normalize(self, x):
417
+ return (x - x.mean()) / (x.std(unbiased=False) + 1e-8)
a2c_main.py ADDED
@@ -0,0 +1,348 @@
1
+ import argparse
2
+ import gymnasium as gym
3
+ import sys
4
+ import matplotlib.pyplot as plt
5
+ import ale_py
6
+ from a2c_helpers import *
7
+ from gymnasium.spaces import Box
8
+ import cv2
9
+ import logging
10
+ import numpy as np
11
+ import pandas as pd
12
+
13
+ # Set up logging
14
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ # Preprocess environment
19
+ def preprocess(obs):
20
+ # Convert to grayscale
21
+ obs = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
22
+ # Resize
23
+ obs = cv2.resize(obs, (84, 84), interpolation=cv2.INTER_AREA)
24
+
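+ # Scale to [0, 1] and add a channel dimension -> shape (1, 84, 84)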
25
+ return np.expand_dims(obs, axis=0).astype(np.float32) / 255.0
26
+
27
+
28
+ def df_ops(lst_df, seeds):
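+ """Add per-row Avg/High/Low columns across the seed columns of each DataFrame (used for plotting)."""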
29
+ for df in lst_df:
30
+ seed_data = df[seeds]
31
+ df['Avg'] = seed_data.mean(axis=1)
32
+ df['High'] = seed_data.max(axis=1)
33
+ df['Low'] = seed_data.min(axis=1)
34
+
35
+ return lst_df
36
+
37
+
38
+ # Main loop
39
+ def main() -> int:
40
+ # Initialize variables
41
+ batches = 1000
42
+ steps = 5
43
+ clip_interval = 2
44
+ seeds = [10, 20, 30, 40, 50]
45
+ ep_per_batch = 5
46
+
47
+ # batches = 5
48
+ # steps = 5
49
+ # clip_interval = 2
50
+ # seeds = [10, 20]
51
+ # ep_per_batch = 2
52
+ # Arguments
53
+ """
54
+ usage examples:
55
+ python3 a2c_main.py --method vanilla
56
+
57
+ python3 a2c_main.py --method grad_clip
58
+
59
+ python3 a2c_main.py --method rbs
60
+ """
61
+ parser = argparse.ArgumentParser(description='A2C Training')
62
+
63
+ parser.add_argument('--method', type=str, choices=['vanilla', 'reward_clip', 'rbs', 'grad_clip',
64
+ 'obs_norm', 'adv_norm', 'return_norm', 'reward_norm'],
65
+ default='vanilla', help='A2C update method')
66
+ parser.add_argument('--env', type=str, default='ALE/Pacman-v5',
67
+ help='Gym environment name (e.g., ALE/Pacman-v5, ALE/SpaceInvaders-v5)')
68
+ parser.add_argument('--render', action='store_true', help='Enable rendering')
69
+ parser.add_argument('--clip_window', type=int, default=clip_interval,
70
+ help='Number of batches to collect rewards for clipping range update')
71
+
72
+ args = parser.parse_args()
73
+
74
+ # Set up environment
75
+ if args.render:
76
+ env = gym.make(args.env, render_mode="human")
77
+ else:
78
+ env = gym.make(args.env)
79
+
80
+ logger.info(f"Observation space: {env.observation_space}")
81
+ logger.info(f"Action space: {env.action_space}")
82
+ logger.info(f'Method: {args.method}')
83
+
84
+ # Initialize CNN with a dummy observation to get correct input shape
85
+ obs, _ = env.reset()
86
+ dummy_obs_space = Box(low=0.0, high=1.0, shape=preprocess(obs).shape)
87
+
88
+ # Initialize A2C agent
89
+ agent = Agent(obs_space=dummy_obs_space, action_space=env.action_space,
90
+ hidden=64, lr=0.00001, gamma=0.997,
91
+ entropy_coef=0.01, value_coef=0.5, seed=70, lam=0.95)
92
+
93
+ # === Return-Based Scaling stats (for RBS method) ===
94
+ r_mean, r_var = 0.0, 1e-8
95
+ g2_mean = 1.0
96
+ agent.r_var = r_var
97
+ agent.g2_mean = g2_mean
98
+
99
+ all_reward_histories = pd.DataFrame(columns=[i for i in seeds], index=[i for i in range(1, batches + 1)])
100
+ all_loss_histories = pd.DataFrame(columns=[i for i in seeds], index=[i for i in range(1, batches + 1)])
101
+ all_policy_loss = pd.DataFrame(columns=[i for i in seeds])
102
+ all_value_loss = pd.DataFrame(columns=[i for i in seeds])
103
+
104
+ step = 0
105
+ # Main update loop
106
+ try:
107
+ for seed in seeds:
108
+ obs, info = env.reset(seed=seed)
109
+ state = preprocess(obs)
110
+
111
+ loss_history = []
112
+ reward_history = []
113
+ episode = 0
114
+ total_return = 0
115
+
116
+ """ Update loop: Gradient, Reward Normalization """
117
+ if args.method == 'reward_clip':
118
+ alpha = np.random.uniform(1, 2)
119
+ logger.info(f"α sampled = {alpha:.3f} seed = {seed}")
120
+
121
+ clip_low, clip_high = None, None
122
+ ep_reward_history = []
123
+
124
+ obs, info = env.reset()
125
+ state = preprocess(obs)
126
+
127
+ for update in range(1, batches + 1):
128
+
129
+ batch_episode_returns = [] # used for μ, σ
130
+
131
+ for _ in range(ep_per_batch):
132
+ ep_rewards = []
133
+ done = False
134
+
135
+ while not done:
136
+ action, value = agent.choose_action(state)
137
+ next_obs, reward, terminated, truncated, info = env.step(action)
138
+ done = terminated or truncated
139
+ next_state = preprocess(next_obs)
140
+
141
+ ep_rewards.append(reward)
142
+
143
+ agent.remember(state, action, reward, done, value, next_state)
144
+
145
+ state = next_state
146
+
147
+ if done:
148
+ ep_return = sum(ep_rewards)
149
+ if clip_low is not None:
150
+ clipped_return = np.clip(ep_return, clip_low, clip_high)
151
+ else:
152
+ clipped_return = ep_return
153
+ ep_reward_history.append(clipped_return)
154
+ batch_episode_returns.append(clipped_return)
155
+
156
+ episode += 1
157
+ total_return += clipped_return
158
+
159
+ logger.info(f"Episode {episode} return: {clipped_return:.2f}")
160
+
161
+ obs, info = env.reset()
162
+ state = preprocess(obs)
163
+
164
+ # === Compute the reward clipping bounds for this batch: [mu - alpha*sigma, mu + alpha*sigma] ===
165
+ mu = np.mean(batch_episode_returns)
166
+ sigma = np.std(batch_episode_returns) + 1e-8 if np.std(batch_episode_returns) != 0 else 1
167
+
168
+ clip_low = mu - alpha * sigma
169
+ clip_high = mu + alpha * sigma
170
+
171
+ logger.info(
172
+ f"[UPDATE {update}] New Reward Clip Range: "
173
+ f"[{clip_low:.4f}, {clip_high:.4f}]"
174
+ )
175
+
176
+ # === A2C UPDATE ===
177
+ avg_loss = agent.vanilla_a2c_update()
178
+ loss_history.append(avg_loss)
179
+
180
+ avg_ret = np.mean(batch_episode_returns)
181
+ reward_history.append(avg_ret)
182
+
183
+ logger.info(
184
+ f"Update {update}: batch_mean={avg_ret:.4f}, "
185
+ f"batch_std={np.std(batch_episode_returns):.4f}, "
186
+ f"episodes={episode}, avg_loss={avg_loss:.4f}"
187
+ )
188
+
189
+ """ Update loop: Other Normalization Methods """
190
+ else:
191
+ for update in range(1, batches + 1):
192
+ batch_episode_rewards = []
193
+ ep_per_batch = 5
194
+
195
+ for _ in range(ep_per_batch):
196
+ ep_rewards = []
197
+
198
+ done = False
199
+
200
+ while not done:
201
+ action, value = agent.choose_action(state)
202
+ next_obs, reward, terminated, truncated, info = env.step(action)
203
+ done = terminated or truncated
204
+ next_state = preprocess(next_obs)
205
+
206
+ ep_rewards.append(reward)
207
+ agent.remember(state, action, reward, done, value, next_state)
208
+
209
+ state = next_state
210
+
211
+ if done:
212
+ ep_return = sum(ep_rewards)
213
+ episode += 1
214
+ total_return += ep_return
215
+ batch_episode_rewards.append(ep_return)
216
+ logger.info(f"Episode {episode} return: {ep_return:.2f}")
217
+
218
+ obs, info = env.reset()
219
+ state = preprocess(obs)
220
+
221
+ # Choose normalization method
222
+ if args.method == 'vanilla':
223
+ avg_loss = agent.vanilla_a2c_update()
224
+ elif args.method == 'grad_clip':
225
+ avg_loss = agent.update_gradient_clipping()
226
+ elif args.method == 'obs_norm':
227
+ avg_loss = agent.update_obs_norm()
228
+ elif args.method == 'adv_norm':
229
+ avg_loss = agent.update_adv_norm()
230
+ elif args.method == 'reward_norm':
231
+ avg_loss = agent.update_reward_norm()
232
+ elif args.method == 'return_norm':
+ avg_loss = agent.update_return_norm()
+ else: # rbs
233
+ avg_loss = agent.update_rbs()
234
+
235
+ loss_history.append(avg_loss)
236
+
237
+ avg_ret = (total_return / episode) if episode else 0
238
+ reward_history.append(avg_ret)
239
+ logger.info(
240
+ f"Update {update}: episodes={episode}, avg_return={avg_ret:.2f}, avg_loss={avg_loss:.4f}")
241
+
242
+ all_reward_histories[seed] = reward_history
243
+ all_loss_histories[seed] = loss_history
244
+ all_value_loss[seed] = agent.value_loss_history[step:step+batches]
245
+ all_policy_loss[seed] = agent.policy_loss_history[step:step+batches]
246
+
247
+ step += batches
248
+
249
+ [all_reward_histories, all_loss_histories, all_value_loss, all_policy_loss] = df_ops([all_reward_histories,
250
+ all_loss_histories,
251
+ all_value_loss,
252
+ all_policy_loss],
253
+ seeds)
254
+ # [all_reward_histories, all_loss_histories] = df_ops([all_reward_histories, all_loss_histories], seeds)
255
+
256
+ # all_policy_loss.to_csv(args.method + '_policy_loss.csv')
257
+ # all_value_loss.to_csv(args.method + '_value_loss.csv')
258
+ # all_reward_histories.to_csv(args.method + '_a2c_reward_history.csv')
259
+ # all_loss_histories.to_csv(args.method + '_a2c_loss_history.csv')
260
+
261
+ fig = plt.figure(figsize=(15, 10))
262
+
263
+ # --- Subplot 1: Average A2C Loss ---
264
+ ax2 = plt.subplot(221)
265
+ # Plot the shaded High-Low Range
266
+ ax2.fill_between(
267
+ all_loss_histories.index,
268
+ all_loss_histories['Low'],
269
+ all_loss_histories['High'],
270
+ color='#A8DADC', # Light blue for aesthetic shading
271
+ alpha=0.5,
272
+ label="High-Low Range"
273
+ )
274
+ # Plot the Average Line
275
+ ax2.plot(all_loss_histories['Avg'], label="Avg Loss", color='#1D3557', linewidth=2)
276
+ ax2.set_ylabel("Average PPO Loss")
277
+ ax2.set_xlabel("PPO Update")
278
+ ax2.legend()
279
+
280
+ # --- Subplot 2: Reward ---
281
+ ax3 = plt.subplot(222)
282
+ # Plot the shaded High-Low Range
283
+ ax3.fill_between(
284
+ all_reward_histories.index,
285
+ all_reward_histories['Low'],
286
+ all_reward_histories['High'],
287
+ color='#FEDCC8', # Light orange/peach
288
+ alpha=0.5,
289
+ label="High-Low Range"
290
+ )
291
+ # Plot the Average Line
292
+ ax3.plot(all_reward_histories['Avg'], label="Avg Reward", color='#E63946', linewidth=2)
293
+ ax3.set_ylabel("Average Reward")
294
+ ax3.set_xlabel("PPO Update")
295
+ ax3.legend()
296
+
297
+ # --- Subplot 3: Policy Loss ---
298
+ ax4 = plt.subplot(223)
299
+ # Plot the shaded High-Low Range
300
+ ax4.fill_between(
301
+ all_policy_loss.index,
302
+ all_policy_loss['Low'],
303
+ all_policy_loss['High'],
304
+ color='#B0E0A0', # Light green
305
+ alpha=0.5,
306
+ label="High-Low Range"
307
+ )
308
+ # Plot the Average Line
309
+ ax4.plot(all_policy_loss['Avg'], label="Policy Loss", color='#38B000', linewidth=2)
310
+ ax4.set_ylabel("Average Policy Loss")
311
+ ax4.set_xlabel("PPO Update")
312
+ ax4.legend()
313
+
314
+ # --- Subplot 4: Value Loss ---
315
+ ax5 = plt.subplot(224)
316
+ # Plot the shaded High-Low Range
317
+ ax5.fill_between(
318
+ all_value_loss.index,
319
+ all_value_loss['Low'],
320
+ all_value_loss['High'],
321
+ color='#D7BDE2', # Light purple
322
+ alpha=0.5,
323
+ label="High-Low Range"
324
+ )
325
+ # Plot the Average Line
326
+ ax5.plot(all_value_loss['Avg'], label="Value Loss", color='#8E44AD', linewidth=2)
327
+ ax5.set_ylabel("Average Value Loss")
328
+ ax5.set_xlabel("PPO Update")
329
+ ax5.legend()
330
+
331
+ # --- Figure Settings ---
332
+ fig.suptitle(f"PPO Training Stability - {args.method}", fontsize=16, fontweight='bold')
333
+ # fig.tight_layout() # Adjust layout to make room for suptitle
334
+ plt.show()
335
+
336
+ except Exception as e:
337
+ logger.error(f"Error: {e}", exc_info=True)
338
+ return 1
339
+ finally:
340
+ avg = total_return / episode if episode else 0
341
+ logger.info(f"\nEpisodes: {episode}, Avg return: {avg:.3f}")
342
+ env.close()
343
+
344
+ return 0
345
+
346
+
347
+ if __name__ == "__main__":
348
+ raise SystemExit(main())
ppo_helpers_cnn.py ADDED
@@ -0,0 +1,673 @@
1
+ import numpy as np
2
+ import torch as T
3
+ import torch.nn as nn
4
+ import torch.optim as optim
5
+ from torch.distributions import Categorical
6
+
7
+
8
+ class Agent:
9
+ def __init__(
10
+ self,
11
+ obs_space,
12
+ action_space,
13
+ hidden,
14
+ gamma,
15
+ clip_coef,
16
+ lr,
17
+ value_coef,
18
+ entropy_coef,
19
+ seed,
20
+ batch_size,
21
+ ppo_epochs,
22
+ lam
23
+
24
+ ):
25
+ EPSILON = 1e-8
26
+ DEFAULT_BATCH_SIZE = 32
27
+ DEFAULT_PPO_EPOCHS = 5
28
+
29
+ # Initialize seed for reproducibility
30
+ if seed is not None:
31
+ np.random.seed(seed)
32
+ T.manual_seed(seed)
33
+
34
+ # Use GPU if available
35
+ self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
36
+ self.action_dim = int(getattr(action_space, "n", action_space))
37
+
38
+ # Initialize the policy and the critic networks
39
+ self.policy = Policy(obs_space.shape, self.action_dim, hidden).to(self.device)
40
+ self.critic = Critic(obs_space.shape, hidden).to(self.device)
41
+
42
+ # Set optimizer for policy and critic networks
43
+ self.opt = optim.Adam(
44
+ list(self.policy.parameters()) + list(self.critic.parameters()),
45
+ lr=lr
46
+ )
47
+
48
+ self.gamma = gamma
49
+ self.clip = clip_coef
50
+ self.value_coef = value_coef
51
+ self.entropy_coef = entropy_coef
52
+ self.sigma_history = []
53
+ self.loss_history = []
54
+ self.policy_loss_history = []
55
+ self.ppo_avg_loss_history = []
56
+ self.value_loss_history = []
57
+ self.entropy_history = []
58
+ self.lam = lam
59
+ self.ppo_epochs = ppo_epochs
60
+ self.batch_size = batch_size
61
+ self.EPSILON = EPSILON
62
+ self.DEFAULT_BATCH_SIZE = DEFAULT_BATCH_SIZE
63
+ self.DEFAULT_PPO_EPOCHS = DEFAULT_PPO_EPOCHS
64
66
+ self.observeNorm = ObservationNorm()
67
+ self.advantageNorm = AdvantageNorm()
68
+ self.returnNorm = ReturnNorm()
69
+
70
+ self.memory = Memory()
71
+
72
+ # Function to choose action based on current policy
73
+ # Returns: action, log probability, value of the state
74
+ def choose_action(self, observation):
75
+ state = T.as_tensor(observation, dtype=T.float32, device=self.device)
76
+ with T.no_grad():
77
+ dist = self.policy.next_action(state)
78
+ action = dist.sample()
79
+ logp = dist.log_prob(action)
80
+ value = self.critic.evaluated_state(state)
81
+ return int(action.item()), float(logp.item()), float(value.item())
82
+
83
+ # Store reward, state, action in memory
84
+ def remember(self, state, action, reward, done, log_prob, value, next_state):
85
+ with T.no_grad():
86
+ ns = T.as_tensor(next_state, dtype=T.float32, device=self.device)
87
+ next_value = self.critic.evaluated_state(ns).item()
88
+ self.memory.store(state, action, reward, done, log_prob, value, next_value)
89
+
90
+ def _prepare_batch_data(self):
91
+ """Convert memory to tensors."""
92
+ states = T.as_tensor(np.array(self.memory.states), dtype=T.float32, device=self.device)
93
+ actions = T.as_tensor(self.memory.actions, dtype=T.long, device=self.device)
94
+ rewards = T.as_tensor(self.memory.rewards, dtype=T.float32, device=self.device)
95
+ dones = T.as_tensor(self.memory.dones, dtype=T.float32, device=self.device)
96
+ old_logp = T.as_tensor(self.memory.log_probs, dtype=T.float32, device=self.device)
97
+ values = T.as_tensor(self.memory.values, dtype=T.float32, device=self.device)
98
+ return states, actions, rewards, dones, old_logp, values
99
+
100
+ def _compute_gae(self, rewards, values, dones):
101
+ """Compute Generalized Advantage Estimation."""
102
+ with T.no_grad():
103
+ next_values = T.cat([values[1:], values[-1:].clone()])
104
+ deltas = rewards + self.gamma * next_values * (1 - dones) - values
105
+
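+ # GAE recursion over TD errors: A_t = delta_t + gamma * lam * (1 - done_t) * A_{t+1},
+ # with delta_t = r_t + gamma * V(s_{t+1}) * (1 - done_t) - V(s_t) computed above.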
106
+ adv = T.zeros_like(rewards)
107
+ gae = 0.0
108
+ for t in reversed(range(len(rewards))):
109
+ gae = deltas[t] + self.gamma * self.lam * (1 - dones[t]) * gae
110
+ adv[t] = gae
111
+
112
+ returns = adv + values
113
+ return adv, returns
114
+
115
+ def _compute_ppo_loss(self, states, actions, old_logp, returns, advantages):
116
+ """Compute PPO loss components."""
117
+ dist = self.policy.next_action(states)
118
+ new_logp = dist.log_prob(actions)
119
+ entropy = dist.entropy().mean()
120
+ ratio = (new_logp - old_logp).exp()
121
+
122
+ # Clipped surrogate objective
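+ # ratio r_t = pi_new(a_t|s_t) / pi_old(a_t|s_t); PPO maximizes
+ # E[min(r_t * A_t, clip(r_t, 1 - eps, 1 + eps) * A_t)], so the loss is its negative.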
123
+ surr1 = ratio * advantages
124
+ surr2 = T.clamp(ratio, 1 - self.clip, 1 + self.clip) * advantages
125
+ policy_loss = -T.min(surr1, surr2).mean()
126
+
127
+ # Critic loss
128
+ value_pred = self.critic.evaluated_state(states)
129
+ value_loss = 0.5 * (returns - value_pred).pow(2).mean()
130
+
131
+ # Total loss
132
+ total_loss = (
133
+ policy_loss +
134
+ self.value_coef * value_loss -
135
+ self.entropy_coef * entropy
136
+ )
137
+
138
+ return total_loss, policy_loss, value_loss
139
+
140
+ def _ppo_update_loop(self, states, actions, old_logp, returns, adv, use_grad_clip=False):
141
+ """Run PPO training loop over multiple epochs and minibatches."""
142
+ total_loss_epoch = 0.0
143
+ num_samples = len(states)
144
+ batch_size = min(self.DEFAULT_BATCH_SIZE, num_samples)
145
+ ppo_epochs = self.DEFAULT_PPO_EPOCHS
146
+ num_batches = 0  # count minibatch updates so the mean loss can be returned
147
+
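+ # Several passes over the rollout: shuffle indices each epoch and take one gradient step per minibatch.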
148
+ for _ in range(ppo_epochs):
149
+ idxs = T.randperm(num_samples)
150
+ for start in range(0, num_samples, batch_size):
151
+ batch_idx = idxs[start:start + batch_size]
152
+
153
+ b_states = states[batch_idx]
154
+ b_actions = actions[batch_idx]
155
+ b_old_logp = old_logp[batch_idx]
156
+ b_returns = returns[batch_idx]
157
+ b_adv = adv[batch_idx]
158
+
159
+ total_loss, policy_loss, value_loss = self._compute_ppo_loss(
160
+ b_states, b_actions, b_old_logp, b_returns, b_adv
161
+ )
162
+
163
+ self.policy_loss_history.append(policy_loss.item())
164
+ self.value_loss_history.append(value_loss.item())
165
+
166
+ self.opt.zero_grad(set_to_none=True)
167
+ total_loss.backward()
168
+
169
+ if use_grad_clip:
170
+ T.nn.utils.clip_grad_norm_(
171
+ list(self.policy.parameters()) + list(self.critic.parameters()),
172
+ 0.5
173
+ )
174
+
175
+ self.opt.step()
176
+ total_loss_epoch += total_loss.item()
177
+ num_batches += 1
178
+
179
+ return total_loss_epoch / num_batches
180
+
181
+ # Basic PPO update function
182
+ def vanilla_ppo_update(self):
183
+ if len(self.memory.states) == 0:
184
+ return 0.0
185
+
186
+ states, actions, rewards, dones, old_logp, values = self._prepare_batch_data()
187
+ adv, returns = self._compute_gae(rewards, values, dones)
188
+
189
+ with T.no_grad():
190
+ # Advantage normalization
191
+ adv = (adv - adv.mean()) / (adv.std(unbiased=False) + self.EPSILON)
192
+
193
+ avg_total_loss = self._ppo_update_loop(states, actions, old_logp, returns, adv)
194
+ self.ppo_avg_loss_history.append(avg_total_loss)
195
+ self.memory.clear()
196
+ return avg_total_loss
197
+
198
+ # Return Based Scaling PPO update function
199
+ def update_rbs(self):
200
+ if len(self.memory.states) == 0:
201
+ return 0.0
202
+
203
+ states, actions, rewards, dones, old_logp, values = self._prepare_batch_data()
204
+ adv, returns = self._compute_gae(rewards, values, dones)
205
+
206
+ with T.no_grad():
207
+ # Return-based normalization (RBS)
208
+ sigma_t = returns.std(unbiased=False) + 1e-8
209
+ returns = returns / sigma_t
210
+ self.sigma_history.append(sigma_t.item())
211
+ adv = adv / sigma_t
212
+ # Advantage normalization
213
+ adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)
214
+
215
+ avg_loss = self._ppo_update_loop(states, actions, old_logp, returns, adv)
216
+ self.memory.clear()
217
+ return avg_loss
218
+
219
+ # Reward Gradient Clipping PPO update function
220
+ def update_gradient_clipping(self):
221
+ if len(self.memory.states) == 0:
222
+ return 0.0
223
+
224
+ states, actions, rewards, dones, old_logp, values = self._prepare_batch_data()
225
+ adv, returns = self._compute_gae(rewards, values, dones)
226
+
227
+ with T.no_grad():
228
+ # Advantage normalization
229
+ adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)
230
+
231
+ avg_loss = self._ppo_update_loop(states, actions, old_logp, returns, adv, use_grad_clip=True)
232
+ self.memory.clear()
233
+ return avg_loss
234
+
235
+ def update_obs_norm(self):
236
+ if len(self.memory.states) == 0:
237
+ return 0.0
238
+
239
+ # Convert memory to tensors
240
+ states = T.as_tensor(np.array(self.memory.states), dtype=T.float32, device=self.device)
241
+ actions = T.as_tensor(self.memory.actions, dtype=T.long, device=self.device)
242
+ rewards = T.as_tensor(self.memory.rewards, dtype=T.float32, device=self.device)
243
+ dones = T.as_tensor(self.memory.dones, dtype=T.float32, device=self.device)
244
+ old_logp = T.as_tensor(self.memory.log_probs, dtype=T.float32, device=self.device)
245
+ values = T.as_tensor(self.memory.values, dtype=T.float32, device=self.device)
246
+
247
+ with T.no_grad():
248
+ # Compute next values (bootstrap for final step)
249
+ next_values = T.cat([values[1:], values[-1:].clone()])
250
+ deltas = rewards + self.gamma * next_values * (1 - dones) - values
251
+
252
+ # --- GAE-Lambda ---
253
+ adv = T.zeros_like(rewards)
254
+ gae = 0.0
255
+ for t in reversed(range(len(rewards))):
256
+ gae = deltas[t] + self.gamma * self.lam * (1 - dones[t]) * gae
257
+ adv[t] = gae
258
+
259
+ returns = adv + values
260
+
261
+ # --- observation normalization ---
262
+ states = self.observeNorm.normalize(states)
263
+ # Advantage normalization
264
+ # adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)
265
+
266
+ # --- PPO Multiple Epochs + Minibatch ---
267
+ total_loss_epoch = 0.0
268
+ num_samples = len(states)
269
+ batch_size = min(32, num_samples)
270
+ ppo_epochs = 5
271
+
272
+ for _ in range(ppo_epochs):
273
+ # Shuffle indices
274
+ idxs = T.randperm(num_samples)
275
+ for start in range(0, num_samples, batch_size):
276
+ batch_idx = idxs[start:start + batch_size]
277
+
278
+ b_states = states[batch_idx]
279
+ b_actions = actions[batch_idx]
280
+ b_old_logp = old_logp[batch_idx]
281
+ b_returns = returns[batch_idx]
282
+ b_adv = adv[batch_idx]
283
+
284
+ dist = self.policy.next_action(b_states)
285
+ new_logp = dist.log_prob(b_actions)
286
+ entropy = dist.entropy().mean()
287
+ ratio = (new_logp - b_old_logp).exp()
288
+
289
+ # --- Clipped surrogate objective ---
290
+ surr1 = ratio * b_adv
291
+ surr2 = T.clamp(ratio, 1 - self.clip, 1 + self.clip) * b_adv
292
+ policy_loss = -T.min(surr1, surr2).mean()
293
+
294
+ # --- Critic loss ---
295
+ value_pred = self.critic.evaluated_state(b_states)
296
+ value_loss = 0.5 * (b_returns - value_pred).pow(2).mean()
297
+
298
+ # --- Total loss ---
299
+ total_loss = (
300
+ policy_loss +
301
+ self.value_coef * value_loss -
302
+ self.entropy_coef * entropy
303
+ )
304
+
305
+ # Debug: track individual loss components
306
+ self.policy_loss_history.append(policy_loss.item())
307
+ self.value_loss_history.append(value_loss.item())
308
+
309
+ self.opt.zero_grad(set_to_none=True)
310
+ total_loss.backward()
311
+ self.opt.step()
312
+ total_loss_epoch += total_loss.item()
313
+
314
+ # Clear memory after full PPO update
315
+ self.memory.clear()
316
+
317
+ return total_loss_epoch / (ppo_epochs * (num_samples / batch_size))
318
+
319
+
320
+ def update_adv_norm(self):
321
+ if len(self.memory.states) == 0:
322
+ return 0.0
323
+
324
+ # Convert memory to tensors
325
+ states = T.as_tensor(np.array(self.memory.states), dtype=T.float32, device=self.device)
326
+ actions = T.as_tensor(self.memory.actions, dtype=T.long, device=self.device)
327
+ rewards = T.as_tensor(self.memory.rewards, dtype=T.float32, device=self.device)
328
+ dones = T.as_tensor(self.memory.dones, dtype=T.float32, device=self.device)
329
+ old_logp = T.as_tensor(self.memory.log_probs, dtype=T.float32, device=self.device)
330
+ values = T.as_tensor(self.memory.values, dtype=T.float32, device=self.device)
331
+
332
+ with T.no_grad():
333
+ # Compute next values (bootstrap for final step)
334
+ next_values = T.cat([values[1:], values[-1:].clone()])
335
+ deltas = rewards + self.gamma * next_values * (1 - dones) - values
336
+
337
+ # --- GAE-Lambda ---
338
+ adv = T.zeros_like(rewards)
339
+ gae = 0.0
340
+ for t in reversed(range(len(rewards))):
341
+ gae = deltas[t] + self.gamma * self.lam * (1 - dones[t]) * gae
342
+ adv[t] = gae
343
+
344
+ # --- Advantage normalization ---
345
+
346
+ returns = adv + values
347
+
348
+ adv = self.advantageNorm.normalize(adv)
349
+
350
+ # --- PPO Multiple Epochs + Minibatch ---
351
+ total_loss_epoch = 0.0
352
+ num_samples = len(states)
353
+ batch_size = min(32, num_samples)
354
+ ppo_epochs = 5
355
+
356
+ for _ in range(ppo_epochs):
357
+ # Shuffle indices
358
+ idxs = T.randperm(num_samples)
359
+ for start in range(0, num_samples, batch_size):
360
+ batch_idx = idxs[start:start + batch_size]
361
+
362
+ b_states = states[batch_idx]
363
+ b_actions = actions[batch_idx]
364
+ b_old_logp = old_logp[batch_idx]
365
+ b_returns = returns[batch_idx]
366
+ b_adv = adv[batch_idx]
367
+
368
+ dist = self.policy.next_action(b_states)
369
+ new_logp = dist.log_prob(b_actions)
370
+ entropy = dist.entropy().mean()
371
+ ratio = (new_logp - b_old_logp).exp()
372
+
373
+ # --- Clipped surrogate objective ---
374
+ surr1 = ratio * b_adv
375
+ surr2 = T.clamp(ratio, 1 - self.clip, 1 + self.clip) * b_adv
376
+ policy_loss = -T.min(surr1, surr2).mean()
377
+
378
+ # --- Critic loss ---
379
+ value_pred = self.critic.evaluated_state(b_states)
380
+ value_loss = 0.5 * (b_returns - value_pred).pow(2).mean()
381
+
382
+ # --- Total loss ---
383
+ total_loss = (
384
+ policy_loss +
385
+ self.value_coef * value_loss -
386
+ self.entropy_coef * entropy
387
+ )
388
+
389
+ # Debug: track individual loss components
390
+ self.policy_loss_history.append(policy_loss.item())
391
+ self.value_loss_history.append(value_loss.item())
392
+
393
+ self.opt.zero_grad(set_to_none=True)
394
+ total_loss.backward()
395
+ self.opt.step()
396
+ total_loss_epoch += total_loss.item()
397
+
398
+ # Clear memory after full PPO update
399
+ self.memory.clear()
400
+
401
+ return total_loss_epoch / (ppo_epochs * (num_samples / batch_size))
402
+
403
+ def update_return_norm(self):
404
+ if len(self.memory.states) == 0:
405
+ return 0.0
406
+
407
+ # Convert memory to tensors
408
+ states = T.as_tensor(np.array(self.memory.states), dtype=T.float32, device=self.device)
409
+ actions = T.as_tensor(self.memory.actions, dtype=T.long, device=self.device)
410
+ rewards = T.as_tensor(self.memory.rewards, dtype=T.float32, device=self.device)
411
+ dones = T.as_tensor(self.memory.dones, dtype=T.float32, device=self.device)
412
+ old_logp = T.as_tensor(self.memory.log_probs, dtype=T.float32, device=self.device)
413
+ values = T.as_tensor(self.memory.values, dtype=T.float32, device=self.device)
414
+
415
+ with T.no_grad():
416
+ # Compute next values (bootstrap for final step)
417
+ next_values = T.cat([values[1:], values[-1:].clone()])
418
+ deltas = rewards + self.gamma * next_values * (1 - dones) - values
419
+
420
+ # --- GAE-Lambda ---
421
+ adv = T.zeros_like(rewards)
422
+ gae = 0.0
423
+ for t in reversed(range(len(rewards))):
424
+ gae = deltas[t] + self.gamma * self.lam * (1 - dones[t]) * gae
425
+ adv[t] = gae
426
+
427
+ returns = adv + values
428
+
429
+ # --- returns normalization ---
430
+ returns = self.returnNorm.normalize(returns)
431
+
432
+ # Advantage normalization
433
+ # adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)
434
+
435
+ # --- PPO Multiple Epochs + Minibatch ---
436
+ total_loss_epoch = 0.0
437
+ num_samples = len(states)
438
+ batch_size = min(32, num_samples)
439
+ ppo_epochs = 5
440
+
441
+ for _ in range(ppo_epochs):
442
+ # Shuffle indices
443
+ idxs = T.randperm(num_samples)
444
+ for start in range(0, num_samples, batch_size):
445
+ batch_idx = idxs[start:start + batch_size]
446
+
447
+ b_states = states[batch_idx]
448
+ b_actions = actions[batch_idx]
449
+ b_old_logp = old_logp[batch_idx]
450
+ b_returns = returns[batch_idx]
451
+ b_adv = adv[batch_idx]
452
+
453
+ dist = self.policy.next_action(b_states)
454
+ new_logp = dist.log_prob(b_actions)
455
+ entropy = dist.entropy().mean()
456
+ ratio = (new_logp - b_old_logp).exp()
457
+
458
+ # --- Clipped surrogate objective ---
459
+ surr1 = ratio * b_adv
460
+ surr2 = T.clamp(ratio, 1 - self.clip, 1 + self.clip) * b_adv
461
+ policy_loss = -T.min(surr1, surr2).mean()
462
+
463
+ # --- Critic loss ---
464
+ value_pred = self.critic.evaluated_state(b_states)
465
+ value_loss = 0.5 * (b_returns - value_pred).pow(2).mean()
466
+
467
+ # --- Total loss ---
468
+ total_loss = (
469
+ policy_loss +
470
+ self.value_coef * value_loss -
471
+ self.entropy_coef * entropy
472
+ )
473
+
474
+ # Debug: track individual loss components
475
+ self.policy_loss_history.append(policy_loss.item())
476
+ self.value_loss_history.append(value_loss.item())
477
+
478
+ self.opt.zero_grad(set_to_none=True)
479
+ total_loss.backward()
480
+ self.opt.step()
481
+ total_loss_epoch += total_loss.item()
482
+
483
+ # Clear memory after full PPO update
484
+ self.memory.clear()
485
+
486
+ return total_loss_epoch / (ppo_epochs * (num_samples / batch_size))
487
+
488
+ def update_reward_norm(self):
489
+ if len(self.memory.states) == 0:
490
+ return 0.0
491
+
492
+ states = T.as_tensor(np.array(self.memory.states), dtype=T.float32, device=self.device)
493
+ actions = T.as_tensor(self.memory.actions, dtype=T.long, device=self.device)
494
+ rewards = T.as_tensor(self.memory.rewards, dtype=T.float32, device=self.device)
495
+ dones = T.as_tensor(self.memory.dones, dtype=T.float32, device=self.device)
496
+ old_logp = T.as_tensor(self.memory.log_probs, dtype=T.float32, device=self.device)
497
+ values = T.as_tensor(self.memory.values, dtype=T.float32, device=self.device)
498
+
499
+ rewards = (rewards - rewards.mean()) / (rewards.std(unbiased=False) + 1e-8)
500
+
501
+ with T.no_grad():
502
+ next_values = T.cat([values[1:], values[-1:].clone()])
503
+ deltas = rewards + self.gamma * next_values * (1 - dones) - values
504
+
505
+ adv = T.zeros_like(rewards)
506
+ gae = 0.0
507
+ for t in reversed(range(len(rewards))):
508
+ gae = deltas[t] + self.gamma * self.lam * (1 - dones[t]) * gae
509
+ adv[t] = gae
510
+
511
+ returns = adv + values
512
+
513
+ total_loss_epoch = 0.0
514
+ num_samples = len(states)
515
+ batch_size = min(self.batch_size, num_samples)
516
+ ppo_epochs = self.ppo_epochs
517
+
518
+ for _ in range(ppo_epochs):
519
+ idxs = T.randperm(num_samples)
520
+ for start in range(0, num_samples, batch_size):
521
+ batch_idx = idxs[start:start + batch_size]
522
+
523
+ b_states = states[batch_idx]
524
+ b_actions = actions[batch_idx]
525
+ b_old_logp = old_logp[batch_idx]
526
+ b_returns = returns[batch_idx]
527
+ b_adv = adv[batch_idx]
528
+
529
+ dist = self.policy.next_action(b_states)
530
+ new_logp = dist.log_prob(b_actions)
531
+ entropy = dist.entropy().mean()
532
+ ratio = (new_logp - b_old_logp).exp()
533
+
534
+ surr1 = ratio * b_adv
535
+ surr2 = T.clamp(ratio, 1 - self.clip, 1 + self.clip) * b_adv
536
+ policy_loss = -T.min(surr1, surr2).mean()
537
+
538
+ value_pred = self.critic.evaluated_state(b_states)
539
+ value_loss = 0.5 * (b_returns - value_pred).pow(2).mean()
540
+
541
+ total_loss = (
542
+ policy_loss +
543
+ self.value_coef * value_loss -
544
+ self.entropy_coef * entropy
545
+ )
546
+
547
+ self.policy_loss_history.append(policy_loss.item())
548
+ self.value_loss_history.append(value_loss.item())
549
+
550
+ self.opt.zero_grad(set_to_none=True)
551
+ total_loss.backward()
552
+ self.opt.step()
553
+
554
+ total_loss_epoch += total_loss.item()
555
+
556
+ self.memory.clear()
557
+ return total_loss_epoch / (ppo_epochs * (num_samples / batch_size))
558
+
559
+
560
+ # Policy network (CNN)
561
+ class Policy(nn.Module):
562
+ def __init__(self, obs_shape: tuple, action_dim: int, hidden: int):
563
+ super().__init__()
564
+ c, h, w = obs_shape
565
+ # Suggested architecture for Atari: https://arxiv.org/pdf/1312.5602
566
+ self.cnn = nn.Sequential(
567
+ nn.Conv2d(c, 16, kernel_size=8, stride=4),
568
+ nn.ReLU(),
569
+ nn.Conv2d(16, 32, kernel_size=4, stride=2),
570
+ nn.ReLU(),
571
+ nn.Flatten(),
572
+ nn.Linear(32 * 9 * 9, 256), # 2592 → 256
573
+ nn.ReLU(),
574
+ )
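+ # The 32 * 9 * 9 flatten size assumes 84x84 inputs:
+ # 84 -> 20 after the 8x8/stride-4 conv, 20 -> 9 after the 4x4/stride-2 conv.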
575
+
576
+ # Final output layer: one logit per action
577
+ self.net = nn.Linear(256, action_dim)
578
+
579
+ def next_action(self, state: T.Tensor) -> Categorical:
580
+ # state shape should be (B, C, H, W)
581
+ if state.dim() == 3:
582
+ state = state.unsqueeze(0)
583
+
584
+ cnn_out = self.cnn(state) # [B, 256]
585
+ logits = self.net(cnn_out) # [B, action_dim]
586
+ return Categorical(logits=logits)
587
+
588
+
589
+ # Critic network (CNN)
590
+ class Critic(nn.Module):
591
+ def __init__(self, obs_shape: tuple, hidden: int):
592
+ super().__init__()
593
+ c, h, w = obs_shape
594
+ # Suggested architecture for Atari: https://arxiv.org/pdf/1312.5602
595
+ self.cnn = nn.Sequential(
596
+ nn.Conv2d(c, 16, kernel_size=8, stride=4),
597
+ nn.ReLU(),
598
+ nn.Conv2d(16, 32, kernel_size=4, stride=2),
599
+ nn.ReLU(),
600
+ nn.Flatten()
601
+ )
602
+
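+ # Infer the flattened conv output size with a dummy forward pass.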
603
+ with T.no_grad():
604
+ cnn_output_dim = self.cnn(T.zeros(1, c, h, w)).shape[1]
605
+
606
+ self.net = nn.Sequential(
607
+ nn.Linear(cnn_output_dim, hidden),
608
+ nn.ReLU(),
609
+ nn.Linear(hidden, 1)
610
+ )
611
+
612
+ def evaluated_state(self, x: T.Tensor) -> T.Tensor:
613
+ if x.dim() == 3:
614
+ x = x.unsqueeze(0)
615
+ cnn_out = self.cnn(x)
616
+ return self.net(cnn_out).squeeze(-1)
617
+
618
+
619
+ class Memory():
620
+ def __init__(self):
621
+ self.states = []
622
+ self.actions = []
623
+ self.rewards = []
624
+ self.dones = []
625
+ self.log_probs = []
626
+ self.values = []
627
+ self.next_values = []
628
+
629
+ def store(self, state, action, reward, done, log_prob, value, next_value):
630
+ self.states.append(np.asarray(state, dtype=np.float32))
631
+ self.actions.append(int(action))
632
+ self.rewards.append(float(reward))
633
+ self.dones.append(float(done))
634
+ self.log_probs.append(float(log_prob))
635
+ self.values.append(float(value))
636
+ self.next_values.append(float(next_value))
637
+
638
+ def clear(self):
639
+ self.states = []
640
+ self.actions = []
641
+ self.rewards = []
642
+ self.dones = []
643
+ self.log_probs = []
644
+ self.values = []
645
+ self.next_values = []
646
+
647
+
648
+ class ObservationNorm:
649
+
650
+ def normalize(self, x):
651
+ return (x - x.mean()) / (x.std(unbiased=False) + 1e-8) # We add epsilon to make sure that we don't
652
+ # divide by zero.
653
+
654
+
655
+ class AdvantageNorm:
656
+ '''
657
+ This class implements the Advantage Normalization. The purpose is to normalize either across batches or
658
+ only within the same batch.
659
+ '''
660
+
661
+ def normalize(self, x):
662
+ return (x - x.mean()) / (x.std(unbiased=False) + 1e-8) # We add epsilon to make sure that we don't
663
+ # divide by zero.
664
+
665
+
666
+ class ReturnNorm:
667
+ '''
668
+ This class implements the Return Normalization. The purpose is to normalize either across batches or
669
+ only within the same batch.
670
+ '''
671
+
672
+ def normalize(self, x):
673
+ return (x - x.mean()) / (x.std(unbiased=False) + 1e-8)
ppo_main.py ADDED
@@ -0,0 +1,383 @@
1
+ import argparse
2
+ import gymnasium as gym
3
+ import sys
4
+ import matplotlib.pyplot as plt
5
+ import ale_py
6
+ import pandas as pd
7
+
8
+ from ppo_helpers_cnn import *
9
+ from gymnasium.spaces import Box
10
+ import cv2
11
+ import logging
12
+ import numpy as np
13
+
14
+ # Set up logging
15
+ logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
16
+ logger = logging.getLogger(__name__)
17
+
18
+ # Preprocess environment
19
+ def preprocess(obs):
20
+ # Convert to grayscale
21
+ obs = cv2.cvtColor(obs, cv2.COLOR_RGB2GRAY)
22
+ # Resize
23
+ obs = cv2.resize(obs, (84, 84), interpolation=cv2.INTER_AREA)
24
+
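+ # Scale to [0, 1] and add a channel dimension -> shape (1, 84, 84)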
25
+ return np.expand_dims(obs, axis=0).astype(np.float32) / 255.0
26
+
27
+
28
+ import pandas as pd
29
+ import numpy as np
30
+
31
+
32
+ def df_ops(lst_df, seeds):
33
+ for df in lst_df:
34
+ seed_data = df[seeds]
35
+ df['Avg'] = seed_data.mean(axis=1)
36
+ df['High'] = seed_data.max(axis=1)
37
+ df['Low'] = seed_data.min(axis=1)
38
+
39
+ return lst_df
40
+
41
+
42
+ # Main loop
43
+ def main() -> int:
44
+ # Initialize variables
45
+ """
46
+ batches = 5
47
+ steps = 5
48
+ clip_interval = 2
49
+ seeds = [10, 20]
50
+ ep_per_batch = 2
51
+ """
52
+ batches = 1000
53
+ steps = 5
54
+ clip_interval = 2
55
+ seeds = [10, 20, 30, 40, 50]
56
+ ep_per_batch = 5
57
+
58
+ # Arguments - 'vanilla', 'reward_clip', 'rbs', 'grad_clip', 'obs_norm', 'adv_norm', 'return_norm', 'reward_norm'
59
+ """
60
+ 'vanilla', 'reward_clip', 'rbs', 'grad_clip', 'obs_norm', 'adv_norm', 'return_norm', 'reward_norm'
61
+
62
+ python Poster/ppo_main.py --method vanilla --env ALE/Pacman-v5
63
+
64
+ usage examples:
65
+ python3 ppo_main.py --method vanilla
66
+
67
+ python3 ppo_main.py --method grad_clip
68
+
69
+ python3 ppo_main.py --method rbs
70
+ """
71
+ parser = argparse.ArgumentParser(description='PPO Training')
72
+
73
+ parser.add_argument('--method', type=str, choices=['vanilla', 'reward_clip', 'rbs', 'grad_clip',
74
+ 'obs_norm', 'adv_norm', 'return_norm', 'reward_norm'],
75
+ default='vanilla', help='PPO update method')
76
+ parser.add_argument('--env', type=str, default='ALE/Pacman-v5',
77
+ help='Gym environment name (e.g., ALE/Pacman-v5, ALE/SpaceInvaders-v5, ALE/BattleZone-v5)')
78
+ parser.add_argument('--render', action='store_true', help='Enable rendering')
79
+ parser.add_argument('--clip_window', type=int, default=clip_interval,
80
+ help='Number of batches to collect rewards for clipping range update')
81
+
82
+ args = parser.parse_args()
83
+
84
+ # Set up environment
85
+ if args.render:
86
+ env = gym.make(args.env, render_mode='human')
87
+ else:
88
+ env = gym.make(args.env)
89
+
90
+ logger.info(f"Observation space: {env.observation_space}")
91
+ logger.info(f"Action space: {env.action_space}")
92
+ logger.info(f'Method: {args.method}')
93
+
94
+ # Initialize CNN with a dummy observation to get correct input shape
95
+ obs, _ = env.reset()
96
+ dummy_obs_space = Box(low=0.0, high=1.0, shape=preprocess(obs).shape)
97
+
98
+ # Initialize PPO agent
99
+ agent = Agent(obs_space=dummy_obs_space, action_space=env.action_space,
100
+ hidden=64, lr=0.00001, gamma=0.997, clip_coef=0.2,
101
+ entropy_coef=0.01, value_coef=0.5, seed=70,
102
+ batch_size=64, ppo_epochs=32, lam=0.95)
103
+
104
+ # === Return-Based Scaling stats (for RBS method) ===
105
+ r_mean, r_var = 0.0, 1e-8
106
+ g2_mean = 1.0
107
+ agent.r_var = r_var
108
+ agent.g2_mean = g2_mean
109
+
110
+ # Initialize data structure outside the loop
111
+ all_reward_histories = pd.DataFrame(columns=[i for i in seeds], index=[i for i in range(1, batches + 1)])
112
+ all_loss_histories = pd.DataFrame(columns=[i for i in seeds], index=[i for i in range(1, batches + 1)])
113
+ all_policy_loss = pd.DataFrame(columns=[i for i in seeds])
114
+ all_value_loss = pd.DataFrame(columns=[i for i in seeds])
115
+
116
+ # Main update loop
117
+ try:
118
+
119
+ for seed in seeds:
120
+ obs, info = env.reset(seed=seed)
121
+ state = preprocess(obs)
122
+
123
+ loss_history = []
124
+ reward_history = []
125
+ policy_loss_history = []
126
+ value_loss_history = []
127
+
128
+ episode = 0
129
+ total_return = 0
130
+
131
+ steps = [0]
132
+
133
+ """ Update loop: Gradient, Reward Normalization """
134
+ if args.method == 'reward_clip':
135
+ alpha = np.random.uniform(1, 2)
136
+ logger.info(f"α sampled = {alpha:.3f} seed = {seed}")
137
+
138
+ clip_low, clip_high = None, None
139
+ ep_reward_history = []
140
+
141
+ obs, info = env.reset()
142
+ state = preprocess(obs)
143
+
144
+ for update in range(1, batches + 1):
145
+
146
+ batch_episode_returns = [] # used for μ, σ
147
+
148
+ for _ in range(ep_per_batch):
149
+ ep_rewards = []
150
+ done = False
151
+
152
+ while not done:
153
+ action, logp, value = agent.choose_action(state)
154
+ next_obs, reward, terminated, truncated, info = env.step(action)
155
+ done = terminated or truncated
156
+ next_state = preprocess(next_obs)
157
+
158
+ ep_rewards.append(reward)
159
+
160
+ agent.remember(state, action, reward, done, logp, value, next_state)
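+ # NOTE: the raw (unclipped) reward is what gets stored for training; the clipping below
+ # only affects the tracked episode returns used to set the next clip range.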
161
+
162
+ state = next_state
163
+
164
+ if done:
165
+ ep_return = sum(ep_rewards)
166
+ if clip_low is not None:
167
+ clipped_return = np.clip(ep_return, clip_low, clip_high)
168
+ else:
169
+ clipped_return = ep_return
170
+ ep_reward_history.append(clipped_return)
171
+ batch_episode_returns.append(clipped_return)
172
+
173
+ episode += 1
174
+ total_return += clipped_return
175
+
176
+ logger.info(f"Episode {episode} return: {clipped_return:.2f}")
177
+
178
+ obs, info = env.reset()
179
+ state = preprocess(obs)
180
+
181
+ # === Compute clipping bounds from this batch's episode returns ===
182
+ mu = np.mean(batch_episode_returns)
183
+ sigma = (np.std(batch_episode_returns) + 1e-8) if np.std(batch_episode_returns) != 0 else 1.0  # use 1 when all returns in the batch are identical
184
+
185
+ clip_low = mu - alpha * sigma
186
+ clip_high = mu + alpha * sigma
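+ # These bounds form the band μ ± α·σ; they are applied to the episode returns of the
+ # following batch, since they are computed after the current batch is collected.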
187
+
188
+ logger.info(
189
+ f"[UPDATE {update}] New Reward Clip Range: "
190
+ f"[{clip_low:.4f}, {clip_high:.4f}]"
191
+ )
192
+
193
+ # === PPO UPDATE ===
194
+ avg_loss = agent.vanilla_ppo_update()
195
+ loss_history.append(avg_loss)
196
+
197
+ avg_ret = np.mean(batch_episode_returns)
198
+ reward_history.append(avg_ret)
199
+
200
+ logger.info(
201
+ f"Update {update}: batch_mean={avg_ret:.4f}, "
202
+ f"batch_std={np.std(batch_episode_returns):.4f}, "
203
+ f"episodes={episode}, avg_loss={avg_loss:.4f}"
204
+ )
205
+
206
+ # Track the cumulative count of stored minibatch losses so the slices below cover only this update
206
+ current_steps = len(agent.value_loss_history)
207
+ steps.append(current_steps)
208
+ x = len(steps) - 1
209
+ n_new = max(steps[x] - steps[x - 1], 1)
210
+ value_loss_history.append(sum(agent.value_loss_history[steps[x - 1]:steps[x]]) / n_new)
211
+ policy_loss_history.append(sum(agent.policy_loss_history[steps[x - 1]:steps[x]]) / n_new)
213
+
214
+ """ Update loop: Other Normalization Methods """
215
+ else:
216
+ for update in range(1, batches + 1):
217
+ batch_episode_rewards = []
218
+ ep_per_batch = 5  # NOTE: re-set here, overriding the value configured at the top of main()
219
+
220
+ for _ in range(ep_per_batch):
221
+ ep_rewards = []
222
+
223
+ done = False
224
+
225
+ while not done:
226
+ action, logp, value = agent.choose_action(state)
227
+ next_obs, reward, terminated, truncated, info = env.step(action)
228
+ done = terminated or truncated
229
+ next_state = preprocess(next_obs)
230
+
231
+ ep_rewards.append(reward)  # accumulate rewards so the episode return can be computed
232
+ agent.remember(state, action, reward, done, logp, value, next_state)
233
+
234
+ state = next_state
235
+
236
+ if done:
237
+ ep_return = sum(ep_rewards)
238
+ episode += 1
239
+ total_return += ep_return
240
+ batch_episode_rewards.append(ep_return)
241
+ logger.info(f"Episode {episode} return: {ep_return:.2f}")
242
+ obs, info = env.reset()
243
+ state = preprocess(obs)
244
+
245
+
246
+ # Choose normalization method
247
+ if args.method == 'vanilla':
248
+ avg_loss = agent.vanilla_ppo_update()
249
+ elif args.method == 'grad_clip':
250
+ avg_loss = agent.update_gradient_clipping()
251
+ elif args.method == 'obs_norm':
252
+ avg_loss = agent.update_obs_norm()
253
+ elif args.method == 'return_norm':
254
+ avg_loss = agent.update_return_norm()
255
+ elif args.method == 'reward_norm':
256
+ avg_loss = agent.update_reward_norm()
257
+ else: # rbs
258
+ avg_loss = agent.update_rbs()
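+ # Each branch presumably runs the same clipped-surrogate PPO step on the stored rollout,
+ # differing only in the normalization/clipping applied beforehand (see ppo_helpers_cnn.py).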
259
+
260
+ loss_history.append(avg_loss)
261
+
262
+ avg_ret = (total_return / episode) if episode else 0  # running average over all episodes so far (not just this batch)
263
+ reward_history.append(avg_ret)
264
+ logger.info(
265
+ f"Update {update}: episodes={episode}, avg_return={avg_ret:.2f}, avg_loss={avg_loss:.4f}")
266
+
267
+ # Track the cumulative count of stored minibatch losses so the slices below cover only this update
268
+ current_steps = len(agent.value_loss_history)
269
+ steps.append(current_steps)
270
+ x = len(steps) - 1
271
+ n_new = max(steps[x] - steps[x - 1], 1)
272
+ value_loss_history.append(sum(agent.value_loss_history[steps[x - 1]:steps[x]]) / n_new)
+ policy_loss_history.append(sum(agent.policy_loss_history[steps[x - 1]:steps[x]]) / n_new)
273
+
274
+ all_reward_histories[seed] = reward_history
275
+ all_loss_histories[seed] = loss_history
276
+
277
+ # print(agent.value_loss_history)
278
+ all_value_loss[seed] = value_loss_history[1:]
279
+ # print(len(agent.value_loss_history))
280
+ # print(agent.policy_loss_history)
281
+ all_policy_loss[seed] = policy_loss_history[1:]
282
+ # print(len(agent.policy_loss_history))
283
+
284
+ [all_reward_histories, all_loss_histories, all_value_loss, all_policy_loss] = df_ops([all_reward_histories,
285
+ all_loss_histories,
286
+ all_value_loss,
287
+ all_policy_loss], seeds)
288
+ # [all_reward_histories, all_loss_histories] = df_ops([all_reward_histories,
289
+ # all_loss_histories], seeds)
290
+
291
+ all_policy_loss.to_csv(args.method + '_policy_loss.csv')
292
+ all_reward_histories.to_csv(args.method + '_reward_history.csv')
293
+ all_loss_histories.to_csv(args.method + '_loss_history.csv')
294
+ all_value_loss.to_csv(args.method + '_value_loss.csv')
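+ # Per-method CSVs so the different stabilization methods can be compared offline.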
295
+
296
+ fig = plt.figure(figsize=(15, 10))
297
+
298
+ # --- Subplot 1: Average PPO Loss ---
299
+ ax2 = plt.subplot(221)
300
+ # Plot the shaded High-Low Range
301
+ ax2.fill_between(
302
+ all_loss_histories.index,
303
+ all_loss_histories['Low'],
304
+ all_loss_histories['High'],
305
+ color='#A8DADC', # Light blue for aesthetic shading
306
+ alpha=0.5,
307
+ label="High-Low Range"
308
+ )
309
+ # Plot the Average Line
310
+ ax2.plot(all_loss_histories['Avg'], label="Avg Loss", color='#1D3557', linewidth=2)
311
+ ax2.set_ylabel("Average PPO Loss")
312
+ ax2.set_xlabel("PPO Update")
313
+ ax2.legend()
314
+
315
+ # --- Subplot 2: Reward ---
316
+ ax3 = plt.subplot(222)
317
+ # Plot the shaded High-Low Range
318
+ ax3.fill_between(
319
+ all_reward_histories.index,
320
+ all_reward_histories['Low'],
321
+ all_reward_histories['High'],
322
+ color='#FEDCC8', # Light orange/peach
323
+ alpha=0.5,
324
+ label="High-Low Range"
325
+ )
326
+ # Plot the Average Line
327
+ ax3.plot(all_reward_histories['Avg'], label="Avg Reward", color='#E63946', linewidth=2)
328
+ ax3.set_ylabel("Average Reward")
329
+ ax3.set_xlabel("PPO Update")
330
+ ax3.legend()
331
+
332
+ # --- Subplot 3: Policy Loss ---
333
+ ax4 = plt.subplot(223)
334
+ # Plot the shaded High-Low Range
335
+ ax4.fill_between(
336
+ all_policy_loss.index,
337
+ all_policy_loss['Low'],
338
+ all_policy_loss['High'],
339
+ color='#B0E0A0', # Light green
340
+ alpha=0.5,
341
+ label="High-Low Range"
342
+ )
343
+ # Plot the Average Line
344
+ ax4.plot(all_policy_loss['Avg'], label="Policy Loss", color='#38B000', linewidth=2)
345
+ ax4.set_ylabel("Average Policy Loss")
346
+ ax4.set_xlabel("PPO Update")
347
+ ax4.legend()
348
+
349
+ # --- Subplot 4: Value Loss ---
350
+ ax5 = plt.subplot(224)
351
+ # Plot the shaded High-Low Range
352
+ ax5.fill_between(
353
+ all_value_loss.index,
354
+ all_value_loss['Low'],
355
+ all_value_loss['High'],
356
+ color='#D7BDE2', # Light purple
357
+ alpha=0.5,
358
+ label="High-Low Range"
359
+ )
360
+ # Plot the Average Line
361
+ ax5.plot(all_value_loss['Avg'], label="Value Loss", color='#8E44AD', linewidth=2)
362
+ ax5.set_ylabel("Average Value Loss")
363
+ ax5.set_xlabel("PPO Update")
364
+ ax5.legend()
365
+
366
+ # --- Figure Settings ---
367
+ fig.suptitle(f"PPO Training Stability - {args.method}", fontsize=16, fontweight='bold')
368
+ # fig.tight_layout() # Adjust layout to make room for suptitle
369
+ plt.show()
370
+
371
+ except Exception as e:
372
+ logger.error(f"Error: {e}", exc_info=True)
373
+ return 1
374
+ finally:
375
+ avg = total_return / episode if episode else 0
376
+ logger.info(f"\nEpisodes: {episode}, Avg return: {avg:.3f}")
377
+ env.close()
378
+
379
+ return 0
380
+
381
+
382
+ if __name__ == "__main__":
383
+ raise SystemExit(main())