Anoozh-Akileswaran committed on
Commit 9d3b1fd · 1 Parent(s): d5efabf

Adrian's first attempt of PPO

.gitignore ADDED
@@ -0,0 +1 @@
+ .idea/
Adrian/ppo_helpers_v2 (1).py ADDED
@@ -0,0 +1,233 @@
+ import numpy as np
+ import torch as T
+ import torch.nn as nn
+ import torch.optim as optim
+ from torch.distributions import Categorical
+
+
+ class Agent():
+     # Minimal PPO-Clip agent (single full-batch update per episode, MC returns)
+     def __init__(
+         self,
+         obs_space,
+         action_space,
+         hidden,
+         gamma,
+         clip_coef,
+         lr,
+         value_coef,
+         entropy_coef,
+         seed
+     ):
+         # Initialize seed for reproducibility
+         if seed is not None:
+             np.random.seed(seed)
+             T.manual_seed(seed)
+
+         # Use GPU if available
+         self.device = T.device('cuda:0' if T.cuda.is_available() else 'cpu')
+         self.obs_dim = int(np.prod(getattr(obs_space, "shape", (obs_space,))))
+         self.action_dim = int(getattr(action_space, "n", action_space))
+
+         # Initialize the policy and the critic networks
+         self.policy = Policy(self.obs_dim, self.action_dim, hidden).to(self.device)
+         self.critic = Critic(self.obs_dim, hidden).to(self.device)
+
+         # Set optimizer for policy and critic networks
+         self.opt = optim.Adam(
+             list(self.policy.parameters()) + list(self.critic.parameters()),
+             lr=lr
+         )
+
+         self.gamma = gamma
+         self.clip = clip_coef
+         self.value_coef = value_coef
+         self.entropy_coef = entropy_coef
+
+         self.memory = Memory()
+
+     def choose_action(self, observation):
+         # Returns: action, log probability, value of the state
+         state = T.as_tensor(observation, dtype=T.float32, device=self.device).view(-1)
+         with T.no_grad():
+             # Forward function (defined in Policy class)
+             dist = self.policy.next_action(state)
+             action = dist.sample()
+             logp = dist.log_prob(action)
+             value = self.critic.evaluated_state(state)
+         return int(action.item()), float(logp.item()), float(value.item())
+
+     def remember(self, state, action, reward, done, log_prob, value, next_state):
+         with T.no_grad():
+             # Pass on next state and have it evaluated by the critic network
+             ns = T.as_tensor(next_state, dtype=T.float32, device=self.device).view(-1)
+             next_value = self.critic.evaluated_state(ns).item()
+         self.memory.store(state, action, reward, done, log_prob, value, next_value)
+
+     """
+     def run_episode(self, env, max_steps: int, render: bool = False):
+         # Runs one episode, updates the policy once at the end
+         self.memory.clear()
+         out = env.reset()
+
+         state = out[0] if isinstance(out, tuple) else out
+
+         ep_return, ep_len = 0, 0
+
+         steps_limit = max_steps if max_steps is not None else float("inf")
+
+         while ep_len < steps_limit:
+             if render and hasattr(env, "render"):
+                 env.render()
+
+             action, logp, value = self.choose_action(state)
+             step_out = env.step(action)
+             if len(step_out) == 5:
+                 next_state, reward, terminated, truncated, _ = step_out
+                 done = terminated or truncated
+             else:
+                 next_state, reward, done, _ = step_out
+
+             self.remember(state, action, reward, done, logp, value, next_state)
+
+             ep_return += float(reward)
+             ep_len += 1
+             state = next_state
+             if done:
+                 break
+
+         self._update()
+         return ep_return, ep_len
+
+     def run_episodes(self, env, n_episodes: int, max_steps: int, render: bool = False):
+         returns = []
+         for _ in range(n_episodes):
+             ep_ret, _ = self.run_episode(env, max_steps=max_steps, render=render)
+             returns.append(ep_ret)
+         return returns
+     """
+
+     def _update(self):
+         if len(self.memory.states) == 0:
+             return
+
+         states = T.as_tensor(np.array(self.memory.states), dtype=T.float32, device=self.device)
+         actions = T.as_tensor(self.memory.actions, dtype=T.long, device=self.device)
+         rewards = T.as_tensor(self.memory.rewards, dtype=T.float32, device=self.device)
+         dones = T.as_tensor(self.memory.dones, dtype=T.float32, device=self.device)
+         old_logp = T.as_tensor(self.memory.log_probs, dtype=T.float32, device=self.device)
+         values = T.as_tensor(self.memory.values, dtype=T.float32, device=self.device)
+
+         # Monte Carlo returns (episode-aware)
+         with T.no_grad():
+             returns = T.zeros_like(rewards)
+             G = 0.0
+             for t in reversed(range(rewards.size(0))):
+                 G = rewards[t] + self.gamma * G * (1.0 - dones[t])
+                 returns[t] = G
+             adv = returns - values
+             adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)
+
+         dist = self.policy.next_action(states)
+         new_logp = dist.log_prob(actions)
+
+         """PPO components: probability ratio, clipped surrogate objective, value and entropy terms"""
+
+         # Updating the policy: probability ratio between the new and old policies
+         ratio = (new_logp - old_logp).exp()
+
+         # Advantage-weighted ratio, unclipped and clipped (per the PPO-Clip update rule)
+         surr1 = ratio * adv
+         surr2 = T.clamp(ratio, 1 - self.clip, 1 + self.clip) * adv
+         value_pred = self.critic.evaluated_state(states)
+
+         # Actor loss: minimize negative of the clipped objective
+         policy_loss = -T.min(surr1, surr2).mean()
+         # Critic loss: MSE of (return - critic value)
+         value_loss = 0.5 * (returns - value_pred).pow(2).mean()
+         # Entropy (account for randomness in action selection)
+         entropy = dist.entropy().mean()
+         # Total loss: policy loss + constant * value loss - constant * entropy
+         total_loss = policy_loss + self.value_coef * value_loss - self.entropy_coef * entropy
+
+         self.opt.zero_grad(set_to_none=True)
+         total_loss.backward()
+         self.opt.step()
+
+         self.memory.clear()
+
+
+ class Policy(nn.Module):
+     def __init__(self, obs_dim: int, action_dim: int, hidden: int):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.Linear(obs_dim, hidden),
+             nn.ReLU(),
+             nn.Linear(hidden, hidden),
+             nn.ReLU(),
+             nn.Linear(hidden, action_dim)
+         )
+
+     def next_action(self, state: T.Tensor) -> Categorical:
+         # Returns the probability distribution over actions
+         if state.dim() == 1:
+             state = state.unsqueeze(0)
+         state = state.view(state.size(0), -1)
+         return Categorical(logits=self.net(state))
+
+
+ class Critic(nn.Module):
+     def __init__(self, obs_dim: int, hidden: int):
+         super().__init__()
+         self.net = nn.Sequential(
+             nn.Linear(obs_dim, hidden),
+             nn.ReLU(),
+             nn.Linear(hidden, hidden),
+             nn.ReLU(),
+             nn.Linear(hidden, 1)
+         )
+
+     def evaluated_state(self, x: T.Tensor) -> T.Tensor:
+         if x.dim() == 1:
+             x = x.unsqueeze(0)
+         x = x.view(x.size(0), -1)
+         return self.net(x).squeeze(-1)
+
+
+ class Memory():
+     def __init__(self):
+         self.states = []
+         self.actions = []
+         self.rewards = []
+         self.dones = []
+         self.log_probs = []
+         self.values = []
+         self.next_values = []
+
+     def store(self, state, action, reward, done, log_prob, value, next_value):
+         self.states.append(np.asarray(state, dtype=np.float32))
+         self.actions.append(int(action))
+         self.rewards.append(float(reward))
+         self.dones.append(float(done))
+         self.log_probs.append(float(log_prob))
+         self.values.append(float(value))
+         self.next_values.append(float(next_value))
+
+     """
+     # For mini-batch updates? To be implemented
+     def start_batch(self, batch_size: int):
+         n_states = len(self.states)
+         starts = np.arange(0, n_states, batch_size)
+         index = np.arange(n_states, dtype=np.int64)
+         np.random.shuffle(index)
+         return [index[s:s + batch_size] for s in starts]
+     """
+
+     def clear(self):
+         self.states = []
+         self.actions = []
+         self.rewards = []
+         self.dones = []
+         self.log_probs = []
+         self.values = []
+         self.next_values = []
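
Not part of this commit: Memory.start_batch above is left commented out "for mini-batch updates". Below is a minimal sketch of how Agent._update could be extended into a multi-epoch, mini-batch PPO-Clip update, assuming start_batch is uncommented and this hypothetical _update_minibatch method is added to Agent. The return and advantage computation is copied unchanged from _update; only the optimisation loop changes. The names _update_minibatch, n_epochs, and batch_size are illustrative, not from the commit.

    def _update_minibatch(self, n_epochs=4, batch_size=64):
        # Hypothetical sketch: requires Memory.start_batch to be uncommented.
        if len(self.memory.states) == 0:
            return

        states = T.as_tensor(np.array(self.memory.states), dtype=T.float32, device=self.device)
        actions = T.as_tensor(self.memory.actions, dtype=T.long, device=self.device)
        rewards = T.as_tensor(self.memory.rewards, dtype=T.float32, device=self.device)
        dones = T.as_tensor(self.memory.dones, dtype=T.float32, device=self.device)
        old_logp = T.as_tensor(self.memory.log_probs, dtype=T.float32, device=self.device)
        values = T.as_tensor(self.memory.values, dtype=T.float32, device=self.device)

        # Same Monte Carlo returns and normalized advantages as in _update
        with T.no_grad():
            returns = T.zeros_like(rewards)
            G = 0.0
            for t in reversed(range(rewards.size(0))):
                G = rewards[t] + self.gamma * G * (1.0 - dones[t])
                returns[t] = G
            adv = returns - values
            adv = (adv - adv.mean()) / (adv.std(unbiased=False) + 1e-8)

        # Several passes over the rollout, each on shuffled mini-batches
        for _ in range(n_epochs):
            for idx in self.memory.start_batch(batch_size):
                b = T.as_tensor(idx, dtype=T.long, device=self.device)
                dist = self.policy.next_action(states[b])
                new_logp = dist.log_prob(actions[b])
                ratio = (new_logp - old_logp[b]).exp()

                surr1 = ratio * adv[b]
                surr2 = T.clamp(ratio, 1 - self.clip, 1 + self.clip) * adv[b]
                value_pred = self.critic.evaluated_state(states[b])

                policy_loss = -T.min(surr1, surr2).mean()
                value_loss = 0.5 * (returns[b] - value_pred).pow(2).mean()
                entropy = dist.entropy().mean()
                loss = policy_loss + self.value_coef * value_loss - self.entropy_coef * entropy

                self.opt.zero_grad(set_to_none=True)
                loss.backward()
                self.opt.step()

        self.memory.clear()
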
Adrian/template_v2_ppo (1).py ADDED
@@ -0,0 +1,68 @@
+ import ale_py  # imported so the ALE/... environments are available in gymnasium
+ import gymnasium as gym
+ import sys
+ import numpy as np
+ from ppo_helpers_v2 import *
+
+
+ def preprocess(obs):
+     # Flatten and normalize uint8 frames to float32 in [0, 1]
+     return obs.astype(np.float32).ravel() / 255.0
+
+
+ def main() -> int:
+     # Initialize environment
+     env = gym.make("ALE/Pacman-v5", render_mode="human")  # consider removing render_mode for training speed
+     # Initialize variables
+     episode = 0
+     total_return = 0
+     ep_return = 0
+     steps = 2000   # environment steps collected per update
+     batches = 100  # number of PPO updates
+
+     # Inspect spaces
+     print("Observation space:", env.observation_space)
+     print("Action space:", env.action_space)
+
+     # Create PPO Agent (adapted to ppo_helpers_v2.Agent signature)
+     agent = Agent(obs_space=env.observation_space, action_space=env.action_space, hidden=64,
+                   lr=3e-4, gamma=0.99, clip_coef=0.2, entropy_coef=0, value_coef=0.5, seed=70)
+
+     try:
+         obs, info = env.reset(seed=42)
+         state = preprocess(obs)
+
+         for update in range(1, batches + 1):
+             for t in range(steps):
+                 action, logp, value = agent.choose_action(state)
+                 next_obs, reward, terminated, truncated, info = env.step(action)
+                 done = terminated or truncated
+                 next_state = preprocess(next_obs)
+
+                 agent.remember(state, action, reward, done, logp, value, next_state)
+
+                 ep_return += reward
+                 state = next_state
+
+                 if done:
+                     episode += 1
+                     total_return += ep_return
+                     print(f"Episode {episode} return: {ep_return:.2f}")
+                     ep_return = 0
+                     obs, info = env.reset()
+                     state = preprocess(obs)
+
+             agent._update()
+             avg_ret = (total_return / episode) if episode else 0
+             print(f"Update {update}: episodes={episode}, avg_return={avg_ret:.2f}")
+
+     except Exception as e:
+         print(f"Error: {e}", file=sys.stderr)
+         return 1
+     finally:
+         avg = total_return / episode if episode else 0
+         print(f"\nEpisodes: {episode}, Avg return: {avg:.3f}")
+         env.close()
+
+     return 0
+
+
+ if __name__ == "__main__":
+     raise SystemExit(main())
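
Not part of this commit: before committing to the much slower ALE/Pacman-v5 run above, the Agent can be sanity-checked on a small environment with a flat observation vector. The sketch below uses CartPole-v1 (an illustrative choice, not something the commit uses) and exercises the same choose_action / remember / _update API, with one full-batch update per episode; it assumes the helper file is importable as ppo_helpers_v2, and all hyperparameters are illustrative.

# Hypothetical smoke test (not in the commit): same Agent API on CartPole-v1.
import gymnasium as gym
from ppo_helpers_v2 import Agent  # assumes the helper file is importable under this name

env = gym.make("CartPole-v1")
agent = Agent(obs_space=env.observation_space, action_space=env.action_space,
              hidden=64, gamma=0.99, clip_coef=0.2, lr=3e-4,
              value_coef=0.5, entropy_coef=0.01, seed=0)

for episode in range(50):
    state, _ = env.reset(seed=episode)
    ep_return, done = 0.0, False
    while not done:
        action, logp, value = agent.choose_action(state)
        next_state, reward, terminated, truncated, _ = env.step(action)
        done = terminated or truncated
        agent.remember(state, action, reward, done, logp, value, next_state)
        ep_return += reward
        state = next_state
    agent._update()  # one full-batch PPO update at the end of each episode
    print(f"episode {episode}: return {ep_return:.1f}")
env.close()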