privateboss committed on
Commit f0c8b65 · verified · 1 Parent(s): 89f1fa6

Upload 6 files

Files changed (6)
  1. Environment_Wrapper.py +75 -0
  2. GPU_test.py +19 -0
  3. PPO_Model.py +273 -0
  4. RaceCar.py +49 -0
  5. Train.py +100 -0
  6. Trained_Agent.py +82 -0
Environment_Wrapper.py ADDED
@@ -0,0 +1,75 @@
import gymnasium as gym
import numpy as np
import cv2
from gymnasium.spaces import Box

class CarRacingEnvWrapper(gym.Wrapper):
    """Preprocesses CarRacing observations (grayscale, resize, frame stack) and applies reward shaping."""

    def __init__(self, env, num_stack_frames, grayscale, resize_dim):
        super().__init__(env)
        self.num_stack_frames = num_stack_frames
        self.grayscale = grayscale
        self.resize_dim = resize_dim
        self.orig_observation_space = self.env.observation_space

        if self.grayscale:
            shape = (self.resize_dim[0], self.resize_dim[1], self.num_stack_frames)
        else:
            shape = (self.resize_dim[0], self.resize_dim[1], self.env.observation_space.shape[2] * self.num_stack_frames)

        self.observation_space = Box(low=0, high=255, shape=shape, dtype=np.uint8)
        self.frames = np.zeros(shape, dtype=np.uint8)

    def _process_obs(self, obs):
        # Scale to [0, 1], optionally convert to grayscale, resize, then return uint8.
        obs = obs.astype(np.float32) / 255.0

        if self.grayscale:
            grayscale_obs = np.dot(obs[..., :3], [0.2989, 0.5870, 0.1140])
            resized_obs = cv2.resize(grayscale_obs, self.resize_dim, interpolation=cv2.INTER_AREA)
            processed_obs = np.expand_dims(resized_obs, axis=-1)
        else:
            processed_obs = cv2.resize(obs, self.resize_dim, interpolation=cv2.INTER_AREA)

        return (processed_obs * 255).astype(np.uint8)

    def _stack_frames(self, processed_obs):
        # Shift the stack along the channel axis and write the newest frame into the last slot(s).
        self.frames = np.roll(self.frames, shift=-1, axis=-1)
        if self.grayscale:
            self.frames[..., -1] = processed_obs[..., 0]
        else:
            self.frames[..., -3:] = processed_obs
        return self.frames

    def reset(self, **kwargs):
        obs, info = self.env.reset(**kwargs)
        processed_obs = self._process_obs(obs)

        # Initialise the stack by repeating the first frame along the channel axis.
        if self.grayscale:
            self.frames = np.stack([processed_obs[..., 0]] * self.num_stack_frames, axis=-1)
        else:
            # Concatenate along channels so the shape matches observation_space (H, W, C * num_stack_frames).
            self.frames = np.concatenate([processed_obs] * self.num_stack_frames, axis=-1)

        return self.frames, info

    def step(self, action):
        obs, reward, terminated, truncated, info = self.env.step(action)
        # Record the unshaped environment reward before any modification below.
        info['original_reward'] = info.get('original_reward', reward)

        # Heuristics on a patch of the green channel just ahead of the car
        # (rows 64:72, cols 32:64 of the raw 96x96 frame).
        is_stuck = np.mean(obs[64:72, 32:64, 1]) < 10
        if is_stuck and reward < 0:
            truncated = True

        is_on_grass = np.mean(obs[64:72, 32:64, 1]) > 100
        if is_on_grass:
            reward -= 10

        if info.get('is_complete'):
            reward += 100

        processed_obs = self._process_obs(obs)
        stacked_frames = self._stack_frames(processed_obs)

        return stacked_frames, reward, terminated, truncated, info
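
For reference, a minimal sketch of exercising the wrapper on its own (assuming gymnasium[box2d] and opencv-python are installed; the parameters mirror the defaults used in Train.py):

import gymnasium as gym
import numpy as np
from Environment_Wrapper import CarRacingEnvWrapper

env = gym.make("CarRacing-v3", render_mode="rgb_array", continuous=True)
env = CarRacingEnvWrapper(env, num_stack_frames=4, grayscale=True, resize_dim=(84, 84))

obs, info = env.reset(seed=0)
print(obs.shape, obs.dtype)  # expected: (84, 84, 4) uint8

# One step with [steering, gas, brake]; info carries the unshaped reward.
obs, reward, terminated, truncated, info = env.step(np.array([0.0, 0.1, 0.0], dtype=np.float32))
print(reward, info.get("original_reward"))
env.close()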
GPU_test.py ADDED
@@ -0,0 +1,19 @@
import tensorflow as tf

print("Num GPUs Available: ", len(tf.config.list_physical_devices('GPU')))

gpus = tf.config.list_physical_devices('GPU')
if gpus:
    try:
        # Memory growth must be enabled before the GPUs are initialised.
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        # Raised if memory growth is set after the GPUs have already been initialised.
        print(e)

import gymnasium as gym

env = gym.make("CarRacing-v3", render_mode="rgb_array")
print("CarRacing-v3 environment created successfully.")
env.close()
PPO_Model.py ADDED
@@ -0,0 +1,273 @@
import gymnasium as gym
import numpy as np
from collections import deque
import tensorflow as tf
from keras.optimizers import Adam
import tensorflow_probability as tfp
import os
from datetime import datetime
import json
from RaceCar import ActorCritic
from Environment_Wrapper import CarRacingEnvWrapper
from gymnasium.wrappers import TimeLimit

tfd = tfp.distributions

class PPOAgent:
    def __init__(self, env_id="CarRacing-v3", num_envs=21,
                 gamma=0.99, lam=0.95, clip_epsilon=0.2,
                 actor_lr=3e-4, critic_lr=3e-4,
                 ppo_epochs=10, minibatches=4,
                 steps_per_batch=1024,
                 num_stack_frames=4, resize_dim=(84, 84), grayscale=True,
                 seed=42, log_dir="./ppo_logs",
                 entropy_coeff=0.01,
                 save_interval_timesteps=537600,
                 hidden_layer_sizes=[512, 512, 512]):
        self.env_id = env_id
        self.num_envs = num_envs
        self.gamma = gamma
        self.lam = lam
        self.clip_epsilon = clip_epsilon
        self.ppo_epochs = ppo_epochs
        self.minibatches = minibatches
        self.steps_per_batch = steps_per_batch
        self.num_stack_frames = num_stack_frames
        self.resize_dim = resize_dim
        self.grayscale = grayscale
        self.seed = seed
        self.log_dir = log_dir
        self.entropy_coeff = entropy_coeff
        self.save_interval_timesteps = save_interval_timesteps
        self.hidden_layer_sizes = hidden_layer_sizes

        self.envs = self._make_vec_envs()
        self.action_dim = self.envs.single_action_space.shape[0]
        self.observation_shape = self.envs.single_observation_space.shape

        self.model = ActorCritic(self.action_dim,
                                 self.num_stack_frames,
                                 self.resize_dim[1],
                                 self.resize_dim[0],
                                 hidden_layer_sizes=self.hidden_layer_sizes)
        self.actor_optimizer = Adam(learning_rate=actor_lr)
        self.critic_optimizer = Adam(learning_rate=critic_lr)
        self.train_log_dir = None
        self.summary_writer = None

        tf.random.set_seed(self.seed)
        np.random.seed(self.seed)

        # Build the model once with a dummy batch so weights can be saved/loaded immediately.
        dummy_input = np.zeros((1, *self.observation_shape), dtype=np.uint8)
        _ = self.model(dummy_input)

    def _make_env(self):
        env = gym.make(self.env_id, render_mode="rgb_array", continuous=True)
        env = CarRacingEnvWrapper(env, num_stack_frames=self.num_stack_frames,
                                  grayscale=self.grayscale, resize_dim=self.resize_dim)
        env = TimeLimit(env, max_episode_steps=1000)
        return env

    def _make_vec_envs(self):
        return gym.vector.AsyncVectorEnv([lambda: self._make_env() for _ in range(self.num_envs)])

    def _compute_returns_and_advantages(self, rewards, values, dones):
        # Generalised Advantage Estimation; `values` carries one extra row with the
        # bootstrap value of the observation after the final step.
        advantages = np.zeros_like(rewards, dtype=np.float32)
        last_gae_lam = np.zeros(self.num_envs, dtype=np.float32)
        for t in reversed(range(self.steps_per_batch)):
            next_non_terminal = 1.0 - dones[t]
            delta = rewards[t] + self.gamma * values[t + 1] * next_non_terminal - values[t]
            last_gae_lam = delta + self.gamma * self.lam * next_non_terminal * last_gae_lam
            advantages[t] = last_gae_lam
        returns = advantages + values[:-1]
        return advantages, returns

    @tf.function
    def _train_step(self, observations, actions, old_log_probs, advantages, returns):
        with tf.GradientTape() as tape:
            action_distribution, value_pred = self.model(observations)
            value_pred = tf.squeeze(value_pred, axis=-1)
            critic_loss = tf.reduce_mean(tf.square(returns - value_pred))

            log_prob = action_distribution.log_prob(actions)
            ratio = tf.exp(log_prob - old_log_probs)
            # Guard against numerical blow-ups in the importance ratio.
            ratio = tf.where(tf.math.is_nan(ratio), 1.0, ratio)
            ratio = tf.where(tf.math.is_inf(ratio), tf.sign(ratio) * 1e5, ratio)

            # Clipped surrogate objective.
            pg_loss1 = ratio * advantages
            pg_loss2 = tf.clip_by_value(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon) * advantages
            actor_loss = -tf.reduce_mean(tf.minimum(pg_loss1, pg_loss2))

            entropy = tf.reduce_mean(action_distribution.entropy())
            entropy_loss = -self.entropy_coeff * entropy
            total_loss = actor_loss + critic_loss + entropy_loss

        # A single optimizer updates both actor and critic parameters; critic_optimizer is unused.
        grads = tape.gradient(total_loss, self.model.trainable_variables)
        self.actor_optimizer.apply_gradients(zip(grads, self.model.trainable_variables))
        return actor_loss, critic_loss, entropy, total_loss

    def train(self, total_timesteps, resume_from_timestep=0, resume_model_path=None, run_log_dir=None):
        global_timestep = resume_from_timestep
        ep_rewards = deque(maxlen=100)
        ep_modified_rewards = deque(maxlen=100)
        resume_json_path = "resume_config.json"

        # Resolve the run log directory: an explicit argument wins, then an existing
        # resume_config.json, then a fresh timestamped directory.
        if run_log_dir is None:
            if os.path.exists(resume_json_path):
                with open(resume_json_path, "r") as f:
                    resume_info = json.load(f)
                self.train_log_dir = resume_info.get("run_log_directory")
            if self.train_log_dir is None:
                current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
                self.train_log_dir = os.path.join(self.log_dir, current_time)
        else:
            self.train_log_dir = run_log_dir

        os.makedirs(os.path.join(self.train_log_dir, "checkpoints"), exist_ok=True)
        self.summary_writer = tf.summary.create_file_writer(self.train_log_dir)

        if resume_model_path and os.path.exists(resume_model_path):
            self.model.load_weights(resume_model_path)
            print(f"Resuming training from timestep {global_timestep} and loaded model from: {resume_model_path}")
        elif resume_from_timestep > 0:
            print(f"WARNING: Attempting to resume from timestep {global_timestep} but no valid model path provided or found. Starting fresh.")
        else:
            print("Starting new training run.")

        obs, _ = self.envs.reset(seed=self.seed)
        print(f"Target total timesteps: {total_timesteps}")
        print(f"Current global timestep: {global_timestep}")
        print("Starting training loop...")

        while global_timestep < total_timesteps:
            # Rollout buffers for one batch of experience.
            batch_observations = np.zeros((self.steps_per_batch, self.num_envs, *self.observation_shape), dtype=np.uint8)
            batch_actions = np.zeros((self.steps_per_batch, self.num_envs, self.action_dim), dtype=np.float32)
            batch_rewards = np.zeros((self.steps_per_batch, self.num_envs), dtype=np.float32)
            batch_dones = np.zeros((self.steps_per_batch, self.num_envs), dtype=bool)
            batch_values = np.zeros((self.steps_per_batch, self.num_envs), dtype=np.float32)
            batch_log_probs = np.zeros((self.steps_per_batch, self.num_envs), dtype=np.float32)
            # Placeholder buffer; unshaped rewards are not currently extracted from the vector env info.
            batch_original_rewards = np.zeros((self.steps_per_batch, self.num_envs), dtype=np.float32)

            for i in range(self.steps_per_batch):
                tf_obs = tf.convert_to_tensor(obs, dtype=tf.uint8)
                action_dist, value = self.model(tf_obs)

                action = action_dist.sample()
                log_prob = action_dist.log_prob(action)

                action_np = action.numpy()
                value_np = tf.squeeze(value).numpy()
                log_prob_np = log_prob.numpy()
                next_obs, reward, terminated, truncated, info = self.envs.step(action_np)

                done = terminated | truncated
                batch_observations[i] = obs
                batch_actions[i] = action_np
                batch_rewards[i, :] = reward
                batch_dones[i] = done
                batch_values[i] = value_np
                batch_log_probs[i] = log_prob_np

                # Note: gymnasium vector envs usually return `info` as a dict of arrays;
                # per-env episode statistics require the RecordEpisodeStatistics wrapper.
                for single_env_info in info:
                    if isinstance(single_env_info, dict) and single_env_info.get('episode'):
                        episode_reward = single_env_info['episode']['r']
                        ep_rewards.append(episode_reward)

                obs = next_obs
                global_timestep += self.num_envs

            # Bootstrap the value of the final observation for GAE.
            _, last_values_np = self.model(tf.convert_to_tensor(obs, dtype=tf.uint8))
            last_values_np = tf.squeeze(last_values_np).numpy()
            batch_values = np.concatenate((batch_values, last_values_np[np.newaxis, :]), axis=0)

            advantages, returns = self._compute_returns_and_advantages(
                batch_rewards, batch_values, batch_dones
            )

            # Flatten (steps, envs, ...) -> (steps * envs, ...) for minibatching.
            flat_observations = batch_observations.reshape((-1, *self.observation_shape))
            flat_actions = batch_actions.reshape((-1, self.action_dim))
            flat_old_log_probs = batch_log_probs.reshape(-1)
            flat_advantages = advantages.reshape(-1)
            flat_returns = returns.reshape(-1)
            flat_advantages = (flat_advantages - np.mean(flat_advantages)) / (np.std(flat_advantages) + 1e-8)

            batch_original_total_reward = np.sum(batch_original_rewards)
            batch_modified_total_reward = np.sum(batch_rewards)

            batch_indices = np.arange(self.steps_per_batch * self.num_envs)
            minibatch_size = len(batch_indices) // self.minibatches
            for _ in range(self.ppo_epochs):
                np.random.shuffle(batch_indices)
                for start_idx in range(0, minibatch_size * self.minibatches, minibatch_size):
                    end_idx = start_idx + minibatch_size
                    minibatch_indices = batch_indices[start_idx:end_idx]
                    mb_obs = tf.constant(flat_observations[minibatch_indices], dtype=tf.uint8)
                    mb_actions = tf.constant(flat_actions[minibatch_indices], dtype=tf.float32)
                    mb_old_log_probs = tf.constant(flat_old_log_probs[minibatch_indices], dtype=tf.float32)
                    mb_advantages = tf.constant(flat_advantages[minibatch_indices], dtype=tf.float32)
                    mb_returns = tf.constant(flat_returns[minibatch_indices], dtype=tf.float32)
                    actor_loss, critic_loss, entropy, total_loss = self._train_step(
                        mb_obs, mb_actions, mb_old_log_probs, mb_advantages, mb_returns
                    )

            # TensorBoard logging (losses come from the last minibatch of the last PPO epoch).
            with self.summary_writer.as_default():
                tf.summary.scalar("charts/total_timesteps", global_timestep, step=global_timestep)
                tf.summary.scalar("losses/actor_loss", actor_loss, step=global_timestep)
                tf.summary.scalar("losses/critic_loss", critic_loss, step=global_timestep)
                tf.summary.scalar("losses/entropy", entropy, step=global_timestep)
                tf.summary.scalar("losses/total_loss", total_loss, step=global_timestep)
                if ep_rewards:
                    tf.summary.scalar("charts/avg_episode_reward_original_env", np.mean(ep_rewards), step=global_timestep)
                    tf.summary.scalar("charts/min_episode_reward_original_env", np.min(ep_rewards), step=global_timestep)
                    tf.summary.scalar("charts/max_episode_reward_original_env", np.max(ep_rewards), step=global_timestep)
                tf.summary.scalar("charts/batch_total_reward_original", batch_original_total_reward, step=global_timestep)
                tf.summary.scalar("charts/batch_total_reward_modified", batch_modified_total_reward, step=global_timestep)
                with tf.name_scope('actor_std'):
                    tf.summary.histogram('log_std', self.model.actor_log_std, step=global_timestep)
                    tf.summary.scalar('std_mean_overall', tf.reduce_mean(tf.exp(self.model.actor_log_std)), step=global_timestep)
            self.summary_writer.flush()

            if global_timestep % self.save_interval_timesteps == 0:
                if ep_rewards:
                    avg_orig_reward_str = f"{np.mean(ep_rewards):.2f}"
                else:
                    avg_orig_reward_str = 'N/A'
                print(f"Timestep: {global_timestep}, avg episode reward (last 100 episodes): {avg_orig_reward_str}")
                current_checkpoint_path = os.path.join(self.train_log_dir, "checkpoints", f"Actor-Critic_at_{global_timestep}.weights.h5")
                self.model.save_weights(current_checkpoint_path)
                resume_info = {
                    "last_global_timestep": global_timestep,
                    "last_checkpoint_path": current_checkpoint_path,
                    "run_log_directory": self.train_log_dir
                }
                with open(resume_json_path, "w") as f:
                    json.dump(resume_info, f, indent=4)
                print(f"Resume info saved to {resume_json_path}")

        print("Training finished.")
        self.envs.close()
        self.summary_writer.close()
        final_model_path = os.path.join(self.train_log_dir, "Actor-Critic_final_model.weights.h5")
        self.model.save_weights(final_model_path)
        if os.path.exists(resume_json_path):
            os.remove(resume_json_path)
            print(f"Removed {resume_json_path} as training completed successfully.")

    def evaluate(self, num_episodes=5, render=True, model_path=None):
        eval_env = gym.make(self.env_id, render_mode="human" if render else "rgb_array", continuous=True)
        eval_env = CarRacingEnvWrapper(eval_env, num_stack_frames=self.num_stack_frames,
                                       grayscale=self.grayscale, resize_dim=self.resize_dim)
        if model_path:
            dummy_input = np.zeros((1, *self.observation_shape), dtype=np.uint8)
            _ = self.model(dummy_input)
            self.model.load_weights(model_path)
            print(f"Loaded model from {model_path}")

        episode_rewards = []
        episode_original_rewards = []
        for ep in range(num_episodes):
            obs, _ = eval_env.reset()
            done = False
            total_reward = 0
            total_original_reward = 0
            while not done:
                tf_obs = tf.convert_to_tensor(obs[np.newaxis, :], dtype=tf.uint8)
                action_dist, _ = self.model(tf_obs)
                # Deterministic mean action, clipped to the CarRacing bounds:
                # steering in [-1, 1], gas and brake in [0, 1].
                action = action_dist.mean().numpy().flatten()
                action[0] = np.clip(action[0], -1.0, 1.0)
                action[1] = np.clip(action[1], 0.0, 1.0)
                action[2] = np.clip(action[2], 0.0, 1.0)
                obs, reward, terminated, truncated, info = eval_env.step(action)
                done = terminated or truncated
                total_reward += reward
                total_original_reward += info.get('original_reward', reward)
            episode_rewards.append(total_reward)
            episode_original_rewards.append(total_original_reward)
            print(f"Episode {ep+1} finished. Modified Reward: {total_reward:.2f}, Original Env Reward: {total_original_reward:.2f}")

        eval_env.close()
        print(f"Average modified evaluation reward over {num_episodes} episodes: {np.mean(episode_rewards):.2f}")
        print(f"Average original environment reward over {num_episodes} episodes: {np.mean(episode_original_rewards):.2f}")
        return episode_rewards, episode_original_rewards
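
As a sanity check on _compute_returns_and_advantages, here is the same GAE recursion worked on a tiny hand-made sequence (hypothetical numbers, one environment, two steps):

import numpy as np

gamma, lam = 0.99, 0.95
rewards = np.array([1.0, 2.0], dtype=np.float32)       # r_0, r_1
values = np.array([0.5, 0.6, 0.7], dtype=np.float32)   # V(s_0), V(s_1), bootstrap V(s_2)
dones = np.array([0.0, 0.0], dtype=np.float32)

adv = np.zeros(2, dtype=np.float32)
last = 0.0
for t in reversed(range(2)):
    delta = rewards[t] + gamma * values[t + 1] * (1.0 - dones[t]) - values[t]
    last = delta + gamma * lam * (1.0 - dones[t]) * last
    adv[t] = last
returns = adv + values[:-1]

# delta_1 = 2.0 + 0.99*0.7 - 0.6 = 2.093  ->  adv_1 = 2.093
# delta_0 = 1.0 + 0.99*0.6 - 0.5 = 1.094  ->  adv_0 = 1.094 + 0.99*0.95*2.093 ≈ 3.062
print(adv, returns)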
RaceCar.py ADDED
@@ -0,0 +1,49 @@
import tensorflow as tf
import keras
from keras import layers
import tensorflow_probability as tfp

tfd = tfp.distributions

class ActorCritic(keras.Model):
    def __init__(self, action_dim, num_stack_frames, img_height, img_width, hidden_layer_sizes):
        super().__init__()
        # Shared convolutional encoder over the stacked frames.
        self.conv_layers = keras.Sequential([
            layers.Conv2D(32, 8, strides=4, activation="relu", input_shape=(img_height, img_width, num_stack_frames)),
            layers.Conv2D(64, 4, strides=2, activation="relu"),
            layers.Conv2D(64, 3, strides=1, activation="relu"),
            layers.Flatten(),
        ])

        # Actor head: dense trunk, Gaussian mean, and a state-independent log-std.
        actor_layers = []
        for size in hidden_layer_sizes:
            actor_layers.append(layers.Dense(size, activation="relu"))
        self.common_actor_layer = keras.Sequential(actor_layers)

        self.actor_mean = layers.Dense(action_dim, activation=None)
        self.actor_log_std = tf.Variable(tf.zeros(action_dim, dtype=tf.float32), trainable=True)

        # Critic head: dense trunk and a scalar state value.
        critic_layers = []
        for size in hidden_layer_sizes:
            critic_layers.append(layers.Dense(size, activation="relu"))
        self.common_critic_layer = keras.Sequential(critic_layers)

        self.critic_value = layers.Dense(1, activation=None)

    def call(self, inputs):
        # Inputs are uint8 stacked frames; normalise to [0, 1] before the CNN.
        normalized_inputs = tf.cast(inputs, tf.float32) / 255.0

        features = self.conv_layers(normalized_inputs)

        actor_features = self.common_actor_layer(features)
        mean = self.actor_mean(actor_features)

        std = tf.exp(self.actor_log_std)

        action_distribution = tfd.MultivariateNormalDiag(loc=mean, scale_diag=std)

        critic_features = self.common_critic_layer(features)
        value = self.critic_value(critic_features)

        return action_distribution, value
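
A quick forward-pass sketch (shapes match the defaults used in Train.py) showing that the two heads return a 3-dimensional action distribution and a scalar value per observation:

import numpy as np
from RaceCar import ActorCritic

model = ActorCritic(action_dim=3, num_stack_frames=4, img_height=84, img_width=84,
                    hidden_layer_sizes=[512, 512, 512])
dummy = np.zeros((2, 84, 84, 4), dtype=np.uint8)  # batch of two stacked-frame observations
dist, value = model(dummy)
print(dist.sample().shape)  # (2, 3) sampled [steering, gas, brake] actions
print(value.shape)          # (2, 1) state values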
Train.py ADDED
@@ -0,0 +1,100 @@
import os
import json
from PPO_Model import PPOAgent
from datetime import datetime
import tensorflow as tf

# Enable GPU memory growth before TensorFlow initialises the devices.
gpus = tf.config.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        print("GPU memory growth is enabled.")
    except RuntimeError as e:
        print(e)

if __name__ == '__main__':
    # PPO hyperparameters.
    ENV_ID = "CarRacing-v3"
    NUM_ENVS = 21
    TOTAL_TIMESTEPS = 30_000_000
    SAVE_INTERVAL_TIMESTEPS = 537600
    PPO_EPOCHS = 10
    MINIBATCHES = 4
    STEPS_PER_BATCH = 1024
    GAMMA = 0.99
    LAM = 0.95
    CLIP_EPSILON = 0.2
    ACTOR_LR = 3e-4
    CRITIC_LR = 3e-4
    ENTROPY_COEFF = 0.01

    # Observation preprocessing and network size.
    NUM_STACK_FRAMES = 4
    RESIZE_DIM = (84, 84)
    GRAYSCALE = True
    HIDDEN_LAYER_SIZES = [512, 512, 512]

    SEED = 42
    LOG_DIR = "./ppo_car_racing_logs"
    RESUME_CONFIG_PATH = "resume_config.json"

    resume_from_timestep = 0
    resume_model_path = None
    run_log_dir = None

    # If a resume config exists, pick up the latest checkpoint from the previous run.
    if os.path.exists(RESUME_CONFIG_PATH):
        try:
            with open(RESUME_CONFIG_PATH, "r") as f:
                resume_info = json.load(f)
            run_log_dir = resume_info.get("run_log_directory")

            if run_log_dir:
                checkpoints_dir = os.path.join(run_log_dir, "checkpoints")
                if os.path.exists(checkpoints_dir):
                    checkpoint_files = [f for f in os.listdir(checkpoints_dir) if f.endswith(".weights.h5")]
                    if checkpoint_files:
                        # The most recently modified checkpoint wins.
                        latest_checkpoint = max(checkpoint_files, key=lambda f: os.path.getmtime(os.path.join(checkpoints_dir, f)))
                        resume_model_path = os.path.join(checkpoints_dir, latest_checkpoint)

                        # Checkpoint names look like "Actor-Critic_at_<timestep>.weights.h5".
                        timestep_str = latest_checkpoint.split("_at_")[1].split(".")[0]
                        resume_from_timestep = int(timestep_str)
                        print(f"Resume file found. Will resume from timestep {resume_from_timestep} using model: {resume_model_path}")

        except (IOError, json.JSONDecodeError) as e:
            print(f"Error reading resume file, starting a new session: {e}")
            run_log_dir = None
            if os.path.exists(RESUME_CONFIG_PATH):
                os.remove(RESUME_CONFIG_PATH)

    if not run_log_dir:
        run_log_dir = os.path.join(LOG_DIR, datetime.now().strftime("%Y%m%d-%H%M%S"))

    agent = PPOAgent(
        env_id=ENV_ID,
        num_envs=NUM_ENVS,
        gamma=GAMMA,
        lam=LAM,
        clip_epsilon=CLIP_EPSILON,
        actor_lr=ACTOR_LR,
        critic_lr=CRITIC_LR,
        ppo_epochs=PPO_EPOCHS,
        minibatches=MINIBATCHES,
        steps_per_batch=STEPS_PER_BATCH,
        num_stack_frames=NUM_STACK_FRAMES,
        resize_dim=RESIZE_DIM,
        grayscale=GRAYSCALE,
        seed=SEED,
        log_dir=LOG_DIR,
        entropy_coeff=ENTROPY_COEFF,
        save_interval_timesteps=SAVE_INTERVAL_TIMESTEPS,
        hidden_layer_sizes=HIDDEN_LAYER_SIZES
    )

    agent.train(
        total_timesteps=TOTAL_TIMESTEPS,
        resume_from_timestep=resume_from_timestep,
        resume_model_path=resume_model_path,
        run_log_dir=run_log_dir
    )
Trained_Agent.py ADDED
@@ -0,0 +1,82 @@
import tensorflow as tf
import os
from PPO_Model import PPOAgent

# Enable GPU memory growth before TensorFlow initialises the devices.
gpus = tf.config.list_physical_devices('GPU')
if gpus:
    try:
        for gpu in gpus:
            tf.config.experimental.set_memory_growth(gpu, True)
        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
    except RuntimeError as e:
        print(e)

# Must match the configuration the agent was trained with so the weights load cleanly.
agent_config = {
    "env_id": "CarRacing-v3",
    "num_envs": 21,
    "gamma": 0.99,
    "lam": 0.95,
    "clip_epsilon": 0.2,
    "actor_lr": 3e-4,
    "critic_lr": 3e-4,
    "ppo_epochs": 10,
    "minibatches": 4,
    "steps_per_batch": 1024,
    "num_stack_frames": 4,
    "resize_dim": (84, 84),
    "grayscale": True,
    "seed": 42,
    "log_dir": "./ppo_car_racing_logs",
    "entropy_coeff": 0.01,
    "save_interval_timesteps": 537600,
    "hidden_layer_sizes": [512, 512, 512]
}

if __name__ == "__main__":
    print("Initializing PPO Agent for evaluation...")

    # Note: constructing PPOAgent also spins up the vectorised training environments.
    agent = PPOAgent(**agent_config)

    root_log_dir = "./ppo_car_racing_logs"

    # Find the most recent training run directory.
    latest_log_run_dir = None
    if os.path.exists(root_log_dir):
        all_runs = [os.path.join(root_log_dir, d) for d in os.listdir(root_log_dir) if os.path.isdir(os.path.join(root_log_dir, d))]
        if all_runs:
            latest_log_run_dir = max(all_runs, key=os.path.getmtime)
            print(f"Found latest training run directory: {latest_log_run_dir}")
        else:
            print(f"No training run directories found in {root_log_dir}.")
    else:
        print(f"Log directory {root_log_dir} does not exist. Cannot find trained model.")

    # Prefer the final model saved by PPOAgent.train(); otherwise fall back to the latest checkpoint.
    model_to_load = None
    if latest_log_run_dir:
        final_model_path = os.path.join(latest_log_run_dir, "Actor-Critic_final_model.weights.h5")
        if os.path.exists(final_model_path):
            model_to_load = final_model_path
        else:
            print(f"Final model weights not found in {latest_log_run_dir}. Checking checkpoints...")
            checkpoint_dir = os.path.join(latest_log_run_dir, "checkpoints")
            if os.path.exists(checkpoint_dir):
                all_checkpoints = [os.path.join(checkpoint_dir, f) for f in os.listdir(checkpoint_dir) if f.endswith(".weights.h5")]
                if all_checkpoints:
                    model_to_load = max(all_checkpoints, key=os.path.getmtime)
                    print(f"Loading latest checkpoint: {model_to_load}")
                else:
                    print("No checkpoints found.")
            else:
                print("Checkpoints directory does not exist.")

    if model_to_load:
        print("\n--- Evaluation ---")
        agent.evaluate(num_episodes=10, render=True, model_path=model_to_load)
    else:
        print("No trained model found to evaluate. Please train an agent first.")