Taranosaurus committed on
Commit
cd587f2
·
verified ·
1 Parent(s): 66be0ef

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +50 -3
README.md CHANGED
@@ -26,12 +26,59 @@ This is a trained model of a **PPO** agent playing **LunarLander-v2**
26
  using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
27
 
28
  ## Usage (with Stable-baselines3)
29
- TODO: Add your code
30
 
31
 
32
  ```python
33
- from stable_baselines3 import ...
34
- from huggingface_sb3 import load_from_hub
 
 
 
 
 
 
35
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  ...
37
  ```
 
26
  using the [stable-baselines3 library](https://github.com/DLR-RM/stable-baselines3).
27
 
28
  ## Usage (with Stable-baselines3)
 
29
 
30
 
31
  ```python
32
+ import gymnasium as gym
33
+ from pyvirtualdisplay import Display
34
+ from huggingface_sb3 import load_from_hub, package_to_hub
35
+ from huggingface_hub import notebook_login # To log to our Hugging Face account to be able to upload models to the Hub.
36
+ from stable_baselines3 import PPO
37
+ from stable_baselines3.common.env_util import make_vec_env
38
+ from stable_baselines3.common.evaluation import evaluate_policy
39
+ from stable_baselines3.common.monitor import Monitor
40
 
41
+ virtual_display = Display(visible=0, size=(1400, 900))
42
+ virtual_display.start()
43
+
44
+ env = gym.make("LunarLander-v2")
45
+
46
+ observation, info = env.reset()
47
+
48
+ for _ in range(20):
49
+     action = env.action_space.sample()
50
+     print("Action taken:", action)
51
+
52
+     observation, reward, terminated, truncated, info = env.step(action)
53
+
54
+     if terminated or truncated:
55
+         # Reset the environment
56
+         print("Environment is reset")
57
+         observation, info = env.reset()
58
+
59
+
60
+ env.reset()
61
+
62
+ print("_____OBSERVATION SPACE_____ \n")
63
+ print("Observation Space Shape", env.observation_space.shape)
64
+ print("Sample observation", env.observation_space.sample())
65
+
66
+ print("\n _____ACTION SPACE_____ \n")
67
+ print("Action Space Shape", env.action_space.n)
68
+ print("Action Space Sample", env.action_space.sample()) # Take a random action
69
+
70
+ env = make_vec_env('LunarLander-v2', n_envs=16)
71
+
72
+ model = PPO(policy = 'MlpPolicy', env = env, n_steps = 1024, batch_size = 64, n_epochs = 4, gamma = 0.999, gae_lambda = 0.98, ent_coef = 0.01, verbose=1)
73
+
74
+
75
+ model.learn(total_timesteps=1000000)
76
+
77
+ model_name = "ppo-LunarLander-v2"
78
+ model.save(model_name)
79
+
80
+ eval_env = Monitor(gym.make("LunarLander-v2", render_mode='rgb_array'))
81
+ mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)
82
+ print(f"mean_reward={mean_reward:.2f} +/- {std_reward}")
83
  ...
84
  ```