Update README.md
README.md CHANGED
````diff
@@ -30,8 +30,46 @@ TODO: Add your code
 
 
 ```python
-
+# In Colab, install packages if required:
+# gymnasium[box2d]: Contains the LunarLander-v2 environment 🌛
+# stable-baselines3[extra]: The deep reinforcement learning library.
+# huggingface_sb3: Additional code for Stable-baselines3 to load and upload models from the Hugging Face 🤗 Hub.
+!apt install swig cmake
+!pip install gymnasium[box2d] stable-baselines3[extra] huggingface-sb3
+
+import gymnasium as gym
 from huggingface_sb3 import load_from_hub
+from stable_baselines3 import PPO
+from stable_baselines3.common.vec_env import DummyVecEnv
+from stable_baselines3.common.evaluation import evaluate_policy
+from stable_baselines3.common.monitor import Monitor
+
+# Create the evaluation environment
+env_id = "LunarLander-v2"
+eval_env = Monitor(gym.make(env_id, render_mode="human"))
+
+# Load the saved agent
+repo_id = "davidkh/ppo-LunarLander-v2"
+filename = "ppo-LunarLander-v2.zip"
+checkpoint = load_from_hub(repo_id, filename)
+model = PPO.load(checkpoint, print_system_info=True)
+
+# Evaluate the agent over 10 episodes
+mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes=10, deterministic=True)
+print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
+
+
+# Example use of the trained agent
+observation, info = eval_env.reset()
+for _ in range(1000):
+    eval_env.render()
+    action, _states = model.predict(observation, deterministic=True)
+    observation, reward, terminated, truncated, info = eval_env.step(action)
+    if terminated or truncated:
+        print("Episode finished, resetting environment")
+        observation, info = eval_env.reset()
+
+eval_env.close()
 
 ...
 ```
````
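One note on the wrapper used above: stable-baselines3's `Monitor` logs per-episode statistics (reward, length, time) to a `*.monitor.csv` file; it does not record video. To actually capture an MP4 of the evaluation, gymnasium's `RecordVideo` wrapper can be used. A minimal sketch, assuming `moviepy` is installed; the `./videos` folder and the random policy are placeholders:

```python
import gymnasium as gym
from gymnasium.wrappers import RecordVideo

# RecordVideo consumes rgb_array frames and writes MP4 files via moviepy.
video_env = RecordVideo(
    gym.make("LunarLander-v2", render_mode="rgb_array"),
    video_folder="./videos",               # output directory (placeholder)
    episode_trigger=lambda episode: True,  # record every episode
)

observation, info = video_env.reset()
done = False
while not done:
    # Placeholder policy; swap in model.predict(observation, deterministic=True).
    action = video_env.action_space.sample()
    observation, reward, terminated, truncated, info = video_env.step(action)
    done = terminated or truncated

video_env.close()  # finalizes and writes the video file
```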
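The install comment also mentions uploading models to the Hub. For the reverse direction, `huggingface_sb3` provides `push_to_hub`; a minimal sketch, assuming you are authenticated (e.g. via `huggingface-cli login`) and reusing the repo and file names from the example above:

```python
from huggingface_sb3 import push_to_hub

# `model` is the loaded PPO agent from the snippet above.
# Saving produces ppo-LunarLander-v2.zip in the working directory.
model.save("ppo-LunarLander-v2")

push_to_hub(
    repo_id="davidkh/ppo-LunarLander-v2",  # target Hub repo
    filename="ppo-LunarLander-v2.zip",     # local file to upload
    commit_message="Upload PPO LunarLander-v2 agent",
)
```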