"""Upload a trained PPO LunarLander-v2 agent (plus VecNormalize stats) to the Hugging Face Hub."""

import gymnasium as gym
from huggingface_sb3 import package_to_hub
from stable_baselines3 import PPO
from stable_baselines3.common.monitor import Monitor
from stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize

# Identifiers used for evaluation and for the generated Hub model card.
env_id = "LunarLander-v2"
model_architecture = "PPO"
repo_id = "pkalkman/LunarLander-v2-ppo"
commit_message = "Upload PPO LunarLander-v2 trained agent"
model_name = "ppo-LunarLander-v2"

# Load the best checkpoint saved during training.
model = PPO.load("./logs/best_model/best_model.zip")

# Evaluation environment: Monitor records episode returns/lengths, and
# render_mode="rgb_array" lets package_to_hub capture a replay video.
eval_env = DummyVecEnv([lambda: Monitor(gym.make(env_id, render_mode="rgb_array"))])

# Restore the observation/reward normalization statistics from training.
eval_env = VecNormalize.load("vecnormalize.pkl", eval_env)
# Freeze the running statistics and report raw (unnormalized) rewards:
# evaluation must not update the normalization moments, and the scores
# pushed to the Hub should be in the environment's native reward scale.
eval_env.training = False
eval_env.norm_reward = False

# Evaluate the agent, record a replay, build the model card, and push it all.
package_to_hub(
    model=model,                            # Our trained model
    model_name=model_name,                  # The name of our trained model
    model_architecture=model_architecture,  # The model architecture we used
    env_id=env_id,                          # Name of the environment
    eval_env=eval_env,                      # Evaluation environment
    repo_id=repo_id,                        # Your Hugging Face repo ID
    commit_message=commit_message,          # Commit message for the upload
)