zjowowen committed on
Commit
cd452f7
·
1 Parent(s): 6a0273c

Upload README.md with huggingface_hub

Browse files
Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -21,7 +21,7 @@ model-index:
21
  type: OpenAI/Gym/Box2d-LunarLander-v2
22
  metrics:
23
  - type: mean_reward
24
- value: 284.02 +/- 20.47
25
  name: mean_reward
26
  ---
27
 
@@ -45,7 +45,7 @@ This is a simple **PPO** implementation to OpenAI/Gym/Box2d **LunarLander-v2** u
45
  git clone https://github.com/opendilab/huggingface_ding.git
46
  pip3 install -e ./huggingface_ding/
47
  # install environment dependencies if needed
48
- pip3 install DI-engine[common_env]
49
  ```
50
  </details>
51
 
@@ -138,7 +138,7 @@ push_model_to_hub(
138
  github_repo_url="https://github.com/opendilab/DI-engine",
139
  github_doc_model_url="https://di-engine-docs.readthedocs.io/en/latest/12_policies/ppo.html",
140
  github_doc_env_url="https://di-engine-docs.readthedocs.io/en/latest/13_envs/lunarlander.html",
141
- installation_guide="pip3 install DI-engine[common_env]",
142
  usage_file_by_git_clone="./ppo/lunarlander_ppo_deploy.py",
143
  usage_file_by_huggingface_ding="./ppo/lunarlander_ppo_download.py",
144
  train_file="./ppo/lunarlander_ppo.py",
@@ -198,7 +198,7 @@ exp_config = {
198
  - **Demo:** [video](https://huggingface.co/OpenDILabCommunity/LunarLander-v2-PPO/blob/main/replay.mp4)
199
  <!-- Provide the size information for the model. -->
200
  - **Parameters total size:** 371.84 KB
201
- - **Last Update Date:** 2023-09-20
202
 
203
  ## Environments
204
  <!-- Address questions around what environment the model is intended to be trained and deployed at, including the necessary information needed to be provided for future users. -->
 
21
  type: OpenAI/Gym/Box2d-LunarLander-v2
22
  metrics:
23
  - type: mean_reward
24
+ value: 239.76 +/- 86.83
25
  name: mean_reward
26
  ---
27
 
 
45
  git clone https://github.com/opendilab/huggingface_ding.git
46
  pip3 install -e ./huggingface_ding/
47
  # install environment dependencies if needed
48
+ pip3 install DI-engine[common_env,video]
49
  ```
50
  </details>
51
 
 
138
  github_repo_url="https://github.com/opendilab/DI-engine",
139
  github_doc_model_url="https://di-engine-docs.readthedocs.io/en/latest/12_policies/ppo.html",
140
  github_doc_env_url="https://di-engine-docs.readthedocs.io/en/latest/13_envs/lunarlander.html",
141
+ installation_guide="pip3 install DI-engine[common_env,video]",
142
  usage_file_by_git_clone="./ppo/lunarlander_ppo_deploy.py",
143
  usage_file_by_huggingface_ding="./ppo/lunarlander_ppo_download.py",
144
  train_file="./ppo/lunarlander_ppo.py",
 
198
  - **Demo:** [video](https://huggingface.co/OpenDILabCommunity/LunarLander-v2-PPO/blob/main/replay.mp4)
199
  <!-- Provide the size information for the model. -->
200
  - **Parameters total size:** 371.84 KB
201
+ - **Last Update Date:** 2023-09-22
202
 
203
  ## Environments
204
  <!-- Address questions around what environment the model is intended to be trained and deployed at, including the necessary information needed to be provided for future users. -->