Instructions to use H2Ozone/dorm_training with libraries, inference providers, notebooks, and local apps. Follow these links to get started.
- Libraries
- LeRobot
How to use H2Ozone/dorm_training with LeRobot:
# See https://github.com/huggingface/lerobot?tab=readme-ov-file#installation for more details git clone https://github.com/huggingface/lerobot.git cd lerobot pip install -e .[smolvla]
# Launch finetuning on your dataset python lerobot/scripts/train.py \ --policy.path=H2Ozone/dorm_training \ --dataset.repo_id=lerobot/svla_so101_pickplace \ --batch_size=64 \ --steps=20000 \ --output_dir=outputs/train/my_smolvla \ --job_name=my_smolvla_training \ --policy.device=cuda \ --wandb.enable=true
# Run the policy using the record function python -m lerobot.record \ --robot.type=so101_follower \ --robot.port=/dev/ttyACM0 \ # <- Use your port --robot.id=my_blue_follower_arm \ # <- Use your robot id --robot.cameras="{ front: {type: opencv, index_or_path: 8, width: 640, height: 480, fps: 30}}" \ # <- Use your cameras --dataset.single_task="Grasp a lego block and put it in the bin." \ # <- Use the same task description you used in your dataset recording --dataset.repo_id=HF_USER/dataset_name \ # <- This will be the dataset name on HF Hub --dataset.episode_time_s=50 \ --dataset.num_episodes=10 \ --policy.path=H2Ozone/dorm_training
- Notebooks
- Google Colab
- Kaggle
File size: 2,478 Bytes
{
"type": "smolvla",
"n_obs_steps": 1,
"input_features": {
"observation.state": {
"type": "STATE",
"shape": [
6
]
},
"observation.images.camera1": {
"type": "VISUAL",
"shape": [
3,
256,
256
]
},
"observation.images.camera2": {
"type": "VISUAL",
"shape": [
3,
256,
256
]
},
"observation.images.camera3": {
"type": "VISUAL",
"shape": [
3,
256,
256
]
}
},
"output_features": {
"action": {
"type": "ACTION",
"shape": [
6
]
}
},
"device": "cuda",
"use_amp": false,
"use_peft": false,
"push_to_hub": true,
"repo_id": "H2Ozone/dorm_training",
"private": null,
"tags": null,
"license": null,
"pretrained_path": "lerobot/smolvla_base",
"chunk_size": 50,
"n_action_steps": 50,
"normalization_mapping": {
"VISUAL": "IDENTITY",
"STATE": "MEAN_STD",
"ACTION": "MEAN_STD"
},
"max_state_dim": 32,
"max_action_dim": 32,
"resize_imgs_with_padding": [
512,
512
],
"empty_cameras": 0,
"adapt_to_pi_aloha": false,
"use_delta_joint_actions_aloha": false,
"tokenizer_max_length": 48,
"num_steps": 10,
"use_cache": true,
"freeze_vision_encoder": true,
"train_expert_only": true,
"train_state_proj": true,
"optimizer_lr": 0.0001,
"optimizer_betas": [
0.9,
0.95
],
"optimizer_eps": 1e-08,
"optimizer_weight_decay": 1e-10,
"optimizer_grad_clip_norm": 10.0,
"scheduler_warmup_steps": 1000,
"scheduler_decay_steps": 30000,
"scheduler_decay_lr": 2.5e-06,
"vlm_model_name": "HuggingFaceTB/SmolVLM2-500M-Video-Instruct",
"load_vlm_weights": true,
"add_image_special_tokens": false,
"attention_mode": "cross_attn",
"prefix_length": 0,
"pad_language_to": "max_length",
"num_expert_layers": 0,
"num_vlm_layers": 16,
"self_attn_every_n_layers": 2,
"expert_width_multiplier": 0.75,
"min_period": 0.004,
"max_period": 4.0,
"rtc_config": null,
"compile_model": false,
"compile_mode": "max-autotune"
}