poolvarine committed on
Commit
0058056
·
verified ·
1 Parent(s): d79c8fd

Upload 203 files

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +7 -0
  2. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/buffer/transitions_33000.pkl +3 -0
  3. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/buffer/transitions_36000.pkl +3 -0
  4. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/buffer/transitions_39000.pkl +3 -0
  5. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/buffer/transitions_42000.pkl +3 -0
  6. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/_CHECKPOINT_METADATA +1 -0
  7. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/_METADATA +0 -0
  8. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/array_metadatas/process_0 +0 -0
  9. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/d/64ed86e138b262145259faaf8de0d94b +0 -0
  10. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/manifest.ocdbt +0 -0
  11. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/ocdbt.process_0/d/0e75a21ebe9c2ab362e879db53d18257 +3 -0
  12. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/ocdbt.process_0/d/8fc8da3d95e23e92ab384b61936f6e13 +0 -0
  13. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/ocdbt.process_0/d/951d9a2edb1f9e8c158d5d2db8fc792b +0 -0
  14. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/ocdbt.process_0/d/c32f11e2a1d8988a3f73eeb04c9a327b +3 -0
  15. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/ocdbt.process_0/d/e07dd1c595617d055c6f31cb89ffd37c +0 -0
  16. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/ocdbt.process_0/d/f3fcaaa8e30c640af4b9db9a4e6b42eb +3 -0
  17. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/ocdbt.process_0/manifest.ocdbt +0 -0
  18. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/_CHECKPOINT_METADATA +1 -0
  19. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/_METADATA +0 -0
  20. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/array_metadatas/process_0 +0 -0
  21. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/d/43fa8c81079d04b3a9456a46df4e954b +0 -0
  22. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/manifest.ocdbt +0 -0
  23. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/ocdbt.process_0/d/4cddfe17387748b18bea87635e1f7c52 +0 -0
  24. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/ocdbt.process_0/d/5f329704476571c7c8f0827f7ec2291d +0 -0
  25. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/ocdbt.process_0/d/aca056ee932536a68a9897bbb8c7b786 +0 -0
  26. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/ocdbt.process_0/d/bbe1f3b85b3b9c0bc794ccaf87d74561 +0 -0
  27. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/ocdbt.process_0/d/cea5c7ae45012fc6774dd5a3334b1b2f +3 -0
  28. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/ocdbt.process_0/d/fdd9a4bba940947e2317cf0e771e6916 +3 -0
  29. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/ocdbt.process_0/manifest.ocdbt +0 -0
  30. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/_CHECKPOINT_METADATA +1 -0
  31. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/_METADATA +0 -0
  32. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/array_metadatas/process_0 +0 -0
  33. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/d/47c88bcae99fa94c56fe553ff32430d0 +0 -0
  34. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/manifest.ocdbt +0 -0
  35. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/ocdbt.process_0/d/595487b98986151b896c7346ed505b87 +3 -0
  36. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/ocdbt.process_0/d/61d9900065e47297083908220731cb58 +0 -0
  37. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/ocdbt.process_0/d/669214ea056858cc2b3edc6973ddf799 +0 -0
  38. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/ocdbt.process_0/d/727568051ff325fb128076ca7600df64 +0 -0
  39. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/ocdbt.process_0/d/72daa7e7baae8b74fc61f7fe9677bf3e +0 -0
  40. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/ocdbt.process_0/d/8556eb97bb6ce535ac8a4aead103083f +3 -0
  41. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/ocdbt.process_0/manifest.ocdbt +0 -0
  42. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/artifact/2373403493/wandb_manifest.json +1 -0
  43. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/code/examples/train_rlpd.py +792 -0
  44. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/config.yaml +166 -0
  45. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/history_full.csv +0 -0
  46. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/output.log +0 -0
  47. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/plots/actor_acator_objective.png +3 -0
  48. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/plots/actor_actor_loss.png +3 -0
  49. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/plots/actor_entropy.png +3 -0
  50. experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/plots/actor_lr.png +3 -0
.gitattributes CHANGED
@@ -94,3 +94,10 @@ experiments/cube_stacking_gym/RL/logs/checkpoint_30000/ocdbt.process_0/d/d55d126
  experiments/cube_stacking_gym/RL/logs/checkpoint_5000/ocdbt.process_0/d/0133c1dd6362c099b6a5270874a06c04 filter=lfs diff=lfs merge=lfs -text
  experiments/cube_stacking_gym/RL/logs/checkpoint_5000/ocdbt.process_0/d/100a3f048c1567670f069d6c8e7a8a13 filter=lfs diff=lfs merge=lfs -text
  experiments/cube_stacking_gym/RL/logs/checkpoint_5000/ocdbt.process_0/d/effd20c1371ee4069f1a73672e0e0049 filter=lfs diff=lfs merge=lfs -text
+ experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting[[:space:]]the[[:space:]]reward[[:space:]]func/checkpoint_10000/ocdbt.process_0/d/0e75a21ebe9c2ab362e879db53d18257 filter=lfs diff=lfs merge=lfs -text
+ experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting[[:space:]]the[[:space:]]reward[[:space:]]func/checkpoint_10000/ocdbt.process_0/d/c32f11e2a1d8988a3f73eeb04c9a327b filter=lfs diff=lfs merge=lfs -text
+ experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting[[:space:]]the[[:space:]]reward[[:space:]]func/checkpoint_10000/ocdbt.process_0/d/f3fcaaa8e30c640af4b9db9a4e6b42eb filter=lfs diff=lfs merge=lfs -text
+ experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting[[:space:]]the[[:space:]]reward[[:space:]]func/checkpoint_15000/ocdbt.process_0/d/cea5c7ae45012fc6774dd5a3334b1b2f filter=lfs diff=lfs merge=lfs -text
+ experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting[[:space:]]the[[:space:]]reward[[:space:]]func/checkpoint_15000/ocdbt.process_0/d/fdd9a4bba940947e2317cf0e771e6916 filter=lfs diff=lfs merge=lfs -text
+ experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting[[:space:]]the[[:space:]]reward[[:space:]]func/checkpoint_5000/ocdbt.process_0/d/595487b98986151b896c7346ed505b87 filter=lfs diff=lfs merge=lfs -text
+ experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting[[:space:]]the[[:space:]]reward[[:space:]]func/checkpoint_5000/ocdbt.process_0/d/8556eb97bb6ce535ac8a4aead103083f filter=lfs diff=lfs merge=lfs -text
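Note the escaping above: the directory name "RLPD_Checkpoints_exploiting the reward func" contains spaces, and a .gitattributes pattern cannot hold a literal space, so each one is written as the [[:space:]] character class (this is what `git lfs track` emits for such paths). A minimal Python sketch of the same escaping; the helper name is ours, not part of this repo:

    # Hypothetical helper: reproduce the [[:space:]] escaping used in
    # .gitattributes for tracked paths that contain spaces.
    def escape_gitattributes_path(path: str) -> str:
        return path.replace(" ", "[[:space:]]")

    print(escape_gitattributes_path(
        "RLPD_Checkpoints_exploiting the reward func/checkpoint_5000"
    ))
    # -> RLPD_Checkpoints_exploiting[[:space:]]the[[:space:]]reward[[:space:]]func/checkpoint_5000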
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/buffer/transitions_33000.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:05af4da05513d9f5b40de1265c73422c17f704fc51625f7c75bd731db34f8c7c
+ size 1181718334
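Each of these replay-buffer .pkl entries is stored as a Git LFS pointer rather than the data itself: a three-line text stub carrying the spec version, the SHA-256 of the real object, and its byte size (about 1.18 GB per shard here). A small sketch for reading such a pointer, assuming only the "key value" line format shown above:

    def parse_lfs_pointer(path: str) -> dict:
        # An LFS pointer is plain text, one "key value" pair per line,
        # e.g. "oid sha256:05af4d..." and "size 1181718334".
        fields = {}
        with open(path) as f:
            for line in f:
                key, _, value = line.strip().partition(" ")
                fields[key] = value
        return fields

    ptr = parse_lfs_pointer("buffer/transitions_33000.pkl")
    print(ptr["oid"], int(ptr["size"]))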
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/buffer/transitions_36000.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:637e8df67b456806e241f319cf30de59b12db6436bdfec4a99d64b404b8e7670
+ size 1181718334
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/buffer/transitions_39000.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:23f63ea044180eb2319c0519243a55647f9c09e56aedccfc648548c8315388c9
+ size 1181718334
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/buffer/transitions_42000.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4a8b0799b413626181a31f20d98c5349b91ece352b0af7c1adaa4925436277c5
+ size 1181718334
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/_CHECKPOINT_METADATA ADDED
@@ -0,0 +1 @@
+ {"item_handlers": "orbax.checkpoint._src.handlers.pytree_checkpoint_handler.PyTreeCheckpointHandler", "metrics": {}, "performance_metrics": {}, "init_timestamp_nsecs": 1768240431363748046, "commit_timestamp_nsecs": 1768240431974967249, "custom_metadata": {}}
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/_METADATA ADDED
The diff for this file is too large to render.
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/array_metadatas/process_0 ADDED
The diff for this file is too large to render.
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/d/64ed86e138b262145259faaf8de0d94b ADDED
Binary file (87.4 kB).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/manifest.ocdbt ADDED
Binary file (119 Bytes).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/ocdbt.process_0/d/0e75a21ebe9c2ab362e879db53d18257 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bc407cdcc6e27b74fd11b46150f47aa72099c86749ab8655f47441f937665e4b
+ size 260507
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/ocdbt.process_0/d/8fc8da3d95e23e92ab384b61936f6e13 ADDED
Binary file (1.54 kB).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/ocdbt.process_0/d/951d9a2edb1f9e8c158d5d2db8fc792b ADDED
Binary file (87.4 kB).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/ocdbt.process_0/d/c32f11e2a1d8988a3f73eeb04c9a327b ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:afe37a0d20d690223b34070b2a97250a66056f0fb7cd6d5604e59a85b9da81a4
+ size 87101440
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/ocdbt.process_0/d/e07dd1c595617d055c6f31cb89ffd37c ADDED
Binary file (171 Bytes).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/ocdbt.process_0/d/f3fcaaa8e30c640af4b9db9a4e6b42eb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:35ec3b763c8277a03040e76d1afa9a781179fb997985a1a66c2c62e05ff4cfd6
+ size 46157824
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_10000/ocdbt.process_0/manifest.ocdbt ADDED
Binary file (355 Bytes).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/_CHECKPOINT_METADATA ADDED
@@ -0,0 +1 @@
+ {"item_handlers": "orbax.checkpoint._src.handlers.pytree_checkpoint_handler.PyTreeCheckpointHandler", "metrics": {}, "performance_metrics": {}, "init_timestamp_nsecs": 1768241973050295003, "commit_timestamp_nsecs": 1768241974023474249, "custom_metadata": {}}
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/_METADATA ADDED
The diff for this file is too large to render.
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/array_metadatas/process_0 ADDED
The diff for this file is too large to render.
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/d/43fa8c81079d04b3a9456a46df4e954b ADDED
Binary file (86.5 kB).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/manifest.ocdbt ADDED
Binary file (119 Bytes).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/ocdbt.process_0/d/4cddfe17387748b18bea87635e1f7c52 ADDED
Binary file (1.48 kB).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/ocdbt.process_0/d/5f329704476571c7c8f0827f7ec2291d ADDED
Binary file (6.39 kB).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/ocdbt.process_0/d/aca056ee932536a68a9897bbb8c7b786 ADDED
Binary file (171 Bytes).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/ocdbt.process_0/d/bbe1f3b85b3b9c0bc794ccaf87d74561 ADDED
Binary file (86.5 kB).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/ocdbt.process_0/d/cea5c7ae45012fc6774dd5a3334b1b2f ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d45ee2dc37ef7e59a8a738cc3a674ba64325d94bc5bd7110cd81cbd771a06391
+ size 15650816
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/ocdbt.process_0/d/fdd9a4bba940947e2317cf0e771e6916 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0dc2bdca7c11e3de4d3bb3b73b9e4c0249c8b582c7ce7731aeac9b6b2de4bb16
+ size 117821440
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_15000/ocdbt.process_0/manifest.ocdbt ADDED
Binary file (351 Bytes).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/_CHECKPOINT_METADATA ADDED
@@ -0,0 +1 @@
+ {"item_handlers": "orbax.checkpoint._src.handlers.pytree_checkpoint_handler.PyTreeCheckpointHandler", "metrics": {}, "performance_metrics": {}, "init_timestamp_nsecs": 1768238999604476954, "commit_timestamp_nsecs": 1768239000371446938, "custom_metadata": {}}
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/_METADATA ADDED
The diff for this file is too large to render.
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/array_metadatas/process_0 ADDED
The diff for this file is too large to render.
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/d/47c88bcae99fa94c56fe553ff32430d0 ADDED
Binary file (86.5 kB).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/manifest.ocdbt ADDED
Binary file (119 Bytes).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/ocdbt.process_0/d/595487b98986151b896c7346ed505b87 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b5f014e40103e283071291e152523b2c40d1f2943ce732af40e2af2f6c77c2e
+ size 91234304
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/ocdbt.process_0/d/61d9900065e47297083908220731cb58 ADDED
Binary file (1.48 kB).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/ocdbt.process_0/d/669214ea056858cc2b3edc6973ddf799 ADDED
Binary file (1.57 kB).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/ocdbt.process_0/d/727568051ff325fb128076ca7600df64 ADDED
Binary file (86.5 kB).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/ocdbt.process_0/d/72daa7e7baae8b74fc61f7fe9677bf3e ADDED
Binary file (171 Bytes).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/ocdbt.process_0/d/8556eb97bb6ce535ac8a4aead103083f ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e5c9ac4d17e2f815c6e3267903c71ff52764f98b3557dfe163228383e1eda93c
+ size 42274816
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/checkpoint_5000/ocdbt.process_0/manifest.ocdbt ADDED
Binary file (352 Bytes).
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/artifact/2373403493/wandb_manifest.json ADDED
@@ -0,0 +1 @@
+ {"version":1,"storagePolicy":"wandb-storage-policy-v1","storagePolicyConfig":{"storageLayout":"V2"},"contents":{"0000.parquet":{"digest":"bvmk52MesdDILhybX62M0g==","birthArtifactID":"QXJ0aWZhY3Q6MjM3MzQwMzQ5Mw==","size":108430}}}
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/code/examples/train_rlpd.py ADDED
@@ -0,0 +1,792 @@
+ #!/usr/bin/env python3
+
+ import glob
+ import time
+ import json
+ from datetime import datetime
+ from functools import partial
+ from collections import defaultdict
+ import jax
+ import jax.numpy as jnp
+ import numpy as np
+ import tqdm
+ from absl import app, flags
+ from flax.training import checkpoints
+ import os
+ import copy
+ import pickle as pkl
+ from natsort import natsorted
+
+ from serl_launcher.agents.continuous.sac import SACAgent
+ from serl_launcher.common.evaluation import evaluate
+ from serl_launcher.utils.logging_utils import RecordEpisodeStatistics
+ from serl_launcher.agents.continuous.sac_hybrid_single import SACAgentHybridSingleArm
+ from serl_launcher.agents.continuous.sac_hybrid_dual import SACAgentHybridDualArm
+ from serl_launcher.utils.timer_utils import Timer
+ from serl_launcher.utils.train_utils import concat_batches
+
+ from agentlace.trainer import TrainerServer, TrainerClient
+ from agentlace.data.data_store import QueuedDataStore
+
+ from serl_launcher.utils.launcher import (
+     make_sac_pixel_agent,
+     make_sac_pixel_agent_hybrid_single_arm,
+     make_sac_pixel_agent_hybrid_dual_arm,
+     make_trainer_config,
+     make_wandb_logger,
+ )
+ from serl_launcher.data.data_store import MemoryEfficientReplayBufferDataStore
+
+ from experiments.mappings import get_config
+
+
+ FLAGS = flags.FLAGS
+
+ flags.DEFINE_string("exp_name", None, "Name of experiment corresponding to folder.")
+ flags.DEFINE_integer("seed", 42, "Random seed.")
+ flags.DEFINE_boolean("learner", False, "Whether this is a learner.")
+ flags.DEFINE_boolean("actor", False, "Whether this is an actor.")
+ flags.DEFINE_string("ip", "localhost", "IP address of the learner.")
+ flags.DEFINE_multi_string("demo_path", None, "Path to the demo data.")
+ flags.DEFINE_string("checkpoint_path", None, "Path to save checkpoints.")
+ flags.DEFINE_integer("eval_checkpoint_step", 0, "Step to evaluate the checkpoint.")
+ flags.DEFINE_integer("eval_n_trajs", 10, "Number of trajectories to evaluate.")
+ flags.DEFINE_boolean("save_video", False, "Save video.")
+ flags.DEFINE_boolean("render", True, "Render the environment.")
+
+ flags.DEFINE_boolean(
+     "debug", False, "Debug mode."
+ )  # debug mode will disable wandb logging
+
+
+ devices = jax.local_devices()
+ num_devices = len(devices)
+ sharding = jax.sharding.PositionalSharding(devices)
+
+
+ def print_green(x):
+     return print("\033[92m {}\033[00m".format(x))
+
+
+ ##############################################################################
+
+
+ def actor(agent, data_store, intvn_data_store, env, sampling_rng):
+     """
+     This is the actor loop, which runs when "--actor" is set to True.
+     Features:
+     - Periodic evaluation with separate eval environment
+     - Detailed logging at log_period intervals
+     - Eval stats saved to JSON file
+     """
+     if FLAGS.eval_checkpoint_step:
+         success_counter = 0
+         time_list = []
+
+         ckpt = checkpoints.restore_checkpoint(
+             os.path.abspath(FLAGS.checkpoint_path),
+             agent.state,
+             step=FLAGS.eval_checkpoint_step,
+         )
+         agent = agent.replace(state=ckpt)
+
+         for episode in range(FLAGS.eval_n_trajs):
+             obs, _ = env.reset()
+             done = False
+             start_time = time.time()
+             while not done:
+                 sampling_rng, key = jax.random.split(sampling_rng)
+                 actions = agent.sample_actions(
+                     observations=jax.device_put(obs),
+                     argmax=True,  # Use argmax for evaluation
+                     seed=key
+                 )
+                 actions = np.asarray(jax.device_get(actions), copy=True)
+
+                 next_obs, reward, done, truncated, info = env.step(actions)
+                 done = done or truncated
+                 obs = next_obs
+
+                 if done:
+                     is_success = info.get("is_success", False)
+                     if is_success:
+                         dt = time.time() - start_time
+                         time_list.append(dt)
+                         print(f"Episode {episode + 1}: SUCCESS in {dt:.2f}s")
+
+                     success_counter += int(is_success)
+                     print(f"Success rate so far: {success_counter}/{episode + 1}")
+
+         print(f"\n🎯 Final success rate: {success_counter / FLAGS.eval_n_trajs:.1%}")
+         if time_list:
+             print(f"⏱️ Average success time: {np.mean(time_list):.2f}s")
+         return  # after done eval, return and exit
+
+     start_step = (
+         int(os.path.basename(natsorted(glob.glob(os.path.join(FLAGS.checkpoint_path, "buffer/*.pkl")))[-1])[12:-4]) + 1
+         if FLAGS.checkpoint_path and os.path.exists(FLAGS.checkpoint_path) and glob.glob(os.path.join(FLAGS.checkpoint_path, "buffer/*.pkl"))
+         else 0
+     )
+
+     datastore_dict = {
+         "actor_env": data_store,
+         "actor_env_intvn": intvn_data_store,
+     }
+
+     client = TrainerClient(
+         "actor_env",
+         FLAGS.ip,
+         make_trainer_config(),
+         data_stores=datastore_dict,
+         wait_for_server=True,
+         timeout_ms=3000,
+     )
+
+     # Function to update the agent with new params
+     def update_params(params):
+         nonlocal agent
+         agent = agent.replace(state=agent.state.replace(params=params))
+
+     client.recv_network_callback(update_params)
+
+     # Setup evaluation stats file
+     eval_stats_file = None
+     if FLAGS.checkpoint_path is not None:
+         os.makedirs(FLAGS.checkpoint_path, exist_ok=True)
+         eval_stats_file = os.path.join(FLAGS.checkpoint_path, "eval_stats.json")
+         # Initialize with empty list
+         with open(eval_stats_file, 'w') as f:
+             json.dump([], f)
+         print(f"📊 Evaluation stats will be saved to: {eval_stats_file}")
+
+     transitions = []
+     demo_transitions = []
+
+     print("🎯 Actor starting with training env. Calling env.reset()...")
+     obs, _ = env.reset()
+     done = False
+
+     # training loop
+     timer = Timer()
+     running_return = 0.0
+     episode_length = 0
+     already_intervened = False
+     intervention_count = 0
+     intervention_steps = 0
+
+     pbar = tqdm.tqdm(range(start_step, config.max_steps), dynamic_ncols=True)
+     for step in pbar:
+         timer.tick("total")
+
+         with timer.context("sample_actions"):
+             if step < config.random_steps:
+                 # Warm-up phase: sample uniformly from the action space
+                 actions = env.action_space.sample()
+                 action_source = "🎲 RANDOM"
+             else:
+                 sampling_rng, key = jax.random.split(sampling_rng)
+                 actions = agent.sample_actions(
+                     observations=jax.device_put(obs),
+                     seed=key,
+                     argmax=True,
+                 )
+                 actions = np.asarray(jax.device_get(actions), copy=True)
+                 action_source = "🤖 AGENT"
+
+         # DETAILED LOGGING: Show action source periodically
+         if step % config.log_period == 0 or step < 10:
+             print(f"\n[Actor Step {step:6d}] {action_source}")
+
+         # Step environment
+         with timer.context("step_env"):
+             next_obs, reward, terminated, truncated, info = env.step(actions)
+             done = terminated or truncated
+             episode_length += 1
+
+             if "left" in info:
+                 info.pop("left")
+             if "right" in info:
+                 info.pop("right")
+
+             # override the action with the intervention action
+             if "intervene_action" in info:
+                 actions = info.pop("intervene_action")
+                 intervention_steps += 1
+                 if not already_intervened:
+                     intervention_count += 1
+                 already_intervened = True
+             else:
+                 already_intervened = False
+
+             running_return += reward
+
+             # CRITICAL: masks = 1.0 - terminated (NOT 1.0 - done!)
+             # terminated = True for task completion (bootstrap = 0)
+             # truncated = True for time limit (bootstrap = 1)
+             transition = dict(
+                 observations=obs,
+                 actions=actions,
+                 next_observations=next_obs,
+                 rewards=reward,
+                 masks=1.0 - float(terminated),  # Correct mask for RLPD
+                 dones=done,
+             )
+             if transition["masks"] == 0.0:
+                 print_green(f"[Actor Step {step:6d}] 🚩 Termination detected. Mask=0.0")
+
+             if 'grasp_penalty' in info:
+                 transition['grasp_penalty'] = info['grasp_penalty']
+             else:
+                 transition['grasp_penalty'] = 0.0
+
+             data_store.insert(transition)
+             transitions.append(copy.deepcopy(transition))
+             if already_intervened:
+                 intvn_data_store.insert(transition)
+                 demo_transitions.append(copy.deepcopy(transition))
+
+             # Log episode termination details
+             if done:
+                 is_success = info.get("is_success", False)
+                 term_reason = "✅ SUCCESS" if is_success else ("⏱️ TRUNCATED" if truncated else "❌ FAILED")
+
+                 # RecordEpisodeStatistics already added info["episode"] with "r" and "l"
+                 ep_return = info["episode"]["r"]
+                 ep_length = info["episode"]["l"]
+                 ep_last_step_reward = reward
+
+                 print(f"[Actor Step {step:6d}] 🏁 Episode ended → {term_reason}")
+                 print(f"   Episode return: {float(ep_return):.3f}")
+                 print(f"   Episode length: {int(ep_length)} steps")
+                 print(f"   mask={transition['masks']:.1f}, terminated={terminated}, truncated={truncated}\n")
+                 print(f"   Last step reward: {float(ep_last_step_reward):.3f}")
+                 print(f"   Total episode reward accumulated: {running_return:.3f} over {episode_length} steps")
+                 # Add custom fields to existing episode dict
+                 info["episode"]["is_success"] = is_success
+                 info["episode"]["intervention_count"] = intervention_count
+                 info["episode"]["intervention_steps"] = intervention_steps
+
+                 stats = {"environment": info}  # send stats to the learner to log
+                 client.request("send-stats", stats)
+                 pbar.set_description(f"last return: {float(ep_return):.2f}")
+
+                 # Reset episode tracking
+                 running_return = 0.0
+                 episode_length = 0
+                 intervention_count = 0
+                 intervention_steps = 0
+                 already_intervened = False
+                 client.update()
+                 obs, _ = env.reset()
+             else:
+                 obs = next_obs
+
+         # Periodic policy evaluation
+         if step > 0 and config.eval_period > 0 and step % config.eval_period == 0:
+             print(f"\n[Actor Step {step:6d}] 🧪 Starting evaluation...")
+             print("   Creating fresh eval environment with video recording...")
+
+             # Create new eval environment with video recording enabled
+             eval_env = config.get_environment(fake_env=False, save_video=True, video_save_path=os.path.join(FLAGS.checkpoint_path, "eval_videos") if FLAGS.checkpoint_path is not None else None, render=True)
+             eval_env = RecordEpisodeStatistics(eval_env)
+
+             with timer.context("eval"):
+                 # Use fixed seed for reproducible evaluation
+                 eval_seed = FLAGS.seed + (step // config.eval_period)
+                 evaluate_info = evaluate(
+                     policy_fn=partial(agent.sample_actions, argmax=True),
+                     env=eval_env,
+                     num_episodes=FLAGS.eval_n_trajs,
+                     seed=eval_seed,
+                 )
+
+             # Close eval environment to free resources
+             eval_env.close()
+             print("   Closed eval environment")
+
+             # Send stats to learner for WandB logging
+             eval_stats = {"eval": evaluate_info}
+             client.request("send-stats", eval_stats)
+
+             # Print evaluation results
+             success_rate = evaluate_info.get('final.is_success', 0.0)
+             avg_return = evaluate_info.get('eval/average_return', 0.0)
+             avg_length = evaluate_info.get('eval/average_length', 0)
+
+             print(f"[Actor Step {step:6d}] ✅ Evaluation complete:")
+             print(f"   • Success rate: {success_rate:.1%}")
+             print(f"   • Avg return: {avg_return:.3f}")
+             print(f"   • Avg length: {avg_length:.1f} steps")
+
+             # Save to JSON file
+             if eval_stats_file is not None:
+                 eval_record = {
+                     "step": step,
+                     "timestamp": datetime.now().isoformat(),
+                     "success_rate": float(success_rate),
+                     "average_return": float(avg_return),
+                     "average_length": float(avg_length),
+                     "full_stats": {k: float(v) if isinstance(v, (np.number, np.floating, np.integer)) else v
+                                    for k, v in evaluate_info.items()}
+                 }
+
+                 # Load existing stats, append new one, save
+                 try:
+                     with open(eval_stats_file, 'r') as f:
+                         all_stats = json.load(f)
+                 except (json.JSONDecodeError, FileNotFoundError):
+                     all_stats = []
+                 all_stats.append(eval_record)
+                 with open(eval_stats_file, 'w') as f:
+                     json.dump(all_stats, f, indent=2)
+                 print(f"   • Saved stats to: {eval_stats_file}\n")
+             else:
+                 print()
+
+         if step > 0 and config.buffer_period > 0 and step % config.buffer_period == 0:
+             # dump to pickle file
+             buffer_path = os.path.join(FLAGS.checkpoint_path, "buffer")
+             demo_buffer_path = os.path.join(FLAGS.checkpoint_path, "demo_buffer")
+             if not os.path.exists(buffer_path):
+                 os.makedirs(buffer_path)
+             if not os.path.exists(demo_buffer_path):
+                 os.makedirs(demo_buffer_path)
+             with open(os.path.join(buffer_path, f"transitions_{step}.pkl"), "wb") as f:
+                 pkl.dump(transitions, f)
+                 transitions = []
+             with open(
+                 os.path.join(demo_buffer_path, f"transitions_{step}.pkl"), "wb"
+             ) as f:
+                 pkl.dump(demo_transitions, f)
+                 demo_transitions = []
+
+         timer.tock("total")
+
+         if step % config.log_period == 0:
+             stats = {"timer": timer.get_average_times()}
+             client.request("send-stats", stats)
+
+
+ ##############################################################################
+
+
+ def learner(rng, agent, replay_buffer, demo_buffer, wandb_logger=None):
+     """
+     The learner loop, which runs when "--learner" is set to True.
+     """
+     start_step = (
+         int(os.path.basename(checkpoints.latest_checkpoint(os.path.abspath(FLAGS.checkpoint_path)))[11:])
+         + 1
+         if FLAGS.checkpoint_path and os.path.exists(FLAGS.checkpoint_path)
+         else 0
+     )
+     step = start_step
+
+     def stats_callback(type: str, payload: dict) -> dict:
+         """Callback for when server receives stats request."""
+         assert type == "send-stats", f"Invalid request type: {type}"
+         if wandb_logger is not None:
+             wandb_logger.log(payload, step=step)
+         return {}  # not expecting a response
+
+     # Create server
+     server = TrainerServer(make_trainer_config(), request_callback=stats_callback)
+     server.register_data_store("actor_env", replay_buffer)
+     server.register_data_store("actor_env_intvn", demo_buffer)
+     server.start(threaded=True)
+
+     # Loop to wait until replay_buffer is filled
+     pbar = tqdm.tqdm(
+         total=config.training_starts,
+         initial=len(replay_buffer),
+         desc="Filling up replay buffer",
+         position=0,
+         leave=True,
+     )
+     while len(replay_buffer) < config.training_starts:
+         pbar.update(len(replay_buffer) - pbar.n)  # Update progress bar
+         time.sleep(1)
+     pbar.update(len(replay_buffer) - pbar.n)  # Update progress bar
+     pbar.close()
+
+     # send the initial network to the actor
+     server.publish_network(agent.state.params)
+     print_green("sent initial network to actor")
+
+     # 50/50 sampling from RLPD, half from demo and half from online experience
+     replay_iterator = replay_buffer.get_iterator(
+         sample_args={
+             "batch_size": config.batch_size // 2,
+             "pack_obs_and_next_obs": True,
+         },
+         device=sharding.replicate(),
+     )
+     demo_iterator = demo_buffer.get_iterator(
+         sample_args={
+             "batch_size": config.batch_size // 2,
+             "pack_obs_and_next_obs": True,
+         },
+         device=sharding.replicate(),
+     )
+
+     timer = Timer()
+
+     if isinstance(agent, SACAgent):
+         train_critic_networks_to_update = frozenset({"critic"})
+         train_networks_to_update = frozenset({"critic", "actor", "temperature"})
+     else:
+         train_critic_networks_to_update = frozenset({"critic", "grasp_critic"})
+         train_networks_to_update = frozenset({"critic", "grasp_critic", "actor", "temperature"})
+
+     # Counters for tracking updates
+     total_critic_only_updates = 0
+     total_critic_actor_updates = 0
+
+     print_green(f"\n🎯 Starting training with CTA_RATIO={config.cta_ratio}")
+     print_green(f"   • Critic-only updates per step: {config.cta_ratio - 1}")
+     print_green("   • Critic+Actor updates per step: 1\n")
+
+     for step in tqdm.tqdm(
+         range(start_step, config.max_steps), dynamic_ncols=True, desc="learner"
+     ):
+         # ========================================================================
+         # CRITIC-ONLY UPDATES (CTA_RATIO - 1 updates)
+         # Purpose: Update value networks without policy changes
+         # ========================================================================
+         for critic_step in range(config.cta_ratio - 1):
+             with timer.context("sample_replay_buffer"):
+                 online_batch = next(replay_iterator)
+                 demo_batch = next(demo_iterator)
+
+             # ====================================================================
+             # BATCH COMPOSITION LOGGING (Every log_period steps)
+             # ====================================================================
+             if step % config.log_period == 0 and critic_step == 0:
+                 print(f"\n{'='*80}")
+                 print(f"[LEARNER Step {step:6d}] BATCH ANALYSIS")
+                 print(f"{'='*80}")
+
+                 # --- Batch Sizes ---
+                 online_size = online_batch['actions'].shape[0]
+                 demo_size = demo_batch['actions'].shape[0]
+                 print("\n📦 BATCH SIZES:")
+                 print(f"   Online: {online_size:3d} | Demo: {demo_size:3d} | Total: {online_size + demo_size:3d}")
+
+                 # --- Camera Images Check ---
+                 # NOTE: SERLObsWrapper unwraps images to top level (not nested under 'images')
+                 print("\n📸 CAMERA IMAGES (Verifying all 4 cameras):")
+                 for batch_name, batch_data in [("Online", online_batch), ("Demo", demo_batch)]:
+                     obs = batch_data['observations']
+                     print(f"   {batch_name} batch images:")
+                     cam_count = 0
+                     for cam_name in ['cam_high', 'cam_low', 'cam_left_wrist', 'cam_right_wrist']:
+                         if cam_name in obs:
+                             img_shape = obs[cam_name].shape
+                             img_mean = float(jnp.mean(obs[cam_name]))
+                             img_std = float(jnp.std(obs[cam_name]))
+                             print(f"      ✓ {cam_name:18s}: shape={img_shape}, mean={img_mean:6.2f}, std={img_std:5.2f}")
+                             cam_count += 1
+                         else:
+                             print(f"      ✗ {cam_name:18s}: MISSING!")
+                     print(f"   → {batch_name} total cameras found: {cam_count}/4")
+
+                 # --- Masks/Dones Analysis ---
+                 online_masks = online_batch['masks']
+                 demo_masks = demo_batch['masks']
+                 print("\n🎭 MASKS (Bootstrapping Signal):")
+                 print(f"   Online: mean={float(jnp.mean(online_masks)):.3f}, min={float(jnp.min(online_masks)):.3f}, max={float(jnp.max(online_masks)):.3f}")
+                 print(f"   Demo:   mean={float(jnp.mean(demo_masks)):.3f}, min={float(jnp.min(demo_masks)):.3f}, max={float(jnp.max(demo_masks)):.3f}")
+
+                 # --- Rewards Analysis ---
+                 online_rewards = online_batch['rewards']
+                 demo_rewards = demo_batch['rewards']
+                 print("\n🎁 REWARDS:")
+                 print(f"   Online: mean={float(jnp.mean(online_rewards)):.4f}, min={float(jnp.min(online_rewards)):.4f}, max={float(jnp.max(online_rewards)):.4f}")
+                 print(f"   Demo:   mean={float(jnp.mean(demo_rewards)):.4f}, min={float(jnp.min(demo_rewards)):.4f}, max={float(jnp.max(demo_rewards)):.4f}")
+
+             batch = concat_batches(online_batch, demo_batch, axis=0)
+
+             with timer.context("train_critics"):
+                 agent, critics_info = agent.update(
+                     batch,
+                     networks_to_update=train_critic_networks_to_update,
+                 )
+             total_critic_only_updates += 1
+
+         # ========================================================================
+         # CRITIC + ACTOR UPDATE (1 update per step)
+         # Purpose: Update both value networks AND policy
+         # ========================================================================
+         with timer.context("train"):
+             online_batch = next(replay_iterator)
+             demo_batch = next(demo_iterator)
+
+             batch = concat_batches(online_batch, demo_batch, axis=0)
+
+             agent, update_info = agent.update(
+                 batch,
+                 networks_to_update=train_networks_to_update,
+             )
+             total_critic_actor_updates += 1
+
+         # ========================================================================
+         # TRAINING METRICS LOGGING (Every log_period steps)
+         # ========================================================================
+         if step % config.log_period == 0:
+             print(f"\n{'='*80}")
+             print(f"[LEARNER Step {step:6d}] TRAINING METRICS")
+             print(f"{'='*80}")
+
+             # --- Update Counts ---
+             update_ratio = total_critic_only_updates / (total_critic_actor_updates + 1e-8)
+             print("\n📊 UPDATE STATISTICS:")
+             print(f"   Critic-only updates:  {total_critic_only_updates:7d}")
+             print(f"   Critic+Actor updates: {total_critic_actor_updates:7d}")
+             print(f"   Ratio: {update_ratio:.2f} (expected: {config.cta_ratio-1:.2f})")
+
+             # Helper function to safely extract scalar values
+             def safe_float(val, default=0.0):
+                 if val is None:
+                     return default
+                 if isinstance(val, dict):
+                     # Try to extract first value if it's a dict
+                     if len(val) > 0:
+                         first_key = next(iter(val))
+                         val = val[first_key]
+                     else:
+                         return default
+                 try:
+                     return float(val)
+                 except (TypeError, ValueError):
+                     return default
+
+             # --- Loss Values ---
+             print("\n📉 LOSS VALUES:")
+             if 'grasp_critic_loss' in update_info:
+                 print(f"   Temperature: {safe_float(update_info.get('temperature')):8.5f}")
+
+             # --- Reward/Mask Statistics from Batch ---
+             print("\n📦 BATCH STATISTICS:")
+             print(f"   Rewards: mean={safe_float(update_info.get('rewards')):7.4f}, "
+                   f"min={safe_float(update_info.get('rewards_min')):7.4f}, "
+                   f"max={safe_float(update_info.get('rewards_max')):7.4f}")
+             print(f"   Masks:   mean={safe_float(update_info.get('masks')):7.4f}, "
+                   f"min={safe_float(update_info.get('masks_min')):7.4f}, "
+                   f"max={safe_float(update_info.get('masks_max')):7.4f}")
+
+             # --- Buffer Status ---
+             print("\n💾 BUFFER STATUS:")
+             print(f"   Replay buffer: {len(replay_buffer):6d} / {replay_buffer._capacity:6d} ({100*len(replay_buffer)/replay_buffer._capacity:.1f}%)")
+             print(f"   Demo buffer:   {len(demo_buffer):6d} / {demo_buffer._capacity:6d} ({100*len(demo_buffer)/demo_buffer._capacity:.1f}%)")
+             print(f"{'='*80}\n")
+
+         # publish the updated network
+         if step > 0 and step % (config.steps_per_update) == 0:
+             agent = jax.block_until_ready(agent)
+             server.publish_network(agent.state.params)
+
+         if step % config.log_period == 0 and wandb_logger:
+             # Add update counts to wandb
+             update_info_extended = {
+                 **update_info,
+                 "training/critic_only_updates": total_critic_only_updates,
+                 "training/critic_actor_updates": total_critic_actor_updates,
+                 "training/update_ratio": total_critic_only_updates / (total_critic_actor_updates + 1e-8),
+             }
+             wandb_logger.log(update_info_extended, step=step)
+             wandb_logger.log({"timer": timer.get_average_times()}, step=step)
+
+         if (
+             step > 0
+             and config.checkpoint_period
+             and step % config.checkpoint_period == 0
+         ):
+             checkpoints.save_checkpoint(
+                 os.path.abspath(FLAGS.checkpoint_path), agent.state, step=step, keep=100
+             )
+
+
+ ##############################################################################
+
+
+ def main(_):
+     global config
+     config = get_config(FLAGS.exp_name)()
+
+     assert config.batch_size % num_devices == 0
+     # seed
+     rng = jax.random.PRNGKey(FLAGS.seed)
+     rng, sampling_rng = jax.random.split(rng)
+
+     # assert FLAGS.exp_name in CONFIG_MAPPING, "Experiment folder not found."
+     env = config.get_environment(
+         fake_env=FLAGS.learner,
+         save_video=True if FLAGS.eval_checkpoint_step > 0 else False,
+         classifier=True,
+         video_save_path=os.path.join(FLAGS.checkpoint_path, "eval_videos") if FLAGS.checkpoint_path is not None else None,
+         render=FLAGS.render,
+     )
+     env = RecordEpisodeStatistics(env)
+
+     rng, sampling_rng = jax.random.split(rng)
+
+     if config.setup_mode == 'single-arm-fixed-gripper' or config.setup_mode == 'dual-arm-fixed-gripper':
+         agent: SACAgent = make_sac_pixel_agent(
+             seed=FLAGS.seed,
+             sample_obs=env.observation_space.sample(),
+             sample_action=env.action_space.sample(),
+             image_keys=config.image_keys,
+             encoder_type=config.encoder_type,
+             discount=config.discount,
+             critic_ensemble_size=config.critic_ensemble_size,
+             critic_subsample_size=config.critic_subsample_size,
+         )
+         include_grasp_penalty = False
+     elif config.setup_mode == 'single-arm-learned-gripper':
+         agent: SACAgentHybridSingleArm = make_sac_pixel_agent_hybrid_single_arm(
+             seed=FLAGS.seed,
+             sample_obs=env.observation_space.sample(),
+             sample_action=env.action_space.sample(),
+             image_keys=config.image_keys,
+             encoder_type=config.encoder_type,
+             discount=config.discount,
+         )
+         include_grasp_penalty = True
+     elif config.setup_mode == 'dual-arm-learned-gripper':
+         agent: SACAgentHybridDualArm = make_sac_pixel_agent_hybrid_dual_arm(
+             seed=FLAGS.seed,
+             sample_obs=env.observation_space.sample(),
+             sample_action=env.action_space.sample(),
+             image_keys=config.image_keys,
+             encoder_type=config.encoder_type,
+             discount=config.discount,
+         )
+         include_grasp_penalty = True
+     else:
+         raise NotImplementedError(f"Unknown setup mode: {config.setup_mode}")
+
+     # replicate agent across devices
+     # need the jnp.array to avoid a bug where device_put doesn't recognize primitives
+     agent = jax.device_put(
+         jax.tree_util.tree_map(jnp.array, agent), sharding.replicate()
+     )
+
+     if FLAGS.checkpoint_path is not None and os.path.exists(FLAGS.checkpoint_path):
+         # Check if there are actual checkpoint files
+         latest_ckpt = checkpoints.latest_checkpoint(os.path.abspath(FLAGS.checkpoint_path))
+         if latest_ckpt is not None:
+             input("Checkpoint path already exists. Press Enter to resume training.")
+             ckpt = checkpoints.restore_checkpoint(
+                 os.path.abspath(FLAGS.checkpoint_path),
+                 agent.state,
+             )
+             agent = agent.replace(state=ckpt)
+             ckpt_number = os.path.basename(latest_ckpt)[11:]
+             print_green(f"Loaded previous checkpoint at step {ckpt_number}.")
+         else:
+             print_green("Checkpoint directory exists but is empty. Starting fresh training.")
+             # Create directory if it doesn't exist
+             os.makedirs(FLAGS.checkpoint_path, exist_ok=True)
+
+     def create_replay_buffer_and_wandb_logger():
+         replay_buffer = MemoryEfficientReplayBufferDataStore(
+             env.observation_space,
+             env.action_space,
+             capacity=config.replay_buffer_capacity,
+             image_keys=config.image_keys,
+             include_grasp_penalty=include_grasp_penalty,
+         )
+         # set up wandb and logging
+         wandb_logger = make_wandb_logger(
+             project="hil-serl",
+             description=FLAGS.exp_name,
+             debug=FLAGS.debug,
+         )
+         return replay_buffer, wandb_logger
+
+     if FLAGS.learner:
+         sampling_rng = jax.device_put(sampling_rng, device=sharding.replicate())
+         replay_buffer, wandb_logger = create_replay_buffer_and_wandb_logger()
+         demo_buffer = MemoryEfficientReplayBufferDataStore(
+             env.observation_space,
+             env.action_space,
+             capacity=config.replay_buffer_capacity,
+             image_keys=config.image_keys,
+             include_grasp_penalty=include_grasp_penalty,
+         )
+
+         assert FLAGS.demo_path is not None
+         for path in FLAGS.demo_path:
+             with open(path, "rb") as f:
+                 transitions = pkl.load(f)
+                 for transition in transitions:
+                     # Handle grasp_penalty for hybrid agents
+                     if include_grasp_penalty:
+                         if 'infos' in transition and 'grasp_penalty' in transition['infos']:
+                             transition['grasp_penalty'] = transition['infos']['grasp_penalty']
+                         else:
+                             # For BC demos without grasp_penalty, set to 0 (no penalty)
+                             transition['grasp_penalty'] = 0.0
+                     demo_buffer.insert(transition)
+         print_green(f"demo buffer size: {len(demo_buffer)}")
+         print_green(f"online buffer size: {len(replay_buffer)}")
+
+         if FLAGS.checkpoint_path is not None and os.path.exists(
+             os.path.join(FLAGS.checkpoint_path, "buffer")
+         ):
+             for file in glob.glob(os.path.join(FLAGS.checkpoint_path, "buffer/*.pkl")):
+                 with open(file, "rb") as f:
+                     transitions = pkl.load(f)
+                     for transition in transitions:
+                         replay_buffer.insert(transition)
+             print_green(
+                 f"Loaded previous buffer data. Replay buffer size: {len(replay_buffer)}"
+             )
+
+         if FLAGS.checkpoint_path is not None and os.path.exists(
+             os.path.join(FLAGS.checkpoint_path, "demo_buffer")
+         ):
+             for file in glob.glob(
+                 os.path.join(FLAGS.checkpoint_path, "demo_buffer/*.pkl")
+             ):
+                 with open(file, "rb") as f:
+                     transitions = pkl.load(f)
+                     for transition in transitions:
+                         demo_buffer.insert(transition)
+             print_green(
+                 f"Loaded previous demo buffer data. Demo buffer size: {len(demo_buffer)}"
+             )
+
+         # learner loop
+         print_green("starting learner loop")
+         learner(
+             sampling_rng,
+             agent,
+             replay_buffer,
+             demo_buffer=demo_buffer,
+             wandb_logger=wandb_logger,
+         )
+
+     elif FLAGS.actor:
+         sampling_rng = jax.device_put(sampling_rng, sharding.replicate())
+         data_store = QueuedDataStore(50000)  # the queue size on the actor
+         intvn_data_store = QueuedDataStore(50000)
+
+         # actor loop
+         print_green("starting actor loop")
+         actor(
+             agent,
+             data_store,
+             intvn_data_store,
+             env,
+             sampling_rng,
+         )
+
+     else:
+         raise NotImplementedError("Must be either a learner or an actor")
+
+
+ if __name__ == "__main__":
+     app.run(main)
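The masks=1.0 - float(terminated) convention that the actor loop flags as CRITICAL is worth spelling out, since the mask gates bootstrapping in the TD target: a true termination (task solved) zeroes the next-state value, while a time-limit truncation keeps it. A standalone sketch of that target computation — illustrative only, with a made-up discount; the real update lives in serl_launcher's SAC agents:

    def td_target(reward, mask, next_value, discount=0.99):
        # mask = 1.0 - float(terminated):
        #   terminated -> mask 0.0 -> target is the immediate reward alone
        #   truncated  -> mask 1.0 -> still bootstrap from next_value
        return reward + discount * mask * next_value

    print(td_target(reward=1.0, mask=0.0, next_value=5.0))  # 1.0  (task completed)
    print(td_target(reward=0.0, mask=1.0, next_value=5.0))  # 4.95 (time limit hit)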
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/config.yaml ADDED
@@ -0,0 +1,166 @@
+ '?':
+   value: false
+ _wandb:
+   value:
+     cli_version: 0.23.1
+     code_path: code/examples/train_rlpd.py
+     e:
+       kth3utdpevuw7jymdlpalescmucl9buu:
+         args:
+         - --exp_name=cube_stacking_gym_rlpd
+         - --checkpoint_path=./RLPD_Checkpoints/
+         - --demo_path=/home/qte9489/personal_abhi/temp/hil-serl/train_data_sets/test1/only_right_arm_data_regenerated_deleted.pkl
+         - --learner
+         codePath: examples/train_rlpd.py
+         cpu_count: 24
+         cpu_count_logical: 32
+         cudaVersion: "12.8"
+         disk:
+           /:
+             total: "972996431872"
+             used: "780807614464"
+         email: nannuriabhi2000@gmail.com
+         executable: /home/qte9489/anaconda3/envs/hilserl/bin/python
+         git:
+           commit: 8214ad9987b024a5c3f75be0516ffc5d020611a9
+           remote: https://github.com/Abhi-0212000/hil-serl.git
+         gpu: NVIDIA GeForce RTX 4090 Laptop GPU
+         gpu_count: 1
+         gpu_nvidia:
+         - architecture: Ada
+           cudaCores: 9728
+           memoryTotal: "17171480576"
+           name: NVIDIA GeForce RTX 4090 Laptop GPU
+           uuid: GPU-956b4ab5-e4a8-1320-18b2-a3eb83de1da9
+         host: cw011081522
+         memory:
+           total: "33372749824"
+         os: Linux-6.8.0-90-generic-x86_64-with-glibc2.35
+         program: /home/qte9489/personal_abhi/temp/hil-serl/examples/experiments/cube_stacking_gym/../../train_rlpd.py
+         python: CPython 3.10.19
+         root: /tmp/tmpp99_95qz
+         startedAt: "2026-01-12T17:03:53.458678Z"
+         writerId: kth3utdpevuw7jymdlpalescmucl9buu
+     m: []
+     python_version: 3.10.19
+     t:
+       "1":
+       - 2
+       - 3
+       - 12
+       - 45
+       "2":
+       - 2
+       - 3
+       - 12
+       - 45
+       "3":
+       - 14
+       - 15
+       - 16
+       - 61
+       "4": 3.10.19
+       "5": 0.23.1
+       "12": 0.23.1
+       "13": linux-x86_64
+ actor:
+   value: false
+ alsologtostderr:
+   value: false
+ checkpoint_path:
+   value: ./RLPD_Checkpoints/
+ chex_assert_multiple_cpu_devices:
+   value: false
+ chex_n_cpu_devices:
+   value: 1
+ chex_skip_pmap_variant_if_single_device:
+   value: true
+ debug:
+   value: false
+ delta_threshold:
+   value: 0.5
+ demo_path:
+   value:
+   - /home/qte9489/personal_abhi/temp/hil-serl/train_data_sets/test1/only_right_arm_data_regenerated_deleted.pkl
+ eval_checkpoint_step:
+   value: 0
+ eval_n_trajs:
+   value: 10
+ exp_name:
+   value: cube_stacking_gym_rlpd
+ experimental_orbax_use_distributed_barrier:
+   value: false
+ experimental_orbax_use_distributed_process_id:
+   value: false
+ hbm_oom_exit:
+   value: true
+ help:
+   value: false
+ helpfull:
+   value: false
+ helpshort:
+   value: false
+ helpxml:
+   value: false
+ hostname:
+   value: cw011081522
+ ip:
+   value: localhost
+ learner:
+   value: true
+ log_dir:
+   value: ""
+ logtostderr:
+   value: false
+ only_check_args:
+   value: false
+ op_conversion_fallback_to_while_loop:
+   value: true
+ pdb:
+   value: false
+ pdb_post_mortem:
+   value: false
+ profile_file:
+   value: null
+ pymjcf_debug:
+   value: false
+ pymjcf_debug_full_dump_dir:
+   value: ""
+ pymjcf_log_xml:
+   value: false
+ render:
+   value: true
+ run_with_pdb:
+   value: false
+ run_with_profiling:
+   value: false
+ runtime_oom_exit:
+   value: true
+ save_video:
+   value: false
+ seed:
+   value: 42
+ showprefixforinfo:
+   value: true
+ stderrthreshold:
+   value: fatal
+ test_random_seed:
+   value: 301
+ test_randomize_ordering_seed:
+   value: ""
+ test_srcdir:
+   value: ""
+ test_tmpdir:
+   value: /tmp/absl_testing
+ tt_check_filter:
+   value: false
+ tt_single_core_summaries:
+   value: false
+ use_cprofile_for_profiling:
+   value: true
+ v:
+   value: 0
+ verbosity:
+   value: 0
+ xml_output_file:
+   value: ""
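W&B serializes every absl flag of this run under a nested value: key, so recovering a flat flag dict takes one small transform. A sketch, assuming only the layout above and PyYAML being available:

    import yaml

    with open("cube_stacking_gym_rlpd_20260112_180353/config.yaml") as f:
        raw = yaml.safe_load(f)

    # Unwrap the {"value": ...} layer and skip W&B's internal _wandb block.
    run_flags = {k: v["value"] for k, v in raw.items()
                 if isinstance(v, dict) and "value" in v and k != "_wandb"}
    print(run_flags["exp_name"], run_flags["learner"], run_flags["seed"])
    # cube_stacking_gym_rlpd True 42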
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/history_full.csv ADDED
The diff for this file is too large to render.
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/output.log ADDED
The diff for this file is too large to render.
 
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/plots/actor_acator_objective.png ADDED

Git LFS Details

  • SHA256: e666ba659cb79d1c3ddaa99893c709ca4263ad7791ad63fb248242d3d4c6cbfa
  • Pointer size: 130 Bytes
  • Size of remote file: 76.2 kB
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/plots/actor_actor_loss.png ADDED

Git LFS Details

  • SHA256: 57ff107a131dd3e68be6355c3910d3069b7cd040171181b37e57c6acb09930fe
  • Pointer size: 130 Bytes
  • Size of remote file: 73.7 kB
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/plots/actor_entropy.png ADDED

Git LFS Details

  • SHA256: 2c25eed8507e112e951ff3adf617f4a39de3169b06b60519c2b026d64251928d
  • Pointer size: 130 Bytes
  • Size of remote file: 80.5 kB
experiments/cube_stacking_gym/RL/RLPD_Checkpoints/RLPD_Checkpoints_exploiting the reward func/cube_stacking_gym_rlpd_20260112_180353/plots/actor_lr.png ADDED

Git LFS Details

  • SHA256: f042ea80948b89a4b05c7434ed48a5e9d7612c3f3ecddc9c3ef30dd6565f2c6d
  • Pointer size: 130 Bytes
  • Size of remote file: 41.7 kB