diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl000.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl000.obj
new file mode 100644
index 0000000000000000000000000000000000000000..db8c976d777591f0a25f6691b65b15956cf11d53
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl000.obj
@@ -0,0 +1,188 @@
+v 0.008262449 -0.056882737 0.048841554
+v 0.012940890 -0.054406889 0.035926501
+v 0.012970069 -0.052959721 0.035895600
+v -0.014549516 -0.046235780 0.007557420
+v -0.005075807 -0.060021150 0.047272347
+v 0.004731734 -0.050213609 0.010395993
+v -0.014491046 -0.054136626 0.039034013
+v 0.008262449 -0.058844245 0.048841554
+v 0.012970069 -0.054528927 0.034326393
+v 0.004740576 -0.047912968 0.010380272
+v -0.012137236 -0.049429006 0.008042184
+v 0.009439354 -0.052959721 0.035895600
+v -0.007037315 -0.057667340 0.048841554
+v 0.006693242 -0.059236547 0.046880046
+v 0.012577767 -0.054921229 0.035895600
+v -0.009433870 -0.046971134 0.007482143
+v -0.014098744 -0.045898291 0.008042184
+v 0.012970069 -0.052959721 0.034326393
+v -0.002721997 -0.050998213 0.010788295
+v -0.015275649 -0.056490436 0.038641711
+v 0.007477846 -0.056882737 0.048841554
+v -0.007037315 -0.059628849 0.048841554
+v 0.005124036 -0.059628849 0.048449252
+v 0.008262449 -0.058844245 0.046880046
+v 0.004731734 -0.050605911 0.011572898
+v -0.013314141 -0.049036704 0.007649882
+v -0.015355732 -0.052861005 0.033077031
+v -0.014491046 -0.045898291 0.008042184
+v -0.014098744 -0.045898291 0.007649882
+v -0.011352633 -0.049429006 0.007649882
+v -0.001937394 -0.060021150 0.046880046
+v -0.009783426 -0.057275039 0.037857108
+v -0.015275649 -0.054921229 0.039034013
+v -0.006645013 -0.060021150 0.048056951
+v 0.007477846 -0.059236547 0.048449252
+v 0.003947131 -0.059628849 0.048841554
+v 0.012577767 -0.054921229 0.035503298
+v 0.008654750 -0.055705832 0.034718695
+v 0.001985623 -0.050605911 0.010395993
+v -0.014491046 -0.048644403 0.007649882
+v -0.014491046 -0.052175117 0.020595836
+v -0.009363853 -0.049119892 0.007727841
+v -0.003506600 -0.060021150 0.046880046
+v 0.005124036 -0.059628849 0.048056951
+v -0.011744935 -0.050213609 0.010788295
+v -0.007821918 -0.058059642 0.048056951
+v -0.015275649 -0.056490436 0.039034013
+v -0.006252712 -0.060021150 0.047664649
+v 0.007477846 -0.059236547 0.047664649
+v -0.006645013 -0.060021150 0.048449252
+v 0.006300941 -0.059236547 0.048841554
+v 0.007870147 -0.053744324 0.026480361
+v 0.005908639 -0.055705832 0.032364885
+v 0.003947131 -0.050605911 0.011180597
+v -0.001152790 -0.050998213 0.010788295
+v -0.015275649 -0.054921229 0.032364885
+v -0.013314141 -0.050605911 0.013534406
+v -0.014491046 -0.050605911 0.014711311
+v -0.014883348 -0.056490436 0.038249410
+v -0.003506600 -0.058451944 0.040603219
+v 0.003554829 -0.059628849 0.047272347
+v 0.000024114 -0.060021150 0.048056951
+v -0.010960331 -0.056882737 0.037072505
+v -0.007858685 -0.057587207 0.047978282
+f 8 2 3
+f 8 3 1
+f 9 3 2
+f 10 9 6
+f 12 1 3
+f 13 8 1
+f 15 9 2
+f 15 2 8
+f 16 10 6
+f 17 3 10
+f 17 12 3
+f 18 10 3
+f 18 3 9
+f 18 9 10
+f 21 12 7
+f 21 1 12
+f 21 13 1
+f 21 7 13
+f 22 8 13
+f 24 15 8
+f 25 6 9
+f 28 7 12
+f 28 12 17
+f 28 27 7
+f 28 4 27
+f 29 16 4
+f 29 28 17
+f 29 4 28
+f 29 17 10
+f 29 10 16
+f 30 11 26
+f 30 26 16
+f 32 19 5
+f 33 13 7
+f 33 7 27
+f 34 5 31
+f 35 24 8
+f 36 8 22
+f 37 24 9
+f 37 9 15
+f 37 15 24
+f 38 9 14
+f 39 30 6
+f 40 4 16
+f 40 16 26
+f 42 30 16
+f 42 16 6
+f 42 6 30
+f 43 31 5
+f 44 35 23
+f 45 11 30
+f 45 30 19
+f 45 19 32
+f 46 22 13
+f 47 46 33
+f 47 22 46
+f 47 20 34
+f 47 33 27
+f 47 27 20
+f 48 32 5
+f 48 5 34
+f 49 24 35
+f 49 14 9
+f 49 9 24
+f 49 44 14
+f 49 35 44
+f 50 36 22
+f 50 34 31
+f 50 47 34
+f 50 22 47
+f 51 35 8
+f 51 8 36
+f 51 36 23
+f 51 23 35
+f 52 38 25
+f 52 25 9
+f 52 9 38
+f 53 38 14
+f 53 25 38
+f 54 39 6
+f 54 6 25
+f 54 53 39
+f 54 25 53
+f 55 19 30
+f 55 30 39
+f 55 39 53
+f 56 20 27
+f 56 27 4
+f 56 4 40
+f 56 41 20
+f 57 41 26
+f 57 26 11
+f 57 45 41
+f 57 11 45
+f 58 40 26
+f 58 26 41
+f 58 56 40
+f 58 41 56
+f 59 20 41
+f 59 32 48
+f 59 48 34
+f 59 34 20
+f 60 43 5
+f 60 5 19
+f 60 31 43
+f 60 55 31
+f 60 19 55
+f 61 44 31
+f 61 14 44
+f 61 31 55
+f 61 55 53
+f 61 53 14
+f 62 44 23
+f 62 31 44
+f 62 50 31
+f 62 23 36
+f 62 36 50
+f 63 45 32
+f 63 41 45
+f 63 59 41
+f 63 32 59
+f 64 46 13
+f 64 13 33
+f 64 33 46
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl001.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl001.obj
new file mode 100644
index 0000000000000000000000000000000000000000..743172d3ff233ae9a583b687ef5f03ddaa028bca
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl001.obj
@@ -0,0 +1,188 @@
+v -0.005475058 0.058876268 0.048638654
+v -0.005463524 0.057814980 0.048505999
+v -0.006997945 0.050252120 0.010089541
+v -0.019590967 0.047861800 0.010003692
+v -0.019235275 0.056177896 0.048461989
+v -0.019983269 0.045115689 0.011180597
+v -0.015275649 0.058453944 0.045703141
+v -0.005468108 0.058846246 0.048056951
+v -0.018021761 0.056100134 0.048841554
+v -0.007037315 0.047861800 0.011180597
+v -0.008606522 0.047469498 0.010003692
+v -0.021944777 0.051784816 0.027657265
+v -0.013314141 0.049823308 0.010788295
+v -0.016844856 0.058453944 0.048449252
+v -0.005467018 0.057703980 0.048048537
+v -0.011352633 0.059238547 0.047272347
+v -0.008214220 0.057277039 0.048841554
+v -0.020375571 0.049431007 0.028049567
+v -0.020403472 0.045174808 0.009987244
+v -0.007821918 0.050607911 0.010003692
+v -0.019590967 0.045115689 0.010788295
+v -0.007068560 0.048082903 0.009947392
+v -0.016060253 0.056492436 0.038641711
+v -0.013706443 0.058846246 0.048841554
+v -0.017629459 0.058061643 0.048841554
+v -0.018414062 0.056884738 0.042957029
+v -0.007820392 0.059224598 0.048053668
+v -0.015275649 0.058846246 0.047664649
+v -0.019983269 0.047077197 0.019026629
+v -0.019245712 0.055437521 0.048310001
+v -0.021944777 0.048646403 0.015495915
+v -0.021944777 0.049823308 0.028049567
+v -0.014098744 0.049431007 0.010003692
+v -0.009391125 0.052177118 0.017065121
+v -0.007037315 0.052961721 0.019418931
+v -0.008606522 0.047469498 0.010395993
+v -0.019753283 0.045459617 0.009908006
+v -0.010568030 0.059238547 0.048841554
+v -0.019227901 0.056554535 0.048538440
+v -0.018021761 0.058061643 0.048449252
+v -0.016844856 0.058453944 0.047272347
+v -0.014883348 0.058846246 0.047272347
+v -0.014883348 0.058846246 0.048449252
+v -0.020375571 0.047077197 0.019026629
+v -0.019983269 0.049431007 0.028049567
+v -0.018021761 0.055315531 0.048449252
+v -0.020286556 0.047166036 0.010083143
+v -0.017237158 0.048646403 0.010003692
+v -0.020767872 0.049038705 0.015495915
+v -0.020522329 0.045426653 0.011094082
+v -0.012137236 0.049823308 0.010003692
+v -0.014883348 0.049823308 0.011965200
+v -0.008606522 0.051000213 0.011965200
+v -0.007429617 0.058846246 0.043741633
+v -0.014491046 0.056100134 0.035895600
+v -0.009783426 0.059238547 0.046487744
+v -0.011744935 0.059238547 0.048056951
+v -0.021944777 0.051784816 0.028049567
+v -0.016844856 0.057277039 0.042564728
+v -0.015275649 0.058846246 0.048056951
+v -0.011352633 0.056884738 0.048841554
+v -0.020375571 0.047861800 0.010788295
+v -0.019983269 0.048254102 0.011572898
+v -0.021944777 0.051000213 0.024518852
+f 8 1 2
+f 15 8 2
+f 15 3 8
+f 15 2 10
+f 17 2 1
+f 17 10 2
+f 21 10 6
+f 21 19 11
+f 21 6 19
+f 22 10 11
+f 22 15 10
+f 22 3 15
+f 22 20 3
+f 24 9 17
+f 25 24 14
+f 25 9 24
+f 27 1 8
+f 29 6 10
+f 29 10 17
+f 30 9 5
+f 32 12 31
+f 32 18 30
+f 32 30 5
+f 35 8 3
+f 35 3 20
+f 36 21 11
+f 36 11 10
+f 36 10 21
+f 37 22 11
+f 37 11 19
+f 37 19 4
+f 37 20 22
+f 38 24 17
+f 38 17 1
+f 38 1 27
+f 39 5 9
+f 39 9 25
+f 39 32 5
+f 40 25 14
+f 40 39 25
+f 40 26 12
+f 40 12 39
+f 41 28 7
+f 41 26 40
+f 41 40 14
+f 41 14 28
+f 42 28 16
+f 42 16 7
+f 42 7 28
+f 43 14 24
+f 43 24 38
+f 44 29 18
+f 44 6 29
+f 44 32 6
+f 44 18 32
+f 45 29 17
+f 45 18 29
+f 46 9 30
+f 46 45 17
+f 46 30 18
+f 46 18 45
+f 47 4 19
+f 47 19 31
+f 48 37 4
+f 48 33 37
+f 50 19 6
+f 50 6 32
+f 50 32 31
+f 50 31 19
+f 51 33 13
+f 51 13 20
+f 51 37 33
+f 51 20 37
+f 52 13 33
+f 52 48 23
+f 52 33 48
+f 53 34 20
+f 53 20 13
+f 53 13 34
+f 54 34 7
+f 54 27 8
+f 54 8 35
+f 54 35 20
+f 54 20 34
+f 55 23 7
+f 55 7 34
+f 55 34 13
+f 55 52 23
+f 55 13 52
+f 56 38 27
+f 56 16 38
+f 56 27 54
+f 56 54 7
+f 56 7 16
+f 57 38 16
+f 57 16 28
+f 57 43 38
+f 58 39 12
+f 58 12 32
+f 58 32 39
+f 59 26 41
+f 59 49 26
+f 59 41 7
+f 59 7 23
+f 60 28 14
+f 60 14 43
+f 60 57 28
+f 60 43 57
+f 61 46 17
+f 61 17 9
+f 61 9 46
+f 62 47 31
+f 62 4 47
+f 62 48 4
+f 63 23 48
+f 63 48 62
+f 63 62 31
+f 63 31 49
+f 63 59 23
+f 63 49 59
+f 64 49 31
+f 64 31 12
+f 64 12 26
+f 64 26 49
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl002.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl002.obj
new file mode 100644
index 0000000000000000000000000000000000000000..d67d2632650981751ea1cd22fb1bc4a367f5b939
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl002.obj
@@ -0,0 +1,188 @@
+v -0.040382954 -0.040013767 0.039034013
+v -0.038379611 -0.044674307 0.037087126
+v -0.037636843 -0.035698449 0.019026629
+v -0.042344462 -0.025498606 0.003334564
+v -0.050582797 -0.031775432 0.036680203
+v -0.040382954 -0.033344639 0.007649882
+v -0.045875177 -0.023144796 0.014319010
+v -0.038421446 -0.041190672 0.037072505
+v -0.045875177 -0.038836862 0.039034013
+v -0.038217332 -0.040979623 0.024983721
+v -0.039206049 -0.030206226 0.008042184
+v -0.047836685 -0.024321701 0.012357502
+v -0.047444384 -0.031383131 0.037072505
+v -0.039948528 -0.043889376 0.039048735
+v -0.038207413 -0.044480517 0.035970403
+v -0.041952161 -0.025498606 0.007257580
+v -0.045482875 -0.039229163 0.037857108
+v -0.039334996 -0.033453942 0.007567204
+v -0.043913669 -0.028637019 0.008042184
+v -0.047821565 -0.023135692 0.012362082
+v -0.050115766 -0.025453609 0.024541493
+v -0.047444384 -0.025498606 0.024126551
+v -0.046659780 -0.032167734 0.037072505
+v -0.041952161 -0.038444560 0.039034013
+v -0.050362611 -0.031231327 0.037140674
+v -0.039990653 -0.040406068 0.039034013
+v -0.037636843 -0.036090750 0.020203534
+v -0.039598351 -0.029421623 0.008042184
+v -0.041852574 -0.025436793 0.003442828
+v -0.045875177 -0.038836862 0.038249410
+v -0.042344462 -0.033736941 0.014319010
+v -0.039206049 -0.041975275 0.028049567
+v -0.041559859 -0.043152180 0.039034013
+v -0.038813748 -0.041190672 0.024911154
+v -0.037577135 -0.036421245 0.018655989
+v -0.042344462 -0.026675511 0.003726866
+v -0.048228987 -0.029421623 0.023734249
+v -0.044698272 -0.025498606 0.005688374
+v -0.047836685 -0.023144796 0.014319010
+v -0.050190495 -0.025890908 0.023341947
+v -0.047052082 -0.031383131 0.036287901
+v -0.040176593 -0.043704547 0.039106135
+v -0.050582797 -0.031775432 0.037072505
+v -0.045726106 -0.038334110 0.039079587
+v -0.045482875 -0.023144796 0.012749803
+v -0.039598351 -0.029421623 0.007649882
+v -0.039374302 -0.030346531 0.007489375
+v -0.048621289 -0.032560036 0.031972583
+v -0.041952161 -0.036483052 0.019811233
+v -0.040775256 -0.033344639 0.008434485
+v -0.038813748 -0.044721386 0.035895600
+v -0.038187906 -0.043281660 0.032054204
+v -0.037636843 -0.035698449 0.018634328
+v -0.042344462 -0.025890908 0.003334564
+v -0.045482875 -0.026283209 0.008434485
+v -0.045090574 -0.025498606 0.006472977
+v -0.044305971 -0.026675511 0.005688374
+v -0.049405892 -0.027460114 0.023734249
+v -0.045482875 -0.023144796 0.012357502
+v -0.041757720 -0.025770219 0.003545946
+v -0.045482875 -0.033736941 0.024518852
+v -0.049405892 -0.032952337 0.035503298
+v -0.049405892 -0.031775432 0.032757187
+v -0.042344462 -0.035698449 0.019026629
+f 14 2 8
+f 15 8 2
+f 16 7 8
+f 22 8 7
+f 22 21 13
+f 22 7 21
+f 23 13 1
+f 24 1 13
+f 25 24 13
+f 25 13 21
+f 26 14 8
+f 26 23 1
+f 26 8 23
+f 27 8 15
+f 27 16 8
+f 27 3 16
+f 28 16 3
+f 28 3 11
+f 29 4 20
+f 29 16 28
+f 30 17 9
+f 30 9 5
+f 30 5 17
+f 33 9 17
+f 34 18 10
+f 34 6 18
+f 35 10 18
+f 35 3 27
+f 35 27 15
+f 35 18 11
+f 36 18 6
+f 38 20 4
+f 39 21 7
+f 39 7 20
+f 39 20 21
+f 40 21 20
+f 40 20 12
+f 41 22 13
+f 41 13 23
+f 41 23 8
+f 41 8 22
+f 42 1 24
+f 42 26 1
+f 42 14 26
+f 42 33 14
+f 42 9 33
+f 43 5 9
+f 43 25 21
+f 43 40 5
+f 43 21 40
+f 44 24 25
+f 44 42 24
+f 44 9 42
+f 44 43 9
+f 44 25 43
+f 45 7 16
+f 45 16 29
+f 45 20 7
+f 46 29 28
+f 46 28 11
+f 46 11 29
+f 47 29 11
+f 47 11 18
+f 49 32 17
+f 49 34 32
+f 50 31 19
+f 50 6 34
+f 50 34 49
+f 51 15 2
+f 51 2 14
+f 51 14 33
+f 51 33 17
+f 51 17 32
+f 51 32 34
+f 52 35 15
+f 52 10 35
+f 52 15 51
+f 52 51 34
+f 52 34 10
+f 53 35 11
+f 53 11 3
+f 53 3 35
+f 54 18 36
+f 54 4 29
+f 54 38 4
+f 55 19 37
+f 56 12 20
+f 56 20 38
+f 57 36 6
+f 57 56 38
+f 57 19 55
+f 57 55 12
+f 57 12 56
+f 57 50 19
+f 57 6 50
+f 57 54 36
+f 57 38 54
+f 58 37 5
+f 58 5 40
+f 58 40 12
+f 58 55 37
+f 58 12 55
+f 59 45 29
+f 59 29 20
+f 59 20 45
+f 60 47 18
+f 60 29 47
+f 60 54 29
+f 60 18 54
+f 61 48 31
+f 62 17 5
+f 62 61 17
+f 62 48 61
+f 63 5 37
+f 63 37 19
+f 63 19 31
+f 63 31 48
+f 63 62 5
+f 63 48 62
+f 64 49 17
+f 64 50 49
+f 64 31 50
+f 64 61 31
+f 64 17 61
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl003.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl003.obj
new file mode 100644
index 0000000000000000000000000000000000000000..75ddff9df47a33042d0655b1063a914ba36eef94
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl003.obj
@@ -0,0 +1,188 @@
+v 0.032032741 -0.033520568 0.009538295
+v 0.037292770 -0.030206226 0.007649882
+v 0.010999716 -0.029411729 0.003356638
+v 0.011008560 -0.037267655 -0.000196151
+v 0.016108482 -0.045505990 0.005688374
+v 0.023169911 -0.029813924 -0.000196151
+v 0.033762055 -0.029421623 0.007649882
+v 0.025523721 -0.041975275 0.009219088
+v 0.010977829 -0.047336511 0.006934185
+v 0.011015013 -0.029431382 0.001366821
+v 0.023954514 -0.038444560 0.002942262
+v 0.037238327 -0.029385514 0.007665354
+v 0.031408245 -0.033344639 0.009611390
+v 0.031902883 -0.036198967 0.009573787
+v 0.011400862 -0.043936783 0.002549961
+v 0.012185465 -0.047467498 0.006865279
+v 0.020816101 -0.033736941 -0.000196151
+v 0.033762055 -0.029813924 0.003334564
+v 0.025916023 -0.040406068 0.006080675
+v 0.027877531 -0.029421623 0.000980754
+v 0.037013312 -0.029236444 0.007006718
+v 0.017285387 -0.042759878 0.008434485
+v 0.028662134 -0.035698449 0.009611390
+v 0.011008560 -0.043936783 0.002549961
+v 0.018069990 -0.043936783 0.004511469
+v 0.024346816 -0.042759878 0.008826787
+v 0.024346816 -0.042759878 0.009219088
+v 0.011400862 -0.046682895 0.004903770
+v 0.013754672 -0.037659957 -0.000196151
+v 0.027092927 -0.033344639 0.001765357
+v 0.031800547 -0.036875354 0.008826787
+v 0.031408245 -0.034913845 0.004903770
+v 0.023169911 -0.029421623 0.000196151
+v 0.023954514 -0.030990829 -0.000196151
+v 0.033762055 -0.029421623 0.003334564
+v 0.037292770 -0.029813924 0.006865279
+v 0.011008560 -0.044329085 0.006865279
+v 0.029054436 -0.035306147 0.009611390
+v 0.011008560 -0.038444560 -0.000196151
+v 0.013754672 -0.042759878 0.002157659
+v 0.024346816 -0.038836862 0.003334564
+v 0.018069990 -0.045113688 0.006865279
+v 0.024739118 -0.041975275 0.007257580
+v 0.023800155 -0.042537225 0.009289494
+v 0.012185465 -0.047467498 0.006472977
+v 0.016893085 -0.044721386 0.004903770
+v 0.011400862 -0.045898291 0.004119167
+v 0.011008560 -0.046682895 0.004903770
+v 0.025916023 -0.035306147 0.002157659
+v 0.031800547 -0.036875354 0.009219088
+v 0.031408245 -0.036483052 0.006865279
+v 0.022385308 -0.032560036 -0.000196151
+v 0.031408245 -0.029813924 0.002157659
+v 0.031408245 -0.029421623 0.002157659
+v 0.036900468 -0.029421623 0.006080675
+v 0.011400862 -0.038444560 -0.000196151
+v 0.018069990 -0.035306147 -0.000196151
+v 0.014539275 -0.042367577 0.002157659
+v 0.024346816 -0.039621465 0.004119167
+v 0.030623642 -0.034913845 0.004119167
+v 0.020423800 -0.043152180 0.005296072
+v 0.026308324 -0.041190672 0.008042184
+v 0.024346816 -0.042367577 0.007649882
+v 0.017415032 -0.043406060 0.008364071
+f 9 3 4
+f 10 6 4
+f 10 4 3
+f 12 1 2
+f 13 7 3
+f 13 12 7
+f 13 1 12
+f 14 2 1
+f 14 1 13
+f 17 4 6
+f 21 3 7
+f 21 7 12
+f 21 10 3
+f 23 14 13
+f 23 3 22
+f 27 9 16
+f 27 8 14
+f 27 26 8
+f 27 16 26
+f 29 4 17
+f 33 20 6
+f 33 6 10
+f 33 21 20
+f 33 10 21
+f 34 17 6
+f 34 6 20
+f 36 21 12
+f 36 12 2
+f 37 22 3
+f 37 3 9
+f 37 9 22
+f 38 23 13
+f 38 13 3
+f 38 3 23
+f 39 24 9
+f 39 9 4
+f 39 4 29
+f 39 15 24
+f 44 9 27
+f 44 23 22
+f 44 27 14
+f 44 14 23
+f 45 16 9
+f 45 5 42
+f 45 42 26
+f 45 26 16
+f 46 28 25
+f 46 45 28
+f 46 5 45
+f 46 42 5
+f 47 24 15
+f 47 15 40
+f 47 40 25
+f 47 25 28
+f 48 9 24
+f 48 47 28
+f 48 24 47
+f 48 45 9
+f 48 28 45
+f 49 11 17
+f 49 41 11
+f 50 14 8
+f 50 8 31
+f 50 31 2
+f 50 2 14
+f 51 19 32
+f 51 32 36
+f 51 36 2
+f 51 2 31
+f 52 34 30
+f 52 17 34
+f 52 49 17
+f 52 30 49
+f 53 34 20
+f 53 30 34
+f 53 35 18
+f 54 20 21
+f 54 21 35
+f 54 53 20
+f 54 35 53
+f 55 35 21
+f 55 21 36
+f 55 18 35
+f 55 36 32
+f 56 39 29
+f 56 29 40
+f 56 40 15
+f 56 15 39
+f 57 29 17
+f 58 17 11
+f 58 11 41
+f 58 41 25
+f 58 25 40
+f 58 57 17
+f 58 40 29
+f 58 29 57
+f 59 19 25
+f 59 25 41
+f 60 32 19
+f 60 59 41
+f 60 19 59
+f 60 41 49
+f 60 55 32
+f 60 18 55
+f 60 49 30
+f 60 53 18
+f 60 30 53
+f 61 43 25
+f 61 25 19
+f 61 19 43
+f 62 31 8
+f 62 8 26
+f 62 51 31
+f 62 43 19
+f 62 19 51
+f 63 26 42
+f 63 42 46
+f 63 46 25
+f 63 25 43
+f 63 62 26
+f 63 43 62
+f 64 44 22
+f 64 22 9
+f 64 9 44
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl004.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl004.obj
new file mode 100644
index 0000000000000000000000000000000000000000..4b5538e1d5b8bf54dcd2d733dacfa47f2ca1a765
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl004.obj
@@ -0,0 +1,188 @@
+v 0.038469675 0.040015767 0.037072505
+v 0.047100311 0.023931400 0.028049567
+v 0.035723563 0.032562036 0.008042184
+v 0.037292770 0.023931400 0.002942262
+v 0.028662134 0.041977275 0.029226472
+v 0.041215786 0.024323702 0.007649882
+v 0.044746501 0.032562036 0.036680203
+v 0.043961898 0.023539098 0.028049567
+v 0.029446737 0.040015767 0.012357502
+v 0.041215786 0.023539098 0.007257580
+v 0.037292770 0.026677512 0.004119167
+v 0.040431183 0.036877354 0.033541790
+v 0.039646580 0.027069813 0.007649882
+v 0.046708009 0.024323702 0.026872662
+v 0.036900468 0.023539098 0.006472977
+v 0.045395258 0.030523236 0.037111290
+v 0.034154357 0.043938784 0.036287901
+v 0.045923406 0.023931400 0.023341947
+v 0.046776894 0.023417030 0.028135294
+v 0.031408245 0.036485052 0.007649882
+v 0.039646580 0.023931400 0.004903770
+v 0.037292770 0.032954338 0.013926708
+v 0.040038881 0.038446561 0.036680203
+v 0.037292770 0.029423623 0.006472977
+v 0.045531104 0.030992830 0.036287901
+v 0.045138803 0.031777433 0.036287901
+v 0.027092927 0.040015767 0.019418931
+v 0.036774673 0.023465649 0.003180987
+v 0.044746501 0.032562036 0.037072505
+v 0.031800547 0.041977275 0.037072505
+v 0.030029547 0.042625797 0.024437296
+v 0.031800547 0.037269656 0.009219088
+v 0.042000390 0.023931400 0.009611390
+v 0.047100311 0.023931400 0.027657265
+v 0.043569596 0.025892908 0.031187980
+v 0.037292770 0.025108305 0.003334564
+v 0.041215786 0.023931400 0.007257580
+v 0.036115865 0.033346639 0.011180597
+v 0.036900468 0.030992830 0.008042184
+v 0.042000390 0.036092751 0.036680203
+v 0.038469675 0.040015767 0.036680203
+v 0.039254278 0.027462115 0.007257580
+v 0.033369754 0.034915846 0.007649882
+v 0.043569596 0.030992830 0.029226472
+v 0.043569596 0.023539098 0.026872662
+v 0.027877531 0.037661957 0.008434485
+v 0.038077373 0.023539098 0.003334564
+v 0.045236419 0.030825165 0.037156639
+v 0.040038881 0.038446561 0.037072505
+v 0.033164827 0.044168268 0.036999401
+v 0.031408245 0.042369577 0.024518852
+v 0.028682829 0.042394906 0.028041203
+v 0.027175421 0.040509034 0.018600985
+v 0.030623642 0.037661957 0.008434485
+v 0.045923406 0.023539098 0.023341947
+v 0.047100311 0.023539098 0.027657265
+v 0.046708009 0.024716003 0.027657265
+v 0.042392691 0.030600528 0.037072505
+v 0.037685072 0.025500607 0.003726866
+v 0.037685072 0.034523544 0.019026629
+v 0.035331262 0.033346639 0.008826787
+v 0.040823485 0.037269656 0.035895600
+v 0.034154357 0.041584974 0.028834170
+v 0.039254278 0.038446561 0.034326393
+f 19 2 16
+f 22 12 7
+f 25 16 2
+f 26 7 25
+f 26 14 18
+f 28 10 19
+f 28 19 8
+f 29 25 7
+f 29 16 25
+f 30 5 8
+f 33 6 13
+f 33 13 18
+f 34 18 14
+f 34 25 2
+f 34 26 25
+f 35 19 16
+f 35 8 19
+f 36 20 11
+f 36 4 20
+f 37 10 21
+f 37 33 10
+f 37 6 33
+f 38 22 3
+f 39 3 22
+f 40 7 12
+f 40 29 7
+f 41 17 1
+f 41 1 23
+f 42 24 3
+f 42 21 11
+f 42 11 24
+f 42 37 21
+f 42 13 6
+f 42 6 37
+f 42 3 39
+f 43 3 24
+f 43 20 32
+f 43 24 11
+f 43 11 20
+f 44 7 26
+f 44 13 42
+f 44 42 39
+f 44 39 22
+f 44 22 7
+f 44 26 18
+f 44 18 13
+f 45 27 15
+f 45 8 5
+f 45 5 27
+f 45 28 8
+f 45 15 28
+f 46 20 4
+f 46 4 28
+f 46 28 15
+f 46 15 27
+f 47 21 10
+f 47 10 28
+f 47 28 4
+f 47 4 36
+f 48 16 29
+f 48 1 30
+f 49 23 1
+f 49 48 29
+f 49 1 48
+f 49 40 23
+f 49 29 40
+f 50 5 30
+f 50 17 31
+f 50 30 1
+f 50 1 17
+f 51 31 17
+f 51 9 31
+f 51 32 9
+f 51 17 32
+f 52 27 5
+f 52 50 31
+f 52 5 50
+f 53 31 9
+f 53 52 31
+f 53 27 52
+f 53 46 27
+f 53 9 46
+f 54 32 20
+f 54 9 32
+f 54 46 9
+f 54 20 46
+f 55 33 18
+f 55 10 33
+f 55 18 34
+f 55 19 10
+f 56 34 2
+f 56 2 19
+f 56 55 34
+f 56 19 55
+f 57 34 14
+f 57 14 26
+f 57 26 34
+f 58 30 8
+f 58 8 35
+f 58 35 16
+f 58 48 30
+f 58 16 48
+f 59 36 11
+f 59 11 21
+f 59 47 36
+f 59 21 47
+f 60 38 12
+f 60 12 22
+f 60 22 38
+f 61 38 3
+f 61 12 38
+f 61 43 32
+f 61 3 43
+f 62 40 12
+f 62 12 23
+f 62 23 40
+f 63 41 32
+f 63 32 17
+f 63 17 41
+f 64 41 23
+f 64 32 41
+f 64 61 32
+f 64 23 12
+f 64 12 61
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl005.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl005.obj
new file mode 100644
index 0000000000000000000000000000000000000000..f6ee8810386969b818028aca32c3c5ae04a3dd9b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl005.obj
@@ -0,0 +1,188 @@
+v -0.047444384 -0.013729557 0.007257580
+v -0.032144620 -0.038836862 0.007649882
+v -0.034106100 -0.012160339 0.003332876
+v -0.040382954 -0.012160351 -0.000196151
+v -0.042736764 -0.028244718 0.004903770
+v -0.032144620 -0.030206226 0.000980754
+v -0.050397206 -0.008586123 0.005423802
+v -0.032144620 -0.036483052 0.007649882
+v -0.043913669 -0.028244718 0.007649882
+v -0.034116376 -0.012164341 0.001240234
+v -0.037244541 -0.022360193 -0.000196151
+v -0.033321525 -0.036875354 0.003726866
+v -0.047444384 -0.009021937 0.005688374
+v -0.032156041 -0.029051517 0.003455351
+v -0.032005723 -0.038657385 0.005827687
+v -0.049013590 -0.018437177 0.006472977
+v -0.041559859 -0.026675511 0.008042184
+v -0.044263597 -0.008617910 0.001505187
+v -0.035283033 -0.022360193 -0.000196151
+v -0.035283033 -0.030598527 0.001765357
+v -0.038029144 -0.035306147 0.006865279
+v -0.032098543 -0.028971293 0.000991329
+v -0.032144620 -0.035698449 0.002942262
+v -0.043913669 -0.027460114 0.005688374
+v -0.045875177 -0.015691065 0.002157659
+v -0.048754168 -0.018349858 0.007376724
+v -0.039237473 -0.031023334 0.008019958
+v -0.040382954 -0.010983446 0.000196151
+v -0.044698272 -0.008629636 0.004119167
+v -0.036459938 -0.023144796 -0.000196151
+v -0.040775256 -0.028637019 0.003334564
+v -0.033713826 -0.038836862 0.006865279
+v -0.036852239 -0.036483052 0.007649882
+v -0.032144620 -0.038444560 0.004903770
+v -0.032014931 -0.037067553 0.004026341
+v -0.032929223 -0.032560036 0.001765357
+v -0.047836685 -0.018044875 0.004511469
+v -0.045875177 -0.024714003 0.006865279
+v -0.041167557 -0.032167734 0.007257580
+v -0.047836685 -0.008629636 0.002549961
+v -0.041559859 -0.013337255 -0.000196151
+v -0.049707645 -0.014875984 0.006906864
+v -0.044305971 -0.027460114 0.007649882
+v -0.042069204 -0.027147470 0.007963007
+v -0.038813748 -0.030598527 0.008042184
+v -0.046267479 -0.008629636 0.001765357
+v -0.044975550 -0.008597808 0.001731724
+v -0.045482875 -0.008629636 0.004511469
+v -0.044444583 -0.008657131 0.003428935
+v -0.037244541 -0.027460114 0.001373056
+v -0.036067636 -0.030990829 0.002157659
+v -0.042736764 -0.023537098 0.002549961
+v -0.038813748 -0.032560036 0.004511469
+v -0.036067636 -0.036090750 0.005296072
+v -0.032536921 -0.038836862 0.005688374
+v -0.041167557 -0.032167734 0.007649882
+v -0.033542238 -0.038633840 0.007755414
+v -0.034498430 -0.038444560 0.007257580
+v -0.032536921 -0.037267655 0.003726866
+v -0.032144620 -0.033736941 0.002157659
+v -0.032536921 -0.030206226 0.000980754
+v -0.034890731 -0.029813924 0.001373056
+v -0.032929223 -0.036483052 0.003334564
+v -0.048621289 -0.018437177 0.005688374
+f 8 3 1
+f 13 7 1
+f 13 1 3
+f 14 3 8
+f 15 8 2
+f 15 14 8
+f 17 8 1
+f 18 3 10
+f 19 11 4
+f 19 4 10
+f 22 10 3
+f 22 3 14
+f 22 19 10
+f 22 6 19
+f 22 14 15
+f 26 17 1
+f 28 18 10
+f 28 10 4
+f 30 19 6
+f 30 11 19
+f 35 22 15
+f 35 15 34
+f 35 6 22
+f 37 5 24
+f 38 24 9
+f 38 16 24
+f 39 9 24
+f 39 21 33
+f 39 24 5
+f 40 37 7
+f 40 25 37
+f 41 4 11
+f 41 11 25
+f 41 25 40
+f 42 26 1
+f 42 1 7
+f 42 7 16
+f 42 16 26
+f 43 38 9
+f 43 26 16
+f 43 16 38
+f 44 27 17
+f 44 9 27
+f 44 43 9
+f 44 17 26
+f 44 26 43
+f 45 8 17
+f 45 17 27
+f 46 28 4
+f 46 18 28
+f 46 4 41
+f 46 41 40
+f 47 29 18
+f 47 7 29
+f 47 18 46
+f 47 46 40
+f 47 40 7
+f 48 29 7
+f 48 7 13
+f 48 13 3
+f 48 3 29
+f 49 29 3
+f 49 3 18
+f 49 18 29
+f 50 11 30
+f 51 20 36
+f 51 50 20
+f 51 31 50
+f 52 25 11
+f 52 50 31
+f 52 11 50
+f 52 37 25
+f 52 31 5
+f 52 5 37
+f 53 5 31
+f 53 39 5
+f 53 21 39
+f 53 31 12
+f 54 53 12
+f 54 21 53
+f 55 15 2
+f 55 2 32
+f 55 34 15
+f 55 12 34
+f 55 54 12
+f 55 32 54
+f 56 33 27
+f 56 27 9
+f 56 39 33
+f 56 9 39
+f 57 27 33
+f 57 45 27
+f 57 2 8
+f 57 8 45
+f 57 32 2
+f 58 33 21
+f 58 57 33
+f 58 32 57
+f 58 54 32
+f 58 21 54
+f 59 34 12
+f 59 35 34
+f 59 23 35
+f 60 35 23
+f 60 6 35
+f 61 30 6
+f 61 60 36
+f 61 6 60
+f 62 36 20
+f 62 61 36
+f 62 30 61
+f 62 50 30
+f 62 20 50
+f 63 51 36
+f 63 12 31
+f 63 31 51
+f 63 59 12
+f 63 23 59
+f 63 60 23
+f 63 36 60
+f 64 37 24
+f 64 24 16
+f 64 16 7
+f 64 7 37
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl007.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl007.obj
new file mode 100644
index 0000000000000000000000000000000000000000..205c384ace150008f1b02e6f2efc03e53dd59b3d
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl007.obj
@@ -0,0 +1,188 @@
+v 0.017677688 0.046292593 0.010003692
+v 0.023579023 0.033383490 0.003442734
+v 0.023562213 0.039623466 0.003726866
+v 0.008262449 0.039231164 -0.000196151
+v 0.008262333 0.030600364 0.003328482
+v 0.020816101 0.033346639 -0.000196151
+v 0.008654750 0.046684895 0.004119167
+v 0.023783137 0.039893859 0.008345192
+v 0.023562213 0.034915846 0.001373056
+v 0.012577767 0.043154180 0.002157659
+v 0.008262449 0.030600528 0.000980754
+v 0.016893085 0.044723387 0.004903770
+v 0.008208959 0.048734845 0.008904135
+v 0.023562213 0.040015767 0.004119167
+v 0.020825248 0.030618465 0.003107049
+v 0.017285387 0.043546482 0.010003692
+v 0.023562213 0.038446561 0.002942262
+v 0.015716180 0.036485052 -0.000196151
+v 0.011400862 0.043938784 0.002549961
+v 0.018462291 0.032954338 -0.000196151
+v 0.016893085 0.043938784 0.004119167
+v 0.009439354 0.048646403 0.007257580
+v 0.008262449 0.046684895 0.004119167
+v 0.017677688 0.046292593 0.009611390
+v 0.023169911 0.039623466 0.008434485
+v 0.020818675 0.030602946 0.001062395
+v 0.017817856 0.043803431 0.009930225
+v 0.014931577 0.044331085 0.010003692
+v 0.023562213 0.036092751 0.001765357
+v 0.012185465 0.038054259 -0.000196151
+v 0.008654750 0.039231164 -0.000196151
+v 0.008262449 0.038054259 -0.000196151
+v 0.011793164 0.044331085 0.002942262
+v 0.018854593 0.041584974 0.002942262
+v 0.019246895 0.042369577 0.003726866
+v 0.011008560 0.046292593 0.004511469
+v 0.010616259 0.048646403 0.008826787
+v 0.009047052 0.048254102 0.006472977
+v 0.017677688 0.045507990 0.007257580
+v 0.020423800 0.031777433 0.000196151
+v 0.023169911 0.033346639 0.000980754
+v 0.023537717 0.033315679 0.001488085
+v 0.023602722 0.040065348 0.008418112
+v 0.008373556 0.046636570 0.008733905
+v 0.014787742 0.047158184 0.010088339
+v 0.021993006 0.037661957 0.001765357
+v 0.016500783 0.042761879 0.002942262
+v 0.022777609 0.039623466 0.003334564
+v 0.017677688 0.044331085 0.004903770
+v 0.008654750 0.049038705 0.008434485
+v 0.014146973 0.047469498 0.008042184
+v 0.014931577 0.047469498 0.009219088
+v 0.015323878 0.045507990 0.005296072
+v 0.020423800 0.032562036 -0.000196151
+v 0.023169911 0.033738941 0.000980754
+v 0.020933729 0.031985035 0.000293609
+v 0.014460489 0.047298974 0.010050057
+v 0.020031498 0.034131243 -0.000196151
+v 0.018069990 0.039623466 0.001765357
+v 0.017677688 0.042761879 0.003334564
+v 0.021208403 0.038838862 0.002157659
+v 0.018462291 0.042369577 0.003334564
+v 0.010616259 0.048646403 0.008434485
+v 0.008262449 0.048646403 0.007257580
+f 9 8 2
+f 13 11 5
+f 14 8 3
+f 15 2 8
+f 17 3 8
+f 18 6 4
+f 19 7 10
+f 20 4 6
+f 23 7 4
+f 23 4 13
+f 24 1 8
+f 24 8 14
+f 25 15 8
+f 25 5 15
+f 25 16 5
+f 26 15 5
+f 26 5 11
+f 27 1 16
+f 28 5 16
+f 29 17 8
+f 29 8 9
+f 29 9 6
+f 30 18 4
+f 30 10 18
+f 31 19 10
+f 31 4 7
+f 31 7 19
+f 31 30 4
+f 31 10 30
+f 32 20 11
+f 32 4 20
+f 32 13 4
+f 32 11 13
+f 33 10 7
+f 33 7 21
+f 35 14 3
+f 36 21 7
+f 36 12 21
+f 38 7 23
+f 38 36 7
+f 39 24 14
+f 40 26 11
+f 42 2 15
+f 42 15 26
+f 42 26 41
+f 42 41 9
+f 42 9 2
+f 43 8 1
+f 43 1 27
+f 43 25 8
+f 43 27 16
+f 43 16 25
+f 44 28 13
+f 44 13 5
+f 44 5 28
+f 45 28 16
+f 45 16 1
+f 46 17 29
+f 47 33 21
+f 47 10 33
+f 48 3 17
+f 48 35 3
+f 49 35 12
+f 49 14 35
+f 49 39 14
+f 49 12 39
+f 50 13 37
+f 51 24 39
+f 51 39 12
+f 52 1 24
+f 52 24 51
+f 53 38 22
+f 53 12 36
+f 53 36 38
+f 53 51 12
+f 53 22 51
+f 54 40 11
+f 54 11 20
+f 54 20 6
+f 54 6 41
+f 55 41 6
+f 55 6 9
+f 55 9 41
+f 56 41 26
+f 56 26 40
+f 56 54 41
+f 56 40 54
+f 57 13 28
+f 57 28 45
+f 57 37 13
+f 57 52 37
+f 57 45 1
+f 57 1 52
+f 58 6 18
+f 58 18 46
+f 58 46 29
+f 58 29 6
+f 59 46 18
+f 59 18 10
+f 60 47 21
+f 61 34 48
+f 61 46 59
+f 61 59 10
+f 61 47 34
+f 61 10 47
+f 61 48 17
+f 61 17 46
+f 62 48 34
+f 62 35 48
+f 62 34 47
+f 62 47 60
+f 62 60 21
+f 62 21 12
+f 62 12 35
+f 63 50 37
+f 63 22 50
+f 63 51 22
+f 63 52 51
+f 63 37 52
+f 64 50 22
+f 64 22 38
+f 64 38 23
+f 64 23 13
+f 64 13 50
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl008.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl008.obj
new file mode 100644
index 0000000000000000000000000000000000000000..0d330da798c68222408399feaa103363f8c99c71
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl008.obj
@@ -0,0 +1,188 @@
+v 0.049061819 -0.029421623 0.048841554
+v 0.049454121 -0.029813924 0.048449252
+v 0.043177295 -0.029421623 0.026480361
+v 0.040823485 -0.029421623 0.023341947
+v 0.030808736 -0.045748854 0.044060464
+v 0.049145946 -0.029257069 0.047756856
+v 0.036508167 -0.045505990 0.048841554
+v 0.046315708 -0.029421623 0.047664649
+v 0.042098175 -0.029264363 0.024214671
+v 0.049454121 -0.029421623 0.048449252
+v 0.042784993 -0.039229163 0.048841554
+v 0.039254278 -0.029421623 0.024911154
+v 0.046708009 -0.029421623 0.048841554
+v 0.033762055 -0.040013767 0.023341947
+v 0.049061819 -0.029813924 0.046095442
+v 0.045923406 -0.034913845 0.048841554
+v 0.030623642 -0.045113688 0.044133934
+v 0.039254278 -0.029421623 0.024518852
+v 0.041608088 -0.030990829 0.023341947
+v 0.034938960 -0.045505990 0.043349331
+v 0.049454121 -0.029813924 0.047664649
+v 0.049061819 -0.029421623 0.046095442
+v 0.049061819 -0.029813924 0.048841554
+v 0.032585150 -0.045505990 0.048841554
+v 0.030649512 -0.041616917 0.030000807
+v 0.041820483 -0.029325560 0.023395776
+v 0.033457620 -0.036962377 0.023311194
+v 0.042000390 -0.040406068 0.048056951
+v 0.043177295 -0.029813924 0.026480361
+v 0.032373769 -0.045743377 0.047985137
+v 0.033369754 -0.041190672 0.025695757
+v 0.047100311 -0.033736941 0.047664649
+v 0.047884914 -0.031775432 0.048841554
+v 0.032192849 -0.045113688 0.048056951
+v 0.030700568 -0.045606925 0.043318796
+v 0.030623642 -0.041190672 0.031187980
+v 0.042000390 -0.030206226 0.023341947
+v 0.033173690 -0.039806436 0.023413991
+v 0.032977452 -0.037267655 0.024518852
+v 0.036508167 -0.037267655 0.023341947
+v 0.041608088 -0.040798370 0.048449252
+v 0.038077373 -0.043544481 0.045310839
+v 0.044746501 -0.030206226 0.032364885
+v 0.036115865 -0.045505990 0.046487744
+v 0.048669517 -0.031383131 0.048056951
+v 0.047100311 -0.033736941 0.048056951
+v 0.041215786 -0.032167734 0.024518852
+v 0.047884914 -0.032560036 0.047272347
+v 0.048277216 -0.031775432 0.048449252
+v 0.032585150 -0.045113688 0.048841554
+v 0.042392691 -0.030206226 0.024518852
+v 0.042392691 -0.029813924 0.024126551
+v 0.032779197 -0.040588722 0.024591701
+v 0.032977452 -0.037267655 0.025303456
+v 0.042000390 -0.040406068 0.048449252
+v 0.037292770 -0.044721386 0.047272347
+v 0.035331262 -0.043544481 0.037857108
+v 0.037292770 -0.042367577 0.039426315
+v 0.036508167 -0.045505990 0.048056951
+v 0.045138803 -0.036090750 0.048841554
+v 0.045923406 -0.035306147 0.047664649
+v 0.041215786 -0.031775432 0.023734249
+v 0.037685072 -0.036875354 0.025695757
+v 0.048669517 -0.031383131 0.047664649
+f 9 6 3
+f 10 6 1
+f 10 1 2
+f 11 1 7
+f 12 8 6
+f 12 6 9
+f 13 1 6
+f 13 6 8
+f 13 7 1
+f 16 1 11
+f 17 8 12
+f 17 13 8
+f 18 12 9
+f 21 10 2
+f 22 15 3
+f 22 3 6
+f 22 6 10
+f 22 21 15
+f 22 10 21
+f 23 2 1
+f 23 1 16
+f 24 17 5
+f 24 7 13
+f 26 18 9
+f 26 4 18
+f 26 9 3
+f 27 18 4
+f 29 3 15
+f 30 24 5
+f 30 7 24
+f 33 23 16
+f 33 2 23
+f 34 13 17
+f 34 17 24
+f 35 5 17
+f 35 17 25
+f 35 25 31
+f 35 31 20
+f 35 20 5
+f 36 25 17
+f 36 17 12
+f 37 4 26
+f 37 27 4
+f 37 19 27
+f 38 27 14
+f 39 12 18
+f 39 18 27
+f 39 25 36
+f 39 38 25
+f 39 27 38
+f 40 27 19
+f 40 14 27
+f 41 11 7
+f 43 29 15
+f 44 30 5
+f 44 5 20
+f 45 21 2
+f 48 46 32
+f 48 45 46
+f 48 47 19
+f 49 33 16
+f 49 2 33
+f 49 45 2
+f 49 46 45
+f 50 34 24
+f 50 24 13
+f 50 13 34
+f 51 37 29
+f 51 19 37
+f 51 43 19
+f 51 29 43
+f 52 37 26
+f 52 26 3
+f 52 3 29
+f 52 29 37
+f 53 31 25
+f 53 25 38
+f 53 38 14
+f 53 14 31
+f 54 39 36
+f 54 36 12
+f 54 12 39
+f 55 41 28
+f 55 11 41
+f 55 46 11
+f 56 28 41
+f 56 44 42
+f 56 42 28
+f 57 20 31
+f 57 44 20
+f 57 42 44
+f 58 31 14
+f 58 14 40
+f 58 28 42
+f 58 57 31
+f 58 42 57
+f 59 7 30
+f 59 30 44
+f 59 44 56
+f 59 56 41
+f 59 41 7
+f 60 16 11
+f 60 11 46
+f 60 49 16
+f 60 46 49
+f 61 32 46
+f 61 28 47
+f 61 48 32
+f 61 47 48
+f 61 55 28
+f 61 46 55
+f 62 47 40
+f 62 40 19
+f 62 19 47
+f 63 47 28
+f 63 40 47
+f 63 58 40
+f 63 28 58
+f 64 15 21
+f 64 21 45
+f 64 45 48
+f 64 43 15
+f 64 48 19
+f 64 19 43
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl009.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl009.obj
new file mode 100644
index 0000000000000000000000000000000000000000..c5cc8207211a96861f9c62b7862ed61b6a3419a9
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl009.obj
@@ -0,0 +1,188 @@
+v -0.058821131 -0.013337255 0.048841554
+v -0.043913669 -0.030990829 0.024911154
+v -0.052544305 -0.015298764 0.026872662
+v -0.053020830 -0.015319332 0.026458239
+v -0.054505813 -0.030990829 0.047664649
+v -0.046267479 -0.027460114 0.024518852
+v -0.058821131 -0.013337255 0.044526236
+v -0.058036528 -0.013337255 0.045703141
+v -0.050975098 -0.030990829 0.048449252
+v -0.043913669 -0.030990829 0.024518852
+v -0.052544305 -0.015298764 0.026480361
+v -0.050582797 -0.025890908 0.024518852
+v -0.061174941 -0.014514160 0.047664649
+v -0.060807531 -0.013250732 0.046973295
+v -0.050498511 -0.031598719 0.046003257
+v -0.044305971 -0.030990829 0.026480361
+v -0.054898115 -0.029813924 0.048841554
+v -0.058226093 -0.013383550 0.044476446
+v -0.048108272 -0.025683444 0.024437322
+v -0.047052082 -0.031383131 0.024518852
+v -0.057251925 -0.023537098 0.043349331
+v -0.055682718 -0.016867970 0.028441869
+v -0.061174941 -0.013337255 0.048841554
+v -0.060782639 -0.013337255 0.045310839
+v -0.053721210 -0.031383131 0.048841554
+v -0.050190495 -0.030990829 0.046095442
+v -0.044181577 -0.031570647 0.024830972
+v -0.058036528 -0.024321701 0.048056951
+v -0.047444384 -0.025890908 0.024518852
+v -0.050582797 -0.025498606 0.024518852
+v -0.049405892 -0.029813924 0.028441869
+v -0.044107242 -0.031518618 0.024460918
+v -0.052544305 -0.024714003 0.028834170
+v -0.060390338 -0.017652573 0.047272347
+v -0.055682718 -0.016475669 0.028441869
+v -0.060782639 -0.013729557 0.045310839
+v -0.061174941 -0.013729557 0.046880046
+v -0.051232376 -0.031563206 0.048372252
+v -0.054113511 -0.031383131 0.047272347
+v -0.054505813 -0.030990829 0.048449252
+v -0.051367400 -0.030598527 0.048841554
+v -0.046659780 -0.031383131 0.034326393
+v -0.044305971 -0.031383131 0.026480361
+v -0.057644226 -0.024714003 0.046880046
+v -0.058428829 -0.022360193 0.048841554
+v -0.055290416 -0.017652573 0.028049567
+v -0.053328908 -0.028637019 0.039034013
+v -0.049405892 -0.027852416 0.024518852
+v -0.055682718 -0.018829478 0.030795679
+v -0.060782639 -0.016083367 0.048449252
+v -0.059213433 -0.016475669 0.041387823
+v -0.060782639 -0.015298764 0.046487744
+v -0.049798193 -0.031383131 0.033149488
+v -0.054113511 -0.030990829 0.048841554
+v -0.051400524 -0.031405235 0.048529680
+v -0.058036528 -0.024321701 0.047664649
+v -0.054113511 -0.021967891 0.029226472
+v -0.057644226 -0.024321701 0.048841554
+v -0.053328908 -0.030990829 0.043741633
+v -0.051759702 -0.025106305 0.026872662
+v -0.056075020 -0.023929400 0.039818616
+v -0.059605734 -0.017652573 0.044133934
+v -0.061174941 -0.014514160 0.048449252
+v -0.061174941 -0.014121859 0.048841554
+f 8 1 2
+f 8 2 3
+f 10 3 2
+f 11 10 6
+f 11 3 10
+f 11 4 7
+f 11 8 3
+f 14 8 7
+f 14 1 8
+f 16 2 1
+f 18 11 7
+f 18 7 8
+f 18 8 11
+f 19 6 10
+f 19 4 11
+f 23 1 14
+f 23 17 1
+f 24 14 7
+f 24 7 4
+f 24 23 14
+f 25 1 17
+f 26 16 1
+f 26 1 9
+f 27 15 20
+f 29 19 11
+f 29 11 6
+f 29 6 19
+f 30 19 12
+f 30 4 19
+f 32 20 19
+f 32 19 10
+f 32 27 20
+f 32 10 2
+f 35 24 4
+f 35 22 24
+f 36 24 22
+f 37 13 23
+f 37 23 24
+f 37 24 36
+f 38 26 9
+f 39 38 25
+f 39 15 38
+f 40 39 25
+f 40 5 39
+f 40 28 5
+f 40 17 28
+f 41 9 1
+f 41 1 25
+f 41 38 9
+f 42 16 26
+f 42 38 15
+f 42 26 38
+f 43 2 16
+f 43 16 42
+f 43 42 15
+f 43 15 27
+f 43 32 2
+f 43 27 32
+f 45 17 23
+f 46 30 12
+f 46 22 35
+f 46 35 4
+f 46 4 30
+f 48 20 31
+f 48 12 19
+f 48 19 20
+f 48 47 12
+f 50 34 28
+f 50 28 45
+f 51 36 22
+f 52 34 13
+f 52 13 37
+f 52 51 34
+f 52 37 36
+f 52 36 51
+f 53 20 15
+f 53 15 39
+f 53 31 20
+f 54 40 25
+f 54 25 17
+f 54 17 40
+f 55 41 25
+f 55 25 38
+f 55 38 41
+f 56 44 5
+f 56 5 28
+f 56 28 34
+f 56 34 21
+f 56 21 44
+f 57 21 49
+f 57 46 12
+f 57 49 22
+f 57 22 46
+f 58 45 28
+f 58 28 17
+f 58 17 45
+f 59 5 47
+f 59 31 53
+f 59 53 39
+f 59 39 5
+f 59 48 31
+f 59 47 48
+f 60 47 33
+f 60 12 47
+f 60 57 12
+f 60 33 57
+f 61 33 47
+f 61 57 33
+f 61 44 21
+f 61 21 57
+f 61 47 5
+f 61 5 44
+f 62 22 49
+f 62 51 22
+f 62 34 51
+f 62 49 21
+f 62 21 34
+f 63 23 13
+f 63 13 34
+f 63 34 50
+f 64 50 45
+f 64 45 23
+f 64 63 50
+f 64 23 63
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl010.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl010.obj
new file mode 100644
index 0000000000000000000000000000000000000000..ac8126a521de69a5fc9500be637ac0ab5caa30b4
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl010.obj
@@ -0,0 +1,146 @@
+v 0.040823485 -0.016475669 0.007257580
+v 0.041427321 -0.021670897 0.007114596
+v 0.035331262 -0.014121859 -0.000196151
+v 0.026322282 -0.014127541 0.001350427
+v 0.023966946 -0.030622597 0.003081220
+v 0.037321324 -0.014137313 0.003408776
+v 0.041215786 -0.017260272 0.003334564
+v 0.038861977 -0.020790987 0.007257580
+v 0.023954514 -0.030598527 -0.000196151
+v 0.034938960 -0.014121859 0.003726866
+v 0.041408617 -0.016557803 0.007146663
+v 0.041054728 -0.015644391 0.003553222
+v 0.029839039 -0.029421623 0.004511469
+v 0.026317691 -0.014122484 0.003045246
+v 0.034154357 -0.014121859 -0.000196151
+v 0.032192849 -0.021575590 -0.000196151
+v 0.030231341 -0.029421623 0.001765357
+v 0.037292770 -0.014121859 0.000980754
+v 0.040823485 -0.015691065 0.006472977
+v 0.041331178 -0.015740215 0.006406604
+v 0.039074290 -0.021687820 0.007135652
+v 0.040431183 -0.017260272 0.007257580
+v 0.024335752 -0.025105183 0.003053541
+v 0.023954514 -0.029029321 -0.000196151
+v 0.033762055 -0.018437177 -0.000196151
+v 0.040431183 -0.021575590 0.004511469
+v 0.030346773 -0.029519985 0.004244733
+v 0.027877531 -0.027460114 -0.000196151
+v 0.040823485 -0.016475669 0.002942262
+v 0.023169911 -0.029029321 0.000588452
+v 0.040823485 -0.018437177 0.003334564
+v 0.041215786 -0.021183288 0.005296072
+v 0.041215786 -0.021575590 0.005688374
+v 0.025529912 -0.030605297 0.003289920
+v 0.025916023 -0.029421623 -0.000196151
+v 0.038469675 -0.021575590 0.002942262
+v 0.035331262 -0.014514160 -0.000196151
+v 0.023178553 -0.029039104 0.001079663
+v 0.023169911 -0.029421623 0.000588452
+v 0.041215786 -0.018829478 0.003726866
+v 0.025131419 -0.030598527 0.000196151
+v 0.029446737 -0.029421623 0.001373056
+v 0.029054436 -0.025890908 -0.000196151
+v 0.039646580 -0.021575590 0.003726866
+v 0.023947651 -0.029413945 0.003047272
+v 0.023175221 -0.029427633 0.001041523
+v 0.039646580 -0.020790987 0.003334564
+v 0.026308324 -0.030598527 0.002549961
+v 0.024346816 -0.030598527 -0.000196151
+v 0.026237141 -0.030529208 0.001317927
+f 8 2 1
+f 11 1 2
+f 11 2 7
+f 13 8 5
+f 15 3 9
+f 15 4 14
+f 15 14 10
+f 15 10 3
+f 16 9 3
+f 18 10 6
+f 18 3 10
+f 19 6 10
+f 19 14 1
+f 19 10 14
+f 20 11 7
+f 20 7 12
+f 20 6 19
+f 20 18 6
+f 20 12 18
+f 20 19 1
+f 20 1 11
+f 21 13 2
+f 21 2 8
+f 21 8 13
+f 22 14 8
+f 22 8 1
+f 22 1 14
+f 23 8 14
+f 24 15 9
+f 24 4 15
+f 25 16 3
+f 27 17 2
+f 27 2 13
+f 28 9 16
+f 29 18 12
+f 29 12 7
+f 29 3 18
+f 30 14 4
+f 30 4 24
+f 30 24 9
+f 31 16 25
+f 31 29 7
+f 33 2 17
+f 33 17 26
+f 33 32 2
+f 33 26 32
+f 34 27 13
+f 34 13 5
+f 35 9 28
+f 36 17 28
+f 37 25 3
+f 37 3 29
+f 37 31 25
+f 37 29 31
+f 38 23 14
+f 38 14 30
+f 39 30 9
+f 39 9 5
+f 40 7 2
+f 40 2 32
+f 40 31 7
+f 40 32 26
+f 42 35 28
+f 42 28 17
+f 43 36 28
+f 43 28 16
+f 43 16 36
+f 44 26 17
+f 44 17 36
+f 44 40 26
+f 45 38 5
+f 45 23 38
+f 45 5 8
+f 45 8 23
+f 46 38 30
+f 46 30 39
+f 46 39 5
+f 46 5 38
+f 47 31 40
+f 47 36 16
+f 47 16 31
+f 47 44 36
+f 47 40 44
+f 48 34 5
+f 48 5 41
+f 48 27 34
+f 48 17 27
+f 49 9 35
+f 49 35 42
+f 49 42 17
+f 49 17 41
+f 49 41 5
+f 49 5 9
+f 50 48 41
+f 50 41 17
+f 50 17 48
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl012.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl012.obj
new file mode 100644
index 0000000000000000000000000000000000000000..de134b91b869315460c501b858842420abf3e4af
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl012.obj
@@ -0,0 +1,188 @@
+v 0.014931577 0.057277039 0.048841554
+v 0.015834079 0.054812100 0.047982060
+v 0.004670384 0.050259400 0.012446216
+v -0.006997945 0.050252120 0.010089541
+v -0.009391125 0.056492436 0.046095442
+v -0.009391125 0.048254102 0.013534406
+v -0.001937394 0.060023151 0.046880046
+v 0.015716180 0.054138626 0.035503298
+v -0.007828886 0.058492029 0.048647770
+v 0.011008560 0.049823308 0.025303456
+v 0.002770419 0.047864192 0.010003083
+v -0.010960331 0.055315531 0.030795679
+v 0.015323878 0.053354023 0.044133934
+v 0.001985623 0.050607911 0.010003692
+v 0.015716180 0.057277039 0.047664649
+v 0.014146973 0.054923229 0.048841554
+v 0.005908639 0.059630849 0.048449252
+v 0.011008560 0.052569420 0.024518852
+v -0.007068560 0.048082903 0.009947392
+v 0.001593321 0.047861800 0.011572898
+v 0.005913356 0.048704744 0.014304161
+v -0.011011730 0.052870398 0.031112474
+v -0.009391125 0.051000213 0.012749803
+v -0.009383232 0.058774916 0.046504955
+v 0.014146973 0.051392515 0.035503298
+v 0.015716180 0.053354023 0.044133934
+v 0.002731114 0.049993425 0.010060249
+v 0.015716180 0.057277039 0.048449252
+v 0.015860592 0.054483027 0.046788314
+v -0.008264558 0.057571811 0.047983002
+v 0.014539275 0.054923229 0.048841554
+v 0.001201019 0.059238547 0.048841554
+v 0.014931577 0.054530928 0.048056951
+v 0.005124036 0.059630849 0.047664649
+v 0.011070146 0.050013971 0.024467369
+v 0.002770226 0.050607911 0.010395993
+v 0.014931577 0.054138626 0.034326393
+v 0.004731734 0.048254102 0.013926708
+v -0.007037315 0.047861800 0.010788295
+v 0.004735563 0.048301459 0.012345449
+v 0.005908639 0.049823308 0.014319010
+v -0.010965562 0.052600048 0.030003391
+v -0.009406012 0.048359454 0.012330675
+v -0.007037315 0.052177118 0.015888216
+v -0.007821918 0.059630849 0.048449252
+v -0.009409823 0.056994230 0.046460276
+v -0.010960331 0.055315531 0.031187980
+v 0.010616259 0.049823308 0.025303456
+v 0.014584019 0.051531038 0.034288988
+v 0.009439354 0.058846246 0.048841554
+v 0.015786923 0.053573037 0.043290192
+v -0.005463524 0.057814980 0.048505999
+v 0.015323878 0.054138626 0.046880046
+v -0.000368187 0.058846246 0.042564728
+v 0.000416416 0.060023151 0.047664649
+v 0.005908639 0.059630849 0.048056951
+v 0.015323878 0.054530928 0.036287901
+v -0.010929045 0.054640491 0.030079298
+v -0.009355458 0.050416000 0.012407116
+v -0.008606522 0.051392515 0.013534406
+v -0.004683505 0.059630849 0.045703141
+v -0.004683505 0.054923229 0.026480361
+v -0.008214220 0.059630849 0.048056951
+v 0.014539275 0.051392515 0.035503298
+f 19 14 11
+f 19 4 14
+f 22 13 5
+f 22 6 13
+f 25 13 6
+f 26 13 25
+f 27 11 14
+f 29 8 15
+f 29 2 26
+f 29 28 2
+f 29 15 28
+f 30 5 16
+f 31 1 16
+f 31 28 1
+f 31 2 28
+f 32 16 1
+f 32 9 16
+f 33 16 5
+f 33 31 16
+f 33 2 31
+f 35 18 8
+f 36 18 3
+f 36 27 14
+f 36 3 27
+f 37 8 18
+f 38 20 11
+f 38 10 20
+f 39 19 11
+f 39 11 20
+f 39 20 6
+f 40 27 3
+f 40 11 27
+f 40 38 11
+f 40 10 38
+f 40 35 10
+f 40 21 35
+f 41 3 18
+f 41 35 21
+f 41 18 35
+f 41 40 3
+f 41 21 40
+f 42 22 12
+f 42 6 22
+f 43 42 23
+f 43 6 42
+f 43 39 6
+f 43 19 39
+f 45 9 32
+f 45 32 17
+f 46 24 22
+f 46 22 5
+f 46 5 30
+f 46 30 9
+f 46 9 24
+f 47 24 12
+f 47 12 22
+f 47 22 24
+f 48 25 6
+f 48 10 25
+f 48 20 10
+f 48 6 20
+f 49 35 8
+f 49 10 35
+f 50 1 28
+f 50 32 1
+f 50 17 32
+f 50 28 15
+f 51 29 26
+f 51 8 29
+f 51 49 8
+f 51 26 49
+f 52 30 16
+f 52 16 9
+f 52 9 30
+f 53 33 5
+f 53 5 13
+f 53 13 26
+f 53 26 2
+f 53 2 33
+f 54 34 14
+f 54 14 7
+f 55 45 17
+f 55 7 45
+f 55 54 7
+f 55 34 54
+f 56 15 37
+f 56 37 18
+f 56 18 36
+f 56 36 14
+f 56 14 34
+f 56 50 15
+f 56 17 50
+f 56 55 17
+f 56 34 55
+f 57 37 15
+f 57 15 8
+f 57 8 37
+f 58 42 12
+f 58 12 23
+f 58 23 42
+f 59 43 23
+f 59 23 4
+f 59 4 19
+f 59 19 43
+f 60 23 12
+f 60 12 44
+f 60 4 23
+f 60 44 14
+f 60 14 4
+f 61 44 12
+f 62 7 14
+f 62 14 44
+f 62 61 7
+f 62 44 61
+f 63 24 9
+f 63 9 45
+f 63 12 24
+f 63 61 12
+f 63 45 7
+f 63 7 61
+f 64 49 26
+f 64 26 25
+f 64 25 10
+f 64 10 49
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl013.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl013.obj
new file mode 100644
index 0000000000000000000000000000000000000000..6c6fa715be29f4940bc762ad17e2885c6414d92c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl013.obj
@@ -0,0 +1,188 @@
+v -0.032536921 -0.049429006 0.048841554
+v -0.032144620 -0.052175117 0.048841554
+v -0.035421823 -0.044514080 0.037015334
+v -0.052152003 -0.025498606 0.037072505
+v -0.053721210 -0.031775432 0.048841554
+v -0.051367400 -0.025498606 0.038249410
+v -0.032144620 -0.048252101 0.043349331
+v -0.043913669 -0.041190672 0.039034013
+v -0.050975098 -0.025890908 0.037072505
+v -0.050975098 -0.031383131 0.048841554
+v -0.052311337 -0.025565283 0.039775906
+v -0.032144620 -0.049429006 0.048056951
+v -0.032536921 -0.050998213 0.043349331
+v -0.043129066 -0.044721386 0.048449252
+v -0.051759702 -0.030206226 0.037072505
+v -0.051565629 -0.025581559 0.037019369
+v -0.035283033 -0.044329085 0.038641711
+v -0.050975098 -0.025890908 0.037464806
+v -0.051759702 -0.025890908 0.040603219
+v -0.051759702 -0.025498606 0.039818616
+v -0.053721210 -0.031775432 0.046880046
+v -0.032294307 -0.050153227 0.048733430
+v -0.031949098 -0.051849902 0.047364790
+v -0.032144620 -0.048252101 0.043741633
+v -0.035218314 -0.046967547 0.037103104
+v -0.043521367 -0.044329085 0.048841554
+v -0.038421446 -0.048644403 0.047272347
+v -0.052152003 -0.029421623 0.037072505
+v -0.046659780 -0.037267655 0.037072505
+v -0.053721210 -0.031383131 0.048841554
+v -0.032144620 -0.048644403 0.045310839
+v -0.047052082 -0.040798370 0.046880046
+v -0.031974854 -0.050715837 0.043429596
+v -0.032536921 -0.052175117 0.047272347
+v -0.038421446 -0.048644403 0.048449252
+v -0.036067636 -0.047075196 0.037072505
+v -0.052152003 -0.029813924 0.037464806
+v -0.050190495 -0.032560036 0.037072505
+v -0.052348345 -0.025973071 0.040550590
+v -0.031950774 -0.051068086 0.044617886
+v -0.049013590 -0.038444560 0.048841554
+v -0.043129066 -0.044721386 0.048056951
+v -0.045482875 -0.039621465 0.039034013
+v -0.040382954 -0.047075196 0.048449252
+v -0.036852239 -0.049429006 0.048841554
+v -0.032536921 -0.052175117 0.048841554
+v -0.037244541 -0.049429006 0.047664649
+v -0.035675334 -0.047467498 0.037464806
+v -0.038813748 -0.045113688 0.037072505
+v -0.052152003 -0.030598527 0.039034013
+v -0.053721210 -0.032167734 0.047664649
+v -0.046267479 -0.039621465 0.041387823
+v -0.032536921 -0.051390514 0.044526236
+v -0.047836685 -0.040013767 0.048449252
+v -0.045875177 -0.041190672 0.044526236
+v -0.045875177 -0.038444560 0.037464806
+v -0.040382954 -0.047075196 0.048056951
+v -0.039990653 -0.047075196 0.048841554
+v -0.034498430 -0.050998213 0.048841554
+v -0.033321525 -0.051782816 0.048449252
+v -0.036852239 -0.047467498 0.039818616
+v -0.052544305 -0.033736941 0.048841554
+v -0.047052082 -0.037267655 0.038249410
+v -0.047836685 -0.039621465 0.046487744
+f 5 2 1
+f 10 5 1
+f 16 9 3
+f 16 4 6
+f 16 6 9
+f 17 7 3
+f 17 3 9
+f 18 9 6
+f 18 17 9
+f 19 10 1
+f 19 1 12
+f 20 6 4
+f 20 4 11
+f 20 11 19
+f 20 19 12
+f 21 11 4
+f 22 12 1
+f 22 1 2
+f 22 2 12
+f 23 12 2
+f 24 18 6
+f 24 7 17
+f 24 17 18
+f 25 3 7
+f 26 2 5
+f 28 16 15
+f 28 4 16
+f 28 21 4
+f 29 16 3
+f 30 5 10
+f 30 10 19
+f 30 21 5
+f 30 11 21
+f 31 20 12
+f 31 6 20
+f 31 24 6
+f 31 12 23
+f 33 7 24
+f 33 25 7
+f 33 13 25
+f 34 23 2
+f 36 3 25
+f 37 28 15
+f 37 21 28
+f 38 15 16
+f 38 16 29
+f 39 30 19
+f 39 19 11
+f 39 11 30
+f 40 31 23
+f 40 24 31
+f 40 33 24
+f 40 23 34
+f 41 26 5
+f 44 35 26
+f 44 26 14
+f 44 27 35
+f 44 14 42
+f 45 2 26
+f 46 34 2
+f 46 2 45
+f 47 35 27
+f 47 27 34
+f 48 36 25
+f 48 25 13
+f 49 29 3
+f 49 3 36
+f 50 21 37
+f 50 37 15
+f 50 15 38
+f 51 50 38
+f 51 21 50
+f 51 5 21
+f 52 43 32
+f 52 29 43
+f 53 40 34
+f 53 13 33
+f 53 33 40
+f 53 48 13
+f 54 26 41
+f 54 14 26
+f 54 42 14
+f 54 32 42
+f 54 51 32
+f 54 41 51
+f 55 42 32
+f 55 8 49
+f 55 49 27
+f 55 27 42
+f 55 43 8
+f 55 32 43
+f 56 43 29
+f 56 8 43
+f 56 49 8
+f 56 29 49
+f 57 44 42
+f 57 42 27
+f 57 27 44
+f 58 45 26
+f 58 26 35
+f 58 35 45
+f 59 46 45
+f 59 45 35
+f 60 47 34
+f 60 34 46
+f 60 46 59
+f 60 59 35
+f 60 35 47
+f 61 36 48
+f 61 49 36
+f 61 27 49
+f 61 48 53
+f 61 53 34
+f 61 34 27
+f 62 51 41
+f 62 41 5
+f 62 5 51
+f 63 38 29
+f 63 29 52
+f 64 51 38
+f 64 63 52
+f 64 38 63
+f 64 52 32
+f 64 32 51
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl014.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl014.obj
new file mode 100644
index 0000000000000000000000000000000000000000..793bda50ca37727149d4423c3ed7848223fbcc34
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl014.obj
@@ -0,0 +1,188 @@
+v 0.052592534 0.022754495 0.048841554
+v 0.056515550 0.008239335 0.048841554
+v 0.049454121 0.017654574 0.027657265
+v 0.043961898 0.027069813 0.028049567
+v 0.042392691 0.030208226 0.036287901
+v 0.044354199 0.030600528 0.031187980
+v 0.054554042 0.018046876 0.047664649
+v 0.049846422 0.023146797 0.048841554
+v 0.052200232 0.005885525 0.039818616
+v 0.045923406 0.026285210 0.028049567
+v 0.045923406 0.030600528 0.037072505
+v 0.041304034 0.030673462 0.031160034
+v 0.054946344 0.016085367 0.048841554
+v 0.054946344 0.008631636 0.040210918
+v 0.054161740 0.008631636 0.048841554
+v 0.050238724 0.008631636 0.033149488
+v 0.045531104 0.027069813 0.028441869
+v 0.048669517 0.024716003 0.034718695
+v 0.051718343 0.023510439 0.048638224
+v 0.043002833 0.030732292 0.037009615
+v 0.041215786 0.030208226 0.031972583
+v 0.053769439 0.020400685 0.048449252
+v 0.056515550 0.009808541 0.047664649
+v 0.051023327 0.016869971 0.032364885
+v 0.054946344 0.005885525 0.039818616
+v 0.054161740 0.008239335 0.048449252
+v 0.046315708 0.018439177 0.028049567
+v 0.044878458 0.026921687 0.028123897
+v 0.047492613 0.023146797 0.028049567
+v 0.052592534 0.023146797 0.047272347
+v 0.048277216 0.026285210 0.036287901
+v 0.042784993 0.030208226 0.037072505
+v 0.050070208 0.023631840 0.048607467
+v 0.042392691 0.030600528 0.036287901
+v 0.041894337 0.030837103 0.033451141
+v 0.041608088 0.030208226 0.033541790
+v 0.050238724 0.008631636 0.033541790
+v 0.054946344 0.016869971 0.048449252
+v 0.053769439 0.020008384 0.048841554
+v 0.055730947 0.012554653 0.046880046
+v 0.052200232 0.010985446 0.031580282
+v 0.054749511 0.005872378 0.040653384
+v 0.052279002 0.005891686 0.038229639
+v 0.056515550 0.008239335 0.046487744
+v 0.044276854 0.024110406 0.027958404
+v 0.051755697 0.009886239 0.031101055
+v 0.044746501 0.021969892 0.028049567
+v 0.047492613 0.024716003 0.030403377
+v 0.049454121 0.018046876 0.028049567
+v 0.052984835 0.022362194 0.048449252
+v 0.053769439 0.020400685 0.048056951
+v 0.054161740 0.018439177 0.046487744
+v 0.052592534 0.023146797 0.048449252
+v 0.045923406 0.030600528 0.036680203
+v 0.041443362 0.030788612 0.031900515
+v 0.055730947 0.013339256 0.047664649
+v 0.056123249 0.010985446 0.048841554
+v 0.054161740 0.018831479 0.048841554
+v 0.056515550 0.008631636 0.046487744
+v 0.051807931 0.012162351 0.031187980
+v 0.053769439 0.008239335 0.035503298
+v 0.053769439 0.009416240 0.036287901
+v 0.053377137 0.005885525 0.040603219
+v 0.054554042 0.005885525 0.038249410
+f 8 2 1
+f 10 3 4
+f 12 6 4
+f 13 1 2
+f 15 8 5
+f 15 2 8
+f 17 6 10
+f 19 8 1
+f 26 15 5
+f 26 5 9
+f 26 2 15
+f 28 17 10
+f 28 10 4
+f 28 4 6
+f 28 6 17
+f 29 3 10
+f 31 30 18
+f 32 5 8
+f 32 8 20
+f 33 19 11
+f 33 11 20
+f 33 20 8
+f 33 8 19
+f 34 32 20
+f 34 5 32
+f 35 20 11
+f 35 34 20
+f 36 21 9
+f 36 9 5
+f 36 34 21
+f 36 5 34
+f 37 21 16
+f 37 16 9
+f 37 9 21
+f 38 7 22
+f 39 1 13
+f 39 38 22
+f 40 24 7
+f 42 25 2
+f 43 9 16
+f 43 42 9
+f 44 23 2
+f 44 2 25
+f 45 4 3
+f 45 3 27
+f 45 12 4
+f 46 27 3
+f 46 16 27
+f 46 3 41
+f 47 27 16
+f 47 16 21
+f 47 45 27
+f 47 21 12
+f 47 12 45
+f 48 29 10
+f 48 18 29
+f 48 31 18
+f 48 10 31
+f 49 24 3
+f 49 3 29
+f 49 29 18
+f 49 18 30
+f 50 39 22
+f 50 1 39
+f 51 22 7
+f 51 7 30
+f 51 50 22
+f 51 30 50
+f 52 30 7
+f 52 7 24
+f 52 49 30
+f 52 24 49
+f 53 19 1
+f 53 50 30
+f 53 1 50
+f 53 30 11
+f 53 11 19
+f 54 11 30
+f 54 30 31
+f 54 35 11
+f 54 6 35
+f 54 31 10
+f 54 10 6
+f 55 35 6
+f 55 6 12
+f 55 12 21
+f 55 21 34
+f 55 34 35
+f 56 38 23
+f 56 7 38
+f 56 40 7
+f 57 38 13
+f 57 13 2
+f 57 2 23
+f 57 23 38
+f 58 39 13
+f 58 13 38
+f 58 38 39
+f 59 14 40
+f 59 23 44
+f 59 44 25
+f 59 56 23
+f 59 40 56
+f 60 41 3
+f 60 3 24
+f 60 24 41
+f 61 46 41
+f 62 40 14
+f 62 61 41
+f 62 14 61
+f 62 41 24
+f 62 24 40
+f 63 26 9
+f 63 9 42
+f 63 42 2
+f 63 2 26
+f 64 25 42
+f 64 42 43
+f 64 43 16
+f 64 16 46
+f 64 46 61
+f 64 61 14
+f 64 59 25
+f 64 14 59
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl015.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl015.obj
new file mode 100644
index 0000000000000000000000000000000000000000..139670d0d128168cb87d8c371b1288b80e633e73
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl015.obj
@@ -0,0 +1,188 @@
+v -0.049013590 0.013731558 0.010788295
+v -0.043913669 -0.006275826 0.003726866
+v -0.044305971 0.013731558 0.004511469
+v -0.044698272 0.013339256 0.001373056
+v -0.051759702 -0.005883524 0.005688374
+v -0.044305971 0.013339256 0.001373056
+v -0.048228987 0.013731558 0.003334564
+v -0.044305971 0.012946954 0.004511469
+v -0.049270353 -0.008688896 0.007062471
+v -0.044698272 -0.007452731 0.000980754
+v -0.044281181 0.013719572 0.001855627
+v -0.052936607 0.008239335 0.010395993
+v -0.049013590 0.013339256 0.010788295
+v -0.048621289 -0.008629636 0.002942262
+v -0.044444583 -0.008657131 0.003428935
+v -0.044139859 -0.007401556 0.001182165
+v -0.044698272 0.007062430 0.000980754
+v -0.043913669 -0.005491223 0.003726866
+v -0.045482875 0.013731558 0.001765357
+v -0.051367400 0.008631636 0.005688374
+v -0.051759702 0.013731558 0.010003692
+v -0.051557541 -0.008601945 0.007350296
+v -0.044698272 -0.008629636 0.004119167
+v -0.050492374 0.007494575 0.010732570
+v -0.045482875 -0.008629636 0.001373056
+v -0.044263597 -0.008617910 0.001505187
+v -0.044172533 0.007037659 0.001344649
+v -0.047444384 0.003924017 0.001765357
+v -0.043924327 -0.005490714 0.003273659
+v -0.052152003 0.004316318 0.006472977
+v -0.051457246 0.013671333 0.010879076
+v -0.052152003 0.007062430 0.006865279
+v -0.050582797 0.013731558 0.006472977
+v -0.052152003 -0.007060429 0.006865279
+v -0.051759702 -0.008629636 0.006472977
+v -0.049798193 0.009023938 0.010788295
+v -0.052708298 0.007408440 0.010856977
+v -0.043925439 -0.006275265 0.003267306
+v -0.045090574 0.004708620 0.000980754
+v -0.045875177 0.012946954 0.001765357
+v -0.050582797 0.001570207 0.003726866
+v -0.047444384 -0.001960508 0.001765357
+v -0.052152003 0.012162351 0.010788295
+v -0.052936607 0.007847033 0.010003692
+v -0.051759702 0.006670128 0.006080675
+v -0.049798193 0.011770049 0.004511469
+v -0.050975098 0.011770049 0.006080675
+v -0.052152003 -0.004706619 0.006472977
+v -0.050975098 -0.008629636 0.005296072
+v -0.052936607 0.008239335 0.010788295
+v -0.047836685 0.012946954 0.002942262
+v -0.047052082 0.013731558 0.002549961
+v -0.047052082 0.006277826 0.001765357
+v -0.051367400 -0.004314318 0.004903770
+v -0.045875177 -0.007452731 0.001373056
+v -0.048228987 0.002747112 0.002157659
+v -0.049405892 -0.004706619 0.002942262
+v -0.045090574 -0.006275826 0.000980754
+v -0.052152003 0.012162351 0.010395993
+v -0.051759702 0.002747112 0.005688374
+v -0.051367400 0.005100922 0.005296072
+v -0.049405892 0.012946954 0.004511469
+v -0.050975098 0.007062430 0.004903770
+v -0.050975098 0.012946954 0.006865279
+f 7 1 3
+f 8 2 3
+f 8 3 1
+f 13 8 1
+f 13 2 8
+f 17 4 6
+f 17 16 10
+f 18 11 3
+f 18 3 2
+f 19 7 3
+f 19 3 11
+f 19 11 6
+f 19 6 4
+f 21 1 7
+f 23 2 13
+f 23 15 2
+f 23 9 15
+f 24 22 9
+f 25 15 9
+f 25 9 14
+f 26 2 15
+f 26 15 25
+f 26 25 10
+f 26 10 16
+f 27 17 6
+f 27 16 17
+f 27 6 11
+f 27 11 18
+f 29 18 2
+f 29 27 18
+f 29 16 27
+f 31 1 21
+f 31 13 1
+f 33 21 7
+f 35 9 22
+f 35 22 34
+f 35 34 5
+f 36 23 13
+f 36 13 31
+f 36 24 9
+f 36 9 23
+f 37 22 24
+f 37 36 31
+f 37 24 36
+f 38 26 16
+f 38 2 26
+f 38 29 2
+f 38 16 29
+f 39 28 17
+f 39 17 10
+f 40 19 4
+f 40 4 17
+f 42 28 39
+f 43 31 21
+f 44 12 21
+f 44 32 30
+f 45 32 20
+f 45 30 32
+f 47 44 21
+f 47 32 44
+f 47 20 32
+f 47 46 20
+f 48 30 5
+f 48 5 34
+f 48 44 30
+f 48 34 44
+f 49 14 9
+f 49 9 35
+f 49 35 5
+f 50 34 22
+f 50 22 37
+f 50 44 34
+f 50 12 44
+f 50 43 12
+f 50 37 31
+f 50 31 43
+f 51 41 7
+f 52 7 19
+f 52 19 40
+f 52 51 7
+f 53 40 17
+f 53 17 28
+f 53 52 40
+f 53 28 51
+f 53 51 52
+f 54 49 5
+f 55 25 14
+f 56 28 42
+f 56 51 28
+f 56 41 51
+f 57 56 42
+f 57 41 56
+f 57 54 41
+f 57 14 49
+f 57 49 54
+f 57 55 14
+f 57 42 55
+f 58 42 39
+f 58 39 10
+f 58 55 42
+f 58 10 25
+f 58 25 55
+f 59 43 21
+f 59 21 12
+f 59 12 43
+f 60 30 45
+f 60 41 54
+f 60 54 5
+f 60 5 30
+f 61 60 45
+f 61 41 60
+f 62 33 7
+f 62 7 46
+f 62 47 33
+f 62 46 47
+f 63 20 46
+f 63 41 61
+f 63 61 45
+f 63 45 20
+f 63 46 7
+f 63 7 41
+f 64 47 21
+f 64 21 33
+f 64 33 47
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl016.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl016.obj
new file mode 100644
index 0000000000000000000000000000000000000000..902a57d13c9dda07856ca60b46a564ac702aaef9
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl016.obj
@@ -0,0 +1,188 @@
+v 0.027915264 0.037708140 0.011949949
+v 0.037074191 0.023632707 0.005840345
+v 0.029446737 0.035700449 0.004119167
+v 0.020816101 0.032954338 -0.000196151
+v 0.024343305 0.023538411 0.003202228
+v 0.035723563 0.023931400 0.002157659
+v 0.023490797 0.037552252 0.003140588
+v 0.036900468 0.030600528 0.007257580
+v 0.036508167 0.026285210 0.008434485
+v 0.020420849 0.027457969 0.003180318
+v 0.031408245 0.030208226 0.002157659
+v 0.029446737 0.022754495 -0.000196151
+v 0.027485229 0.038446561 0.005688374
+v 0.023954514 0.036877354 0.002157659
+v 0.036900468 0.024716003 0.002942262
+v 0.027476695 0.021970757 0.003117814
+v 0.027485229 0.037269656 0.011965200
+v 0.036664630 0.030425586 0.008508722
+v 0.020816101 0.033346639 0.003726866
+v 0.029446737 0.025500607 -0.000196151
+v 0.020429688 0.027467648 0.001167536
+v 0.030359775 0.022058718 0.000348831
+v 0.023660723 0.037806378 0.005831160
+v 0.030623642 0.037661957 0.008042184
+v 0.023562213 0.036877354 0.002157659
+v 0.027092927 0.036877354 0.003334564
+v 0.024739118 0.037661957 0.002942262
+v 0.036900468 0.026677512 0.003726866
+v 0.036774673 0.023465649 0.003180987
+v 0.033762055 0.028639020 0.002549961
+v 0.036508167 0.023539098 0.006080675
+v 0.027488928 0.021971954 0.001067681
+v 0.037055427 0.026389000 0.008337402
+v 0.020708754 0.033208193 0.000333956
+v 0.030231341 0.022754495 -0.000196151
+v 0.025523721 0.029815925 -0.000196151
+v 0.020816101 0.032169734 -0.000196151
+v 0.027092927 0.038446561 0.005296072
+v 0.027670370 0.037888555 0.011890369
+v 0.034938960 0.032562036 0.006472977
+v 0.027485229 0.038054259 0.004903770
+v 0.035723563 0.032169734 0.007649882
+v 0.026308324 0.035308148 0.002157659
+v 0.025916023 0.037661957 0.003334564
+v 0.036900468 0.028639020 0.005296072
+v 0.036900468 0.025892908 0.003334564
+v 0.036508167 0.023539098 0.002549961
+v 0.030240678 0.021969727 0.003094458
+v 0.035723563 0.025500607 0.002549961
+v 0.034546659 0.030992830 0.004511469
+v 0.032585150 0.030600528 0.002942262
+v 0.027092927 0.028246718 -0.000196151
+v 0.029839039 0.021969892 0.000196151
+v 0.024354602 0.023543440 0.001163761
+v 0.020426762 0.030603312 0.001074739
+v 0.029839039 0.024716003 -0.000196151
+v 0.021993006 0.032562036 -0.000196151
+v 0.026664807 0.038406698 0.005719909
+v 0.030623642 0.037661957 0.008434485
+v 0.033369754 0.033738941 0.006080675
+v 0.026700626 0.037661957 0.003726866
+v 0.035723563 0.032169734 0.007257580
+v 0.035331262 0.030992830 0.005296072
+v 0.034938960 0.027854416 0.002942262
+f 16 9 5
+f 17 9 1
+f 17 5 9
+f 17 10 5
+f 19 10 17
+f 20 12 4
+f 23 19 17
+f 25 14 4
+f 25 4 7
+f 27 25 7
+f 27 14 25
+f 29 15 2
+f 31 9 16
+f 32 16 5
+f 33 18 1
+f 33 1 9
+f 33 2 8
+f 33 8 18
+f 33 31 2
+f 33 9 31
+f 34 7 4
+f 34 23 7
+f 34 19 23
+f 35 12 20
+f 36 20 4
+f 37 4 12
+f 37 12 21
+f 38 27 7
+f 39 23 17
+f 39 17 1
+f 41 13 24
+f 41 38 13
+f 42 18 8
+f 43 14 26
+f 43 11 36
+f 43 30 11
+f 43 26 30
+f 44 26 14
+f 44 14 27
+f 44 27 38
+f 45 8 2
+f 45 2 28
+f 46 28 2
+f 46 2 15
+f 47 6 15
+f 47 15 29
+f 47 29 22
+f 47 35 6
+f 47 22 35
+f 48 29 2
+f 48 22 29
+f 48 2 31
+f 48 31 16
+f 49 15 6
+f 49 6 20
+f 49 20 30
+f 49 46 15
+f 49 30 46
+f 51 30 26
+f 51 26 3
+f 51 50 30
+f 51 3 50
+f 52 30 20
+f 52 11 30
+f 52 36 11
+f 52 20 36
+f 53 32 12
+f 53 22 48
+f 53 48 16
+f 53 16 32
+f 53 35 22
+f 53 12 35
+f 54 21 12
+f 54 12 32
+f 54 32 5
+f 54 5 10
+f 54 10 21
+f 55 34 4
+f 55 21 10
+f 55 10 19
+f 55 19 34
+f 55 37 21
+f 55 4 37
+f 56 35 20
+f 56 20 6
+f 56 6 35
+f 57 36 4
+f 57 4 14
+f 57 43 36
+f 57 14 43
+f 58 38 7
+f 58 7 23
+f 58 23 39
+f 58 39 13
+f 58 13 38
+f 59 24 13
+f 59 13 39
+f 59 39 1
+f 59 42 24
+f 59 1 18
+f 59 18 42
+f 60 24 40
+f 60 41 24
+f 60 3 41
+f 60 50 3
+f 61 41 3
+f 61 3 26
+f 61 26 44
+f 61 44 38
+f 61 38 41
+f 62 40 24
+f 62 24 42
+f 62 42 8
+f 62 8 40
+f 63 40 8
+f 63 8 45
+f 63 45 28
+f 63 28 50
+f 63 60 40
+f 63 50 60
+f 64 46 30
+f 64 28 46
+f 64 50 28
+f 64 30 50
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl017.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl017.obj
new file mode 100644
index 0000000000000000000000000000000000000000..114f5103bdd70852257e1659dc7a2c67b64e1d89
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl017.obj
@@ -0,0 +1,113 @@
+v 0.011023672 0.041243360 0.003954900
+v 0.011008560 0.040408069 0.004119167
+v 0.008262449 0.038054259 -0.000196151
+v -0.024298587 0.034915846 -0.000196151
+v -0.024320765 0.040060810 0.005243168
+v 0.008260721 0.033736513 0.003244229
+v 0.011008560 0.041192672 0.001373056
+v 0.008262507 0.033751260 0.001231950
+v -0.024295708 0.033345449 0.003231193
+v 0.003554829 0.033738941 0.003726866
+v -0.016871812 0.041274555 0.003942214
+v -0.004683505 0.040800371 -0.000196151
+v -0.023906285 0.033738941 -0.000196151
+v 0.010979258 0.040342243 0.001595780
+v -0.024425891 0.033526314 0.000501629
+v -0.016846629 0.033352240 0.001107294
+v -0.023513984 0.039623466 0.005296072
+v -0.016844639 0.033345772 0.003307004
+v -0.000760489 0.041192672 0.000196151
+v -0.024298587 0.040015767 0.002157659
+v -0.024394434 0.039818130 0.005067432
+v -0.024298587 0.033738941 -0.000196151
+v -0.023906285 0.033346639 0.000196151
+v -0.012137236 0.033738941 0.003726866
+v -0.016844856 0.041192672 0.001373056
+v 0.005908639 0.040015767 -0.000196151
+v -0.023608706 0.040208148 0.005070114
+v -0.005468108 0.041192672 0.000196151
+v -0.013314141 0.039623466 -0.000196151
+v 0.000416416 0.040800371 -0.000196151
+v 0.008262449 0.039231164 -0.000196151
+v -0.008606522 0.040408069 -0.000196151
+v -0.019198666 0.037661957 -0.000196151
+v 0.007477846 0.041192672 0.000980754
+v 0.003554829 0.040408069 -0.000196151
+v 0.007477846 0.039623466 -0.000196151
+v -0.009391125 0.040800371 0.000196151
+v -0.022337079 0.036092751 -0.000196151
+v -0.022337079 0.040015767 0.001765357
+f 5 2 1
+f 7 1 2
+f 10 6 2
+f 11 5 1
+f 12 3 4
+f 13 4 3
+f 13 3 8
+f 14 8 3
+f 14 3 7
+f 14 7 2
+f 14 2 6
+f 14 6 8
+f 16 8 6
+f 17 10 2
+f 17 2 5
+f 17 5 9
+f 18 6 10
+f 18 16 6
+f 19 11 1
+f 20 4 15
+f 21 15 9
+f 21 9 5
+f 21 20 15
+f 21 5 20
+f 22 15 4
+f 22 4 13
+f 23 9 15
+f 23 13 8
+f 23 8 16
+f 23 22 13
+f 23 15 22
+f 23 18 9
+f 23 16 18
+f 24 17 9
+f 24 10 17
+f 24 18 10
+f 24 9 18
+f 25 20 11
+f 26 3 12
+f 27 20 5
+f 27 5 11
+f 27 11 20
+f 28 19 12
+f 28 25 11
+f 28 11 19
+f 29 12 4
+f 30 26 12
+f 30 12 19
+f 31 7 3
+f 31 3 26
+f 32 28 12
+f 32 29 25
+f 32 12 29
+f 33 29 4
+f 33 25 29
+f 34 30 19
+f 34 7 30
+f 34 19 1
+f 34 1 7
+f 35 30 7
+f 35 7 26
+f 35 26 30
+f 36 31 26
+f 36 26 7
+f 36 7 31
+f 37 32 25
+f 37 25 28
+f 37 28 32
+f 38 33 4
+f 38 4 20
+f 38 20 33
+f 39 33 20
+f 39 20 25
+f 39 25 33
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl018.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl018.obj
new file mode 100644
index 0000000000000000000000000000000000000000..7a6b7bba6826c1ec4f8f656931cbe4d0f1322626
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl018.obj
@@ -0,0 +1,92 @@
+v 0.024342431 0.027455955 0.003105345
+v 0.032979836 -0.014122018 0.003260935
+v 0.032978002 0.015693446 0.000994542
+v 0.020429688 0.027467648 0.001167536
+v 0.005510623 -0.029415231 0.003487670
+v 0.024348813 0.027463991 0.001044097
+v 0.032982475 0.015692977 0.003205392
+v 0.009438522 0.004708457 0.003695517
+v 0.024346816 -0.029421623 0.000980754
+v 0.032982709 -0.014123886 0.001079495
+v 0.027479511 0.023537978 0.003119026
+v 0.020420849 0.027457969 0.003180318
+v 0.005518615 -0.021582229 0.001655803
+v 0.009831655 -0.003529714 0.004119167
+v 0.024335075 -0.029420432 0.003036341
+v 0.010616259 -0.029421623 0.000980754
+v 0.027504390 -0.025124315 0.001001711
+v 0.027487348 0.023540280 0.001030545
+v 0.009439354 0.004708620 0.002206000
+v 0.005515626 -0.021574794 0.003697069
+v 0.021208403 0.016085367 0.003726866
+v 0.009439354 -0.001175905 0.004119167
+v 0.014931577 -0.027460114 0.003726866
+v 0.005517772 -0.029423792 0.001458887
+v 0.027492595 -0.025106797 0.003107042
+v 0.009439354 0.001570207 0.001765357
+v 0.020816101 0.018439177 0.003726866
+v 0.009439354 -0.003529714 0.004119167
+v 0.009831655 -0.001175905 0.004119167
+v 0.012577767 -0.029421623 0.003726866
+v 0.017285387 -0.025106305 0.003726866
+v 0.020423800 0.018439177 0.003726866
+f 6 4 1
+f 10 7 2
+f 10 3 7
+f 11 6 1
+f 11 1 7
+f 12 1 4
+f 16 6 3
+f 16 3 9
+f 16 4 6
+f 17 9 3
+f 17 3 10
+f 17 10 2
+f 17 15 9
+f 18 11 7
+f 18 7 3
+f 18 3 6
+f 18 6 11
+f 19 12 4
+f 19 8 12
+f 20 13 5
+f 20 19 13
+f 20 8 19
+f 21 14 2
+f 21 2 7
+f 22 12 8
+f 22 8 20
+f 24 4 16
+f 24 5 13
+f 24 16 9
+f 25 17 2
+f 25 15 17
+f 25 23 15
+f 26 19 4
+f 26 13 19
+f 26 24 13
+f 26 4 24
+f 27 21 7
+f 27 7 1
+f 27 1 12
+f 28 22 20
+f 28 14 22
+f 29 22 14
+f 29 14 21
+f 29 27 22
+f 29 21 27
+f 30 15 23
+f 30 5 24
+f 30 24 9
+f 30 9 15
+f 30 20 5
+f 30 28 20
+f 30 23 14
+f 30 14 28
+f 31 2 14
+f 31 14 23
+f 31 25 2
+f 31 23 25
+f 32 27 12
+f 32 12 22
+f 32 22 27
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl022.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl022.obj
new file mode 100644
index 0000000000000000000000000000000000000000..0b6ae5bb7387da67239b6d6836e148af0bd3ae28
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl022.obj
@@ -0,0 +1,188 @@
+v -0.019198666 0.057669341 0.048841554
+v -0.016844856 0.055707833 0.048056951
+v -0.016844856 0.052961721 0.026480361
+v -0.029398508 0.044723387 0.014319010
+v -0.038658976 0.047665635 0.047880516
+v -0.020375571 0.045900292 0.014711311
+v -0.025867794 0.054923229 0.044526236
+v -0.016844856 0.058061643 0.046095442
+v -0.034890731 0.047861800 0.048841554
+v -0.016844856 0.050607911 0.028834170
+v -0.022337079 0.048254102 0.014711311
+v -0.027437000 0.054923229 0.048056951
+v -0.016835505 0.058031771 0.048054094
+v -0.019198666 0.055707833 0.048841554
+v -0.034890731 0.040408069 0.025695757
+v -0.020375571 0.045900292 0.014319010
+v -0.027437000 0.046292593 0.015888216
+v -0.017237158 0.057669341 0.044526236
+v -0.024690889 0.056100134 0.047664649
+v -0.034498430 0.051000213 0.048841554
+v -0.018414062 0.058061643 0.048056951
+v -0.019198666 0.054923229 0.048449252
+v -0.018349172 0.057350632 0.048623306
+v -0.039177343 0.045863780 0.043360746
+v -0.016844856 0.055315531 0.046880046
+v -0.016918915 0.052429593 0.026414259
+v -0.020270854 0.048704196 0.015974944
+v -0.022729380 0.047861800 0.014319010
+v -0.025475492 0.043546482 0.015103613
+v -0.023906285 0.047469498 0.014319010
+v -0.036067636 0.049823308 0.045703141
+v -0.016844856 0.057669341 0.044526236
+v -0.017237158 0.052961721 0.026480361
+v -0.025867794 0.055707833 0.047664649
+v -0.025083190 0.055315531 0.044918537
+v -0.025867794 0.055315531 0.048841554
+v -0.038029144 0.048254102 0.048841554
+v -0.020767872 0.057277039 0.048841554
+v -0.017237158 0.058061643 0.046095442
+v -0.035283033 0.045900292 0.043349331
+v -0.038029144 0.047861800 0.048841554
+v -0.035283033 0.043546482 0.024518852
+v -0.016844856 0.051392515 0.031972583
+v -0.034498430 0.047469498 0.047272347
+v -0.020278560 0.046420086 0.014386447
+v -0.016923081 0.050919103 0.027968439
+v -0.020767872 0.049038705 0.015888216
+v -0.025635127 0.043884729 0.014224904
+v -0.022729380 0.044723387 0.014319010
+v -0.025867794 0.046684895 0.014711311
+v -0.022337079 0.048646403 0.015888216
+v -0.030183112 0.045900292 0.019811233
+v -0.028613905 0.054138626 0.046880046
+v -0.038813748 0.048254102 0.047664649
+v -0.036067636 0.050215610 0.047272347
+v -0.025083190 0.055707833 0.046487744
+v -0.025867794 0.055707833 0.048056951
+v -0.024690889 0.056100134 0.048056951
+v -0.018414062 0.058061643 0.047664649
+v -0.019198666 0.057669341 0.046880046
+v -0.030449118 0.043369401 0.014766718
+v -0.035346525 0.040492565 0.024492049
+v -0.036852239 0.044331085 0.030011075
+v -0.034498430 0.047861800 0.048449252
+f 13 10 3
+f 14 1 9
+f 15 6 10
+f 16 10 6
+f 20 9 1
+f 21 13 8
+f 21 1 13
+f 22 14 9
+f 22 9 2
+f 22 2 14
+f 23 13 1
+f 23 1 14
+f 23 14 2
+f 23 2 13
+f 25 13 2
+f 26 3 10
+f 30 11 28
+f 32 18 8
+f 32 3 18
+f 32 13 3
+f 32 8 13
+f 33 18 3
+f 35 7 34
+f 36 20 1
+f 36 12 20
+f 37 9 20
+f 38 1 21
+f 38 36 1
+f 39 21 8
+f 39 8 18
+f 40 24 15
+f 40 9 24
+f 41 24 9
+f 41 5 24
+f 41 9 37
+f 43 25 15
+f 43 15 10
+f 43 10 13
+f 43 13 25
+f 44 25 2
+f 44 40 15
+f 44 15 25
+f 45 26 16
+f 45 3 26
+f 45 27 3
+f 45 28 11
+f 45 16 28
+f 46 26 10
+f 46 10 16
+f 46 16 26
+f 47 3 27
+f 47 33 3
+f 47 45 11
+f 47 27 45
+f 48 28 16
+f 48 30 28
+f 48 4 30
+f 49 16 6
+f 49 6 15
+f 49 15 29
+f 49 48 16
+f 49 29 48
+f 50 30 4
+f 50 4 17
+f 51 11 30
+f 51 7 35
+f 51 47 11
+f 51 33 47
+f 51 18 33
+f 51 30 50
+f 51 50 17
+f 51 17 7
+f 52 17 4
+f 53 7 17
+f 53 52 31
+f 53 17 52
+f 53 34 7
+f 53 12 34
+f 54 24 5
+f 54 41 37
+f 54 5 41
+f 54 37 20
+f 55 20 12
+f 55 53 31
+f 55 12 53
+f 55 54 20
+f 55 31 54
+f 56 35 34
+f 56 34 19
+f 57 34 12
+f 57 12 36
+f 57 36 38
+f 57 19 34
+f 58 38 21
+f 58 21 19
+f 58 57 38
+f 58 19 57
+f 59 19 21
+f 59 21 39
+f 60 39 18
+f 60 35 56
+f 60 59 39
+f 60 56 19
+f 60 19 59
+f 60 51 35
+f 60 18 51
+f 61 42 4
+f 61 4 48
+f 61 48 29
+f 62 15 24
+f 62 42 61
+f 62 61 29
+f 62 29 15
+f 63 4 42
+f 63 52 4
+f 63 31 52
+f 63 54 31
+f 63 24 54
+f 63 62 24
+f 63 42 62
+f 64 44 2
+f 64 2 9
+f 64 9 40
+f 64 40 44
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl023.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl023.obj
new file mode 100644
index 0000000000000000000000000000000000000000..0ed77cecfe5e65d5054832352a3bfdfa67508a9a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl023.obj
@@ -0,0 +1,98 @@
+v 0.020419237 0.030594118 0.003096065
+v 0.020413729 0.015691093 0.003347264
+v 0.020423800 0.020400685 0.000980754
+v -0.012924139 0.033746203 0.001144834
+v -0.012921840 0.018831479 0.004119167
+v 0.020426762 0.030603312 0.001074739
+v 0.020031498 0.022362194 0.003726866
+v -0.012921840 0.033738941 0.003726866
+v -0.014491389 0.000384018 0.002080714
+v -0.014491046 0.027854416 0.000980754
+v 0.020423800 0.020400685 0.003726866
+v 0.008262507 0.033751260 0.001231950
+v 0.004339432 0.033738941 0.003726866
+v 0.009442372 0.000384504 0.002140011
+v -0.016844856 0.004708620 0.003334564
+v -0.014495709 0.029438347 0.001313456
+v 0.020429919 0.015696478 0.001516890
+v 0.009439354 0.000393302 0.004119167
+v 0.019246895 0.023146797 0.003726866
+v 0.008260721 0.033736513 0.003244229
+v -0.002329695 0.023931400 0.004119167
+v -0.014488861 0.029414884 0.003449303
+v -0.016845333 0.003923831 0.002169740
+v 0.016896434 0.008241202 0.001844082
+v -0.014483756 0.000394730 0.003752940
+v 0.016886524 0.008238049 0.003479577
+v -0.010960331 0.021969892 0.004119167
+v 0.005908639 0.018046876 0.004119167
+v -0.016845780 0.004708472 0.002177709
+v -0.014485616 0.006672468 0.003788924
+v -0.016844856 0.003924017 0.003334564
+v -0.003114299 0.023931400 0.004119167
+v -0.011744935 0.021577590 0.004119167
+v -0.012529538 0.020008384 0.004119167
+f 10 6 3
+f 10 4 6
+f 10 3 9
+f 11 6 1
+f 11 1 7
+f 12 6 4
+f 12 4 8
+f 12 1 6
+f 13 12 8
+f 14 9 3
+f 16 8 4
+f 16 4 10
+f 17 11 2
+f 17 3 6
+f 17 6 11
+f 18 9 14
+f 19 7 1
+f 19 1 13
+f 20 13 1
+f 20 1 12
+f 20 12 13
+f 21 13 8
+f 21 19 13
+f 21 5 18
+f 22 15 5
+f 22 5 8
+f 22 16 15
+f 22 8 16
+f 23 10 9
+f 24 14 3
+f 24 3 17
+f 25 18 5
+f 25 9 18
+f 26 2 11
+f 26 11 18
+f 26 18 14
+f 26 14 24
+f 26 24 17
+f 26 17 2
+f 27 5 21
+f 28 21 18
+f 28 7 19
+f 28 19 21
+f 28 18 11
+f 28 11 7
+f 29 16 10
+f 29 10 23
+f 29 15 16
+f 30 25 5
+f 31 23 9
+f 31 9 25
+f 31 29 23
+f 31 15 29
+f 31 25 30
+f 31 30 5
+f 31 5 15
+f 32 27 21
+f 32 21 8
+f 32 8 27
+f 33 27 8
+f 33 5 27
+f 34 33 8
+f 34 8 5
+f 34 5 33
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl024.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl024.obj
new file mode 100644
index 0000000000000000000000000000000000000000..362abc9f1ce2864ed62ede469587b621bef4b6ae
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl024.obj
@@ -0,0 +1,188 @@
+v 0.036115865 -0.045505990 0.048841554
+v 0.014931577 -0.050213609 0.031187980
+v 0.008262449 -0.056882737 0.048841554
+v 0.014611648 -0.050818552 0.031131913
+v 0.034546659 -0.041975275 0.031187980
+v 0.005924684 -0.054647788 0.037042693
+v 0.022777609 -0.054528927 0.047272347
+v 0.030811764 -0.041829810 0.031113308
+v 0.006708190 -0.054247355 0.035867828
+v 0.019246895 -0.055705832 0.048841554
+v 0.031800547 -0.049429006 0.047664649
+v 0.022385308 -0.050213609 0.031187980
+v 0.034546659 -0.041582973 0.041387823
+v 0.029839039 -0.041975275 0.031187980
+v 0.008654750 -0.058844245 0.047272347
+v 0.036115865 -0.045898291 0.048056951
+v 0.027877531 -0.051782816 0.048841554
+v 0.023562213 -0.052567419 0.041387823
+v 0.031015944 -0.045113688 0.031580282
+v 0.030231341 -0.045505990 0.031187980
+v 0.014931577 -0.053352022 0.031580282
+v 0.035331262 -0.041582973 0.037072505
+v 0.032192849 -0.045505990 0.048841554
+v 0.005908639 -0.054921229 0.040210918
+v 0.009831655 -0.052567419 0.034326393
+v 0.030623642 -0.041582973 0.032364885
+v 0.010223957 -0.058451944 0.048841554
+v 0.016893085 -0.056882737 0.047272347
+v 0.031800547 -0.049429006 0.048449252
+v 0.023562213 -0.054136626 0.048449252
+v 0.025916023 -0.052959721 0.048056951
+v 0.016108482 -0.053352022 0.032757187
+v 0.034938960 -0.046290593 0.045703141
+v 0.033762055 -0.042759878 0.031187980
+v 0.025523721 -0.048644403 0.031580282
+v 0.028269832 -0.047467498 0.033149488
+v 0.014510528 -0.052839486 0.031218424
+v 0.005908639 -0.056882737 0.037464806
+v 0.035331262 -0.041975275 0.033149488
+v 0.034154357 -0.041975275 0.042564728
+v 0.005908639 -0.054528927 0.038641711
+v 0.034717484 -0.041348769 0.032053965
+v 0.008200336 -0.058613873 0.048345019
+v 0.009047052 -0.058844245 0.048449252
+v 0.016893085 -0.056882737 0.048449252
+v 0.008654750 -0.056098134 0.036287901
+v 0.036115865 -0.045898291 0.048841554
+v 0.033762055 -0.047859799 0.048056951
+v 0.024739118 -0.053352022 0.048841554
+v 0.025523721 -0.052959721 0.046880046
+v 0.025916023 -0.052959721 0.048449252
+v 0.019639196 -0.051782816 0.032364885
+v 0.016893085 -0.052567419 0.031187980
+v 0.016108482 -0.054921229 0.038641711
+v 0.032192849 -0.049036704 0.047272347
+v 0.031800547 -0.044329085 0.031187980
+v 0.023169911 -0.050213609 0.032364885
+v 0.027877531 -0.047075196 0.031187980
+v 0.031408245 -0.049036704 0.045310839
+v 0.005877886 -0.056350026 0.037108485
+v 0.006646532 -0.055884870 0.035950250
+v 0.034938960 -0.041975275 0.031972583
+v 0.034938960 -0.042759878 0.034326393
+v 0.034154357 -0.041582973 0.041387823
+f 8 4 2
+f 9 2 4
+f 10 1 3
+f 12 4 8
+f 14 8 2
+f 17 1 10
+f 22 13 1
+f 23 3 1
+f 25 14 2
+f 25 2 9
+f 26 8 14
+f 27 10 3
+f 31 11 29
+f 31 30 7
+f 34 8 5
+f 37 9 4
+f 39 22 1
+f 39 1 16
+f 40 3 23
+f 40 24 3
+f 40 23 1
+f 40 1 13
+f 41 25 9
+f 41 9 6
+f 41 14 25
+f 41 26 14
+f 42 13 22
+f 42 5 8
+f 42 8 26
+f 42 22 39
+f 43 27 3
+f 43 38 15
+f 43 3 24
+f 43 24 38
+f 44 15 28
+f 44 43 15
+f 44 27 43
+f 45 10 27
+f 45 44 28
+f 45 27 44
+f 45 30 10
+f 45 28 7
+f 45 7 30
+f 46 38 21
+f 46 15 38
+f 47 16 1
+f 47 1 17
+f 47 17 29
+f 48 29 11
+f 48 47 29
+f 48 16 47
+f 49 17 10
+f 49 10 30
+f 50 11 31
+f 50 18 35
+f 50 35 36
+f 50 31 7
+f 50 7 18
+f 51 30 31
+f 51 49 30
+f 51 17 49
+f 51 31 29
+f 51 29 17
+f 52 18 7
+f 52 7 32
+f 53 32 21
+f 53 52 32
+f 53 12 52
+f 53 21 37
+f 53 37 4
+f 53 4 12
+f 54 21 32
+f 54 46 21
+f 54 28 15
+f 54 15 46
+f 54 32 7
+f 54 7 28
+f 55 33 16
+f 55 34 33
+f 55 19 34
+f 55 48 11
+f 55 16 48
+f 56 34 19
+f 56 19 20
+f 56 20 8
+f 56 8 34
+f 57 35 18
+f 57 12 35
+f 57 52 12
+f 57 18 52
+f 58 35 12
+f 58 36 35
+f 58 20 19
+f 58 19 36
+f 58 12 8
+f 58 8 20
+f 59 36 19
+f 59 50 36
+f 59 11 50
+f 59 55 11
+f 59 19 55
+f 60 41 6
+f 60 38 24
+f 60 24 41
+f 61 37 21
+f 61 21 38
+f 61 38 60
+f 61 9 37
+f 61 60 6
+f 61 6 9
+f 62 34 5
+f 62 42 39
+f 62 5 42
+f 63 39 16
+f 63 16 33
+f 63 62 39
+f 63 33 34
+f 63 34 62
+f 64 40 13
+f 64 26 41
+f 64 41 24
+f 64 24 40
+f 64 42 26
+f 64 13 42
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl025.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl025.obj
new file mode 100644
index 0000000000000000000000000000000000000000..86968d4b9039b060c593ac410e87af2fa7139f65
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl025.obj
@@ -0,0 +1,188 @@
+v -0.024298587 -0.053352022 0.048841554
+v -0.015267655 -0.057621505 0.045322217
+v -0.014491046 -0.054921229 0.042564728
+v -0.020404081 -0.043616807 0.006820819
+v -0.030575413 -0.044329085 0.015495915
+v -0.014491046 -0.048644403 0.008042184
+v -0.029398508 -0.043544481 0.022949646
+v -0.015275649 -0.055313531 0.045310839
+v -0.029398508 -0.054136626 0.047664649
+v -0.014491046 -0.056098134 0.036680203
+v -0.014491046 -0.045898291 0.008042184
+v -0.020375571 -0.046682895 0.006865279
+v -0.029692298 -0.043380624 0.012796380
+v -0.031360016 -0.049821308 0.048449252
+v -0.018414062 -0.043936783 0.007257580
+v -0.025475492 -0.055705832 0.048841554
+v -0.014449941 -0.048111288 0.007687871
+v -0.014521057 -0.055423552 0.042537574
+v -0.023906285 -0.047075196 0.012357502
+v -0.019590967 -0.056098134 0.040603219
+v -0.023121682 -0.043152180 0.010395993
+v -0.030853158 -0.043353937 0.015157775
+v -0.029006207 -0.044329085 0.012357502
+v -0.024690889 -0.052959721 0.048841554
+v -0.032144620 -0.052175117 0.046487744
+v -0.014491046 -0.046682895 0.011180597
+v -0.024243187 -0.055497663 0.048556996
+v -0.032144620 -0.052175117 0.048841554
+v -0.014491046 -0.052175117 0.020988138
+v -0.014475576 -0.056009434 0.038271428
+v -0.016928608 -0.044933851 0.006734674
+v -0.017629459 -0.047467498 0.006865279
+v -0.022337079 -0.047467498 0.011572898
+v -0.025475492 -0.055705832 0.047664649
+v -0.016452554 -0.057667340 0.044133934
+v -0.024304815 -0.043168287 0.009999212
+v -0.019590967 -0.043544481 0.007257580
+v -0.030967715 -0.043936783 0.015103613
+v -0.029398508 -0.043152180 0.021380439
+v -0.024298587 -0.046290593 0.010395993
+v -0.028613905 -0.052959721 0.041780124
+v -0.029398508 -0.043936783 0.012357502
+v -0.029006207 -0.050998213 0.048841554
+v -0.026652397 -0.055313531 0.048841554
+v -0.031360016 -0.052959721 0.047664649
+v -0.032317964 -0.048535169 0.042871030
+v -0.018414062 -0.043936783 0.006865279
+v -0.014549516 -0.046235780 0.007557420
+v -0.019698992 -0.043818524 0.006696822
+v -0.016754560 -0.047170077 0.006984069
+v -0.014883348 -0.049429006 0.010788295
+v -0.021552476 -0.055705832 0.041780124
+v -0.015275649 -0.057667340 0.043741633
+v -0.019983269 -0.056490436 0.042564728
+v -0.016844856 -0.057667340 0.044526236
+v -0.014883348 -0.052175117 0.020988138
+v -0.029351829 -0.043466838 0.012379572
+v -0.023121682 -0.043152180 0.010003692
+v -0.031360016 -0.045505990 0.021380439
+v -0.023513984 -0.043152180 0.011180597
+v -0.031752318 -0.049036704 0.046095442
+v -0.028221603 -0.045113688 0.013534406
+v -0.023513984 -0.046682895 0.010395993
+v -0.030183112 -0.052175117 0.042172426
+f 8 1 2
+f 8 2 3
+f 14 8 7
+f 15 8 3
+f 15 7 8
+f 18 3 2
+f 24 1 8
+f 24 16 1
+f 26 15 3
+f 26 11 15
+f 26 17 11
+f 26 3 17
+f 27 16 2
+f 27 2 1
+f 27 1 16
+f 28 16 24
+f 29 17 10
+f 29 6 17
+f 30 17 3
+f 30 10 17
+f 30 3 18
+f 30 18 2
+f 30 2 10
+f 32 17 6
+f 36 13 22
+f 38 22 13
+f 38 23 5
+f 39 36 22
+f 39 7 15
+f 39 15 37
+f 40 23 12
+f 41 34 9
+f 42 4 12
+f 42 12 23
+f 42 38 13
+f 42 23 38
+f 43 24 8
+f 43 8 14
+f 43 28 24
+f 43 14 28
+f 44 28 9
+f 44 16 28
+f 44 34 16
+f 44 9 34
+f 45 28 25
+f 45 25 9
+f 45 9 28
+f 46 22 38
+f 46 25 28
+f 46 39 22
+f 47 15 11
+f 47 11 31
+f 47 37 15
+f 47 4 37
+f 48 31 11
+f 48 11 17
+f 48 17 31
+f 49 12 4
+f 49 47 31
+f 49 4 47
+f 49 32 12
+f 49 31 32
+f 50 32 31
+f 50 31 17
+f 50 17 32
+f 51 33 12
+f 51 12 32
+f 51 32 6
+f 51 6 29
+f 51 20 33
+f 52 19 33
+f 52 41 19
+f 52 34 41
+f 53 35 10
+f 53 10 2
+f 54 20 35
+f 54 34 52
+f 54 52 33
+f 54 33 20
+f 55 2 16
+f 55 16 34
+f 55 54 35
+f 55 34 54
+f 55 53 2
+f 55 35 53
+f 56 29 10
+f 56 10 35
+f 56 35 20
+f 56 51 29
+f 56 20 51
+f 57 36 4
+f 57 13 36
+f 57 42 13
+f 57 4 42
+f 58 21 37
+f 58 39 21
+f 58 36 39
+f 58 37 4
+f 58 4 36
+f 59 38 5
+f 59 5 25
+f 59 46 38
+f 59 25 46
+f 60 39 37
+f 60 37 21
+f 60 21 39
+f 61 14 7
+f 61 7 39
+f 61 39 46
+f 61 46 28
+f 61 28 14
+f 62 40 19
+f 62 23 40
+f 62 19 41
+f 62 41 5
+f 62 5 23
+f 63 33 19
+f 63 19 40
+f 63 40 12
+f 63 12 33
+f 64 25 5
+f 64 5 41
+f 64 41 9
+f 64 9 25
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl028.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl028.obj
new file mode 100644
index 0000000000000000000000000000000000000000..aaff1476ec5141bcb0092453732c769647aafdd9
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl028.obj
@@ -0,0 +1,128 @@
+v -0.032536921 0.029031321 0.003726866
+v -0.034890491 -0.012160247 0.003319981
+v -0.032142828 0.010984705 0.003270232
+v -0.032536921 0.028246718 -0.000196151
+v -0.044495140 0.012615420 0.003832551
+v -0.034899757 -0.012163865 0.001209303
+v -0.031919707 0.028804908 0.000686934
+v -0.034890731 -0.010198842 0.003726866
+v -0.039206049 0.026285210 0.001765357
+v -0.044387293 -0.010217472 0.003554547
+v -0.032145387 0.029031621 0.003310860
+v -0.032144620 0.012554653 0.000980754
+v -0.032536921 0.028639020 0.003726866
+v -0.039206049 0.026677512 0.004119167
+v -0.034890731 0.028246718 0.000980754
+v -0.043140351 -0.012165215 0.003286420
+v -0.043521367 0.002747112 -0.000196151
+v -0.032536921 0.029031321 0.000588452
+v -0.032144620 0.027462115 -0.000196151
+v -0.032146868 0.010987022 0.001432100
+v -0.044305971 0.013731558 0.001373056
+v -0.036852239 0.026285210 0.004119167
+v -0.043913669 0.012554653 0.004119167
+v -0.038813748 0.026677512 0.001765357
+v -0.033397289 0.029115000 0.003455710
+v -0.040980249 -0.012235999 0.000116807
+v -0.032144620 0.028246718 -0.000196151
+v -0.044383906 0.013756593 0.004001085
+v -0.041167557 0.014908462 -0.000196151
+v -0.044305971 -0.008629636 0.000980754
+v -0.033246225 0.028955519 0.001317242
+v -0.041952161 -0.012160351 -0.000196151
+v -0.038029144 0.021185289 -0.000196151
+v -0.041952161 0.012162351 -0.000196151
+v -0.044305971 0.009023938 0.000980754
+v -0.043129066 -0.008237334 -0.000196151
+v -0.043129066 -0.012160351 0.002157659
+v -0.033321525 0.027462115 -0.000196151
+v -0.043913669 0.000001000 0.000196151
+v -0.043913669 -0.010198842 0.000980754
+v -0.037244541 0.022362194 -0.000196151
+v -0.043913669 0.003139413 0.000196151
+v -0.043521367 -0.003922016 -0.000196151
+v -0.044223391 -0.010175992 0.001630557
+f 8 2 3
+f 11 3 7
+f 13 8 3
+f 13 11 1
+f 13 3 11
+f 16 2 8
+f 16 8 10
+f 18 11 7
+f 18 4 15
+f 19 12 6
+f 19 7 12
+f 19 17 4
+f 20 12 7
+f 20 7 3
+f 20 6 12
+f 20 3 2
+f 20 2 6
+f 21 14 9
+f 22 13 1
+f 22 1 14
+f 22 8 13
+f 23 10 8
+f 23 22 14
+f 23 8 22
+f 24 15 4
+f 24 4 9
+f 24 9 14
+f 25 14 1
+f 25 1 11
+f 25 11 18
+f 25 24 14
+f 25 15 24
+f 26 6 2
+f 26 2 16
+f 27 18 7
+f 27 4 18
+f 27 19 4
+f 27 7 19
+f 28 21 5
+f 28 14 21
+f 28 23 14
+f 28 5 10
+f 28 10 23
+f 29 21 9
+f 29 4 17
+f 30 10 5
+f 31 25 18
+f 31 18 15
+f 31 15 25
+f 32 19 6
+f 32 6 26
+f 32 17 19
+f 33 29 9
+f 33 4 29
+f 34 29 17
+f 34 17 21
+f 34 21 29
+f 35 21 17
+f 35 30 5
+f 35 5 21
+f 36 17 32
+f 37 32 26
+f 37 26 16
+f 37 16 10
+f 38 9 4
+f 38 4 33
+f 39 30 35
+f 40 36 32
+f 40 30 36
+f 40 32 37
+f 41 38 33
+f 41 33 9
+f 41 9 38
+f 42 39 35
+f 42 35 17
+f 42 17 39
+f 43 36 30
+f 43 30 39
+f 43 39 17
+f 43 17 36
+f 44 40 37
+f 44 37 10
+f 44 10 30
+f 44 30 40
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl029.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl029.obj
new file mode 100644
index 0000000000000000000000000000000000000000..85634f73b8395367b226c33bf8f065a49ba32474
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl029.obj
@@ -0,0 +1,188 @@
+v -0.029006207 -0.043152180 0.020595836
+v -0.020358199 -0.047015688 0.010019746
+v -0.020390563 -0.038865998 0.003762755
+v -0.024298587 -0.038836862 0.001765357
+v -0.033321525 -0.039229163 0.006865279
+v -0.020375571 -0.043544481 0.003334564
+v -0.020375571 -0.044329085 0.010788295
+v -0.029398508 -0.043936783 0.011965200
+v -0.031752318 -0.035306147 0.006080675
+v -0.020350223 -0.038783271 0.001549960
+v -0.029790810 -0.039229163 0.003726866
+v -0.021231250 -0.044901694 0.011915801
+v -0.021160174 -0.044329085 0.011965200
+v -0.025083190 -0.046290593 0.012357502
+v -0.032144620 -0.041190672 0.015888216
+v -0.029398508 -0.042759878 0.020203534
+v -0.020375571 -0.045898291 0.005688374
+v -0.020767872 -0.040798370 0.001765357
+v -0.031892748 -0.035490222 0.005413841
+v -0.029111059 -0.043715704 0.020543816
+v -0.020442432 -0.044891003 0.010741825
+v -0.032144620 -0.041190672 0.009219088
+v -0.032339112 -0.040273007 0.015808101
+v -0.020767872 -0.047075196 0.008434485
+v -0.023513984 -0.043544481 0.004119167
+v -0.020375571 -0.040798370 0.001765357
+v -0.021944777 -0.039229163 0.001373056
+v -0.021944777 -0.038836862 0.001373056
+v -0.027044698 -0.038836862 0.002549961
+v -0.030967715 -0.039229163 0.004511469
+v -0.032144620 -0.035306147 0.006080675
+v -0.026652397 -0.045505990 0.012357502
+v -0.029858865 -0.043263312 0.020562073
+v -0.031752318 -0.040013767 0.015888216
+v -0.020375571 -0.047075196 0.008434485
+v -0.021552476 -0.047075196 0.009611390
+v -0.029398508 -0.042759878 0.007649882
+v -0.021552476 -0.044721386 0.004511469
+v -0.022729380 -0.041190672 0.002157659
+v -0.020375571 -0.042367577 0.002549961
+v -0.020375571 -0.039621465 0.001373056
+v -0.025867794 -0.038836862 0.002157659
+v -0.029790810 -0.038836862 0.003726866
+v -0.032229831 -0.035423457 0.005572873
+v -0.025083190 -0.043152180 0.004511469
+v -0.033291144 -0.038800915 0.007668567
+v -0.029398508 -0.043936783 0.012357502
+v -0.029695139 -0.043388252 0.020643300
+v -0.020767872 -0.046682895 0.007257580
+v -0.023513984 -0.045898291 0.007649882
+v -0.030575413 -0.042759878 0.010003692
+v -0.031752318 -0.040798370 0.007257580
+v -0.020767872 -0.043544481 0.003334564
+v -0.024690889 -0.041975275 0.003334564
+v -0.027044698 -0.039229163 0.002549961
+v -0.022337079 -0.040406068 0.001765357
+v -0.021552476 -0.041582973 0.002157659
+v -0.030967715 -0.038836862 0.004511469
+v -0.029398508 -0.042367577 0.006865279
+v -0.024298587 -0.043544481 0.004511469
+v -0.020375571 -0.046682895 0.007257580
+v -0.023121682 -0.046682895 0.009611390
+v -0.020767872 -0.045898291 0.005688374
+v -0.029790810 -0.043152180 0.009611390
+f 10 7 2
+f 10 3 7
+f 10 9 3
+f 13 7 3
+f 13 3 1
+f 13 1 12
+f 16 1 3
+f 16 3 9
+f 17 10 2
+f 17 6 10
+f 19 9 10
+f 20 12 1
+f 20 2 12
+f 20 14 2
+f 21 12 2
+f 21 2 7
+f 21 13 12
+f 21 7 13
+f 22 15 5
+f 22 8 15
+f 23 5 15
+f 28 19 10
+f 28 4 19
+f 28 27 4
+f 32 14 20
+f 33 23 15
+f 33 1 16
+f 34 16 9
+f 34 31 23
+f 34 9 31
+f 34 33 16
+f 34 23 33
+f 36 2 14
+f 36 35 2
+f 36 24 35
+f 38 6 17
+f 40 26 10
+f 40 10 6
+f 40 18 26
+f 41 26 18
+f 41 10 26
+f 41 28 10
+f 41 27 28
+f 42 29 19
+f 42 19 4
+f 43 29 11
+f 43 11 30
+f 44 30 5
+f 44 19 29
+f 44 29 43
+f 44 31 9
+f 44 9 19
+f 45 30 11
+f 46 5 23
+f 46 23 31
+f 46 44 5
+f 46 31 44
+f 47 8 32
+f 47 33 15
+f 47 15 8
+f 48 20 1
+f 48 1 33
+f 48 33 47
+f 48 47 32
+f 48 32 20
+f 49 35 24
+f 50 49 24
+f 51 8 22
+f 52 22 5
+f 52 5 30
+f 52 51 22
+f 52 37 51
+f 53 38 25
+f 53 25 39
+f 53 6 38
+f 53 40 6
+f 54 39 25
+f 54 45 11
+f 54 25 45
+f 55 11 29
+f 55 29 42
+f 55 42 4
+f 55 54 11
+f 55 39 54
+f 56 41 18
+f 56 27 41
+f 56 4 27
+f 56 55 4
+f 56 39 55
+f 57 18 40
+f 57 56 18
+f 57 39 56
+f 57 53 39
+f 57 40 53
+f 58 44 43
+f 58 43 30
+f 58 30 44
+f 59 30 45
+f 59 52 30
+f 59 37 52
+f 59 50 37
+f 59 45 50
+f 60 45 25
+f 60 25 38
+f 60 50 45
+f 61 49 17
+f 61 35 49
+f 61 17 2
+f 61 2 35
+f 62 36 14
+f 62 14 32
+f 62 50 24
+f 62 24 36
+f 63 38 17
+f 63 17 49
+f 63 49 50
+f 63 60 38
+f 63 50 60
+f 64 51 37
+f 64 8 51
+f 64 37 50
+f 64 50 62
+f 64 62 32
+f 64 32 8
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl030.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl030.obj
new file mode 100644
index 0000000000000000000000000000000000000000..cc19d2f6ff14c518a209d06b6a497337c6c7fd19
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl030.obj
@@ -0,0 +1,170 @@
+v -0.024293696 -0.019220822 0.003481169
+v -0.016852122 -0.036107682 0.003452456
+v -0.016844856 -0.031383131 0.000980754
+v -0.036459938 -0.023144796 -0.000196151
+v -0.032005723 -0.038657385 0.005827687
+v -0.024298717 -0.019225305 0.001492779
+v -0.016843842 -0.029419161 0.003258796
+v -0.036852239 -0.024714003 0.003726866
+v -0.021160174 -0.036483052 -0.000196151
+v -0.027829575 -0.019221886 0.001379982
+v -0.016844856 -0.033736941 0.003726866
+v -0.020371407 -0.023134689 0.003415749
+v -0.032144620 -0.034913845 0.005688374
+v -0.036852239 -0.029029321 0.001765357
+v -0.016847094 -0.036096243 0.001054492
+v -0.021160174 -0.023144796 0.000980754
+v -0.027437000 -0.019614082 0.000980754
+v -0.027823012 -0.019220548 0.003410915
+v -0.027044698 -0.038444560 0.005688374
+v -0.036719443 -0.028918402 0.004327904
+v -0.036852239 -0.023537098 0.000196151
+v -0.031360016 -0.037659957 0.003334564
+v -0.027044698 -0.033344639 -0.000196151
+v -0.020375571 -0.035306147 -0.000196151
+v -0.020376110 -0.023144918 0.001397990
+v -0.027829302 -0.019614082 0.000980754
+v -0.034890731 -0.023144796 -0.000196151
+v -0.036852239 -0.027067813 0.004119167
+v -0.036594895 -0.023004796 0.000389513
+v -0.020390563 -0.038865998 0.003762755
+v -0.030575413 -0.035698449 0.005688374
+v -0.032144620 -0.038836862 0.005296072
+v -0.036459938 -0.023537098 -0.000196151
+v -0.031752318 -0.036483052 0.002942262
+v -0.026260095 -0.038836862 0.002157659
+v -0.020375571 -0.037267655 0.000196151
+v -0.016846388 -0.029421967 0.001443853
+v -0.036850373 -0.023140264 0.003195057
+v -0.027180460 -0.039023763 0.005504355
+v -0.031752318 -0.034913845 0.005688374
+v -0.027437000 -0.038052259 0.005688374
+v -0.032144620 -0.037659957 0.003726866
+v -0.036852239 -0.027852416 0.001373056
+v -0.024298587 -0.034913845 -0.000196151
+v -0.029790810 -0.038052259 0.002942262
+v -0.020350223 -0.038783271 0.001549960
+v -0.022729380 -0.038836862 0.001373056
+v -0.030967715 -0.038836862 0.004119167
+v -0.032144620 -0.038444560 0.004511469
+v -0.036067636 -0.024321701 -0.000196151
+v -0.024690889 -0.038836862 0.001765357
+v -0.020375571 -0.038444560 0.000980754
+v -0.020767872 -0.037267655 0.000196151
+v -0.028613905 -0.038836862 0.002942262
+v -0.030183112 -0.038444560 0.003334564
+v -0.025475492 -0.038444560 0.001765357
+v -0.020767872 -0.038444560 0.000980754
+v -0.030967715 -0.038444560 0.003726866
+f 10 1 6
+f 11 3 7
+f 12 6 1
+f 12 11 7
+f 15 11 2
+f 15 3 11
+f 17 10 6
+f 17 6 16
+f 18 1 10
+f 18 13 1
+f 19 2 11
+f 19 11 12
+f 19 12 1
+f 20 5 13
+f 21 14 8
+f 23 4 9
+f 24 3 15
+f 24 16 3
+f 24 9 4
+f 25 16 6
+f 25 6 12
+f 26 17 4
+f 26 4 10
+f 26 10 17
+f 27 17 16
+f 27 4 17
+f 27 24 4
+f 27 16 24
+f 28 18 8
+f 28 13 18
+f 28 20 13
+f 28 8 14
+f 28 14 20
+f 29 18 10
+f 29 10 4
+f 29 4 21
+f 30 15 2
+f 30 2 19
+f 32 20 14
+f 32 5 20
+f 33 21 4
+f 33 4 23
+f 34 23 22
+f 34 22 14
+f 34 14 23
+f 36 24 15
+f 36 9 24
+f 37 25 12
+f 37 12 7
+f 37 7 3
+f 37 3 16
+f 37 16 25
+f 38 8 18
+f 38 18 29
+f 38 29 21
+f 38 21 8
+f 39 30 19
+f 39 19 5
+f 39 5 32
+f 40 13 5
+f 40 5 31
+f 40 31 1
+f 40 1 13
+f 41 31 5
+f 41 5 19
+f 41 19 1
+f 41 1 31
+f 42 14 22
+f 43 14 21
+f 43 21 33
+f 44 23 9
+f 45 22 23
+f 45 23 35
+f 46 15 30
+f 47 46 30
+f 47 30 39
+f 48 39 32
+f 49 32 14
+f 49 14 42
+f 49 48 32
+f 50 43 33
+f 50 33 23
+f 50 23 14
+f 50 14 43
+f 51 9 47
+f 51 47 39
+f 51 39 35
+f 52 36 15
+f 52 15 46
+f 52 46 47
+f 53 47 9
+f 53 9 36
+f 53 36 52
+f 54 35 39
+f 54 39 48
+f 54 45 35
+f 55 42 22
+f 55 54 48
+f 55 22 45
+f 55 45 54
+f 56 51 35
+f 56 44 9
+f 56 9 51
+f 56 35 23
+f 56 23 44
+f 57 53 52
+f 57 52 47
+f 57 47 53
+f 58 55 48
+f 58 42 55
+f 58 49 42
+f 58 48 49
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl031.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl031.obj
new file mode 100644
index 0000000000000000000000000000000000000000..78050e97cc5a63504b21d3373317c1461e5d7169
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl031.obj
@@ -0,0 +1,110 @@
+v -0.012921840 0.033738941 0.003726866
+v -0.014490740 -0.010198782 0.003711491
+v -0.012922682 0.036095412 0.001040887
+v -0.032144620 0.027462115 0.000980754
+v -0.034890731 0.000001000 0.003726866
+v -0.020376110 -0.023144918 0.001397990
+v -0.016843959 0.036089164 0.003220638
+v -0.012922765 0.006671777 0.002201824
+v -0.014491046 -0.006275826 0.004119167
+v -0.034110932 -0.023147449 0.001043859
+v -0.012921186 0.036090135 0.003251483
+v -0.016844930 0.036092984 0.000986023
+v -0.032146028 0.027462664 0.003291071
+v -0.016847214 -0.021576120 0.001482002
+v -0.031360016 -0.021575590 0.003726866
+v -0.013314141 0.018046876 0.004119167
+v -0.012916834 0.006672286 0.003814715
+v -0.034903914 0.010994687 0.001326893
+v -0.021552476 0.031777433 0.003726866
+v -0.021945536 0.033347973 0.001016509
+v -0.012921840 0.007454731 0.001765357
+v -0.034890731 -0.011375747 0.003726866
+v -0.016842005 -0.021568670 0.003513851
+v -0.034104752 -0.023141457 0.003231759
+v -0.018021761 0.006670128 0.004119167
+v -0.014491050 -0.010198954 0.001769161
+v -0.012921840 0.018046876 0.004119167
+v -0.034899757 -0.012163865 0.001209303
+v -0.034886811 0.010983825 0.003193820
+v -0.013706443 0.033738941 0.003726866
+v -0.021944245 0.033344509 0.003266905
+v -0.021944777 0.031385131 0.003726866
+v -0.014883348 -0.006275826 0.004119167
+v -0.020371407 -0.023134689 0.003415749
+v -0.031752318 -0.020790987 0.003726866
+v -0.012921840 0.018046876 0.001373056
+v -0.034890491 -0.012160247 0.003319981
+v -0.018021761 0.005100922 0.004119167
+f 10 3 6
+f 11 3 7
+f 12 7 3
+f 12 10 4
+f 12 3 10
+f 14 6 3
+f 17 9 2
+f 17 11 1
+f 17 3 11
+f 18 13 4
+f 20 12 4
+f 20 4 13
+f 21 17 8
+f 22 5 18
+f 23 2 9
+f 23 9 15
+f 23 6 14
+f 24 10 6
+f 25 9 16
+f 25 5 22
+f 26 17 2
+f 26 8 17
+f 26 21 8
+f 26 23 14
+f 26 2 23
+f 27 16 9
+f 27 9 17
+f 27 17 1
+f 27 1 16
+f 28 18 4
+f 28 4 10
+f 28 22 18
+f 28 10 24
+f 29 18 5
+f 29 5 13
+f 29 13 18
+f 30 7 19
+f 30 11 7
+f 30 1 11
+f 30 19 16
+f 30 16 1
+f 31 19 7
+f 31 13 19
+f 31 20 13
+f 31 7 12
+f 31 12 20
+f 32 19 13
+f 32 13 5
+f 32 16 19
+f 32 25 16
+f 32 5 25
+f 33 15 9
+f 33 9 25
+f 34 23 15
+f 34 15 24
+f 34 24 6
+f 34 6 23
+f 35 24 15
+f 35 22 24
+f 35 33 22
+f 35 15 33
+f 36 26 14
+f 36 14 3
+f 36 21 26
+f 36 3 17
+f 36 17 21
+f 37 28 24
+f 37 24 22
+f 37 22 28
+f 38 33 25
+f 38 25 22
+f 38 22 33
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl032.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl032.obj
new file mode 100644
index 0000000000000000000000000000000000000000..7f2cd12f9d55a1c45053cadb8d634ea3355e2d80
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl032.obj
@@ -0,0 +1,188 @@
+v 0.003493529 0.049867381 0.010092334
+v 0.011008560 0.041584974 0.004511469
+v 0.005908639 0.045507990 0.002549961
+v -0.016844856 0.041584974 0.001373056
+v -0.014491046 0.046292593 0.009611390
+v 0.010223957 0.041584974 0.001373056
+v -0.008998823 0.048646403 0.005296072
+v 0.003579843 0.047778859 0.009924955
+v -0.016871812 0.041274555 0.003942214
+v -0.008998823 0.041977275 0.000980754
+v 0.008262449 0.049038705 0.008042184
+v -0.013706443 0.049431007 0.010003692
+v -0.012921840 0.046684895 0.010003692
+v 0.010194722 0.041062817 0.001718402
+v 0.001985623 0.041977275 0.000980754
+v -0.009391125 0.046292593 0.002942262
+v 0.010993561 0.045463429 0.004559573
+v 0.008373556 0.046636570 0.008733905
+v 0.002770226 0.049823308 0.007257580
+v -0.016452554 0.045507990 0.003726866
+v -0.014527073 0.046828216 0.009966328
+v -0.014457550 0.048858472 0.010050287
+v 0.010998363 0.041169764 0.001842867
+v 0.011023672 0.041243360 0.003954900
+v -0.010932293 0.041060821 0.001343582
+v -0.000760489 0.043154180 0.001373056
+v 0.006300941 0.043546482 0.001765357
+v 0.001593321 0.047861800 0.004119167
+v -0.011352633 0.044723387 0.002157659
+v 0.011008560 0.043546482 0.002157659
+v 0.007477846 0.047077197 0.004119167
+v 0.008208959 0.048734845 0.008904135
+v 0.003554829 0.050215610 0.009611390
+v -0.007429617 0.050607911 0.009611390
+v -0.014098744 0.048646403 0.006865279
+v -0.016929923 0.045000523 0.006739570
+v -0.012921840 0.047077197 0.004119167
+v -0.003513114 0.050239403 0.010097437
+v -0.016844856 0.041192672 0.001373056
+v -0.010960331 0.041584974 0.000980754
+v 0.001593321 0.043154180 0.001373056
+v 0.004731734 0.041584974 0.000980754
+v -0.007037315 0.048646403 0.004903770
+v 0.003947131 0.046292593 0.002942262
+v -0.016844856 0.042761879 0.001765357
+v -0.009783426 0.043938784 0.001765357
+v 0.005516337 0.044723387 0.002157659
+v 0.008654750 0.041977275 0.001373056
+v 0.011008560 0.042369577 0.001765357
+v 0.010616259 0.045507990 0.003726866
+v 0.004339432 0.048646403 0.005688374
+v 0.010223957 0.044331085 0.002549961
+v 0.002770226 0.050215610 0.008826787
+v -0.007821918 0.050215610 0.008434485
+v -0.013706443 0.049431007 0.009611390
+v -0.014491046 0.049038705 0.008826787
+v -0.016744662 0.045181572 0.007013342
+v -0.012921840 0.046292593 0.003334564
+v -0.014098744 0.048254102 0.006080675
+v -0.007429617 0.050607911 0.010003692
+v -0.016897762 0.041745680 0.004164175
+v 0.004339432 0.042761879 0.001373056
+v 0.004712709 0.041064980 0.001337880
+v -0.006252712 0.047469498 0.003726866
+f 9 2 5
+f 13 5 2
+f 13 8 1
+f 18 2 17
+f 18 13 2
+f 18 8 13
+f 18 1 8
+f 21 5 13
+f 22 21 13
+f 23 14 6
+f 24 2 9
+f 24 9 14
+f 24 14 23
+f 24 17 2
+f 25 14 9
+f 26 15 10
+f 26 16 3
+f 30 17 24
+f 32 18 17
+f 32 17 11
+f 32 1 18
+f 33 32 11
+f 33 1 32
+f 36 9 5
+f 36 5 21
+f 38 22 13
+f 38 13 1
+f 39 25 9
+f 40 10 15
+f 40 25 39
+f 40 39 4
+f 41 26 3
+f 41 15 26
+f 42 15 41
+f 42 40 15
+f 42 6 14
+f 43 19 28
+f 43 37 7
+f 44 3 16
+f 44 31 3
+f 44 28 31
+f 45 36 20
+f 45 4 36
+f 45 40 4
+f 45 10 40
+f 46 26 10
+f 46 45 29
+f 46 10 45
+f 46 29 16
+f 46 16 26
+f 47 30 27
+f 47 3 30
+f 47 41 3
+f 47 27 41
+f 48 30 6
+f 48 27 30
+f 48 42 27
+f 48 6 42
+f 49 23 6
+f 49 6 30
+f 49 30 24
+f 49 24 23
+f 50 31 11
+f 50 11 17
+f 50 17 30
+f 51 31 28
+f 51 28 19
+f 51 19 11
+f 51 11 31
+f 52 30 3
+f 52 3 31
+f 52 50 30
+f 52 31 50
+f 53 33 11
+f 53 11 19
+f 53 34 33
+f 53 19 34
+f 54 35 34
+f 54 7 35
+f 54 43 7
+f 54 34 19
+f 54 19 43
+f 55 12 34
+f 55 34 35
+f 56 35 20
+f 56 55 35
+f 56 22 12
+f 56 12 55
+f 56 36 22
+f 56 20 36
+f 57 36 21
+f 57 21 22
+f 57 22 36
+f 58 16 29
+f 58 29 45
+f 58 45 20
+f 58 20 37
+f 58 43 16
+f 58 37 43
+f 59 35 7
+f 59 7 37
+f 59 37 20
+f 59 20 35
+f 60 12 22
+f 60 22 38
+f 60 34 12
+f 60 33 34
+f 60 38 1
+f 60 1 33
+f 61 36 4
+f 61 4 39
+f 61 39 9
+f 61 9 36
+f 62 42 41
+f 62 41 27
+f 62 27 42
+f 63 42 14
+f 63 14 25
+f 63 25 40
+f 63 40 42
+f 64 43 28
+f 64 16 43
+f 64 44 16
+f 64 28 44
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl033.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl033.obj
new file mode 100644
index 0000000000000000000000000000000000000000..9b607aacefb1661476519c332d88d26468a2221d
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl033.obj
@@ -0,0 +1,188 @@
+v -0.035283033 0.047469498 0.048841554
+v -0.032929223 0.045507990 0.037072505
+v -0.032536921 0.047861800 0.031580282
+v -0.041167557 0.038838862 0.024518852
+v -0.047444384 0.035308148 0.046095442
+v -0.035283033 0.040015767 0.024518852
+v -0.043521367 0.044331085 0.046880046
+v -0.032853159 0.048503872 0.037111392
+v -0.034890731 0.047469498 0.048056951
+v -0.032536921 0.044723387 0.033149488
+v -0.036067636 0.043154180 0.024518852
+v -0.042344462 0.032954338 0.024518852
+v -0.047444384 0.040015767 0.048841554
+v -0.039206049 0.047861800 0.048056951
+v -0.046659780 0.035308148 0.044133934
+v -0.035010433 0.048065122 0.047999804
+v -0.032120771 0.047817112 0.033161681
+v -0.042894540 0.033099381 0.024465266
+v -0.031980634 0.047162217 0.030487213
+v -0.040382954 0.041192672 0.028834170
+v -0.035283033 0.040015767 0.025303456
+v -0.032536921 0.044723387 0.033541790
+v -0.046267479 0.037661957 0.048841554
+v -0.047836685 0.039623466 0.046095442
+v -0.038421446 0.047861800 0.048841554
+v -0.044305971 0.043546482 0.048449252
+v -0.033321525 0.048646403 0.036287901
+v -0.037636843 0.047861800 0.043349331
+v -0.043913669 0.033346639 0.030403377
+v -0.032536921 0.047469498 0.030403377
+v -0.035202685 0.043041523 0.024554102
+v -0.032144620 0.047861800 0.031580282
+v -0.032929223 0.043938784 0.031972583
+v -0.039598351 0.040408069 0.024518852
+v -0.032144620 0.047077197 0.033149488
+v -0.049013590 0.036092751 0.046487744
+v -0.046267479 0.037269656 0.048056951
+v -0.048228987 0.035700449 0.038249410
+v -0.046659780 0.041192672 0.046880046
+v -0.035363961 0.047999263 0.048505999
+v -0.042736764 0.045115689 0.048056951
+v -0.045482875 0.041977275 0.048841554
+v -0.039206049 0.047861800 0.047272347
+v -0.036459938 0.043154180 0.025303456
+v -0.034106128 0.048254102 0.036287901
+v -0.042344462 0.032954338 0.024911154
+v -0.044430848 0.033461440 0.029968662
+v -0.035675334 0.043546482 0.024911154
+v -0.048379411 0.035136946 0.046561096
+v -0.049013590 0.038054259 0.046487744
+v -0.042736764 0.037269656 0.024911154
+v -0.042736764 0.041584974 0.036680203
+v -0.046267479 0.039623466 0.041387823
+v -0.047836685 0.040015767 0.048449252
+v -0.041559859 0.045900292 0.048449252
+v -0.040775256 0.046684895 0.047664649
+v -0.043129066 0.043938784 0.044526236
+v -0.036852239 0.046292593 0.036287901
+v -0.038813748 0.041192672 0.024911154
+v -0.033321525 0.047861800 0.033149488
+v -0.032929223 0.045115689 0.035895600
+v -0.047976484 0.035120492 0.038328174
+v -0.049267262 0.036281276 0.045231710
+v -0.049013590 0.038054259 0.046095442
+f 9 1 5
+f 15 9 5
+f 15 2 9
+f 16 9 8
+f 16 1 9
+f 17 8 9
+f 18 11 6
+f 18 6 12
+f 21 12 6
+f 22 10 19
+f 23 1 13
+f 25 13 1
+f 25 16 14
+f 27 16 8
+f 27 14 16
+f 29 15 5
+f 31 19 6
+f 31 6 11
+f 31 30 19
+f 32 17 19
+f 32 3 27
+f 32 27 8
+f 32 8 17
+f 32 30 3
+f 32 19 30
+f 33 19 10
+f 33 6 19
+f 33 21 6
+f 33 10 22
+f 34 4 20
+f 34 18 4
+f 34 11 18
+f 35 2 22
+f 35 17 9
+f 35 9 2
+f 35 22 19
+f 35 19 17
+f 36 23 13
+f 37 23 5
+f 37 5 1
+f 37 1 23
+f 39 26 7
+f 40 25 1
+f 40 1 16
+f 40 16 25
+f 41 7 26
+f 42 26 13
+f 42 13 25
+f 43 14 27
+f 44 30 11
+f 45 43 27
+f 45 28 43
+f 46 15 29
+f 46 33 22
+f 46 12 21
+f 46 21 33
+f 47 18 12
+f 47 46 29
+f 47 12 46
+f 48 31 11
+f 48 11 30
+f 48 30 31
+f 49 5 23
+f 49 23 36
+f 49 47 29
+f 49 29 5
+f 51 4 18
+f 51 18 38
+f 52 20 4
+f 52 4 39
+f 53 39 4
+f 53 24 39
+f 53 51 24
+f 53 4 51
+f 54 13 26
+f 54 26 39
+f 54 39 24
+f 54 50 36
+f 54 36 13
+f 55 41 26
+f 55 25 14
+f 55 14 41
+f 55 42 25
+f 55 26 42
+f 56 41 14
+f 56 14 43
+f 56 43 7
+f 56 7 41
+f 57 7 43
+f 57 20 52
+f 57 52 39
+f 57 39 7
+f 58 43 28
+f 58 28 45
+f 58 57 43
+f 58 20 57
+f 59 34 20
+f 59 58 44
+f 59 20 58
+f 59 44 11
+f 59 11 34
+f 60 58 45
+f 60 44 58
+f 60 3 30
+f 60 30 44
+f 60 45 27
+f 60 27 3
+f 61 46 22
+f 61 22 2
+f 61 2 15
+f 61 15 46
+f 62 38 18
+f 62 18 47
+f 62 47 49
+f 63 36 50
+f 63 38 62
+f 63 62 49
+f 63 49 36
+f 64 63 50
+f 64 38 63
+f 64 51 38
+f 64 24 51
+f 64 54 24
+f 64 50 54
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl035.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl035.obj
new file mode 100644
index 0000000000000000000000000000000000000000..6e240ea908cea625bc684769c80174d67d429f50
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl035.obj
@@ -0,0 +1,188 @@
+v -0.046667264 0.039624373 0.048642402
+v -0.044698272 0.037269656 0.043349331
+v -0.042540850 0.037876279 0.028509300
+v -0.045482875 0.031385131 0.018634328
+v -0.059998036 0.017262272 0.048841554
+v -0.042736764 0.030208226 0.019026629
+v -0.048228987 0.039623466 0.047664649
+v -0.042736764 0.038054259 0.031187980
+v -0.057644226 0.017654574 0.048841554
+v -0.042234769 0.031676591 0.018671584
+v -0.051587627 0.020324161 0.020644127
+v -0.044698272 0.039231164 0.035895600
+v -0.054113511 0.030992830 0.048841554
+v -0.044277545 0.039203613 0.037082388
+v -0.046659780 0.037269656 0.048841554
+v -0.042736764 0.034131243 0.029226472
+v -0.043129066 0.036485052 0.024518852
+v -0.047236620 0.024045356 0.018579062
+v -0.053328908 0.017262272 0.031187980
+v -0.046893282 0.039849786 0.046406555
+v -0.045875177 0.036485052 0.031972583
+v -0.048228987 0.039231164 0.048841554
+v -0.053721210 0.031385131 0.045310839
+v -0.044908681 0.037855714 0.043277862
+v -0.057644226 0.017262272 0.048449252
+v -0.042344462 0.031385131 0.020595836
+v -0.042762211 0.035723842 0.031179338
+v -0.045090574 0.031777433 0.018634328
+v -0.042579592 0.036342269 0.024572949
+v -0.043129066 0.038054259 0.028441869
+v -0.045482875 0.025892908 0.018634328
+v -0.049798193 0.024323702 0.018634328
+v -0.056075020 0.017262272 0.031187980
+v -0.049013590 0.020792987 0.020595836
+v -0.047836685 0.039623466 0.046487744
+v -0.047836685 0.039623466 0.048841554
+v -0.045090574 0.038838862 0.035895600
+v -0.045090574 0.033346639 0.021772741
+v -0.054113511 0.031777433 0.048056951
+v -0.050190495 0.036485052 0.046095442
+v -0.044698272 0.036877354 0.042564728
+v -0.053328908 0.017262272 0.031580282
+v -0.042344462 0.030992830 0.019418931
+v -0.042736764 0.034523544 0.030011075
+v -0.042602684 0.031230213 0.018546626
+v -0.042490755 0.037046167 0.026172732
+v -0.042736764 0.030208226 0.018634328
+v -0.049501449 0.023799432 0.018717607
+v -0.049013590 0.027462115 0.020988138
+v -0.052152003 0.020792987 0.020988138
+v -0.058428829 0.023146797 0.047272347
+v -0.049594133 0.020480577 0.020544589
+v -0.046267479 0.024716003 0.018634328
+v -0.045482875 0.025892908 0.019026629
+v -0.048228987 0.039623466 0.048056951
+v -0.048228987 0.039231164 0.046487744
+v -0.043129066 0.037269656 0.026088059
+v -0.051759702 0.034523544 0.048841554
+v -0.052936607 0.032954338 0.046487744
+v -0.055290416 0.017262272 0.039426315
+v -0.042736764 0.033346639 0.027264964
+v -0.049405892 0.020400685 0.021772741
+v -0.048228987 0.027462115 0.018634328
+v -0.049013590 0.027854416 0.021772741
+f 13 5 9
+f 14 8 3
+f 14 3 12
+f 15 2 1
+f 15 13 9
+f 20 14 12
+f 20 1 14
+f 22 13 15
+f 24 14 1
+f 24 1 2
+f 24 2 8
+f 24 8 14
+f 25 15 9
+f 25 2 15
+f 25 9 5
+f 25 5 19
+f 26 10 3
+f 27 8 2
+f 27 3 8
+f 28 17 10
+f 29 10 17
+f 30 12 3
+f 33 19 5
+f 33 11 19
+f 35 20 12
+f 35 7 20
+f 36 1 20
+f 36 22 15
+f 36 15 1
+f 37 35 12
+f 38 28 4
+f 41 25 16
+f 41 16 2
+f 41 2 25
+f 42 25 19
+f 42 6 26
+f 43 26 6
+f 43 10 26
+f 44 27 2
+f 44 2 16
+f 44 16 26
+f 44 26 3
+f 44 3 27
+f 45 4 28
+f 45 18 32
+f 45 28 10
+f 46 3 10
+f 46 10 29
+f 47 6 31
+f 47 43 6
+f 47 10 43
+f 47 45 10
+f 47 31 45
+f 48 32 18
+f 48 11 32
+f 50 32 11
+f 50 11 33
+f 50 49 32
+f 51 33 5
+f 51 50 33
+f 51 23 49
+f 51 49 50
+f 51 39 23
+f 51 5 13
+f 51 13 39
+f 52 19 11
+f 52 18 34
+f 52 48 18
+f 52 11 48
+f 53 34 18
+f 53 45 31
+f 53 18 45
+f 54 31 6
+f 54 53 31
+f 54 34 53
+f 55 36 20
+f 55 20 7
+f 55 22 36
+f 55 39 22
+f 55 7 39
+f 56 37 21
+f 56 7 35
+f 56 35 37
+f 56 21 40
+f 57 21 37
+f 57 38 21
+f 57 17 28
+f 57 28 38
+f 57 30 3
+f 57 3 46
+f 57 46 29
+f 57 29 17
+f 57 37 12
+f 57 12 30
+f 58 39 13
+f 58 13 22
+f 58 22 39
+f 59 38 23
+f 59 23 39
+f 59 39 7
+f 59 56 40
+f 59 7 56
+f 59 40 21
+f 59 21 38
+f 60 42 16
+f 60 16 25
+f 60 25 42
+f 61 42 26
+f 61 26 16
+f 61 16 42
+f 62 6 42
+f 62 54 6
+f 62 34 54
+f 62 42 19
+f 62 52 34
+f 62 19 52
+f 63 45 32
+f 63 4 45
+f 63 32 49
+f 64 38 4
+f 64 63 49
+f 64 4 63
+f 64 49 23
+f 64 23 38
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl038.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl038.obj
new file mode 100644
index 0000000000000000000000000000000000000000..86a11ad30fbdb9d1dbf15bb5f6dfe4c19e32487c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl038.obj
@@ -0,0 +1,188 @@
+v 0.036508167 0.045507990 0.048841554
+v 0.036704536 0.042205382 0.047981634
+v 0.036508167 0.041977275 0.037072505
+v 0.029054436 0.040800371 0.013926708
+v 0.023562213 0.048646403 0.040210918
+v 0.036115865 0.038446561 0.037464806
+v 0.032192849 0.049038705 0.047272347
+v 0.036508167 0.045507990 0.047272347
+v 0.036702266 0.038609888 0.037402340
+v 0.025523721 0.040800371 0.017849724
+v 0.035723563 0.042369577 0.048449252
+v 0.023954514 0.051000213 0.037464806
+v 0.035331262 0.046684895 0.048449252
+v 0.032977452 0.040800371 0.023341947
+v 0.036115865 0.041977275 0.048056951
+v 0.028873365 0.040164542 0.014004224
+v 0.023562213 0.044331085 0.025303456
+v 0.035723563 0.040015767 0.041387823
+v 0.027092927 0.049431007 0.048841554
+v 0.036115865 0.042369577 0.048841554
+v 0.025523721 0.045507990 0.020595836
+v 0.035331262 0.046684895 0.048056951
+v 0.036508167 0.044331085 0.043741633
+v 0.031800547 0.042761879 0.026480361
+v 0.036632576 0.038551246 0.037032466
+v 0.031408245 0.040800371 0.019418931
+v 0.024777522 0.042039812 0.017439498
+v 0.026308324 0.040408069 0.018634328
+v 0.023562213 0.048254102 0.039034013
+v 0.023380309 0.051061472 0.039124612
+v 0.027485229 0.048646403 0.048449252
+v 0.029839039 0.050215610 0.048841554
+v 0.036596171 0.042471805 0.048505999
+v 0.031408245 0.049038705 0.045310839
+v 0.034154357 0.047469498 0.047272347
+v 0.031800547 0.049431007 0.048056951
+v 0.034546659 0.042761879 0.033541790
+v 0.032775428 0.040160643 0.023420767
+v 0.036115865 0.038446561 0.037072505
+v 0.032192849 0.041192672 0.022557344
+v 0.029839039 0.041977275 0.019418931
+v 0.024739118 0.041584974 0.017457423
+v 0.028435839 0.040496020 0.014023585
+v 0.024995317 0.044540339 0.018692593
+v 0.025523721 0.040800371 0.017457423
+v 0.023551703 0.051373388 0.040608454
+v 0.027092927 0.048646403 0.047664649
+v 0.028269832 0.048254102 0.048841554
+v 0.032192849 0.048646403 0.048841554
+v 0.025131419 0.049823308 0.035503298
+v 0.031800547 0.048254102 0.043741633
+v 0.035723563 0.046292593 0.047664649
+v 0.031800547 0.049431007 0.047664649
+v 0.035331262 0.045507990 0.044133934
+v 0.031226310 0.040163378 0.019496818
+v 0.031015944 0.041977275 0.022165042
+v 0.026308324 0.044723387 0.019811233
+v 0.024739118 0.041584974 0.018242026
+v 0.023562213 0.046684895 0.021772741
+v 0.028612176 0.040340879 0.013948095
+v 0.027137495 0.050277594 0.048578117
+v 0.024346816 0.051392515 0.039818616
+v 0.023562213 0.051000213 0.037464806
+v 0.034938960 0.043154180 0.048841554
+f 8 1 2
+f 13 1 8
+f 15 9 2
+f 15 6 9
+f 18 15 11
+f 18 6 15
+f 20 1 19
+f 20 15 2
+f 20 11 15
+f 22 13 8
+f 23 3 14
+f 23 9 3
+f 23 8 2
+f 23 2 9
+f 25 14 3
+f 25 3 9
+f 25 9 6
+f 26 16 4
+f 28 17 10
+f 28 6 17
+f 29 17 6
+f 29 6 18
+f 29 18 11
+f 29 11 5
+f 30 29 5
+f 30 17 29
+f 32 19 1
+f 33 20 2
+f 33 2 1
+f 33 1 20
+f 35 7 22
+f 36 13 22
+f 37 23 14
+f 37 8 23
+f 38 14 25
+f 39 25 6
+f 39 16 25
+f 39 28 16
+f 39 6 28
+f 40 26 24
+f 40 14 26
+f 40 24 35
+f 41 26 4
+f 43 42 27
+f 44 43 27
+f 44 4 43
+f 45 28 10
+f 45 16 28
+f 45 10 42
+f 46 30 5
+f 46 5 19
+f 47 31 19
+f 47 19 5
+f 47 5 11
+f 47 11 31
+f 48 20 19
+f 48 19 31
+f 49 32 1
+f 49 1 13
+f 49 36 32
+f 49 13 36
+f 50 34 21
+f 50 21 12
+f 50 12 34
+f 51 34 7
+f 51 35 24
+f 51 7 35
+f 52 35 22
+f 52 22 8
+f 53 36 22
+f 53 22 7
+f 53 7 34
+f 53 32 36
+f 54 37 14
+f 54 35 52
+f 54 52 8
+f 54 8 37
+f 54 40 35
+f 54 14 40
+f 55 38 25
+f 55 25 16
+f 55 16 26
+f 55 26 14
+f 55 14 38
+f 56 24 26
+f 56 26 41
+f 56 51 24
+f 57 41 4
+f 57 4 21
+f 57 21 34
+f 57 34 51
+f 57 56 41
+f 57 51 56
+f 58 42 10
+f 58 10 17
+f 58 17 42
+f 59 42 17
+f 59 27 42
+f 59 44 27
+f 59 17 30
+f 59 12 21
+f 59 21 4
+f 59 4 44
+f 60 43 4
+f 60 4 16
+f 60 16 45
+f 60 45 42
+f 60 42 43
+f 61 46 19
+f 61 19 32
+f 61 32 46
+f 62 46 32
+f 62 32 53
+f 62 53 34
+f 62 34 12
+f 63 30 46
+f 63 59 30
+f 63 12 59
+f 63 62 12
+f 63 46 62
+f 64 48 31
+f 64 31 11
+f 64 11 20
+f 64 20 48
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl040.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl040.obj
new file mode 100644
index 0000000000000000000000000000000000000000..a4443af6f7bb49f0d170f03e659a9db3bc9e7fe7
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl040.obj
@@ -0,0 +1,188 @@
+v 0.041215786 0.023539098 0.007257580
+v 0.044217665 0.015644376 0.007346611
+v 0.039254278 0.018046876 0.002157659
+v 0.030234845 0.023540448 0.000225847
+v 0.027475710 0.015694031 0.003092795
+v 0.038077373 0.023539098 0.003334564
+v 0.042392691 0.017654574 0.004511469
+v 0.037685072 0.023146797 0.007257580
+v 0.038469675 0.012162351 0.001373056
+v 0.031408245 0.023146797 0.000196151
+v 0.044354199 0.016477669 0.006865279
+v 0.040823485 0.021969892 0.004903770
+v 0.042000390 0.013339256 0.007257580
+v 0.037685072 0.023539098 0.007257580
+v 0.042000390 0.012554653 0.002942262
+v 0.027495248 0.015698652 0.001216226
+v 0.036508167 0.022754495 0.002157659
+v 0.031015944 0.023539098 0.000196151
+v 0.034546659 0.016085367 -0.000196151
+v 0.044354199 0.016477669 0.007257580
+v 0.039254278 0.023539098 0.004119167
+v 0.042784993 0.018831479 0.005688374
+v 0.038077373 0.012162351 0.004119167
+v 0.027476695 0.021970757 0.003117814
+v 0.042392691 0.015300764 0.003726866
+v 0.042392691 0.012162351 0.003334564
+v 0.038077373 0.022754495 0.002942262
+v 0.033762055 0.017654574 -0.000196151
+v 0.035723563 0.023539098 0.002157659
+v 0.033369754 0.015693066 -0.000196151
+v 0.040431183 0.014123859 0.002157659
+v 0.042392691 0.021185289 0.007257580
+v 0.041215786 0.023539098 0.006865279
+v 0.041608088 0.020400685 0.004903770
+v 0.042000390 0.016477669 0.003726866
+v 0.042000390 0.012162351 0.006865279
+v 0.037968452 0.012135290 0.001705353
+v 0.030240720 0.023538933 0.003093381
+v 0.027488928 0.021971954 0.001067681
+v 0.043177295 0.016085367 0.004903770
+v 0.039646580 0.012162351 0.001765357
+v 0.042392691 0.013339256 0.003334564
+v 0.038861977 0.018831479 0.002157659
+v 0.038469675 0.023146797 0.003334564
+v 0.032585150 0.019616082 -0.000196151
+v 0.034546659 0.015693066 -0.000196151
+v 0.040038881 0.015693066 0.002157659
+v 0.043569596 0.018439177 0.007257580
+v 0.040431183 0.023539098 0.005688374
+v 0.042392691 0.021185289 0.006865279
+v 0.042392691 0.018439177 0.004903770
+v 0.042000390 0.021185289 0.006080675
+v 0.041608088 0.018439177 0.004119167
+v 0.042604421 0.012215065 0.006744906
+v 0.029839039 0.021577590 0.000196151
+v 0.042784993 0.016477669 0.004511469
+v 0.042000390 0.012162351 0.002942262
+v 0.039646580 0.012554653 0.001765357
+v 0.043961898 0.016085367 0.006080675
+v 0.039254278 0.019223780 0.002549961
+v 0.031800547 0.020008384 -0.000196151
+v 0.040823485 0.016869971 0.002942262
+v 0.041215786 0.023146797 0.006472977
+v 0.043569596 0.018439177 0.006865279
+f 13 8 5
+f 13 2 8
+f 14 8 2
+f 14 2 1
+f 14 1 4
+f 18 17 10
+f 20 2 11
+f 21 6 4
+f 24 5 8
+f 24 8 14
+f 26 11 2
+f 29 17 18
+f 29 18 4
+f 29 4 6
+f 30 9 16
+f 30 28 19
+f 32 1 2
+f 33 1 32
+f 33 4 1
+f 34 21 12
+f 36 13 5
+f 36 5 23
+f 37 16 9
+f 37 36 23
+f 37 26 36
+f 37 23 5
+f 37 5 16
+f 38 24 14
+f 38 14 4
+f 38 4 24
+f 39 16 5
+f 39 5 24
+f 39 24 4
+f 41 37 9
+f 42 26 15
+f 42 25 40
+f 42 15 31
+f 43 17 27
+f 43 3 19
+f 43 19 28
+f 44 43 27
+f 44 6 21
+f 44 27 17
+f 44 29 6
+f 44 17 29
+f 45 10 17
+f 45 43 28
+f 45 17 43
+f 45 18 10
+f 45 28 30
+f 46 30 19
+f 46 9 30
+f 46 41 9
+f 46 19 41
+f 47 31 19
+f 47 19 3
+f 47 42 31
+f 48 20 11
+f 48 32 2
+f 48 2 20
+f 49 12 21
+f 49 21 4
+f 49 4 33
+f 50 33 32
+f 50 32 48
+f 51 34 22
+f 51 22 7
+f 51 7 34
+f 52 34 12
+f 52 33 50
+f 52 22 34
+f 53 34 7
+f 53 7 35
+f 53 21 34
+f 53 44 21
+f 53 35 44
+f 54 36 26
+f 54 26 2
+f 54 2 13
+f 54 13 36
+f 55 39 4
+f 55 16 39
+f 56 40 25
+f 56 7 22
+f 56 35 7
+f 56 25 35
+f 57 15 26
+f 57 26 37
+f 57 37 41
+f 57 41 31
+f 57 31 15
+f 58 41 19
+f 58 19 31
+f 58 31 41
+f 59 42 40
+f 59 11 26
+f 59 26 42
+f 59 40 56
+f 59 56 22
+f 59 22 11
+f 60 3 43
+f 60 43 44
+f 61 4 18
+f 61 18 45
+f 61 55 4
+f 61 45 30
+f 61 30 16
+f 61 16 55
+f 62 35 25
+f 62 25 42
+f 62 42 47
+f 62 47 3
+f 62 3 60
+f 62 60 44
+f 62 44 35
+f 63 49 33
+f 63 12 49
+f 63 52 12
+f 63 33 52
+f 64 50 48
+f 64 48 11
+f 64 52 50
+f 64 11 22
+f 64 22 52
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl041.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl041.obj
new file mode 100644
index 0000000000000000000000000000000000000000..2303fbf80b841cf7e181445107cc9185c355636a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl041.obj
@@ -0,0 +1,176 @@
+v 0.046086392 -0.017581072 0.024580822
+v 0.046315708 -0.023144796 0.024518852
+v 0.045531104 -0.018044875 0.013142105
+v 0.040431183 -0.021967891 0.012357502
+v 0.034546659 -0.034913845 0.023341947
+v 0.042784993 -0.017652573 0.014319010
+v 0.046708009 -0.018437177 0.018634328
+v 0.036508167 -0.032952337 0.024911154
+v 0.039254278 -0.029421623 0.012357502
+v 0.045169311 -0.017532756 0.013231958
+v 0.045138803 -0.017652573 0.023341947
+v 0.033369754 -0.034913845 0.019811233
+v 0.046708009 -0.017652573 0.021772741
+v 0.046708009 -0.020398685 0.020988138
+v 0.040431183 -0.032952337 0.024518852
+v 0.036963194 -0.033392165 0.024891408
+v 0.040823485 -0.027067813 0.012357502
+v 0.042784993 -0.017652573 0.014711311
+v 0.045111921 -0.017643671 0.013148781
+v 0.045531104 -0.017652573 0.024518852
+v 0.036115865 -0.028637019 0.012749803
+v 0.046505956 -0.017585658 0.018684509
+v 0.046708009 -0.019614082 0.019811233
+v 0.046708009 -0.020398685 0.021772741
+v 0.037292770 -0.034521544 0.018634328
+v 0.045138803 -0.025498606 0.024518852
+v 0.035882923 -0.034642288 0.024468686
+v 0.040942833 -0.022040460 0.012321782
+v 0.043569596 -0.023537098 0.014711311
+v 0.040823485 -0.031383131 0.021380439
+v 0.036115865 -0.028637019 0.012357502
+v 0.045923406 -0.017652573 0.014711311
+v 0.045531104 -0.018437177 0.013534406
+v 0.033413153 -0.035349130 0.018619138
+v 0.035723563 -0.034129242 0.024518852
+v 0.038627787 -0.034328021 0.024591824
+v 0.045531104 -0.024714003 0.023734249
+v 0.040823485 -0.027460114 0.013142105
+v 0.038861977 -0.032952337 0.019026629
+v 0.036130274 -0.029432995 0.012353001
+v 0.045923406 -0.018044875 0.014711311
+v 0.034938960 -0.035306147 0.018634328
+v 0.033608988 -0.035543088 0.019727499
+v 0.038861977 -0.034521544 0.023341947
+v 0.046315708 -0.023144796 0.024126551
+v 0.045138803 -0.025498606 0.023734249
+v 0.040038881 -0.028637019 0.013142105
+v 0.038077373 -0.033736941 0.018634328
+v 0.039646580 -0.031775432 0.018634328
+v 0.045923406 -0.018437177 0.015103613
+v 0.034832683 -0.035522863 0.023251907
+v 0.040431183 -0.032952337 0.024126551
+v 0.037685072 -0.034521544 0.019418931
+v 0.042784993 -0.025498606 0.015495915
+v 0.043569596 -0.024714003 0.016672820
+v 0.040431183 -0.031775432 0.020988138
+v 0.040823485 -0.032167734 0.023341947
+v 0.038861977 -0.034129242 0.022165042
+v 0.042784993 -0.025106305 0.014711311
+v 0.040038881 -0.032560036 0.021772741
+f 13 1 2
+f 14 7 13
+f 16 2 1
+f 16 1 8
+f 18 10 6
+f 18 1 10
+f 18 11 1
+f 18 12 11
+f 19 10 3
+f 19 4 6
+f 19 6 10
+f 20 8 1
+f 20 1 11
+f 20 5 8
+f 20 12 5
+f 20 11 12
+f 21 18 6
+f 21 12 18
+f 22 10 1
+f 22 1 13
+f 22 13 7
+f 23 7 14
+f 24 14 13
+f 24 13 2
+f 24 2 14
+f 26 16 15
+f 26 2 16
+f 27 16 8
+f 28 17 9
+f 28 3 17
+f 28 19 3
+f 28 4 19
+f 31 21 6
+f 31 6 4
+f 31 12 21
+f 31 4 28
+f 32 3 10
+f 32 10 22
+f 32 22 7
+f 33 23 14
+f 33 14 29
+f 33 29 17
+f 33 17 3
+f 34 12 31
+f 35 27 8
+f 35 8 5
+f 35 5 27
+f 36 15 16
+f 36 16 27
+f 37 29 14
+f 38 9 17
+f 40 31 28
+f 40 28 9
+f 40 34 31
+f 40 9 34
+f 41 32 7
+f 41 3 32
+f 42 34 9
+f 42 9 25
+f 43 5 12
+f 43 12 34
+f 43 34 42
+f 44 15 36
+f 45 37 14
+f 45 14 2
+f 45 2 26
+f 46 26 15
+f 46 45 26
+f 46 37 45
+f 47 38 30
+f 47 9 38
+f 48 25 9
+f 48 9 39
+f 49 39 9
+f 50 23 33
+f 50 33 3
+f 50 3 41
+f 50 41 7
+f 50 7 23
+f 51 27 5
+f 51 5 43
+f 51 43 42
+f 51 42 44
+f 51 44 36
+f 51 36 27
+f 52 15 44
+f 52 46 15
+f 53 44 42
+f 53 42 25
+f 53 25 48
+f 53 48 39
+f 54 46 30
+f 54 30 38
+f 55 29 37
+f 55 37 46
+f 55 46 54
+f 56 49 9
+f 56 39 49
+f 56 47 30
+f 56 9 47
+f 57 30 46
+f 57 46 52
+f 57 56 30
+f 57 52 56
+f 58 52 44
+f 58 44 53
+f 58 53 39
+f 59 17 29
+f 59 55 54
+f 59 29 55
+f 59 54 38
+f 59 38 17
+f 60 56 52
+f 60 39 56
+f 60 58 39
+f 60 52 58
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl042.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl042.obj
new file mode 100644
index 0000000000000000000000000000000000000000..e564fe9fed5cc68b91a058eb567b85a86c3acd76
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl042.obj
@@ -0,0 +1,188 @@
+v -0.032536921 -0.047859799 0.043349331
+v -0.029699618 -0.050846531 0.039077128
+v -0.029790810 -0.041190672 0.015888216
+v -0.039206049 -0.031775432 0.006472977
+v -0.038421446 -0.045113688 0.035895600
+v -0.038421446 -0.030990829 0.008042184
+v -0.027778005 -0.046568869 0.028080574
+v -0.034890731 -0.049429006 0.043349331
+v -0.030183112 -0.043152180 0.010788295
+v -0.038029144 -0.041582973 0.037072505
+v -0.030575413 -0.048644403 0.042172426
+v -0.029790810 -0.041975275 0.018634328
+v -0.036067636 -0.047075196 0.036680203
+v -0.030183112 -0.050605911 0.036680203
+v -0.029790810 -0.041190672 0.010395993
+v -0.040382954 -0.034129242 0.008826787
+v -0.035411993 -0.046462485 0.043296210
+v -0.029790810 -0.047859799 0.038641711
+v -0.030495929 -0.051258306 0.042210006
+v -0.034106128 -0.046682895 0.043349331
+v -0.034890731 -0.049429006 0.042564728
+v -0.035283033 -0.042367577 0.019418931
+v -0.031360016 -0.044329085 0.017065121
+v -0.027653115 -0.048313786 0.026182337
+v -0.032144620 -0.036875354 0.007649882
+v -0.032929223 -0.040013767 0.007649882
+v -0.042825031 -0.033802339 0.018607343
+v -0.038645520 -0.041814925 0.036991215
+v -0.039598351 -0.033736941 0.018634328
+v -0.030183112 -0.050998213 0.038249410
+v -0.032144620 -0.048252101 0.043349331
+v -0.034890731 -0.046290593 0.043349331
+v -0.035144398 -0.048806109 0.043414878
+v -0.035283033 -0.049036704 0.042172426
+v -0.032144620 -0.050213609 0.039426315
+v -0.032929223 -0.050605911 0.043349331
+v -0.037636843 -0.042759878 0.026088059
+v -0.028613905 -0.048252101 0.025303456
+v -0.032144620 -0.042759878 0.013534406
+v -0.027718313 -0.048828433 0.028108957
+v -0.027829302 -0.046682895 0.024518852
+v -0.029623410 -0.042873739 0.010867442
+v -0.032536921 -0.036483052 0.008042184
+v -0.038029144 -0.031775432 0.006472977
+v -0.039990653 -0.033736941 0.007649882
+v -0.039598351 -0.034913845 0.008826787
+v -0.032036636 -0.039886002 0.007716295
+v -0.042344462 -0.035306147 0.018242026
+v -0.039123180 -0.030921725 0.007728936
+v -0.038813748 -0.032952337 0.014711311
+v -0.038194782 -0.044858604 0.037151722
+v -0.033713826 -0.049821308 0.041387823
+v -0.031752318 -0.050998213 0.041387823
+v -0.033713826 -0.050213609 0.042957029
+v -0.032138387 -0.050595543 0.043352278
+v -0.033713826 -0.050213609 0.043349331
+v -0.037636843 -0.045505990 0.035110997
+v -0.030183112 -0.043544481 0.011965200
+v -0.028221603 -0.048252101 0.024911154
+v -0.028221603 -0.048644403 0.026088059
+v -0.031752318 -0.041975275 0.010395993
+v -0.030967715 -0.043544481 0.013534406
+v -0.027674649 -0.048746497 0.027740021
+v -0.028221603 -0.049036704 0.027657265
+f 12 7 3
+f 12 3 6
+f 12 6 7
+f 15 3 7
+f 18 11 2
+f 18 2 7
+f 18 7 6
+f 18 10 11
+f 19 2 11
+f 20 11 10
+f 20 1 11
+f 25 3 15
+f 28 17 10
+f 28 27 5
+f 28 10 27
+f 29 10 18
+f 29 27 10
+f 30 2 19
+f 31 19 11
+f 31 11 1
+f 32 20 10
+f 32 10 17
+f 33 1 20
+f 33 31 1
+f 33 20 32
+f 33 32 17
+f 33 17 28
+f 33 5 8
+f 34 21 8
+f 34 8 5
+f 34 13 21
+f 35 23 14
+f 35 13 23
+f 39 37 22
+f 39 13 37
+f 40 7 2
+f 41 15 7
+f 41 7 24
+f 42 41 24
+f 42 15 41
+f 43 25 6
+f 43 6 3
+f 43 3 25
+f 44 6 25
+f 44 26 4
+f 45 4 26
+f 45 27 4
+f 45 16 27
+f 46 26 22
+f 46 45 26
+f 46 16 45
+f 46 37 16
+f 46 22 37
+f 47 25 15
+f 47 15 42
+f 47 42 9
+f 47 9 26
+f 47 44 25
+f 47 26 44
+f 48 27 16
+f 48 5 27
+f 48 37 5
+f 48 16 37
+f 49 4 27
+f 49 44 4
+f 49 6 44
+f 49 29 6
+f 49 27 29
+f 50 29 18
+f 50 18 6
+f 50 6 29
+f 51 33 28
+f 51 28 5
+f 51 5 33
+f 52 21 13
+f 52 13 35
+f 53 35 14
+f 53 14 30
+f 53 52 35
+f 53 21 52
+f 53 30 19
+f 53 19 36
+f 54 8 21
+f 54 53 36
+f 54 21 53
+f 55 36 19
+f 55 19 31
+f 55 31 33
+f 55 33 36
+f 56 36 33
+f 56 33 8
+f 56 54 36
+f 56 8 54
+f 57 37 13
+f 57 5 37
+f 57 34 5
+f 57 13 34
+f 59 42 24
+f 59 9 42
+f 60 30 14
+f 60 14 38
+f 60 38 58
+f 60 58 9
+f 60 9 59
+f 60 59 24
+f 61 26 9
+f 61 58 39
+f 61 9 58
+f 61 39 22
+f 61 22 26
+f 62 23 13
+f 62 13 39
+f 62 39 58
+f 62 14 23
+f 62 58 38
+f 62 38 14
+f 63 24 7
+f 63 7 40
+f 63 60 24
+f 64 40 2
+f 64 2 30
+f 64 30 60
+f 64 63 40
+f 64 60 63
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl043.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl043.obj
new file mode 100644
index 0000000000000000000000000000000000000000..d47354e2dc062ef6f70b844f10ec5fe08d439fa1
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl043.obj
@@ -0,0 +1,188 @@
+v 0.054052068 -0.017618894 0.048574124
+v 0.054554042 -0.018044875 0.048449252
+v 0.055730947 -0.010198842 0.044918537
+v 0.046609022 -0.015782522 0.024439588
+v 0.043086596 -0.036015802 0.043377592
+v 0.048277216 -0.018437177 0.024518852
+v 0.055683610 -0.010191235 0.046499553
+v 0.049061819 -0.029421623 0.048841554
+v 0.051807931 -0.013337255 0.031187980
+v 0.039816601 -0.029555819 0.024465748
+v 0.054161740 -0.019221780 0.048841554
+v 0.055730947 -0.012552652 0.046487744
+v 0.048669517 -0.010198842 0.028049567
+v 0.053377137 -0.010198842 0.046487744
+v 0.042784993 -0.029421623 0.024518852
+v 0.053769439 -0.020006383 0.046487744
+v 0.051807931 -0.018044875 0.048841554
+v 0.055730947 -0.012552652 0.046095442
+v 0.051106912 -0.010149230 0.028126582
+v 0.039646580 -0.029029321 0.024518852
+v 0.047100311 -0.032560036 0.044526236
+v 0.043569596 -0.036090750 0.042564728
+v 0.048537740 -0.015777344 0.024454150
+v 0.051023327 -0.015691065 0.030795679
+v 0.054161740 -0.019614082 0.047664649
+v 0.052200232 -0.017652573 0.048841554
+v 0.044354199 -0.031383131 0.045310839
+v 0.051415629 -0.011375747 0.028049567
+v 0.054946344 -0.015691065 0.046095442
+v 0.050238724 -0.010198842 0.034718695
+v 0.042949245 -0.035902302 0.042635786
+v 0.045923406 -0.025498606 0.026480361
+v 0.043961898 -0.033736941 0.037464806
+v 0.045138803 -0.034913845 0.045310839
+v 0.045923406 -0.024321701 0.024518852
+v 0.047492613 -0.020790987 0.024518852
+v 0.054161740 -0.019614082 0.048056951
+v 0.051415629 -0.026283209 0.047664649
+v 0.054554042 -0.018044875 0.047272347
+v 0.047100311 -0.028637019 0.048841554
+v 0.043177295 -0.032167734 0.043349331
+v 0.051807931 -0.012552652 0.030403377
+v 0.055730947 -0.010591144 0.044918537
+v 0.051807931 -0.010198842 0.029226472
+v 0.052592534 -0.017260272 0.038641711
+v 0.039646580 -0.029029321 0.024911154
+v 0.045923406 -0.032167734 0.039818616
+v 0.043569596 -0.034129242 0.037464806
+v 0.045138803 -0.034521544 0.043349331
+v 0.049846422 -0.029029321 0.048449252
+v 0.043569596 -0.036090750 0.043349331
+v 0.045531104 -0.025106305 0.024518852
+v 0.052592534 -0.023537098 0.047664649
+v 0.050238724 -0.028244718 0.047272347
+v 0.047884914 -0.025498606 0.033541790
+v 0.046708009 -0.029029321 0.048449252
+v 0.047260625 -0.029501289 0.048585572
+v 0.052984835 -0.010198842 0.045310839
+v 0.051023327 -0.014906462 0.030011075
+v 0.055730947 -0.011375747 0.045310839
+v 0.051807931 -0.010591144 0.029226472
+v 0.053769439 -0.018044875 0.044133934
+v 0.048669517 -0.010198842 0.028441869
+v 0.044746501 -0.032560036 0.037072505
+f 11 7 1
+f 11 2 7
+f 12 7 2
+f 12 3 7
+f 17 8 11
+f 18 3 12
+f 18 12 2
+f 19 7 3
+f 19 14 7
+f 20 13 4
+f 20 4 10
+f 22 10 15
+f 23 15 10
+f 23 10 4
+f 23 4 13
+f 23 13 19
+f 26 17 11
+f 26 11 1
+f 26 1 7
+f 26 7 14
+f 26 14 17
+f 28 23 19
+f 28 6 23
+f 29 9 18
+f 31 22 5
+f 31 10 22
+f 36 24 16
+f 36 6 24
+f 36 35 23
+f 36 23 6
+f 37 25 2
+f 37 2 11
+f 39 18 2
+f 39 2 25
+f 39 29 18
+f 39 25 16
+f 40 17 14
+f 40 8 17
+f 41 5 27
+f 41 27 14
+f 42 28 18
+f 42 18 9
+f 42 9 24
+f 43 3 18
+f 44 28 19
+f 44 19 3
+f 44 3 43
+f 45 24 9
+f 45 9 29
+f 45 16 24
+f 46 13 20
+f 46 5 41
+f 46 31 5
+f 46 20 10
+f 46 10 31
+f 47 32 21
+f 47 15 32
+f 47 21 33
+f 48 33 22
+f 48 22 15
+f 48 15 33
+f 49 33 21
+f 49 22 33
+f 49 34 22
+f 49 21 34
+f 50 34 21
+f 50 8 34
+f 50 38 11
+f 50 11 8
+f 51 34 5
+f 51 5 22
+f 51 22 34
+f 52 35 32
+f 52 32 15
+f 52 15 23
+f 52 23 35
+f 53 16 25
+f 53 25 37
+f 53 37 11
+f 53 11 38
+f 53 38 36
+f 53 36 16
+f 54 21 32
+f 54 32 38
+f 54 50 21
+f 54 38 50
+f 55 38 32
+f 55 32 35
+f 55 35 36
+f 55 36 38
+f 56 40 14
+f 56 14 27
+f 56 27 5
+f 56 5 34
+f 56 34 40
+f 57 40 34
+f 57 34 8
+f 57 8 40
+f 58 41 14
+f 58 46 41
+f 58 30 46
+f 58 14 19
+f 58 19 30
+f 59 42 24
+f 59 24 6
+f 59 6 28
+f 59 28 42
+f 60 43 18
+f 60 18 28
+f 60 28 43
+f 61 44 43
+f 61 43 28
+f 61 28 44
+f 62 45 29
+f 62 29 39
+f 62 39 16
+f 62 16 45
+f 63 46 30
+f 63 13 46
+f 63 30 19
+f 63 19 13
+f 64 47 33
+f 64 33 15
+f 64 15 47
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl046.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl046.obj
new file mode 100644
index 0000000000000000000000000000000000000000..fddf616cf649e56d488eb93a5c3ba4aa6dc581c6
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl046.obj
@@ -0,0 +1,188 @@
+v 0.017677688 0.050215610 0.028049567
+v 0.017677688 0.048254102 0.028049567
+v 0.017817856 0.043803431 0.009930225
+v 0.012962593 0.043916178 0.003752982
+v 0.003548542 0.052533702 0.020604928
+v 0.002770226 0.047469498 0.010395993
+v 0.014539275 0.047469498 0.008434485
+v 0.008262449 0.051392515 0.024518852
+v 0.017285387 0.043546482 0.010395993
+v 0.017677688 0.046684895 0.010788295
+v 0.015323878 0.051392515 0.024126551
+v 0.003554829 0.050215610 0.009611390
+v 0.016500783 0.050215610 0.028049567
+v 0.003554829 0.049823308 0.019811233
+v 0.017677688 0.043546482 0.010395993
+v 0.012970069 0.043938784 0.007257580
+v 0.017677688 0.051000213 0.026872662
+v 0.014539275 0.044723387 0.004119167
+v 0.002770226 0.050607911 0.010788295
+v 0.009439354 0.049038705 0.008826787
+v 0.003557236 0.047499260 0.008819212
+v 0.016500783 0.048646403 0.028049567
+v 0.017285387 0.048254102 0.028049567
+v 0.003576050 0.050478053 0.020529040
+v 0.014082614 0.043747576 0.003933275
+v 0.017285387 0.050607911 0.024518852
+v 0.016411500 0.051199272 0.027709809
+v 0.017677688 0.046292593 0.010003692
+v 0.012970069 0.045115689 0.004119167
+v 0.002764346 0.050574510 0.012366004
+v 0.007085544 0.052569420 0.020988138
+v 0.012577767 0.048646403 0.010395993
+v 0.014539275 0.045507990 0.004903770
+v 0.003508424 0.049559693 0.008893891
+v 0.008262449 0.050215610 0.024518852
+v 0.002770226 0.047861800 0.011965200
+v 0.017285387 0.043546482 0.010003692
+v 0.014517967 0.043875479 0.004187504
+v 0.014146973 0.044331085 0.003726866
+v 0.017677688 0.049038705 0.019418931
+v 0.016893085 0.051392515 0.026872662
+v 0.017534049 0.050689323 0.027741797
+v 0.009404717 0.052420923 0.024555777
+v 0.004731734 0.049823308 0.008826787
+v 0.002731114 0.049993425 0.010060249
+v 0.003947131 0.052569420 0.019026629
+v 0.008654750 0.052177118 0.020595836
+v 0.014146973 0.047861800 0.009219088
+v 0.014539275 0.047077197 0.007649882
+v 0.013362370 0.045507990 0.004511469
+v 0.002786026 0.048449504 0.012307769
+v 0.017677688 0.050607911 0.025303456
+v 0.016893085 0.051000213 0.025303456
+v 0.010616259 0.052569420 0.024126551
+v 0.016738753 0.051058481 0.027748090
+v 0.003947131 0.050215610 0.009611390
+v 0.002800028 0.047838077 0.009909883
+v 0.003554829 0.052569420 0.019026629
+v 0.009047052 0.049823308 0.011572898
+v 0.014931577 0.050607911 0.020595836
+v 0.011008560 0.048646403 0.008826787
+v 0.013362370 0.046292593 0.005688374
+v 0.013362370 0.045115689 0.004119167
+v 0.010616259 0.052569420 0.024518852
+f 1 2 3
+f 13 2 1
+f 15 3 2
+f 15 2 9
+f 16 9 6
+f 16 6 4
+f 17 1 3
+f 21 4 6
+f 22 13 8
+f 22 2 13
+f 23 9 2
+f 23 2 22
+f 25 16 4
+f 25 9 16
+f 27 13 1
+f 27 8 13
+f 28 10 3
+f 28 3 18
+f 28 7 10
+f 33 28 18
+f 34 4 21
+f 34 29 4
+f 35 23 22
+f 35 14 23
+f 35 24 14
+f 35 22 8
+f 35 8 5
+f 35 5 24
+f 36 23 14
+f 36 6 9
+f 36 9 23
+f 36 14 24
+f 37 25 15
+f 37 15 9
+f 37 9 25
+f 38 3 15
+f 38 15 25
+f 38 18 3
+f 39 25 4
+f 39 38 25
+f 39 18 38
+f 39 4 29
+f 40 10 26
+f 40 3 10
+f 42 1 17
+f 42 17 41
+f 43 5 8
+f 43 8 27
+f 44 34 12
+f 44 29 34
+f 45 30 19
+f 45 36 30
+f 45 6 36
+f 45 19 12
+f 45 12 34
+f 49 7 28
+f 49 28 33
+f 50 33 18
+f 50 29 44
+f 51 36 24
+f 51 24 5
+f 51 5 30
+f 51 30 36
+f 52 40 26
+f 52 17 3
+f 52 3 40
+f 52 41 17
+f 53 11 41
+f 53 52 26
+f 53 41 52
+f 54 41 11
+f 54 31 46
+f 54 47 31
+f 55 27 1
+f 55 1 42
+f 55 42 41
+f 55 41 27
+f 56 20 44
+f 56 44 12
+f 56 46 31
+f 56 12 19
+f 56 19 46
+f 57 45 34
+f 57 34 21
+f 57 21 6
+f 57 6 45
+f 58 46 19
+f 58 54 46
+f 58 30 5
+f 58 19 30
+f 59 31 47
+f 59 56 31
+f 59 20 56
+f 60 48 32
+f 60 11 53
+f 60 53 26
+f 60 7 48
+f 60 26 10
+f 60 10 7
+f 60 59 47
+f 60 54 11
+f 60 47 54
+f 61 48 7
+f 61 32 48
+f 61 20 59
+f 61 60 32
+f 61 59 60
+f 61 7 49
+f 62 33 50
+f 62 20 61
+f 62 61 49
+f 62 49 33
+f 62 50 44
+f 62 44 20
+f 63 39 29
+f 63 29 50
+f 63 50 18
+f 63 18 39
+f 64 5 43
+f 64 43 27
+f 64 27 41
+f 64 41 54
+f 64 58 5
+f 64 54 58
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl047.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl047.obj
new file mode 100644
index 0000000000000000000000000000000000000000..cca1aac538c9791eeb5019c6dad6a592d8552d8b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl047.obj
@@ -0,0 +1,188 @@
+v -0.007037315 -0.057667340 0.048841554
+v -0.007037315 -0.059628849 0.048841554
+v -0.007052851 -0.055437153 0.039003296
+v -0.015275649 -0.054921229 0.039034013
+v -0.024298587 -0.053352022 0.048841554
+v -0.008214220 -0.057667340 0.039034013
+v -0.007037315 -0.057667340 0.039034013
+v -0.007037315 -0.055313531 0.040603219
+v -0.023513984 -0.052175117 0.043349331
+v -0.011395569 -0.054893470 0.038942146
+v -0.024298587 -0.056098134 0.047272347
+v -0.007037315 -0.059628849 0.046487744
+v -0.007037315 -0.057275039 0.048449252
+v -0.007821918 -0.054921229 0.039034013
+v -0.023906285 -0.052175117 0.044526236
+v -0.015275649 -0.056490436 0.039034013
+v -0.014493572 -0.054550377 0.039028607
+v -0.023906285 -0.056098134 0.048841554
+v -0.007429617 -0.058844245 0.043349331
+v -0.018806364 -0.058059642 0.047664649
+v -0.023906285 -0.053352022 0.048841554
+v -0.023513984 -0.052175117 0.043741633
+v -0.007821918 -0.054921229 0.039426315
+v -0.024450525 -0.052512858 0.044434395
+v -0.013706443 -0.056882737 0.039034013
+v -0.024337723 -0.052262112 0.043325675
+v -0.024298587 -0.056098134 0.048449252
+v -0.007037315 -0.058844245 0.043349331
+v -0.010568030 -0.059628849 0.048449252
+v -0.019198666 -0.057275039 0.044526236
+v -0.024298587 -0.052567419 0.046095442
+v -0.023906285 -0.052567419 0.046095442
+v -0.011352633 -0.057275039 0.039034013
+v -0.024298587 -0.054921229 0.043349331
+v -0.021160174 -0.057275039 0.048449252
+v -0.013706443 -0.058844245 0.048841554
+v -0.008998823 -0.059628849 0.046880046
+v -0.019590967 -0.057667340 0.046487744
+v -0.015275649 -0.056882737 0.040210918
+v -0.024298587 -0.055313531 0.044526236
+v -0.021552476 -0.056098134 0.043349331
+v -0.023513984 -0.056490436 0.048449252
+v -0.009783426 -0.059628849 0.048841554
+v -0.022729380 -0.056490436 0.048841554
+v -0.018806364 -0.058059642 0.048056951
+v -0.007429617 -0.059628849 0.046487744
+v -0.012529538 -0.059236547 0.047664649
+v -0.009783426 -0.057667340 0.039426315
+v -0.023513984 -0.055313531 0.043349331
+v -0.019198666 -0.056882737 0.043349331
+v -0.021160174 -0.056882737 0.045703141
+v -0.021160174 -0.057275039 0.047272347
+v -0.015275649 -0.058451944 0.048841554
+v -0.016452554 -0.058451944 0.048449252
+v -0.016844856 -0.058451944 0.047664649
+v -0.017629459 -0.057667340 0.044918537
+v -0.010175728 -0.059628849 0.047664649
+v -0.008606522 -0.058059642 0.040603219
+v -0.023906285 -0.055313531 0.043741633
+v -0.019983269 -0.057667340 0.047272347
+v -0.023513984 -0.056490436 0.048056951
+v -0.016844856 -0.058451944 0.048056951
+v -0.010568030 -0.059628849 0.048056951
+v -0.017629459 -0.057275039 0.043349331
+f 5 2 1
+f 7 1 2
+f 8 7 3
+f 8 1 7
+f 10 7 6
+f 10 3 7
+f 12 7 2
+f 13 1 8
+f 14 8 3
+f 14 3 10
+f 15 13 8
+f 16 4 10
+f 17 10 4
+f 17 14 10
+f 17 9 14
+f 18 2 5
+f 19 6 7
+f 21 5 1
+f 21 1 13
+f 22 14 9
+f 22 9 15
+f 23 15 8
+f 23 8 14
+f 23 22 15
+f 23 14 22
+f 25 16 10
+f 26 4 16
+f 26 17 4
+f 26 9 17
+f 26 15 9
+f 27 18 5
+f 27 24 11
+f 27 5 24
+f 28 19 7
+f 28 7 12
+f 28 12 19
+f 29 12 2
+f 31 5 21
+f 31 24 5
+f 31 26 24
+f 31 15 26
+f 32 21 13
+f 32 13 15
+f 32 31 21
+f 32 15 31
+f 33 25 10
+f 33 10 6
+f 34 26 16
+f 34 24 26
+f 36 2 18
+f 37 12 29
+f 39 30 16
+f 39 16 25
+f 39 25 30
+f 40 11 24
+f 40 24 34
+f 41 30 38
+f 42 35 18
+f 42 18 27
+f 42 27 11
+f 43 36 29
+f 43 29 2
+f 43 2 36
+f 44 36 18
+f 44 18 35
+f 46 37 19
+f 46 19 12
+f 46 12 37
+f 48 33 6
+f 48 25 33
+f 49 34 16
+f 49 16 41
+f 50 41 16
+f 50 16 30
+f 50 30 41
+f 51 41 38
+f 51 38 11
+f 52 11 38
+f 53 44 35
+f 53 35 36
+f 53 36 44
+f 54 36 35
+f 54 35 45
+f 54 29 36
+f 55 45 20
+f 55 20 38
+f 56 48 47
+f 56 25 48
+f 56 47 55
+f 56 55 38
+f 56 38 30
+f 57 37 29
+f 57 48 37
+f 57 47 48
+f 58 48 6
+f 58 6 19
+f 58 19 37
+f 58 37 48
+f 59 40 34
+f 59 34 49
+f 59 11 40
+f 59 49 41
+f 59 51 11
+f 59 41 51
+f 60 52 38
+f 60 38 20
+f 60 35 42
+f 60 45 35
+f 60 20 45
+f 61 42 11
+f 61 11 52
+f 61 60 42
+f 61 52 60
+f 62 55 29
+f 62 45 55
+f 62 54 45
+f 62 29 54
+f 63 55 47
+f 63 29 55
+f 63 57 29
+f 63 47 57
+f 64 56 30
+f 64 30 25
+f 64 25 56
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl048.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl048.obj
new file mode 100644
index 0000000000000000000000000000000000000000..116b50fbedfe97f12edd94186aa49859bd0ea76c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl048.obj
@@ -0,0 +1,188 @@
+v 0.011008560 -0.045898291 0.009611390
+v 0.011008560 -0.048644403 0.009611390
+v 0.011008560 -0.042367577 0.001765357
+v -0.008214220 -0.041975275 0.000980754
+v -0.009364138 -0.049515421 0.009296234
+v 0.006693242 -0.041582973 0.004119167
+v 0.011024232 -0.041622426 0.003990345
+v -0.004650552 -0.050232403 0.010490193
+v 0.002770226 -0.048252101 0.004511469
+v -0.009394661 -0.041591212 0.003593336
+v 0.011008560 -0.047075196 0.005296072
+v 0.008654750 -0.041582973 0.001373056
+v 0.004731734 -0.050213609 0.010395993
+v -0.009391125 -0.047859799 0.004511469
+v 0.000416416 -0.045505990 0.002157659
+v 0.010616259 -0.045898291 0.009611390
+v 0.011008560 -0.043544481 0.002157659
+v 0.010989029 -0.041509366 0.001980068
+v -0.008191796 -0.041441288 0.001322453
+v 0.004731734 -0.047467498 0.010395993
+v -0.002329695 -0.050213609 0.007649882
+v 0.004731734 -0.049429006 0.006865279
+v -0.009391125 -0.042759878 0.001373056
+v -0.009409503 -0.047573575 0.009190026
+v 0.001985623 -0.042367577 0.000980754
+v -0.003898902 -0.046290593 0.002549961
+v 0.009439354 -0.047075196 0.004511469
+v 0.003947131 -0.041975275 0.000980754
+v 0.003916694 -0.041445194 0.001339783
+v 0.000772744 -0.050229065 0.010489311
+v -0.007429617 -0.048252101 0.004511469
+v -0.004683505 -0.050605911 0.010003692
+v 0.011008560 -0.048252101 0.007649882
+v 0.000024114 -0.049821308 0.006865279
+v 0.000024114 -0.050605911 0.009611390
+v -0.009386279 -0.041552356 0.001446894
+v -0.009391125 -0.049821308 0.008434485
+v -0.005461760 -0.050141653 0.010414141
+v 0.004339432 -0.047467498 0.010395993
+v 0.004731734 -0.045113688 0.002157659
+v -0.001937394 -0.043936783 0.001373056
+v -0.001545092 -0.046290593 0.002549961
+v -0.009391125 -0.044721386 0.002157659
+v 0.010616259 -0.046682895 0.004511469
+v 0.004339432 -0.049036704 0.006080675
+v 0.008654750 -0.041975275 0.001373056
+v 0.000808718 -0.050605911 0.010003692
+v 0.004731734 -0.050213609 0.010003692
+v 0.009831655 -0.048252101 0.006865279
+v -0.003898902 -0.049821308 0.006865279
+v -0.001152790 -0.050213609 0.007649882
+v -0.002721997 -0.050605911 0.009219088
+v -0.008998823 -0.049429006 0.006865279
+v -0.005464995 -0.048506496 0.010332329
+v 0.006693242 -0.044721386 0.002157659
+v 0.005516337 -0.045505990 0.002549961
+v -0.005468108 -0.042367577 0.000980754
+v -0.000368187 -0.043936783 0.001373056
+v -0.000368187 -0.046682895 0.002942262
+v -0.007429617 -0.045113688 0.002157659
+v -0.009391125 -0.047075196 0.003726866
+v 0.011008560 -0.045505990 0.003726866
+v 0.006693242 -0.047859799 0.004903770
+v 0.002770226 -0.049429006 0.006472977
+f 7 1 2
+f 7 6 1
+f 13 2 1
+f 16 1 6
+f 16 6 10
+f 17 3 7
+f 18 7 3
+f 18 3 12
+f 18 6 7
+f 19 10 6
+f 20 13 1
+f 20 1 16
+f 24 10 23
+f 24 16 10
+f 28 4 19
+f 28 25 4
+f 29 18 12
+f 29 28 19
+f 29 12 28
+f 29 19 6
+f 29 6 18
+f 30 20 8
+f 30 13 20
+f 31 26 9
+f 33 11 7
+f 33 7 2
+f 34 31 9
+f 36 19 4
+f 36 4 23
+f 36 23 10
+f 36 10 19
+f 37 24 14
+f 37 5 24
+f 37 32 5
+f 38 24 5
+f 38 32 8
+f 38 5 32
+f 39 20 16
+f 39 16 24
+f 39 8 20
+f 41 15 26
+f 42 26 15
+f 42 9 26
+f 42 15 40
+f 43 24 23
+f 44 27 17
+f 44 11 27
+f 45 27 22
+f 46 28 12
+f 46 12 3
+f 46 3 17
+f 47 30 8
+f 47 8 32
+f 47 13 30
+f 47 32 35
+f 48 33 2
+f 48 2 13
+f 48 22 33
+f 48 13 47
+f 48 47 35
+f 48 35 22
+f 49 27 11
+f 49 11 33
+f 49 33 22
+f 49 22 27
+f 50 34 21
+f 50 31 34
+f 51 34 22
+f 51 22 35
+f 51 21 34
+f 52 35 32
+f 52 51 35
+f 52 21 51
+f 52 37 21
+f 52 32 37
+f 53 37 14
+f 53 14 31
+f 53 31 50
+f 53 50 21
+f 53 21 37
+f 54 39 24
+f 54 24 38
+f 54 38 8
+f 54 8 39
+f 55 17 27
+f 55 46 17
+f 55 28 46
+f 55 25 28
+f 56 27 9
+f 56 55 27
+f 56 40 55
+f 57 41 4
+f 57 4 25
+f 57 25 41
+f 58 40 15
+f 58 15 41
+f 58 41 25
+f 58 55 40
+f 58 25 55
+f 59 42 40
+f 59 9 42
+f 59 56 9
+f 59 40 56
+f 60 41 26
+f 60 4 41
+f 60 43 23
+f 60 23 4
+f 61 31 14
+f 61 14 24
+f 61 24 43
+f 61 43 60
+f 61 60 26
+f 61 26 31
+f 62 44 17
+f 62 11 44
+f 62 17 7
+f 62 7 11
+f 63 45 9
+f 63 9 27
+f 63 27 45
+f 64 34 9
+f 64 9 45
+f 64 45 22
+f 64 22 34
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl050.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl050.obj
new file mode 100644
index 0000000000000000000000000000000000000000..78fed785eadb89fcb063343358393425595e03e9
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl050.obj
@@ -0,0 +1,188 @@
+v 0.025211915 0.046046784 0.024478761
+v 0.030623642 0.037661957 0.012357502
+v 0.023169911 0.043546482 0.008826787
+v 0.019639196 0.040800371 0.002942262
+v 0.017677688 0.047077197 0.024126551
+v 0.026700626 0.038054259 0.004119167
+v 0.030623642 0.038838862 0.011572898
+v 0.028662134 0.038446561 0.018242026
+v 0.018069990 0.049823308 0.023341947
+v 0.018462291 0.043938784 0.004903770
+v 0.029054436 0.040408069 0.018634328
+v 0.025131419 0.045507990 0.019811233
+v 0.028662134 0.038838862 0.007257580
+v 0.024739118 0.043546482 0.024518852
+v 0.019639196 0.040015767 0.006080675
+v 0.028269832 0.036877354 0.012749803
+v 0.017650172 0.049763752 0.024535046
+v 0.020423800 0.041192672 0.003334564
+v 0.024739118 0.040800371 0.005296072
+v 0.018069990 0.046292593 0.010003692
+v 0.028662134 0.041977275 0.017457423
+v 0.029242388 0.038676599 0.018166059
+v 0.028662134 0.041977275 0.016672820
+v 0.022777609 0.047861800 0.023734249
+v 0.029839039 0.038446561 0.008434485
+v 0.023169911 0.044331085 0.024518852
+v 0.023490797 0.037552252 0.003140588
+v 0.017677688 0.041584974 0.005296072
+v 0.029624598 0.037423306 0.007838677
+v 0.028662134 0.036877354 0.013534406
+v 0.018304677 0.049482171 0.024611608
+v 0.023954514 0.038054259 0.002942262
+v 0.018462291 0.042761879 0.003726866
+v 0.023169911 0.041977275 0.005688374
+v 0.026308324 0.038054259 0.003726866
+v 0.026700626 0.038446561 0.004511469
+v 0.025131419 0.040800371 0.005688374
+v 0.018462291 0.045507990 0.008042184
+v 0.017677688 0.049038705 0.020203534
+v 0.029121552 0.038921008 0.018607200
+v 0.025131419 0.044723387 0.017065121
+v 0.025131419 0.045900292 0.021380439
+v 0.020423800 0.049038705 0.024518852
+v 0.022777609 0.044331085 0.010788295
+v 0.018462291 0.049823308 0.023734249
+v 0.025131419 0.041977275 0.008434485
+v 0.029839039 0.038054259 0.007649882
+v 0.029839039 0.039231164 0.010788295
+v 0.023562213 0.037661957 0.006472977
+v 0.019607055 0.039969713 0.003007307
+v 0.017677688 0.041584974 0.006865279
+v 0.017528002 0.044053671 0.005489505
+v 0.026496123 0.037434367 0.004299212
+v 0.029249095 0.037115601 0.013455729
+v 0.030478931 0.037467331 0.009281039
+v 0.029070357 0.036896840 0.012351067
+v 0.023562213 0.047469498 0.024518852
+v 0.018069990 0.047077197 0.024518852
+v 0.020031498 0.040800371 0.002942262
+v 0.021208403 0.041192672 0.003726866
+v 0.019246895 0.042761879 0.004119167
+v 0.021993006 0.042369577 0.005296072
+v 0.026308324 0.037661957 0.003726866
+v 0.026308324 0.038446561 0.004119167
+f 11 2 7
+f 21 11 7
+f 21 1 11
+f 22 2 11
+f 23 21 7
+f 23 1 21
+f 26 5 8
+f 26 8 14
+f 30 8 5
+f 30 22 8
+f 31 26 14
+f 36 6 35
+f 37 36 19
+f 37 13 36
+f 37 34 3
+f 37 19 34
+f 38 20 3
+f 39 17 9
+f 39 9 20
+f 40 14 8
+f 40 8 22
+f 40 1 14
+f 40 22 11
+f 40 11 1
+f 41 3 12
+f 41 12 23
+f 42 23 12
+f 42 1 23
+f 42 12 24
+f 43 17 31
+f 44 24 12
+f 44 12 3
+f 44 3 20
+f 44 20 24
+f 45 24 20
+f 45 20 9
+f 45 43 24
+f 45 9 17
+f 45 17 43
+f 46 25 13
+f 46 3 41
+f 46 37 3
+f 46 13 37
+f 47 13 25
+f 47 36 13
+f 47 6 36
+f 47 25 7
+f 48 23 7
+f 48 7 25
+f 48 25 46
+f 48 46 41
+f 48 41 23
+f 49 27 16
+f 49 15 27
+f 49 16 30
+f 49 30 5
+f 49 5 15
+f 50 27 15
+f 50 32 27
+f 50 4 32
+f 50 33 4
+f 50 28 33
+f 51 15 5
+f 51 50 15
+f 51 28 50
+f 52 33 28
+f 52 10 33
+f 52 38 10
+f 52 20 38
+f 52 39 20
+f 52 17 39
+f 52 28 51
+f 52 51 5
+f 52 5 17
+f 53 16 27
+f 54 2 22
+f 54 22 30
+f 55 29 47
+f 55 47 7
+f 55 7 2
+f 56 30 16
+f 56 29 55
+f 56 54 30
+f 56 55 2
+f 56 2 54
+f 56 53 29
+f 56 16 53
+f 57 31 14
+f 57 14 1
+f 57 43 31
+f 57 24 43
+f 57 42 24
+f 57 1 42
+f 58 31 17
+f 58 17 5
+f 58 5 26
+f 58 26 31
+f 59 32 4
+f 59 35 32
+f 59 18 35
+f 59 33 18
+f 59 4 33
+f 60 35 18
+f 60 19 35
+f 61 33 10
+f 61 19 60
+f 61 60 18
+f 61 18 33
+f 62 34 19
+f 62 3 34
+f 62 38 3
+f 62 10 38
+f 62 61 10
+f 62 19 61
+f 63 27 32
+f 63 32 35
+f 63 53 27
+f 63 35 6
+f 63 6 47
+f 63 47 29
+f 63 29 53
+f 64 36 35
+f 64 35 19
+f 64 19 36
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl051.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl051.obj
new file mode 100644
index 0000000000000000000000000000000000000000..fa46e17f2e845e9f0e7e918a75874d16745a3c20
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl051.obj
@@ -0,0 +1,188 @@
+v -0.020481645 0.048592946 0.015800365
+v -0.014491046 0.046684895 0.011572898
+v -0.015275649 0.048254102 0.006472977
+v -0.021552476 0.040408069 0.001765357
+v -0.025083190 0.043546482 0.014319010
+v -0.016833512 0.039992117 0.001452221
+v -0.025867794 0.044723387 0.008042184
+v -0.014459808 0.049262923 0.011616352
+v -0.020400653 0.046753363 0.015870780
+v -0.014452099 0.045399209 0.003863746
+v -0.023513984 0.040015767 0.005688374
+v -0.016867353 0.040072886 0.003492879
+v -0.021944777 0.048254102 0.013926708
+v -0.021552476 0.045115689 0.004903770
+v -0.021906355 0.048170068 0.015911427
+v -0.014491046 0.048254102 0.006472977
+v -0.017629459 0.042369577 0.001765357
+v -0.024242413 0.039902216 0.002771264
+v -0.018021761 0.040015767 0.004119167
+v -0.014491046 0.045507990 0.007649882
+v -0.021552476 0.046684895 0.007649882
+v -0.014883348 0.049431007 0.010395993
+v -0.021944777 0.046684895 0.015888216
+v -0.025867794 0.046292593 0.013534406
+v -0.014491046 0.045900292 0.003726866
+v -0.019198666 0.040408069 0.001373056
+v -0.023906285 0.041584974 0.002942262
+v -0.026652397 0.041977275 0.010788295
+v -0.026260095 0.042369577 0.011965200
+v -0.023906285 0.044331085 0.005296072
+v -0.020767872 0.046684895 0.006865279
+v -0.018806364 0.047469498 0.007257580
+v -0.014491046 0.048646403 0.007649882
+v -0.015275649 0.049431007 0.011180597
+v -0.025734881 0.046001902 0.014399300
+v -0.025867794 0.045507990 0.010395993
+v -0.014491046 0.047077197 0.004903770
+v -0.016844856 0.041192672 0.001373056
+v -0.015667951 0.046292593 0.004119167
+v -0.019198666 0.040015767 0.001373056
+v -0.023513984 0.040408069 0.002157659
+v -0.024298587 0.040408069 0.002549961
+v -0.024407291 0.040236545 0.005429062
+v -0.024298587 0.042369577 0.003726866
+v -0.021552476 0.045507990 0.005296072
+v -0.021160174 0.047077197 0.008434485
+v -0.021552476 0.047469498 0.010395993
+v -0.016452554 0.048254102 0.007649882
+v -0.019590967 0.047077197 0.006865279
+v -0.014883348 0.048646403 0.007649882
+v -0.014491046 0.049431007 0.010395993
+v -0.025960221 0.043742325 0.014264523
+v -0.024690889 0.045507990 0.008434485
+v -0.026652397 0.043546482 0.010395993
+v -0.024298587 0.047077197 0.013534406
+v -0.016844856 0.043154180 0.002157659
+v -0.017629459 0.041192672 0.001373056
+v -0.014883348 0.047077197 0.004903770
+v -0.018806364 0.043154180 0.002549961
+v -0.021944777 0.041192672 0.002157659
+v -0.023513984 0.040015767 0.002157659
+v -0.025950441 0.041364487 0.007554821
+v -0.024298587 0.044331085 0.005688374
+v -0.024298587 0.041192672 0.002942262
+f 9 8 1
+f 9 2 8
+f 10 8 2
+f 12 9 5
+f 12 2 9
+f 15 9 1
+f 15 1 13
+f 16 8 10
+f 19 6 12
+f 19 18 6
+f 19 11 18
+f 19 12 5
+f 20 10 2
+f 20 2 12
+f 20 12 6
+f 20 6 10
+f 23 5 9
+f 23 9 15
+f 24 15 13
+f 25 10 6
+f 29 19 5
+f 29 11 19
+f 29 28 11
+f 32 31 21
+f 33 16 3
+f 33 8 16
+f 34 22 13
+f 34 13 1
+f 34 1 8
+f 35 23 15
+f 35 15 24
+f 37 16 10
+f 37 10 25
+f 37 3 16
+f 38 25 6
+f 39 37 25
+f 40 4 26
+f 40 6 18
+f 40 38 6
+f 40 26 38
+f 43 28 18
+f 43 18 11
+f 43 11 28
+f 44 30 14
+f 44 14 27
+f 45 14 30
+f 45 21 31
+f 45 39 14
+f 45 3 39
+f 46 32 21
+f 47 13 22
+f 47 22 32
+f 47 32 46
+f 47 46 21
+f 48 32 22
+f 48 22 3
+f 48 3 32
+f 49 32 3
+f 49 31 32
+f 49 45 31
+f 49 3 45
+f 50 33 3
+f 50 3 22
+f 50 22 33
+f 51 33 22
+f 51 8 33
+f 51 34 8
+f 51 22 34
+f 52 5 23
+f 52 23 35
+f 52 29 5
+f 52 28 29
+f 52 35 24
+f 53 7 36
+f 53 47 21
+f 53 36 47
+f 54 24 36
+f 54 52 24
+f 54 28 52
+f 54 36 7
+f 55 36 24
+f 55 24 13
+f 55 47 36
+f 55 13 47
+f 56 38 17
+f 56 25 38
+f 56 39 25
+f 56 17 39
+f 57 38 26
+f 57 17 38
+f 57 26 4
+f 57 4 17
+f 58 39 3
+f 58 3 37
+f 58 37 39
+f 59 27 14
+f 59 14 39
+f 59 39 17
+f 60 41 27
+f 60 17 4
+f 60 4 41
+f 60 59 17
+f 60 27 59
+f 61 40 18
+f 61 42 41
+f 61 18 42
+f 61 41 4
+f 61 4 40
+f 62 42 18
+f 62 18 28
+f 62 28 54
+f 62 54 7
+f 62 7 42
+f 63 44 7
+f 63 30 44
+f 63 7 53
+f 63 53 21
+f 63 45 30
+f 63 21 45
+f 64 42 7
+f 64 7 44
+f 64 44 27
+f 64 27 41
+f 64 41 42
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl053.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl053.obj
new file mode 100644
index 0000000000000000000000000000000000000000..837dba6f4b7cf509f6e204701f1884549f997a8b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl053.obj
@@ -0,0 +1,188 @@
+v -0.028202724 0.045475922 0.024527865
+v -0.025083190 0.043154180 0.012749803
+v -0.027044698 0.045507990 0.012357502
+v -0.038029144 0.037661957 0.012357502
+v -0.042344462 0.031777433 0.021772741
+v -0.038673531 0.033186079 0.012271885
+v -0.036459938 0.042761879 0.024126551
+v -0.025867794 0.046292593 0.013926708
+v -0.028221603 0.044723387 0.024126551
+v -0.025083190 0.043154180 0.012357502
+v -0.034106128 0.040800371 0.012357502
+v -0.045090574 0.032169734 0.019026629
+v -0.038421446 0.032954338 0.013142105
+v -0.042344462 0.032954338 0.024518852
+v -0.025734881 0.046001902 0.014399300
+v -0.032144620 0.045507990 0.022949646
+v -0.025111937 0.043607392 0.012340555
+v -0.037244541 0.038446561 0.012357502
+v -0.042344462 0.033346639 0.014319010
+v -0.039990653 0.039623466 0.023341947
+v -0.044557658 0.031681418 0.018675113
+v -0.038421446 0.041192672 0.024518852
+v -0.029006207 0.044331085 0.024518852
+v -0.042344462 0.032562036 0.023734249
+v -0.032013006 0.045323451 0.024576593
+v -0.026260095 0.046292593 0.014319010
+v -0.035283033 0.043154180 0.022557344
+v -0.029790810 0.044331085 0.013926708
+v -0.025821260 0.045406218 0.012385612
+v -0.025151556 0.043691341 0.012709501
+v -0.042344462 0.034131243 0.015495915
+v -0.039990653 0.035308148 0.012357502
+v -0.038029144 0.038838862 0.015495915
+v -0.045482875 0.033738941 0.024518852
+v -0.039990653 0.032954338 0.012357502
+v -0.045090574 0.031777433 0.019026629
+v -0.042234769 0.031676591 0.018671584
+v -0.038813748 0.040800371 0.023734249
+v -0.045245961 0.033184868 0.024587569
+v -0.033713826 0.044723387 0.024518852
+v -0.026260095 0.046292593 0.013926708
+v -0.031360016 0.044723387 0.018634328
+v -0.036459938 0.039231164 0.012749803
+v -0.028613905 0.045115689 0.013926708
+v -0.030183112 0.043546482 0.012357502
+v -0.026260095 0.045900292 0.012749803
+v -0.025720254 0.045577609 0.012838930
+v -0.043129066 0.033346639 0.015888216
+v -0.039598351 0.036092751 0.012749803
+v -0.041559859 0.037661957 0.022557344
+v -0.043521367 0.036092751 0.024518852
+v -0.042233156 0.032853221 0.014357320
+v -0.045482875 0.032169734 0.020595836
+v -0.045203153 0.031586431 0.020676969
+v -0.038421446 0.041192672 0.024126551
+v -0.039206049 0.040015767 0.022165042
+v -0.045423331 0.031736774 0.021790012
+v -0.033713826 0.044723387 0.024126551
+v -0.036459938 0.042761879 0.024518852
+v -0.030967715 0.043154180 0.012749803
+v -0.036852239 0.042369577 0.023734249
+v -0.036067636 0.040015767 0.014319010
+v -0.030575413 0.045507990 0.019418931
+v -0.027829302 0.045115689 0.012357502
+f 9 5 2
+f 9 2 1
+f 13 2 5
+f 13 10 2
+f 13 6 10
+f 15 8 1
+f 15 1 2
+f 17 10 6
+f 17 6 3
+f 17 8 15
+f 17 2 10
+f 18 11 6
+f 18 6 4
+f 23 14 9
+f 23 9 1
+f 24 5 9
+f 24 9 14
+f 25 1 16
+f 26 16 1
+f 26 1 8
+f 29 17 3
+f 30 17 15
+f 30 15 2
+f 30 2 17
+f 32 4 6
+f 33 18 4
+f 35 6 13
+f 35 32 6
+f 35 19 32
+f 36 12 19
+f 37 13 5
+f 37 35 13
+f 37 21 35
+f 39 23 1
+f 39 14 23
+f 39 1 25
+f 39 25 22
+f 40 25 16
+f 41 26 8
+f 41 16 26
+f 42 27 16
+f 43 18 33
+f 43 11 18
+f 45 6 11
+f 46 29 3
+f 46 8 29
+f 46 41 8
+f 46 3 41
+f 47 29 8
+f 47 8 17
+f 47 17 29
+f 48 19 12
+f 48 32 19
+f 49 31 4
+f 49 4 32
+f 49 48 31
+f 49 32 48
+f 50 20 33
+f 50 33 4
+f 50 4 31
+f 51 22 20
+f 51 20 50
+f 51 50 31
+f 51 31 48
+f 51 48 12
+f 51 12 34
+f 51 39 22
+f 51 34 39
+f 52 35 21
+f 52 21 36
+f 52 36 19
+f 52 19 35
+f 53 34 12
+f 53 12 36
+f 54 36 21
+f 54 37 5
+f 54 21 37
+f 55 22 7
+f 55 38 20
+f 55 20 22
+f 56 33 20
+f 56 20 38
+f 56 43 33
+f 57 24 14
+f 57 14 39
+f 57 53 36
+f 57 36 54
+f 57 39 34
+f 57 34 53
+f 57 54 5
+f 57 5 24
+f 58 40 16
+f 58 7 40
+f 59 40 7
+f 59 7 22
+f 59 22 25
+f 59 25 40
+f 60 11 42
+f 60 45 11
+f 61 7 58
+f 61 58 16
+f 61 16 27
+f 61 43 56
+f 61 56 38
+f 61 55 7
+f 61 38 55
+f 62 11 43
+f 62 42 11
+f 62 27 42
+f 62 61 27
+f 62 43 61
+f 63 16 41
+f 63 41 3
+f 63 42 16
+f 63 44 28
+f 63 60 42
+f 63 28 60
+f 64 3 6
+f 64 6 45
+f 64 63 3
+f 64 44 63
+f 64 28 44
+f 64 60 28
+f 64 45 60
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl054.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl054.obj
new file mode 100644
index 0000000000000000000000000000000000000000..f9ce8b09b9bc7ac4e0e9d6098412d984c5957a03
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl054.obj
@@ -0,0 +1,179 @@
+v -0.058469546 -0.005446776 0.040299408
+v -0.050190495 -0.016475669 0.019418931
+v -0.053721210 -0.002352810 0.022165042
+v -0.053897701 -0.012109528 0.018689101
+v -0.058821131 -0.016475669 0.040210918
+v -0.051677041 -0.012231815 0.018552643
+v -0.059213433 -0.002352810 0.032757187
+v -0.055682718 -0.016083367 0.039426315
+v -0.053328908 -0.016475669 0.019418931
+v -0.050975098 -0.013337255 0.018634328
+v -0.053880522 -0.002365686 0.020947074
+v -0.056859623 -0.002352810 0.034326393
+v -0.059998036 -0.011768049 0.040603219
+v -0.056025989 -0.016622881 0.039333112
+v -0.050190495 -0.016083367 0.018634328
+v -0.053907202 -0.002336135 0.021041315
+v -0.056859623 -0.006275826 0.024518852
+v -0.059138571 -0.002351529 0.034345396
+v -0.057644226 -0.005491223 0.039034013
+v -0.050190495 -0.016083367 0.019418931
+v -0.056467321 -0.014906462 0.040995521
+v -0.055290416 -0.010983446 0.020988138
+v -0.054898115 -0.016475669 0.036680203
+v -0.050412035 -0.016570676 0.018574177
+v -0.057251925 -0.016475669 0.033934092
+v -0.057266116 -0.002346340 0.026968643
+v -0.059213433 -0.005098921 0.033149488
+v -0.059823229 -0.007431111 0.039078605
+v -0.054898115 -0.016083367 0.036680203
+v -0.055290416 -0.002352810 0.028441869
+v -0.058821131 -0.008629636 0.040603219
+v -0.057644226 -0.008629636 0.040603219
+v -0.056467321 -0.014514160 0.040995521
+v -0.058471540 -0.016368235 0.040695981
+v -0.056440805 -0.016464297 0.040610419
+v -0.059998036 -0.010983446 0.039426315
+v -0.054113511 -0.013337255 0.018634328
+v -0.055682718 -0.008629636 0.021772741
+v -0.056275744 -0.016561750 0.040156418
+v -0.053721210 -0.016475669 0.020595836
+v -0.058428829 -0.002352810 0.029618774
+v -0.059879372 -0.010184166 0.040633490
+v -0.059727792 -0.006634703 0.038318348
+v -0.056930756 -0.014923834 0.040976838
+v -0.059998036 -0.011768049 0.040210918
+v -0.059998036 -0.007060429 0.037464806
+v -0.057251925 -0.005883524 0.025695757
+v -0.055682718 -0.014121859 0.025303456
+v -0.053328908 -0.015691065 0.018634328
+v -0.055682718 -0.009414239 0.021772741
+v -0.059213433 -0.003529714 0.032757187
+v -0.059998036 -0.006668128 0.037464806
+v -0.059605734 -0.010983446 0.037857108
+v -0.057644226 -0.005098921 0.026872662
+v -0.056075020 -0.014906462 0.027657265
+v -0.054113511 -0.014514160 0.019811233
+v -0.052634430 -0.016382806 0.018714508
+v -0.056859623 -0.012944954 0.028834170
+v -0.058428829 -0.002745111 0.029618774
+v -0.056859623 -0.016083367 0.031972583
+v -0.058821131 -0.011375747 0.035110997
+f 11 6 4
+f 11 10 6
+f 11 3 10
+f 15 10 3
+f 15 6 10
+f 16 3 11
+f 18 1 12
+f 18 12 16
+f 19 12 1
+f 19 8 12
+f 20 15 3
+f 20 2 15
+f 23 14 2
+f 23 2 20
+f 24 14 9
+f 24 2 14
+f 24 15 2
+f 24 6 15
+f 25 14 5
+f 26 16 11
+f 26 18 16
+f 26 7 18
+f 26 11 17
+f 29 20 12
+f 29 12 8
+f 29 23 20
+f 29 8 23
+f 30 20 3
+f 30 12 20
+f 30 16 12
+f 30 3 16
+f 32 19 1
+f 32 1 31
+f 33 21 8
+f 33 8 19
+f 33 19 32
+f 33 32 31
+f 34 13 5
+f 35 8 21
+f 35 21 34
+f 35 34 5
+f 37 4 6
+f 37 11 4
+f 38 17 11
+f 38 11 37
+f 39 23 8
+f 39 14 23
+f 39 8 35
+f 39 35 5
+f 39 5 14
+f 40 9 14
+f 40 14 25
+f 41 26 17
+f 41 7 26
+f 42 1 28
+f 42 31 1
+f 42 33 31
+f 43 28 1
+f 43 1 18
+f 44 34 21
+f 44 13 34
+f 44 42 13
+f 44 21 33
+f 44 33 42
+f 45 5 13
+f 45 13 36
+f 46 27 36
+f 46 36 13
+f 47 36 27
+f 47 17 22
+f 48 22 37
+f 49 9 40
+f 49 37 6
+f 49 6 24
+f 50 22 17
+f 50 17 38
+f 50 38 37
+f 50 37 22
+f 51 7 41
+f 51 46 7
+f 51 27 46
+f 52 28 43
+f 52 43 18
+f 52 18 7
+f 52 7 46
+f 52 42 28
+f 52 46 13
+f 52 13 42
+f 53 47 22
+f 53 36 47
+f 54 41 17
+f 54 17 47
+f 54 47 27
+f 54 27 51
+f 55 25 5
+f 56 48 37
+f 56 55 48
+f 56 40 55
+f 56 49 40
+f 56 37 49
+f 57 49 24
+f 57 24 9
+f 57 9 49
+f 58 22 48
+f 59 54 51
+f 59 51 41
+f 59 41 54
+f 60 55 40
+f 60 40 25
+f 60 25 55
+f 61 53 22
+f 61 22 58
+f 61 36 53
+f 61 58 48
+f 61 48 55
+f 61 45 36
+f 61 55 5
+f 61 5 45
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl056.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl056.obj
new file mode 100644
index 0000000000000000000000000000000000000000..d4128cfbce891170d57057690bc64cde71e68eff
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl056.obj
@@ -0,0 +1,101 @@
+v 0.038083687 0.013341681 0.003712221
+v 0.038101196 -0.014131679 0.003685879
+v 0.038077373 0.002747112 -0.000196151
+v 0.033369754 0.015693066 -0.000196151
+v 0.032979836 -0.014122018 0.003260935
+v 0.038077373 0.014123859 0.001373056
+v 0.034938960 -0.014121859 0.003726866
+v 0.032982433 0.014123771 0.003206469
+v 0.035331262 -0.014121859 -0.000196151
+v 0.035723563 0.014123859 -0.000196151
+v 0.035723563 0.015693066 0.002157659
+v 0.038152301 0.014142474 0.001929071
+v 0.034795317 -0.014227410 0.000017000
+v 0.032977452 0.014908462 0.000588452
+v 0.035723563 0.014123859 0.003726866
+v 0.038077373 -0.007060429 0.000196151
+v 0.037685072 0.005885525 -0.000196151
+v 0.035331262 0.015693066 0.000196151
+v 0.032977452 0.015693066 0.002157659
+v 0.037323437 0.014137935 0.003564987
+v 0.035723563 0.015693066 0.000588452
+v 0.037941735 -0.014057097 0.001733356
+v 0.032982709 -0.014123886 0.001079495
+v 0.033369754 0.015300764 -0.000196151
+v 0.034938960 -0.012944954 0.003726866
+v 0.036900468 -0.010591144 -0.000196151
+v 0.038077373 -0.001175905 -0.000196151
+v 0.036508167 0.011377748 -0.000196151
+v 0.034546659 0.015693066 -0.000196151
+v 0.035723563 0.015300764 0.000196151
+v 0.032977452 0.015693066 0.000588452
+v 0.035723563 -0.013729557 -0.000196151
+v 0.036115865 0.012946954 -0.000196151
+v 0.035331262 0.014908462 -0.000196151
+v 0.036508167 -0.011768049 -0.000196151
+f 7 2 1
+f 9 4 3
+f 10 3 4
+f 12 1 2
+f 12 3 6
+f 13 7 5
+f 13 2 7
+f 15 7 1
+f 16 12 2
+f 17 6 3
+f 17 3 10
+f 18 4 11
+f 19 11 4
+f 19 15 11
+f 19 8 15
+f 19 14 5
+f 19 5 8
+f 20 12 11
+f 20 1 12
+f 20 15 1
+f 20 11 15
+f 21 12 6
+f 21 11 12
+f 21 18 11
+f 21 6 18
+f 22 13 9
+f 22 2 13
+f 22 16 2
+f 23 13 5
+f 23 5 14
+f 24 4 9
+f 24 9 13
+f 24 13 23
+f 24 23 14
+f 24 14 4
+f 25 15 8
+f 25 7 15
+f 25 8 5
+f 25 5 7
+f 26 9 3
+f 26 16 22
+f 27 3 12
+f 27 12 16
+f 27 26 3
+f 27 16 26
+f 28 17 10
+f 28 6 17
+f 29 10 4
+f 29 4 18
+f 30 18 6
+f 31 19 4
+f 31 4 14
+f 31 14 19
+f 32 22 9
+f 32 9 26
+f 33 28 10
+f 33 10 6
+f 33 6 28
+f 34 10 29
+f 34 29 18
+f 34 18 30
+f 34 30 6
+f 34 6 10
+f 35 32 26
+f 35 26 22
+f 35 22 32
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl057.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl057.obj
new file mode 100644
index 0000000000000000000000000000000000000000..f81213222865fe4059f4f042c8fd28fed0bbdd9c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl057.obj
@@ -0,0 +1,188 @@
+v 0.043137624 -0.021557716 0.012368457
+v 0.043177295 -0.022360193 0.011572898
+v 0.040823485 -0.021575590 0.005296072
+v 0.032542544 -0.021544824 0.001122572
+v 0.035965984 -0.029612963 0.012281784
+v 0.041739239 -0.021457929 0.007434101
+v 0.039646580 -0.028637019 0.012357502
+v 0.040038881 -0.022360193 0.012357502
+v 0.038861977 -0.021575590 0.003334564
+v 0.040431183 -0.021575590 0.011965200
+v 0.043177295 -0.021575590 0.010788295
+v 0.042784993 -0.023537098 0.012357502
+v 0.040431183 -0.025498606 0.007257580
+v 0.030231341 -0.025106305 0.003726866
+v 0.042861290 -0.022217818 0.012444767
+v 0.033264882 -0.021499862 0.001329828
+v 0.041106293 -0.021747547 0.012272862
+v 0.042392691 -0.021575590 0.008042184
+v 0.042000390 -0.025106305 0.011965200
+v 0.037685072 -0.027852416 0.005296072
+v 0.035723563 -0.029029321 0.012357502
+v 0.032619077 -0.021593951 0.003348935
+v 0.042000390 -0.025106305 0.012357502
+v 0.042000390 -0.022360193 0.007257580
+v 0.038861977 -0.029421623 0.010788295
+v 0.042784993 -0.023537098 0.011965200
+v 0.038861977 -0.021967891 0.003334564
+v 0.040431183 -0.022752495 0.005296072
+v 0.030623642 -0.029421623 0.001765357
+v 0.038655383 -0.029284602 0.012416213
+v 0.042392691 -0.021967891 0.008042184
+v 0.039646580 -0.028637019 0.011965200
+v 0.041608088 -0.025106305 0.010395993
+v 0.038077373 -0.029029321 0.007257580
+v 0.035723563 -0.021967891 0.001765357
+v 0.040431183 -0.025106305 0.006865279
+v 0.040823485 -0.021967891 0.005296072
+v 0.038861977 -0.025106305 0.004903770
+v 0.034938960 -0.027067813 0.002549961
+v 0.030173200 -0.025064321 0.001174279
+v 0.030346773 -0.029519985 0.004244733
+v 0.036900468 -0.029421623 0.005688374
+v 0.043177295 -0.021967891 0.010788295
+v 0.038861977 -0.028244718 0.008042184
+v 0.041608088 -0.023537098 0.007649882
+v 0.035723563 -0.021575590 0.001765357
+v 0.033369754 -0.022360193 0.000980754
+v 0.039254278 -0.026675511 0.006472977
+v 0.041215786 -0.023537098 0.006865279
+v 0.038469675 -0.027067813 0.005688374
+v 0.036900468 -0.024714003 0.002942262
+v 0.030231341 -0.028637019 0.004511469
+v 0.030231341 -0.029421623 0.001765357
+v 0.038077373 -0.029421623 0.008042184
+v 0.033369754 -0.029421623 0.002942262
+v 0.034546659 -0.029421623 0.003726866
+v 0.040823485 -0.025106305 0.007649882
+v 0.041608088 -0.024714003 0.009611390
+v 0.030231341 -0.026283209 0.000980754
+v 0.034546659 -0.026283209 0.002157659
+v 0.037685072 -0.029029321 0.006472977
+v 0.041215786 -0.022360193 0.006080675
+v 0.040038881 -0.024714003 0.006080675
+v 0.038469675 -0.024321701 0.004119167
+f 9 6 3
+f 10 1 6
+f 11 6 1
+f 11 1 2
+f 12 2 1
+f 15 1 8
+f 15 12 1
+f 16 6 9
+f 16 10 6
+f 17 10 8
+f 17 8 1
+f 17 1 10
+f 18 3 6
+f 18 6 11
+f 21 8 14
+f 21 15 8
+f 22 4 14
+f 22 16 4
+f 22 10 16
+f 22 14 8
+f 22 8 10
+f 23 12 15
+f 23 19 12
+f 23 7 19
+f 24 3 18
+f 26 2 12
+f 26 12 19
+f 27 9 3
+f 30 21 5
+f 30 15 21
+f 30 23 15
+f 30 7 23
+f 30 25 7
+f 30 5 25
+f 31 24 18
+f 31 18 11
+f 32 25 19
+f 32 19 7
+f 32 7 25
+f 33 19 25
+f 33 26 19
+f 35 9 27
+f 37 3 24
+f 37 28 27
+f 37 27 3
+f 40 14 4
+f 41 5 21
+f 43 31 11
+f 43 11 2
+f 43 2 26
+f 43 24 31
+f 44 33 25
+f 44 34 13
+f 45 43 26
+f 45 24 43
+f 45 36 24
+f 45 13 36
+f 46 16 9
+f 46 9 35
+f 46 4 16
+f 47 46 35
+f 47 4 46
+f 48 36 13
+f 49 24 36
+f 50 20 38
+f 50 36 48
+f 50 42 20
+f 50 38 28
+f 51 20 39
+f 51 35 27
+f 52 41 21
+f 52 21 14
+f 52 14 40
+f 53 29 41
+f 53 52 40
+f 53 41 52
+f 54 25 5
+f 54 5 42
+f 54 44 25
+f 54 34 44
+f 55 41 29
+f 55 29 39
+f 56 42 5
+f 56 5 41
+f 56 41 55
+f 56 55 39
+f 56 39 20
+f 56 20 42
+f 57 44 13
+f 57 33 44
+f 57 13 45
+f 58 45 26
+f 58 26 33
+f 58 57 45
+f 58 33 57
+f 59 40 4
+f 59 4 47
+f 59 47 29
+f 59 53 40
+f 59 29 53
+f 60 39 29
+f 60 29 47
+f 60 47 35
+f 60 51 39
+f 60 35 51
+f 61 50 48
+f 61 42 50
+f 61 54 42
+f 61 34 54
+f 61 48 13
+f 61 13 34
+f 62 37 24
+f 62 24 49
+f 62 49 36
+f 62 36 28
+f 62 28 37
+f 63 50 28
+f 63 28 36
+f 63 36 50
+f 64 38 20
+f 64 20 51
+f 64 51 27
+f 64 27 28
+f 64 28 38
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl058.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl058.obj
new file mode 100644
index 0000000000000000000000000000000000000000..a412070b605b4db97ef9df61d54afbd7fd4955f6
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl058.obj
@@ -0,0 +1,188 @@
+v 0.014146973 -0.051390514 0.034718695
+v 0.017622996 -0.051650843 0.031223837
+v 0.017677688 -0.043152180 0.009611390
+v 0.011488373 -0.044641683 0.006663903
+v 0.003547284 -0.052091583 0.019047385
+v 0.013754672 -0.047467498 0.007649882
+v 0.003554829 -0.049821308 0.019026629
+v 0.017677688 -0.049036704 0.031187980
+v 0.009439354 -0.055313531 0.034326393
+v 0.017677688 -0.045505990 0.007257580
+v 0.004679318 -0.049581992 0.009672716
+v 0.015716180 -0.042759878 0.007257580
+v 0.014653667 -0.051726613 0.034630076
+v 0.006693242 -0.052959721 0.032757187
+v 0.015716180 -0.052959721 0.030795679
+v 0.017677688 -0.051782816 0.029618774
+v 0.011400862 -0.047859799 0.006865279
+v 0.004731734 -0.047467498 0.010788295
+v 0.003522597 -0.051818258 0.017938395
+v 0.017677688 -0.043152180 0.006865279
+v 0.017285387 -0.043152180 0.009611390
+v 0.017285387 -0.049036704 0.031187980
+v 0.014146973 -0.054136626 0.034326393
+v 0.008262449 -0.052959721 0.034326393
+v 0.004731734 -0.051782816 0.026872662
+v 0.006662443 -0.055172911 0.033185523
+v 0.017677688 -0.050998213 0.026480361
+v 0.012970069 -0.047467498 0.006865279
+v 0.011008560 -0.047859799 0.006865279
+v 0.005124036 -0.050213609 0.010395993
+v 0.003569268 -0.050015619 0.017802900
+v 0.004764482 -0.047664422 0.009553161
+v 0.011400862 -0.044329085 0.007257580
+v 0.015716180 -0.042759878 0.006865279
+v 0.016541002 -0.042838768 0.006810711
+v 0.014512673 -0.053633061 0.034354566
+v 0.014146973 -0.054136626 0.033934092
+v 0.017285387 -0.052175117 0.030403377
+v 0.014541026 -0.051787959 0.034717339
+v 0.008223368 -0.055135100 0.034372117
+v 0.009831655 -0.052175117 0.033541790
+v 0.007840042 -0.053649443 0.034352247
+v 0.004705975 -0.053851436 0.026551223
+v 0.006720158 -0.053551410 0.033099480
+v 0.013754672 -0.048252101 0.010395993
+v 0.016893085 -0.045898291 0.007257580
+v 0.017285387 -0.045505990 0.006865279
+v 0.005908639 -0.049821308 0.009611390
+v 0.004731734 -0.050213609 0.010395993
+v 0.011043118 -0.044844829 0.006785757
+v 0.016893085 -0.052175117 0.029618774
+v 0.010616259 -0.052175117 0.034326393
+v 0.005516337 -0.054136626 0.026088059
+v 0.012185465 -0.047859799 0.007257580
+v 0.017677688 -0.046290593 0.009611390
+v 0.017677688 -0.045113688 0.006865279
+v 0.004731734 -0.052175117 0.017849724
+v 0.014931577 -0.051390514 0.023734249
+v 0.007085544 -0.055313531 0.031972583
+v 0.013362370 -0.047859799 0.008434485
+v 0.014931577 -0.047075196 0.008042184
+v 0.017285387 -0.045898291 0.007649882
+v 0.005516337 -0.052567419 0.019811233
+v 0.006693242 -0.055313531 0.031972583
+f 10 3 8
+f 13 8 1
+f 13 2 8
+f 16 10 8
+f 16 8 2
+f 18 7 12
+f 19 5 7
+f 20 3 10
+f 21 8 3
+f 21 3 12
+f 22 12 7
+f 22 21 12
+f 22 8 21
+f 22 1 8
+f 25 7 5
+f 25 22 7
+f 27 10 16
+f 28 17 4
+f 29 4 17
+f 31 7 18
+f 31 19 7
+f 31 11 19
+f 32 31 18
+f 32 11 31
+f 32 29 11
+f 33 18 12
+f 33 32 18
+f 34 33 12
+f 34 4 33
+f 35 12 3
+f 35 3 20
+f 35 34 12
+f 35 4 34
+f 36 23 2
+f 36 2 13
+f 37 23 9
+f 38 16 2
+f 38 2 23
+f 38 23 37
+f 38 37 15
+f 38 27 16
+f 39 13 1
+f 39 9 23
+f 39 36 13
+f 39 23 36
+f 40 39 1
+f 40 9 39
+f 40 26 9
+f 41 14 24
+f 41 25 14
+f 41 22 25
+f 42 24 14
+f 42 26 40
+f 42 40 1
+f 42 1 24
+f 43 26 25
+f 43 25 5
+f 44 14 25
+f 44 25 26
+f 44 42 14
+f 44 26 42
+f 46 6 28
+f 47 46 28
+f 47 10 46
+f 47 28 4
+f 48 29 17
+f 48 11 29
+f 49 19 11
+f 49 48 30
+f 49 11 48
+f 50 4 29
+f 50 29 32
+f 50 33 4
+f 50 32 33
+f 51 38 15
+f 51 27 38
+f 52 41 24
+f 52 24 1
+f 52 1 22
+f 52 22 41
+f 53 43 5
+f 54 28 6
+f 54 17 28
+f 54 48 17
+f 54 30 48
+f 55 10 27
+f 56 20 10
+f 56 10 47
+f 56 35 20
+f 56 47 4
+f 56 4 35
+f 57 5 19
+f 57 19 49
+f 57 53 5
+f 57 49 30
+f 58 51 15
+f 58 45 27
+f 58 27 51
+f 58 15 37
+f 59 54 58
+f 59 58 37
+f 59 37 9
+f 60 54 6
+f 60 6 45
+f 60 58 54
+f 60 45 58
+f 61 45 6
+f 61 55 27
+f 61 27 45
+f 62 46 10
+f 62 10 55
+f 62 55 61
+f 62 61 6
+f 62 6 46
+f 63 57 30
+f 63 53 57
+f 63 59 53
+f 63 30 54
+f 63 54 59
+f 64 59 9
+f 64 9 26
+f 64 26 43
+f 64 43 53
+f 64 53 59
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl059.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl059.obj
new file mode 100644
index 0000000000000000000000000000000000000000..c5983064ba95d0c42d82bfd1ac4bc91c6d5765c4
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl059.obj
@@ -0,0 +1,188 @@
+v -0.052152003 0.020008384 0.031187980
+v -0.044305971 0.023539098 0.010003692
+v -0.052544305 0.023931400 0.027657265
+v -0.048228987 0.020008384 0.006865279
+v -0.047444384 0.013731558 0.007257580
+v -0.047444384 0.023931400 0.009219088
+v -0.049405892 0.023539098 0.027657265
+v -0.045323218 0.019741696 0.006659273
+v -0.056859623 0.013731558 0.030011075
+v -0.044600218 0.024113103 0.009915570
+v -0.050582797 0.016085367 0.008042184
+v -0.050582797 0.023931400 0.028049567
+v -0.054113511 0.013731558 0.031187980
+v -0.047707283 0.013794438 0.006662050
+v -0.045090574 0.019616082 0.007257580
+v -0.044360613 0.023965143 0.008810422
+v -0.055682718 0.018439177 0.031187980
+v -0.050975098 0.023931400 0.021772741
+v -0.050190495 0.016477669 0.007257580
+v -0.054426174 0.020539732 0.031100234
+v -0.049731905 0.024100716 0.027567524
+v -0.050295424 0.013637659 0.007078316
+v -0.049798193 0.016869971 0.006865279
+v -0.045119117 0.020416097 0.006840003
+v -0.047444384 0.023539098 0.008826787
+v -0.044305971 0.023146797 0.008826787
+v -0.056075020 0.016085367 0.029226472
+v -0.053721210 0.019223780 0.024126551
+v -0.055018256 0.019137389 0.031257929
+v -0.056696655 0.013682357 0.031228924
+v -0.052544305 0.023931400 0.028049567
+v -0.052449863 0.020527101 0.031106891
+v -0.052152003 0.013731558 0.011572898
+v -0.050975098 0.014123859 0.007649882
+v -0.050582797 0.014516161 0.006865279
+v -0.046750410 0.023797241 0.008911449
+v -0.044305971 0.023146797 0.009219088
+v -0.056467321 0.016085367 0.030795679
+v -0.055290416 0.018831479 0.029618774
+v -0.053721210 0.021185289 0.027264964
+v -0.050975098 0.016477669 0.010003692
+v -0.056467321 0.016085367 0.031187980
+v -0.055290416 0.019223780 0.030403377
+v -0.055682718 0.013731558 0.025303456
+v -0.052152003 0.014123859 0.011572898
+v -0.050582797 0.015300764 0.007257580
+v -0.050975098 0.013731558 0.007649882
+v -0.056859623 0.014123859 0.030011075
+v -0.055682718 0.017654574 0.029618774
+v -0.052936607 0.023146797 0.027657265
+v -0.054898115 0.018831479 0.028049567
+v -0.049798193 0.017654574 0.007649882
+v -0.055682718 0.018439177 0.030795679
+v -0.050975098 0.015300764 0.008434485
+v -0.056859623 0.014516161 0.031187980
+v -0.055682718 0.014123859 0.025303456
+v -0.052152003 0.023931400 0.026088059
+v -0.053721210 0.018439177 0.022949646
+v -0.052544305 0.020400685 0.021772741
+v -0.052544305 0.014516161 0.013534406
+v -0.056859623 0.014516161 0.030795679
+v -0.052544305 0.017654574 0.017457423
+v -0.050582797 0.019616082 0.013534406
+v -0.051367400 0.015693066 0.010395993
+f 10 7 2
+f 13 2 7
+f 13 7 1
+f 14 8 5
+f 15 5 8
+f 15 13 5
+f 15 2 13
+f 16 6 10
+f 16 10 2
+f 18 10 6
+f 21 7 10
+f 21 10 18
+f 22 5 13
+f 22 14 5
+f 23 4 8
+f 23 8 14
+f 24 8 4
+f 24 15 8
+f 25 4 6
+f 26 16 2
+f 26 24 16
+f 26 15 24
+f 29 13 1
+f 29 20 17
+f 29 1 20
+f 30 13 29
+f 30 22 13
+f 30 9 22
+f 31 20 12
+f 31 21 3
+f 31 12 21
+f 32 20 1
+f 32 12 20
+f 32 21 12
+f 32 1 7
+f 32 7 21
+f 35 23 14
+f 35 14 22
+f 35 19 23
+f 36 4 25
+f 36 24 4
+f 36 16 24
+f 36 25 6
+f 36 6 16
+f 37 26 2
+f 37 2 15
+f 37 15 26
+f 40 18 28
+f 41 19 11
+f 42 30 29
+f 42 29 17
+f 42 17 38
+f 43 31 3
+f 43 17 20
+f 43 20 31
+f 44 33 22
+f 44 22 9
+f 45 34 33
+f 45 33 44
+f 46 35 34
+f 46 19 35
+f 47 22 33
+f 47 33 34
+f 47 35 22
+f 47 34 35
+f 48 38 27
+f 48 44 9
+f 49 27 38
+f 50 18 40
+f 50 43 3
+f 50 40 39
+f 50 39 43
+f 51 40 28
+f 51 39 40
+f 51 49 39
+f 52 19 41
+f 52 23 19
+f 52 6 4
+f 52 4 23
+f 53 43 39
+f 53 38 17
+f 53 17 43
+f 53 49 38
+f 53 39 49
+f 54 11 19
+f 54 19 46
+f 54 46 34
+f 54 34 45
+f 55 42 38
+f 55 30 42
+f 55 48 9
+f 55 9 30
+f 56 45 44
+f 56 44 48
+f 56 48 27
+f 57 50 3
+f 57 18 50
+f 57 21 18
+f 57 3 21
+f 58 51 28
+f 58 49 51
+f 59 28 18
+f 60 27 49
+f 60 56 27
+f 60 45 56
+f 61 55 38
+f 61 38 48
+f 61 48 55
+f 62 58 28
+f 62 52 41
+f 62 28 59
+f 63 59 18
+f 63 62 59
+f 63 52 62
+f 63 18 6
+f 63 6 52
+f 64 54 45
+f 64 45 60
+f 64 58 62
+f 64 62 41
+f 64 41 11
+f 64 11 54
+f 64 60 49
+f 64 49 58
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl061.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl061.obj
new file mode 100644
index 0000000000000000000000000000000000000000..6a7f3277f2b139eb7a5ef2b1f40aa0812560a7cd
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl061.obj
@@ -0,0 +1,140 @@
+v 0.056515550 0.008239335 0.048841554
+v 0.056123249 -0.010198842 0.046487744
+v 0.052984835 0.002354810 0.030795679
+v 0.049972925 0.008270830 0.029978823
+v 0.053377137 -0.010198842 0.046487744
+v 0.052592534 0.008239335 0.030795679
+v 0.056907852 0.004708620 0.046880046
+v 0.056907852 0.004708620 0.048841554
+v 0.054536067 0.008268615 0.047962998
+v 0.049910538 -0.010213605 0.031171690
+v 0.052592534 0.007454731 0.030011075
+v 0.056515550 -0.006275826 0.045310839
+v 0.056515550 0.008239335 0.046487744
+v 0.056907852 0.007454731 0.047664649
+v 0.051023327 0.008239335 0.035895600
+v 0.054554042 0.004708620 0.048841554
+v 0.052592534 -0.009021937 0.031187980
+v 0.051889851 0.008168419 0.030090412
+v 0.049937360 -0.008249744 0.029988283
+v 0.054946344 0.002747112 0.038641711
+v 0.056515550 -0.007452731 0.046095442
+v 0.054764047 0.008280808 0.048580524
+v 0.055338645 0.008239335 0.041387823
+v 0.056907852 0.007062430 0.048449252
+v 0.049846422 -0.009806541 0.032757187
+v 0.053377137 -0.009414239 0.046487744
+v 0.052592534 -0.010198842 0.031972583
+v 0.052984835 -0.003922016 0.030795679
+v 0.052592534 -0.005883524 0.030011075
+v 0.051889977 -0.010148983 0.031265378
+v 0.054161740 0.002747112 0.035503298
+v 0.056515550 -0.007060429 0.045703141
+v 0.056515550 0.007847033 0.046095442
+v 0.055730947 0.008239335 0.042957029
+v 0.056907852 0.007454731 0.048056951
+v 0.056907852 0.005885525 0.048841554
+v 0.049846422 -0.010198842 0.032757187
+v 0.049846422 0.008239335 0.031187980
+v 0.051023327 -0.009806541 0.037464806
+v 0.053377137 -0.003529714 0.032364885
+v 0.055730947 -0.006275826 0.042172426
+v 0.051023327 -0.008237334 0.030011075
+v 0.056123249 -0.010198842 0.046095442
+v 0.056907852 0.006277826 0.047272347
+v 0.051023327 -0.010198842 0.037464806
+v 0.054554042 -0.007845033 0.038249410
+v 0.055338645 -0.007845033 0.041387823
+v 0.056123249 -0.009021937 0.045310839
+f 8 5 2
+f 10 2 5
+f 14 8 7
+f 14 13 1
+f 16 8 1
+f 16 5 8
+f 16 9 15
+f 18 11 4
+f 18 4 6
+f 18 6 11
+f 20 11 7
+f 20 7 12
+f 21 8 2
+f 21 7 8
+f 22 1 13
+f 22 16 1
+f 22 9 16
+f 22 15 9
+f 22 4 15
+f 22 6 4
+f 23 11 6
+f 23 6 22
+f 24 8 14
+f 26 16 15
+f 26 5 16
+f 28 11 3
+f 29 19 4
+f 29 4 11
+f 29 11 28
+f 30 27 10
+f 30 17 27
+f 31 20 3
+f 31 3 11
+f 31 11 20
+f 32 12 7
+f 32 7 21
+f 33 13 14
+f 34 7 11
+f 34 11 23
+f 34 13 33
+f 34 23 22
+f 34 22 13
+f 35 24 14
+f 35 14 1
+f 35 1 24
+f 36 24 1
+f 36 1 8
+f 36 8 24
+f 37 19 10
+f 38 25 15
+f 38 15 4
+f 38 4 19
+f 38 37 25
+f 38 19 37
+f 39 5 26
+f 39 25 37
+f 39 26 15
+f 39 15 25
+f 40 20 12
+f 40 12 28
+f 40 28 3
+f 40 3 20
+f 41 29 28
+f 41 28 12
+f 42 30 10
+f 42 10 19
+f 42 19 29
+f 42 29 17
+f 42 17 30
+f 43 32 21
+f 43 21 2
+f 43 2 10
+f 43 10 27
+f 44 33 14
+f 44 14 7
+f 44 34 33
+f 44 7 34
+f 45 39 37
+f 45 5 39
+f 45 37 10
+f 45 10 5
+f 46 17 29
+f 46 29 41
+f 46 41 12
+f 47 43 27
+f 47 27 17
+f 47 17 46
+f 47 46 12
+f 47 12 32
+f 48 47 32
+f 48 32 43
+f 48 43 47
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl062.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl062.obj
new file mode 100644
index 0000000000000000000000000000000000000000..daeabb7e49ffc6122ed610bc5a8ca644fd8e9bba
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl062.obj
@@ -0,0 +1,77 @@
+v -0.021944245 0.033344509 0.003266905
+v -0.021944603 0.030600456 0.003328293
+v -0.021945536 0.033347973 0.001016509
+v -0.029790810 0.030992830 -0.000196151
+v -0.032144620 0.032169734 0.004511469
+v -0.024304176 0.025902729 0.001244105
+v -0.030967715 0.033346639 0.004511469
+v -0.021947705 0.030605673 0.001118731
+v -0.032144620 0.025892908 0.000588452
+v -0.024298097 0.025892706 0.003316978
+v -0.032168407 0.033372911 0.004426338
+v -0.025083190 0.032954338 -0.000196151
+v -0.032146028 0.027462664 0.003291071
+v -0.031752318 0.032169734 0.004511469
+v -0.030967715 0.032954338 0.004511469
+v -0.032144620 0.033346639 0.001765357
+v -0.031360016 0.027069813 -0.000196151
+v -0.027826076 0.025891574 0.003218735
+v -0.032146081 0.025894054 0.001020864
+v -0.032144620 0.027854416 -0.000196151
+v -0.026652397 0.033346639 -0.000196151
+v -0.031752318 0.025892908 0.000588452
+v -0.032144620 0.027069813 -0.000196151
+v -0.031360016 0.029423623 -0.000196151
+v -0.027829302 0.032562036 -0.000196151
+v -0.025083190 0.033346639 -0.000196151
+v -0.032144620 0.029031321 0.000196151
+f 1 2 3
+f 7 2 1
+f 8 3 2
+f 10 8 2
+f 10 6 8
+f 11 7 1
+f 11 1 3
+f 11 5 7
+f 12 8 6
+f 12 3 8
+f 13 5 11
+f 14 7 5
+f 14 5 10
+f 15 10 2
+f 15 2 7
+f 15 14 10
+f 15 7 14
+f 17 4 12
+f 17 12 6
+f 18 10 5
+f 18 5 13
+f 19 13 11
+f 19 18 13
+f 19 9 18
+f 20 4 17
+f 21 12 4
+f 21 16 11
+f 22 17 6
+f 22 9 17
+f 22 6 10
+f 22 18 9
+f 22 10 18
+f 23 20 17
+f 23 17 9
+f 23 11 20
+f 23 19 11
+f 23 9 19
+f 24 16 4
+f 24 4 20
+f 25 21 4
+f 25 4 16
+f 25 16 21
+f 26 3 12
+f 26 12 21
+f 26 21 11
+f 26 11 3
+f 27 24 20
+f 27 16 24
+f 27 20 11
+f 27 11 16
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl063.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl063.obj
new file mode 100644
index 0000000000000000000000000000000000000000..147a9747d85151625176d5cc9ae6c9ca21038adc
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_bowl/kalas_bowl063.obj
@@ -0,0 +1,125 @@
+v 0.056907852 0.004708620 0.048841554
+v 0.056123249 -0.012160351 0.048056951
+v 0.056907852 -0.006275826 0.046880046
+v 0.054652277 0.001970193 0.046463088
+v 0.052200232 -0.017260272 0.048841554
+v 0.056413760 0.001962645 0.046513066
+v 0.056907852 -0.006275826 0.048449252
+v 0.055079112 0.004717176 0.048635102
+v 0.054554042 -0.017260272 0.046487744
+v 0.056726691 0.004696520 0.048103121
+v 0.056907852 -0.007452731 0.047664649
+v 0.054554042 -0.017652573 0.048841554
+v 0.051807931 -0.016867970 0.047272347
+v 0.055730947 -0.012944954 0.046487744
+v 0.054896400 0.004739325 0.048298727
+v 0.054819753 0.004729404 0.047990259
+v 0.056907852 -0.007452731 0.048056951
+v 0.055730947 -0.012552652 0.048841554
+v 0.054946344 -0.016867970 0.047664649
+v 0.051929461 -0.017695777 0.046455379
+v 0.056123249 -0.012160351 0.047664649
+v 0.056515550 -0.009021937 0.046880046
+v 0.056515550 -0.008237334 0.046487744
+v 0.051807931 -0.017260272 0.047664649
+v 0.055730947 -0.013729557 0.048449252
+v 0.056907852 -0.002745111 0.048841554
+v 0.056515550 -0.009021937 0.048449252
+v 0.054554042 -0.017652573 0.046880046
+v 0.052200232 -0.017652573 0.048841554
+v 0.052162420 -0.016993988 0.046393339
+v 0.055730947 -0.014121859 0.047664649
+v 0.056907852 -0.007060429 0.047272347
+v 0.056123249 -0.011375747 0.046880046
+v 0.051807931 -0.017652573 0.047664649
+v 0.055338645 -0.014514160 0.048841554
+v 0.056123249 -0.010198842 0.048841554
+v 0.055338645 -0.014514160 0.046487744
+v 0.053917416 -0.017582631 0.046550181
+v 0.055338645 -0.015298764 0.047272347
+v 0.055730947 -0.014121859 0.048056951
+v 0.056515550 -0.006668128 0.048841554
+v 0.055338645 -0.015298764 0.048449252
+v 0.054946344 -0.016867970 0.048056951
+f 7 3 1
+f 8 5 1
+f 10 1 3
+f 10 3 6
+f 11 3 7
+f 12 1 5
+f 15 8 1
+f 15 1 10
+f 15 5 8
+f 16 10 6
+f 16 6 4
+f 16 15 10
+f 16 4 13
+f 17 11 7
+f 17 2 11
+f 18 1 12
+f 20 13 4
+f 21 11 2
+f 22 11 21
+f 23 6 3
+f 23 22 14
+f 23 3 22
+f 23 4 6
+f 24 5 15
+f 24 16 13
+f 24 15 16
+f 25 2 17
+f 26 7 1
+f 26 1 18
+f 27 17 7
+f 27 25 17
+f 27 18 25
+f 28 19 12
+f 28 12 20
+f 29 20 12
+f 29 12 5
+f 29 5 24
+f 30 9 20
+f 30 23 14
+f 30 20 4
+f 30 4 23
+f 31 21 2
+f 32 22 3
+f 32 3 11
+f 32 11 22
+f 33 22 21
+f 33 14 22
+f 33 31 14
+f 33 21 31
+f 34 24 13
+f 34 13 20
+f 34 29 24
+f 34 20 29
+f 35 25 18
+f 35 18 12
+f 36 26 18
+f 36 27 7
+f 36 18 27
+f 37 28 9
+f 37 14 31
+f 37 30 14
+f 37 9 30
+f 38 28 20
+f 38 20 9
+f 38 9 28
+f 39 31 19
+f 39 19 28
+f 39 37 31
+f 39 28 37
+f 40 31 2
+f 40 2 25
+f 40 19 31
+f 41 36 7
+f 41 7 26
+f 41 26 36
+f 42 40 25
+f 42 35 12
+f 42 25 35
+f 43 42 12
+f 43 12 19
+f 43 19 40
+f 43 40 42
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate001.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate001.obj
new file mode 100644
index 0000000000000000000000000000000000000000..7e2ffe17fd2e59977a51ebed8e7a03f11f624e56
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate001.obj
@@ -0,0 +1,68 @@
+v 0.077639328 0.022303347 -0.007459782
+v 0.079999591 -0.020678810 -0.006154603
+v 0.079298214 -0.009800342 -0.012624986
+v 0.061481082 0.025840153 -0.010749565
+v 0.061469910 0.025829271 -0.008758581
+v 0.061479782 -0.020493401 -0.010668364
+v 0.075734272 0.022275138 -0.012624986
+v 0.079375044 0.015164862 -0.007391871
+v 0.077516243 -0.016928226 -0.005497101
+v 0.066824651 0.025839948 -0.008772730
+v 0.075734272 -0.020492169 -0.012624986
+v 0.080486041 -0.017244315 -0.006610622
+v 0.079118740 0.015193910 -0.009663317
+v 0.061466429 -0.020503910 -0.008657347
+v 0.066824422 0.025839101 -0.010841324
+v 0.079298214 -0.020492169 -0.010843015
+v 0.072754617 -0.020672359 -0.011666361
+v 0.063260474 -0.006236400 -0.012624986
+v 0.077133966 0.022120105 -0.009740993
+v 0.079298214 0.006237398 -0.012624986
+v 0.077516243 -0.020492169 -0.005497101
+v 0.070388359 0.022275138 -0.012624986
+v 0.077516243 -0.016928226 -0.012624986
+v 0.077516243 0.016929225 -0.012624986
+f 9 1 5
+f 9 8 1
+f 9 2 8
+f 10 5 1
+f 10 4 5
+f 11 7 3
+f 12 8 2
+f 13 1 8
+f 14 9 5
+f 14 5 4
+f 14 4 6
+f 15 4 10
+f 15 10 1
+f 15 1 7
+f 16 12 2
+f 16 3 12
+f 17 6 11
+f 17 14 6
+f 17 2 14
+f 17 16 2
+f 17 11 16
+f 18 11 6
+f 18 6 4
+f 18 7 11
+f 19 7 1
+f 19 1 13
+f 20 3 7
+f 20 12 3
+f 20 13 8
+f 20 8 12
+f 21 14 2
+f 21 2 9
+f 21 9 14
+f 22 15 7
+f 22 4 15
+f 22 18 4
+f 22 7 18
+f 23 16 11
+f 23 11 3
+f 23 3 16
+f 24 19 13
+f 24 7 19
+f 24 20 7
+f 24 13 20
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate002.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate002.obj
new file mode 100644
index 0000000000000000000000000000000000000000..bfa73af4674bc29c0a96bfbd07b40ff0aba2a0a4
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate002.obj
@@ -0,0 +1,92 @@
+v -0.073951302 0.034748936 -0.003715130
+v -0.070368859 0.024057109 -0.008726266
+v -0.069266974 0.032467866 -0.011409513
+v -0.072169331 0.032966965 -0.012624986
+v -0.086343782 0.008009779 -0.003682003
+v -0.079049841 0.008009715 -0.010027081
+v -0.070243191 0.034665138 -0.010445506
+v -0.070684092 0.034883024 -0.008331345
+v -0.081079187 0.008019369 -0.003715130
+v -0.082861158 0.031184994 -0.005497101
+v -0.070387360 0.024057109 -0.012624986
+v -0.075733274 0.034748936 -0.010843015
+v -0.073951302 0.032966965 -0.003715130
+v -0.079297216 0.008019369 -0.005497101
+v -0.085187536 0.007847060 -0.006142944
+v -0.081452334 0.030728365 -0.003101774
+v -0.082861158 0.008019369 -0.010843015
+v -0.080114646 0.034355113 -0.003320851
+v -0.086425100 0.013365283 -0.007279073
+v -0.085019938 0.020161844 -0.002946726
+v -0.073951302 0.031184994 -0.012624986
+v -0.081079187 0.032966965 -0.007279073
+v -0.082861158 0.024057109 -0.009061044
+v -0.086425100 0.020493167 -0.005497101
+v -0.084643129 0.016929225 -0.009061044
+v -0.077515245 0.031184994 -0.010843015
+v -0.084643129 0.024057109 -0.007279073
+v -0.081079187 0.018711196 -0.010843015
+v -0.081079187 0.029403023 -0.009061044
+v -0.082861158 0.029403023 -0.007279073
+v -0.082861158 0.011583311 -0.010843015
+v -0.079297216 0.032966965 -0.009061044
+f 7 3 4
+f 8 1 2
+f 8 2 3
+f 8 3 7
+f 11 3 2
+f 11 2 6
+f 11 4 3
+f 12 8 7
+f 12 1 8
+f 12 7 4
+f 13 9 2
+f 13 2 1
+f 14 6 2
+f 14 2 9
+f 15 9 5
+f 15 14 9
+f 15 6 14
+f 16 13 1
+f 17 11 6
+f 17 6 15
+f 18 1 12
+f 18 16 1
+f 18 10 16
+f 19 15 5
+f 19 17 15
+f 20 16 10
+f 20 5 9
+f 20 9 13
+f 20 13 16
+f 21 4 11
+f 21 11 17
+f 21 12 4
+f 22 18 12
+f 22 10 18
+f 24 19 5
+f 24 5 20
+f 24 20 10
+f 25 17 19
+f 26 12 21
+f 27 24 10
+f 27 23 25
+f 27 25 19
+f 27 19 24
+f 28 25 23
+f 28 26 21
+f 28 23 26
+f 29 26 23
+f 29 12 26
+f 29 23 27
+f 30 27 10
+f 30 10 22
+f 30 29 27
+f 30 22 29
+f 31 28 21
+f 31 21 17
+f 31 17 25
+f 31 25 28
+f 32 29 22
+f 32 22 12
+f 32 12 29
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate007.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate007.obj
new file mode 100644
index 0000000000000000000000000000000000000000..e9cf2ab05def1cee221b4378df35f92e359578e6
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate007.obj
@@ -0,0 +1,149 @@
+v 0.080218208 -0.038750572 0.006591041
+v 0.084644127 -0.032965967 -0.000151188
+v 0.077516243 -0.031183995 -0.000151188
+v 0.071984951 -0.031125162 -0.010453584
+v 0.065042445 -0.063259476 -0.001933159
+v 0.081080185 -0.031183995 -0.007279073
+v 0.083634483 -0.030823454 0.002059436
+v 0.079298214 -0.045439764 0.001630783
+v 0.063260474 -0.057913562 0.003412754
+v 0.068606387 -0.049003707 -0.010843015
+v 0.077516243 -0.038311880 0.006976696
+v 0.084644127 -0.031183995 -0.000151188
+v 0.078283492 -0.030700350 -0.007727000
+v 0.082862156 -0.038311880 0.001630783
+v 0.065042445 -0.065041447 0.003412754
+v 0.075734272 -0.045439764 -0.007279073
+v 0.063260474 -0.047221736 -0.005497101
+v 0.075734272 -0.034747938 -0.010843015
+v 0.075734272 -0.031183995 -0.010843015
+v 0.079612607 -0.040243767 0.006844905
+v 0.068606387 -0.061477505 0.001630783
+v 0.062168069 -0.063836710 0.002281109
+v 0.062580743 -0.064291819 0.003817408
+v 0.082862156 -0.034747938 -0.003715130
+v 0.065042445 -0.059695533 -0.007279073
+v 0.072170330 -0.032965967 -0.005497101
+v 0.063192806 -0.045396985 -0.010599368
+v 0.072170330 -0.043657793 -0.010843015
+v 0.079298214 -0.034747938 -0.009061044
+v 0.065042445 -0.065041447 0.001630783
+v 0.068606387 -0.061477505 0.003412754
+v 0.077516243 -0.047221736 -0.001933159
+v 0.077516243 -0.040093851 0.006976696
+v 0.082862156 -0.038311880 -0.000151188
+v 0.070388359 -0.050785678 -0.009061044
+v 0.063260474 -0.063259476 -0.001933159
+v 0.066824416 -0.061477505 -0.001933159
+v 0.072416692 -0.031290982 -0.008208322
+v 0.063272033 -0.045467626 -0.008473695
+v 0.063260474 -0.052567649 -0.010843015
+v 0.068558686 -0.036499752 -0.010671259
+v 0.073952301 -0.040093851 -0.010843015
+v 0.073952301 -0.045439764 -0.009061044
+v 0.079298214 -0.038311880 -0.007279073
+v 0.081080185 -0.032965967 -0.007279073
+v 0.075734272 -0.050785678 -0.000151188
+v 0.072170330 -0.050785678 -0.007279073
+v 0.079298214 -0.045439764 -0.000151188
+v 0.065042445 -0.052567649 -0.010843015
+v 0.063260474 -0.059695533 -0.007279073
+v 0.075734272 -0.047221736 -0.005497101
+f 7 1 2
+f 11 7 3
+f 11 1 7
+f 12 7 2
+f 13 4 3
+f 13 3 7
+f 13 12 6
+f 13 7 12
+f 14 2 1
+f 17 9 11
+f 19 13 6
+f 19 4 13
+f 19 18 10
+f 20 14 1
+f 20 8 14
+f 20 1 11
+f 22 9 17
+f 23 11 9
+f 23 9 22
+f 23 15 20
+f 24 12 2
+f 24 6 12
+f 26 17 11
+f 26 11 3
+f 28 10 18
+f 29 19 6
+f 29 18 19
+f 30 21 15
+f 30 5 21
+f 30 23 22
+f 30 15 23
+f 31 20 15
+f 31 15 21
+f 31 8 20
+f 33 23 20
+f 33 20 11
+f 33 11 23
+f 34 24 2
+f 34 2 14
+f 34 14 8
+f 35 10 28
+f 36 25 5
+f 36 30 22
+f 36 5 30
+f 37 21 5
+f 37 5 25
+f 38 26 3
+f 38 3 4
+f 39 38 4
+f 39 17 26
+f 39 26 38
+f 39 27 22
+f 39 22 17
+f 40 22 27
+f 40 19 10
+f 41 4 19
+f 41 40 27
+f 41 19 40
+f 41 39 4
+f 41 27 39
+f 42 29 28
+f 42 28 18
+f 42 18 29
+f 43 29 16
+f 43 28 29
+f 43 35 28
+f 43 16 35
+f 44 24 16
+f 44 16 29
+f 45 29 6
+f 45 6 24
+f 45 44 29
+f 45 24 44
+f 46 21 37
+f 46 31 21
+f 46 8 31
+f 47 35 16
+f 47 25 35
+f 47 37 25
+f 48 34 8
+f 48 32 34
+f 48 46 32
+f 48 8 46
+f 49 40 10
+f 49 25 40
+f 49 35 25
+f 49 10 35
+f 50 40 25
+f 50 25 36
+f 50 36 22
+f 50 22 40
+f 51 47 16
+f 51 32 46
+f 51 46 37
+f 51 37 47
+f 51 34 32
+f 51 16 24
+f 51 24 34
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate009.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate009.obj
new file mode 100644
index 0000000000000000000000000000000000000000..5f249b5ff91f296621408421efb20cb81fa007d2
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate009.obj
@@ -0,0 +1,59 @@
+v -0.018712047 -0.063247989 -0.008576945
+v -0.018712114 -0.068593484 -0.008624253
+v -0.020492169 -0.066823418 -0.012624986
+v -0.056131591 -0.056131591 -0.012624986
+v -0.056368235 -0.061721663 -0.007808080
+v -0.043659329 -0.052568827 -0.010725802
+v -0.018710485 -0.068606089 -0.010789586
+v -0.043673035 -0.068633736 -0.007323264
+v -0.018710863 -0.063261093 -0.010719566
+v -0.050785678 -0.052567649 -0.012624986
+v -0.043644010 -0.052564962 -0.008477230
+v -0.038485047 -0.070709419 -0.007781152
+v -0.056163718 -0.052598492 -0.008563006
+v -0.055768540 -0.052123614 -0.011481848
+v -0.029443927 -0.070501925 -0.008463052
+v -0.036529909 -0.070387360 -0.012624986
+v -0.020492169 -0.068605389 -0.012624986
+v -0.056131591 -0.061477505 -0.010843015
+v -0.029364947 -0.070299752 -0.011601704
+v -0.043657793 -0.066823418 -0.012624986
+v -0.050785678 -0.061477505 -0.012624986
+f 7 1 2
+f 8 2 1
+f 9 6 1
+f 9 7 3
+f 9 1 7
+f 10 3 4
+f 10 9 3
+f 10 6 9
+f 11 1 6
+f 11 8 1
+f 11 5 8
+f 12 2 8
+f 13 4 5
+f 13 5 11
+f 14 11 6
+f 14 6 10
+f 14 13 11
+f 14 10 4
+f 14 4 13
+f 15 7 2
+f 15 2 12
+f 16 4 3
+f 16 15 12
+f 16 12 8
+f 17 16 3
+f 17 3 7
+f 18 5 4
+f 18 16 8
+f 18 8 5
+f 19 17 7
+f 19 7 15
+f 19 15 16
+f 19 16 17
+f 20 4 16
+f 20 16 18
+f 21 20 18
+f 21 18 4
+f 21 4 20
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate014.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate014.obj
new file mode 100644
index 0000000000000000000000000000000000000000..c1d213ee5e57c0683e16ef04b4aa8e330a829e1c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate014.obj
@@ -0,0 +1,65 @@
+v -0.029412593 0.066840170 -0.008632947
+v -0.029413078 0.061494980 -0.008695139
+v -0.031183995 0.066824416 -0.012624986
+v -0.056131591 0.056132590 -0.012624986
+v -0.061749635 0.056404720 -0.007815727
+v -0.034749387 0.049007890 -0.010565884
+v -0.029402674 0.066826025 -0.010788383
+v -0.050566592 0.064810321 -0.006852799
+v -0.054349620 0.049004705 -0.012624986
+v -0.034761803 0.068635089 -0.008553464
+v -0.029402870 0.061480362 -0.010681259
+v -0.043640484 0.068579455 -0.007214217
+v -0.050785678 0.065042445 -0.010843015
+v -0.034737695 0.048989437 -0.008710760
+v -0.031183995 0.065042445 -0.012624986
+v -0.059695533 0.052568647 -0.012624986
+v -0.034421658 0.068037985 -0.011690692
+v -0.040093851 0.068606387 -0.012624986
+v -0.061616661 0.049128802 -0.008124227
+v -0.059102929 0.048528232 -0.011125313
+v -0.061477505 0.056132590 -0.010843015
+v -0.045439764 0.065042445 -0.012624986
+v -0.061352721 0.048903531 -0.010319023
+f 9 4 3
+f 10 1 7
+f 11 7 1
+f 11 1 2
+f 12 2 1
+f 12 1 10
+f 13 5 8
+f 13 8 12
+f 14 12 8
+f 14 2 12
+f 14 11 2
+f 14 6 11
+f 15 9 3
+f 15 6 9
+f 15 11 6
+f 15 3 7
+f 15 7 11
+f 16 4 9
+f 17 10 7
+f 17 7 3
+f 18 3 4
+f 18 17 3
+f 18 13 12
+f 18 12 10
+f 18 10 17
+f 19 14 8
+f 19 8 5
+f 20 9 6
+f 20 6 14
+f 20 14 19
+f 20 16 9
+f 21 13 4
+f 21 4 16
+f 21 5 13
+f 21 19 5
+f 22 18 4
+f 22 4 13
+f 22 13 18
+f 23 20 19
+f 23 16 20
+f 23 21 16
+f 23 19 21
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate015.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate015.obj
new file mode 100644
index 0000000000000000000000000000000000000000..f40697326cd45f373c53a9c04ac1958bb9db8af0
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate015.obj
@@ -0,0 +1,101 @@
+v 0.061478503 0.072170330 0.012322610
+v 0.077257064 0.048846956 0.011432339
+v 0.072170330 0.057914561 0.003412754
+v 0.052568647 0.068606387 -0.007279073
+v 0.052568647 0.070388359 0.010540639
+v 0.070461177 0.050848928 0.001593262
+v 0.056132590 0.070388359 -0.003715130
+v 0.073952301 0.059696532 0.012322610
+v 0.052568647 0.072170330 0.012322610
+v 0.077564900 0.049035992 0.008736413
+v 0.068606387 0.059696532 -0.000151188
+v 0.052568647 0.066824416 0.003412754
+v 0.063260474 0.070388359 0.008758667
+v 0.073952301 0.049004705 0.012322610
+v 0.054350618 0.077516243 0.008758667
+v 0.051814286 0.069462971 -0.004399721
+v 0.077516243 0.054350618 0.012322610
+v 0.054350618 0.068606387 -0.007279073
+v 0.052199415 0.066371472 -0.006741944
+v 0.068606387 0.061478503 0.001630783
+v 0.070388359 0.052568647 0.012322610
+v 0.068606387 0.050786676 0.005194725
+v 0.054350618 0.077516243 0.012322610
+v 0.061478503 0.070388359 0.005194725
+v 0.061478503 0.072170330 0.010540639
+v 0.051988257 0.076518662 0.011354000
+v 0.052568647 0.077516243 0.008758667
+v 0.077516243 0.054350618 0.008758667
+v 0.057914561 0.068606387 -0.003715130
+v 0.054350618 0.070388359 -0.005497101
+v 0.069571820 0.051612018 0.001133320
+v 0.066824416 0.052568647 0.005194725
+v 0.061478503 0.068606387 0.001630783
+v 0.072170330 0.061478503 0.008758667
+v 0.073952301 0.059696532 0.010540639
+f 9 8 1
+f 11 3 6
+f 13 1 8
+f 14 8 9
+f 14 10 2
+f 14 6 10
+f 16 12 5
+f 17 2 10
+f 17 14 2
+f 17 8 14
+f 18 11 6
+f 19 4 18
+f 19 16 4
+f 19 12 16
+f 20 3 11
+f 21 14 9
+f 21 9 5
+f 22 6 14
+f 22 14 21
+f 23 9 1
+f 23 1 15
+f 24 15 13
+f 24 7 15
+f 24 13 20
+f 25 15 1
+f 25 1 13
+f 25 13 15
+f 26 16 5
+f 26 5 9
+f 26 9 23
+f 27 4 16
+f 27 16 26
+f 27 26 23
+f 27 23 15
+f 28 17 10
+f 28 8 17
+f 28 10 6
+f 28 6 3
+f 29 11 18
+f 29 20 11
+f 29 7 20
+f 30 15 7
+f 30 29 18
+f 30 7 29
+f 30 18 4
+f 30 27 15
+f 30 4 27
+f 31 19 18
+f 31 18 6
+f 31 22 19
+f 31 6 22
+f 32 12 19
+f 32 19 22
+f 32 5 12
+f 32 22 21
+f 32 21 5
+f 33 24 20
+f 33 20 7
+f 33 7 24
+f 34 20 13
+f 34 28 3
+f 34 3 20
+f 35 8 28
+f 35 28 34
+f 35 34 13
+f 35 13 8
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate016.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate016.obj
new file mode 100644
index 0000000000000000000000000000000000000000..7f7c8e5369bd4839ce3e4f4cd9a3dd3d56252080
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate016.obj
@@ -0,0 +1,68 @@
+v 0.060475242 0.065627243 0.002245418
+v 0.062112319 0.060232693 0.001289082
+v 0.061478503 0.056132590 -0.010843015
+v 0.045440763 0.065042445 -0.012624986
+v 0.041877053 0.049005568 -0.008763180
+v 0.054350618 0.066824416 -0.009061044
+v 0.061478503 0.066824416 -0.001933159
+v 0.054350618 0.065042445 0.001630783
+v 0.061639987 0.049185917 -0.008142510
+v 0.054350618 0.049004705 -0.012624986
+v 0.052568647 0.065042445 -0.010843015
+v 0.061478503 0.059696532 -0.009061044
+v 0.059696532 0.059696532 0.001630783
+v 0.042090325 0.065282032 -0.008420656
+v 0.054846502 0.067436052 0.001336263
+v 0.041876821 0.059696532 -0.012624986
+v 0.059696532 0.052568647 -0.012624986
+v 0.059148967 0.048649746 -0.011386601
+v 0.049773062 0.067764854 -0.004687045
+v 0.041878351 0.049005059 -0.010693813
+v 0.041488394 0.064359524 -0.011217226
+v 0.061334902 0.048883144 -0.010179576
+v 0.049004705 0.063260474 -0.012624986
+v 0.052568647 0.066824416 -0.009061044
+f 7 1 2
+f 9 3 2
+f 11 4 6
+f 12 7 2
+f 12 2 3
+f 12 6 7
+f 12 11 6
+f 12 3 11
+f 13 8 5
+f 13 2 1
+f 13 1 8
+f 13 9 2
+f 13 5 9
+f 14 5 8
+f 15 8 1
+f 15 1 7
+f 15 14 8
+f 16 4 10
+f 17 10 4
+f 17 11 3
+f 18 9 5
+f 18 10 17
+f 19 4 14
+f 19 14 15
+f 19 15 7
+f 19 7 6
+f 20 16 10
+f 20 18 5
+f 20 10 18
+f 21 14 4
+f 21 4 16
+f 21 5 14
+f 21 20 5
+f 21 16 20
+f 22 17 3
+f 22 3 9
+f 22 18 17
+f 22 9 18
+f 23 17 4
+f 23 4 11
+f 23 11 17
+f 24 19 6
+f 24 6 4
+f 24 4 19
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate017.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate017.obj
new file mode 100644
index 0000000000000000000000000000000000000000..e193141176e09d5a5e0e92c5ec6eaae36444ee15
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate017.obj
@@ -0,0 +1,92 @@
+v -0.034747938 -0.077515245 0.001630783
+v -0.034441324 -0.082215626 0.001911458
+v -0.034747938 -0.075733274 -0.010843015
+v -0.049003707 -0.061477505 -0.012624986
+v -0.049730119 -0.071481842 0.001136846
+v -0.043659164 -0.061468661 -0.008527992
+v -0.034629411 -0.070127781 -0.010341434
+v -0.046548135 -0.076219877 0.002201809
+v -0.047221736 -0.070387360 0.001630783
+v -0.034783215 -0.070483810 -0.008275840
+v -0.047221736 -0.065041447 -0.012624986
+v -0.044127634 -0.062033859 -0.011632239
+v -0.035902552 -0.081611544 0.002191825
+v -0.049003707 -0.075733274 -0.003715130
+v -0.043657793 -0.072169331 0.001630783
+v -0.048478618 -0.074995230 0.001982350
+v -0.049249048 -0.061694507 -0.008313874
+v -0.036529909 -0.081079187 -0.005497101
+v -0.047221736 -0.077515245 -0.001933159
+v -0.041875822 -0.075733274 -0.009061044
+v -0.049003707 -0.068605389 -0.010843015
+v -0.049003707 -0.063259476 -0.012624986
+v -0.034747938 -0.081079187 -0.005497101
+v -0.036529909 -0.082861158 -0.001933159
+v -0.041875822 -0.077515245 -0.007279073
+v -0.038311880 -0.077515245 -0.009061044
+v -0.047221736 -0.075733274 -0.005497101
+v -0.040093851 -0.073951302 -0.010843015
+v -0.034747938 -0.082861158 -0.001933159
+v -0.038311880 -0.079297216 -0.007279073
+v -0.047221736 -0.072169331 -0.009061044
+v -0.036529909 -0.075733274 -0.010843015
+f 7 1 2
+f 7 2 3
+f 10 7 6
+f 10 6 1
+f 10 1 7
+f 11 7 3
+f 12 4 6
+f 12 6 7
+f 12 11 4
+f 12 7 11
+f 13 2 1
+f 15 1 6
+f 15 6 9
+f 15 13 1
+f 15 9 8
+f 15 8 13
+f 16 9 5
+f 16 8 9
+f 16 5 14
+f 17 9 6
+f 17 6 4
+f 17 5 9
+f 19 13 8
+f 19 16 14
+f 19 8 16
+f 19 14 18
+f 21 14 5
+f 22 17 4
+f 22 5 17
+f 22 21 5
+f 22 4 11
+f 22 11 21
+f 23 18 3
+f 23 3 2
+f 24 19 18
+f 24 2 13
+f 24 13 19
+f 24 18 23
+f 27 14 21
+f 27 20 25
+f 27 18 14
+f 28 21 11
+f 28 20 21
+f 29 24 23
+f 29 23 2
+f 29 2 24
+f 30 25 20
+f 30 20 26
+f 30 27 25
+f 30 18 27
+f 31 27 21
+f 31 21 20
+f 31 20 27
+f 32 28 11
+f 32 11 3
+f 32 26 20
+f 32 20 28
+f 32 30 26
+f 32 3 18
+f 32 18 30
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate018.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate018.obj
new file mode 100644
index 0000000000000000000000000000000000000000..c2b1b7141c0161692b661ea64052b5c39aac0c21
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate018.obj
@@ -0,0 +1,98 @@
+v 0.009755006 0.091308633 0.008939179
+v 0.015147254 0.082862156 -0.000151188
+v 0.015147254 0.084644127 -0.009061044
+v -0.015146255 0.084644127 -0.009061044
+v -0.018881304 0.087688509 0.008283050
+v 0.015212447 0.081473720 -0.005845704
+v 0.009801340 0.091772012 0.003412754
+v 0.015147254 0.088208070 -0.001933159
+v 0.009801340 0.086426099 0.006976696
+v 0.014929685 0.082007558 -0.007864137
+v -0.018710198 0.082862156 0.001630783
+v 0.009801340 0.088208070 -0.005497101
+v -0.018710198 0.091772012 0.006976696
+v 0.015053497 0.087569399 0.001875977
+v 0.015356752 0.082344803 -0.004835360
+v -0.009800342 0.086426099 0.008758667
+v -0.015057776 0.082303306 -0.007824002
+v 0.008019369 0.081080185 -0.003715130
+v 0.013365283 0.088208070 -0.003715130
+v -0.018490481 0.090165637 0.009366263
+v -0.018710198 0.084644127 -0.007279073
+v -0.006236400 0.088208070 -0.005497101
+v 0.015306864 0.085458209 0.001314890
+v 0.009841181 0.089094280 0.008426370
+v -0.016928226 0.082862156 0.001630783
+v -0.018643513 0.082513085 -0.006989365
+v 0.013365283 0.086426099 -0.007279073
+v -0.018710198 0.086426099 -0.005497101
+v -0.006236400 0.091772012 0.003412754
+v -0.018710198 0.088208070 -0.001933159
+v -0.015146255 0.086426099 -0.007279073
+v -0.015146255 0.088208070 -0.003715130
+v -0.011582313 0.089990041 -0.000151188
+v -0.013364284 0.091772012 0.005194725
+f 8 7 1
+f 10 4 3
+f 10 3 6
+f 13 1 7
+f 14 8 1
+f 15 6 3
+f 15 3 8
+f 15 2 6
+f 16 5 11
+f 16 2 9
+f 17 10 6
+f 17 4 10
+f 18 6 2
+f 18 17 6
+f 19 12 7
+f 19 7 8
+f 20 13 5
+f 20 1 13
+f 20 16 1
+f 20 5 16
+f 21 11 5
+f 22 7 12
+f 23 9 2
+f 23 2 15
+f 23 15 8
+f 23 8 14
+f 23 14 1
+f 24 16 9
+f 24 1 16
+f 24 23 1
+f 24 9 23
+f 25 16 11
+f 25 2 16
+f 25 18 2
+f 25 11 18
+f 26 18 11
+f 26 17 18
+f 26 11 21
+f 26 21 4
+f 26 4 17
+f 27 12 19
+f 27 3 4
+f 27 22 12
+f 27 19 8
+f 27 8 3
+f 28 21 5
+f 28 4 21
+f 29 13 7
+f 29 7 22
+f 30 28 5
+f 30 5 13
+f 31 4 28
+f 31 27 4
+f 31 22 27
+f 31 28 30
+f 32 31 30
+f 32 22 31
+f 32 30 13
+f 33 29 22
+f 33 22 32
+f 34 13 29
+f 34 29 33
+f 34 33 32
+f 34 32 13
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate019.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate019.obj
new file mode 100644
index 0000000000000000000000000000000000000000..e409c70ce589eb722c78a21f036504002b1102d7
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate019.obj
@@ -0,0 +1,113 @@
+v -0.013364284 -0.088207071 0.012322610
+v -0.008082693 -0.085364764 -0.002203285
+v -0.024056111 -0.075733274 -0.005497101
+v -0.025838082 -0.079297216 -0.010843015
+v -0.022274140 -0.091771014 0.008758667
+v -0.011498549 -0.078822556 -0.009753003
+v -0.013225434 -0.091076876 0.009026052
+v -0.015146255 -0.086425100 -0.007279073
+v -0.029778037 -0.086131975 0.011640000
+v -0.018618181 -0.077106459 -0.009902227
+v -0.008018371 -0.082861158 -0.001933159
+v -0.013308047 -0.091196519 0.011114744
+v -0.029402024 -0.082861158 -0.007279073
+v -0.013364284 -0.088207071 -0.003715130
+v -0.011582313 -0.084643129 -0.009061044
+v -0.025838082 -0.084643129 0.012322610
+v -0.011582313 -0.081079187 -0.010843015
+v -0.029409709 -0.075757278 -0.007343794
+v -0.011582313 -0.079297216 -0.005497101
+v -0.029402024 -0.089989042 0.012322610
+v -0.016928226 -0.088207071 -0.003715130
+v -0.024056111 -0.082861158 -0.009061044
+v -0.011582313 -0.086425100 -0.007279073
+v -0.018710198 -0.081079187 -0.010843015
+v -0.008140399 -0.084175073 -0.004492566
+v -0.025670583 -0.076940337 -0.009278885
+v -0.024129234 -0.075961693 -0.007894938
+v -0.029402024 -0.081079187 -0.009061044
+v -0.016928226 -0.077515245 -0.005497101
+v -0.022274140 -0.091771014 0.012322610
+v -0.029402024 -0.089989042 0.008758667
+v -0.020492169 -0.086425100 -0.005497101
+v -0.016928226 -0.084643129 -0.009061044
+v -0.008018595 -0.084645544 -0.003716559
+v -0.029365313 -0.077389240 -0.008718227
+v -0.029776560 -0.076540374 -0.006754841
+v -0.018710198 -0.086425100 0.012322610
+v -0.027620053 -0.089989042 0.006976696
+v -0.029402024 -0.086425100 -0.000151188
+f 10 3 6
+f 11 1 2
+f 12 7 2
+f 12 2 1
+f 12 5 7
+f 14 7 5
+f 17 10 6
+f 17 4 10
+f 18 16 3
+f 18 9 16
+f 19 11 6
+f 19 1 11
+f 20 16 9
+f 20 1 16
+f 21 14 5
+f 21 8 14
+f 22 8 13
+f 23 2 7
+f 23 7 14
+f 23 14 8
+f 23 8 15
+f 24 17 15
+f 24 4 17
+f 24 22 4
+f 25 15 17
+f 25 23 15
+f 25 17 6
+f 25 6 11
+f 26 18 10
+f 26 10 4
+f 27 18 3
+f 27 3 10
+f 27 10 18
+f 28 22 13
+f 28 4 22
+f 29 19 6
+f 29 6 3
+f 29 3 16
+f 29 1 19
+f 30 20 5
+f 30 5 12
+f 30 12 1
+f 30 1 20
+f 31 5 20
+f 31 20 9
+f 32 13 8
+f 32 8 21
+f 33 15 8
+f 33 8 22
+f 33 24 15
+f 33 22 24
+f 34 2 23
+f 34 23 25
+f 34 25 11
+f 34 11 2
+f 35 26 4
+f 35 18 26
+f 35 4 28
+f 36 28 13
+f 36 13 9
+f 36 9 18
+f 36 35 28
+f 36 18 35
+f 37 29 16
+f 37 16 1
+f 37 1 29
+f 38 21 5
+f 38 5 31
+f 39 31 9
+f 39 9 13
+f 39 38 31
+f 39 13 32
+f 39 32 21
+f 39 21 38
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate020.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate020.obj
new file mode 100644
index 0000000000000000000000000000000000000000..372e33379c86b9717858c5c7af275c19b4cd1955
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate020.obj
@@ -0,0 +1,110 @@
+v 0.061478503 -0.061477505 0.008758667
+v 0.064226836 -0.062357884 0.008292045
+v 0.063329666 -0.054406834 -0.005579371
+v 0.052568647 -0.065041447 -0.010843015
+v 0.050752973 -0.075678852 0.007002100
+v 0.063260474 -0.066823418 0.001630783
+v 0.047222734 -0.063259476 -0.005497101
+v 0.063260474 -0.056131591 -0.000151188
+v 0.062285809 -0.067526771 0.009337305
+v 0.064042663 -0.054991547 -0.004445178
+v 0.063260474 -0.061477505 -0.005497101
+v 0.050786676 -0.072169331 -0.007279073
+v 0.050468231 -0.062919595 -0.009776160
+v 0.050786676 -0.070387360 0.006976696
+v 0.061478503 -0.054349620 -0.003715130
+v 0.063260474 -0.068605389 0.005194725
+v 0.063260474 -0.063259476 -0.003715130
+v 0.061964675 -0.054837096 -0.006329565
+v 0.056132590 -0.065041447 -0.009061044
+v 0.047276023 -0.070465598 -0.003787732
+v 0.049004705 -0.072169331 -0.007279073
+v 0.050786676 -0.068605389 0.005194725
+v 0.059696532 -0.063259476 0.008758667
+v 0.061478503 -0.056131591 -0.000151188
+v 0.054350618 -0.075733274 0.006976696
+v 0.056132590 -0.073951302 0.003412754
+v 0.056132590 -0.070387360 -0.003715130
+v 0.047340738 -0.063382337 -0.008140055
+v 0.054350618 -0.066823418 -0.009061044
+v 0.047222734 -0.068605389 -0.000151188
+v 0.048154993 -0.072665812 -0.003143788
+v 0.052568647 -0.075733274 0.001630783
+v 0.054350618 -0.075733274 0.005194725
+v 0.047365923 -0.065240142 -0.007645286
+v 0.049121861 -0.074140480 0.001542476
+v 0.050786676 -0.073951302 -0.003715130
+v 0.052568647 -0.072169331 -0.005497101
+v 0.050786676 -0.075733274 0.001630783
+f 8 1 2
+f 9 2 1
+f 10 8 2
+f 11 3 10
+f 14 5 9
+f 15 10 3
+f 15 8 10
+f 16 6 2
+f 16 2 9
+f 17 10 2
+f 17 2 6
+f 17 11 10
+f 18 3 11
+f 18 4 13
+f 18 15 3
+f 19 18 11
+f 19 4 18
+f 21 4 12
+f 22 14 1
+f 22 1 7
+f 23 14 9
+f 23 9 1
+f 23 1 14
+f 24 15 7
+f 24 7 1
+f 24 1 8
+f 24 8 15
+f 25 16 9
+f 25 9 5
+f 27 17 6
+f 27 6 16
+f 27 16 26
+f 28 18 13
+f 28 7 15
+f 28 15 18
+f 28 13 4
+f 28 4 21
+f 28 20 7
+f 29 12 4
+f 29 4 19
+f 29 19 11
+f 30 7 20
+f 30 22 7
+f 30 14 22
+f 30 5 14
+f 31 20 21
+f 32 27 26
+f 33 26 16
+f 33 16 25
+f 33 32 26
+f 33 25 32
+f 34 28 21
+f 34 21 20
+f 34 20 28
+f 35 30 20
+f 35 5 30
+f 35 20 31
+f 36 21 12
+f 36 12 32
+f 37 32 12
+f 37 27 32
+f 37 17 27
+f 37 12 29
+f 37 29 11
+f 37 11 17
+f 38 32 25
+f 38 25 5
+f 38 5 35
+f 38 36 32
+f 38 35 31
+f 38 31 21
+f 38 21 36
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate021.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate021.obj
new file mode 100644
index 0000000000000000000000000000000000000000..51e5ba431fdc18c9b3255a206dc5e96ec9d5c356
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate021.obj
@@ -0,0 +1,116 @@
+v 0.029403023 0.089990041 0.012322610
+v 0.031184994 0.081080185 0.006976696
+v 0.029403023 0.081080185 -0.009061044
+v 0.015147254 0.081080185 -0.010843015
+v 0.009801340 0.086426099 0.006976696
+v 0.025538330 0.076732552 -0.009436485
+v 0.015147254 0.091772012 0.005194725
+v 0.031184994 0.086426099 0.001630783
+v 0.025839080 0.084644127 0.012322610
+v 0.029256624 0.077119421 -0.008395195
+v 0.025839080 0.079298214 -0.010843015
+v 0.015123246 0.077307193 -0.010179599
+v 0.020493167 0.086426099 -0.005497101
+v 0.015087741 0.091117348 0.011539440
+v 0.009801340 0.091772012 0.005194725
+v 0.030974714 0.085758835 0.007244513
+v 0.022275138 0.091772012 0.008758667
+v 0.029841574 0.085841737 0.011624046
+v 0.025839080 0.077516243 -0.003715130
+v 0.031250598 0.081221605 0.001579725
+v 0.020493167 0.081080185 -0.010843015
+v 0.016929225 0.077516243 -0.005497101
+v 0.027621052 0.084644127 -0.005497101
+v 0.018711196 0.086426099 0.012322610
+v 0.009755006 0.091308633 0.008939179
+v 0.009876247 0.086310332 0.001006015
+v 0.029403023 0.089990041 0.010540639
+v 0.022275138 0.091772012 0.012322610
+v 0.022275138 0.082862156 -0.009061044
+v 0.009801340 0.084644127 0.003412754
+v 0.029403023 0.081080185 0.006976696
+v 0.029403023 0.082862156 -0.007279073
+v 0.024057109 0.089990041 0.005194725
+v 0.011583311 0.086426099 0.008758667
+v 0.015385479 0.089290218 0.011353656
+v 0.009543884 0.088681169 0.002202568
+v 0.015184562 0.077654483 -0.008026155
+v 0.022275138 0.084644127 -0.007279073
+v 0.024057109 0.086426099 -0.003715130
+v 0.009841181 0.089094280 0.008426370
+f 11 10 6
+f 11 3 10
+f 12 11 6
+f 12 4 11
+f 15 13 4
+f 15 7 13
+f 16 8 1
+f 16 2 8
+f 17 7 15
+f 18 9 2
+f 18 1 9
+f 18 16 1
+f 18 2 16
+f 19 10 2
+f 19 6 10
+f 20 8 2
+f 20 2 10
+f 20 10 3
+f 20 3 8
+f 21 11 4
+f 21 3 11
+f 21 4 13
+f 22 12 6
+f 22 6 19
+f 24 9 1
+f 24 22 9
+f 26 4 12
+f 27 17 1
+f 27 1 8
+f 28 1 17
+f 28 24 1
+f 28 14 24
+f 28 25 14
+f 28 17 15
+f 28 15 25
+f 29 21 13
+f 29 3 21
+f 30 26 12
+f 31 22 19
+f 31 19 2
+f 31 2 9
+f 31 9 22
+f 32 23 8
+f 32 8 3
+f 32 13 23
+f 32 3 29
+f 33 27 8
+f 33 17 27
+f 33 7 17
+f 34 22 24
+f 34 30 22
+f 34 5 30
+f 35 24 14
+f 35 14 25
+f 35 34 24
+f 35 25 34
+f 36 15 4
+f 36 4 26
+f 36 26 30
+f 36 25 15
+f 36 30 5
+f 36 5 25
+f 37 30 12
+f 37 12 22
+f 37 22 30
+f 38 32 29
+f 38 29 13
+f 38 13 32
+f 39 23 13
+f 39 13 7
+f 39 7 33
+f 39 33 8
+f 39 8 23
+f 40 34 25
+f 40 25 5
+f 40 5 34
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate023.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate023.obj
new file mode 100644
index 0000000000000000000000000000000000000000..c09074f4f6d447298cc6ac5856b08a4d2626b899
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate023.obj
@@ -0,0 +1,89 @@
+v 0.015049543 0.082272334 -0.004974622
+v 0.015184562 0.077654483 -0.008026155
+v 0.009801340 0.082862156 -0.010843015
+v -0.024056111 0.079298214 -0.010843015
+v -0.025838082 0.079298214 -0.000151188
+v 0.000841977 0.076670758 -0.011456539
+v 0.015147254 0.081080185 -0.010843015
+v -0.017073317 0.083932626 -0.002336462
+v -0.002672457 0.079298214 -0.012624986
+v -0.037776783 0.076362658 -0.006155696
+v 0.015348350 0.080393396 -0.006881245
+v 0.014929685 0.082007558 -0.007864137
+v 0.015123246 0.077307193 -0.010179599
+v -0.026124293 0.082065402 -0.000554750
+v -0.006236400 0.082862156 -0.010843015
+v -0.002623207 0.076677679 -0.011387859
+v 0.000891485 0.079298214 -0.012624986
+v -0.025838082 0.077516243 -0.003715130
+v -0.040093851 0.079298214 -0.003715130
+v -0.018710198 0.081080185 -0.010843015
+v -0.023857451 0.076865556 -0.009346742
+v -0.039606102 0.076652533 -0.004772780
+v -0.029444423 0.081217385 -0.000205045
+v -0.038311880 0.079298214 -0.007279073
+v -0.036529909 0.081080185 -0.003715130
+v -0.032635089 0.076730407 -0.007793724
+v -0.039910322 0.077191249 -0.003565477
+v -0.040093851 0.079298214 -0.005497101
+v -0.029402024 0.081080185 -0.009061044
+v -0.032965967 0.079298214 -0.009061044
+v -0.036529909 0.081080185 -0.005497101
+f 5 2 1
+f 8 1 3
+f 10 6 2
+f 11 7 1
+f 11 1 2
+f 12 7 3
+f 12 3 1
+f 12 1 7
+f 13 2 6
+f 13 11 2
+f 13 7 11
+f 14 5 1
+f 14 1 8
+f 15 8 3
+f 15 3 9
+f 16 9 6
+f 16 4 9
+f 16 6 10
+f 17 9 3
+f 17 3 7
+f 17 7 13
+f 17 13 6
+f 17 6 9
+f 18 2 5
+f 20 15 9
+f 20 9 4
+f 21 16 10
+f 21 4 16
+f 22 10 2
+f 22 2 18
+f 23 5 14
+f 24 10 22
+f 25 14 8
+f 25 23 14
+f 25 19 23
+f 26 21 10
+f 26 4 21
+f 27 22 18
+f 27 18 5
+f 27 23 19
+f 27 5 23
+f 28 24 22
+f 28 19 25
+f 28 27 19
+f 28 22 27
+f 29 20 4
+f 29 15 20
+f 29 8 15
+f 30 4 26
+f 30 29 4
+f 30 24 29
+f 30 26 10
+f 30 10 24
+f 31 28 25
+f 31 24 28
+f 31 29 24
+f 31 25 8
+f 31 8 29
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate024.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate024.obj
new file mode 100644
index 0000000000000000000000000000000000000000..a592c3a137f4404be29a166782f700f5f9851113
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate024.obj
@@ -0,0 +1,110 @@
+v 0.081080185 0.049004705 0.012322610
+v 0.088208070 0.032966965 0.012322610
+v 0.082862156 0.040094849 0.001630783
+v 0.072170330 0.047222734 -0.009061044
+v 0.070388359 0.050786676 0.008758667
+v 0.078714254 0.029831082 -0.004453914
+v 0.073952301 0.056132590 0.003412754
+v 0.088208070 0.034748936 0.008758667
+v 0.075734272 0.045440763 0.012322610
+v 0.081080185 0.036530907 -0.005497101
+v 0.070131841 0.043533061 -0.008605656
+v 0.081080185 0.049004705 0.008758667
+v 0.069999234 0.055803419 0.008968451
+v 0.088208070 0.034748936 0.012322610
+v 0.084742855 0.027656380 0.006933367
+v 0.075734272 0.047222734 -0.005497101
+v 0.077516243 0.029403023 -0.001933159
+v 0.073952301 0.043658792 -0.009061044
+v 0.073166258 0.055491889 0.009135421
+v 0.079298214 0.050786676 0.012322610
+v 0.069229272 0.055187822 0.000404367
+v 0.088208070 0.032966965 0.008758667
+v 0.079314513 0.029408846 -0.003725182
+v 0.082862156 0.031184994 0.012322610
+v 0.081080185 0.043658792 0.001630783
+v 0.075734272 0.052568647 0.001630783
+v 0.073952301 0.045440763 -0.009061044
+v 0.072170330 0.052568647 -0.005497101
+v 0.070388359 0.043658792 -0.001933159
+v 0.079215400 0.032933033 -0.005442148
+v 0.069325570 0.051751622 -0.004325321
+v 0.072170330 0.056132590 -0.000151188
+v 0.085335031 0.027868285 0.008455447
+v 0.079889341 0.032479024 -0.004706877
+v 0.082862156 0.027621052 0.008758667
+v 0.079298214 0.045440763 -0.000151188
+v 0.079298214 0.050786676 0.006976696
+v 0.069555912 0.046588703 -0.007878668
+f 9 2 1
+f 10 3 8
+f 12 7 1
+f 12 1 8
+f 13 5 9
+f 14 8 1
+f 14 1 2
+f 14 2 8
+f 17 11 6
+f 18 6 11
+f 18 11 4
+f 19 7 13
+f 20 13 9
+f 20 9 1
+f 20 19 13
+f 20 1 7
+f 20 7 19
+f 21 5 13
+f 22 8 2
+f 22 10 8
+f 23 22 15
+f 23 10 22
+f 23 17 6
+f 24 2 9
+f 24 9 5
+f 25 3 10
+f 25 10 16
+f 25 12 8
+f 25 8 3
+f 27 16 10
+f 27 18 4
+f 27 10 18
+f 28 26 16
+f 28 27 4
+f 28 16 27
+f 29 17 5
+f 29 11 17
+f 30 18 10
+f 30 6 18
+f 31 28 4
+f 31 21 28
+f 32 21 13
+f 32 13 7
+f 32 28 21
+f 32 7 26
+f 32 26 28
+f 33 22 2
+f 33 15 22
+f 33 2 24
+f 34 23 6
+f 34 10 23
+f 34 30 10
+f 34 6 30
+f 35 23 15
+f 35 17 23
+f 35 33 24
+f 35 15 33
+f 35 24 5
+f 35 5 17
+f 36 25 16
+f 36 12 25
+f 36 16 26
+f 37 26 7
+f 37 7 12
+f 37 36 26
+f 37 12 36
+f 38 29 5
+f 38 5 21
+f 38 21 31
+f 38 11 29
+f 38 31 4
+f 38 4 11
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate026.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate026.obj
new file mode 100644
index 0000000000000000000000000000000000000000..cc0eeab86e0f1508eadba4042375948661a58b02
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate026.obj
@@ -0,0 +1,107 @@
+v 0.077516243 0.043658792 -0.003715130
+v 0.080158983 0.026146640 -0.004245945
+v 0.079298214 0.027621052 -0.010843015
+v 0.054350618 0.049004705 -0.012624986
+v 0.056124177 0.025829477 -0.008727534
+v 0.066824416 0.049004705 -0.010843015
+v 0.079298214 0.040094849 -0.005497101
+v 0.070388359 0.041876821 -0.003715130
+v 0.073109840 0.022014007 -0.011363696
+v 0.068606387 0.040094849 -0.012624986
+v 0.063260474 0.049004705 -0.005497101
+v 0.077813315 0.039379402 -0.003085006
+v 0.079298214 0.034748936 -0.009061044
+v 0.077516243 0.043658792 -0.005497101
+v 0.077516243 0.025839080 -0.003715130
+v 0.077133966 0.022120105 -0.009740993
+v 0.063260474 0.025839080 -0.012624986
+v 0.061478503 0.049004705 -0.012624986
+v 0.073952301 0.029403023 -0.012624986
+v 0.067206748 0.049247536 -0.005855362
+v 0.054421738 0.049057893 -0.008598421
+v 0.079298214 0.038312878 -0.007279073
+v 0.072170330 0.041876821 -0.010843015
+v 0.075734272 0.029403023 -0.003715130
+v 0.079298214 0.025839080 -0.010843015
+v 0.075734272 0.022275138 -0.005497101
+v 0.066824574 0.022275692 -0.010797757
+v 0.056133950 0.025840245 -0.010655805
+v 0.065042445 0.045440763 -0.012624986
+v 0.070388359 0.022275138 -0.012624986
+v 0.054350618 0.045440763 -0.012624986
+v 0.075734272 0.041876821 -0.009061044
+v 0.078454853 0.022490251 -0.006875133
+v 0.066815780 0.022265280 -0.008766000
+v 0.054429448 0.045499717 -0.008741450
+v 0.057914561 0.036530907 -0.012624986
+v 0.077516243 0.041876821 -0.007279073
+f 11 8 1
+f 11 5 8
+f 12 7 1
+f 12 2 7
+f 12 1 8
+f 13 2 3
+f 14 1 7
+f 15 2 12
+f 17 4 10
+f 18 10 4
+f 19 17 10
+f 20 18 4
+f 20 6 18
+f 20 11 1
+f 20 14 6
+f 20 1 14
+f 21 5 11
+f 21 20 4
+f 21 11 20
+f 22 7 2
+f 22 2 13
+f 22 14 7
+f 23 19 10
+f 23 3 19
+f 24 8 5
+f 24 5 15
+f 24 15 12
+f 24 12 8
+f 25 16 9
+f 25 3 2
+f 25 19 3
+f 26 2 15
+f 26 15 5
+f 26 9 16
+f 28 27 5
+f 29 18 6
+f 29 10 18
+f 29 23 10
+f 29 6 23
+f 30 17 19
+f 30 9 27
+f 30 28 17
+f 30 27 28
+f 30 25 9
+f 30 19 25
+f 31 21 4
+f 31 28 5
+f 31 4 17
+f 32 22 13
+f 32 13 3
+f 32 3 23
+f 32 23 6
+f 32 6 14
+f 33 25 2
+f 33 16 25
+f 33 26 16
+f 33 2 26
+f 34 26 5
+f 34 5 27
+f 34 27 9
+f 34 9 26
+f 35 31 5
+f 35 5 21
+f 35 21 31
+f 36 31 17
+f 36 17 28
+f 36 28 31
+f 37 32 14
+f 37 14 22
+f 37 22 32
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate028.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate028.obj
new file mode 100644
index 0000000000000000000000000000000000000000..ec2ca2c6f71ec6a293c5b18ae8b107e050b4a288
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate028.obj
@@ -0,0 +1,113 @@
+v 0.034244937 -0.084840458 0.007612778
+v 0.034748936 -0.086425100 0.005194725
+v 0.034556675 -0.075361487 -0.008255901
+v 0.009801340 -0.082861158 -0.010843015
+v 0.015139347 -0.089923749 0.007001502
+v 0.029403023 -0.082861158 -0.007279073
+v 0.035437648 -0.080798411 0.004567252
+v 0.024057109 -0.089989042 0.006976696
+v 0.034748936 -0.079297216 -0.009061044
+v 0.035094145 -0.081831645 0.006662182
+v 0.024057109 -0.086425100 -0.003715130
+v 0.034748936 -0.082861158 -0.003715130
+v 0.022275138 -0.081079187 -0.010843015
+v 0.018711196 -0.084643129 0.006976696
+v 0.024057109 -0.089989042 0.005194725
+v 0.032966965 -0.082861158 -0.005497101
+v 0.024057109 -0.082861158 -0.009061044
+v 0.023951978 -0.075478496 -0.010263623
+v 0.031184994 -0.081079187 0.006976696
+v 0.009801340 -0.084643129 0.003412754
+v 0.015147254 -0.089989042 0.001630783
+v 0.027621052 -0.084643129 -0.005497101
+v 0.015147254 -0.086425100 -0.007279073
+v 0.031184994 -0.075733274 -0.010843015
+v 0.024057109 -0.082861158 0.006976696
+v 0.034748936 -0.075733274 -0.001933159
+v 0.009801340 -0.079297216 -0.005497101
+v 0.018711196 -0.089989042 0.001630783
+v 0.009801340 -0.086425100 -0.007279073
+v 0.031184994 -0.086425100 0.001630783
+v 0.015147254 -0.084643129 -0.009061044
+v 0.020493167 -0.086425100 -0.005497101
+v 0.031184994 -0.077515245 -0.010843015
+v 0.032966965 -0.079297216 0.005194725
+v 0.032966965 -0.075733274 -0.001933159
+v 0.009763954 -0.078956978 -0.009935026
+v 0.009845226 -0.087106387 0.003122732
+v 0.011583311 -0.082861158 -0.010843015
+v 0.024057109 -0.075733274 -0.005497101
+f 8 2 1
+f 8 1 5
+f 9 3 7
+f 10 1 2
+f 10 2 7
+f 12 9 7
+f 12 7 2
+f 14 5 1
+f 15 2 8
+f 16 6 9
+f 16 9 12
+f 16 12 2
+f 17 13 9
+f 17 9 6
+f 19 1 10
+f 20 5 14
+f 21 15 8
+f 21 8 5
+f 22 6 16
+f 23 17 6
+f 24 13 4
+f 24 4 18
+f 24 18 3
+f 24 3 9
+f 25 14 1
+f 25 1 19
+f 25 20 14
+f 26 10 7
+f 26 7 3
+f 28 11 15
+f 28 15 21
+f 28 21 23
+f 29 23 21
+f 29 4 23
+f 30 22 16
+f 30 16 2
+f 30 2 15
+f 30 15 11
+f 30 11 22
+f 31 23 4
+f 31 13 17
+f 31 17 23
+f 32 22 11
+f 32 28 23
+f 32 11 28
+f 32 23 6
+f 32 6 22
+f 33 24 9
+f 33 9 13
+f 33 13 24
+f 34 19 10
+f 34 10 26
+f 34 25 19
+f 34 27 20
+f 34 20 25
+f 35 26 3
+f 35 3 18
+f 35 34 26
+f 36 18 4
+f 36 20 27
+f 36 29 20
+f 36 4 29
+f 37 29 21
+f 37 21 5
+f 37 5 20
+f 37 20 29
+f 38 31 4
+f 38 4 13
+f 38 13 31
+f 39 35 18
+f 39 27 34
+f 39 34 35
+f 39 36 27
+f 39 18 36
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate029.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate029.obj
new file mode 100644
index 0000000000000000000000000000000000000000..11f7fb92f0ec6c32c349c82fb6f5c669447dc8f8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate029.obj
@@ -0,0 +1,65 @@
+v 0.015147254 -0.088207071 0.012322610
+v 0.014993939 -0.090903571 0.011379392
+v 0.015103048 -0.085997980 0.001816582
+v -0.018869183 -0.086992834 0.001383980
+v -0.013364284 -0.093552985 0.012322610
+v -0.013364284 -0.086425100 0.008758667
+v 0.015439619 -0.087831238 0.006424136
+v 0.013365283 -0.093552985 0.012322610
+v -0.013364284 -0.088207071 0.012322610
+v 0.015147254 -0.091771014 0.005194725
+v -0.019010515 -0.087614223 0.008316088
+v 0.015147254 -0.089989042 0.001630783
+v -0.000890486 -0.093552985 0.006976696
+v -0.018710198 -0.091771014 0.006976696
+v -0.018391350 -0.090177040 0.009372672
+v 0.011583311 -0.093552985 0.008758667
+v 0.009801340 -0.091771014 0.003412754
+v -0.013364284 -0.093552985 0.008758667
+v 0.013365283 -0.093552985 0.010540639
+v 0.006237398 -0.093552985 0.006976696
+v -0.016928226 -0.089989042 0.001630783
+v -0.018710198 -0.089989042 0.003412754
+v -0.011582313 -0.091771014 0.003412754
+f 6 1 3
+f 6 3 4
+f 7 1 2
+f 7 3 1
+f 8 2 1
+f 8 1 5
+f 9 5 1
+f 9 1 6
+f 10 7 2
+f 10 2 8
+f 11 9 6
+f 11 6 4
+f 12 3 7
+f 12 7 10
+f 12 4 3
+f 13 8 5
+f 14 11 4
+f 15 5 9
+f 15 9 11
+f 15 14 5
+f 15 11 14
+f 16 8 13
+f 17 12 10
+f 18 13 5
+f 18 5 14
+f 19 16 10
+f 19 10 8
+f 19 8 16
+f 20 16 13
+f 20 13 17
+f 20 17 10
+f 20 10 16
+f 21 4 12
+f 21 12 17
+f 21 18 14
+f 22 21 14
+f 22 14 4
+f 22 4 21
+f 23 21 17
+f 23 17 13
+f 23 13 18
+f 23 18 21
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate030.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate030.obj
new file mode 100644
index 0000000000000000000000000000000000000000..c0b7460f36fefcf42e257b9033ccf9ba62a8723c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate030.obj
@@ -0,0 +1,65 @@
+v -0.052553008 -0.040090997 -0.008551745
+v -0.052552687 -0.052564732 -0.008579713
+v -0.052567649 -0.049003707 -0.012624986
+v -0.066823418 -0.043657793 -0.012624986
+v -0.070747675 -0.043863220 -0.005844298
+v -0.063259476 -0.054349620 -0.010843015
+v -0.059695533 -0.040093851 -0.012624986
+v -0.059695533 -0.054349620 -0.005497101
+v -0.056131591 -0.054349620 -0.012624986
+v -0.070387360 -0.043657793 -0.010843015
+v -0.052569298 -0.040095116 -0.010717152
+v -0.068605389 -0.040093851 -0.005497101
+v -0.063390423 -0.054454518 -0.005661249
+v -0.052567649 -0.052567649 -0.012624986
+v -0.056446517 -0.054669736 -0.008296252
+v -0.057913562 -0.054349620 -0.012624986
+v -0.069871716 -0.039683127 -0.009647924
+v -0.057913562 -0.041875822 -0.012624986
+v -0.059695533 -0.052567649 -0.005497101
+v -0.059695533 -0.052567649 -0.012624986
+v -0.066232334 -0.039774798 -0.010976160
+v -0.070912849 -0.040367525 -0.007148782
+v -0.066823418 -0.041875822 -0.005497101
+f 1 2 3
+f 7 3 4
+f 8 2 1
+f 9 4 3
+f 10 6 5
+f 10 4 6
+f 11 1 3
+f 13 5 6
+f 13 12 5
+f 13 8 12
+f 14 9 3
+f 14 3 2
+f 15 2 8
+f 15 8 13
+f 15 13 6
+f 15 14 2
+f 15 9 14
+f 16 4 9
+f 16 15 6
+f 16 9 15
+f 17 4 10
+f 17 12 1
+f 17 1 11
+f 18 11 3
+f 18 3 7
+f 18 7 11
+f 19 12 8
+f 19 8 1
+f 20 16 6
+f 20 6 4
+f 20 4 16
+f 21 7 4
+f 21 4 17
+f 21 17 11
+f 21 11 7
+f 22 17 10
+f 22 10 5
+f 22 5 12
+f 22 12 17
+f 23 19 1
+f 23 1 12
+f 23 12 19
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate031.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate031.obj
new file mode 100644
index 0000000000000000000000000000000000000000..f6fc8ae61637ed805119b65a5cff31b5c1146e22
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate031.obj
@@ -0,0 +1,92 @@
+v -0.066823418 0.050786676 -0.000151188
+v -0.056142635 0.043675254 -0.008693836
+v -0.056142149 0.049020443 -0.008631644
+v -0.061477505 0.049004705 -0.012624986
+v -0.069877998 0.039786448 -0.009456348
+v -0.056131591 0.047222734 -0.012624986
+v -0.062344833 0.053271649 -0.006710286
+v -0.068605389 0.040094849 -0.005497101
+v -0.056133277 0.043660036 -0.010696211
+v -0.065041447 0.043658792 -0.012624986
+v -0.067868345 0.053320599 -0.000671978
+v -0.056952805 0.049731776 -0.011539665
+v -0.066735215 0.040036346 -0.010548043
+v -0.057913562 0.043658792 -0.012624986
+v -0.075376538 0.043476981 -0.003555330
+v -0.070387360 0.052568647 -0.007279073
+v -0.065041447 0.052568647 -0.010843015
+v -0.066919142 0.040171292 -0.008356457
+v -0.068641418 0.052594575 -0.000169145
+v -0.071165322 0.040533568 -0.006854023
+v -0.073951302 0.045440763 -0.009061044
+v -0.061202928 0.052346022 -0.009690010
+v -0.068605389 0.052568647 -0.009061044
+v -0.065041447 0.045440763 -0.012624986
+v -0.069367443 0.051335067 -0.000530994
+v -0.075733274 0.047222734 -0.003715130
+v -0.073951302 0.043658792 -0.009061044
+v -0.072169331 0.050786676 -0.007279073
+v -0.066823418 0.050786676 -0.010843015
+v -0.070387360 0.045440763 -0.010843015
+v -0.074870801 0.043206915 -0.004685301
+v -0.075733274 0.047222734 -0.005497101
+f 1 2 3
+f 8 2 1
+f 9 6 3
+f 9 3 2
+f 10 4 6
+f 11 1 3
+f 11 3 7
+f 12 6 4
+f 12 7 3
+f 12 3 6
+f 13 8 5
+f 13 5 10
+f 13 9 2
+f 14 10 6
+f 14 6 9
+f 14 13 10
+f 14 9 13
+f 15 8 1
+f 17 12 4
+f 18 13 2
+f 18 2 8
+f 18 8 13
+f 19 1 11
+f 19 11 16
+f 20 15 5
+f 20 5 8
+f 20 8 15
+f 22 17 7
+f 22 7 12
+f 22 12 17
+f 23 7 17
+f 23 16 11
+f 23 11 7
+f 24 17 4
+f 24 4 10
+f 25 15 1
+f 25 1 19
+f 26 19 16
+f 26 25 19
+f 26 15 25
+f 27 10 5
+f 28 23 21
+f 28 16 23
+f 29 23 17
+f 29 21 23
+f 29 17 24
+f 30 24 10
+f 30 29 24
+f 30 21 29
+f 30 27 21
+f 30 10 27
+f 31 27 5
+f 31 5 15
+f 31 15 27
+f 32 21 27
+f 32 28 21
+f 32 26 16
+f 32 16 28
+f 32 27 15
+f 32 15 26
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate033.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate033.obj
new file mode 100644
index 0000000000000000000000000000000000000000..1a0b69f33f8824b811edb5161a0c538deb4b1ba9
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate033.obj
@@ -0,0 +1,32 @@
+v 0.054421738 0.049057893 -0.008598421
+v 0.061466429 -0.020503910 -0.008657347
+v 0.061481082 0.025840153 -0.010749565
+v 0.049005395 0.049005295 -0.010748121
+v 0.048993588 -0.020502979 -0.008548086
+v 0.054351293 0.049004986 -0.010818563
+v 0.061469910 0.025829271 -0.008758581
+v 0.049004938 0.049005567 -0.008762062
+v 0.049006653 -0.020494045 -0.010577030
+v 0.056206265 0.045495862 -0.008645826
+v 0.061479782 -0.020493401 -0.010668364
+v 0.056133036 0.045440948 -0.010826849
+f 5 2 1
+f 6 4 1
+f 7 1 2
+f 8 5 1
+f 8 1 4
+f 8 4 5
+f 9 2 5
+f 9 5 4
+f 9 4 6
+f 10 6 1
+f 10 7 3
+f 10 1 7
+f 11 2 9
+f 11 7 2
+f 11 3 7
+f 12 10 3
+f 12 6 10
+f 12 3 11
+f 12 11 9
+f 12 9 6
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate036.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate036.obj
new file mode 100644
index 0000000000000000000000000000000000000000..074c9ea04893116338adc0241c5e7cab542bb91b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate036.obj
@@ -0,0 +1,128 @@
+v 0.040094849 -0.079297216 0.012322610
+v 0.051230742 -0.072807745 0.006685630
+v 0.047222734 -0.070387360 0.003412754
+v 0.034711361 -0.073878642 -0.008903689
+v 0.041876821 -0.084643129 0.008758667
+v 0.047222734 -0.072169331 -0.009061044
+v 0.034748936 -0.073951302 -0.003715130
+v 0.045440763 -0.073951302 0.008758667
+v 0.041876821 -0.084643129 0.012322610
+v 0.048471323 -0.069754004 -0.006456910
+v 0.036530907 -0.081079187 -0.005497101
+v 0.050786676 -0.077515245 0.003412754
+v 0.049559773 -0.074749299 0.008394844
+v 0.049004705 -0.070387360 0.005194725
+v 0.034748936 -0.079297216 0.006976696
+v 0.043522279 -0.070144140 -0.008508237
+v 0.038312878 -0.077515245 -0.009061044
+v 0.034650927 -0.084441445 0.007066538
+v 0.047222734 -0.075733274 -0.005497101
+v 0.047222734 -0.081079187 0.008758667
+v 0.051629387 -0.071598887 0.004642364
+v 0.049004705 -0.072169331 -0.007279073
+v 0.043935644 -0.079867642 0.011667802
+v 0.046487765 -0.069514633 -0.007928152
+v 0.041876821 -0.070387360 -0.003715130
+v 0.034748936 -0.081079187 -0.005497101
+v 0.036530907 -0.084643129 0.003412754
+v 0.039503705 -0.083461454 0.011388443
+v 0.034113144 -0.083334794 0.003995562
+v 0.045440763 -0.073951302 -0.009061044
+v 0.038312878 -0.079297216 -0.007279073
+v 0.045440763 -0.081079187 0.003412754
+v 0.048341208 -0.078225832 0.009258780
+v 0.050925732 -0.070565503 0.001534932
+v 0.050786676 -0.075733274 -0.000151188
+v 0.034301700 -0.076648444 -0.007722516
+v 0.038312878 -0.082861158 -0.000151188
+v 0.036530907 -0.082861158 -0.001933159
+v 0.034748936 -0.082861158 -0.001933159
+v 0.041876821 -0.077515245 -0.007279073
+v 0.047222734 -0.077515245 -0.001933159
+v 0.047222734 -0.081079187 0.006976696
+v 0.050063859 -0.076348075 0.007521521
+v 0.049004705 -0.073951302 -0.005497101
+f 8 3 7
+f 13 8 1
+f 13 2 8
+f 14 8 2
+f 14 3 8
+f 15 8 7
+f 15 1 8
+f 16 6 4
+f 17 4 6
+f 18 1 15
+f 20 9 5
+f 21 2 12
+f 21 14 2
+f 22 6 10
+f 22 19 6
+f 23 13 1
+f 23 1 9
+f 24 14 10
+f 24 3 14
+f 24 10 6
+f 24 6 16
+f 25 16 4
+f 25 4 7
+f 25 7 3
+f 25 24 16
+f 25 3 24
+f 26 17 11
+f 27 5 9
+f 27 9 18
+f 28 18 9
+f 28 9 1
+f 28 1 18
+f 29 18 15
+f 29 15 7
+f 30 17 6
+f 30 6 19
+f 31 19 11
+f 31 11 17
+f 31 17 30
+f 32 11 19
+f 33 9 20
+f 33 23 9
+f 33 13 23
+f 34 10 14
+f 34 14 21
+f 34 22 10
+f 35 21 12
+f 35 12 19
+f 35 34 21
+f 35 22 34
+f 36 4 17
+f 36 17 26
+f 36 26 29
+f 36 29 7
+f 36 7 4
+f 37 32 5
+f 38 26 11
+f 38 11 32
+f 38 32 37
+f 38 37 5
+f 38 5 27
+f 39 27 18
+f 39 18 29
+f 39 29 26
+f 39 38 27
+f 39 26 38
+f 40 31 30
+f 40 30 19
+f 40 19 31
+f 41 32 19
+f 41 19 12
+f 41 12 32
+f 42 20 5
+f 42 5 32
+f 42 32 12
+f 42 12 20
+f 43 2 13
+f 43 13 33
+f 43 12 2
+f 43 33 20
+f 43 20 12
+f 44 35 19
+f 44 19 22
+f 44 22 35
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate037.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate037.obj
new file mode 100644
index 0000000000000000000000000000000000000000..e5e9f7083322076f94b0b28039e4447f58c46e5c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate037.obj
@@ -0,0 +1,83 @@
+v 0.077516243 -0.054349620 0.012322610
+v 0.077516243 -0.054349620 0.008758667
+v 0.076334442 -0.050080371 0.007488875
+v 0.069760432 -0.051630215 0.001047222
+v 0.058784066 -0.069855394 0.011733802
+v 0.073952301 -0.054349620 0.001630783
+v 0.077193412 -0.050586772 0.011529169
+v 0.065042445 -0.068605389 0.008758667
+v 0.075734272 -0.052567649 0.003412754
+v 0.077516243 -0.052567649 0.006976696
+v 0.072170330 -0.050785678 0.012322610
+v 0.072170330 -0.061477505 0.012322610
+v 0.070388359 -0.061477505 0.005194725
+v 0.062580743 -0.064291819 0.003817408
+v 0.073434873 -0.050401510 0.001899237
+v 0.074745832 -0.050051803 0.003925580
+v 0.065042445 -0.054349620 0.003412754
+v 0.065042445 -0.068605389 0.012322610
+v 0.075734272 -0.056131591 0.006976696
+v 0.058075504 -0.068811572 0.008647729
+v 0.065837453 -0.056855862 0.001246901
+v 0.065042445 -0.065041447 0.003412754
+v 0.068606387 -0.050785678 0.005194725
+v 0.057914561 -0.066823418 0.012322610
+v 0.073952301 -0.056131591 0.003412754
+v 0.072170330 -0.061477505 0.008758667
+v 0.072170330 -0.056131591 0.001630783
+v 0.065042445 -0.054349620 0.001630783
+v 0.065042445 -0.066823418 0.005194725
+f 10 7 1
+f 10 1 2
+f 10 3 7
+f 11 1 7
+f 11 7 3
+f 12 2 1
+f 12 1 11
+f 15 9 6
+f 15 6 4
+f 16 10 9
+f 16 3 10
+f 16 9 15
+f 16 11 3
+f 18 5 8
+f 18 8 12
+f 18 12 11
+f 19 10 2
+f 19 9 10
+f 20 8 5
+f 22 20 14
+f 22 14 21
+f 23 15 4
+f 23 16 15
+f 23 11 16
+f 24 23 17
+f 24 11 23
+f 24 18 11
+f 24 5 18
+f 24 20 5
+f 25 19 13
+f 25 6 9
+f 25 9 19
+f 26 19 2
+f 26 2 12
+f 26 12 8
+f 26 8 13
+f 26 13 19
+f 27 21 4
+f 27 4 6
+f 27 22 21
+f 27 13 22
+f 27 25 13
+f 27 6 25
+f 28 4 21
+f 28 23 4
+f 28 17 23
+f 28 24 17
+f 28 20 24
+f 28 21 14
+f 28 14 20
+f 29 22 13
+f 29 13 8
+f 29 8 20
+f 29 20 22
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate042.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate042.obj
new file mode 100644
index 0000000000000000000000000000000000000000..0cfa5ed856e1aa1b6ecaef563a2a7dc972da56d4
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate042.obj
@@ -0,0 +1,71 @@
+v 0.088208070 0.015147254 -0.001933159
+v 0.088208070 -0.013364284 -0.003715130
+v 0.088208070 0.009801340 -0.005497101
+v 0.081080185 0.015147254 -0.010843015
+v 0.079298214 -0.020492169 -0.003715130
+v 0.082862156 -0.009800342 -0.010843015
+v 0.086426099 0.015147254 -0.007279073
+v 0.086889940 -0.013230474 -0.001428889
+v 0.080190568 0.015351765 -0.006807222
+v 0.086426099 -0.015146255 -0.007279073
+v 0.078809200 -0.018555651 -0.009180484
+v 0.088208070 0.015147254 -0.003715130
+v 0.082862156 0.011583311 -0.010843015
+v 0.083266970 -0.020565421 -0.002087578
+v 0.084090897 0.015379348 -0.002425454
+v 0.084644127 0.015147254 -0.009061044
+v 0.088208070 -0.008018371 -0.005497101
+v 0.081080185 -0.018710198 -0.010843015
+v 0.079118740 0.015193910 -0.009663317
+v 0.082862156 -0.020492169 -0.009061044
+v 0.079298214 -0.018710198 -0.003715130
+v 0.081080185 -0.016928226 -0.001933159
+v 0.084644127 -0.015146255 -0.009061044
+v 0.079243313 -0.020474818 -0.008874392
+v 0.081080185 -0.020492169 -0.001933159
+f 1 2 3
+f 8 2 1
+f 10 7 3
+f 10 6 7
+f 12 1 3
+f 12 3 7
+f 13 7 6
+f 13 6 4
+f 14 2 8
+f 15 1 12
+f 15 12 7
+f 16 4 9
+f 16 13 4
+f 16 7 13
+f 16 15 7
+f 16 9 15
+f 17 10 3
+f 17 3 2
+f 17 2 10
+f 18 4 6
+f 19 9 4
+f 19 18 11
+f 19 4 18
+f 20 10 2
+f 20 2 14
+f 21 15 9
+f 21 9 19
+f 21 19 11
+f 21 11 5
+f 22 8 1
+f 22 1 15
+f 22 15 21
+f 22 21 5
+f 23 18 6
+f 23 6 10
+f 23 20 18
+f 23 10 20
+f 24 20 14
+f 24 14 5
+f 24 5 11
+f 24 11 18
+f 24 18 20
+f 25 14 8
+f 25 8 22
+f 25 22 5
+f 25 5 14
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate045.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate045.obj
new file mode 100644
index 0000000000000000000000000000000000000000..50b3601ed1c0f7306e9b68ba42020036a47f250b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate045.obj
@@ -0,0 +1,119 @@
+v -0.075733274 -0.045439764 0.012322610
+v -0.063259476 -0.057913562 0.005194725
+v -0.064883657 -0.043532109 -0.010477311
+v -0.068605389 -0.047221736 -0.010843015
+v -0.073951302 -0.057913562 0.006976696
+v -0.063259476 -0.059695533 -0.007279073
+v -0.079297216 -0.043657793 -0.000151188
+v -0.063259476 -0.047221736 -0.005497101
+v -0.066137361 -0.060632276 0.008224994
+v -0.063259476 -0.052567649 -0.010843015
+v -0.081803712 -0.043020049 0.011435007
+v -0.071256969 -0.043085314 -0.007996333
+v -0.075733274 -0.047221736 -0.005497101
+v -0.065041447 -0.057913562 0.008758667
+v -0.077515245 -0.043657793 0.012322610
+v -0.062914361 -0.048712689 -0.009794433
+v -0.081079187 -0.049003707 0.012322610
+v -0.064270640 -0.060645791 0.004647773
+v -0.072169331 -0.059695533 0.006976696
+v -0.065041447 -0.059695533 -0.007279073
+v -0.068041956 -0.043262418 -0.009339748
+v -0.081490726 -0.042966645 0.009361271
+v -0.072169331 -0.047221736 -0.009061044
+v -0.077515245 -0.043657793 -0.003715130
+v -0.065377602 -0.043832863 -0.008335655
+v -0.082861158 -0.045439764 0.012322610
+v -0.081079187 -0.049003707 0.008758667
+v -0.072169331 -0.052567649 0.012322610
+v -0.072169331 -0.059695533 0.008758667
+v -0.065041447 -0.052567649 -0.010843015
+v -0.068605389 -0.057913562 -0.003715130
+v -0.070387360 -0.052567649 -0.007279073
+v -0.079297216 -0.045439764 -0.000151188
+v -0.082861158 -0.045439764 0.008758667
+v -0.079297216 -0.050785678 0.006976696
+v -0.072169331 -0.050785678 0.012322610
+v -0.073951302 -0.057913562 0.008758667
+v -0.068605389 -0.052567649 -0.009061044
+v -0.070387360 -0.054349620 -0.005497101
+v -0.077515245 -0.045439764 -0.003715130
+v -0.079297216 -0.047221736 0.001630783
+f 10 4 3
+f 14 8 1
+f 14 2 8
+f 15 1 8
+f 15 3 11
+f 16 8 2
+f 16 2 6
+f 16 6 10
+f 16 10 3
+f 16 3 8
+f 17 1 15
+f 18 6 2
+f 18 14 9
+f 18 2 14
+f 19 18 9
+f 20 10 6
+f 20 19 5
+f 20 6 18
+f 20 18 19
+f 21 3 4
+f 21 4 12
+f 21 11 3
+f 22 21 12
+f 22 11 21
+f 23 12 4
+f 24 23 13
+f 24 12 23
+f 24 22 12
+f 24 7 22
+f 25 15 8
+f 25 8 3
+f 25 3 15
+f 26 17 15
+f 26 15 11
+f 27 5 17
+f 27 17 26
+f 28 1 17
+f 28 9 14
+f 29 5 19
+f 29 19 9
+f 29 28 17
+f 29 9 28
+f 30 4 10
+f 30 10 20
+f 30 23 4
+f 31 20 5
+f 31 5 13
+f 32 20 13
+f 32 13 23
+f 33 7 24
+f 34 26 11
+f 34 11 22
+f 34 22 7
+f 34 7 33
+f 34 27 26
+f 35 5 27
+f 36 28 14
+f 36 14 1
+f 36 1 28
+f 37 29 17
+f 37 17 5
+f 37 5 29
+f 38 30 20
+f 38 23 30
+f 38 32 23
+f 38 20 32
+f 39 31 13
+f 39 13 20
+f 39 20 31
+f 40 33 24
+f 40 24 13
+f 40 13 33
+f 41 34 33
+f 41 27 34
+f 41 35 27
+f 41 33 13
+f 41 13 5
+f 41 5 35
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate046.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate046.obj
new file mode 100644
index 0000000000000000000000000000000000000000..c1cd86cbe3698c56a32130924e8437b4d547389e
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate046.obj
@@ -0,0 +1,44 @@
+v 0.090700834 0.009637463 0.007383360
+v 0.091772012 -0.008018371 0.003412754
+v 0.089990041 0.006237398 -0.001933159
+v 0.085154781 0.009820690 -0.002135492
+v 0.086426099 -0.008018371 0.006976696
+v 0.085130002 -0.008090989 -0.002131284
+v 0.089990041 0.009801340 -0.000151188
+v 0.090788768 -0.007918557 0.007352851
+v 0.086426099 0.009801340 0.006976696
+v 0.089990041 -0.004454429 -0.001933159
+v 0.087243325 0.009711799 -0.001548178
+v 0.091772012 0.009801340 0.003412754
+v 0.084644127 0.009801340 0.001630783
+v 0.089990041 -0.008018371 -0.000151188
+v 0.084644127 -0.008018371 0.001630783
+v 0.087276605 -0.007954450 -0.001555391
+f 6 2 5
+f 8 5 2
+f 8 1 5
+f 9 5 1
+f 10 3 2
+f 10 6 4
+f 10 4 3
+f 11 7 3
+f 11 3 4
+f 11 4 7
+f 12 2 3
+f 12 3 7
+f 12 8 2
+f 12 1 8
+f 12 9 1
+f 12 7 4
+f 12 4 9
+f 13 5 9
+f 13 9 4
+f 13 4 6
+f 14 10 2
+f 14 2 6
+f 15 13 6
+f 15 6 5
+f 15 5 13
+f 16 14 6
+f 16 6 10
+f 16 10 14
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate047.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate047.obj
new file mode 100644
index 0000000000000000000000000000000000000000..c769c2dd35ae8610133f9211bb55bebd41ae80bd
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate047.obj
@@ -0,0 +1,44 @@
+v 0.068596679 -0.020492592 -0.008715522
+v 0.068960969 -0.040345348 -0.007979867
+v 0.068606387 -0.040093851 -0.012624986
+v 0.049006653 -0.020494045 -0.010577030
+v 0.048993588 -0.020502979 -0.008548086
+v 0.068610385 -0.020492036 -0.010698615
+v 0.066824588 -0.045439866 -0.007279374
+v 0.066824416 -0.031183995 -0.012624986
+v 0.069395314 -0.031428038 -0.011640561
+v 0.049005208 -0.045440392 -0.010768062
+v 0.049003348 -0.045431005 -0.008516123
+v 0.065042445 -0.045439764 -0.012624986
+v 0.060299289 -0.045960389 -0.011310012
+v 0.066523542 -0.045256056 -0.009724089
+v 0.059696532 -0.041875822 -0.012624986
+v 0.065042445 -0.032965967 -0.012624986
+f 5 1 4
+f 6 4 1
+f 7 2 1
+f 7 1 5
+f 7 3 2
+f 8 4 6
+f 9 6 1
+f 9 1 2
+f 9 2 3
+f 9 8 6
+f 9 3 8
+f 10 5 4
+f 11 7 5
+f 11 5 10
+f 12 8 3
+f 13 11 10
+f 13 7 11
+f 13 12 7
+f 13 10 12
+f 14 12 3
+f 14 3 7
+f 14 7 12
+f 15 12 10
+f 15 10 4
+f 15 8 12
+f 16 15 4
+f 16 4 8
+f 16 8 15
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate048.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate048.obj
new file mode 100644
index 0000000000000000000000000000000000000000..a64caadea28c61394018fc0c272f01b3888958c9
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate048.obj
@@ -0,0 +1,47 @@
+v 0.013365283 0.093553983 0.012322610
+v 0.015385479 0.089290218 0.011353656
+v 0.011583311 0.093553983 0.008758667
+v -0.022274140 0.091772012 0.008758667
+v -0.018710198 0.086426099 0.012322610
+v -0.015146255 0.086426099 0.008758667
+v -0.013364284 0.093553983 0.012322610
+v 0.014934644 0.090691132 0.009230853
+v -0.022404159 0.086846832 0.008593512
+v 0.015173185 0.088332851 0.008711373
+v -0.009800342 0.093553983 0.008758667
+v -0.022274140 0.091772012 0.012322610
+v 0.015087741 0.091117348 0.011539440
+v 0.013365283 0.093553983 0.010540639
+v -0.015146255 0.086426099 0.010540639
+v -0.013364284 0.093553983 0.010540639
+v -0.022614222 0.087660218 0.011499590
+f 5 2 1
+f 7 1 3
+f 7 5 1
+f 9 6 5
+f 10 2 5
+f 10 8 2
+f 10 3 8
+f 10 9 3
+f 10 6 9
+f 11 7 3
+f 11 9 4
+f 11 3 9
+f 12 7 4
+f 12 5 7
+f 13 1 2
+f 13 2 8
+f 14 8 3
+f 14 3 1
+f 14 13 8
+f 14 1 13
+f 15 10 5
+f 15 5 6
+f 15 6 10
+f 16 11 4
+f 16 4 7
+f 16 7 11
+f 17 9 5
+f 17 5 12
+f 17 12 4
+f 17 4 9
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate049.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate049.obj
new file mode 100644
index 0000000000000000000000000000000000000000..99f2e4be7621d4149f916c3c84d1b5895e9505d3
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate049.obj
@@ -0,0 +1,59 @@
+v 0.075734272 -0.029402024 -0.003715130
+v 0.078170803 -0.031418881 -0.004218429
+v 0.077636862 -0.020524590 -0.007577569
+v 0.070388359 -0.020492169 -0.012624986
+v 0.067723860 -0.036065044 -0.011237376
+v 0.068596679 -0.020492592 -0.008715522
+v 0.077516243 -0.022274140 -0.005497101
+v 0.068933343 -0.036671894 -0.008512312
+v 0.077516243 -0.031183995 -0.010843015
+v 0.077123514 -0.020368052 -0.009507831
+v 0.068606387 -0.025838082 -0.012624986
+v 0.078542578 -0.029770319 -0.004504291
+v 0.075734272 -0.022274140 -0.005497101
+v 0.075734272 -0.031183995 -0.003715130
+v 0.070388359 -0.036529909 -0.012624986
+v 0.068610385 -0.020492036 -0.010698615
+v 0.075734272 -0.020492169 -0.012624986
+v 0.072266096 -0.036571497 -0.007640292
+v 0.075734272 -0.024056111 -0.012624986
+v 0.071677245 -0.036373420 -0.009807179
+v 0.072170330 -0.034747938 -0.012624986
+f 8 6 5
+f 10 6 3
+f 10 3 9
+f 12 7 1
+f 12 1 2
+f 12 3 7
+f 12 9 3
+f 12 2 9
+f 13 6 1
+f 13 1 7
+f 13 7 3
+f 13 3 6
+f 14 2 1
+f 14 1 6
+f 14 6 8
+f 15 8 5
+f 15 5 11
+f 15 11 4
+f 16 10 4
+f 16 6 10
+f 16 4 11
+f 16 11 5
+f 16 5 6
+f 17 10 9
+f 17 4 10
+f 17 15 4
+f 18 14 8
+f 18 2 14
+f 18 8 15
+f 18 9 2
+f 19 17 9
+f 19 15 17
+f 20 18 15
+f 20 15 9
+f 20 9 18
+f 21 19 9
+f 21 9 15
+f 21 15 19
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate052.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate052.obj
new file mode 100644
index 0000000000000000000000000000000000000000..42722286745e64c89f4c14aa7e691432fdac66a6
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate052.obj
@@ -0,0 +1,69 @@
+v 0.040301174 -0.081479758 0.011535523
+v 0.039503705 -0.083461454 0.011388443
+v 0.040331590 -0.081585312 0.006745741
+v 0.008055103 -0.088761798 0.006740550
+v 0.015147254 -0.088207071 0.012322610
+v 0.020493167 -0.091771014 0.006976696
+v 0.032966965 -0.081079187 0.008758667
+v 0.040094849 -0.084643129 0.006976696
+v 0.038312878 -0.086425100 0.012322610
+v 0.033582940 -0.082421837 0.006415492
+v 0.007986198 -0.091497092 0.008862733
+v 0.034748936 -0.081079187 0.012322610
+v 0.034748936 -0.088207071 0.010540639
+v 0.036530907 -0.086425100 0.006976696
+v 0.013365283 -0.086425100 0.006976696
+v 0.008080937 -0.089407745 0.008264896
+v 0.007915461 -0.090912976 0.007302674
+v 0.038312878 -0.086425100 0.008758667
+v 0.022275138 -0.091771014 0.012322610
+v 0.013365283 -0.086425100 0.008758667
+v 0.029403023 -0.089989042 0.010540639
+v 0.014993939 -0.090903571 0.011379392
+v 0.022275138 -0.091771014 0.008758667
+v 0.034748936 -0.088207071 0.012322610
+v 0.029403023 -0.089989042 0.012322610
+f 8 3 1
+f 9 8 1
+f 10 6 4
+f 10 3 8
+f 10 7 3
+f 12 1 3
+f 12 3 7
+f 12 9 1
+f 12 5 9
+f 14 10 8
+f 14 6 10
+f 15 10 4
+f 15 7 10
+f 16 11 5
+f 17 4 6
+f 17 6 11
+f 17 16 4
+f 17 11 16
+f 18 8 9
+f 18 9 13
+f 18 14 8
+f 19 11 6
+f 19 9 5
+f 20 12 7
+f 20 7 15
+f 20 5 12
+f 20 16 5
+f 20 15 4
+f 20 4 16
+f 21 18 13
+f 21 6 14
+f 21 14 18
+f 22 19 5
+f 22 5 11
+f 22 11 19
+f 23 21 19
+f 23 19 6
+f 23 6 21
+f 24 21 13
+f 24 13 9
+f 24 9 19
+f 25 24 19
+f 25 19 21
+f 25 21 24
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate053.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate053.obj
new file mode 100644
index 0000000000000000000000000000000000000000..2a53075f6d3d0b6035c46d7ee52e728de0301a52
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate053.obj
@@ -0,0 +1,62 @@
+v 0.061478503 -0.050785678 -0.005497101
+v 0.063260474 -0.061477505 -0.005497101
+v 0.063260474 -0.047221736 -0.012624986
+v 0.057914561 -0.045439764 -0.012624986
+v 0.050786676 -0.061477505 -0.005497101
+v 0.063272033 -0.045467626 -0.008473695
+v 0.059083136 -0.062583005 -0.004845970
+v 0.050784911 -0.045428798 -0.008518249
+v 0.054350618 -0.063259476 -0.010843015
+v 0.062460342 -0.044732279 -0.011481200
+v 0.064059908 -0.051256627 -0.006902925
+v 0.063260474 -0.061477505 -0.007279073
+v 0.051336560 -0.063996443 -0.006706891
+v 0.052568647 -0.059695533 -0.012624986
+v 0.050787099 -0.045440292 -0.010780013
+v 0.063260474 -0.054349620 -0.010843015
+v 0.059696532 -0.063259476 -0.007279073
+v 0.050468231 -0.062919595 -0.009776160
+v 0.050786676 -0.052567649 -0.012624986
+v 0.061478503 -0.050785678 -0.012624986
+v 0.057914561 -0.063259476 -0.009061044
+v 0.050306134 -0.059182643 -0.011015072
+f 7 2 1
+f 7 1 5
+f 8 5 1
+f 8 1 6
+f 10 6 3
+f 10 3 4
+f 10 8 6
+f 11 6 1
+f 11 1 2
+f 11 3 6
+f 12 11 2
+f 13 7 5
+f 14 4 3
+f 15 10 4
+f 15 8 10
+f 16 3 11
+f 16 11 12
+f 16 9 14
+f 17 12 2
+f 17 2 7
+f 17 7 13
+f 18 14 9
+f 18 9 13
+f 18 13 5
+f 19 15 4
+f 19 4 14
+f 20 16 14
+f 20 14 3
+f 20 3 16
+f 21 12 17
+f 21 16 12
+f 21 9 16
+f 21 17 13
+f 21 13 9
+f 22 18 5
+f 22 5 8
+f 22 8 15
+f 22 15 19
+f 22 19 14
+f 22 14 18
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate054.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate054.obj
new file mode 100644
index 0000000000000000000000000000000000000000..352af6814ab46f3d8dc5ba089c72f26e04424f35
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate054.obj
@@ -0,0 +1,80 @@
+v -0.022274140 0.075734272 -0.005497101
+v -0.018721255 0.068622871 -0.008695660
+v -0.018710198 0.075734272 -0.012624986
+v -0.036529909 0.070388359 -0.012624986
+v -0.043657793 0.066824416 -0.005497101
+v -0.027620053 0.066824416 -0.012624986
+v -0.041875822 0.077516243 -0.007279073
+v -0.018756342 0.077703804 -0.007764766
+v -0.041875822 0.066824416 -0.005497101
+v -0.018711735 0.068610194 -0.010713781
+v -0.043282989 0.068014643 -0.009573267
+v -0.031183995 0.077516243 -0.010843015
+v -0.043105636 0.074756508 -0.004677133
+v -0.018582210 0.077137911 -0.009987181
+v -0.041875822 0.077516243 -0.005497101
+v -0.024067005 0.066840655 -0.008674669
+v -0.020492169 0.070388359 -0.012624986
+v -0.043799284 0.067044575 -0.007809229
+v -0.035184986 0.067606531 -0.011686605
+v -0.043657793 0.070388359 -0.010843015
+v -0.024056111 0.075734272 -0.012624986
+v -0.022564933 0.078323719 -0.006878146
+v -0.043657793 0.075734272 -0.007279073
+v -0.024057516 0.066827897 -0.010724864
+v -0.040093851 0.073952301 -0.010843015
+v -0.038311880 0.077516243 -0.009061044
+v -0.041875822 0.075734272 -0.009061044
+v -0.043657793 0.073952301 -0.009061044
+f 6 4 3
+f 8 1 2
+f 9 2 1
+f 9 5 6
+f 13 9 1
+f 13 5 9
+f 14 8 2
+f 14 3 12
+f 14 10 3
+f 14 2 10
+f 15 13 1
+f 16 2 9
+f 16 10 2
+f 17 10 6
+f 17 6 3
+f 17 3 10
+f 18 6 5
+f 19 4 6
+f 19 18 11
+f 19 6 18
+f 20 19 11
+f 20 4 19
+f 20 11 18
+f 21 12 3
+f 21 3 4
+f 22 14 12
+f 22 8 14
+f 22 7 15
+f 22 15 1
+f 22 1 8
+f 23 15 7
+f 23 13 15
+f 23 18 5
+f 23 5 13
+f 24 16 9
+f 24 9 6
+f 24 6 10
+f 24 10 16
+f 25 4 20
+f 25 21 4
+f 25 12 21
+f 26 22 12
+f 26 7 22
+f 26 12 25
+f 27 23 7
+f 27 25 20
+f 27 26 25
+f 27 7 26
+f 28 20 18
+f 28 18 23
+f 28 27 20
+f 28 23 27
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate055.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate055.obj
new file mode 100644
index 0000000000000000000000000000000000000000..8489d27ac0a000828e2c2ec4ec6dfb20034d3266
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate055.obj
@@ -0,0 +1,50 @@
+v 0.024089960 -0.068662115 -0.008429519
+v 0.023951228 -0.078895852 -0.006618106
+v 0.024057109 -0.075733274 -0.012624986
+v -0.029402024 -0.072169331 -0.012624986
+v -0.029195965 -0.077071200 -0.006587098
+v 0.024057109 -0.070387360 -0.012624986
+v 0.024057109 -0.079297216 -0.010843015
+v -0.029403919 -0.068593619 -0.008609023
+v -0.018698764 -0.079247069 -0.007193114
+v -0.015146255 -0.077515245 -0.012624986
+v -0.029402024 -0.077515245 -0.010843015
+v -0.015146255 -0.068605389 -0.012624986
+v 0.018711196 -0.077515245 -0.012624986
+v -0.018554985 -0.078607682 -0.009256106
+v -0.029468191 -0.068801102 -0.011225951
+v -0.027620053 -0.073951302 -0.012624986
+v 0.024057245 -0.068605673 -0.010814595
+v -0.022274140 -0.075733274 -0.012624986
+f 5 2 1
+f 6 1 3
+f 6 3 4
+f 7 1 2
+f 7 3 1
+f 8 5 1
+f 9 7 2
+f 9 2 5
+f 10 4 3
+f 11 5 8
+f 11 9 5
+f 12 6 4
+f 13 10 3
+f 13 3 7
+f 13 7 10
+f 14 10 7
+f 14 7 9
+f 14 11 10
+f 14 9 11
+f 15 11 8
+f 15 4 11
+f 15 12 4
+f 15 8 12
+f 16 11 4
+f 16 4 10
+f 17 12 8
+f 17 8 1
+f 17 1 6
+f 17 6 12
+f 18 16 10
+f 18 10 11
+f 18 11 16
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate056.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate056.obj
new file mode 100644
index 0000000000000000000000000000000000000000..f0115d6fc9206cbbc736abdaf628a2bc92687b10
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate056.obj
@@ -0,0 +1,59 @@
+v -0.079297216 -0.020492169 -0.003715130
+v -0.071350602 -0.027263760 -0.011279225
+v -0.072150539 -0.018710198 -0.008749044
+v -0.075733274 -0.018710198 -0.012624986
+v -0.081079187 -0.027620053 -0.009061044
+v -0.073095921 -0.018876339 -0.011165204
+v -0.079325640 -0.016935730 -0.007315419
+v -0.077515245 -0.027620053 -0.003715130
+v -0.073951302 -0.027620053 -0.012624986
+v -0.077515245 -0.016928226 -0.005497101
+v -0.081931055 -0.020710576 -0.004349152
+v -0.081079187 -0.018710198 -0.010843015
+v -0.077892295 -0.016998269 -0.008073492
+v -0.072152419 -0.027616756 -0.008749241
+v -0.077515245 -0.025838082 -0.003715130
+v -0.081314483 -0.018746054 -0.005687018
+v -0.080588640 -0.027407493 -0.003500614
+v -0.081079187 -0.022274140 -0.010843015
+v -0.079892045 -0.017084557 -0.006867559
+v -0.079297216 -0.027620053 -0.010843015
+v -0.075733274 -0.022274140 -0.012624986
+f 6 3 2
+f 6 2 4
+f 9 4 2
+f 9 8 5
+f 11 1 10
+f 13 6 4
+f 13 12 7
+f 13 4 12
+f 13 7 10
+f 13 10 3
+f 13 3 6
+f 14 2 3
+f 14 3 8
+f 14 9 2
+f 14 8 9
+f 15 8 3
+f 15 10 1
+f 15 3 10
+f 16 11 10
+f 16 12 11
+f 17 1 11
+f 17 15 1
+f 17 8 15
+f 17 11 5
+f 17 5 8
+f 18 5 11
+f 18 11 12
+f 18 12 4
+f 19 16 10
+f 19 10 7
+f 19 7 12
+f 19 12 16
+f 20 18 9
+f 20 9 5
+f 20 5 18
+f 21 18 4
+f 21 4 9
+f 21 9 18
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate057.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate057.obj
new file mode 100644
index 0000000000000000000000000000000000000000..d1d4505a10cfa0af7adba7e813095b231de11834
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate057.obj
@@ -0,0 +1,86 @@
+v -0.022153252 0.089613800 0.011476083
+v -0.018710198 0.084644127 0.006976696
+v -0.018710198 0.086426099 -0.005497101
+v -0.024056111 0.082862156 -0.009061044
+v -0.029402024 0.081080185 0.005194725
+v -0.018691382 0.080663262 -0.008393271
+v -0.029402024 0.089990041 0.010540639
+v -0.018697182 0.089914744 0.008788621
+v -0.025838082 0.084644127 0.012322610
+v -0.018543979 0.081800522 -0.007801792
+v -0.028998430 0.080057499 -0.006542886
+v -0.018710198 0.081080185 -0.001933159
+v -0.029402024 0.082862156 -0.007279073
+v -0.022274140 0.084644127 0.010540639
+v -0.029402024 0.089990041 0.012322610
+v -0.020492169 0.089990041 0.003412754
+v -0.018710198 0.089990041 0.003412754
+v -0.023761045 0.080305298 -0.007728259
+v -0.022274140 0.081080185 0.001630783
+v -0.018710198 0.082862156 0.003412754
+v -0.020492169 0.086426099 -0.005497101
+v -0.029657704 0.085603756 0.011326374
+v -0.022614222 0.087660218 0.011499590
+v -0.018881304 0.087688509 0.008283050
+v -0.027620053 0.081080185 0.005194725
+v -0.027620053 0.088208070 0.003412754
+v -0.029402024 0.088208070 0.005194725
+v -0.025838082 0.082862156 0.008758667
+v -0.025838082 0.086426099 -0.001933159
+v -0.024056111 0.089990041 0.005194725
+f 10 6 4
+f 10 4 3
+f 10 8 2
+f 12 6 10
+f 13 4 11
+f 13 11 5
+f 15 1 8
+f 15 9 1
+f 16 7 15
+f 17 10 3
+f 17 8 10
+f 17 3 16
+f 17 16 15
+f 17 15 8
+f 18 11 4
+f 18 4 6
+f 18 6 12
+f 19 18 12
+f 19 11 18
+f 20 12 10
+f 20 10 2
+f 20 19 12
+f 20 2 14
+f 20 14 19
+f 21 3 4
+f 21 4 13
+f 21 16 3
+f 22 13 5
+f 22 15 7
+f 22 5 9
+f 22 9 15
+f 23 14 8
+f 23 8 1
+f 23 1 9
+f 23 9 14
+f 24 14 2
+f 24 2 8
+f 24 8 14
+f 25 5 11
+f 25 11 19
+f 25 9 5
+f 27 22 7
+f 27 13 22
+f 27 26 13
+f 28 25 19
+f 28 19 14
+f 28 14 9
+f 28 9 25
+f 29 26 21
+f 29 21 13
+f 29 13 26
+f 30 7 16
+f 30 16 21
+f 30 21 26
+f 30 27 7
+f 30 26 27
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate058.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate058.obj
new file mode 100644
index 0000000000000000000000000000000000000000..5406430bcfbdf609949ec81101ea5aa248b7338b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate058.obj
@@ -0,0 +1,68 @@
+v -0.034747938 -0.081079187 0.012322610
+v -0.034446092 -0.085557627 0.011473042
+v -0.034747938 -0.077515245 0.001630783
+v -0.049010789 -0.072180003 0.001625967
+v -0.049003707 -0.081079187 0.012322610
+v -0.047221736 -0.072169331 0.006976696
+v -0.034103158 -0.083285637 0.004002987
+v -0.038311880 -0.086425100 0.012322610
+v -0.034747938 -0.077515245 0.003412754
+v -0.045439764 -0.072169331 0.001630783
+v -0.045439764 -0.075733274 0.012322610
+v -0.049914617 -0.075323767 0.009921249
+v -0.049003707 -0.077515245 0.001630783
+v -0.034747938 -0.086425100 0.006976696
+v -0.049003707 -0.072169331 0.006976696
+v -0.045439764 -0.072169331 0.003412754
+v -0.049443612 -0.076394449 0.011601015
+v -0.038311880 -0.082861158 0.001630783
+v -0.049003707 -0.081079187 0.008758667
+v -0.047221736 -0.073951302 0.010540639
+v -0.034441324 -0.082215626 0.001911458
+v -0.038311880 -0.086425100 0.008758667
+v -0.036529909 -0.084643129 0.003412754
+v -0.036529909 -0.086425100 0.006976696
+f 7 1 2
+f 8 2 1
+f 8 1 5
+f 9 6 1
+f 9 7 3
+f 9 1 7
+f 10 3 4
+f 10 9 3
+f 11 5 1
+f 11 1 6
+f 13 12 4
+f 14 7 2
+f 14 2 8
+f 15 10 4
+f 15 6 10
+f 15 4 12
+f 16 10 6
+f 16 6 9
+f 16 9 10
+f 17 12 5
+f 17 5 11
+f 17 15 12
+f 18 13 4
+f 18 4 3
+f 19 5 12
+f 19 12 13
+f 19 8 5
+f 19 13 18
+f 20 11 6
+f 20 6 15
+f 20 17 11
+f 20 15 17
+f 21 18 3
+f 21 3 7
+f 21 7 14
+f 22 19 18
+f 22 14 8
+f 22 8 19
+f 23 21 14
+f 23 18 21
+f 23 22 18
+f 24 23 14
+f 24 14 22
+f 24 22 23
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate060.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate060.obj
new file mode 100644
index 0000000000000000000000000000000000000000..60b0e751fd8daf6454288b8f38b8d0b3f031e614
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate060.obj
@@ -0,0 +1,50 @@
+v -0.056131591 -0.068605389 0.012322610
+v -0.055642352 -0.073323315 0.011515514
+v -0.056893686 -0.069458645 0.008333700
+v -0.077515245 -0.052567649 0.008758667
+v -0.077515245 -0.054349620 0.012322610
+v -0.059695533 -0.073951302 0.008758667
+v -0.070387360 -0.052567649 0.008758667
+v -0.055650073 -0.073274500 0.009081062
+v -0.059695533 -0.073951302 0.012322610
+v -0.072169331 -0.061477505 0.008758667
+v -0.068605389 -0.054349620 0.008758667
+v -0.072169331 -0.052567649 0.012322610
+v -0.077515245 -0.054349620 0.008758667
+v -0.073951302 -0.059695533 0.012322610
+v -0.068605389 -0.054349620 0.010540639
+v -0.076463537 -0.051932816 0.011393187
+v -0.073951302 -0.059695533 0.010540639
+v -0.070387360 -0.052567649 0.010540639
+f 7 3 4
+f 8 1 2
+f 8 3 1
+f 8 6 3
+f 8 2 6
+f 9 2 1
+f 9 1 5
+f 9 6 2
+f 10 3 6
+f 10 6 9
+f 11 1 3
+f 11 3 7
+f 12 5 1
+f 13 4 3
+f 13 3 10
+f 13 5 4
+f 14 10 9
+f 14 9 5
+f 14 5 13
+f 15 11 7
+f 15 12 1
+f 15 1 11
+f 16 7 4
+f 16 4 5
+f 16 5 12
+f 17 14 13
+f 17 13 10
+f 17 10 14
+f 18 15 7
+f 18 12 15
+f 18 16 12
+f 18 7 16
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate061.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate061.obj
new file mode 100644
index 0000000000000000000000000000000000000000..0aaec316300469c40e0d9737a7e30960fb29a26c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate061.obj
@@ -0,0 +1,68 @@
+v -0.043639913 0.077484581 0.001645363
+v -0.043657793 0.072170330 0.001630783
+v -0.043657793 0.070388359 -0.010843015
+v -0.050785678 0.066824416 -0.010843015
+v -0.051601891 0.069800397 0.001048725
+v -0.047221736 0.063260474 -0.005497101
+v -0.047221736 0.075734272 -0.005497101
+v -0.043596585 0.064945405 -0.010488472
+v -0.046461472 0.076169962 0.002250718
+v -0.051472978 0.063988679 -0.006834377
+v -0.049003707 0.068606387 0.001630783
+v -0.043657793 0.077516243 -0.003715130
+v -0.047221736 0.072170330 -0.009061044
+v -0.050785678 0.075734272 -0.000151188
+v -0.043870389 0.065435370 -0.008398817
+v -0.050785678 0.065042445 -0.010843015
+v -0.050785678 0.073952301 -0.003715130
+v -0.050800388 0.063277429 -0.007329460
+v -0.050785678 0.075734272 0.001630783
+v -0.045439764 0.070388359 -0.010843015
+v -0.045439764 0.077516243 -0.003715130
+v -0.047489888 0.063569557 -0.008197610
+v -0.051173627 0.072744391 0.001369592
+v -0.047221736 0.077516243 -0.000151188
+f 8 1 2
+f 9 2 1
+f 11 6 2
+f 11 5 10
+f 11 2 9
+f 12 3 7
+f 12 8 3
+f 12 1 8
+f 13 4 7
+f 15 8 2
+f 15 2 6
+f 15 6 8
+f 16 4 3
+f 16 3 8
+f 16 10 4
+f 17 10 5
+f 17 4 10
+f 17 5 14
+f 17 14 7
+f 17 7 4
+f 18 11 10
+f 18 6 11
+f 18 10 16
+f 19 11 9
+f 19 14 5
+f 19 9 1
+f 20 3 4
+f 20 4 13
+f 20 13 7
+f 20 7 3
+f 21 12 7
+f 21 7 14
+f 22 18 16
+f 22 16 8
+f 22 8 6
+f 22 6 18
+f 23 19 5
+f 23 5 11
+f 23 11 19
+f 24 21 14
+f 24 14 19
+f 24 19 1
+f 24 1 12
+f 24 12 21
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate063.obj b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate063.obj
new file mode 100644
index 0000000000000000000000000000000000000000..ea8d40d3f569f44fddb3bc461ce58a1e7c7c8175
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/assets/plates/kalas_plate/kalas_plate063.obj
@@ -0,0 +1,26 @@
+v 0.020518252 0.077607878 -0.007767813
+v 0.020565265 0.072437477 -0.008722805
+v 0.020493167 0.075734272 -0.012624986
+v -0.018710198 0.077516243 -0.012624986
+v -0.018725087 0.072201152 -0.008647622
+v 0.015147254 0.077516243 -0.012624986
+v 0.020780450 0.073306369 -0.011692210
+v -0.018756342 0.077703804 -0.007764766
+v 0.020462798 0.077251804 -0.010003821
+v -0.019106221 0.073512111 -0.011681547
+f 5 2 1
+f 6 3 4
+f 7 1 2
+f 7 2 5
+f 8 5 1
+f 8 6 4
+f 8 1 6
+f 9 6 1
+f 9 3 6
+f 9 7 3
+f 9 1 7
+f 10 7 5
+f 10 4 3
+f 10 3 7
+f 10 8 4
+f 10 5 8
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/.gitignore b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..f3e6bd02800d58c8c156b8fa4b5ffc58e4de6005
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/.gitignore
@@ -0,0 +1 @@
+videos/
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/__init__.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/__pycache__/__init__.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4d97a64c650e01102115f396acb9d92bbfdfb908
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/__pycache__/__init__.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/__pycache__/demo_manual_control.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/__pycache__/demo_manual_control.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..687c331798642251a1c8ea04cbc103d751cd9e63
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/__pycache__/demo_manual_control.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/__pycache__/demo_random_action.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/__pycache__/demo_random_action.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b9330c5d8a31b9dfca631659c29557d7c6a5b7df
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/__pycache__/demo_random_action.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/__pycache__/demo_robot.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/__pycache__/demo_robot.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3402ed35fd856d21e682fee5ad82c4807aaabd4c
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/__pycache__/demo_robot.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/README.md b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..843d6423436cb4676a0a34307fbedb9edb22bf85
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/README.md
@@ -0,0 +1,76 @@
+# Performance Benchmarking
+
+See [the performance benchmarking documentation](https://maniskill.readthedocs.io/en/latest/user_guide/additional_resources/performance_benchmarking.html) for in depth details.
+
+If you plan to run the code here you need to git clone ManiSkill and change your directory to this one before running the code
+
+Code Structure:
+- `scripts/`: Bash scripts to run a matrix of performance tests. Results are saved to a local `benchmark_results` folder
+- `plot_results.py`: Run this code to generate graphs of performance results saved to `benchmark_results`
+- `envs/`: custom environments built for benchmarking, designed to be as close as possible between different simulators. Currently only Cartpole environment is tuned correctly for benchmarking across all simulators.
+
+
+## Setup
+
+### ManiSkill
+
+See https://maniskill.readthedocs.io/en/latest/user_guide/getting_started/installation.html and then run
+
+```
+pip install pynvml
+```
+
+
+
+
+
+### Isaac Lab
+
+See https://isaac-sim.github.io/IsaacLab/source/setup/installation/index.html to create a conda/mamba environment.
+
+Then run `pip install pynvml tyro pandas`.
+
+## Running the Benchmark
+
+All scripts are provided in the scripts folder that you can simply run directly. Otherwise example usages are shown below for benchmarking simulation and simulation+rendering FPS.
+
+See the `scripts/` folder for the full list of commands used to generate official results, those commands save results to the `benchmark_results` folder in a .csv format. Running a benchmark with the same configurations of cameras/number of environments/choice of GPU will override the previous result. Example commands are shown below
+
+### ManiSkill
+
+```bash
+python gpu_sim.py -e "CartpoleBalanceBenchmark-v1" \
+ -n=2048 -o=state
+
+python gpu_sim.py -e "CartpoleBalanceBenchmark-v1" \
+ -n=1024 -o=rgb --num-cams=1 --cam-width=256 --cam-height=256
+
+python gpu_sim.py -e "FrankaMoveBenchmark-v1" \
+ -n=2048 -o=state --sim-freq=100 --control-freq=50
+
+python gpu_sim.py -e "FrankaPickCubeBenchmark-v1" \
+ -n=2048 -o=state --sim-freq=100 --control-freq=50
+```
+
+### Isaac Lab
+
+```bash
+python isaac_lab_gpu_sim.py --task Isaac-Cartpole-Direct-Benchmark-v0 --headless \
+ --num-envs=2048 --obs-mode=state --save-results
+
+python isaac_lab_gpu_sim.py --task Isaac-Cartpole-RGB-Camera-Direct-Benchmark-v0 --headless \
+ --num-cams=1 --cam-width=512 --cam-height=512 --enable_cameras \
+ --num-envs=128 --obs-mode=rgb --save-results
+```
+
+## Generating Plots
+
+Comparing ManiSkill and Isaac Lab
+```bash
+python plot_results.py -e CartpoleBalanceBenchmark-v1 -f benchmark_results/maniskill.csv benchmark_results/isaac_lab.csv
+```
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/__init__.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/__init__.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e596cebd4e43c2299836e085fe2f2538d35c6932
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/__init__.py
@@ -0,0 +1,8 @@
+try:
+ from .maniskill import *
+except:
+ pass
+try:
+ from .isaaclab import *
+except:
+ pass
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/isaaclab/__init__.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/isaaclab/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a68208684cb2c60dfc660359315f79acd0a574f
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/isaaclab/__init__.py
@@ -0,0 +1,30 @@
+import gymnasium as gym
+
+from .franka import FrankaEnvCfg
+from .cartpole_visual import CartpoleRGBCameraBenchmarkEnvCfg
+from .cartpole_state import CartpoleEnvCfg
+gym.register(
+ id="Isaac-Cartpole-RGB-Camera-Direct-Benchmark-v0",
+ entry_point="envs.isaaclab.cartpole_visual:CartpoleCameraBenchmarkEnv",
+ disable_env_checker=True,
+ kwargs={
+ "env_cfg_entry_point": CartpoleRGBCameraBenchmarkEnvCfg,
+ },
+)
+gym.register(
+ id="Isaac-Cartpole-Direct-Benchmark-v0",
+ entry_point="envs.isaaclab.cartpole_state:CartpoleBenchmarkEnv",
+ disable_env_checker=True,
+ kwargs={
+ "env_cfg_entry_point": CartpoleEnvCfg,
+ },
+)
+
+gym.register(
+ id="Isaac-Franka-Direct-Benchmark-v0",
+ entry_point="envs.isaaclab.franka:FrankaBenchmarkEnv",
+ disable_env_checker=True,
+ kwargs={
+ "env_cfg_entry_point": FrankaEnvCfg,
+ },
+)
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/isaaclab/cartpole_state.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/isaaclab/cartpole_state.py
new file mode 100644
index 0000000000000000000000000000000000000000..44407dc00e31a1a9df508368809f8462d5178f18
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/isaaclab/cartpole_state.py
@@ -0,0 +1,137 @@
+# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from __future__ import annotations
+
+import math
+import torch
+from collections.abc import Sequence
+
+from omni.isaac.lab_assets.cartpole import CARTPOLE_CFG
+
+import omni.isaac.lab.sim as sim_utils
+from omni.isaac.lab.assets import Articulation, ArticulationCfg
+from omni.isaac.lab.envs import DirectRLEnv, DirectRLEnvCfg
+from omni.isaac.lab.scene import InteractiveSceneCfg
+from omni.isaac.lab.sim import SimulationCfg
+from omni.isaac.lab.sim.spawners.from_files import GroundPlaneCfg, spawn_ground_plane
+from omni.isaac.lab.utils import configclass
+from omni.isaac.lab.utils.math import sample_uniform
+
+
+@configclass
+class CartpoleEnvCfg(DirectRLEnvCfg):
+ # simulation
+ sim: SimulationCfg = SimulationCfg(dt=1 / 120)
+
+ # robot
+ robot_cfg: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="/World/envs/env_.*/Robot")
+ cart_dof_name = "slider_to_cart"
+ pole_dof_name = "cart_to_pole"
+
+ # scene
+ scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=4096, env_spacing=4.0, replicate_physics=True)
+
+ # env
+ decimation = 2
+ episode_length_s = 5.0
+ action_scale = 100.0 # [N]
+ num_actions = 1
+ num_observations = 4
+ num_states = 0
+
+ # reset
+ max_cart_pos = 3.0 # the cart is reset if it exceeds that position [m]
+ initial_pole_angle_range = [-0.25, 0.25] # the range in which the pole angle is sampled from on reset [rad]
+
+ # reward scales
+ rew_scale_alive = 1.0
+ rew_scale_terminated = -2.0
+ rew_scale_pole_pos = -1.0
+ rew_scale_cart_vel = -0.01
+ rew_scale_pole_vel = -0.005
+
+
+class CartpoleBenchmarkEnv(DirectRLEnv):
+ cfg: CartpoleEnvCfg
+
+ def __init__(self, cfg: CartpoleEnvCfg, render_mode: str | None = None, **kwargs):
+ super().__init__(cfg, render_mode, **kwargs)
+
+ self._cart_dof_idx, _ = self.cartpole.find_joints(self.cfg.cart_dof_name)
+ self._pole_dof_idx, _ = self.cartpole.find_joints(self.cfg.pole_dof_name)
+ self.action_scale = self.cfg.action_scale
+
+ self.joint_pos = self.cartpole.data.joint_pos
+ self.joint_vel = self.cartpole.data.joint_vel
+
+ def _setup_scene(self):
+ self.cartpole = Articulation(self.cfg.robot_cfg)
+ # add ground plane
+ spawn_ground_plane(prim_path="/World/ground", cfg=GroundPlaneCfg())
+ # clone, filter, and replicate
+ self.scene.clone_environments(copy_from_source=False)
+ self.scene.filter_collisions(global_prim_paths=[])
+ # add articultion to scene
+ self.scene.articulations["cartpole"] = self.cartpole
+ # add lights
+ light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
+ light_cfg.func("/World/Light", light_cfg)
+
+ def _pre_physics_step(self, actions: torch.Tensor) -> None:
+ self.actions = self.action_scale * actions.clone()
+
+ def _apply_action(self) -> None:
+ self.cartpole.set_joint_effort_target(self.actions, joint_ids=self._cart_dof_idx)
+
+ def _get_observations(self) -> dict:
+ obs = torch.cat(
+ (
+ self.joint_pos[:, self._pole_dof_idx[0]].unsqueeze(dim=1),
+ self.joint_vel[:, self._pole_dof_idx[0]].unsqueeze(dim=1),
+ self.joint_pos[:, self._cart_dof_idx[0]].unsqueeze(dim=1),
+ self.joint_vel[:, self._cart_dof_idx[0]].unsqueeze(dim=1),
+ ),
+ dim=-1,
+ )
+ observations = {"policy": obs}
+ return observations
+
+ def _get_rewards(self) -> torch.Tensor:
+ total_reward = torch.zeros((self.num_envs,), device=self.sim.device)
+ return total_reward
+
+ def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]:
+ # self.joint_pos = self.cartpole.data.joint_pos
+ # self.joint_vel = self.cartpole.data.joint_vel
+
+ time_out = self.episode_length_buf >= self.max_episode_length - 1
+ # out_of_bounds = torch.any(torch.abs(self.joint_pos[:, self._cart_dof_idx]) > self.cfg.max_cart_pos, dim=1)
+ # out_of_bounds = out_of_bounds | torch.any(torch.abs(self.joint_pos[:, self._pole_dof_idx]) > math.pi / 2, dim=1)
+ return time_out, time_out
+
+ def _reset_idx(self, env_ids: Sequence[int] | None):
+ if env_ids is None:
+ env_ids = self.cartpole._ALL_INDICES
+ super()._reset_idx(env_ids)
+
+ joint_pos = self.cartpole.data.default_joint_pos[env_ids]
+ joint_pos[:, self._pole_dof_idx] += sample_uniform(
+ self.cfg.initial_pole_angle_range[0] * math.pi,
+ self.cfg.initial_pole_angle_range[1] * math.pi,
+ joint_pos[:, self._pole_dof_idx].shape,
+ joint_pos.device,
+ )
+ joint_vel = self.cartpole.data.default_joint_vel[env_ids]
+
+ default_root_state = self.cartpole.data.default_root_state[env_ids]
+ default_root_state[:, :3] += self.scene.env_origins[env_ids]
+
+ self.joint_pos[env_ids] = joint_pos
+ self.joint_vel[env_ids] = joint_vel
+
+ self.cartpole.write_root_pose_to_sim(default_root_state[:, :7], env_ids)
+ self.cartpole.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids)
+ self.cartpole.write_joint_state_to_sim(joint_pos, joint_vel, None, env_ids)
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/isaaclab/cartpole_visual.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/isaaclab/cartpole_visual.py
new file mode 100644
index 0000000000000000000000000000000000000000..ef80acaed36d1164dadfe5627498f8fca74ff5ca
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/isaaclab/cartpole_visual.py
@@ -0,0 +1,231 @@
+# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from __future__ import annotations
+
+import gymnasium as gym
+import math
+import numpy as np
+import torch
+from collections.abc import Sequence
+
+from omni.isaac.lab_assets.cartpole import CARTPOLE_CFG
+
+import omni.isaac.lab.sim as sim_utils
+from omni.isaac.lab.assets import Articulation, ArticulationCfg
+from omni.isaac.lab.envs import DirectRLEnv, DirectRLEnvCfg, ViewerCfg
+from omni.isaac.lab.scene import InteractiveSceneCfg
+from omni.isaac.lab.sensors import TiledCamera, TiledCameraCfg
+from omni.isaac.lab.sim import SimulationCfg
+from omni.isaac.lab.sim.spawners.from_files import GroundPlaneCfg, spawn_ground_plane
+from omni.isaac.lab.utils import configclass
+from omni.isaac.lab.utils.math import sample_uniform
+
+
+@configclass
+class CartpoleRGBCameraBenchmarkEnvCfg(DirectRLEnvCfg):
+    """Configuration for the tiled-camera cartpole benchmark environment.
+
+    Mirrors the standard Isaac Lab cartpole-camera config; reward scales are
+    kept for compatibility even though the benchmark env returns zero reward.
+    """
+
+    # simulation
+    sim: SimulationCfg = SimulationCfg(dt=1 / 120, render_interval=2)
+
+    # robot
+    robot_cfg: ArticulationCfg = CARTPOLE_CFG.replace(prim_path="/World/envs/env_.*/Robot")
+    cart_dof_name = "slider_to_cart"
+    pole_dof_name = "cart_to_pole"
+
+    # camera
+    # NOTE(review): the benchmark env builds its own per-camera configs in
+    # __init__; this default camera appears to be used only for validation.
+    tiled_camera: TiledCameraCfg = TiledCameraCfg(
+        prim_path="/World/envs/env_.*/Camera",
+        offset=TiledCameraCfg.OffsetCfg(pos=(-7.0, 0.0, 3.0), rot=(0.9945, 0.0, 0.1045, 0.0), convention="world"),
+        data_types=["rgb"],
+        spawn=sim_utils.PinholeCameraCfg(
+            focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 20.0)
+        ),
+        width=128,
+        height=128,
+    )
+
+    # change viewer settings
+    viewer = ViewerCfg(eye=(20.0, 20.0, 20.0))
+
+    # scene
+    scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=256, env_spacing=25.0, replicate_physics=True)
+
+    # env
+    decimation = 2
+    episode_length_s = 5.0
+    action_scale = 100.0  # [N]
+    num_actions = 1
+    num_channels = 3
+    num_observations = num_channels * tiled_camera.height * tiled_camera.width
+    num_states = 0
+
+    # reset
+    max_cart_pos = 3.0  # the cart is reset if it exceeds that position [m]
+    initial_pole_angle_range = [-0.125, 0.125]  # the range in which the pole angle is sampled from on reset [rad]
+
+    # reward scales
+    rew_scale_alive = 1.0
+    rew_scale_terminated = -2.0
+    rew_scale_pole_pos = -1.0
+    rew_scale_cart_vel = -0.01
+    rew_scale_pole_vel = -0.005
+
+class CartpoleCameraBenchmarkEnv(DirectRLEnv):
+    """Benchmark environment for CartPole task with a camera.
+
+    Modification from original:
+    - Remove reward / evaluation functions
+    - Support RGB+Depth and multiple camera setups
+    """
+
+    cfg: CartpoleRGBCameraBenchmarkEnvCfg
+
+    def __init__(
+        self, cfg: CartpoleRGBCameraBenchmarkEnvCfg, render_mode: str | None = None, camera_width=128, camera_height=128, num_cameras=1, obs_mode="rgb", **kwargs
+    ):
+        # configure cameras
+        # obs_mode is matched by substring, so e.g. "rgb+depth" enables both.
+        data_types = []
+        if "rgb" in obs_mode:
+            data_types.append("rgb")
+        if "depth" in obs_mode:
+            data_types.append("depth")
+        if "segmentation" in obs_mode:
+            data_types.append("semantic_segmentation")
+        self.data_types = data_types
+
+        # One TiledCameraCfg per requested camera; the sensors themselves are
+        # instantiated in _setup_scene, which super().__init__ triggers.
+        self.num_cameras = num_cameras
+        self.tiled_camera_cfgs = []
+        for i in range(num_cameras):
+            tiled_camera_cfg = TiledCameraCfg(
+                prim_path=f"/World/envs/env_.*/Camera_{i}",
+                offset=TiledCameraCfg.OffsetCfg(pos=(-4.0, 0.0, 3.0), rot=(0.9945, 0.0, 0.1045, 0.0), convention="world"),
+                data_types=data_types,
+                spawn=sim_utils.PinholeCameraCfg(
+                    focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 24.7)
+                ),
+                width=camera_width,
+                height=camera_height,
+            )
+            self.tiled_camera_cfgs.append(tiled_camera_cfg)
+        super().__init__(cfg, render_mode, **kwargs)
+
+        self._cart_dof_idx, _ = self._cartpole.find_joints(self.cfg.cart_dof_name)
+        self._pole_dof_idx, _ = self._cartpole.find_joints(self.cfg.pole_dof_name)
+        self.action_scale = self.cfg.action_scale
+
+        # Cached references to the articulation's joint-state tensors.
+        self.joint_pos = self._cartpole.data.joint_pos
+        self.joint_vel = self._cartpole.data.joint_vel
+
+        # NOTE(review): this validates cfg.tiled_camera, but the sensors built
+        # above come from self.tiled_camera_cfgs, which may carry several data
+        # types. Looks like a leftover from the single-camera original —
+        # confirm whether this check should apply to tiled_camera_cfgs instead.
+        if len(self.cfg.tiled_camera.data_types) != 1:
+            raise ValueError(
+                "The Cartpole camera environment only supports one image type at a time but the following were"
+                f" provided: {self.cfg.tiled_camera.data_types}"
+            )
+
+    def close(self):
+        """Cleanup for the environment."""
+        super().close()
+
+    def _configure_gym_env_spaces(self):
+        """Configure the action and observation spaces for the Gym environment."""
+        # observation space (unbounded since we don't impose any limits)
+        self.num_actions = self.cfg.num_actions
+        self.num_observations = self.cfg.num_observations
+        self.num_states = self.cfg.num_states
+
+        # set up spaces
+        # NOTE(review): only an "rgb" entry is declared here even when depth /
+        # segmentation data types were requested — confirm this is intended.
+        self.single_observation_space = gym.spaces.Dict()
+        self.single_observation_space["rgb"] = gym.spaces.Box(
+            low=-np.inf,
+            high=np.inf,
+            shape=(self.num_cameras, self.tiled_camera_cfgs[0].height, self.tiled_camera_cfgs[0].width, 3),
+        )
+        self.single_action_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(self.num_actions,))
+
+        # batch the spaces for vectorized environments
+        self.observation_space = gym.vector.utils.batch_space(self.single_observation_space, self.num_envs)
+        self.action_space = gym.vector.utils.batch_space(self.single_action_space, self.num_envs)
+
+        # RL specifics
+        self.actions = torch.zeros(self.num_envs, self.num_actions, device=self.sim.device)
+
+    def _setup_scene(self):
+        """Setup the scene with the cartpole and camera."""
+        self._cartpole = Articulation(self.cfg.robot_cfg)
+        # if self.has_rgb:
+        self.tiled_cameras = [TiledCamera(cfg) for cfg in self.tiled_camera_cfgs]
+        # if self.has_depth:
+        #     self.tiled_depth_cameras = [TiledCamera(cfg) for cfg in self.tiled_depth_camera_cfgs]
+
+        # add ground plane
+        spawn_ground_plane(prim_path="/World/ground", cfg=GroundPlaneCfg(size=(500, 500)))
+        # clone, filter, and replicate
+        self.scene.clone_environments(copy_from_source=False)
+        self.scene.filter_collisions(global_prim_paths=[])
+
+        # add articulation and sensors to scene
+        self.scene.articulations["cartpole"] = self._cartpole
+        for i in range(self.num_cameras):
+            self.scene.sensors[f"tiled_camera_{i}"] = self.tiled_cameras[i]
+        # add lights
+        light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
+        light_cfg.func("/World/Light", light_cfg)
+
+    def _pre_physics_step(self, actions: torch.Tensor) -> None:
+        # Scale the policy action into an effort target; applied in _apply_action.
+        self.actions = self.action_scale * actions.clone()
+
+    def _apply_action(self) -> None:
+        self._cartpole.set_joint_effort_target(self.actions, joint_ids=self._cart_dof_idx)
+
+    def _get_observations(self) -> dict:
+        """Collect per-camera outputs as observations["sensors"]["cam_i"][data_type]."""
+        # data_type = "rgb" if "rgb" in self.cfg.tiled_camera.data_types else "depth"
+        # observations = {"policy": self._tiled_camera.data.output[data_type].clone()}
+        observations = {"sensors": {}}
+        for i in range(self.num_cameras):
+            observations["sensors"][f"cam_{i}"] = {}
+        for i, (cam, cfg) in enumerate(zip(self.tiled_cameras, self.tiled_camera_cfgs)):
+            for data_type in self.data_types:
+                observations["sensors"][f"cam_{i}"][data_type] = cam.data.output[data_type].clone()
+        # if self.has_depth:
+        #     for i, (cam, cfg) in enumerate(zip(self.tiled_depth_cameras, self.tiled_depth_camera_cfgs)):
+        #         observations["sensors"][f"cam_{i}"]["depth"] = cam.data.output["depth"].clone()
+        return observations
+
+    def _get_rewards(self) -> torch.Tensor:
+        # Benchmark env: reward computation removed, always zero.
+        total_reward = torch.zeros((self.num_envs,), device=self.sim.device)
+        return total_reward
+
+    def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]:
+        """Return (terminated, truncated): cart/pole out of bounds vs. episode time-out."""
+        self.joint_pos = self._cartpole.data.joint_pos
+        self.joint_vel = self._cartpole.data.joint_vel
+
+        time_out = self.episode_length_buf >= self.max_episode_length - 1
+        out_of_bounds = torch.any(torch.abs(self.joint_pos[:, self._cart_dof_idx]) > self.cfg.max_cart_pos, dim=1)
+        out_of_bounds = out_of_bounds | torch.any(torch.abs(self.joint_pos[:, self._pole_dof_idx]) > math.pi / 2, dim=1)
+        return out_of_bounds, time_out
+
+    def _reset_idx(self, env_ids: Sequence[int] | None):
+        """Reset the given env indices: randomize pole angle, restore default root state."""
+        if env_ids is None:
+            env_ids = self._cartpole._ALL_INDICES
+        super()._reset_idx(env_ids)
+
+        # Randomize the pole angle within the configured fraction of pi.
+        joint_pos = self._cartpole.data.default_joint_pos[env_ids]
+        joint_pos[:, self._pole_dof_idx] += sample_uniform(
+            self.cfg.initial_pole_angle_range[0] * math.pi,
+            self.cfg.initial_pole_angle_range[1] * math.pi,
+            joint_pos[:, self._pole_dof_idx].shape,
+            joint_pos.device,
+        )
+        joint_vel = self._cartpole.data.default_joint_vel[env_ids]
+
+        # Shift the default root pose by each environment's origin.
+        default_root_state = self._cartpole.data.default_root_state[env_ids]
+        default_root_state[:, :3] += self.scene.env_origins[env_ids]
+
+        self.joint_pos[env_ids] = joint_pos
+        self.joint_vel[env_ids] = joint_vel
+
+        self._cartpole.write_root_pose_to_sim(default_root_state[:, :7], env_ids)
+        self._cartpole.write_root_velocity_to_sim(default_root_state[:, 7:], env_ids)
+        self._cartpole.write_joint_state_to_sim(joint_pos, joint_vel, None, env_ids)
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/isaaclab/franka.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/isaaclab/franka.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab8be9b4a496d73a31b2e2c3e2cd718a7efdb2d9
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/isaaclab/franka.py
@@ -0,0 +1,288 @@
+# Copyright (c) 2022-2024, The Isaac Lab Project Developers.
+# All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+
+from __future__ import annotations
+
+import numpy as np
+import torch
+
+from omni.isaac.core.utils.stage import get_current_stage
+from omni.isaac.core.utils.torch.transformations import tf_combine, tf_inverse, tf_vector
+from pxr import UsdGeom
+
+import omni.isaac.lab.sim as sim_utils
+from omni.isaac.lab.actuators.actuator_cfg import ImplicitActuatorCfg
+from omni.isaac.lab.assets import Articulation, ArticulationCfg, RigidObject, RigidObjectCfg
+from omni.isaac.lab.envs import DirectRLEnv, DirectRLEnvCfg
+from omni.isaac.lab.scene import InteractiveSceneCfg
+from omni.isaac.lab.sim import SimulationCfg
+from omni.isaac.lab.terrains import TerrainImporterCfg
+from omni.isaac.lab.utils import configclass
+from omni.isaac.lab.utils.assets import ISAAC_NUCLEUS_DIR
+from omni.isaac.lab.utils.math import sample_uniform
+from omni.isaac.lab.sensors import TiledCamera, TiledCameraCfg
+
+
+@configclass
+class FrankaEnvCfg(DirectRLEnvCfg):
+    """Configuration for the Franka manipulation benchmark environment.
+
+    Cube / in-hand object assets are kept commented out below for reference.
+    """
+
+    # env
+    episode_length_s = 8.3333  # 500 timesteps
+    decimation = 2
+    num_actions = 9
+    num_observations = 23
+    num_states = 0
+
+    # simulation
+    sim: SimulationCfg = SimulationCfg(
+        dt=1 / 120,
+        render_interval=decimation,
+        disable_contact_processing=True,
+        physics_material=sim_utils.RigidBodyMaterialCfg(
+            friction_combine_mode="multiply",
+            restitution_combine_mode="multiply",
+            static_friction=1.0,
+            dynamic_friction=1.0,
+            restitution=0.0,
+        ),
+    )
+
+    # scene
+    scene: InteractiveSceneCfg = InteractiveSceneCfg(num_envs=4096, env_spacing=20.0, replicate_physics=True)
+    # add cube
+    # cube: RigidObjectCfg = RigidObjectCfg(
+    #     prim_path="/World/envs/env_.*/cube",
+    #     spawn=sim_utils.CuboidCfg(
+    #         size=(0.1, 0.1, 0.1),
+    #         rigid_props=sim_utils.RigidBodyPropertiesCfg(max_depenetration_velocity=1.0, disable_gravity=False),
+    #         mass_props=sim_utils.MassPropertiesCfg(mass=1.0),
+    #         physics_material=sim_utils.RigidBodyMaterialCfg(),
+    #         collision_props=sim_utils.CollisionPropertiesCfg(),
+    #         visual_material=sim_utils.PreviewSurfaceCfg(diffuse_color=(0.5, 0.0, 0.0)),
+    #     ),
+    #     init_state=RigidObjectCfg.InitialStateCfg(pos=(1.0, -0.2, 0.05)),
+    # )
+    # robot
+    robot = ArticulationCfg(
+        prim_path="/World/envs/env_.*/Robot",
+        spawn=sim_utils.UsdFileCfg(
+            usd_path=f"{ISAAC_NUCLEUS_DIR}/Robots/Franka/franka_instanceable.usd",
+            activate_contact_sensors=False,
+            rigid_props=sim_utils.RigidBodyPropertiesCfg(
+                disable_gravity=True,
+                max_depenetration_velocity=5.0,
+            ),
+            articulation_props=sim_utils.ArticulationRootPropertiesCfg(
+                enabled_self_collisions=True, solver_position_iteration_count=8, solver_velocity_iteration_count=0
+            ),
+        ),
+        init_state=ArticulationCfg.InitialStateCfg(
+            joint_pos={
+                "panda_joint1": 0.5,
+                "panda_joint2": np.pi / 8,
+                "panda_joint3": 0,
+                "panda_joint4": -np.pi * 5 / 8,
+                "panda_joint5": 0,
+                "panda_joint6": np.pi * 3 / 4,
+                "panda_joint7": np.pi / 4,
+                # "panda_finger_joint1": 0.04,
+                # "panda_finger_joint2": 0.04,
+                "panda_finger_joint.*": 0.035,
+            },
+            pos=(1.5, 0.0, 0.0),
+            rot=(0.0, 0.0, 0.0, 1.0),
+        ),
+        actuators={
+            "panda_shoulder": ImplicitActuatorCfg(
+                joint_names_expr=["panda_joint[1-4]"],
+                effort_limit=87.0,
+                velocity_limit=2.175,
+                stiffness=80.0,
+                damping=4.0,
+            ),
+            "panda_forearm": ImplicitActuatorCfg(
+                joint_names_expr=["panda_joint[5-7]"],
+                effort_limit=12.0,
+                velocity_limit=2.61,
+                stiffness=80.0,
+                damping=4.0,
+            ),
+            "panda_hand": ImplicitActuatorCfg(
+                joint_names_expr=["panda_finger_joint.*"],
+                effort_limit=200.0,
+                velocity_limit=0.2,
+                stiffness=2e3,
+                damping=1e2,
+            ),
+        },
+    )
+    # in-hand object
+    # object: RigidObjectCfg = RigidObjectCfg(
+    #     prim_path="/World/envs/env_.*/object",
+    #     spawn=sim_utils.UsdFileCfg(
+    #         usd_path=f"{ISAAC_NUCLEUS_DIR}/Props/Blocks/DexCube/dex_cube_instanceable.usd",
+    #         rigid_props=sim_utils.RigidBodyPropertiesCfg(
+    #             kinematic_enabled=False,
+    #             disable_gravity=False,
+    #             enable_gyroscopic_forces=True,
+    #             solver_position_iteration_count=8,
+    #             solver_velocity_iteration_count=0,
+    #             sleep_threshold=0.005,
+    #             stabilization_threshold=0.0025,
+    #             max_depenetration_velocity=1000.0,
+    #         ),
+    #         mass_props=sim_utils.MassPropertiesCfg(density=567.0),
+    #     ),
+    #     init_state=RigidObjectCfg.InitialStateCfg(pos=(1.0, -0.2, 0.1), rot=(1.0, 0.0, 0.0, 0.0)),
+    # )
+
+    # ground plane
+    terrain = TerrainImporterCfg(
+        prim_path="/World/ground",
+        terrain_type="plane",
+        collision_group=-1,
+        physics_material=sim_utils.RigidBodyMaterialCfg(
+            friction_combine_mode="multiply",
+            restitution_combine_mode="multiply",
+            static_friction=1.0,
+            dynamic_friction=1.0,
+            restitution=0.0,
+        ),
+    )
+
+    # NOTE(review): action_scale is referenced only in a commented-out line of
+    # FrankaBenchmarkEnv._pre_physics_step — confirm it is still needed.
+    action_scale = 50
+    dof_velocity_scale = 0.1
+
+
+class FrankaBenchmarkEnv(DirectRLEnv):
+    """Franka benchmark environment: delta-joint-position control, zero reward,
+    configurable tiled cameras. Call order per step:
+
+    pre-physics step calls
+      |-- _pre_physics_step(action)
+      |-- _apply_action()
+    post-physics step calls
+      |-- _get_dones()
+      |-- _get_rewards()
+      |-- _reset_idx(env_ids)
+      |-- _get_observations()
+    """
+
+    cfg: FrankaEnvCfg
+
+    def __init__(self, cfg: FrankaEnvCfg, render_mode: str | None = None, camera_width=128, camera_height=128, num_cameras=1, obs_mode="rgb", **kwargs):
+        # configure cameras
+        # obs_mode is matched by substring, so e.g. "rgb+depth" enables both.
+        data_types = []
+        if "rgb" in obs_mode:
+            data_types.append("rgb")
+        if "depth" in obs_mode:
+            data_types.append("depth")
+        if "segmentation" in obs_mode:
+            data_types.append("semantic_segmentation")
+        self.data_types = data_types
+        self.obs_mode = obs_mode
+        # One TiledCameraCfg per requested camera; instantiated in _setup_scene.
+        self.num_cameras = num_cameras
+        self.tiled_camera_cfgs = []
+        for i in range(num_cameras):
+            tiled_camera_cfg = TiledCameraCfg(
+                prim_path=f"/World/envs/env_.*/Camera_{i}",
+                offset=TiledCameraCfg.OffsetCfg(pos=(-0.4, 0.0, 1.0), rot=(0.9689124, 0.0, 0.247404, 0.0), convention="world"),
+                data_types=data_types,
+                spawn=sim_utils.PinholeCameraCfg(
+                    focal_length=24.0, focus_distance=400.0, horizontal_aperture=20.955, clipping_range=(0.1, 15.0)
+                ),
+                width=camera_width,
+                height=camera_height,
+            )
+            self.tiled_camera_cfgs.append(tiled_camera_cfg)
+        super().__init__(cfg, render_mode, **kwargs)
+        # effective control timestep (physics dt * decimation)
+        self.dt = self.cfg.sim.dt * self.cfg.decimation
+
+        # create auxiliary variables for computing applied action, observations and rewards
+        self.robot_dof_lower_limits = self._robot.data.soft_joint_pos_limits[0, :, 0].to(device=self.device)
+        self.robot_dof_upper_limits = self._robot.data.soft_joint_pos_limits[0, :, 1].to(device=self.device)
+
+        # Slow down the finger joints relative to the arm. NOTE(review): these
+        # scales are only used by the commented-out velocity-target controller
+        # in _pre_physics_step — confirm whether they are still needed.
+        self.robot_dof_speed_scales = torch.ones_like(self.robot_dof_lower_limits)
+        self.robot_dof_speed_scales[self._robot.find_joints("panda_finger_joint1")[0]] = 0.1
+        self.robot_dof_speed_scales[self._robot.find_joints("panda_finger_joint2")[0]] = 0.1
+
+        self.robot_dof_targets = torch.zeros((self.num_envs, self._robot.num_joints), device=self.device)
+
+    def _setup_scene(self):
+        """Create the robot, terrain, cameras and lights, then replicate envs."""
+        self._robot = Articulation(self.cfg.robot)
+        # self._cube = RigidObject(self.cfg.cube)
+        self.scene.articulations["robot"] = self._robot
+        # self.scene.rigid_objects["cube"] = self._cube
+        # self._object = RigidObject(self.cfg.object)
+        # self.scene.rigid_objects["object"] = self._object
+
+        self.cfg.terrain.num_envs = self.scene.cfg.num_envs
+        self.cfg.terrain.env_spacing = self.scene.cfg.env_spacing
+        self._terrain = self.cfg.terrain.class_type(self.cfg.terrain)
+        self.tiled_cameras = [TiledCamera(cfg) for cfg in self.tiled_camera_cfgs]
+        # clone, filter, and replicate
+        self.scene.clone_environments(copy_from_source=False)
+        self.scene.filter_collisions(global_prim_paths=[self.cfg.terrain.prim_path])
+
+        # add lights
+        light_cfg = sim_utils.DomeLightCfg(intensity=2000.0, color=(0.75, 0.75, 0.75))
+        light_cfg.func("/World/Light", light_cfg)
+        for i in range(self.num_cameras):
+            self.scene.sensors[f"tiled_camera_{i}"] = self.tiled_cameras[i]
+
+    # pre-physics step calls
+
+    def _pre_physics_step(self, actions: torch.Tensor):
+        # Clamp to [-1, 1] then scale to a max joint delta of 2 per step.
+        # NOTE(review): cfg.action_scale is bypassed here — confirm intended.
+        self.actions = actions.clone().clamp(-1, 1) * 2
+        # targets = self.robot_dof_targets + self.robot_dof_speed_scales * self.dt * self.actions * self.cfg.action_scale
+        # delta joint pos controller
+        self.robot_dof_targets[:] = torch.clamp(self.actions + self._robot.data.joint_pos, self.robot_dof_lower_limits, self.robot_dof_upper_limits)
+
+    def _apply_action(self):
+        self._robot.set_joint_position_target(self.robot_dof_targets)
+
+    # post-physics step calls
+
+    def _get_dones(self) -> tuple[torch.Tensor, torch.Tensor]:
+        # Benchmark env: never terminates early, only times out.
+        truncated = self.episode_length_buf >= self.max_episode_length - 1
+        return torch.zeros_like(truncated), truncated
+
+    def _get_rewards(self) -> torch.Tensor:
+        # Benchmark env: reward computation removed, always zero.
+        total_reward = torch.zeros((self.num_envs,), device=self.sim.device)
+        return total_reward
+
+    def _reset_idx(self, env_ids: torch.Tensor | None):
+        """Reset robot joints for the given env indices (noise range is currently zero)."""
+        super()._reset_idx(env_ids)
+        # robot state
+        joint_pos = self._robot.data.default_joint_pos[env_ids] + sample_uniform(
+            0.0, 0.0,
+            (len(env_ids), self._robot.num_joints),
+            self.device,
+        )
+        joint_pos = torch.clamp(joint_pos, self.robot_dof_lower_limits, self.robot_dof_upper_limits)
+        joint_vel = torch.zeros_like(joint_pos)
+        self._robot.set_joint_position_target(joint_pos, env_ids=env_ids)
+        self._robot.write_joint_state_to_sim(joint_pos, joint_vel, env_ids=env_ids)
+
+    def _get_visual_observations(self) -> dict:
+        """Collect per-camera outputs as {"sensors": {"cam_i": {data_type: tensor}}}."""
+        observations = {"sensors": {}}
+        for i in range(self.num_cameras):
+            observations["sensors"][f"cam_{i}"] = {}
+        for i, (cam, cfg) in enumerate(zip(self.tiled_cameras, self.tiled_camera_cfgs)):
+            for data_type in self.data_types:
+                observations["sensors"][f"cam_{i}"][data_type] = cam.data.output[data_type].clone()
+        return observations
+
+    def _get_observations(self) -> dict:
+        """Return state obs (scaled joint pos/vel); adds camera obs unless obs_mode == "state"."""
+        dof_pos_scaled = (
+            2.0
+            * (self._robot.data.joint_pos - self.robot_dof_lower_limits)
+            / (self.robot_dof_upper_limits - self.robot_dof_lower_limits)
+            - 1.0
+        )
+        obs = torch.cat(
+            (
+                dof_pos_scaled,
+                self._robot.data.joint_vel * self.cfg.dof_velocity_scale,
+            ),
+            dim=-1,
+        )
+        obs = {"state": torch.clamp(obs, -5.0, 5.0)}
+        if self.obs_mode != "state":
+            obs["sensors"] = self._get_visual_observations()["sensors"]
+        return obs
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/assets/cartpole.xml b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/assets/cartpole.xml
new file mode 100644
index 0000000000000000000000000000000000000000..1c4e1010db6861f4dd4562e449aff127530b6930
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/assets/cartpole.xml
@@ -0,0 +1,37 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/assets/common/materials.xml b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/assets/common/materials.xml
new file mode 100644
index 0000000000000000000000000000000000000000..2bf8a7df73de436d1ad6c00bc6f474f53836fec7
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/assets/common/materials.xml
@@ -0,0 +1,27 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/assets/common/skybox.xml b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/assets/common/skybox.xml
new file mode 100644
index 0000000000000000000000000000000000000000..67bd1b7552e323e96d296263d2d47011241b7c5f
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/assets/common/skybox.xml
@@ -0,0 +1,6 @@
+
+
+
+
+
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/assets/common/visual.xml b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/assets/common/visual.xml
new file mode 100644
index 0000000000000000000000000000000000000000..fca4585cfdac41871ea2f6b8374908aed0e0b5bf
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/assets/common/visual.xml
@@ -0,0 +1,7 @@
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/cartpole.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/cartpole.py
new file mode 100644
index 0000000000000000000000000000000000000000..0cc5301524c51ebea7bc9a9a8be64aeaba3d3023
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/maniskill/cartpole.py
@@ -0,0 +1,140 @@
+import os
+import numpy as np
+import sapien
+import torch
+from mani_skill.agents.base_agent import BaseAgent
+from mani_skill.agents.controllers.passive_controller import PassiveControllerConfig
+from mani_skill.agents.controllers.pd_joint_pos import PDJointPosControllerConfig
+from mani_skill.envs.tasks.control.cartpole import CartpoleBalanceEnv
+from mani_skill.sensors.camera import CameraConfig
+from mani_skill.utils import sapien_utils
+from mani_skill.utils.building.ground import build_ground
+from mani_skill.utils.registration import register_env
+from mani_skill.utils.structs.pose import Pose
+from mani_skill.utils.structs.types import SceneConfig, SimConfig
+from typing import Optional, Union
+MJCF_FILE = f"{os.path.join(os.path.dirname(__file__), 'assets/cartpole.xml')}"
+
+
+class CartPoleRobot(BaseAgent):
+    """Cartpole agent loaded from the bundled MJCF file: an actuated slider
+    joint plus a passive pole hinge."""
+
+    uid = "cart_pole"
+    mjcf_path = MJCF_FILE
+    disable_self_collisions = True
+
+    @property
+    def _controller_configs(self):
+        # NOTE it is impossible to copy joint properties from original xml files, have to tune manually until
+        # it looks approximately correct
+        pd_joint_delta_pos = PDJointPosControllerConfig(
+            ["slider"],
+            -1,
+            1,
+            damping=200,
+            stiffness=2000,
+            use_delta=True,
+        )
+        # the pole hinge is left passive (no damping, no friction)
+        rest = PassiveControllerConfig(["hinge_1"], damping=0, friction=0)
+        return dict(
+            pd_joint_delta_pos=dict(
+                slider=pd_joint_delta_pos, rest=rest, balance_passive_force=False
+            )
+        )
+
+    def _load_articulation(self, initial_pose: Optional[Union[sapien.Pose, Pose]] = None):
+        """
+        Load the robot articulation from the MJCF file.
+
+        Args:
+            initial_pose: Optional pose applied to the articulation builder
+                before building.
+        """
+        loader = self.scene.create_mjcf_loader()
+        asset_path = str(self.mjcf_path)
+
+        loader.name = self.uid
+
+        # only need the robot (first articulation in the file)
+        builder = loader.parse(asset_path)["articulation_builders"][0]
+        builder.initial_pose = initial_pose
+        self.robot = builder.build(name="cartpole")
+        assert self.robot is not None, f"Fail to load URDF/MJCF from {asset_path}"
+
+        # Cache robot link ids
+        self.robot_link_names = [link.name for link in self.robot.get_links()]
+
+
+@register_env("CartpoleBalanceBenchmark-v1", max_episode_steps=1000)
+class CartPoleBalanceBenchmarkEnv(CartpoleBalanceEnv):
+    """Cartpole-balance benchmark variant: configurable camera count/size,
+    dense reward disabled (always zero)."""
+
+    def __init__(
+        self, *args, camera_width=128, camera_height=128, num_cameras=1, **kwargs
+    ):
+        self.camera_width = camera_width
+        self.camera_height = camera_height
+        self.num_cameras = num_cameras
+        super().__init__(*args, robot_uids=CartPoleRobot, **kwargs)
+
+    @property
+    def _default_sim_config(self):
+        # 120 Hz physics with 60 Hz control to mirror the Isaac Lab benchmark setup.
+        return SimConfig(
+            sim_freq=120,
+            spacing=20,
+            control_freq=60,
+            scene_config=SceneConfig(
+                bounce_threshold=0.5,
+                solver_position_iterations=4,
+                solver_velocity_iterations=0,
+            ),
+        )
+
+    @property
+    def _default_sensor_configs(self):
+        from transforms3d.euler import euler2quat
+
+        # camera tilted down ~12 degrees, looking at the cart from y = -4
+        q = euler2quat(0, np.deg2rad(11.988), np.pi / 2)
+        pose = sapien.Pose((0.0, -4.0, 3.0), q=q)
+        sensor_configs = []
+        if self.num_cameras is not None:
+            for i in range(self.num_cameras):
+                sensor_configs.append(
+                    CameraConfig(
+                        uid=f"base_camera_{i}",
+                        pose=pose,
+                        width=self.camera_width,
+                        height=self.camera_height,
+                        far=25,
+                        fov=0.63,
+                    )
+                )
+        return sensor_configs
+
+    @property
+    def _default_human_render_camera_configs(self):
+        # no human-render cameras for benchmarking
+        return dict()
+
+    def _load_agent(self, options: dict):
+        super()._load_agent(options, sapien.Pose())
+
+    def _load_scene(self, options: dict):
+        """Build non-robot actors from the MJCF file plus a textured ground plane."""
+        loader = self.scene.create_mjcf_loader()
+        actor_builders = loader.parse(MJCF_FILE)["actor_builders"]
+        for a in actor_builders:
+            a.initial_pose = sapien.Pose()
+            a.build(a.name)
+        # isaac uses a 0.5mx0.5m grid so we downscale the grid which is 4x4 squares by 2 by assuming the texture square length is 2
+        self.ground = build_ground(
+            self.scene,
+            texture_file=os.path.join(
+                os.path.dirname(__file__), "assets/black_grid.png"
+            ),
+            texture_square_len=2,
+            mipmap_levels=7,
+        )
+
+    def _load_lighting(self, options: dict):
+        """Loads lighting into the scene. Called by `self._reconfigure`. If not overridden will set some simple default lighting"""
+        self.scene.set_ambient_light(np.array([1, 1, 1]) * 0.3)
+        # apply the same HDR environment map to every sub-scene
+        for i in range(self.num_envs):
+            self.scene.sub_scenes[i].set_environment_map(
+                os.path.join(
+                    os.path.dirname(__file__), "kloofendal_28d_misty_puresky_1k.hdr"
+                )
+            )
+
+    def compute_dense_reward(self, obs, action, info):
+        # benchmark: dense reward disabled, always zero
+        return torch.zeros(self.num_envs, device=self.device)
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/mujoco/.gitignore b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/mujoco/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..d067a4c248d8fb9323d923b2b46b602113580b4a
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/mujoco/.gitignore
@@ -0,0 +1 @@
+franka_description
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/mujoco/panda.xml b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/mujoco/panda.xml
new file mode 100644
index 0000000000000000000000000000000000000000..1f00b8fc33aafe20070f98f4dc66431fa3bdcfed
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/mujoco/panda.xml
@@ -0,0 +1,118 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/mujoco/panda_pick_cube.xml b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/mujoco/panda_pick_cube.xml
new file mode 100644
index 0000000000000000000000000000000000000000..4ec06c004e7487de14a7366da9a9f5040d488c9e
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/envs/mujoco/panda_pick_cube.xml
@@ -0,0 +1,32 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/gpu_sim.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/gpu_sim.py
new file mode 100644
index 0000000000000000000000000000000000000000..7332091dd26ad35e8a2225fa8966ce1909322bf8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/gpu_sim.py
@@ -0,0 +1,221 @@
+import argparse
+from dataclasses import dataclass
+from pathlib import Path
+from typing import Annotated, Optional
+import gymnasium as gym
+import numpy as np
+import torch
+import tqdm
+import tyro
+
+import mani_skill.envs
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.examples.benchmarking.profiling import Profiler
+from mani_skill.utils.visualization.misc import images_to_video, tile_images
+from mani_skill.utils.wrappers.flatten import FlattenActionSpaceWrapper
+import mani_skill.examples.benchmarking.envs
+from mani_skill.utils.wrappers.gymnasium import CPUGymWrapper # import benchmark env code
+from gymnasium.vector.async_vector_env import AsyncVectorEnv
+BENCHMARK_ENVS = ["FrankaPickCubeBenchmark-v1", "CartpoleBalanceBenchmark-v1", "FrankaMoveBenchmark-v1"]
+@dataclass
+class Args:
+ env_id: Annotated[str, tyro.conf.arg(aliases=["-e"])] = "PickCube-v1"
+ obs_mode: Annotated[str, tyro.conf.arg(aliases=["-o"])] = "state"
+ control_mode: Annotated[str, tyro.conf.arg(aliases=["-c"])] = "pd_joint_delta_pos"
+ num_envs: Annotated[int, tyro.conf.arg(aliases=["-n"])] = 1024
+ cpu_sim: bool = False
+ """Whether to use the CPU or GPU simulation"""
+ seed: int = 0
+ save_example_image: bool = False
+ control_freq: Optional[int] = 60
+ sim_freq: Optional[int] = 120
+ num_cams: Optional[int] = None
+ """Number of cameras. Only used by benchmark environments"""
+ cam_width: Optional[int] = None
+ """Width of cameras. Only used by benchmark environments"""
+ cam_height: Optional[int] = None
+ """Height of cameras. Only used by benchmark environments"""
+ render_mode: str = "rgb_array"
+ """Which set of cameras/sensors to render for video saving. 'cameras' value will save a video showing all sensor/camera data in the observation, e.g. rgb and depth. 'rgb_array' value will show a higher quality render of the environment running."""
+ save_video: bool = False
+ """Whether to save videos"""
+ save_results: Optional[str] = None
+ """Path to save results to. Should be path/to/results.csv"""
+def main(args: Args):
+ profiler = Profiler(output_format="stdout")
+ num_envs = args.num_envs
+ sim_config = dict()
+ if args.control_freq:
+ sim_config["control_freq"] = args.control_freq
+ if args.sim_freq:
+ sim_config["sim_freq"] = args.sim_freq
+ kwargs = dict()
+ if args.env_id in BENCHMARK_ENVS:
+ kwargs = dict(
+ camera_width=args.cam_width,
+ camera_height=args.cam_height,
+ num_cameras=args.num_cams,
+ )
+ if not args.cpu_sim:
+ env = gym.make(
+ args.env_id,
+ num_envs=num_envs,
+ obs_mode=args.obs_mode,
+ render_mode=args.render_mode,
+ control_mode=args.control_mode,
+ sim_config=sim_config,
+ **kwargs
+ )
+ if isinstance(env.action_space, gym.spaces.Dict):
+ env = FlattenActionSpaceWrapper(env)
+ base_env: BaseEnv = env.unwrapped
+ else:
+ def make_env():
+ def _init():
+ env = gym.make(args.env_id,
+ obs_mode=args.obs_mode,
+ sim_config=sim_config,
+ render_mode=args.render_mode,
+ control_mode=args.control_mode,
+ **kwargs)
+ env = CPUGymWrapper(env, )
+ return env
+ return _init
+ env = AsyncVectorEnv([make_env() for _ in range(num_envs)], context="forkserver") if args.num_envs > 1 else make_env()()
+ base_env = make_env()().unwrapped
+
+ base_env.print_sim_details()
+ images = []
+ video_nrows = int(np.sqrt(num_envs))
+ with torch.inference_mode():
+ env.reset(seed=2022)
+ env.step(env.action_space.sample()) # warmup step
+ env.reset(seed=2022)
+ if args.save_video:
+ images.append(env.render().cpu().numpy())
+ N = 1000
+ with profiler.profile("env.step", total_steps=N, num_envs=num_envs):
+ for i in range(N):
+ actions = (
+ 2 * torch.rand(env.action_space.shape, device=base_env.device)
+ - 1
+ )
+ if args.cpu_sim:
+ actions = actions.numpy() # gymnasium async vector env processes torch actions very slowly.
+ obs, rew, terminated, truncated, info = env.step(actions)
+ if args.save_video:
+ images.append(env.render().cpu().numpy())
+ profiler.log_stats("env.step")
+
+ if args.save_video:
+ images = [tile_images(rgbs, nrows=video_nrows) for rgbs in images]
+ images_to_video(
+ images,
+ output_dir="./videos/ms3_benchmark",
+ video_name=f"mani_skill_gpu_sim-random_actions-{args.env_id}-num_envs={num_envs}-obs_mode={args.obs_mode}-render_mode={args.render_mode}",
+ fps=30,
+ )
+ del images
+
+ # if environment has some predefined actions run those
+ if hasattr(env.unwrapped, "fixed_trajectory"):
+ for k, v in env.unwrapped.fixed_trajectory.items():
+ obs, _ = env.reset()
+ env.step(torch.zeros(env.action_space.shape, device=base_env.device))
+ obs, _ = env.reset()
+ if args.save_video:
+ images = []
+ images.append(env.render().cpu().numpy())
+ actions = v["actions"]
+ if v["control_mode"] == "pd_joint_pos":
+ env.unwrapped.agent.set_control_mode(v["control_mode"])
+ env.unwrapped.agent.controller.reset()
+ N = v["shake_steps"] if "shake_steps" in v else 0
+ N += sum([a[1] for a in actions])
+ with profiler.profile(f"{k}_env.step", total_steps=N, num_envs=num_envs):
+ i = 0
+ for action in actions:
+ for _ in range(action[1]):
+ a = torch.tile(action[0], (num_envs, 1))
+ if args.cpu_sim:
+ a = a.numpy()
+ env.step(a)
+ i += 1
+ if args.save_video:
+ images.append(env.render().cpu().numpy())
+ # runs a "shake" test, typically used to check stability of contacts/grasping
+ if "shake_steps" in v:
+ env.unwrapped.agent.set_control_mode("pd_joint_target_delta_pos")
+ env.unwrapped.agent.controller.reset()
+ while i < N:
+ actions = v["shake_action_fn"]()
+ env.step(actions)
+ if args.save_video:
+ images.append(env.render().cpu().numpy())
+ i += 1
+ profiler.log_stats(f"{k}_env.step")
+ if args.save_video:
+ images = [tile_images(rgbs, nrows=video_nrows) for rgbs in images]
+ images_to_video(
+ images,
+ output_dir="./videos/ms3_benchmark",
+ video_name=f"mani_skill_gpu_sim-fixed_trajectory={k}-{args.env_id}-num_envs={num_envs}-obs_mode={args.obs_mode}-render_mode={args.render_mode}",
+ fps=30,
+ )
+ del images
+ env.reset(seed=2022)
+ N = 1000
+ with profiler.profile("env.step+env.reset", total_steps=N, num_envs=num_envs):
+ for i in range(N):
+ actions = (
+ 2 * torch.rand(env.action_space.shape, device=base_env.device) - 1
+ )
+ if args.cpu_sim:
+ actions = actions.numpy()
+ obs, rew, terminated, truncated, info = env.step(actions)
+ if i % 200 == 0 and i != 0:
+ env.reset()
+ profiler.log_stats("env.step+env.reset")
+ if args.save_example_image:
+ obs, _ = env.reset(seed=2022)
+ import matplotlib.pyplot as plt
+ for cam_name, cam_data in obs["sensor_data"].items():
+ for k, v in cam_data.items():
+ imgs = v.cpu().numpy()
+ imgs = tile_images(imgs, nrows=int(np.sqrt(args.num_envs)))
+ cmap = None
+ if k == "depth":
+ imgs[imgs == np.inf] = 0
+ imgs = imgs[ :, :, 0]
+ cmap = "gray"
+ plt.imsave(f"maniskill_{cam_name}_{k}.png", imgs, cmap=cmap)
+
+ env.close()
+ if args.save_results:
+ # append results to csv
+ try:
+ assert (
+ args.save_video == False
+ ), "Saving video slows down speed a lot and it will distort results"
+ Path("benchmark_results").mkdir(parents=True, exist_ok=True)
+ data = dict(
+ env_id=args.env_id,
+ obs_mode=args.obs_mode,
+ num_envs=args.num_envs,
+ control_mode=args.control_mode,
+ gpu_type=torch.cuda.get_device_name()
+ )
+ data.update(
+ num_cameras=args.num_cams,
+ camera_width=args.cam_width,
+ camera_height=args.cam_height,
+ )
+ profiler.update_csv(
+ args.save_results,
+ data,
+ )
+ except:
+ pass
+
+if __name__ == "__main__":
+ main(tyro.cli(Args))
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/isaac_lab_gpu_sim.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/isaac_lab_gpu_sim.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3de84348d19b0a19a4c3df3517f0d57c4ff957d
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/isaac_lab_gpu_sim.py
@@ -0,0 +1,135 @@
+import argparse
+import sys
+
+import numpy as np
+from omni.isaac.lab.app import AppLauncher
+
+# add argparse arguments
+parser = argparse.ArgumentParser(description="Benchmark Isaac Lab")
+parser.add_argument("--num-envs", type=int, default=None, help="Number of environments to simulate.")
+parser.add_argument("--save-example-image", action="store_true", help="Save the last image output of each modality and camera to disk")
+parser.add_argument("--task", type=str, default=None, help="Name of the task.")
+parser.add_argument("--obs-mode", type=str, default="state", help="Observation mode")
+parser.add_argument("--num-cams", type=int, default=None, help="Number of cameras. Only used by benchmark environments")
+parser.add_argument("--cam-width", type=int, default=None, help="Width of cameras. Only used by benchmark environments")
+parser.add_argument("--cam-height", type=int, default=None, help="Height of cameras. Only used by benchmark environments")
+parser.add_argument("--seed", type=int, default=None, help="Seed used for the environment")
+parser.add_argument(
+ "--save-results", action="store_true", help="whether to save results to a csv file"
+)
+# append AppLauncher cli args
+AppLauncher.add_app_launcher_args(parser)
+# parse the arguments
+args_cli, hydra_args = parser.parse_known_args()
+# clear out sys.argv for Hydra
+sys.argv = [sys.argv[0]] + hydra_args
+
+# launch omniverse app
+app_launcher = AppLauncher(args_cli)
+simulation_app = app_launcher.app
+
+"""Rest everything follows."""
+
+import gymnasium as gym
+from profiling import Profiler, tile_images
+import torch
+from pathlib import Path
+import envs.isaaclab
+import numpy as np
+import omni.isaac.lab_tasks # noqa: F401
+from omni.isaac.lab_tasks.utils import parse_env_cfg
+
+def main():
+ profiler = Profiler(output_format="stdout")
+
+ env_cfg = parse_env_cfg(
+ args_cli.task, num_envs=args_cli.num_envs
+ )
+ # create isaac environment
+ if args_cli.obs_mode != "state":
+ env = gym.make(args_cli.task, cfg=env_cfg, camera_width=args_cli.cam_width, camera_height=args_cli.cam_height, num_cameras=args_cli.num_cams, obs_mode=args_cli.obs_mode)
+ else:
+ env = gym.make(args_cli.task, cfg=env_cfg, obs_mode=args_cli.obs_mode, num_cameras=0)
+ with torch.inference_mode():
+ env.reset(seed=2022)
+ env_created = True
+ env.step(torch.from_numpy(env.action_space.sample()).cuda()) # warmup step
+ env.reset(seed=2022)
+ torch.manual_seed(0)
+
+ N = 1000
+ with profiler.profile("env.step", total_steps=N, num_envs=args_cli.num_envs):
+ for i in range(N):
+ actions = (
+ 2 * torch.rand(env.action_space.shape, device=env.unwrapped.device)
+ - 1
+ )
+ obs, rew, terminated, truncated, info = env.step(actions)
+ profiler.log_stats("env.step")
+ env.reset(seed=2022)
+ N = 1000
+ with profiler.profile("env.step+env.reset", total_steps=N, num_envs=args_cli.num_envs):
+ for i in range(N):
+ actions = (
+ 2 * torch.rand(env.action_space.shape, device=env.unwrapped.device) - 1
+ )
+ obs, rew, terminated, truncated, info = env.step(actions)
+ if i % 200 == 0 and i != 0:
+ env.reset()
+ profiler.log_stats("env.step+env.reset")
+
+ if args_cli.save_example_image:
+ obs, _ = env.reset(seed=2022)
+ import matplotlib.pyplot as plt
+ for cam_name, cam_data in obs["sensors"].items():
+ for k, v in cam_data.items():
+ imgs = v.cpu().numpy()
+ imgs = tile_images(imgs, nrows=int(np.sqrt(args_cli.num_envs)))
+ cmap = None
+ if k == "depth":
+ imgs[imgs == np.inf] = 0
+ imgs = imgs[ :, :, 0]
+ cmap = "gray"
+ plt.imsave(f"isaac_{cam_name}_{k}.png", imgs, cmap=cmap)
+ env.close()
+
+
+ # append results to csv
+ env_id_mapping = {
+ "Isaac-Cartpole-RGB-Camera-Direct-Benchmark-v0": "CartpoleBalanceBenchmark-v1",
+ "Isaac-Cartpole-Direct-Benchmark-v0": "CartpoleBalanceBenchmark-v1",
+ "Isaac-Cartpole-Direct-v0": "CartpoleBalanceBenchmark-v1",
+ "Isaac-Franka-Direct-Benchmark-v0": "FrankaBenchmark-v1",
+ }
+
+ if args_cli.obs_mode in ["rgb", "rgbd", "depth"]:
+ sensor_settings_str = []
+ for i in range(args_cli.num_cams):
+ cam_type = "RGB" if args_cli.obs_mode == "rgb" else "Depth"
+ sensor_settings_str.append(f"{cam_type}({args_cli.cam_width}x{args_cli.cam_height})")
+ sensor_settings_str = ", ".join(sensor_settings_str)
+ else:
+ sensor_settings_str = None
+ if args_cli.save_results:
+ Path("benchmark_results").mkdir(parents=True, exist_ok=True)
+ profiler.update_csv(
+ "benchmark_results/isaac_lab.csv",
+ dict(
+ env_id=env_id_mapping[args_cli.task],
+ obs_mode=args_cli.obs_mode,
+ num_envs=args_cli.num_envs,
+ # control_mode=args.control_mode,
+ num_cameras=args_cli.num_cams,
+ camera_width=args_cli.cam_width,
+ camera_height=args_cli.cam_height,
+ sensor_settings=sensor_settings_str,
+ gpu_type=torch.cuda.get_device_name()
+ ),
+ )
+
+
+if __name__ == "__main__":
+ # run the main function
+ main()
+ # close sim app
+ simulation_app.close()
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/plot_results.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/plot_results.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba04709de384ef74e6b26ca3ce374e33f786f821
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/plot_results.py
@@ -0,0 +1,257 @@
+"""
+Run
+python plot_results.py -e CartpoleBalanceBenchmark-v1 -f benchmark_results/maniskill.csv benchmark_results/isaac_lab.csv
+"""
+import matplotlib.pyplot as plt
+import argparse
+import numpy as np
+import pandas as pd
+import os
+import os.path as osp
+def parse_args():
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-e", "--env-id", required=True, help="ID of the environment")
+ parser.add_argument("-f", "--files", nargs='+', required=True, help="Paths to the benchmark result files to be plotted")
+ return parser.parse_args()
+
+COLOR_PALLETE = [
+ "#e02b35",
+ "#59a89c",
+ "#4190F1",
+ "#a559aa"
+ "#f0c571"
+]
+COLOR_MAP = {
+ "ManiSkill3": "#e02b35",
+ "Isaac Lab": "#59a89c"
+}
+
+def filter_df(df, df_filter):
+ for k, v in df_filter.items():
+ parts = k.split("$:")
+ if parts[0] == "<":
+ k = parts[1]
+ df = df[df[k] < v]
+ else:
+ df = df[df[k] == v]
+ return df
+
+def draw_bar_plot_envs_vs_fps(ax, data, df_filter, xname="num_envs", yname="env.step/fps", annotate_label=None):
+ ax.set_xlabel("Number of Parallel Environments")
+ ax.set_ylabel("FPS")
+ width = 0.8 / len(data)
+
+ num_envs_list = []
+ plotted_bars = 0
+ for i, (exp_name, df) in enumerate(data.items()):
+ df = filter_df(df, df_filter)
+ if len(df) == 0: continue
+ df = df.sort_values(xname)
+ xs = np.arange(len(df)) + i * width
+ ax.bar(xs, df[yname], label=exp_name, color=COLOR_MAP[exp_name], width=width, zorder=3)
+ plotted_bars += 1
+ if len(df[xname]) > len(num_envs_list):
+ global_xs = np.arange(len(df)) + i * width
+ num_envs_list = df[xname]
+ if annotate_label is not None:
+ for j, (x_val, y_val, annotate_data) in enumerate(zip(xs, df[yname], df[annotate_label])):
+ if "gpu_mem_use" in annotate_label:
+ ax.annotate(f'{annotate_data / (1024 * 1024 * 1024):0.1f} GB', (x_val, y_val), textcoords="offset points", xytext=(0,5), ha='center', fontsize=7)
+ else:
+ ax.annotate(annotate_data, (x_val, y_val), textcoords="offset points", xytext=(0,5), ha='center', fontsize=7)
+ ax.set_xticks(np.arange(len(num_envs_list)) + (plotted_bars - 1) * width / 2, num_envs_list)
+ plt.legend()
+ ax.grid(True, axis='y', zorder=0)
+ plt.tight_layout()
+def draw_line_plot_envs_vs_fps(ax, data, df_filter, xname="num_envs", yname="env.step/fps", annotate_label=None):
+ ax.set_xlabel("Number of Parallel Environments")
+ ax.set_ylabel("FPS")
+ for i, (exp_name, df) in enumerate(data.items()):
+ df = filter_df(df, df_filter)
+ df = df.sort_values(xname)
+ if len(df) == 0: continue
+ if annotate_label is not None:
+ for j, (x, y) in enumerate(zip(df[xname], df[yname])):
+ if "gpu_mem_use" in annotate_label:
+ ax.annotate(f'{df[annotate_label].iloc[j] / (1024 * 1024 * 1024):0.1f} GB', (x, y), textcoords="offset points", xytext=(0,5), ha='center', fontsize=7)
+ else:
+ ax.annotate(df[annotate_label].iloc[j], (x, y), textcoords="offset points", xytext=(0,5), ha='center', fontsize=7)
+ ax.plot(df[xname], df[yname], '-o', label=exp_name, color=COLOR_MAP[exp_name], zorder=3)
+ plt.legend()
+ ax.grid(True, zorder=0)
+ plt.tight_layout()
+def main(args):
+
+ data: dict[str, pd.DataFrame] = dict()
+
+ for file in args.files:
+ df = pd.read_csv(file)
+ exp_name = os.path.basename(file).split('.')[0]
+ if exp_name == "maniskill":
+ exp_name = "ManiSkill3"
+ if exp_name == "isaac_lab":
+ exp_name = "Isaac Lab"
+ data[exp_name] = df
+ # modify matplotlib settings for higher quality images
+ plt.rcParams["figure.figsize"] = [10, 4] # set figure size
+ plt.rcParams["figure.dpi"] = 200 # set figure dpi
+ plt.rcParams["savefig.dpi"] = 200 # set savefig dpi
+
+ root_save_path = f"benchmark_results/{'_'.join([os.path.basename(file).split('.')[0] for file in args.files])}/{args.env_id}"
+ # Create root_save_path if it doesn't exist
+ os.makedirs(root_save_path, exist_ok=True)
+ print(f"Saving figures to {root_save_path}")
+
+ ### RENDERING RESULTS ###
+ # generate plot of RGB FPS against number of parallel environments with 1x 128x128 camera
+ for obs_mode in ["rgb", "rgb+depth", "depth"]:
+ cam_sizes = [80, 128, 160, 224, 256, 512]
+ for cam_size in cam_sizes:
+ fig, ax = plt.subplots()
+ ax.set_title(f"{args.env_id}: {obs_mode} FPS vs Number of Parallel Envs. 1x{cam_size}x{cam_size} Camera")
+ draw_bar_plot_envs_vs_fps(
+ ax, data,
+ {"env_id": args.env_id, "obs_mode": obs_mode, "camera_width": cam_size, "camera_height": cam_size, "num_cameras": 1}, annotate_label="env.step/gpu_mem_use")
+ save_path = f"fps_num_envs_1x{cam_size}x{cam_size}_{obs_mode}.png"
+ fig.savefig(osp.join(root_save_path, save_path))
+ plt.close(fig)
+ print(f"Saved figure to {save_path}")
+
+ # generate plot of RGB FPS against square cameras and camera width under 16GB of GPU memory
+ for obs_mode in ["rgb", "rgb+depth"]:
+ fig, ax = plt.subplots()
+ ax.grid(True)
+ ax.set_xlabel("Camera Width/Height")
+ ax.set_ylabel("FPS")
+ ax.set_title(f"{args.env_id}: Highest RGB FPS vs Camera Size under 16GB GPU memory")
+ for i, (exp_name, df) in enumerate(data.items()):
+ df = df[df["env_id"] == args.env_id]
+ df = df[df["env.step/gpu_mem_use"] < 16 * 1024 * 1024 * 1024]
+ df = df[(df["obs_mode"] == obs_mode)]
+ df = df[df["num_cameras"] == 1]
+ df = df[df["camera_height"] == df["camera_width"]]
+ if len(df) == 0: continue
+ ids = df.groupby("camera_width").idxmax()["env.step/fps"].to_list()
+ df = df.loc[ids]
+ df = df.sort_values("camera_width")
+ for j, (x, y) in enumerate(zip(df["camera_width"], df["env.step/fps"])):
+ ax.annotate(f'{df["num_envs"].iloc[j]} envs', (x, y), textcoords="offset points", xytext=(0,5), ha='center')
+ ax.plot(df["camera_width"], df["env.step/fps"], '-o', label=exp_name, color=COLOR_PALLETE[i % len(COLOR_PALLETE)])
+ plt.legend()
+ plt.tight_layout()
+ save_path = osp.join(root_save_path, f"fps_camera_size_{obs_mode}.png")
+ fig.savefig(save_path)
+ plt.close(fig)
+ print(f"Saved figure to {save_path}")
+
+
+ # generate plot of RGB FPS against number of 128x128 cameras under 16GB of GPU memory
+ for camera_size in [80, 128, 160, 224, 256, 512]:
+ for obs_mode in ["rgb", "rgb+depth"]:
+ fig, ax = plt.subplots()
+ ax.grid(True)
+ ax.set_xlabel("Number of Cameras")
+ ax.set_ylabel("FPS")
+ ax.set_title(f"{args.env_id}: Highest RGB FPS vs Number of {camera_size}x{camera_size} Cameras under 16GB GPU memory")
+ for i, (exp_name, df) in enumerate(data.items()):
+ df = df[df["env_id"] == args.env_id]
+ df = df[df["env.step/gpu_mem_use"] < 16 * 1024 * 1024 * 1024]
+ df = df[(df["obs_mode"] == obs_mode)]
+ df = df[df["camera_width"] == camera_size]
+ df = df[df["camera_height"] == camera_size]
+ ids = df.groupby("num_cameras").idxmax()["env.step/fps"].to_list()
+ df = df.loc[ids]
+ df = df.sort_values("camera_width")
+ if len(df) == 0: continue
+ for j, (x, y) in enumerate(zip(df["num_cameras"], df["env.step/fps"])):
+ ax.annotate(f'{df["num_envs"].iloc[j]} envs', (x, y), textcoords="offset points", xytext=(0,5), ha='center')
+ ax.plot(df["num_cameras"], df["env.step/fps"], '-o', label=exp_name, color=COLOR_PALLETE[i % len(COLOR_PALLETE)])
+ plt.legend()
+ plt.tight_layout()
+ save_path = osp.join(root_save_path, f"fps_num_cameras_{camera_size}x{camera_size}_{obs_mode}.png")
+ fig.savefig(save_path)
+ print(f"Saved figure to {save_path}")
+ plt.close(fig)
+
+ # generate plot for RT/google dataset settings, which is 1x 640x480 cameras
+ for obs_mode in ["RGB", "Depth"]:
+ fig, ax = plt.subplots(figsize=(8, 6))
+ ax.set_title(f"{args.env_id}: FPS with 1x 640x480 {obs_mode} Cameras")
+ draw_bar_plot_envs_vs_fps(ax, data, {"env_id": args.env_id, "obs_mode": obs_mode.lower(), "num_cameras": 1, "camera_width": 640, "camera_height": 480}, annotate_label="env.step/gpu_mem_use")
+ plt.legend()
+ plt.tight_layout()
+ save_path = osp.join(root_save_path, f"fps_rt_dataset_setup_{obs_mode.lower()}_bar.png")
+ fig.savefig(save_path)
+ plt.close(fig)
+ print(f"Saved figure to {save_path}")
+
+ # generate plot for droit dataset settings, which is 3x 320x180 cameras
+ for obs_mode in ["RGB", "Depth"]:
+ fig, ax = plt.subplots(figsize=(8, 6))
+ ax.set_title(f"{args.env_id}: FPS with 3x 320x180 {obs_mode} Cameras")
+ draw_bar_plot_envs_vs_fps(ax, data, {"env_id": args.env_id, "obs_mode": obs_mode.lower(), "num_cameras": 3, "camera_width": 320, "camera_height": 180}, annotate_label="env.step/gpu_mem_use")
+ save_path = osp.join(root_save_path, f"fps_droid_dataset_setup_{obs_mode.lower()}.png")
+ fig.savefig(save_path)
+ plt.close(fig)
+ print(f"Saved figure to {save_path}")
+
+ ### State results ###
+ # generate plot of state FPS against number of parallel environments
+ fig, ax = plt.subplots()
+ ax.set_title(f"{args.env_id} random actions: State FPS vs Number of Parallel Environments")
+ draw_bar_plot_envs_vs_fps(ax, data, {"env_id": args.env_id, "obs_mode": "state"}, annotate_label="env.step/gpu_mem_use")
+ save_path = osp.join(root_save_path, f"fps_num_envs_state.png")
+ fig.savefig(save_path)
+ plt.close(fig)
+ print(f"Saved figure to {save_path}")
+
+ # Print column names of first entry in data
+ first_key = list(data.keys())[0]
+ first_df = data[first_key]
+ fixed_trajectory_cols = []
+ for col in first_df.columns:
+ if "_env.step/fps" in col:
+ # special fixed trajectory runs
+ fixed_trajectory_cols.append(col)
+ for col in fixed_trajectory_cols:
+ fixed_name = '_'.join(col.split('_')[:-1])
+ fig, ax = plt.subplots()
+ ax.set_title(f"{args.env_id} {fixed_name} actions: State FPS vs Number of Parallel Environments")
+ draw_bar_plot_envs_vs_fps(ax, data, {"env_id": args.env_id, "obs_mode": "state"}, yname=col, annotate_label="env.step/gpu_mem_use")
+ save_path = osp.join(root_save_path, f"fps_num_envs_state_{fixed_name}.png")
+ fig.savefig(save_path)
+ plt.close(fig)
+ print(f"Saved figure to {save_path}")
+
+
+
+
+ ### Special figures for maniskill ###
+ if "maniskill" in data.keys():
+ # Generate line plots of rendering FPS for different env_ids against number of parallel environments
+ fig, ax = plt.subplots(figsize=(10, 4))
+ ax.grid(True)
+ ax.set_xlabel("Number of Parallel Environments")
+ ax.set_ylabel("FPS")
+ ax.set_title("Simulation+Rendering FPS vs Number of Parallel Environments for Different Tasks")
+
+ df = data["maniskill"]
+ df = df[(df["obs_mode"] == "rgb") & (df["num_envs"] >= 16) & (df["num_cameras"] == 1) & (df["camera_width"] == 128)]
+ env_ids = df["env_id"].unique()
+ for i, env_id in enumerate(env_ids):
+ env_df = df[df["env_id"] == env_id].sort_values("num_envs")
+ ax.plot(env_df["num_envs"], env_df["env.step/fps"], '-o', label=env_id, color=COLOR_PALLETE[i % len(COLOR_PALLETE)])
+
+ for x, y, mem_use in zip(env_df["num_envs"], env_df["env.step/fps"], env_df["env.step/gpu_mem_use"]):
+ ax.annotate(f'{mem_use / (1024 * 1024 * 1024):0.1f} GB', (x, y), textcoords="offset points", xytext=(0,5), ha='center', fontsize=7)
+
+ ax.legend()
+ plt.tight_layout()
+ fig.savefig("benchmark_results/fps_vs_num_envs_different_tasks.png")
+
+# To use this script, run it from the command line with the paths to the benchmark result files as arguments.
+# For example:
+# python plot_results.py -f file1.csv file2.csv file3.csv
+ return
+if __name__ == "__main__":
+ main(parse_args())
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/profiling.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/profiling.py
new file mode 100644
index 0000000000000000000000000000000000000000..9a5b12aa7f305a9b45c3ac08745d1fc78feb78cc
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/profiling.py
@@ -0,0 +1,227 @@
+import os
+import time
+from contextlib import contextmanager
+from typing import List, Literal, Optional
+import imageio
+import numpy as np
+
+import psutil
+import torch
+import pynvml
+import subprocess as sp
+
+import tqdm
+def flatten_dict_keys(d: dict, prefix=""):
+ """Flatten a dict by expanding its keys recursively."""
+ out = dict()
+ for k, v in d.items():
+ if isinstance(v, dict):
+ out.update(flatten_dict_keys(v, prefix + k + "/"))
+ else:
+ out[prefix + k] = v
+ return out
+class Profiler:
+ """
+ A simple class to help profile/benchmark simulator code
+ """
+
+ def __init__(
+ self, output_format: Literal["stdout", "json"], synchronize_torch: bool = True
+ ) -> None:
+ self.output_format = output_format
+ self.synchronize_torch = synchronize_torch
+ self.stats = dict()
+ # Initialize NVML
+ pynvml.nvmlInit()
+
+ # Get handle for the first GPU (index 0)
+ self.handle = pynvml.nvmlDeviceGetHandleByIndex(0)
+
+ # Get the PID of the current process
+ self.current_pid = os.getpid()
+
+ def log(self, msg):
+ """log a message to stdout"""
+ if self.output_format == "stdout":
+ print(msg)
+
+ def update_csv(self, csv_path: str, data: dict):
+ """Update a csv file with the given data (a dict representing a unique identifier of the result row)
+ and stats. If the file does not exist, it will be created. The update will replace an existing row
+ if the given data matches the data in the row. If there are multiple matches, only the first match
+ will be replaced and the rest are deleted"""
+ import pandas as pd
+ import os
+
+ if os.path.exists(csv_path):
+ df = pd.read_csv(csv_path)
+ else:
+ df = pd.DataFrame()
+ stats_flat = flatten_dict_keys(self.stats)
+ cond = None
+
+ for k in stats_flat:
+ if k not in df:
+ df[k] = None
+ for k in data:
+ if k not in df:
+ df[k] = None
+
+ mask = df[k].isna() if data[k] is None else df[k] == data[k]
+ if cond is None:
+ cond = mask
+ else:
+ cond = cond & mask
+ data_dict = {**data, **stats_flat}
+ if not cond.any():
+ df = pd.concat([df, pd.DataFrame(data_dict, index=[len(df)])])
+ else:
+ # replace the first instance
+ df.loc[df.loc[cond].index[0]] = data_dict
+ df.drop(df.loc[cond].index[1:], inplace=True)
+ # delete other instances
+ df.to_csv(csv_path, index=False)
+
+ @contextmanager
+ def profile(self, name: str, total_steps: int, num_envs: int):
+ print(f"start recording {name} metrics")
+ process = psutil.Process(os.getpid())
+ cpu_mem_use = process.memory_info().rss
+ gpu_mem_use = self.get_current_process_gpu_memory()
+ torch.cuda.synchronize()
+ stime = time.time()
+ yield
+ dt = time.time() - stime
+ # dt: delta time (s)
+ # fps: frames per second
+ # psps: parallel steps per second (number of env.step calls per second)
+ self.stats[name] = dict(
+ dt=dt,
+ fps=total_steps * num_envs / dt,
+ psps=total_steps / dt,
+ total_steps=total_steps,
+ cpu_mem_use=cpu_mem_use,
+ gpu_mem_use=gpu_mem_use,
+ )
+ torch.cuda.synchronize()
+
+ def log_stats(self, name: str):
+ stats = self.stats[name]
+ self.log(
+ f"{name}: {stats['fps']:0.3f} steps/s, {stats['psps']:0.3f} parallel steps/s, {stats['total_steps']} steps in {stats['dt']:0.3f}s"
+ )
+ self.log(
+ f"{' ' * 4}CPU mem: {stats['cpu_mem_use'] / (1024**2):0.3f} MB, GPU mem: {stats['gpu_mem_use'] / (1024**2):0.3f} MB"
+ )
+
+ def get_current_process_gpu_memory(self):
+ # Get all processes running on the GPU
+ processes = pynvml.nvmlDeviceGetComputeRunningProcesses(self.handle)
+
+ # Iterate through the processes to find the current process
+ for process in processes:
+ if process.pid == self.current_pid:
+ memory_usage = process.usedGpuMemory
+ return memory_usage
+def images_to_video(
+ images: List[np.ndarray],
+ output_dir: str,
+ video_name: str,
+ fps: int = 10,
+ quality: Optional[float] = 5,
+ verbose: bool = True,
+ **kwargs,
+):
+ r"""Calls imageio to run FFMPEG on a list of images. For more info on
+ parameters, see https://imageio.readthedocs.io/en/stable/format_ffmpeg.html
+ Args:
+ images: The list of images. Images should be HxWx3 in RGB order.
+ output_dir: The folder to put the video in.
+ video_name: The name for the video.
+ fps: Frames per second for the video. Not all values work with FFMPEG,
+ use at your own risk.
+ quality: Default is 5. Uses variable bit rate. Highest quality is 10,
+ lowest is 0. Set to None to prevent variable bitrate flags to
+ FFMPEG so you can manually specify them using output_params
+ instead. Specifying a fixed bitrate using ‘bitrate’ disables
+ this parameter.
+ References:
+ https://github.com/facebookresearch/habitat-lab/blob/main/habitat/utils/visualizations/utils.py
+ """
+ assert 0 <= quality <= 10
+ if not os.path.exists(output_dir):
+ os.makedirs(output_dir)
+ video_name = video_name.replace(" ", "_").replace("\n", "_") + ".mp4"
+ output_path = os.path.join(output_dir, video_name)
+ writer = imageio.get_writer(output_path, fps=fps, quality=quality, **kwargs)
+ if verbose:
+ print(f"Video created: {output_path}")
+ images_iter = tqdm.tqdm(images)
+ else:
+ images_iter = images
+ for im in images_iter:
+ writer.append_data(im)
+ writer.close()
+
+def tile_images(images, nrows=1):
+ """
+ Tile multiple images to a single image comprised of nrows and an appropriate number of columns to fit all the images.
+ The images can also be batched (e.g. of shape (B, H, W, C)), but give images must all have the same batch size.
+
+ if nrows is 1, images can be of different sizes. If nrows > 1, they must all be the same size.
+ """
+ # Sort images in descending order of vertical height
+ batched = False
+ if len(images[0].shape) == 4:
+ batched = True
+ if nrows == 1:
+ images = sorted(images, key=lambda x: x.shape[0 + batched], reverse=True)
+
+ columns = []
+ if batched:
+ max_h = images[0].shape[1] * nrows
+ cur_h = 0
+ cur_w = images[0].shape[2]
+ else:
+ max_h = images[0].shape[0] * nrows
+ cur_h = 0
+ cur_w = images[0].shape[1]
+
+ # Arrange images in columns from left to right
+ column = []
+ for im in images:
+ if cur_h + im.shape[0 + batched] <= max_h and cur_w == im.shape[1 + batched]:
+ column.append(im)
+ cur_h += im.shape[0 + batched]
+ else:
+ columns.append(column)
+ column = [im]
+ cur_h, cur_w = im.shape[0 + batched : 2 + batched]
+ columns.append(column)
+
+ # Tile columns
+ total_width = sum(x[0].shape[1 + batched] for x in columns)
+
+ is_torch = False
+ if torch is not None:
+ is_torch = isinstance(images[0], torch.Tensor)
+
+ output_shape = (max_h, total_width, 3)
+ if batched:
+ output_shape = (images[0].shape[0], max_h, total_width, 3)
+ if is_torch:
+ output_image = torch.zeros(output_shape, dtype=images[0].dtype)
+ else:
+ output_image = np.zeros(output_shape, dtype=images[0].dtype)
+ cur_x = 0
+ for column in columns:
+ cur_w = column[0].shape[1 + batched]
+ next_x = cur_x + cur_w
+ if is_torch:
+ column_image = torch.concatenate(column, dim=0 + batched)
+ else:
+ column_image = np.concatenate(column, axis=0 + batched)
+ cur_h = column_image.shape[0 + batched]
+ output_image[..., :cur_h, cur_x:next_x, :] = column_image
+ cur_x = next_x
+ return output_image
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/scripts/isaac_lab.sh b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/scripts/isaac_lab.sh
new file mode 100644
index 0000000000000000000000000000000000000000..0f039a28f051aa7a4dab8d59c7e0a57b7fa17147
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/scripts/isaac_lab.sh
@@ -0,0 +1,101 @@
+# Benchmark state FPS
+for n in 4 16 32 64 128 256 512 1024 2048 4096 8192 16384
+do
+ python isaac_lab_gpu_sim.py \
+ --task "Isaac-Cartpole-Direct-Benchmark-v0" \
+ --num-envs $n --obs-mode state \
+ --headless --save-results
+done
+
+# Benchmark number of cameras
+for num_cams in 2 3 4
+do
+ for n in 4 16 32 64 128 256
+ do
+ for cam_size in 80 128 160 224 256 512
+ do
+ python isaac_lab_gpu_sim.py \
+ --task "Isaac-Cartpole-RGB-Camera-Direct-Benchmark-v0" \
+ --num-envs $n --obs-mode rgb \
+ --num-cams=$num_cams --cam-width=$cam_size --cam-height=$cam_size \
+ --enable_cameras --headless --save-results
+ done
+ done
+done
+
+# Benchmark different number of environments and camera sizes
+for obs_mode in rgb rgb+depth depth
+do
+ for cam_size in 80 128 160 224 256 512
+ do
+ for n in 4 16 32 64 128 256 512 1024
+ do
+ python isaac_lab_gpu_sim.py \
+ --task "Isaac-Cartpole-RGB-Camera-Direct-Benchmark-v0" \
+ --num-envs $n --obs-mode $obs_mode \
+ --num-cams=1 --cam-width=$cam_size --cam-height=$cam_size \
+ --enable_cameras --headless --save-results
+ done
+ done
+done
+
+# Benchmark high number of environments and small camera sizes
+for obs_mode in rgb rgb+depth
+do
+ for n in 2048 4096
+ do
+ for cam_size in 80 128
+ do
+ python isaac_lab_gpu_sim.py \
+ --task "Isaac-Cartpole-RGB-Camera-Direct-Benchmark-v0" \
+ --num-envs $n --obs-mode $obs_mode \
+ --num-cams=1 --cam-width=$cam_size --cam-height=$cam_size \
+ --enable_cameras --headless --save-results
+ done
+ done
+done
+
+# benchmark realistic settings
+# droid dataset
+for n in 4 16 32 64 128 256
+do
+ python isaac_lab_gpu_sim.py \
+ --task "Isaac-Cartpole-RGB-Camera-Direct-Benchmark-v0" \
+ --num-envs $n --obs-mode depth \
+ --num-cams=3 --cam-width=320 --cam-height=180 \
+ --enable_cameras --headless --save-results
+done
+
+# rt dataset
+for n in 4 16 32 64 128
+do
+ python isaac_lab_gpu_sim.py \
+ --task "Isaac-Cartpole-RGB-Camera-Direct-Benchmark-v0" \
+ --num-envs $n --obs-mode depth \
+ --num-cams=1 --cam-width=640 --cam-height=480 \
+ --enable_cameras --headless --save-results
+done
+
+for obs_mode in depth rgb
+do
+ for n in 4 16 32 64 128
+ do
+ python isaac_lab_gpu_sim.py \
+ --task "Isaac-Franka-Direct-Benchmark-v0" \
+ --num-envs $n --obs-mode $obs_mode \
+ --num-cams=1 --cam-width=640 --cam-height=480 \
+ --enable_cameras --headless --save-results
+ done
+done
+
+for obs_mode in rgb depth
+do
+ for n in 4 16 32 64 128 256
+ do
+ python isaac_lab_gpu_sim.py \
+ --task "Isaac-Franka-Direct-Benchmark-v0" \
+ --num-envs $n --obs-mode $obs_mode \
+ --num-cams=3 --cam-width=320 --cam-height=180 \
+ --enable_cameras --headless --save-results
+ done
+done
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/scripts/maniskill.sh b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/scripts/maniskill.sh
new file mode 100644
index 0000000000000000000000000000000000000000..7af4eac5ca131327b018fabe5407fb87476a7882
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/benchmarking/scripts/maniskill.sh
@@ -0,0 +1,104 @@
+# Benchmark camera modalities
+# for obs_mode in rgb depth rgbd
+# do
+# for n in 4 16 32 64 128 256 512 1024
+# do
+# python gpu_sim.py -e "CartpoleBalanceBenchmark-v1" \
+# -n=$n -o=rgb --num-cams=$num_cams --cam-width=128 --cam-height=128
+# done
+# done
+
+# Benchmark state FPS
+for n in 4 16 32 64 128 256 512 1024 2048 4096 8192 16384
+do
+ python gpu_sim.py -e "CartpoleBalanceBenchmark-v1" \
+ -n=$n -o=state --save-results benchmark_results/maniskill.csv
+done
+
+for n in 1024 2048 4096 8192
+do
+ python gpu_sim.py -e "FrankaMoveBenchmark-v1" \
+ -n=$n -o=state --sim-freq=100 --control-freq=50 --save-results benchmark_results/maniskill.csv
+done
+
+for n in 1024 2048 4096 8192
+do
+ python gpu_sim.py -e "FrankaPickCubeBenchmark-v1" \
+ -n=$n -o=state --sim-freq=100 --control-freq=50 --save-results benchmark_results/maniskill.csv
+done
+
+# Benchmark number of cameras
+for num_cams in {2..6}
+do
+ for n in 4 16 32 64 128 256 512 1024
+ do
+ for cam_size in 80 128 160 224 256 512
+ do
+ python gpu_sim.py -e "CartpoleBalanceBenchmark-v1" \
+ -n=$n -o=rgb --num-cams=$num_cams --cam-width=$cam_size --cam-height=$cam_size --save-results benchmark_results/maniskill.csv
+ done
+ done
+done
+
+# Benchmark different number of environments and camera sizes
+for obs_mode in rgb rgb+depth depth
+do
+ for n in 4 16 32 64 128 256 512 1024
+ do
+ for cam_size in 80 128 160 224 256 512
+ do
+ python gpu_sim.py -e "CartpoleBalanceBenchmark-v1" \
+ -n=$n -o=$obs_mode --num-cams=1 --cam-width=$cam_size --cam-height=$cam_size --save-results benchmark_results/maniskill.csv
+ done
+ done
+done
+
+# Benchmark different number of environments and default maniskill environments
+for env_id in "PickCube-v1" "OpenCabinetDrawer-v1"
+do
+ for n in 4 16 32 64 128 256 512 1024
+ do
+ python gpu_sim.py -e $env_id \
+ -n=$n -o=rgb --num-cams=1 --cam-width=128 --cam-height=128 --sim-freq=100 --control-freq=50 --save-results benchmark_results/maniskill.csv
+ done
+done
+
+
+# benchmark realistic settings
+# droid dataset
+for obs_mode in rgb depth
+do
+ for n in 4 16 32 64 128 256 512 1024
+ do
+ python gpu_sim.py -e "CartpoleBalanceBenchmark-v1" \
+ -n=$n -o=$obs_mode --num-cams=3 --cam-width=320 --cam-height=180 --save-results benchmark_results/maniskill.csv
+ done
+done
+
+# google RT datasets
+for obs_mode in rgb depth
+do
+ for n in 4 16 32 64 128 256 512 1024
+ do
+ python gpu_sim.py -e "CartpoleBalanceBenchmark-v1" \
+ -n=$n -o=$obs_mode --num-cams=1 --cam-width=640 --cam-height=480 --save-results benchmark_results/maniskill.csv
+ done
+done
+
+for obs_mode in depth rgb
+do
+ for n in 4 16 32 64 128 256 512 1024
+ do
+ python gpu_sim.py -e "FrankaBenchmark-v1" \
+ -n=$n -o=$obs_mode --num-cams=1 --cam-width=640 --cam-height=480 --save-results benchmark_results/maniskill.csv
+ done
+done
+
+for obs_mode in depth rgb
+do
+ for n in 4 16 32 64 128 256 512 1024
+ do
+ python gpu_sim.py -e "FrankaBenchmark-v1" \
+ -n=$n -o=$obs_mode --num-cams=3 --cam-width=320 --cam-height=180 --save-results benchmark_results/maniskill.csv
+ done
+done
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_manual_control.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_manual_control.py
new file mode 100644
index 0000000000000000000000000000000000000000..b61e1d4cf1e4477f086310c039d3297ce3b4a3a1
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_manual_control.py
@@ -0,0 +1,236 @@
+import argparse
+import signal
+
+import gymnasium as gym
+import numpy as np
+from matplotlib import pyplot as plt
+
+signal.signal(signal.SIGINT, signal.SIG_DFL) # allow ctrl+c
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.utils import common, visualization
+from mani_skill.utils.wrappers import RecordEpisode
+
+
+def parse_args():  # parse known CLI flags; any unknown trailing tokens become env kwargs
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-e", "--env-id", type=str, required=True)
+    parser.add_argument("-o", "--obs-mode", type=str)
+    parser.add_argument("--reward-mode", type=str)
+    parser.add_argument("-c", "--control-mode", type=str, default="pd_ee_delta_pose")
+    parser.add_argument("--render-mode", type=str, default="sensors")
+    parser.add_argument("--enable-sapien-viewer", action="store_true")
+    parser.add_argument("--record-dir", type=str)
+    args, opts = parser.parse_known_args()  # opts: leftover tokens, consumed below
+
+    # Parse env kwargs
+    print("opts:", opts)
+    eval_str = lambda x: eval(x[1:]) if x.startswith("@") else x  # values prefixed with "@" are eval'd into Python objects
+    env_kwargs = dict((x, eval_str(y)) for x, y in zip(opts[0::2], opts[1::2]))  # pair leftover tokens as key/value
+    print("env_kwargs:", env_kwargs)
+    args.env_kwargs = env_kwargs
+
+    return args
+
+
+def main():
+ np.set_printoptions(suppress=True, precision=3)
+ args = parse_args()
+
+ env: BaseEnv = gym.make(
+ args.env_id,
+ obs_mode=args.obs_mode,
+ reward_mode=args.reward_mode,
+ control_mode=args.control_mode,
+ render_mode=args.render_mode,
+ **args.env_kwargs
+ )
+
+ record_dir = args.record_dir
+ if record_dir:
+ record_dir = record_dir.format(env_id=args.env_id)
+ env = RecordEpisode(env, record_dir, render_mode=args.render_mode)
+
+ print("Observation space", env.observation_space)
+ print("Action space", env.action_space)
+ print("Control mode", env.control_mode)
+ print("Reward mode", env.reward_mode)
+
+ obs, _ = env.reset()
+ after_reset = True
+
+ # Viewer
+ if args.enable_sapien_viewer:
+ env.render_human()
+ renderer = visualization.ImageRenderer()
+ # disable all default plt shortcuts that are lowercase letters
+ plt.rcParams["keymap.fullscreen"].remove("f")
+ plt.rcParams["keymap.home"].remove("h")
+ plt.rcParams["keymap.home"].remove("r")
+ plt.rcParams["keymap.back"].remove("c")
+ plt.rcParams["keymap.forward"].remove("v")
+ plt.rcParams["keymap.pan"].remove("p")
+ plt.rcParams["keymap.zoom"].remove("o")
+ plt.rcParams["keymap.save"].remove("s")
+ plt.rcParams["keymap.grid"].remove("g")
+ plt.rcParams["keymap.yscale"].remove("l")
+ plt.rcParams["keymap.xscale"].remove("k")
+
+ def render_wait():
+ if not args.enable_sapien_viewer:
+ return
+ while True:
+ env.render_human()
+ sapien_viewer = env.viewer
+ if sapien_viewer.window.key_down("0"):
+ break
+
+ # Embodiment
+ has_base = "base" in env.agent.controller.configs
+ num_arms = sum("arm" in x for x in env.agent.controller.configs)
+ has_gripper = any("gripper" in x for x in env.agent.controller.configs)
+ gripper_action = 1
+ EE_ACTION = 0.1
+
+ while True:
+ # -------------------------------------------------------------------------- #
+ # Visualization
+ # -------------------------------------------------------------------------- #
+ if args.enable_sapien_viewer:
+ env.render_human()
+
+ render_frame = env.render().cpu().numpy()[0]
+
+ if after_reset:
+ after_reset = False
+ # Re-focus on opencv viewer
+ if args.enable_sapien_viewer:
+ renderer.close()
+ renderer = visualization.ImageRenderer()
+ pass
+ # -------------------------------------------------------------------------- #
+ # Interaction
+ # -------------------------------------------------------------------------- #
+ # Input
+ renderer(render_frame)
+ # key = opencv_viewer.imshow(render_frame.cpu().numpy()[0])
+ key = renderer.last_event.key if renderer.last_event is not None else None
+ body_action = np.zeros([3])
+ base_action = np.zeros([2]) # hardcoded for fetch robot
+
+ # Parse end-effector action
+ if (
+ "pd_ee_delta_pose" in args.control_mode
+ or "pd_ee_target_delta_pose" in args.control_mode
+ ):
+ ee_action = np.zeros([6])
+ elif (
+ "pd_ee_delta_pos" in args.control_mode
+ or "pd_ee_target_delta_pos" in args.control_mode
+ ):
+ ee_action = np.zeros([3])
+ else:
+ raise NotImplementedError(args.control_mode)
+
+ # Base. Hardcoded for Fetch robot at the moment. In the future write interface to do this
+ if has_base:
+ if key == "w": # forward
+ base_action[0] = 1
+ elif key == "s": # backward
+ base_action[0] = -1
+ elif key == "q": # rotate counter
+ base_action[2] = 1
+ elif key == "e": # rotate clockwise
+ base_action[2] = -1
+ elif key == "z": # lift
+ body_action[2] = 1
+ elif key == "x": # lower
+ body_action[2] = -1
+ elif key == "v": # rotate head left
+ body_action[0] = 1
+ elif key == "b": # rotate head right
+ body_action[0] = -1
+ elif key == "n": # tilt head down
+ body_action[1] = 1
+ elif key == "m": # rotate head up
+ body_action[1] = -1
+
+ # End-effector
+ if num_arms > 0:
+ # Position
+ if key == "i": # +x
+ ee_action[0] = EE_ACTION
+ elif key == "k": # -x
+ ee_action[0] = -EE_ACTION
+ elif key == "j": # +y
+ ee_action[1] = EE_ACTION
+ elif key == "l": # -y
+ ee_action[1] = -EE_ACTION
+ elif key == "u": # +z
+ ee_action[2] = EE_ACTION
+ elif key == "o": # -z
+ ee_action[2] = -EE_ACTION
+
+ # Rotation (axis-angle)
+ if key == "1":
+ ee_action[3:6] = (1, 0, 0)
+ elif key == "2":
+ ee_action[3:6] = (-1, 0, 0)
+ elif key == "3":
+ ee_action[3:6] = (0, 1, 0)
+ elif key == "4":
+ ee_action[3:6] = (0, -1, 0)
+ elif key == "5":
+ ee_action[3:6] = (0, 0, 1)
+ elif key == "6":
+ ee_action[3:6] = (0, 0, -1)
+
+ # Gripper
+ if has_gripper:
+ if key == "f": # open gripper
+ gripper_action = 1
+ elif key == "g": # close gripper
+ gripper_action = -1
+
+ # Other functions
+ if key == "0": # switch to SAPIEN viewer
+ render_wait()
+ elif key == "r": # reset env
+ obs, _ = env.reset()
+ gripper_action = 1
+ after_reset = True
+ continue
+ elif key == None: # exit
+ break
+
+ # Visualize observation
+ if key == "v":
+ if "pointcloud" in env.obs_mode:
+ import trimesh
+
+ xyzw = obs["pointcloud"]["xyzw"]
+ mask = xyzw[..., 3] > 0
+ rgb = obs["pointcloud"]["rgb"]
+ if "robot_seg" in obs["pointcloud"]:
+ robot_seg = obs["pointcloud"]["robot_seg"]
+ rgb = np.uint8(robot_seg * [11, 61, 127])
+ trimesh.PointCloud(xyzw[mask, :3], rgb[mask]).show()
+
+ # -------------------------------------------------------------------------- #
+ # Post-process action
+ # -------------------------------------------------------------------------- #
+ action_dict = dict(
+ base=base_action, arm=ee_action, body=body_action, gripper=gripper_action
+ )
+ action_dict = common.to_tensor(action_dict)
+ action = env.agent.controller.from_action_dict(action_dict)
+
+ obs, reward, terminated, truncated, info = env.step(action)
+ print("reward", reward)
+ print("terminated", terminated, "truncated", truncated)
+ print("info", info)
+
+ env.close()
+
+
+if __name__ == "__main__":
+ main()
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_random_action.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_random_action.py
new file mode 100644
index 0000000000000000000000000000000000000000..466254ffa8c5a31ea47af60b41adb053df01e365
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_random_action.py
@@ -0,0 +1,133 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.utils import gym_utils
+from mani_skill.utils.wrappers import RecordEpisode
+
+
+import tyro
+from dataclasses import dataclass
+from typing import List, Optional, Annotated, Union
+
+@dataclass
+class Args:
+    env_id: Annotated[str, tyro.conf.arg(aliases=["-e"])] = "PushCube-v1"
+    """The environment ID of the task you want to simulate"""
+
+    obs_mode: Annotated[str, tyro.conf.arg(aliases=["-o"])] = "none"
+    """Observation mode"""
+
+    robot_uids: Annotated[Optional[str], tyro.conf.arg(aliases=["-r"])] = None
+    """Robot UID(s) to use. Can be a comma separated list of UIDs or empty string to have no agents. If not given then defaults to the environments default robot"""
+
+    sim_backend: Annotated[str, tyro.conf.arg(aliases=["-b"])] = "auto"
+    """Which simulation backend to use. Can be 'auto', 'cpu', 'gpu'"""
+
+    reward_mode: Optional[str] = None
+    """Reward mode"""
+
+    num_envs: Annotated[int, tyro.conf.arg(aliases=["-n"])] = 1
+    """Number of environments to run."""
+
+    control_mode: Annotated[Optional[str], tyro.conf.arg(aliases=["-c"])] = None
+    """Control mode"""
+
+    render_mode: str = "rgb_array"
+    """Render mode"""
+
+    shader: str = "default"
+    """Change shader used for all cameras in the environment for rendering. Default is 'default'. Can also be 'rt' for ray tracing and generating photo-realistic renders. Can also be 'rt-fast' for a faster but lower quality ray-traced renderer"""
+
+    record_dir: Optional[str] = None
+    """Directory to save recordings"""
+
+    pause: Annotated[bool, tyro.conf.arg(aliases=["-p"])] = False
+    """If using human render mode, auto pauses the simulation upon loading"""
+
+    quiet: bool = False
+    """Disable verbose output."""
+
+    seed: Annotated[Optional[Union[int, List[int]]], tyro.conf.arg(aliases=["-s"])] = None
+    """Seed(s) for random actions and simulator. Can be a single integer or a list of integers. Default is None (no seeds)"""
+
+    rand_level: int = 0
+    """Randomization level of objects in the env"""
+
+def main(args: Args):  # run random actions in the selected env, optionally rendering/recording
+    np.set_printoptions(suppress=True, precision=3)
+    verbose = not args.quiet
+    if isinstance(args.seed, int):
+        args.seed = [args.seed]  # normalize to a list so seed[0] is always available
+    if args.seed is not None:
+        np.random.seed(args.seed[0])
+    parallel_in_single_scene = args.render_mode == "human"
+    if args.render_mode == "human" and args.obs_mode in ["sensor_data", "rgb", "rgbd", "depth", "point_cloud"]:
+        print("Disabling parallel single scene/GUI render as observation mode is a visual one. Change observation mode to state or state_dict to see a parallel env render")
+        parallel_in_single_scene = False
+    if args.render_mode == "human" and args.num_envs == 1:
+        parallel_in_single_scene = False  # nothing to tile with a single environment
+    env_kwargs = dict(
+        obs_mode=args.obs_mode,
+        reward_mode=args.reward_mode,
+        control_mode=args.control_mode,
+        render_mode=args.render_mode,
+        sensor_configs=dict(shader_pack=args.shader),
+        human_render_camera_configs=dict(shader_pack=args.shader),
+        viewer_camera_configs=dict(shader_pack=args.shader),
+        num_envs=args.num_envs,
+        sim_backend=args.sim_backend,
+        sim_config=dict(scene_config=dict(enable_pcm=False)),
+        enable_shadow=True,
+        parallel_in_single_scene=parallel_in_single_scene,
+        rand_level=args.rand_level,
+    )
+    if args.robot_uids is not None:
+        env_kwargs["robot_uids"] = tuple(args.robot_uids.split(","))  # comma-separated string -> tuple of UIDs
+    env: BaseEnv = gym.make(
+        args.env_id,
+        **env_kwargs
+    )
+    record_dir = args.record_dir
+    if record_dir:
+        record_dir = record_dir.format(env_id=args.env_id)
+        env = RecordEpisode(env, record_dir, info_on_video=False, save_trajectory=False, max_steps_per_video=gym_utils.find_max_episode_steps_value(env))
+
+    if verbose:
+        print("Observation space", env.observation_space)
+        print("Action space", env.action_space)
+        if env.unwrapped.agent is not None:
+            print("Control mode", env.unwrapped.control_mode)
+        print("Reward mode", env.unwrapped.reward_mode)
+
+    obs, _ = env.reset(seed=args.seed, options=dict(reconfigure=True))
+    if args.seed is not None and env.action_space is not None:
+        env.action_space.seed(args.seed[0])  # make the sampled random actions reproducible
+    if args.render_mode is not None:
+        viewer = env.render()
+        if isinstance(viewer, sapien.utils.Viewer):
+            viewer.paused = args.pause
+        env.render()
+    while True:
+        action = env.action_space.sample() if env.action_space is not None else None
+        obs, reward, terminated, truncated, info = env.step(action)
+        if verbose:
+            print("reward", reward)
+            print("terminated", terminated)
+            print("truncated", truncated)
+            print("info", info)
+        if args.render_mode is not None:
+            env.render()
+        if args.render_mode is None or args.render_mode != "human":
+            if (terminated | truncated).any():  # human mode runs forever; otherwise stop when any env finishes
+                break
+    env.close()
+
+    if record_dir:
+        print(f"Saving video to {record_dir}")
+
+
+if __name__ == "__main__":
+    parsed_args = tyro.cli(Args)
+    main(parsed_args)
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_reset_distribution.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_reset_distribution.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c9241ac8c7eb2bfca2008f6a9d172fb5c81e222
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_reset_distribution.py
@@ -0,0 +1,65 @@
+import argparse
+
+import gymnasium as gym
+import numpy as np
+
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.sensors.camera import CameraConfig
+from mani_skill.utils.wrappers.record import RecordEpisode
+def parse_args(args=None):
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-e", "--env-id", type=str, default="PushCube-v1", help="The environment ID of the task you want to simulate")
+ parser.add_argument("-b", "--sim-backend", type=str, default="auto", help="Which simulation backend to use. Can be 'auto', 'cpu', 'gpu'")
+ parser.add_argument("--shader", default="minimal", type=str, help="Change shader used for rendering. Default is 'default' which is very fast. Can also be 'rt' for ray tracing and generating photo-realistic renders. Can also be 'rt-fast' for a faster but lower quality ray-traced renderer")
+ parser.add_argument("--render-mode", type=str, default="rgb_array", help="Can be 'human' to open a viewer, or rgb_array / sensors which change the cameras saved videos use")
+ parser.add_argument("--record-dir", type=str, default="videos/reset_distributions", help="Where to save recorded videos. If none, no videos are saved")
+ parser.add_argument("-n", "--num-resets", type=int, default=20, help="Number of times to reset the environment")
+ parser.add_argument(
+ "-s",
+ "--seed",
+ type=int,
+ help="Seed the random actions and environment. Default is no seed",
+ )
+ args = parser.parse_args()
+ return args
+
+
+def main(args):  # visualize the env's reset (initial state) distribution, in GUI or as a video
+    if args.seed is not None:
+        np.random.seed(args.seed)
+    env: BaseEnv = gym.make(
+        args.env_id,
+        num_envs=1,
+        obs_mode="none",
+        reward_mode="none",
+        render_mode=args.render_mode,
+        sensor_configs=dict(shader_pack=args.shader),
+        human_render_camera_configs=dict(shader_pack=args.shader),
+        viewer_camera_configs=dict(shader_pack=args.shader),
+        sim_backend=args.sim_backend,
+    )
+    if args.record_dir is not None and args.render_mode != "human":
+        # we are not saving video via the wrapper as it does not save empty trajectories
+        env = RecordEpisode(env, output_dir=args.record_dir, save_video=False, save_trajectory=False, video_fps=10)
+    env.reset(seed=args.seed)
+
+    if args.render_mode == "human":
+        viewer = env.render()
+        print("Rendering reset distribution in GUI. Press 'r' to reset and 'q' to quit")
+        while True:
+            viewer = env.render_human()
+            if viewer.window.key_press("r"):
+                env.reset()
+            elif viewer.window.key_press("q"):
+                break
+    else:
+        for _ in range(args.num_resets):
+            env.reset()
+            env.render_images.append(env.capture_image())  # NOTE(review): pokes RecordEpisode internals (render_images/capture_image) — verify against wrapper API
+        name = f"{args.env_id}_reset_distribution"
+        env.flush_video(name=name)
+        print(f"Saved video to {env.output_dir}/{name}.mp4")
+    env.close()
+
+if __name__ == "__main__":
+    main(parse_args())
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_robot.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_robot.py
new file mode 100644
index 0000000000000000000000000000000000000000..54fb6e2a23cdfabf8d54c3f4a357382acf66fa1b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_robot.py
@@ -0,0 +1,95 @@
+"""
+Instantiates an empty environment with a floor and attempts to place any given robot in it
+"""
+
+import argparse
+
+import gymnasium as gym
+import mani_skill
+from mani_skill.agents.controllers.base_controller import DictController
+from mani_skill.envs.sapien_env import BaseEnv
+def parse_args(args=None):
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-r", "--robot-uid", type=str, default="panda", help="The id of the robot to place in the environment")
+ parser.add_argument("-b", "--sim-backend", type=str, default="auto", help="Which simulation backend to use. Can be 'auto', 'cpu', 'gpu'")
+ parser.add_argument("-c", "--control-mode", type=str, default="pd_joint_pos", help="The control mode to use. Note that for new robots being implemented if the _controller_configs is not implemented in the selected robot, we by default provide two default controllers, 'pd_joint_pos' and 'pd_joint_delta_pos' ")
+ parser.add_argument("-k", "--keyframe", type=str, help="The name of the keyframe of the robot to display")
+ parser.add_argument("--shader", default="default", type=str, help="Change shader used for rendering. Default is 'default' which is very fast. Can also be 'rt' for ray tracing and generating photo-realistic renders. Can also be 'rt-fast' for a faster but lower quality ray-traced renderer")
+ parser.add_argument("--keyframe-actions", action="store_true", help="Whether to use the selected keyframe to set joint targets to try and hold the robot in its position")
+ parser.add_argument("--random-actions", action="store_true", help="Whether to sample random actions to control the agent. If False, no control signals are sent and it is just rendering.")
+ parser.add_argument("--none-actions", action="store_true", help="If set, then the scene and rendering will update each timestep but no joints will be controlled via code. You can use this to control the robot freely via the GUI.")
+ parser.add_argument("--zero-actions", action="store_true", help="Whether to send zero actions to the robot. If False, no control signals are sent and it is just rendering.")
+ parser.add_argument("--sim-freq", type=int, default=100, help="Simulation frequency")
+ parser.add_argument("--control-freq", type=int, default=20, help="Control frequency")
+ parser.add_argument(
+ "-s",
+ "--seed",
+ type=int,
+ help="Seed the random actions and environment. Default is no seed",
+ )
+ args = parser.parse_args()
+ return args
+
+def main():  # place the selected robot in an empty scene and display/control it in the GUI
+    args = parse_args()
+    env = gym.make(
+        "Empty-v1",
+        obs_mode="none",
+        reward_mode="none",
+        enable_shadow=True,
+        control_mode=args.control_mode,
+        robot_uids=args.robot_uid,
+        sensor_configs=dict(shader_pack=args.shader),
+        human_render_camera_configs=dict(shader_pack=args.shader),
+        viewer_camera_configs=dict(shader_pack=args.shader),
+        render_mode="human",
+        sim_config=dict(sim_freq=args.sim_freq, control_freq=args.control_freq),
+        sim_backend=args.sim_backend,
+    )
+    env.reset(seed=0)
+    env: BaseEnv = env.unwrapped
+    print(f"Selected robot {args.robot_uid}. Control mode: {args.control_mode}")
+    print("Selected Robot has the following keyframes to view: ")
+    print(env.agent.keyframes.keys())
+    env.agent.robot.set_qpos(env.agent.robot.qpos * 0)  # start from the all-zeros joint configuration
+    kf = None
+    if len(env.agent.keyframes) > 0:
+        kf_name = None
+        if args.keyframe is not None:
+            kf_name = args.keyframe
+            kf = env.agent.keyframes[kf_name]
+        else:
+            for kf_name, kf in env.agent.keyframes.items():
+                # keep the first keyframe we find
+                break
+        if kf.qpos is not None:
+            env.agent.robot.set_qpos(kf.qpos)
+        if kf.qvel is not None:
+            env.agent.robot.set_qvel(kf.qvel)
+        env.agent.robot.set_pose(kf.pose)
+        if kf_name is not None:
+            print(f"Viewing keyframe {kf_name}")
+    if env.gpu_sim_enabled:
+        env.scene._gpu_apply_all()  # push the manually-set state to the GPU sim before rendering
+        env.scene.px.gpu_update_articulation_kinematics()
+        env.scene._gpu_fetch_all()
+    viewer = env.render()
+    viewer.paused = True  # always start paused so the pose can be inspected
+    viewer = env.render()
+    while True:
+        if args.random_actions:
+            env.step(env.action_space.sample())
+        elif args.none_actions:
+            env.step(None)  # advance sim/render without commanding any joints
+        elif args.zero_actions:
+            env.step(env.action_space.sample() * 0)
+        elif args.keyframe_actions:
+            assert kf is not None, "this robot has no keyframes, cannot use it to set actions"
+            if isinstance(env.agent.controller, DictController):
+                env.step(env.agent.controller.from_qpos(kf.qpos))
+            else:
+                env.step(kf.qpos)
+        viewer = env.render()
+
+if __name__ == "__main__":
+    main()
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_vis_pcd.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_vis_pcd.py
new file mode 100644
index 0000000000000000000000000000000000000000..54404fe49f13e17bd38220081e71a0250470ba2c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_vis_pcd.py
@@ -0,0 +1,61 @@
+import argparse
+
+import gymnasium as gym
+import numpy as np
+
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.sensors.camera import CameraConfig
+import trimesh
+import trimesh.scene
+def parse_args(args=None):
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-e", "--env-id", type=str, default="PushCube-v1", help="The environment ID of the task you want to simulate")
+ parser.add_argument("--cam-width", type=int, help="Override the width of every camera in the environment")
+ parser.add_argument("--cam-height", type=int, help="Override the height of every camera in the environment")
+ parser.add_argument(
+ "-s",
+ "--seed",
+ type=int,
+ help="Seed the random actions and environment. Default is no seed",
+ )
+ args = parser.parse_args()
+ return args
+
+
+def main(args):  # step the env with random actions and show each step's point cloud in trimesh
+    if args.seed is not None:
+        np.random.seed(args.seed)
+    sensor_configs = dict()
+    if args.cam_width:
+        sensor_configs["width"] = args.cam_width
+    if args.cam_height:
+        sensor_configs["height"] = args.cam_height
+    env: BaseEnv = gym.make(
+        args.env_id,
+        obs_mode="pointcloud",
+        reward_mode="none",
+        sensor_configs=sensor_configs,
+    )
+
+    obs, _ = env.reset(seed=args.seed)
+    while True:
+        action = env.action_space.sample()
+        obs, reward, terminated, truncated, info = env.step(action)
+        xyz = obs["pointcloud"]["xyzw"][0, ..., :3]  # drop homogeneous w; env index 0
+        colors = obs["pointcloud"]["rgb"][0]
+        pcd = trimesh.points.PointCloud(xyz, colors)
+
+
+        # view from first camera
+        for uid, config in env.unwrapped._sensor_configs.items():
+            if isinstance(config, CameraConfig):
+                cam2world = obs["sensor_param"][uid]["cam2world_gl"][0]
+                camera = trimesh.scene.Camera(uid, (1024, 1024), fov=(np.rad2deg(config.fov), np.rad2deg(config.fov)))
+                break  # NOTE(review): camera/cam2world are unbound if no CameraConfig exists — verify envs always have one
+        trimesh.Scene([pcd], camera=camera, camera_transform=cam2world).show()
+        if terminated or truncated:
+            break
+    env.close()
+
+if __name__ == "__main__":
+    main(parse_args())
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_vis_segmentation.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_vis_segmentation.py
new file mode 100644
index 0000000000000000000000000000000000000000..3fc8bfb484b527421b50ab155742532a884be0a8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_vis_segmentation.py
@@ -0,0 +1,144 @@
+import signal
+
+from mani_skill.utils import common
+from mani_skill.utils import visualization
+signal.signal(signal.SIGINT, signal.SIG_DFL) # allow ctrl+c
+
+import argparse
+
+import gymnasium as gym
+import numpy as np
+# color palette (47 distinct RGB triplets) generated via https://medialab.github.io/iwanthue/
+color_pallete = np.array([[164,74,82],
+[85,200,95],
+[149,88,210],
+[111,185,57],
+[89,112,223],
+[194,181,43],
+[219,116,216],
+[71,146,48],
+[214,70,164],
+[157,183,57],
+[154,68,158],
+[82,196,133],
+[225,64,121],
+[50,141,77],
+[224,59,84],
+[74,201,189],
+[237,93,68],
+[77,188,225],
+[182,58,29],
+[77,137,200],
+[230,155,53],
+[93,90,162],
+[213,106,38],
+[150,153,224],
+[120,134,37],
+[186,135,220],
+[78,110,27],
+[182,61,117],
+[106,184,145],
+[184,62,65],
+[44,144,124],
+[229,140,186],
+[48,106,60],
+[167,102,155],
+[160,187,114],
+[150,74,107],
+[204,177,86],
+[34,106,77],
+[226,129,94],
+[72,106,45],
+[222,125,129],
+[101,146,86],
+[150,89,44],
+[147,138,73],
+[210,156,106],
+[102,96,32],
+[168,124,34]]
+, np.uint8)
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.sensors.camera import Camera
+from mani_skill.utils.structs import Actor, Link
+def parse_args(args=None):
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-e", "--env-id", type=str, default="PushCube-v1", help="The environment ID of the task you want to simulate")
+ parser.add_argument("--id", type=str, help="The ID or name of actor you want to segment and render")
+ parser.add_argument("--num-envs", type=int, default=1, help="Number of environments to run. Used for some basic testing and not visualized")
+ parser.add_argument("--cam-width", type=int, help="Override the width of every camera in the environment")
+ parser.add_argument("--cam-height", type=int, help="Override the height of every camera in the environment")
+ parser.add_argument(
+ "-s",
+ "--seed",
+ type=int,
+ help="Seed the random actions and environment. Default is no seed",
+ )
+ args = parser.parse_args()
+ return args
+
+
+def main(args):  # step with random actions and show each camera's RGB next to a colorized segmentation
+    if args.seed is not None:
+        np.random.seed(args.seed)
+    sensor_configs = dict()
+    if args.cam_width:
+        sensor_configs["width"] = args.cam_width
+    if args.cam_height:
+        sensor_configs["height"] = args.cam_height
+
+    env: BaseEnv = gym.make(
+        args.env_id,
+        obs_mode="rgb+depth+segmentation",
+        num_envs=args.num_envs,
+        sensor_configs=sensor_configs
+    )
+
+    obs, _ = env.reset(seed=args.seed)
+    selected_id = args.id
+    if selected_id is not None and selected_id.isdigit():
+        selected_id = int(selected_id)  # numeric --id is a segmentation ID; otherwise treated as a name below
+
+    n_cams = 0
+    for config in env.unwrapped._sensors.values():
+        if isinstance(config, Camera):
+            n_cams += 1
+    print(f"Visualizing {n_cams} RGBD cameras")
+
+    print("ID to Actor/Link name mappings")
+    print("0: Background")
+
+    reverse_seg_id_map = dict()  # name -> segmentation ID, to resolve --id given as a name
+    for obj_id, obj in sorted(env.unwrapped.segmentation_id_map.items()):
+        if isinstance(obj, Actor):
+            print(f"{obj_id}: Actor, name - {obj.name}")
+        elif isinstance(obj, Link):
+            print(f"{obj_id}: Link, name - {obj.name}")
+        reverse_seg_id_map[obj.name] = obj_id
+    if selected_id is not None and not isinstance(selected_id, int):
+        selected_id = reverse_seg_id_map[selected_id]
+
+    renderer = visualization.ImageRenderer()
+    while True:  # runs until interrupted (SIGINT restored to default at import time)
+        action = env.action_space.sample()
+        obs, reward, terminated, truncated, info = env.step(action)
+        cam_num = 0
+        imgs = []
+        for cam in obs["sensor_data"].keys():
+            if "rgb" in obs["sensor_data"][cam]:
+
+                rgb = common.to_numpy(obs["sensor_data"][cam]["rgb"][0])
+                seg = common.to_numpy(obs["sensor_data"][cam]["segmentation"][0])
+                if selected_id is not None:
+                    seg = seg == selected_id  # boolean mask: only the selected object is highlighted
+                imgs.append(rgb)
+                seg_rgb = np.zeros_like(rgb)
+                seg = seg % len(color_pallete)  # wrap IDs into the palette range
+                for id, color in enumerate(color_pallete):
+                    seg_rgb[(seg == id)[..., 0]] = color
+                imgs.append(seg_rgb)
+                cam_num += 1
+        img = visualization.tile_images(imgs, nrows=n_cams)
+        renderer(img)
+
+if __name__ == "__main__":
+    main(parse_args())
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_vis_textures.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_vis_textures.py
new file mode 100644
index 0000000000000000000000000000000000000000..bdc4184d118037e5c1b55f79f84dc520f06be829
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/demo_vis_textures.py
@@ -0,0 +1,85 @@
+import signal
+import sys
+
+from matplotlib import pyplot as plt
+import torch
+
+from mani_skill.utils import common
+from mani_skill.utils import visualization
+signal.signal(signal.SIGINT, signal.SIG_DFL) # allow ctrl+c
+
+import argparse
+
+import gymnasium as gym
+import numpy as np
+
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.sensors.camera import Camera
def parse_args(args=None):
    """Parse CLI options for the texture visualization demo.

    Args:
        args: optional list of argument strings; when None, sys.argv is used.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--env-id", type=str, default="PushCube-v1", help="The environment ID of the task you want to simulate")
    parser.add_argument("-o", "--obs-mode", type=str, default="rgb+depth", help="Can be rgb or rgb+depth, rgb+normal, albedo+depth etc. Which ever image-like textures you want to visualize can be tacked on")
    # Bug fix: help text claimed the default shader is 'minimal' while the actual default is 'default'.
    parser.add_argument("--shader", default="default", type=str, help="Change shader used for all cameras in the environment for rendering. Default is 'default' which is very fast. Can also be 'rt' for ray tracing and generating photo-realistic renders. Can also be 'rt-fast' for a faster but lower quality ray-traced renderer")
    parser.add_argument("--num-envs", type=int, default=1, help="Number of environments to run. Used for some basic testing and not visualized")
    parser.add_argument("--cam-width", type=int, help="Override the width of every camera in the environment")
    parser.add_argument("--cam-height", type=int, help="Override the height of every camera in the environment")
    parser.add_argument(
        "-s",
        "--seed",
        type=int,
        help="Seed the random actions and environment. Default is no seed",
    )
    # Bug fix: forward `args` so programmatic calls like parse_args([...]) are
    # honored instead of always reading sys.argv.
    return parser.parse_args(args)
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+
def main(args):
    """Step a ManiSkill env with random actions and tile every image-like
    sensor texture (rgb, depth, normal, ...) into a live visualization window."""
    if args.seed is not None:
        np.random.seed(args.seed)
    sensor_configs = dict()
    if args.cam_width:
        sensor_configs["width"] = args.cam_width
    if args.cam_height:
        sensor_configs["height"] = args.cam_height
    sensor_configs["shader_pack"] = args.shader
    env: BaseEnv = gym.make(
        args.env_id,
        obs_mode=args.obs_mode,
        num_envs=args.num_envs,
        sensor_configs=sensor_configs
    )

    obs, _ = env.reset(seed=args.seed)
    # Count cameras so the tiled output has one row per camera.
    n_cams = 0
    for config in env.unwrapped._sensors.values():
        if isinstance(config, Camera):
            n_cams += 1
    print(f"Visualizing {n_cams} cameras")

    renderer = visualization.ImageRenderer()

    while True:
        action = env.action_space.sample()
        obs, reward, terminated, truncated, info = env.step(action)
        imgs = []
        for cam in obs["sensor_data"].keys():
            for texture in obs["sensor_data"][cam].keys():
                tex = obs["sensor_data"][cam][texture]
                if tex.dtype == torch.uint8:
                    # Already displayable 8-bit data (e.g. rgb/albedo).
                    imgs.append(common.to_numpy(tex[0]))
                else:
                    # Float texture (e.g. depth): min-max normalize into [0, 1]
                    # before the uint8 conversion. Bug fix: the original divided
                    # by (max - min) without subtracting the min, which offsets
                    # the result (and overflows uint8 when min != 0), and it
                    # divided by zero for a constant-valued texture.
                    data = common.to_numpy(tex[0]).astype(np.float32)
                    span = data.max() - data.min()
                    if span > 0:
                        data = (data - data.min()) / span
                    else:
                        data = np.zeros_like(data)
                    data_rgb = np.zeros((data.shape[0], data.shape[1], 3), dtype=np.uint8)
                    data_rgb[..., :] = data * 255
                    imgs.append(data_rgb)
        img = visualization.tile_images(imgs, nrows=n_cams)
        renderer(img)
+
# CLI entry point.
if __name__ == "__main__":
    main(parse_args())
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/README.md b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..30a4194538c8c974868320027c4a062010e3e5c9
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/README.md
@@ -0,0 +1,3 @@
+# Motion Planning Examples
+
+This folder has example code for running motion planning to solve various ManiSkill tasks. These are also used to generate some of the demonstration datasets
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/__init__.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/__pycache__/__init__.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf994c6b6d9dbaf66ace4e3b90d7fe31551f2f1c
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/__pycache__/__init__.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/__init__.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/motionplanner.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/motionplanner.py
new file mode 100644
index 0000000000000000000000000000000000000000..f42e70e1034a5ae3816781ea4133af41cac660d8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/motionplanner.py
@@ -0,0 +1,273 @@
+import mplib
+import numpy as np
+import sapien
+import trimesh
+
+from mani_skill.agents.base_agent import BaseAgent
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.envs.scene import ManiSkillScene
+from mani_skill.utils.structs.pose import to_sapien_pose
+import sapien.physx as physx
# Gripper command targets passed as the last two action dims by the solver
# below; presumably one value per finger joint — TODO confirm against the
# Piper controller configuration.
OPEN = [1,-1]
CLOSED = [-1,1]
+
+
class PiperArmMotionPlanningSolver:
    """Motion-planning helper for the Piper arm built on mplib.

    Wraps an mplib.Planner around the env's robot and exposes high-level
    primitives (RRTConnect / screw motion to a pose, open/close gripper) that
    step the underlying ManiSkill env with joint-position actions.
    """

    def __init__(
        self,
        env: BaseEnv,
        debug: bool = False,
        vis: bool = True,
        base_pose: sapien.Pose = None, # TODO mplib doesn't support robot base being anywhere but 0
        visualize_target_grasp_pose: bool = True,
        print_env_info: bool = True,
        joint_vel_limits=0.9,
        joint_acc_limits=0.9,
    ):
        self.env = env
        self.base_env: BaseEnv = env.unwrapped
        self.env_agent: BaseAgent = self.base_env.agent
        self.robot = self.env_agent.robot
        self.joint_vel_limits = joint_vel_limits
        self.joint_acc_limits = joint_acc_limits

        self.base_pose = to_sapien_pose(base_pose)

        self.planner = self.setup_planner()
        self.control_mode = self.base_env.control_mode

        self.debug = debug
        self.vis = vis
        self.print_env_info = print_env_info
        self.visualize_target_grasp_pose = visualize_target_grasp_pose
        self.gripper_state = OPEN
        self.grasp_pose_visual = None #sapien.Pose(p=[0.4, -0.2, 0.4], q = [1, 0,0, 0])
        # Reuse an existing "grasp_pose_visual" marker actor if a previous
        # solver instance already built one in this scene.
        if self.vis and self.visualize_target_grasp_pose:
            if "grasp_pose_visual" not in self.base_env.scene.actors:
                self.grasp_pose_visual = build_piper_gripper_grasp_pose_visual(
                    self.base_env.scene
                )
            else:
                self.grasp_pose_visual = self.base_env.scene.actors["grasp_pose_visual"]
            self.grasp_pose_visual.set_pose(self.base_env.agent.tcp.pose)
        self.elapsed_steps = 0

        # Optional obstacle point cloud forwarded to mplib for collision checks.
        self.use_point_cloud = False #
        self.collision_pts_changed = False
        self.all_collision_pts = None

    def render_wait(self):
        # In debug+vis mode, pause the human viewer until the user presses [c].
        if not self.vis or not self.debug:
            return
        print("Press [c] to continue")
        viewer = self.base_env.render_human()
        while True:
            if viewer.window.key_down("c"):
                break
            self.base_env.render_human()

    def setup_planner(self):
        """Create an mplib.Planner for this robot (move group: gripper_base),
        loading the SRDF assumed to live next to the URDF."""
        link_names = [link.get_name() for link in self.robot.get_links()]
        joint_names = [joint.get_name() for joint in self.robot.get_active_joints()]
        # print (joint_names)
        # print (link_names)
        planner = mplib.Planner(
            urdf=self.env_agent.urdf_path,
            srdf=self.env_agent.urdf_path.replace(".urdf", ".srdf"),
            user_link_names=link_names,
            user_joint_names=joint_names,
            move_group="gripper_base",
            joint_vel_limits=np.ones(6) * self.joint_vel_limits,
            joint_acc_limits=np.ones(6) * self.joint_acc_limits,
        )
        planner.set_base_pose(np.hstack([self.base_pose.p, self.base_pose.q]))
        return planner

    def follow_path(self, result, refine_steps: int = 0):
        """Execute a planned joint trajectory by stepping the env waypoint by
        waypoint.

        Args:
            result: mplib plan result dict with "position" (and "velocity").
            refine_steps: extra steps holding the final waypoint to settle.

        Returns:
            The (obs, reward, terminated, truncated, info) of the last step.
        """
        n_step = result["position"].shape[0]
        for i in range(n_step + refine_steps):
            # Past the last waypoint (refine steps), keep commanding it.
            qpos = result["position"][min(i, n_step - 1)]
            if self.control_mode == "pd_joint_pos_vel":
                qvel = result["velocity"][min(i, n_step - 1)]
                action = np.hstack([qpos, qvel, self.gripper_state])
            else:
                action = np.hstack([qpos, self.gripper_state])
            obs, reward, terminated, truncated, info = self.env.step(action)
            self.elapsed_steps += 1
            if self.print_env_info:
                print(
                    f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
                )
            if self.vis:
                self.base_env.render_human()
        return obs, reward, terminated, truncated, info

    def move_to_pose_with_RRTConnect(
        self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0
    ):
        """Plan to a world-frame pose with RRTConnect and execute the result.

        Returns -1 if planning fails; the raw plan result if dry_run; otherwise
        the last env step output from follow_path.
        """
        pose = to_sapien_pose(pose)
        if self.grasp_pose_visual is not None:
            self.grasp_pose_visual.set_pose(pose)
        pose = sapien.Pose(p=pose.p, q=pose.q)
        result = self.planner.plan_qpos_to_pose(
            np.concatenate([pose.p, pose.q]),
            self.robot.get_qpos().cpu().numpy()[0],
            time_step=self.base_env.control_timestep,
            use_point_cloud=self.use_point_cloud,
            wrt_world=True,
        )
        if result["status"] != "Success":
            print(result["status"])
            self.render_wait()
            return -1
        self.render_wait()
        if dry_run:
            return result
        return self.follow_path(result, refine_steps=refine_steps)

    def move_to_pose_with_screw(
        self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0
    ):
        """Plan to a pose with screw (straight twist) motion, retrying once
        before giving up.

        Returns -1 if both attempts fail; the raw plan result if dry_run;
        otherwise the last env step output from follow_path.
        """
        pose = to_sapien_pose(pose)
        # try screw two times before giving up
        if self.grasp_pose_visual is not None:
            self.grasp_pose_visual.set_pose(pose)
        pose = sapien.Pose(p=pose.p , q=pose.q)
        result = self.planner.plan_screw(
            np.concatenate([pose.p, pose.q]),
            self.robot.get_qpos().cpu().numpy()[0],
            time_step=self.base_env.control_timestep,
            use_point_cloud=self.use_point_cloud,
        )
        if result["status"] != "Success":
            result = self.planner.plan_screw(
                np.concatenate([pose.p, pose.q]),
                self.robot.get_qpos().cpu().numpy()[0],
                time_step=self.base_env.control_timestep,
                use_point_cloud=self.use_point_cloud,
            )
            if result["status"] != "Success":
                print(result["status"])
                self.render_wait()
                return -1
        self.render_wait()
        if dry_run:
            return result
        return self.follow_path(result, refine_steps=refine_steps)

    def open_gripper(self):
        """Command the gripper open for 6 steps while holding the current arm
        qpos (the last 2 joints are assumed to be the fingers)."""
        self.gripper_state = OPEN
        # print(self.gripper_state)
        qpos = self.robot.get_qpos()[0, :-2].cpu().numpy()
        for i in range(6):
            if self.control_mode == "pd_joint_pos":
                action = np.hstack([qpos, self.gripper_state])
            else:
                # pd_joint_pos_vel: command zero joint velocities.
                action = np.hstack([qpos, qpos * 0, self.gripper_state])
            # Debug print to check the action being sent
            #print(f"Action at step {i}: {action}")
            obs, reward, terminated, truncated, info = self.env.step(action)
            self.elapsed_steps += 1
            if self.print_env_info:
                print(
                    f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
                )
            if self.vis:
                self.base_env.render_human()
        return obs, reward, terminated, truncated, info

    def close_gripper(self, t=6, gripper_state = CLOSED):
        """Command the gripper to `gripper_state` (default CLOSED) for `t` steps
        while holding the current arm qpos."""
        self.gripper_state = gripper_state
        #print(self.gripper_state)
        qpos = self.robot.get_qpos()[0, :-2].cpu().numpy()
        #print(qpos.shape)
        for i in range(t):
            if self.control_mode == "pd_joint_pos":
                action = np.hstack([qpos, self.gripper_state])
            else:
                # pd_joint_pos_vel: command zero joint velocities.
                action = np.hstack([qpos, qpos * 0, self.gripper_state])
            obs, reward, terminated, truncated, info = self.env.step(action)
            self.elapsed_steps += 1
            if self.print_env_info:
                print(
                    f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
                )
            if self.vis:
                self.base_env.render_human()
        return obs, reward, terminated, truncated, info

    def add_box_collision(self, extents: np.ndarray, pose: sapien.Pose):
        """Add a box obstacle (sampled as 512 surface points) to the planner's
        collision point cloud and enable point-cloud collision checking."""
        self.use_point_cloud = True
        box = trimesh.creation.box(extents, transform=pose.to_transformation_matrix())
        pts, _ = trimesh.sample.sample_surface(box, 512)
        if self.all_collision_pts is None:
            self.all_collision_pts = pts
        else:
            self.all_collision_pts = np.vstack([self.all_collision_pts, pts])
        self.planner.update_point_cloud(self.all_collision_pts)

    def add_collision_pts(self, pts: np.ndarray):
        # Append raw obstacle points and push the cloud to the planner.
        # NOTE(review): unlike add_box_collision this does NOT set
        # use_point_cloud = True — confirm callers are expected to enable it.
        if self.all_collision_pts is None:
            self.all_collision_pts = pts
        else:
            self.all_collision_pts = np.vstack([self.all_collision_pts, pts])
        self.planner.update_point_cloud(self.all_collision_pts)

    def clear_collisions(self):
        # Drop all obstacle points and disable point-cloud collision checking.
        self.all_collision_pts = None
        self.use_point_cloud = False

    def close(self):
        # Nothing to release; kept for API symmetry with env-like objects.
        pass
+
+from transforms3d import quaternions
+
+
def build_piper_gripper_grasp_pose_visual(scene: ManiSkillScene):
    """Build a kinematic, visual-only gizmo actor marking a target grasp pose
    for the Piper gripper: a sphere for the approach point, green boxes for the
    wrist stem and palm bar, and a blue/red box per finger."""
    width = 0.01
    grasp_width = 0.05
    # Both finger boxes share the same 90-degree tilt about the y axis.
    finger_quat = quaternions.axangle2quat(np.array([0, 1, 0]), theta=np.pi / 2)
    finger_x = 0.03 - width * 3
    finger_z = 0.03 - 0.05

    builder = scene.create_actor_builder()

    # Approach-point marker.
    builder.add_sphere_visual(
        pose=sapien.Pose(p=[0, 0, 0.08]),
        radius=width,
        material=sapien.render.RenderMaterial(base_color=[0.3, 0.4, 0.8, 0.7]),
    )
    # Wrist stem.
    builder.add_box_visual(
        pose=sapien.Pose(p=[0, 0, -0.08]),
        half_size=[width, width, 0.02],
        material=sapien.render.RenderMaterial(base_color=[0, 1, 0, 0.7]),
    )
    # Palm bar spanning the grasp width.
    builder.add_box_visual(
        pose=sapien.Pose(p=[0, 0, -0.05]),
        half_size=[width, grasp_width, width],
        material=sapien.render.RenderMaterial(base_color=[0, 1, 0, 0.7]),
    )
    # Finger on the +y side (blue).
    builder.add_box_visual(
        pose=sapien.Pose(
            p=[finger_x, grasp_width + width, finger_z],
            q=finger_quat,
        ),
        half_size=[0.04, width, width],
        material=sapien.render.RenderMaterial(base_color=[0, 0, 1, 0.7]),
    )
    # Finger on the -y side (red).
    builder.add_box_visual(
        pose=sapien.Pose(
            p=[finger_x, -grasp_width - width, finger_z],
            q=finger_quat,
        ),
        half_size=[0.04, width, width],
        material=sapien.render.RenderMaterial(base_color=[1, 0, 0, 0.7]),
    )
    return builder.build_kinematic(name="grasp_pose_visual")
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/run.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7f80df6cf3056b6c02041c65fbf75bccbfcc72d
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/run.py
@@ -0,0 +1,158 @@
+import multiprocessing as mp
+import os
+from copy import deepcopy
+import time
+import argparse
+import gymnasium as gym
+import numpy as np
+from tqdm import tqdm
+import os.path as osp
+import sapien.core as sapien
+from mani_skill.utils.wrappers.record import RecordEpisode
+from mani_skill.trajectory.merge_trajectory import merge_trajectories
+from mani_skill.examples.motionplanning.agilex.solutions import solveBowlOnRack
+import tkinter as tk
# Map from env id to its scripted motion-planning solver function.
MP_SOLUTIONS = {
    "PlaceBowlOnRack-v1": solveBowlOnRack
}
def parse_args(args=None):
    """Parse CLI options for the motion-planning demo generator.

    Args:
        args: optional list of argument strings; when None, sys.argv is used.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-e", "--env-id", type=str, default="PickCube-v1", help=f"Environment to run motion planning solver on. Available options are {list(MP_SOLUTIONS.keys())}")
    parser.add_argument("-o", "--obs-mode", type=str, default="none", help="Observation mode to use. Usually this is kept as 'none' as observations are not necesary to be stored, they can be replayed later via the mani_skill.trajectory.replay_trajectory script.")
    parser.add_argument("-n", "--num-traj", type=int, default=10, help="Number of trajectories to generate.")
    parser.add_argument("--only-count-success", action="store_true", help="If true, generates trajectories until num_traj of them are successful and only saves the successful trajectories/videos")
    parser.add_argument("--reward-mode", type=str)
    parser.add_argument("-b", "--sim-backend", type=str, default="auto", help="Which simulation backend to use. Can be 'auto', 'cpu', 'gpu'")
    parser.add_argument("--render-mode", type=str, default="rgb_array", help="can be 'sensors' or 'rgb_array' which only affect what is saved to videos")
    parser.add_argument("--vis", action="store_true", help="whether or not to open a GUI to visualize the solution live")
    parser.add_argument("--save-video", action="store_true", help="whether or not to save videos locally")
    parser.add_argument("--traj-name", type=str, help="The name of the trajectory .h5 file that will be created.")
    parser.add_argument("--shader", default="default", type=str, help="Change shader used for rendering. Default is 'default' which is very fast. Can also be 'rt' for ray tracing and generating photo-realistic renders. Can also be 'rt-fast' for a faster but lower quality ray-traced renderer")
    parser.add_argument("--record-dir", type=str, default="demos", help="where to save the recorded trajectories")
    parser.add_argument("--num-procs", type=int, default=1, help="Number of processes to use to help parallelize the trajectory replay process. This uses CPU multiprocessing and only works with the CPU simulation backend at the moment.")
    # Bug fix: forward `args` so programmatic calls like parse_args([...]) are
    # honored instead of always reading sys.argv.
    return parser.parse_args(args)
+
def _main(args, proc_id: int = 0, start_seed: int = 0) -> str:
    """Generate motion-planning demonstrations for one worker process.

    Repeatedly runs the scripted solver for args.env_id, recording each
    trajectory (and optionally video) with RecordEpisode until args.num_traj
    trajectories have been kept.

    Args:
        args: parsed CLI options (see parse_args).
        proc_id: index of this worker; suffixes the trajectory filename when
            multiple processes run in parallel.
        start_seed: first episode seed for this worker; each attempt
            increments it.

    Returns:
        Path to the .h5 trajectory file written by this worker.
    """
    env_id = args.env_id
    # Fail fast before building the env if there is no scripted solution.
    if env_id not in MP_SOLUTIONS:
        raise RuntimeError(f"No already written motion planning solutions for {env_id}. Available options are {list(MP_SOLUTIONS.keys())}")
    env = gym.make(
        env_id,
        robot_uids="piper",
        obs_mode=args.obs_mode,
        control_mode="pd_joint_pos",
        # control_mode="pd_joint_pos_vel",
        render_mode=args.render_mode,
        reward_mode="dense" if args.reward_mode is None else args.reward_mode,
        sensor_configs=dict(shader_pack=args.shader),
        human_render_camera_configs=dict(
            shader_pack=args.shader,
        ),
        viewer_camera_configs=dict(shader_pack=args.shader),
        sim_backend=args.sim_backend,
        sim_config=dict(
            scene_config=dict(
                enable_ccd=True,
                enable_pcm=False,
                enable_enhanced_determinism=True,
            )
        )
    )

    if not args.traj_name:
        # Default trajectory name: timestamp, unique per run.
        new_traj_name = time.strftime("%Y%m%d_%H%M%S")
    else:
        new_traj_name = args.traj_name

    if args.num_procs > 1:
        # Suffix with the worker id so parallel workers write distinct files.
        new_traj_name = new_traj_name + "." + str(proc_id)
    env = RecordEpisode(
        env,
        output_dir=osp.join(args.record_dir, env_id, "motionplanning"),
        trajectory_name=new_traj_name, save_video=args.save_video,
        source_type="motionplanning",
        source_desc="official motion planning solution from ManiSkill contributors",
        video_fps=30,
        save_on_reset=False
    )
    output_h5_path = env._h5_file.filename
    solve = MP_SOLUTIONS[env_id]
    print(f"Motion Planning Running on {env_id}")
    pbar = tqdm(range(args.num_traj), desc=f"proc_id: {proc_id}")
    seed = start_seed
    successes = []
    solution_episode_lengths = []
    failed_motion_plans = 0
    passed = 0
    while True:
        try:
            res = solve(env, seed=seed, debug=False, vis=True if args.vis else False)
        except Exception as e:
            # A crash inside the solver counts as a failed motion plan.
            print(f"Cannot find valid solution because of an error in motion planning solution: {e}")
            res = -1

        if res == -1:
            # -1 signals the planner could not find/execute a valid plan.
            success = False
            failed_motion_plans += 1
        else:
            success = res[-1]["success"].item()
            elapsed_steps = res[-1]["elapsed_steps"].item()
            solution_episode_lengths.append(elapsed_steps)
        successes.append(success)
        if args.only_count_success and not success:
            # Discard failed attempts entirely when only successes are kept.
            seed += 1
            env.flush_trajectory(save=False)
            if args.save_video:
                env.flush_video(save=False)
            continue
        else:
            env.flush_trajectory()
            if args.save_video:
                env.flush_video()
            pbar.update(1)
            pbar.set_postfix(
                dict(
                    success_rate=np.mean(successes),
                    # Bug fix: divide by the number of attempts made by THIS
                    # worker; the original divided by (seed + 1), which is
                    # wrong whenever start_seed != 0 (parallel workers).
                    failed_motion_plan_rate=failed_motion_plans / (seed - start_seed + 1),
                    avg_episode_length=np.mean(solution_episode_lengths),
                    # Bug fix: guard against np.max crashing on an empty list
                    # (first kept episode can be a failed plan when
                    # --only-count-success is off).
                    max_episode_length=np.max(solution_episode_lengths) if solution_episode_lengths else 0,
                )
            )
        seed += 1
        passed += 1
        if passed == args.num_traj:
            break
    env.close()
    return output_h5_path
+
def main(args):
    """Entry point: run _main single-process, or fan out over args.num_procs
    worker processes and merge their trajectory files into one .h5."""
    if args.num_procs > 1:
        # Bug fix: the original guarded the multiprocessing branch with
        # `num_procs < num_traj`, which made this ValueError unreachable and
        # silently fell back to single-process mode instead of reporting the
        # misconfiguration.
        if args.num_traj < args.num_procs:
            raise ValueError("Number of trajectories should be greater than or equal to number of processes")
        # Split the workload evenly; each worker gets a disjoint seed range.
        args.num_traj = args.num_traj // args.num_procs
        seeds = [*range(0, args.num_procs * args.num_traj, args.num_traj)]
        pool = mp.Pool(args.num_procs)
        proc_args = [(deepcopy(args), i, seeds[i]) for i in range(args.num_procs)]
        res = pool.starmap(_main, proc_args)
        pool.close()
        # Merge per-process trajectory files (name.<proc_id>.h5) into name.h5
        # and delete the shards plus their JSON metadata.
        output_path = res[0][: -len("0.h5")] + "h5"
        merge_trajectories(output_path, res)
        for h5_path in res:
            tqdm.write(f"Remove {h5_path}")
            os.remove(h5_path)
            json_path = h5_path.replace(".h5", ".json")
            tqdm.write(f"Remove {json_path}")
            os.remove(json_path)
    else:
        _main(args)
+
# CLI entry point.
if __name__ == "__main__":
    # start = time.time()
    # Start worker processes with a fresh interpreter ("spawn") rather than
    # fork — NOTE(review): presumably required for simulator/GPU state safety.
    mp.set_start_method("spawn")
    main(parse_args())
    # print(f"Total time taken: {time.time() - start}")
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/solutions/__init__.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/solutions/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b294096aa0bcd537cab66425d317f31a7384768
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/solutions/__init__.py
@@ -0,0 +1 @@
+from .bowl_on_rack import solve as solveBowlOnRack
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/solutions/bowl_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/solutions/bowl_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..69d1389ca09faa1a43b9b3fade4cec6b46feda97
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/solutions/bowl_on_rack.py
@@ -0,0 +1,187 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceBowlOnRackEnv
+from mani_skill.examples.motionplanning.agilex.motionplanner import PiperArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.agilex.utils import compute_grasp_info_by_obb, get_actor_obb
+
def main():
    # Standalone driver: solve PlaceBowlOnRack-v1 with the motion planner for
    # 100 consecutive seeds while recording videos.
    env: PlaceBowlOnRackEnv = gym.make(
        "PlaceBowlOnRack-v1",
        obs_mode="none",
        control_mode="pd_joint_pos",
        render_mode="human",
        reward_mode="dense",
    )

    # Wrap the environment with RecordVideo
    # NOTE(review): RecordVideo expects env.render() to return rgb frames;
    # with render_mode="human" it may receive a viewer object instead —
    # confirm whether "rgb_array" is required here.
    env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)

    for seed in range(100):
        res = solve(env, seed=seed, debug=False, vis=True)
        ##print(res[-1])
    env.close()
+
def solve(env: PlaceBowlOnRackEnv, seed=None, debug=False, vis=False):
    """Scripted motion-planning solution for PlaceBowlOnRack-v1 (Piper arm).

    Stages: reach above the bowl -> grasp -> lift -> pre-place above the rack
    -> place -> release -> retreat.

    Args:
        env: the PlaceBowlOnRack environment (possibly wrapped).
        seed: reset seed for the episode.
        debug: unused here; the planner is always constructed with debug=False.
        vis: whether to render/visualize the target grasp pose while solving.

    Returns:
        -1 if any motion plan fails, otherwise the (obs, reward, terminated,
        truncated, info) tuple from the last executed env step.
    """
    env.reset(seed=seed)
    assert env.unwrapped.control_mode in [
        "pd_joint_pos",
        "pd_joint_pos_vel",
    ], env.unwrapped.control_mode

    planner = PiperArmMotionPlanningSolver(
        env,
        debug=False,
        vis=vis,
        base_pose=env.unwrapped.agent.robot.pose,
        visualize_target_grasp_pose=vis,
        print_env_info=False,
        joint_vel_limits=0.9,
        joint_acc_limits=0.9,
    )

    env = env.unwrapped
    FINGER_LENGTH = 0.076

    # Compute a top-down grasp on the bowl from its oriented bounding box.
    obb = get_actor_obb(env.bowl)
    approaching = np.array([0, 0, -1], dtype=np.float32)
    # Use the TCP's current y axis as the preferred closing direction.
    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)

    grasp_info = compute_grasp_info_by_obb(
        obb,
        approaching=approaching,
        target_closing=target_closing,
        depth=FINGER_LENGTH
    )
    closing, center = grasp_info["closing"], grasp_info["center"]
    # Offset the grasp along +y to grab the bowl rim rather than its center.
    # NOTE(review): the 0.06 offset looks tuned to this bowl asset — verify if
    # assets change.
    grasp_pose = env.agent.build_grasp_pose(approaching, closing, center + [0, 0.06, 0])
    grasp_pose = sapien.Pose(grasp_pose.p, grasp_pose.q)

    # -------------------------------------------------------------------------- #
    # Reach: hover above the grasp pose
    # -------------------------------------------------------------------------- #
    reach_pose = sapien.Pose(grasp_pose.p + [0, 0, 0.17], grasp_pose.q)
    res = planner.move_to_pose_with_RRTConnect(reach_pose)
    env.render()
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Grasp: descend and close the gripper
    # -------------------------------------------------------------------------- #
    res = planner.move_to_pose_with_RRTConnect(sapien.Pose([0, 0, 0.12]) * grasp_pose)
    env.render()
    if res == -1:
        return res
    planner.close_gripper()
    env.render()

    # -------------------------------------------------------------------------- #
    # Lift
    # -------------------------------------------------------------------------- #
    lift_pose = sapien.Pose([0, 0, 0.17]) * grasp_pose
    res = planner.move_to_pose_with_RRTConnect(lift_pose)
    env.render()
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Pre-place: move above the rack, tilting the bowl toward the rack slots
    # -------------------------------------------------------------------------- #
    rack_pose = env.rack.pose.sp
    goal_pose = sapien.Pose(rack_pose.p + [0.1, 0, 0.3], euler2quat(np.pi / 2, 0, -8 * np.pi / 20))
    pre_place_pose = goal_pose * lift_pose
    res = planner.move_to_pose_with_RRTConnect(pre_place_pose)
    env.render()
    # Bug fix: this stage's result was previously ignored, so a failed
    # pre-place plan continued into the place stage; bail out like the
    # other stages so it is reported as a failed motion plan.
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Place: lower the bowl onto the rack, then release
    # -------------------------------------------------------------------------- #
    place_pose = sapien.Pose([0, -0.05, -0.1] + pre_place_pose.p, pre_place_pose.q)
    res = planner.move_to_pose_with_RRTConnect(place_pose)
    env.render()
    # The gripper is opened before checking the plan result, matching the
    # original behavior (release even if the lowering plan failed).
    planner.open_gripper()
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Retreat: back away from the rack
    # -------------------------------------------------------------------------- #
    retreat_pose = sapien.Pose([-0.2, 0, 0.05], euler2quat(0, np.pi / 9, 0)) * place_pose
    res = planner.move_to_pose_with_RRTConnect(retreat_pose)

    return res
+
# CLI entry point.
if __name__ == "__main__":
    main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/utils.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc65734a821fe14f251ec9a959a72decac5d4c69
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/agilex/utils.py
@@ -0,0 +1,90 @@
+import numpy as np
+import sapien
+import sapien.physx as physx
+import sapien.render
+import trimesh
+from transforms3d import quaternions
+from mani_skill.utils.structs import Actor
+from mani_skill.utils import common
+from mani_skill.utils.geometry.trimesh_utils import get_component_mesh
+
+
def get_actor_obb(actor: Actor, to_world_frame=True, vis=False):
    """Return the oriented bounding box (trimesh Box) of the actor's dynamic
    rigid-body mesh, optionally expressed in the world frame.

    Set vis=True to pop up a trimesh viewer showing the mesh and its OBB.
    """
    rigid_body = actor._objs[0].find_component_by_type(physx.PhysxRigidDynamicComponent)
    mesh = get_component_mesh(rigid_body, to_world_frame=to_world_frame)
    assert mesh is not None, "can not get actor mesh for {}".format(actor)

    obb: trimesh.primitives.Box = mesh.bounding_box_oriented
    if vis:
        obb.visual.vertex_colors = (255, 0, 0, 10)
        trimesh.Scene([mesh, obb]).show()
    return obb
+
+
def compute_grasp_info_by_obb(
    obb: trimesh.primitives.Box,
    approaching=(0, 0, -1),
    target_closing=None,
    depth=0.0,
    ortho=True,
):
    """Compute grasp info given an oriented bounding box.
    The grasp info includes axes to define grasp frame, namely approaching, closing, orthogonal directions and center.

    Args:
        obb: oriented bounding box to grasp
        approaching: direction to approach the object
        target_closing: target closing direction, used to select one of multiple solutions
        depth: displacement from hand to tcp along the approaching vector. Usually finger length.
        ortho: whether to orthogonalize closing w.r.t. approaching.

    Returns:
        dict with keys "approaching", "closing", "center", "extents".
    """
    # NOTE(jigu): DO NOT USE `x.extents`, which is inconsistent with `x.primitive.transform`!
    extents = np.array(obb.primitive.extents)
    T = np.array(obb.primitive.transform)

    # Assume normalized
    approaching = np.array(approaching)

    # Find the axis closest to approaching vector
    # (dot each box axis with `approaching`; the largest |dot| is most aligned).
    angles = approaching @ T[:3, :3] # [3]
    inds0 = np.argsort(np.abs(angles))
    ind0 = inds0[-1]

    # Find the shorter axis as closing vector
    # (among the two axes not used for approaching).
    inds1 = np.argsort(extents[inds0[0:-1]])
    ind1 = inds0[0:-1][inds1[0]]
    ind2 = inds0[0:-1][inds1[1]]

    # If sizes are close, choose the one closest to the target closing
    if target_closing is not None and 0.99 < (extents[ind1] / extents[ind2]) < 1.01:
        vec1 = T[:3, ind1]
        vec2 = T[:3, ind2]
        # Swap so ind1 is the axis more aligned with target_closing.
        if np.abs(target_closing @ vec1) < np.abs(target_closing @ vec2):
            ind1 = inds0[0:-1][inds1[1]]
            ind2 = inds0[0:-1][inds1[0]]
    closing = T[:3, ind1]

    # Flip if far from target
    if target_closing is not None and target_closing @ closing < 0:
        closing = -closing

    # Reorder extents
    # so they correspond to (approaching, closing, ortho) axes.
    extents = extents[[ind0, ind1, ind2]]

    # Find the origin on the surface
    # then push it `depth` into the object along approaching, clamped so the
    # grasp center never passes the box center.
    center = T[:3, 3].copy()
    half_size = extents[0] * 0.5
    center = center + approaching * (-half_size + min(depth, half_size))

    if ortho:
        # Gram-Schmidt: remove the approaching component, then renormalize.
        closing = closing - (approaching @ closing) * approaching
        closing = common.np_normalize_vector(closing)

    grasp_info = dict(
        approaching=approaching, closing=closing, center=center, extents=extents
    )
    return grasp_info
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/__pycache__/motionplanner.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/__pycache__/motionplanner.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..55b639fab598cb41b77e63b55b1f7cf5009a8daf
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/__pycache__/motionplanner.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/__pycache__/run.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/__pycache__/run.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1ef3ede3c4468343a390bec685ae766704c3bc0d
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/__pycache__/run.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/__pycache__/utils.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bf70baf940959d31d1fe7cfb6d0a793299a2ecc0
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/__pycache__/utils.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/generate.sh b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/generate.sh
new file mode 100644
index 0000000000000000000000000000000000000000..22d392635b195b01b72978da7474e304a4dda884
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/generate.sh
@@ -0,0 +1,9 @@
+# Generate all motion planning demos for the dataset
+for env_id in PushCube-v1 PickCube-v1 StackCube-v1 PegInsertionSide-v1 PlugCharger-v1
+do
+ python -m mani_skill.examples.motionplanning.noahbiarm.run --env-id $env_id \
+ --traj-name="trajectory" --only-count-success --save-video -n 1 \
+ --shader="rt" # generate sample videos
+ mv demos/$env_id/motionplanning/0.mp4 demos/$env_id/motionplanning/sample.mp4
+ python -m mani_skill.examples.motionplanning.noahbiarm.run --env-id $env_id --traj-name="trajectory" -n 1000 --only-count-success
+done
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/motionplanner.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/motionplanner.py
new file mode 100644
index 0000000000000000000000000000000000000000..37f17da9ac5d55da0bc6395d865a1f92801b3c3b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/motionplanner.py
@@ -0,0 +1,373 @@
+import mplib
+import numpy as np
+import sapien
+import trimesh
+from functools import partial
+from typing import Callable, Optional
+import torch
+
+from mani_skill.agents.base_agent import BaseAgent
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.envs.scene import ManiSkillScene
+from mani_skill.utils.structs.pose import to_sapien_pose
+from mani_skill.utils.geometry import rotation_conversions
+import sapien.physx as physx
+OPEN = -1
+CLOSED = 1
+
+
+class NoahBiArmMotionPlanningSolver:
+ def __init__(
+ self,
+ env: BaseEnv,
+ debug: bool = False,
+ vis: bool = True,
+ base_pose: sapien.Pose = None, # TODO mplib doesn't support robot base being anywhere but 0
+ visualize_target_grasp_pose: bool = True,
+ print_env_info: bool = True,
+ joint_vel_limits=0.1,
+ joint_acc_limits=0.1,
+ version="r" # can be r, rc, rcw
+ ):
+ self.env = env
+ self.base_env: BaseEnv = env.unwrapped
+ self.env_agent: BaseAgent = self.base_env.agent
+ self.robot = self.env_agent.robot
+ self.tcp = self.env_agent.tcp
+ self.joint_vel_limits = joint_vel_limits
+ self.joint_acc_limits = joint_acc_limits
+ self.version = version
+
+ self.base_pose = to_sapien_pose(base_pose)
+
+ self.planner = self.setup_planner()
+ self.control_mode = self.base_env.control_mode
+
+ self.debug = debug
+ self.vis = vis
+ self.print_env_info = print_env_info
+ self.visualize_target_grasp_pose = visualize_target_grasp_pose
+ self.gripper_state = OPEN
+ self.grasp_pose_visual = None
+ if self.vis and self.visualize_target_grasp_pose:
+ if "grasp_pose_visual" not in self.base_env.scene.actors:
+ self.grasp_pose_visual = build_panda_gripper_grasp_pose_visual(
+ self.base_env.scene
+ )
+ else:
+ self.grasp_pose_visual = self.base_env.scene.actors["grasp_pose_visual"]
+ self.grasp_pose_visual.set_pose(self.base_env.agent.tcp.pose)
+ self.elapsed_steps = 0
+
+ self.use_point_cloud = False
+ self.collision_pts_changed = False
+ self.all_collision_pts = None
+
+ def render_wait(self):
+ if not self.vis or not self.debug:
+ return
+ print("Press [c] to continue")
+ viewer = self.base_env.render_human()
+ while True:
+ if viewer.window.key_down("c"):
+ break
+ self.base_env.render_human()
+
+ def get_num_joints(self):
+ if self.version == "r":
+ return 9
+ elif self.version == "rc":
+ return 10
+ elif self.version == "rcw":
+ return 11
+
+ def get_num_gripper_joints(self):
+ if self.version in ["r", "rc", "rcw"]:
+ return 2
+
+ def setup_planner(self):
+ link_names = [link.get_name() for link in self.robot.get_links()]
+ joint_names = [joint.get_name() for joint in self.robot.get_active_joints()]
+ planner = mplib.Planner(
+ urdf=self.env_agent.urdf_path,
+ srdf=self.env_agent.urdf_path.replace(".urdf", ".srdf"),
+ user_link_names=link_names,
+ user_joint_names=joint_names,
+ move_group=self.env_agent.ee_link_name,
+ joint_vel_limits=np.ones(self.get_num_joints()-self.get_num_gripper_joints())* self.joint_vel_limits,
+ joint_acc_limits=np.ones(self.get_num_joints()-self.get_num_gripper_joints())* self.joint_acc_limits,
+ )
+ # if mplib version 0.2.1
+ b_pose = mplib.Pose(self.base_pose.p, self.base_pose.q)
+ planner.set_base_pose(b_pose)
+
+ # elif mplib version 0.1.1
+ # planner.set_base_pose(np.hstack([self.base_pose.p, self.base_pose.q]))
+ return planner
+
+ def follow_path(self, result, refine_steps: int = 0):
+ n_step = result["position"].shape[0]
+ for i in range(n_step + refine_steps):
+ qpos = result["position"][min(i, n_step - 1)]
+ if self.control_mode == "pd_joint_pos_vel":
+ qvel = result["velocity"][min(i, n_step - 1)]
+ action = np.hstack([qpos, qvel, self.gripper_state])
+ else:
+ action = np.hstack([qpos, self.gripper_state])
+ obs, reward, terminated, truncated, info = self.env.step(action)
+ self.elapsed_steps += 1
+ if self.print_env_info:
+ print(
+ f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
+ )
+ if self.vis:
+ self.base_env.render_human()
+ return obs, reward, terminated, truncated, info
+
+ def move_to_pose_with_RRTConnect(
+ self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0, t: int = 100
+ ):
+ pose = to_sapien_pose(pose)
+ if self.grasp_pose_visual is not None:
+ self.grasp_pose_visual.set_pose(pose)
+ pose = sapien.Pose(p=pose.p, q=pose.q)
+
+ result = None
+ min_result = None
+ min_duration = float('inf')
+
+ for attempt in range(t):
+ result = self.planner.plan_pose(
+ mplib.Pose(pose.p, pose.q),
+ self.robot.get_qpos().cpu().numpy()[0],
+ time_step=self.base_env.control_timestep,
+ wrt_world=True,
+ )
+
+ if result["status"] == "Success":
+ if result["duration"] < min_duration:
+ min_result = result
+ min_duration = result["duration"]
+
+ result = min_result
+
+ if result is None or result["status"] != "Success":
+ return -1
+
+ self.render_wait()
+ if dry_run:
+ return result
+ return self.follow_path(result, refine_steps=refine_steps)
+
+
+ def move_to_pose_with_screw(
+ self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0, trials: int = 100
+ ):
+ pose = to_sapien_pose(pose)
+ if self.grasp_pose_visual is not None:
+ self.grasp_pose_visual.set_pose(pose)
+ pose = sapien.Pose(p=pose.p, q=pose.q)
+
+ base_q = pose.q # save the original quaternion
+ noise_level = 1e-3 # small noise scale; adjust as needed
+ result = None
+
+ for attempt in range(trials):
+ # For all trials after the first, add a small random perturbation
+ if attempt > 0:
+ noise = np.random.normal(scale=noise_level, size=base_q.shape)
+ noisy_q = base_q + noise
+ noisy_q = noisy_q / np.linalg.norm(noisy_q) # re-normalize to ensure a valid quaternion
+ else:
+ noisy_q = base_q
+
+ # Create a new pose for this trial using the potentially noisy quaternion
+ trial_pose = sapien.Pose(p=pose.p, q=noisy_q)
+ result = self.planner.plan_screw(
+ mplib.Pose(trial_pose.p, trial_pose.q),
+ self.robot.get_qpos().cpu().numpy()[0],
+ time_step=self.base_env.control_timestep,
+ )
+ if result["status"] == "Success":
+ break
+
+ if result is None or result["status"] != "Success":
+ print(result["status"] if result is not None else "No result")
+ self.render_wait()
+ return -1
+
+ self.render_wait()
+ if dry_run:
+ return result
+ return self.follow_path(result, refine_steps=refine_steps)
+
+ def make_f(self, f):
+ if f is not None:
+ return partial(f, self)
+
+ def make_j(self, j):
+ if j is not None:
+ return partial(j, self)
+
+ def get_eef_x(self):
+ raise NotImplementedError
+ move_link_idx = self.planner.link_name_2_idx[self.planner.move_group]
+ move_joint_idx = self.planner.move_group_joint_indices
+ self.planner.pinocchio_model.compute_forward_kinematics(self.planner.robot.get_qpos())
+ new_pose = self.planner.pinocchio_model.get_link_pose(move_link_idx)
+ eef_rot = rotation_conversions.quaternion_to_matrix(torch.tensor(new_pose.q))
+ eef_x = eef_rot[:, 0].cpu().numpy().astype(np.float32).reshape(-1)
+ eef_y = eef_rot[:, 1].cpu().numpy().astype(np.float32).reshape(-1)
+ eef_z = eef_rot[:, 2].cpu().numpy().astype(np.float32).reshape(-1)
+ return eef_x
+
+ def move_to_pose_with_CRRTConnect(
+ self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0,
+ f: Optional[Callable] = None, j: Optional[Callable] = None,
+ ):
+
+ pose = to_sapien_pose(pose)
+ if self.grasp_pose_visual is not None:
+ self.grasp_pose_visual.set_pose(pose)
+ pose = sapien.Pose(p=pose.p, q=pose.q)
+ # print(self.get_eef_z())
+ # breakpoint()
+ # print("control time step")
+ # print(self.base_env.control_timestep)
+ result = self.planner.plan_pose(
+ # np.concatenate([pose.p, pose.q]),
+ mplib.Pose(pose.p, pose.q),
+ self.robot.get_qpos().cpu().numpy()[0],
+ time_step=self.base_env.control_timestep,
+ # use_point_cloud=self.use_point_cloud,
+ wrt_world=True,
+ constraint_function=self.make_f(f),
+ constraint_jacobian=self.make_j(j),
+ constraint_tolerance= 1e-2,
+ )
+ if result["status"] != "Success":
+ print(result["status"])
+ self.render_wait()
+ return -1
+ self.render_wait()
+ if dry_run:
+ return result
+ return self.follow_path(result, refine_steps=refine_steps)
+
+ def open_gripper(self, gripper_state=OPEN):
+ self.gripper_state = gripper_state
+ qpos = self.robot.get_qpos()[0, :-2].cpu().numpy()
+ for i in range(6):
+ if self.control_mode == "pd_joint_pos":
+ action = np.hstack([qpos, self.gripper_state])
+ else:
+ action = np.hstack([qpos, qpos * 0, self.gripper_state])
+ obs, reward, terminated, truncated, info = self.env.step(action)
+ self.elapsed_steps += 1
+ if self.print_env_info:
+ print(
+ f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
+ )
+ if self.vis:
+ self.base_env.render_human()
+ return obs, reward, terminated, truncated, info
+
+ def close_gripper(self, t=6, gripper_state = CLOSED):
+ self.gripper_state = gripper_state
+ qpos = self.robot.get_qpos()[0, :-2].cpu().numpy()
+ for i in range(t):
+ if self.control_mode == "pd_joint_pos":
+ action = np.hstack([qpos, self.gripper_state])
+ else:
+ action = np.hstack([qpos, qpos * 0, self.gripper_state])
+ obs, reward, terminated, truncated, info = self.env.step(action)
+ self.elapsed_steps += 1
+ if self.print_env_info:
+ print(
+ f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
+ )
+ if self.vis:
+ self.base_env.render_human()
+ return obs, reward, terminated, truncated, info
+
+ def add_box_collision(self, extents: np.ndarray, pose: sapien.Pose):
+ self.use_point_cloud = True
+ box = trimesh.creation.box(extents, transform=pose.to_transformation_matrix())
+ pts, _ = trimesh.sample.sample_surface(box, 256)
+ if self.all_collision_pts is None:
+ self.all_collision_pts = pts
+ else:
+ self.all_collision_pts = np.vstack([self.all_collision_pts, pts])
+ self.planner.update_point_cloud(self.all_collision_pts)
+
+ def add_collision_pts(self, pts: np.ndarray):
+ if self.all_collision_pts is None:
+ self.all_collision_pts = pts
+ else:
+ self.all_collision_pts = np.vstack([self.all_collision_pts, pts])
+ self.planner.update_point_cloud(self.all_collision_pts)
+
+ def clear_collisions(self):
+ self.all_collision_pts = None
+ self.use_point_cloud = False
+
+ def close(self):
+ pass
+
+from transforms3d import quaternions
+
+
+def build_panda_gripper_grasp_pose_visual(scene: ManiSkillScene):
+ builder = scene.create_actor_builder()
+ grasp_pose_visual_width = 0.01
+ grasp_width = 0.05
+
+ builder.add_sphere_visual(
+ pose=sapien.Pose(p=[0, 0, 0.0]),
+ radius=grasp_pose_visual_width,
+ material=sapien.render.RenderMaterial(base_color=[0.3, 0.4, 0.8, 0.7])
+ )
+
+ builder.add_box_visual(
+ pose=sapien.Pose(
+ p=[0, 0, -0.08],
+ q=quaternions.axangle2quat(np.array([0, 0, 1]), theta=np.pi / 2),
+
+ ),
+ half_size=[grasp_pose_visual_width, grasp_pose_visual_width, 0.02],
+ material=sapien.render.RenderMaterial(base_color=[0, 1, 0, 0.7]),
+ )
+ builder.add_box_visual(
+ pose=sapien.Pose(
+ p=[0, 0, -0.05],
+ q=quaternions.axangle2quat(np.array([0, 0, 1]), theta=np.pi / 2),
+ ),
+ half_size=[grasp_pose_visual_width, grasp_width, grasp_pose_visual_width],
+ material=sapien.render.RenderMaterial(base_color=[0, 1, 0, 0.7]),
+ )
+ builder.add_box_visual(
+ pose=sapien.Pose(
+ p=[
+ grasp_width + grasp_pose_visual_width,
+ 0.03 - grasp_pose_visual_width * 3,
+ 0.03 - 0.05,
+ ],
+ q=quaternions.axangle2quat(np.array([0, 1, 0]), theta=np.pi / 2),
+ ),
+ half_size=[0.04, grasp_pose_visual_width, grasp_pose_visual_width],
+ material=sapien.render.RenderMaterial(base_color=[0, 0, 1, 0.7]),
+ )
+ builder.add_box_visual(
+ pose=sapien.Pose(
+ p=[
+ -grasp_width - grasp_pose_visual_width,
+ 0.03 - grasp_pose_visual_width * 3,
+ 0.03 - 0.05,
+ ],
+ q=quaternions.axangle2quat(np.array([0, 1, 0]), theta=np.pi / 2),
+ ),
+ half_size=[0.04, grasp_pose_visual_width, grasp_pose_visual_width],
+ material=sapien.render.RenderMaterial(base_color=[1, 0, 0, 0.7]),
+ )
+ grasp_pose_visual = builder.build_kinematic(name="grasp_pose_visual")
+ return grasp_pose_visual
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/run.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac243caec42f8be22e35200ec7259c450028285e
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/run.py
@@ -0,0 +1,218 @@
+import multiprocessing as mp
+import os
+from copy import deepcopy
+import time
+import argparse
+import gymnasium as gym
+import numpy as np
+from tqdm import tqdm
+import os.path as osp
+import sapien.core as sapien
+import tkinter as tk
+from mani_skill.utils.wrappers.record import RecordEpisode
+from mani_skill.trajectory.merge_trajectory import merge_trajectories
+
+from mani_skill.examples.motionplanning.noahbiarm.solutions import (
+ solvePushCube,
+ solvePickCube,
+ solveStackCube,
+ solvePegInsertionSide,
+ solvePlugCharger,
+ solvePullCubeTool,
+ solveLiftPegUpright,
+ solvePullCube,
+ solveMugOnRack,
+ solveStackMugOnRack,
+ solveStackBowl,
+ solveForkFromRack,
+ solveStackPlateOnRack,
+ solveMugOnCoffeeMachine,
+ solveMugFromCoffeeMachine,
+ solveSpoonOnRack,
+ solveBowlOnRack,
+ solveBowlOnRack2,
+ solveBowlOnRack3,
+ solveBowlOnRack4,
+ solvePlateOnRack,
+ solvePlateOnRack2,
+ solvePlateOnRack3,
+ solvePlateOnRack4,
+ solveForkOnRack,
+ solveForkOnRack2,
+ solveForkOnRack3,
+ solveForkOnRack4,
+ solveKnifeOnRack,
+ solveKnifeOnRack2,
+ solveKnifeOnRack3,
+ solveKnifeOnRack4,
+ )
+
+MP_SOLUTIONS = {
+ "PickCube-v1": solvePickCube,
+ "StackCube-v1": solveStackCube,
+ "PegInsertionSide-v1": solvePegInsertionSide,
+ "PlugCharger-v1": solvePlugCharger,
+ "PushCube-v1": solvePushCube,
+ "PullCubeTool-v1": solvePullCubeTool,
+ "LiftPegUpright-v1": solveLiftPegUpright,
+ "PullCube-v1": solvePullCube,
+ "PlaceMugOnRack-v1": solveMugOnRack,
+ "StackMugOnRack-v1": solveStackMugOnRack,
+ "StackBowl-v1": solveStackBowl,
+ "StackPlateOnRack-v1": solveStackPlateOnRack,
+ "PlaceMugOnCoffeeMachine-v1": solveMugOnCoffeeMachine,
+ "PickMugFromCoffeeMachine-v1": solveMugFromCoffeeMachine,
+ "PlaceSpoonOnRack-v1": solveSpoonOnRack,
+ "PickForkFromRack-v1": solveForkFromRack,
+ "PlaceBowlOnRack-v1": solveBowlOnRack,
+ "PlaceBowlOnRack-v2": solveBowlOnRack2,
+ "PlaceBowlOnRack-v3": solveBowlOnRack3,
+ "PlaceBowlOnRack-v4": solveBowlOnRack4,
+ "PlacePlateOnRack-v1": solvePlateOnRack,
+ "PlacePlateOnRack-v2": solvePlateOnRack2,
+ "PlacePlateOnRack-v3": solvePlateOnRack3,
+ "PlacePlateOnRack-v4": solvePlateOnRack4,
+ "PlaceForkOnRack-v1": solveForkOnRack,
+ "PlaceForkOnRack-v2": solveForkOnRack2,
+ "PlaceForkOnRack-v3": solveForkOnRack3,
+ "PlaceForkOnRack-v4": solveForkOnRack4,
+ "PlaceKnifeOnRack-v1": solveKnifeOnRack,
+ "PlaceKnifeOnRack-v2": solveKnifeOnRack2,
+ "PlaceKnifeOnRack-v3": solveKnifeOnRack3,
+ "PlaceKnifeOnRack-v4": solveKnifeOnRack4,
+}
+
+def parse_args(args=None):
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-e", "--env-id", type=str, default="PickCube-v1", help=f"Environment to run motion planning solver on. Available options are {list(MP_SOLUTIONS.keys())}")
+    parser.add_argument("-o", "--obs-mode", type=str, default="none", help="Observation mode to use. Usually this is kept as 'none' as observations are not necessary to be stored, they can be replayed later via the mani_skill.trajectory.replay_trajectory script.")
+ parser.add_argument("-n", "--num-traj", type=int, default=10, help="Number of trajectories to generate.")
+ parser.add_argument("--only-count-success", action="store_true", help="If true, generates trajectories until num_traj of them are successful and only saves the successful trajectories/videos")
+ parser.add_argument("--reward-mode", type=str)
+ parser.add_argument("-b", "--sim-backend", type=str, default="auto", help="Which simulation backend to use. Can be 'auto', 'cpu', 'gpu'")
+ parser.add_argument("--render-mode", type=str, default="rgb_array", help="can be 'sensors' or 'rgb_array' which only affect what is saved to videos")
+ parser.add_argument("--vis", action="store_true", help="whether or not to open a GUI to visualize the solution live")
+ parser.add_argument("--save-video", action="store_true", help="whether or not to save videos locally")
+ parser.add_argument("--traj-name", type=str, help="The name of the trajectory .h5 file that will be created.")
+ parser.add_argument("--shader", default="default", type=str, help="Change shader used for rendering. Default is 'default' which is very fast. Can also be 'rt' for ray tracing and generating photo-realistic renders. Can also be 'rt-fast' for a faster but lower quality ray-traced renderer")
+ parser.add_argument("--record-dir", type=str, default="demos", help="where to save the recorded trajectories")
+ parser.add_argument("--num-procs", type=int, default=1, help="Number of processes to use to help parallelize the trajectory replay process. This uses CPU multiprocessing and only works with the CPU simulation backend at the moment.")
+ parser.add_argument("--rand_level", type=int, default=0, help="the level of randomization of objects in the task")
+ parser.add_argument("--robot_uids", type=str, default="noahbiarm_r", help="set robot uids")
+ return parser.parse_args()
+
+def _main(args, proc_id: int = 0, start_seed: int = 0) -> str:
+ env_id = args.env_id
+ print(env_id)
+ env = gym.make(
+ env_id,
+ obs_mode=args.obs_mode,
+ control_mode="pd_joint_pos",
+ render_mode=args.render_mode,
+ reward_mode="none" if args.reward_mode is None else args.reward_mode,
+ sensor_configs=dict(shader_pack=args.shader),
+ human_render_camera_configs=dict(
+ shader_pack=args.shader,
+ ),
+ viewer_camera_configs=dict(shader_pack=args.shader),
+ sim_backend=args.sim_backend,
+ rand_level=args.rand_level,
+ robot_uids=args.robot_uids,
+ )
+ if env_id not in MP_SOLUTIONS:
+ raise RuntimeError(f"No already written motion planning solutions for {env_id}. Available options are {list(MP_SOLUTIONS.keys())}")
+
+ if not args.traj_name:
+ new_traj_name = time.strftime("%Y%m%d_%H%M%S")
+ else:
+ new_traj_name = args.traj_name
+
+ if args.num_procs > 1:
+ new_traj_name = new_traj_name + "." + str(proc_id)
+ env = RecordEpisode(
+ env,
+ output_dir=osp.join(args.record_dir, env_id, "motionplanning"),
+ trajectory_name=new_traj_name, save_video=args.save_video,
+ source_type="motionplanning",
+ source_desc="official motion planning solution from ManiSkill contributors",
+ video_fps=30,
+ save_on_reset=False
+ )
+ output_h5_path = env._h5_file.filename
+ solve = MP_SOLUTIONS[env_id]
+ print(f"Motion Planning Running on {env_id}")
+ pbar = tqdm(range(args.num_traj), desc=f"proc_id: {proc_id}")
+ seed = start_seed
+ successes = []
+ solution_episode_lengths = []
+ failed_motion_plans = 0
+ passed = 0
+ while True:
+ try:
+ res = solve(env, seed=seed, debug=False, vis=True if args.vis else False)
+ except Exception as e:
+ print(f"Cannot find valid solution because of an error in motion planning solution: {e}")
+ res = -1
+
+ if res == -1:
+ success = False
+ failed_motion_plans += 1
+ else:
+ success = res[-1]["success"].item()
+ elapsed_steps = res[-1]["elapsed_steps"].item()
+ solution_episode_lengths.append(elapsed_steps)
+ successes.append(success)
+ if args.only_count_success and not success:
+ seed += 1
+ env.flush_trajectory(save=False)
+ if args.save_video:
+ env.flush_video(save=False)
+ continue
+ else:
+ env.flush_trajectory()
+ if args.save_video:
+ env.flush_video()
+ pbar.update(1)
+ pbar.set_postfix(
+ dict(
+ success_rate=np.mean(successes),
+ failed_motion_plan_rate=failed_motion_plans / (seed + 1),
+ avg_episode_length=np.mean(solution_episode_lengths),
+ max_episode_length=np.max(solution_episode_lengths),
+ # min_episode_length=np.min(solution_episode_lengths)
+ )
+ )
+ seed += 1
+ passed += 1
+ if passed == args.num_traj:
+ break
+ env.close()
+ return output_h5_path
+
+def main(args):
+    if args.num_procs > 1:
+ if args.num_traj < args.num_procs:
+ raise ValueError("Number of trajectories should be greater than or equal to number of processes")
+ args.num_traj = args.num_traj // args.num_procs
+ seeds = [*range(0, args.num_procs * args.num_traj, args.num_traj)]
+ pool = mp.Pool(args.num_procs)
+ proc_args = [(deepcopy(args), i, seeds[i]) for i in range(args.num_procs)]
+ res = pool.starmap(_main, proc_args)
+ pool.close()
+ # Merge trajectory files
+ output_path = res[0][: -len("0.h5")] + "h5"
+ merge_trajectories(output_path, res)
+ for h5_path in res:
+ tqdm.write(f"Remove {h5_path}")
+ os.remove(h5_path)
+ json_path = h5_path.replace(".h5", ".json")
+ tqdm.write(f"Remove {json_path}")
+ os.remove(json_path)
+ else:
+ _main(args)
+
+if __name__ == "__main__":
+ # start = time.time()
+ mp.set_start_method("spawn")
+ main(parse_args())
+ # print(f"Total time taken: {time.time() - start}")
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__init__.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..52b5997c5a94c4c5afbe283e0694d498609143bf
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__init__.py
@@ -0,0 +1,32 @@
+from .pick_cube import solve as solvePickCube
+from .stack_cube import solve as solveStackCube
+from .peg_insertion_side import solve as solvePegInsertionSide
+from .plug_charger import solve as solvePlugCharger
+from .push_cube import solve as solvePushCube
+from .pull_cube_tool import solve as solvePullCubeTool
+from .lift_peg_upright import solve as solveLiftPegUpright
+from .pull_cube import solve as solvePullCube
+from .mug_on_rack import solve as solveMugOnRack
+from .stack_mug_on_rack import solve as solveStackMugOnRack
+from .stack_bowl import solve as solveStackBowl
+from .fork_from_rack import solve as solveForkFromRack
+from .stack_plate_on_rack import solve as solveStackPlateOnRack
+from .mug_on_coffee_machine import solve as solveMugOnCoffeeMachine
+from .mug_from_coffee_machine import solve as solveMugFromCoffeeMachine
+from .spoon_on_rack import solve as solveSpoonOnRack
+from .bowl_on_rack import solve as solveBowlOnRack
+from .bowl_on_rack_v2 import solve as solveBowlOnRack2
+from .bowl_on_rack_v3 import solve as solveBowlOnRack3
+from .bowl_on_rack_v4 import solve as solveBowlOnRack4
+from .plate_on_rack import solve as solvePlateOnRack
+from .plate_on_rack2 import solve as solvePlateOnRack2
+from .plate_on_rack_v3 import solve as solvePlateOnRack3
+from .plate_on_rack_v4 import solve as solvePlateOnRack4
+from .fork_on_rack import solve as solveForkOnRack
+from .fork_on_rack_v2 import solve as solveForkOnRack2
+from .fork_on_rack_v3 import solve as solveForkOnRack3
+from .fork_on_rack_v4 import solve as solveForkOnRack4
+from .knife_on_rack import solve as solveKnifeOnRack
+from .knife_on_rack_v2 import solve as solveKnifeOnRack2
+from .knife_on_rack_v3 import solve as solveKnifeOnRack3
+from .knife_on_rack_v4 import solve as solveKnifeOnRack4
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/__init__.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ce10e01705ac1f4e12f0b23bd9ff885cd90e56c8
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/__init__.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/bowl_on_rack.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/bowl_on_rack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a1213733e5183986b9536c2a4d86732b54744899
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/bowl_on_rack.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/bowl_on_rack_v2.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/bowl_on_rack_v2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0a5edb051856fd2b3ed7be3560fd8ffc9633fd42
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/bowl_on_rack_v2.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/bowl_on_rack_v3.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/bowl_on_rack_v3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6965ba0fcd51df820e81d804858e27046e3b33d1
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/bowl_on_rack_v3.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/bowl_on_rack_v4.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/bowl_on_rack_v4.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5b4ecbdc4632eb93c2d3e633d2b34aad237a9bb8
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/bowl_on_rack_v4.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_from_rack.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_from_rack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..06b3eeed5db6199282e16d180607bb2070a4c529
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_from_rack.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_on_rack.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_on_rack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b629bd66fc9bb8dc54e70189ff90e9b07cb8e5c
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_on_rack.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_on_rack_v2.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_on_rack_v2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..23aa09b0a6a214f72692194a6889b579c0f7ac28
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_on_rack_v2.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_on_rack_v3.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_on_rack_v3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c7f88b69bf83d50683114a2f5649ea8e28a83ae5
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_on_rack_v3.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_on_rack_v4.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_on_rack_v4.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1a42a460d28b6e5880bac7e43bc274faa0a006f9
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/fork_on_rack_v4.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/knife_on_rack.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/knife_on_rack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fdd34760a0c2eaf60f0f1efeb6e40ebf36c9097f
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/knife_on_rack.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/knife_on_rack_v2.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/knife_on_rack_v2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25c2b3a34002c49f2b758820ba79ff3f51c227fb
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/knife_on_rack_v2.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/knife_on_rack_v3.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/knife_on_rack_v3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3da26cac0051e9929b01336ea50ba9b52fc8a940
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/knife_on_rack_v3.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/knife_on_rack_v4.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/knife_on_rack_v4.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b0f1c155540f2a0e7445a7ff7aa77b76cbbc7d43
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/knife_on_rack_v4.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/lift_peg_upright.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/lift_peg_upright.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..849c27928146a649d0e6172fc0a05f02f6002458
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/lift_peg_upright.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/mug_from_coffee_machine.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/mug_from_coffee_machine.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b02087aa76fdb6770812bf95a74ee833584f5424
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/mug_from_coffee_machine.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/mug_on_coffee_machine.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/mug_on_coffee_machine.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..baba90d7b4ee81ebb0432f54b78f30da4c0ad178
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/mug_on_coffee_machine.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/mug_on_rack.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/mug_on_rack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..933dcb04ccb3a16df2c9f21be3bba1ff0fb098ba
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/mug_on_rack.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/peg_insertion_side.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/peg_insertion_side.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c97888559e5e472c2bfe8e40bd89a092ca33a9dc
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/peg_insertion_side.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/pick_cube.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/pick_cube.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c2b73dd6059025d22963ced7e69c20ee8bcec0f1
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/pick_cube.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plate_on_rack.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plate_on_rack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a64acfd0761f9d9e4693548120c87491b7bc42b5
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plate_on_rack.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plate_on_rack2.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plate_on_rack2.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c116931e53565eec3b5fc72c42a1140ee0651872
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plate_on_rack2.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plate_on_rack_v3.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plate_on_rack_v3.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..00e9fb9556fec10428d7ed195ff0f8cca0943cb3
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plate_on_rack_v3.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plate_on_rack_v4.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plate_on_rack_v4.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5e34dacd5f4773bdc5816b7181b558957636fafc
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plate_on_rack_v4.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plug_charger.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plug_charger.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..76637aadd1b3e76b947c2561341f637ea0a88c4d
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/plug_charger.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/pull_cube.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/pull_cube.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..435494c3405ce82e3e0b7f37e20d6044e6ae2225
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/pull_cube.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/pull_cube_tool.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/pull_cube_tool.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f37cbe709f464228a1efe84130faf1f286184609
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/pull_cube_tool.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/push_cube.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/push_cube.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..55d53ea57ddaca7351a1c6c5e23d054dee3f48d4
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/push_cube.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/spoon_on_rack.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/spoon_on_rack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d97793c00403397d13e3a6cd30902858903cdc16
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/spoon_on_rack.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/stack_bowl.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/stack_bowl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5651672689534985d93c201c75a327a64994bef1
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/stack_bowl.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/stack_cube.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/stack_cube.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5bed03b19f1cddfe5a238b74859187c14feb63c7
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/stack_cube.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/stack_mug_on_rack.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/stack_mug_on_rack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7615499e02607e97d124423ac3c9964d20b5cc0b
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/stack_mug_on_rack.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/stack_plate_on_rack.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/stack_plate_on_rack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..19868759d9d364a64082903c3d24d4f499abc079
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/__pycache__/stack_plate_on_rack.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/bowl_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/bowl_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..598ed49be9abe01dfe256681f3f125c89a62498f
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/bowl_on_rack.py
@@ -0,0 +1,152 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceBowlOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlaceBowlOnRackEnv = gym.make(
+ "PlaceBowlOnRack-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlaceBowlOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ ], env.unwrapped.control_mode
+ planner = NoahBiArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.5,
+ joint_acc_limits=0.5,
+ version=env.robot_uids.split("_")[-1]
+ )
+
+ env = env.unwrapped
+
+ # to make the objects settle
+ for _ in range(10):
+ kf = env.agent.keyframes["vertical_grasp"]
+ env.step(env.agent.controller.from_qpos(kf.qpos))
+ env.render()
+
+ FINGER_LENGTH = 0.025
+ BOWL_D = env.bowl_extents[0]
+ BOWL_Z = env.bowl_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = BOWL_Z + RACK_Z + FINGER_LENGTH
+
+ obb = get_actor_obb(env.bowl)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.bowl.pose.p)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler[:, 0] = -np.pi/2
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, BOWL_Z], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ reach_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, BOWL_Z*0.0], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to grasp pose")
+ return res
+
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, BOWL_Z], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ if res == -1:
+ print("Failed to lift pose")
+ return res
+
+ lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, 1.5*ENV_Z_OFFSET], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ if res == -1:
+ print("Failed to lift pose")
+ return res
+
+
+ # # -------------------------------------------------------------------------- #
+ # # Hover over goalsite (rack pose)
+ # # -------------------------------------------------------------------------- #
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ hover_pose = sapien.Pose(pose.sp.p + [0, BOWL_Z, BOWL_Z], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ print("Failed to hover pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower & Release
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(pose.sp.p + [0, BOWL_Z, BOWL_Z/10], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ print("Failed to lower pose")
+ return res
+
+ planner.open_gripper()
+
+ # # -------------------------------------------------------------------------- #
+ # # raise hand
+ # # -------------------------------------------------------------------------- #
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ raise_pose = sapien.Pose(pose.sp.p + [0, BOWL_Z, BOWL_Z], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(raise_pose)
+ if res == -1:
+ print("Failed to raise pose")
+ return res
+
+
+ planner.close()
+ return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/bowl_on_rack_v2.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/bowl_on_rack_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..abf967aa2d32f8fd61b11a125bd857f287eb8f1e
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/bowl_on_rack_v2.py
@@ -0,0 +1,152 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceBowlOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlaceBowlOnRackEnv = gym.make(
+ "PlaceBowlOnRack-v2",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlaceBowlOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ ], env.unwrapped.control_mode
+ planner = NoahBiArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.5,
+ joint_acc_limits=0.5,
+ version=env.robot_uids.split("_")[-1]
+ )
+
+ env = env.unwrapped
+
+ # to make the objects settle
+ for _ in range(10):
+ kf = env.agent.keyframes["vertical_grasp"]
+ env.step(env.agent.controller.from_qpos(kf.qpos))
+ env.render()
+
+ FINGER_LENGTH = 0.025
+ BOWL_D = env.bowl_extents[0]
+ BOWL_Z = env.bowl_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = BOWL_Z + RACK_Z + FINGER_LENGTH
+
+ obb = get_actor_obb(env.bowl)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.bowl.pose.p)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler[:, 0] = -np.pi/2
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, BOWL_Z], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ reach_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, BOWL_Z*0.0], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to grasp pose")
+ return res
+
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, BOWL_Z], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ if res == -1:
+ print("Failed to lift pose")
+ return res
+
+ lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, 1.5*ENV_Z_OFFSET], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ if res == -1:
+ print("Failed to lift pose")
+ return res
+
+
+ # # -------------------------------------------------------------------------- #
+ # # Hover over goalsite (rack pose)
+ # # -------------------------------------------------------------------------- #
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ hover_pose = sapien.Pose(pose.sp.p + [0, BOWL_Z, BOWL_Z], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ print("Failed to hover pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower & Release
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(pose.sp.p + [0, BOWL_Z, BOWL_Z/10], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ print("Failed to lower pose")
+ return res
+
+ planner.open_gripper()
+
+ # # -------------------------------------------------------------------------- #
+ # # raise hand
+ # # -------------------------------------------------------------------------- #
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ raise_pose = sapien.Pose(pose.sp.p + [0, BOWL_Z, BOWL_Z], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(raise_pose)
+ if res == -1:
+ print("Failed to raise pose")
+ return res
+
+
+ planner.close()
+ return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/bowl_on_rack_v3.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/bowl_on_rack_v3.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ba0cd3d62cb51b936195ff45c3d696bbaefba01
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/bowl_on_rack_v3.py
@@ -0,0 +1,152 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceBowlOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlaceBowlOnRackEnv = gym.make(
+ "PlaceBowlOnRack-v3",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlaceBowlOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ ], env.unwrapped.control_mode
+ planner = NoahBiArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.5,
+ joint_acc_limits=0.5,
+ version=env.robot_uids.split("_")[-1]
+ )
+
+ env = env.unwrapped
+
+ # to make the objects settle
+ for _ in range(10):
+ kf = env.agent.keyframes["vertical_grasp"]
+ env.step(env.agent.controller.from_qpos(kf.qpos))
+ env.render()
+
+ FINGER_LENGTH = 0.025
+ BOWL_D = env.bowl_extents[0]
+ BOWL_Z = env.bowl_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = BOWL_Z + RACK_Z + FINGER_LENGTH
+
+ obb = get_actor_obb(env.bowl)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.bowl.pose.p)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler[:, 0] = -np.pi/2
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, BOWL_Z], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ reach_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, BOWL_Z*0.0], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to grasp pose")
+ return res
+
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, BOWL_Z], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ if res == -1:
+ print("Failed to lift pose")
+ return res
+
+ lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, 1.5*ENV_Z_OFFSET], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ if res == -1:
+ print("Failed to lift pose")
+ return res
+
+
+ # # -------------------------------------------------------------------------- #
+ # # Hover over goalsite (rack pose)
+ # # -------------------------------------------------------------------------- #
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ hover_pose = sapien.Pose(pose.sp.p + [0, BOWL_Z, BOWL_Z], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ print("Failed to hover pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower & Release
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(pose.sp.p + [0, BOWL_Z, BOWL_Z/10], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ print("Failed to lower pose")
+ return res
+
+ planner.open_gripper()
+
+ # # -------------------------------------------------------------------------- #
+ # # raise hand
+ # # -------------------------------------------------------------------------- #
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ raise_pose = sapien.Pose(pose.sp.p + [0, BOWL_Z, BOWL_Z], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(raise_pose)
+ if res == -1:
+ print("Failed to raise pose")
+ return res
+
+
+ planner.close()
+ return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/bowl_on_rack_v4.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/bowl_on_rack_v4.py
new file mode 100644
index 0000000000000000000000000000000000000000..296f46486534545043cb8d9d62cb2e0016aae6f2
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/bowl_on_rack_v4.py
@@ -0,0 +1,152 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceBowlOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlaceBowlOnRackEnv = gym.make(
+ "PlaceBowlOnRack-v4",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlaceBowlOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ ], env.unwrapped.control_mode
+ planner = NoahBiArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.5,
+ joint_acc_limits=0.5,
+ version=env.robot_uids.split("_")[-1]
+ )
+
+ env = env.unwrapped
+
+ # to make the objects settle
+ for _ in range(10):
+ kf = env.agent.keyframes["vertical_grasp"]
+ env.step(env.agent.controller.from_qpos(kf.qpos))
+ env.render()
+
+ FINGER_LENGTH = 0.025
+ BOWL_D = env.bowl_extents[0]
+ BOWL_Z = env.bowl_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = BOWL_Z + RACK_Z + FINGER_LENGTH
+
+ obb = get_actor_obb(env.bowl)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.bowl.pose.p)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler[:, 0] = -np.pi/2
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, BOWL_Z], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ reach_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, BOWL_Z*0.0], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to grasp pose")
+ return res
+
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, BOWL_Z], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ if res == -1:
+ print("Failed to lift pose")
+ return res
+
+ lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, 1.5*ENV_Z_OFFSET], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ if res == -1:
+ print("Failed to lift pose")
+ return res
+
+
+ # # -------------------------------------------------------------------------- #
+ # # Hover over goalsite (rack pose)
+ # # -------------------------------------------------------------------------- #
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ hover_pose = sapien.Pose(pose.sp.p + [0, BOWL_Z, BOWL_Z], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ print("Failed to hover pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower & Release
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(pose.sp.p + [0, BOWL_Z, BOWL_Z/10], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ print("Failed to lower pose")
+ return res
+
+ planner.open_gripper()
+
+ # # -------------------------------------------------------------------------- #
+ # # raise hand
+ # # -------------------------------------------------------------------------- #
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ raise_pose = sapien.Pose(pose.sp.p + [0, BOWL_Z, BOWL_Z], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(raise_pose)
+ if res == -1:
+ print("Failed to raise pose")
+ return res
+
+
+ planner.close()
+ return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_from_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_from_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..92b95890e177969de499aa033f8cbb095d84f2c7
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_from_rack.py
@@ -0,0 +1,151 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PickForkFromRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
def main():
    """Run the scripted solution for 100 seeds, recording every episode to ./videos."""
    env: PickForkFromRackEnv = gym.make(
        "PickForkFromRack-v1",
        obs_mode="none",
        control_mode="pd_joint_pos",
        render_mode="human",
        reward_mode="none",
    )

    # Record each episode.
    # NOTE(review): RecordVideo usually requires render_mode="rgb_array" to
    # capture frames -- confirm "human" mode actually yields video files here.
    recorded_env = gym.wrappers.RecordVideo(
        env, video_folder="./videos", episode_trigger=lambda _: True
    )
    for episode_seed in range(100):
        solve(recorded_env, seed=episode_seed, debug=False, vis=True)
    recorded_env.close()
+
def solve(env: PickForkFromRackEnv, seed=None, debug=False, vis=False):
    """Scripted pick: grasp the fork by its tail and place it at the goal site.

    Returns the last motion-planner status; -1 as soon as any segment fails
    (a message is printed identifying the failed segment).
    """
    env.reset(seed=seed)
    assert env.unwrapped.control_mode in [
        "pd_joint_pos",
        "pd_joint_pos_vel",
    ], env.unwrapped.control_mode

    planner = NoahBiArmMotionPlanningSolver(
        env,
        debug=debug,
        vis=vis,
        base_pose=env.unwrapped.agent.robot.pose,
        visualize_target_grasp_pose=vis,
        print_env_info=False,
        joint_vel_limits=0.5,
        joint_acc_limits=0.5,
    )

    env = env.unwrapped

    # Step the keyframe pose a few times so the objects settle before planning.
    # NOTE(review): source indentation was ambiguous -- both render calls are
    # assumed to be inside the settle loop; confirm against the original file.
    for _ in range(15):
        kf = env.agent.keyframes["vertical_grasp"]
        env.step(env.agent.controller.from_qpos(kf.qpos))
        env.render()
        env.render_human()

    FINGER_LENGTH = 0.025
    FORK_Z = env.fork_extents[2]
    RACK_Z = env.rack_extents[2]
    ENV_Z_OFFSET = FORK_Z + RACK_Z + FINGER_LENGTH

    obb = get_actor_obb(env.fork)
    # Approach along +x; closing axis taken from the TCP's current y column.
    approaching = np.array([1, 0, 0], dtype=np.float32)
    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)

    grasp_info = compute_grasp_info_by_obb(
        obb,
        approaching=approaching,
        target_closing=target_closing,
        depth=FINGER_LENGTH,
    )

    # Grasp at the fork's tail rather than the OBB center.
    closing = grasp_info["closing"]
    tail_p, _ = env.get_fork_tail_pose()
    grasp_pose = env.agent.build_grasp_pose(approaching, closing, np.array(tail_p[0]))

    # Force the gripper yaw so the grasp faces the camera-up direction.
    euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
    euler[:, 2] = -np.pi / 2
    grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
    grasp_pose = sapien.Pose(grasp_pose.p, grasp_q)

    # -------------------------------------------------------------------------- #
    # Reach: stand off behind/above the grasp before committing
    # -------------------------------------------------------------------------- #
    reach_pose = Pose.create_from_pq(p=grasp_pose.p + [-FORK_Z * 3, 0, FORK_Z], q=grasp_pose.q)
    res = planner.move_to_pose_with_RRTConnect(reach_pose)
    if res == -1:
        print("Failed to reach pose")
        return res

    # -------------------------------------------------------------------------- #
    # Grasp
    # -------------------------------------------------------------------------- #
    res = planner.move_to_pose_with_screw(grasp_pose)
    if res == -1:
        print("Failed to grasp pose")
        return res
    planner.close_gripper()

    # -------------------------------------------------------------------------- #
    # Lift: move above the goal site in two screw segments
    # -------------------------------------------------------------------------- #
    p, q = env.get_goal_site_pose()
    temp_pose = Pose.create_from_pq(p=p, q=grasp_pose.q)
    lift_pose = Pose.create_from_pq(p=temp_pose.sp.p + [0, 0, ENV_Z_OFFSET * 1.5], q=temp_pose.q)
    res = planner.move_to_pose_with_screw(lift_pose)
    if res == -1:
        print("Failed to lift pose 1")
        return res

    lift_pose = Pose.create_from_pq(p=temp_pose.sp.p + [-2 * FORK_Z, 0, ENV_Z_OFFSET * 1.5], q=temp_pose.q)
    res = planner.move_to_pose_with_screw(lift_pose)
    if res == -1:
        print("Failed to lift pose 2")
        return res

    # -------------------------------------------------------------------------- #
    # Hover over the final site with the gripper rolled horizontal
    # -------------------------------------------------------------------------- #
    hover_p = np.array(env.final_site.pose.p[0]) + np.array([-ENV_Z_OFFSET, 0, 2.5 * ENV_Z_OFFSET])

    euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
    euler[:, 1] = 0
    euler[:, 2] = -np.pi
    euler[:, 0] = -np.pi / 2
    grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)

    hover_pose = sapien.Pose(hover_p, grasp_q)
    res = planner.move_to_pose_with_RRTConnect(hover_pose)
    if res == -1:
        print("Failed to hover pose")
        return res

    # -------------------------------------------------------------------------- #
    # Lower & release
    # -------------------------------------------------------------------------- #
    lower_pose = sapien.Pose(hover_pose.p - [-ENV_Z_OFFSET - 2.5 * FORK_Z, 0, 2.5 * ENV_Z_OFFSET - FORK_Z], hover_pose.q)
    res = planner.move_to_pose_with_RRTConnect(lower_pose)
    if res == -1:
        print("Failed to lower pose")
        return res
    planner.open_gripper()

    planner.close()
    return res
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..0ec1d1ab65c30c15806d17d0e13086d507c65600
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_on_rack.py
@@ -0,0 +1,147 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceForkOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
def main():
    """Run the scripted solution for 100 seeds, recording every episode to ./videos."""
    env: PlaceForkOnRackEnv = gym.make(
        "PlaceForkOnRack-v1",
        obs_mode="none",
        control_mode="pd_joint_pos",
        render_mode="human",
        reward_mode="none",
    )

    # Record each episode.
    # NOTE(review): RecordVideo usually requires render_mode="rgb_array" to
    # capture frames -- confirm "human" mode actually yields video files here.
    recorded_env = gym.wrappers.RecordVideo(
        env, video_folder="./videos", episode_trigger=lambda _: True
    )
    for episode_seed in range(100):
        solve(recorded_env, seed=episode_seed, debug=False, vis=True)
    recorded_env.close()
+
def solve(env: PlaceForkOnRackEnv, seed=None, debug=False, vis=False):
    """Scripted pick-and-place: grasp the fork from above and hang it on the rack.

    Returns the last motion-planner status; -1 as soon as any segment fails.
    """
    env.reset(seed=seed)
    assert env.unwrapped.control_mode in [
        "pd_joint_pos",
    ], env.unwrapped.control_mode

    planner = NoahBiArmMotionPlanningSolver(
        env,
        debug=debug,
        vis=vis,
        base_pose=env.unwrapped.agent.robot.pose,
        visualize_target_grasp_pose=vis,
        print_env_info=False,
        joint_vel_limits=0.5,
        joint_acc_limits=0.5,
        version=env.robot_uids.split("_")[-1],
    )

    env = env.unwrapped

    FINGER_LENGTH = 0.025
    obb = get_actor_obb(env.fork)
    # Approach straight down; closing axis from the TCP's current y column.
    approaching = np.array([0, 0, -1], dtype=np.float32)
    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)

    grasp_info = compute_grasp_info_by_obb(
        obb,
        approaching=approaching,
        target_closing=target_closing,
        depth=FINGER_LENGTH,
    )

    # Grasp at the fork's own origin; the OBB center is deliberately unused.
    closing = grasp_info["closing"]
    grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.fork.pose.sp.p)
    FORK_Z = env.fork_extents[2]
    RACK_Z = env.rack_extents[2]
    ENV_Z_OFFSET = FORK_Z + RACK_Z + FINGER_LENGTH

    def _goal_hover_pose(z_offset):
        # Gripper pose over the goal site: roll the grasp orientation to
        # horizontal (-pi/2 about x), yaw the tool frame by pi/2, and back off
        # z_offset along the tool z axis.
        p, q = env.get_goal_site_pose()
        site = Pose.create_from_pq(p=p, q=q)
        euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
        euler[:, 0] = -np.pi / 2
        tilted_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
        return sapien.Pose(site.sp.p, tilted_q) * sapien.Pose(
            [0, 0, -z_offset],
            rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, np.pi / 2])),
        )

    # -------------------------------------------------------------------------- #
    # Reach: hover above the grasp before descending
    # -------------------------------------------------------------------------- #
    reach_pose = grasp_pose * sapien.Pose([0, 0, -FORK_Z - FINGER_LENGTH * 0.4])
    res = planner.move_to_pose_with_RRTConnect(reach_pose)
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Grasp
    # -------------------------------------------------------------------------- #
    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
    if res == -1:
        return res
    planner.close_gripper()

    # -------------------------------------------------------------------------- #
    # Lift straight up, clear of the rack
    # -------------------------------------------------------------------------- #
    lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, 0, 2 * ENV_Z_OFFSET], q=grasp_pose.q)
    res = planner.move_to_pose_with_RRTConnect(lift_pose)
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Hover over the goal site (rack pose)
    # -------------------------------------------------------------------------- #
    hover_pose = _goal_hover_pose(2 * ENV_Z_OFFSET)
    res = planner.move_to_pose_with_RRTConnect(hover_pose)
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Lower in two steps, then release
    # -------------------------------------------------------------------------- #
    lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH / 2, 0, 2 * ENV_Z_OFFSET - RACK_Z * 1.2], hover_pose.q)
    res = planner.move_to_pose_with_RRTConnect(lower_pose)
    if res == -1:
        return res

    lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH / 2, 0, 2 * ENV_Z_OFFSET - RACK_Z * 0.8], hover_pose.q)
    res = planner.move_to_pose_with_RRTConnect(lower_pose)
    if res == -1:
        return res
    planner.open_gripper()

    # -------------------------------------------------------------------------- #
    # Raise the hand clear of the placed fork
    # -------------------------------------------------------------------------- #
    raise_pose = _goal_hover_pose(ENV_Z_OFFSET)
    res = planner.move_to_pose_with_RRTConnect(raise_pose)
    if res == -1:
        return res

    planner.close()
    return res
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_on_rack_v2.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_on_rack_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..a9b096707857974b2b7304cc0deeaed6c5d73ed9
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_on_rack_v2.py
@@ -0,0 +1,148 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceForkOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
def main():
    """Run the scripted solution for 100 seeds, recording every episode to ./videos."""
    env: PlaceForkOnRackEnv = gym.make(
        "PlaceForkOnRack-v2",
        obs_mode="none",
        control_mode="pd_joint_pos",
        render_mode="human",
        reward_mode="none",
    )

    # Record each episode.
    # NOTE(review): RecordVideo usually requires render_mode="rgb_array" to
    # capture frames -- confirm "human" mode actually yields video files here.
    recorded_env = gym.wrappers.RecordVideo(
        env, video_folder="./videos", episode_trigger=lambda _: True
    )
    for episode_seed in range(100):
        solve(recorded_env, seed=episode_seed, debug=False, vis=True)
    recorded_env.close()
+
def solve(env: PlaceForkOnRackEnv, seed=None, debug=False, vis=False):
    """Scripted pick-and-place (v2): grasp the fork from above and hang it on the rack.

    Differs from v1 by sinking the grasp 0.6*FINGER_LENGTH deeper along the tool z.
    Returns the last motion-planner status; -1 as soon as any segment fails.
    """
    env.reset(seed=seed)
    assert env.unwrapped.control_mode in [
        "pd_joint_pos",
    ], env.unwrapped.control_mode

    planner = NoahBiArmMotionPlanningSolver(
        env,
        debug=debug,
        vis=vis,
        base_pose=env.unwrapped.agent.robot.pose,
        visualize_target_grasp_pose=vis,
        print_env_info=False,
        joint_vel_limits=0.5,
        joint_acc_limits=0.5,
        version=env.robot_uids.split("_")[-1],
    )

    env = env.unwrapped

    FINGER_LENGTH = 0.025
    obb = get_actor_obb(env.fork)
    # Approach straight down; closing axis from the TCP's current y column.
    approaching = np.array([0, 0, -1], dtype=np.float32)
    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)

    grasp_info = compute_grasp_info_by_obb(
        obb,
        approaching=approaching,
        target_closing=target_closing,
        depth=FINGER_LENGTH,
    )

    # Grasp at the fork's own origin; the OBB center is deliberately unused.
    closing = grasp_info["closing"]
    grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.fork.pose.sp.p)
    # Sink the grasp slightly deeper for a firmer hold (v2-specific).
    grasp_pose = grasp_pose * sapien.Pose([0, 0, -FINGER_LENGTH * 0.6])
    FORK_Z = env.fork_extents[2]
    RACK_Z = env.rack_extents[2]
    ENV_Z_OFFSET = FORK_Z + RACK_Z + FINGER_LENGTH

    def _goal_hover_pose(z_offset):
        # Gripper pose over the goal site: roll the grasp orientation to
        # horizontal (-pi/2 about x), yaw the tool frame by pi/2, and back off
        # z_offset along the tool z axis.
        p, q = env.get_goal_site_pose()
        site = Pose.create_from_pq(p=p, q=q)
        euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
        euler[:, 0] = -np.pi / 2
        tilted_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
        return sapien.Pose(site.sp.p, tilted_q) * sapien.Pose(
            [0, 0, -z_offset],
            rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, np.pi / 2])),
        )

    # -------------------------------------------------------------------------- #
    # Reach: hover above the grasp before descending
    # -------------------------------------------------------------------------- #
    reach_pose = grasp_pose * sapien.Pose([0, 0, -FORK_Z - FINGER_LENGTH * 0.4])
    res = planner.move_to_pose_with_RRTConnect(reach_pose)
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Grasp
    # -------------------------------------------------------------------------- #
    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
    if res == -1:
        return res
    planner.close_gripper()

    # -------------------------------------------------------------------------- #
    # Lift straight up, clear of the rack
    # -------------------------------------------------------------------------- #
    lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, 0, 2 * ENV_Z_OFFSET], q=grasp_pose.q)
    res = planner.move_to_pose_with_RRTConnect(lift_pose)
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Hover over the goal site (rack pose)
    # -------------------------------------------------------------------------- #
    hover_pose = _goal_hover_pose(2 * ENV_Z_OFFSET)
    res = planner.move_to_pose_with_RRTConnect(hover_pose)
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Lower in two steps, then release
    # -------------------------------------------------------------------------- #
    lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH / 2, 0, 2 * ENV_Z_OFFSET - RACK_Z * 1.2], hover_pose.q)
    res = planner.move_to_pose_with_RRTConnect(lower_pose)
    if res == -1:
        return res

    lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH / 2, 0, 2 * ENV_Z_OFFSET - RACK_Z * 0.8], hover_pose.q)
    res = planner.move_to_pose_with_RRTConnect(lower_pose)
    if res == -1:
        return res

    planner.open_gripper()

    # -------------------------------------------------------------------------- #
    # Raise the hand clear of the placed fork
    # -------------------------------------------------------------------------- #
    raise_pose = _goal_hover_pose(ENV_Z_OFFSET)
    res = planner.move_to_pose_with_RRTConnect(raise_pose)
    if res == -1:
        return res

    planner.close()
    return res
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_on_rack_v3.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_on_rack_v3.py
new file mode 100644
index 0000000000000000000000000000000000000000..040b15c49900395ab92a29b914e1e5d66318384c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_on_rack_v3.py
@@ -0,0 +1,147 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceForkOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
def main():
    """Run the scripted solution for 100 seeds, recording every episode to ./videos."""
    env: PlaceForkOnRackEnv = gym.make(
        "PlaceForkOnRack-v3",
        obs_mode="none",
        control_mode="pd_joint_pos",
        render_mode="human",
        reward_mode="none",
    )

    # Record each episode.
    # NOTE(review): RecordVideo usually requires render_mode="rgb_array" to
    # capture frames -- confirm "human" mode actually yields video files here.
    recorded_env = gym.wrappers.RecordVideo(
        env, video_folder="./videos", episode_trigger=lambda _: True
    )
    for episode_seed in range(100):
        solve(recorded_env, seed=episode_seed, debug=False, vis=True)
    recorded_env.close()
+
def solve(env: PlaceForkOnRackEnv, seed=None, debug=False, vis=False):
    """Scripted pick-and-place (v3): grasp the fork from above and hang it on the rack.

    Returns the last motion-planner status; -1 as soon as any segment fails.
    """
    env.reset(seed=seed)
    assert env.unwrapped.control_mode in [
        "pd_joint_pos",
    ], env.unwrapped.control_mode

    planner = NoahBiArmMotionPlanningSolver(
        env,
        debug=debug,
        vis=vis,
        base_pose=env.unwrapped.agent.robot.pose,
        visualize_target_grasp_pose=vis,
        print_env_info=False,
        joint_vel_limits=0.5,
        joint_acc_limits=0.5,
        version=env.robot_uids.split("_")[-1],
    )

    env = env.unwrapped

    FINGER_LENGTH = 0.025
    obb = get_actor_obb(env.fork)
    # Approach straight down; closing axis from the TCP's current y column.
    approaching = np.array([0, 0, -1], dtype=np.float32)
    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)

    grasp_info = compute_grasp_info_by_obb(
        obb,
        approaching=approaching,
        target_closing=target_closing,
        depth=FINGER_LENGTH,
    )

    # Grasp at the fork's own origin; the OBB center is deliberately unused.
    closing = grasp_info["closing"]
    grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.fork.pose.sp.p)
    FORK_Z = env.fork_extents[2]
    RACK_Z = env.rack_extents[2]
    ENV_Z_OFFSET = FORK_Z + RACK_Z + FINGER_LENGTH

    def _goal_hover_pose(z_offset):
        # Gripper pose over the goal site: roll the grasp orientation to
        # horizontal (-pi/2 about x), yaw the tool frame by pi/2, and back off
        # z_offset along the tool z axis.
        p, q = env.get_goal_site_pose()
        site = Pose.create_from_pq(p=p, q=q)
        euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
        euler[:, 0] = -np.pi / 2
        tilted_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
        return sapien.Pose(site.sp.p, tilted_q) * sapien.Pose(
            [0, 0, -z_offset],
            rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, np.pi / 2])),
        )

    # -------------------------------------------------------------------------- #
    # Reach: hover above the grasp before descending
    # -------------------------------------------------------------------------- #
    reach_pose = grasp_pose * sapien.Pose([0, 0, -FORK_Z - FINGER_LENGTH * 0.4])
    res = planner.move_to_pose_with_RRTConnect(reach_pose)
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Grasp
    # -------------------------------------------------------------------------- #
    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
    if res == -1:
        return res
    planner.close_gripper()

    # -------------------------------------------------------------------------- #
    # Lift straight up, clear of the rack
    # -------------------------------------------------------------------------- #
    lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, 0, 2 * ENV_Z_OFFSET], q=grasp_pose.q)
    res = planner.move_to_pose_with_RRTConnect(lift_pose)
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Hover over the goal site (rack pose)
    # -------------------------------------------------------------------------- #
    hover_pose = _goal_hover_pose(2 * ENV_Z_OFFSET)
    res = planner.move_to_pose_with_RRTConnect(hover_pose)
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Lower in two steps, then release
    # -------------------------------------------------------------------------- #
    lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH / 2, 0, 2 * ENV_Z_OFFSET - RACK_Z * 1.2], hover_pose.q)
    res = planner.move_to_pose_with_RRTConnect(lower_pose)
    if res == -1:
        return res

    lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH / 2, 0, 2 * ENV_Z_OFFSET - RACK_Z * 0.8], hover_pose.q)
    res = planner.move_to_pose_with_RRTConnect(lower_pose)
    if res == -1:
        return res
    planner.open_gripper()

    # -------------------------------------------------------------------------- #
    # Raise the hand clear of the placed fork
    # -------------------------------------------------------------------------- #
    raise_pose = _goal_hover_pose(ENV_Z_OFFSET)
    res = planner.move_to_pose_with_RRTConnect(raise_pose)
    if res == -1:
        return res

    planner.close()
    return res
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_on_rack_v4.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_on_rack_v4.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ebb5de9b70436886ec2838b370f7988f995cb59
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/fork_on_rack_v4.py
@@ -0,0 +1,147 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceForkOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
def main():
    """Run the scripted solution for 100 seeds, recording every episode to ./videos."""
    env: PlaceForkOnRackEnv = gym.make(
        "PlaceForkOnRack-v4",
        obs_mode="none",
        control_mode="pd_joint_pos",
        render_mode="human",
        reward_mode="none",
    )

    # Record each episode.
    # NOTE(review): RecordVideo usually requires render_mode="rgb_array" to
    # capture frames -- confirm "human" mode actually yields video files here.
    recorded_env = gym.wrappers.RecordVideo(
        env, video_folder="./videos", episode_trigger=lambda _: True
    )
    for episode_seed in range(100):
        solve(recorded_env, seed=episode_seed, debug=False, vis=True)
    recorded_env.close()
+
def solve(env: PlaceForkOnRackEnv, seed=None, debug=False, vis=False):
    """Scripted pick-and-place (v4): grasp the fork from above and hang it on the rack.

    Returns the last motion-planner status; -1 as soon as any segment fails.
    """
    env.reset(seed=seed)
    assert env.unwrapped.control_mode in [
        "pd_joint_pos",
    ], env.unwrapped.control_mode

    planner = NoahBiArmMotionPlanningSolver(
        env,
        debug=debug,
        vis=vis,
        base_pose=env.unwrapped.agent.robot.pose,
        visualize_target_grasp_pose=vis,
        print_env_info=False,
        joint_vel_limits=0.5,
        joint_acc_limits=0.5,
        version=env.robot_uids.split("_")[-1],
    )

    env = env.unwrapped

    FINGER_LENGTH = 0.025
    obb = get_actor_obb(env.fork)
    # Approach straight down; closing axis from the TCP's current y column.
    approaching = np.array([0, 0, -1], dtype=np.float32)
    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)

    grasp_info = compute_grasp_info_by_obb(
        obb,
        approaching=approaching,
        target_closing=target_closing,
        depth=FINGER_LENGTH,
    )

    # Grasp at the fork's own origin; the OBB center is deliberately unused.
    closing = grasp_info["closing"]
    grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.fork.pose.sp.p)
    FORK_Z = env.fork_extents[2]
    RACK_Z = env.rack_extents[2]
    ENV_Z_OFFSET = FORK_Z + RACK_Z + FINGER_LENGTH

    def _goal_hover_pose(z_offset):
        # Gripper pose over the goal site: roll the grasp orientation to
        # horizontal (-pi/2 about x), yaw the tool frame by pi/2, and back off
        # z_offset along the tool z axis.
        p, q = env.get_goal_site_pose()
        site = Pose.create_from_pq(p=p, q=q)
        euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
        euler[:, 0] = -np.pi / 2
        tilted_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
        return sapien.Pose(site.sp.p, tilted_q) * sapien.Pose(
            [0, 0, -z_offset],
            rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, np.pi / 2])),
        )

    # -------------------------------------------------------------------------- #
    # Reach: hover above the grasp before descending
    # -------------------------------------------------------------------------- #
    reach_pose = grasp_pose * sapien.Pose([0, 0, -FORK_Z - FINGER_LENGTH * 0.4])
    res = planner.move_to_pose_with_RRTConnect(reach_pose)
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Grasp
    # -------------------------------------------------------------------------- #
    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
    if res == -1:
        return res
    planner.close_gripper()

    # -------------------------------------------------------------------------- #
    # Lift straight up, clear of the rack
    # -------------------------------------------------------------------------- #
    lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, 0, 2 * ENV_Z_OFFSET], q=grasp_pose.q)
    res = planner.move_to_pose_with_RRTConnect(lift_pose)
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Hover over the goal site (rack pose)
    # -------------------------------------------------------------------------- #
    hover_pose = _goal_hover_pose(2 * ENV_Z_OFFSET)
    res = planner.move_to_pose_with_RRTConnect(hover_pose)
    if res == -1:
        return res

    # -------------------------------------------------------------------------- #
    # Lower in two steps, then release
    # -------------------------------------------------------------------------- #
    lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH / 2, 0, 2 * ENV_Z_OFFSET - RACK_Z * 1.2], hover_pose.q)
    res = planner.move_to_pose_with_RRTConnect(lower_pose)
    if res == -1:
        return res

    lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH / 2, 0, 2 * ENV_Z_OFFSET - RACK_Z * 0.8], hover_pose.q)
    res = planner.move_to_pose_with_RRTConnect(lower_pose)
    if res == -1:
        return res
    planner.open_gripper()

    # -------------------------------------------------------------------------- #
    # Raise the hand clear of the placed fork
    # -------------------------------------------------------------------------- #
    raise_pose = _goal_hover_pose(ENV_Z_OFFSET)
    res = planner.move_to_pose_with_RRTConnect(raise_pose)
    if res == -1:
        return res

    planner.close()
    return res
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/knife_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/knife_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5b39f07cd10f600f746068c29204b676cc3d604
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/knife_on_rack.py
@@ -0,0 +1,148 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceKnifeOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlaceKnifeOnRackEnv = gym.make(
+ "PlaceKnifeOnRack-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlaceKnifeOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+
+ ], env.unwrapped.control_mode
+
+ planner = NoahBiArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.5,
+ joint_acc_limits=0.5,
+ version=env.robot_uids.split("_")[-1],
+ )
+
+ env = env.unwrapped
+
+ FINGER_LENGTH = 0.025
+ obb = get_actor_obb(env.fork)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.fork.pose.sp.p)
+ # grasp_pose = grasp_pose * sapien.Pose([0, 0, -FINGER_LENGTH*0.6])
+ FORK_Z = env.fork_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = FORK_Z + RACK_Z + FINGER_LENGTH
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -FORK_Z-FINGER_LENGTH*0.4])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+ if res == -1:
+ # print("Failed to grasp pose")
+ return res
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, 0, 2*ENV_Z_OFFSET], q=grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ if res == -1:
+ # print("Failed to lift pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Hover over the goal site (rack pose)
+ # -------------------------------------------------------------------------- #
+ goal_extents = torch.from_numpy(env.goal_extents)
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler [:, 0] = -np.pi/2
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+ hover_pose = sapien.Pose(pose.sp.p, grasp_q) *\
+ sapien.Pose([0, 0, -2*ENV_Z_OFFSET], rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, np.pi/2])))
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ # print("Failed to hover pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower & Release
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH/2, 0, 2*ENV_Z_OFFSET-RACK_Z*1.2], hover_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ # print("Failed to lower pose 1")
+ return res
+
+ lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH/2, 0, 2*ENV_Z_OFFSET-RACK_Z*0.8], hover_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ # print("Failed to lower pose 2")
+ return res
+
+ planner.open_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # raise hand
+ # -------------------------------------------------------------------------- #
+ goal_extents = torch.from_numpy(env.goal_extents)
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler [:, 0] = -np.pi/2
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+ hover_pose = sapien.Pose(pose.sp.p, grasp_q) *\
+ sapien.Pose([0, 0, -ENV_Z_OFFSET], rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, np.pi/2])))
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ # print("Failed to raise pose")
+ return res
+
+
+
+ planner.close()
+ return res
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/knife_on_rack_v2.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/knife_on_rack_v2.py
new file mode 100644
index 0000000000000000000000000000000000000000..4a67f8a3750c06137612ba9e94083fbadd802ea4
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/knife_on_rack_v2.py
@@ -0,0 +1,148 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceKnifeOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlaceKnifeOnRackEnv = gym.make(
+ "PlaceKnifeOnRack-v2",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlaceKnifeOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+
+ ], env.unwrapped.control_mode
+
+ planner = NoahBiArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.5,
+ joint_acc_limits=0.5,
+ version=env.robot_uids.split("_")[-1],
+ )
+
+ env = env.unwrapped
+
+ FINGER_LENGTH = 0.025
+ obb = get_actor_obb(env.knife)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.knife.pose.sp.p)
+ grasp_pose = grasp_pose * sapien.Pose([0, 0, -FINGER_LENGTH*0.6])
+ FORK_Z = env.knife_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = FORK_Z + RACK_Z + FINGER_LENGTH
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -FORK_Z-FINGER_LENGTH*0.4])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+ if res == -1:
+ # print("Failed to grasp pose")
+ return res
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, 0, 2*ENV_Z_OFFSET], q=grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ if res == -1:
+ # print("Failed to lift pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Hover over the goal site (rack pose)
+ # -------------------------------------------------------------------------- #
+ goal_extents = torch.from_numpy(env.goal_extents)
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler [:, 0] = -np.pi/2
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+ hover_pose = sapien.Pose(pose.sp.p, grasp_q) *\
+ sapien.Pose([0, 0, -2*ENV_Z_OFFSET], rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, np.pi/2])))
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ # print("Failed to hover pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower & Release
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH/2, 0, 2*ENV_Z_OFFSET-RACK_Z*1.2], hover_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ # print("Failed to lower pose 1")
+ return res
+
+ lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH/2, 0, 2*ENV_Z_OFFSET-RACK_Z*0.8], hover_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ # print("Failed to lower pose 2")
+ return res
+
+ planner.open_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # raise hand
+ # -------------------------------------------------------------------------- #
+ goal_extents = torch.from_numpy(env.goal_extents)
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler [:, 0] = -np.pi/2
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+ hover_pose = sapien.Pose(pose.sp.p, grasp_q) *\
+ sapien.Pose([0, 0, -ENV_Z_OFFSET], rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, np.pi/2])))
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ # print("Failed to raise pose")
+ return res
+
+
+
+ planner.close()
+ return res
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/knife_on_rack_v3.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/knife_on_rack_v3.py
new file mode 100644
index 0000000000000000000000000000000000000000..dce8ff749b8001358d064692d6098bb8451636f9
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/knife_on_rack_v3.py
@@ -0,0 +1,148 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceKnifeOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlaceKnifeOnRackEnv = gym.make(
+ "PlaceKnifeOnRack-v3",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlaceKnifeOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+
+ ], env.unwrapped.control_mode
+
+ planner = NoahBiArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.5,
+ joint_acc_limits=0.5,
+ version=env.robot_uids.split("_")[-1],
+ )
+
+ env = env.unwrapped
+
+ FINGER_LENGTH = 0.025
+ obb = get_actor_obb(env.fork)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.fork.pose.sp.p)
+ # grasp_pose = grasp_pose * sapien.Pose([0, 0, -FINGER_LENGTH*0.6])
+ FORK_Z = env.fork_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = FORK_Z + RACK_Z + FINGER_LENGTH
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -FORK_Z-FINGER_LENGTH*0.4])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+ if res == -1:
+ # print("Failed to grasp pose")
+ return res
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, 0, 2*ENV_Z_OFFSET], q=grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ if res == -1:
+ # print("Failed to lift pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Hover over the goal site (rack pose)
+ # -------------------------------------------------------------------------- #
+ goal_extents = torch.from_numpy(env.goal_extents)
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler [:, 0] = -np.pi/2
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+ hover_pose = sapien.Pose(pose.sp.p, grasp_q) *\
+ sapien.Pose([0, 0, -2*ENV_Z_OFFSET], rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, np.pi/2])))
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ # print("Failed to hover pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower & Release
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH/2, 0, 2*ENV_Z_OFFSET-RACK_Z*1.2], hover_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ # print("Failed to lower pose 1")
+ return res
+
+ lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH/2, 0, 2*ENV_Z_OFFSET-RACK_Z*0.8], hover_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ # print("Failed to lower pose 2")
+ return res
+
+ planner.open_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # raise hand
+ # -------------------------------------------------------------------------- #
+ goal_extents = torch.from_numpy(env.goal_extents)
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler [:, 0] = -np.pi/2
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+ hover_pose = sapien.Pose(pose.sp.p, grasp_q) *\
+ sapien.Pose([0, 0, -ENV_Z_OFFSET], rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, np.pi/2])))
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ # print("Failed to raise pose")
+ return res
+
+
+
+ planner.close()
+ return res
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/knife_on_rack_v4.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/knife_on_rack_v4.py
new file mode 100644
index 0000000000000000000000000000000000000000..174966cd2f923bf1295d0e80841d50dfddde7640
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/knife_on_rack_v4.py
@@ -0,0 +1,148 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceKnifeOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlaceKnifeOnRackEnv = gym.make(
+ "PlaceKnifeOnRack-v4",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlaceKnifeOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+
+ ], env.unwrapped.control_mode
+
+ planner = NoahBiArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.5,
+ joint_acc_limits=0.5,
+ version=env.robot_uids.split("_")[-1],
+ )
+
+ env = env.unwrapped
+
+ FINGER_LENGTH = 0.025
+ obb = get_actor_obb(env.knife)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.knife.pose.sp.p)
+ # grasp_pose = grasp_pose * sapien.Pose([0, 0, -FINGER_LENGTH*0.6])
+ KNIFE_Z = env.knife_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = KNIFE_Z + RACK_Z + FINGER_LENGTH
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -KNIFE_Z-FINGER_LENGTH*0.4])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+ if res == -1:
+ # print("Failed to grasp pose")
+ return res
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, 0, 2*ENV_Z_OFFSET], q=grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ if res == -1:
+ # print("Failed to lift pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Hover over the goal site (rack pose)
+ # -------------------------------------------------------------------------- #
+ goal_extents = torch.from_numpy(env.goal_extents)
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler [:, 0] = -np.pi/2
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+ hover_pose = sapien.Pose(pose.sp.p, grasp_q) *\
+ sapien.Pose([0, 0, -2*ENV_Z_OFFSET], rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, np.pi/2])))
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ # print("Failed to hover pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower & Release
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH/2, 0, 2*ENV_Z_OFFSET-RACK_Z*1.2], hover_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ # print("Failed to lower pose 1")
+ return res
+
+ lower_pose = sapien.Pose(hover_pose.p - [FINGER_LENGTH/2, 0, 2*ENV_Z_OFFSET-RACK_Z*0.8], hover_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ # print("Failed to lower pose 2")
+ return res
+
+ planner.open_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # raise hand
+ # -------------------------------------------------------------------------- #
+ goal_extents = torch.from_numpy(env.goal_extents)
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler [:, 0] = -np.pi/2
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+ hover_pose = sapien.Pose(pose.sp.p, grasp_q) *\
+ sapien.Pose([0, 0, -ENV_Z_OFFSET], rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, np.pi/2])))
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ # print("Failed to raise pose")
+ return res
+
+
+
+ planner.close()
+ return res
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/lift_peg_upright.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/lift_peg_upright.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8a36cbafef569a406613ce27e27f50ba3aa0bf7
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/lift_peg_upright.py
@@ -0,0 +1,106 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+
+from mani_skill.envs.tasks import LiftPegUprightEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+
+def main():
+ env: LiftPegUprightEnv = gym.make(
+ "LiftPegUpright-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="rgb_array",
+ reward_mode="dense",
+ )
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ print(res[-1])
+ env.close()
+
+def solve(env: LiftPegUprightEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+ FINGER_LENGTH = 0.025
+
+ obb = get_actor_obb(env.peg)
+ approaching = np.array([0, 0, -1])
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy()
+ peg_init_pose = env.peg.pose
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+ offset = sapien.Pose([0.10, 0, 0])
+ grasp_pose = grasp_pose * offset
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -0.05])
+ res = planner.move_to_pose_with_screw(reach_pose)
+ if res == -1: return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_screw(grasp_pose)
+ if res == -1: return res
+ planner.close_gripper(gripper_state=-0.6)
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = sapien.Pose([0, 0, 0.30]) * grasp_pose
+ res = planner.move_to_pose_with_screw(lift_pose)
+ if res == -1: return res
+
+ # -------------------------------------------------------------------------- #
+ # Place upright
+ # -------------------------------------------------------------------------- #
+ theta = np.pi/10
+ rotation_quat = np.array([np.cos(theta), 0, np.sin(theta), 0])
+
+ final_pose = lift_pose * sapien.Pose(
+ p=[0, 0, 0],
+ q=rotation_quat
+ )
+ res = planner.move_to_pose_with_screw(final_pose)
+ if res == -1: return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose([0, 0, -0.10]) * final_pose
+ res = planner.move_to_pose_with_screw(lower_pose)
+ if res == -1: return res
+
+ planner.close()
+
+ planner.open_gripper()
+ return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/mug_from_coffee_machine.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/mug_from_coffee_machine.py
new file mode 100644
index 0000000000000000000000000000000000000000..94807d46068cd700a22f80f988da9e4819db6695
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/mug_from_coffee_machine.py
@@ -0,0 +1,163 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat,quat2euler
+
+from mani_skill.envs.tasks import PickMugFromCoffeeMachineEnv
+
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PickMugFromCoffeeMachineEnv = gym.make(
+ "PickMugFromCoffeeMachine-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ sim_config=dict(scene_config=dict(enable_pcm=False)),
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PickMugFromCoffeeMachineEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+
+ planner = NoahBiArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.5,
+ joint_acc_limits=0.5,
+ )
+
+ env = env.unwrapped
+
+ FINGER_LENGTH = 0.025
+ MUG_Z = env.mug_extents[2]
+ MUG_D = env.mug_extents[0]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = MUG_Z + RACK_Z + FINGER_LENGTH * 2.5
+ EPS = 1e-2
+
+ def f(self, x, out):
+ # breakpoint()
+ # Set the robot's joint configuration to x.
+ # breakpoint()
+ self.planner.robot.set_qpos(x)
+ # For perfect alignment, the dot product with [0, 0, 1] should be 1.
+ out[0] = self.get_eef_x().dot(np.array([0, 0, 1])) - 1
+
+
+ def j(self, x, out):
+
+ # breakpoint()
+ # Pad the joint configuration.
+ full_qpos = self.planner.pad_move_group_qpos(x)
+ # Compute the Jacobian for the last link in the move group.
+ jac = self.planner.robot.get_pinocchio_model().compute_single_link_jacobian(
+ full_qpos, len(self.planner.move_group_joint_indices) - 1
+ )
+ # Extract the rotational part of the Jacobian.
+ rot_jac = jac[3:, self.planner.move_group_joint_indices]
+ # Compute the derivative of the constraint for each joint.
+ for i in range(len(self.planner.move_group_joint_indices)):
+ out[i] = np.cross(rot_jac[:, i], self.get_eef_x()).dot(np.array([0, 0, 1]))
+
+ obb = get_actor_obb(env.mug)
+
+ tip_p, tip_q = env.get_mug_tip_pose()
+ tip_pose = Pose.create_from_pq(p=tip_p, q=tip_q)
+
+
+ approaching = np.array([1, 0, 0], dtype=np.float32)
+
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, tip_pose.sp.p)
+ grasp_pose = sapien.Pose(grasp_pose.p - [0, 0, MUG_Z*0.1], grasp_pose.q)
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = sapien.Pose(grasp_pose.p - [MUG_Z/2, 0, 0], grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ grasp_pose = sapien.Pose(grasp_pose.p + [FINGER_LENGTH/2, 0, 0], grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+ if res == -1:
+ print("Failed to grasp pose")
+ return res
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Back to Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = sapien.Pose(grasp_pose.p - [MUG_Z*1.2, 0, -MUG_Z/2], grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Hover next to the final goal site
+ # -------------------------------------------------------------------------- #
+ p, q = env.final_site.pose.p, grasp_pose.q
+ p[:, 2] = float(grasp_pose.p[2]) * 1
+ pose = Pose.create_from_pq(p=p, q=q)
+ hover_pose = sapien.Pose(pose.sp.p + [FINGER_LENGTH/2, 0, 0] , grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ print("Failed to hover pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower & Release
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(hover_pose.p - [0, 0, MUG_Z*0.8], hover_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ planner.open_gripper()
+ if res == -1:
+ print("Failed to lower pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # move back a bit
+ # -------------------------------------------------------------------------- #
+ back_pose = sapien.Pose(hover_pose.p - [MUG_Z, 0, MUG_Z*0.8], hover_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(back_pose)
+ if res == -1:
+ print("Failed to back pose")
+ return res
+
+ planner.close()
+ return res
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/mug_on_coffee_machine.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/mug_on_coffee_machine.py
new file mode 100644
index 0000000000000000000000000000000000000000..173c43aa0002a0f0243119dc98040111fbf031fa
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/mug_on_coffee_machine.py
@@ -0,0 +1,158 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat,quat2euler
+
+from mani_skill.envs.tasks import PlaceMugOnCoffeeMachineEnv
+
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():  # Entry point: build the PlaceMugOnCoffeeMachine env, record every episode, run scripted solver over 100 seeds.
+    env: PlaceMugOnCoffeeMachineEnv = gym.make(
+        "PlaceMugOnCoffeeMachine-v1",
+        obs_mode="none",
+        control_mode="pd_joint_pos",  # solve() asserts a joint-position control mode
+        render_mode="human",
+        reward_mode="dense",
+        sim_config=dict(scene_config=dict(enable_pcm=False)),
+    )
+
+    # Wrap the environment with RecordVideo
+    env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)  # trigger=True: record all episodes
+    for seed in range(100):
+        res = solve(env, seed=seed, debug=False, vis=True)  # res is the last planner result, -1 on failure; not inspected here
+    env.close()
+
+def solve(env: PlaceMugOnCoffeeMachineEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+
+ planner = NoahBiArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.5,
+ joint_acc_limits=0.5,
+ )
+
+ env = env.unwrapped
+
+
+ FINGER_LENGTH = 0.025
+ MUG_Z = env.mug_extents[2]
+ MUG_D = env.mug_extents[0]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = MUG_Z + RACK_Z + FINGER_LENGTH * 2.5
+ EPS = 1e-2
+
+ def f(self, x, out):
+ # breakpoint()
+ # Set the robot's joint configuration to x.
+ # breakpoint()
+ self.planner.robot.set_qpos(x)
+ # For perfect alignment, the dot product with [0, 0, 1] should be 1.
+ out[0] = self.get_eef_x().dot(np.array([0, 0, 1])) - 1
+
+
+ def j(self, x, out):
+
+ # breakpoint()
+ # Pad the joint configuration.
+ full_qpos = self.planner.pad_move_group_qpos(x)
+ # Compute the Jacobian for the last link in the move group.
+ jac = self.planner.robot.get_pinocchio_model().compute_single_link_jacobian(
+ full_qpos, len(self.planner.move_group_joint_indices) - 1
+ )
+ # Extract the rotational part of the Jacobian.
+ rot_jac = jac[3:, self.planner.move_group_joint_indices]
+ # Compute the derivative of the constraint for each joint.
+ for i in range(len(self.planner.move_group_joint_indices)):
+ out[i] = np.cross(rot_jac[:, i], self.get_eef_x()).dot(np.array([0, 0, 1]))
+
+ obb = get_actor_obb(env.mug)
+
+ tip_p, tip_q = env.get_mug_tip_pose()
+ tip_pose = Pose.create_from_pq(p=tip_p, q=tip_q)
+
+ approaching = np.array([1, 0, 0], dtype=np.float32)
+
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, tip_pose.sp.p)
+ grasp_pose = sapien.Pose(grasp_pose.p - [0, 0, MUG_Z*0.2], grasp_pose.q)
+
+
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = sapien.Pose(grasp_pose.p - [2*MUG_Z, 0, 0], grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+ reach_pose = sapien.Pose(grasp_pose.p - [MUG_Z/2, 0, 0], grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+ if res == -1:
+ print("Failed to grasppose")
+ return res
+ planner.close_gripper()
+
+
+ # -------------------------------------------------------------------------- #
+ # Hover next to goalsite (rack pose)
+ # -------------------------------------------------------------------------- #
+ goal_extents = torch.from_numpy(env.goal_extents)
+ p, q = env.get_goal_site_pose()
+ goal_pose = Pose.create_from_pq(p=p, q=q)
+ offset = torch.tensor([-MUG_D, 0, MUG_Z]).to(p.device)
+ hover_pose = Pose.create_from_pq(p=p + offset, q=grasp_pose.q)
+
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ print("Failed to lift pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # move on goal & Release
+ # -------------------------------------------------------------------------- #
+ offset = torch.tensor([0, 0, MUG_Z*0.4]).to(p.device)
+ lower_pose = Pose.create_from_pq(p=p + offset, q=grasp_pose.q)
+
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ planner.open_gripper()
+ if res == -1:
+ print("Failed to lower pose")
+ return res
+
+
+ planner.close()
+ return res
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/mug_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/mug_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..53dbd221f039b690e1324f8499559df32585e036
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/mug_on_rack.py
@@ -0,0 +1,127 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat,quat2euler
+
+from mani_skill.envs.tasks import PlaceMugOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():  # Entry point: build the PlaceMugOnRack env, record every episode, run scripted solver over 100 seeds.
+    env: PlaceMugOnRackEnv = gym.make(
+        "PlaceMugOnRack-v1",
+        obs_mode="none",
+        control_mode="pd_joint_pos",  # solve() asserts a joint-position control mode
+        render_mode="human",
+        reward_mode="dense",
+    )
+
+    # Wrap the environment with RecordVideo
+    env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)  # trigger=True: record all episodes
+    for seed in range(100):
+        res = solve(env, seed=seed, debug=False, vis=True)  # result ignored; failures only print inside solve()
+    env.close()
+
+def solve(env: PlaceMugOnRackEnv, seed=None, debug=False, vis=False):  # Scripted top-down grasp of the mug tip, then hang it on the rack; returns planner result, -1 on failure.
+    env.reset(seed=seed)
+    assert env.unwrapped.control_mode in [
+        "pd_joint_pos",
+        "pd_joint_pos_vel",
+    ], env.unwrapped.control_mode
+
+    planner = NoahBiArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+        joint_vel_limits=0.5,
+        joint_acc_limits=0.5,
+    )
+
+    env = env.unwrapped
+
+    FINGER_LENGTH = 0.025  # gripper finger length in meters, used as grasp depth
+    MUG_Z = env.mug_extents[2]
+    MUG_D = env.mug_extents[0]  # NOTE(review): unused
+    RACK_Z = env.rack_extents[2]
+    ENV_Z_OFFSET = MUG_Z + RACK_Z + FINGER_LENGTH  # clearance used when hovering over the rack
+    EPS = 1e-2  # NOTE(review): unused
+
+    obb = get_actor_obb(env.mug)
+    tip_p, tip_q = env.get_mug_tip_pose()
+    tip_pose = Pose.create_from_pq(p=tip_p, q=tip_q)
+
+    approaching = np.array([0, 0, -1], dtype=np.float32)  # top-down approach
+
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)  # current TCP y-axis as preferred closing direction
+
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH
+    )
+
+    closing, center = grasp_info["closing"], grasp_info["center"]  # NOTE(review): center unused — grasp built at the mug tip
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, tip_pose.sp.p)
+    grasp_pose = grasp_pose * sapien.Pose([0, 0, MUG_Z*0.3])  # push the grasp slightly deeper along the approach axis
+
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    # reach_pose = grasp_pose * sapien.Pose([0, 0, -ENV_Z_OFFSET])
+    # res = planner.move_to_pose_with_RRTConnect(reach_pose)
+    # if res == -1:
+    #     # print("Failed to reach pose")
+    #     return res
+
+    reach_pose = grasp_pose * sapien.Pose([0, 0, -MUG_Z/2])  # pre-grasp backed off half a mug height
+    res = planner.move_to_pose_with_RRTConnect(reach_pose)
+    if res == -1:
+        # print("Failed to reach pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Grasp
+    # -------------------------------------------------------------------------- #
+    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+    if res == -1:
+        # print("Failed to grasp pose")
+        return res
+    planner.close_gripper()
+
+
+    # -------------------------------------------------------------------------- #
+    # Hover over goalsite (rack pose)
+    # -------------------------------------------------------------------------- #
+    goal_extents = torch.from_numpy(env.goal_extents)  # NOTE(review): unused
+    p, q = env.get_goal_site_pose()
+    pose = Pose.create_from_pq(p=p, q=grasp_pose.q)  # goal position, but keep the grasp orientation
+    euler = [0, 0, 0]  # identity extra rotation for the hover offset
+    offset = [0, 0, -ENV_Z_OFFSET/2]  # hover half the clearance above the hook (offset is in the grasp frame)
+    hover_pose = sapien.Pose(pose.sp.p, grasp_pose.q) *\
+        sapien.Pose(offset, rotation_conversions.euler_to_quaternion(torch.tensor(euler)))
+    res = planner.move_to_pose_with_RRTConnect(hover_pose)
+    if res == -1:
+        # print("Failed to lift pose")
+        return res
+
+
+    # -------------------------------------------------------------------------- #
+    # Lower & Release
+    # -------------------------------------------------------------------------- #
+    lower_pose = sapien.Pose(hover_pose.p - [0, 0, MUG_Z/2], hover_pose.q)  # descend half a mug height in world z
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    if res == -1:
+        # print("Failed to lower pose")
+        return res
+    planner.open_gripper()
+
+    planner.close()
+    return res
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/peg_insertion_side.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/peg_insertion_side.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfe632130d0261c0a764f2d58a331dd44d20cfac
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/peg_insertion_side.py
@@ -0,0 +1,99 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+
+from mani_skill.envs.tasks import PegInsertionSideEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import \
+ PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import (
+ compute_grasp_info_by_obb, get_actor_obb)
+
+
+def main():
+ env: PegInsertionSideEnv = gym.make(
+ "PegInsertionSide-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="rgb_array",
+ reward_mode="dense",
+ )
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ print(res[-1])
+ env.close()
+
+
+def solve(env: PegInsertionSideEnv, seed=None, debug=False, vis=False):  # Scripted grasp of the peg and side insertion into the hole; returns planner result, -1 on failure.
+    env.reset(seed=seed)
+    assert env.unwrapped.control_mode in [
+        "pd_joint_pos",
+        "pd_joint_pos_vel",
+    ], env.unwrapped.control_mode
+    planner = PandaArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+        joint_vel_limits=0.75,
+        joint_acc_limits=0.75,
+    )
+    env = env.unwrapped
+    FINGER_LENGTH = 0.025  # gripper finger length in meters, used as grasp depth
+
+    obb = get_actor_obb(env.peg)
+    approaching = np.array([0, 0, -1])  # top-down approach
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].numpy()  # current TCP y-axis as preferred closing direction
+
+    peg_init_pose = env.peg.pose  # saved so the goal can be mapped into the grasp frame below
+
+    grasp_info = compute_grasp_info_by_obb(
+        obb, approaching=approaching, target_closing=target_closing, depth=FINGER_LENGTH
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    offset = sapien.Pose([-max(0.05, env.peg_half_sizes[0, 0] / 2 + 0.01), 0, 0])  # grasp away from the peg's insertion end
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+    grasp_pose = grasp_pose * (offset)
+
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = grasp_pose * (sapien.Pose([0, 0, -0.05]))  # pre-grasp 5cm back along the approach axis
+    res = planner.move_to_pose_with_screw(reach_pose)
+    if res == -1: return res
+    # -------------------------------------------------------------------------- #
+    # Grasp
+    # -------------------------------------------------------------------------- #
+    res = planner.move_to_pose_with_screw(grasp_pose)
+    if res == -1: return res
+    planner.close_gripper()
+
+    # -------------------------------------------------------------------------- #
+    # Align Peg
+    # -------------------------------------------------------------------------- #
+
+    # align the peg with the hole
+    insert_pose = env.goal_pose * peg_init_pose.inv() * grasp_pose  # gripper pose that would put the peg at the goal
+    offset = sapien.Pose([-0.01 - env.peg_half_sizes[0, 0], 0, 0])  # back off by peg half-length plus 1cm before inserting
+    pre_insert_pose = insert_pose * (offset)
+    res = planner.move_to_pose_with_screw(pre_insert_pose)
+    if res == -1: return res
+    # refine the insertion pose
+    for i in range(3):  # iteratively correct for drift of the grasped peg
+        delta_pose = env.goal_pose * (offset) * env.peg.pose.inv()
+        pre_insert_pose = delta_pose * pre_insert_pose
+        res = planner.move_to_pose_with_screw(pre_insert_pose)
+        if res == -1: return res
+
+    # -------------------------------------------------------------------------- #
+    # Insert
+    # -------------------------------------------------------------------------- #
+    res = planner.move_to_pose_with_screw(insert_pose * (sapien.Pose([0.05, 0, 0])))  # push 5cm past the aligned pose into the hole
+    if res == -1: return res
+    planner.close()
+    return res
+
+
+if __name__ == "__main__":
+ main()
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/pick_cube.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/pick_cube.py
new file mode 100644
index 0000000000000000000000000000000000000000..aca7dd0c91cacab9275cedd82f12970a54f23c55
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/pick_cube.py
@@ -0,0 +1,59 @@
+import numpy as np
+import sapien
+
+from mani_skill.envs.tasks import PickCubeEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import \
+ PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import (
+ compute_grasp_info_by_obb, get_actor_obb)
+
+def solve(env: PickCubeEnv, seed=None, debug=False, vis=False):  # Scripted top-down pick of the cube and move to the goal site; returns the final planner result.
+    env.reset(seed=seed)
+    planner = PandaArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+    )
+
+    FINGER_LENGTH = 0.025  # gripper finger length in meters, used as grasp depth
+    env = env.unwrapped
+
+    # retrieves the object oriented bounding box (trimesh box object)
+    obb = get_actor_obb(env.cube)
+
+    approaching = np.array([0, 0, -1])  # top-down approach
+    # get transformation matrix of the tcp pose, is default batched and on torch
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy()
+    # we can build a simple grasp pose using this information for Panda
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH,
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]  # NOTE(review): center unused — grasp built at the cube's pose instead
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.cube.pose.sp.p)
+
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = grasp_pose * sapien.Pose([0, 0, -0.05])  # pre-grasp 5cm back along the approach axis
+    planner.move_to_pose_with_screw(reach_pose)  # NOTE(review): result unchecked — a -1 failure here is silently ignored
+
+    # -------------------------------------------------------------------------- #
+    # Grasp
+    # -------------------------------------------------------------------------- #
+    planner.move_to_pose_with_screw(grasp_pose)  # NOTE(review): result unchecked
+    planner.close_gripper()
+
+    # -------------------------------------------------------------------------- #
+    # Move to goal pose
+    # -------------------------------------------------------------------------- #
+    goal_pose = sapien.Pose(env.goal_site.pose.sp.p, grasp_pose.q)  # goal position, keeping the grasp orientation
+    res = planner.move_to_pose_with_screw(goal_pose)
+
+    planner.close()
+    return res
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b8f2fc3453c68c9a9f62fcd5ae177b3be3a8648
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack.py
@@ -0,0 +1,183 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat, quat2euler
+
+from mani_skill.envs.tasks import PlacePlateOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():  # Entry point: build the PlacePlateOnRack env, record every episode, run scripted solver over 100 seeds.
+    env: PlacePlateOnRackEnv = gym.make(
+        "PlacePlateOnRack-v1",
+        obs_mode="none",
+        control_mode="pd_joint_pos",  # solve() asserts this control mode
+        render_mode="human",
+        reward_mode="dense",
+    )
+
+    env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)  # trigger=True: record all episodes
+    for seed in range(100):
+        res = solve(env, seed=seed, debug=False, vis=True)  # result ignored; failures only print inside solve()
+    env.close()
+
+def solve(env: PlacePlateOnRackEnv, seed=None, debug=False, vis=False):  # Scripted pick of the plate and slotted placement on the rack; returns planner result, -1 on failure.
+    env.reset(seed=seed)
+    assert env.unwrapped.control_mode in [
+        "pd_joint_pos",
+    ], env.unwrapped.control_mode
+
+    planner = NoahBiArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+        joint_vel_limits=0.5,
+        joint_acc_limits=0.5,
+        version=env.robot_uids.split("_")[-1],
+    )
+
+    env = env.unwrapped
+
+    # to make the objects settle
+    for _ in range(10):
+        kf = env.agent.keyframes["vertical_grasp"]  # hold the arm at a fixed keyframe while physics settles
+        env.step(env.agent.controller.from_qpos(kf.qpos))
+        env.render()
+
+
+    FINGER_LENGTH = 0.025  # gripper finger length in meters, used as grasp depth
+    PLATE_D = env.plate_extents[0]
+    PLATE_Z = env.plate_extents[2]
+    RACK_Z = env.rack_extents[2]
+    ENV_Z_OFFSET = PLATE_D + RACK_Z + FINGER_LENGTH  # NOTE(review): computed but unused in this function
+
+
+    obb = get_actor_obb(env.plate)
+    approaching = np.array([0, 0, -1], dtype=np.float32)  # top-down approach
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)  # current TCP y-axis as preferred closing direction
+
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+    euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+    euler[:, 0] = 0  # zero the roll component of the grasp orientation
+    grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+    grasp_pose = sapien.Pose(grasp_pose.p + [0, -PLATE_D/3, 0], grasp_q)  # grasp off-center, a third of a diameter toward the rim
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = sapien.Pose(grasp_pose.p + [0,0,PLATE_Z], grasp_pose.q)  # pre-grasp one plate thickness above
+    res = planner.move_to_pose_with_RRTConnect(reach_pose)
+    if res == -1:
+        #print("Failed to reach pose")
+        return res
+    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+    if res == -1:
+        #print("Failed to reach pose")
+        return res
+    planner.close_gripper()
+
+    # -------------------------------------------------------------------------- #
+    # Lift
+    # -------------------------------------------------------------------------- #
+    lift_pose = sapien.Pose(grasp_pose.p + [0, 0, 1.2*PLATE_D], grasp_q)  # lift clear of the table by 1.2 plate diameters
+    res = planner.move_to_pose_with_RRTConnect(lift_pose)
+    env.render()
+    if res == -1:
+        return res
+
+
+    # # -------------------------------------------------------------------------- #
+    # # Hover on top of the goal
+    # # -------------------------------------------------------------------------- #
+    p, q = env.get_goal_site_pose()
+    pose = Pose.create_from_pq(p=p, q=q)
+
+    goal_euler = rotation_conversions.quaternion_to_euler(torch.tensor(pose.q).reshape(1, -1))  # NOTE(review): goal_euler/d0 are computed but unused here
+    d0 = (np.pi - torch.abs(goal_euler[:, 0])) * torch.sign(goal_euler[:, 0])
+
+    euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+    euler[:, 0] = -np.pi/2  # fixed hover orientation, independent of the grasp orientation read above
+    euler[:, 1] = -np.pi/2
+    euler[:, 2] = -np.pi/2
+    hover_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+
+    hover_pose = sapien.Pose(pose.sp.p, hover_q)*\
+        sapien.Pose([PLATE_D/3, -1.7*PLATE_D, -2*PLATE_Z],
+        rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_RRTConnect(hover_pose)
+    if res == -1:
+        print("Failed to hover pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Lower & Release
+    # -------------------------------------------------------------------------- #
+    lower_pose = sapien.Pose(pose.sp.p, hover_q) *\
+        sapien.Pose([PLATE_D/3, -PLATE_D/2, -2*PLATE_Z],
+        rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    if res == -1:
+        print("Failed to lower pose")
+        return res
+
+    lower_pose = sapien.Pose(pose.sp.p, hover_q) *\
+        sapien.Pose( [PLATE_D/3, 0, -1.5*PLATE_Z],
+        rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    if res == -1:
+        print("Failed to lower pose")
+        return res
+
+    planner.open_gripper()
+
+
+    # -------------------------------------------------------------------------- #
+    # stay there for a while
+    # -------------------------------------------------------------------------- #
+    for _ in range(10):  # hold the current arm pose with the gripper open so the plate settles
+        qpos = env.agent.robot.get_qpos()[0, :-2].cpu().numpy()
+        gripper_state = -1 # open
+        action = np.hstack([qpos, gripper_state])
+        env.step(action)
+        env.render()
+
+
+    # -------------------------------------------------------------------------- #
+    # Slightly push forward
+    # -------------------------------------------------------------------------- #
+    forward_pose = sapien.Pose(pose.sp.p, hover_q) *\
+        sapien.Pose([PLATE_D/3, 0, -PLATE_Z*1.3],
+        rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_screw(forward_pose)
+    if res == -1:
+        print("Failed to forward pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # stay there for a while
+    # -------------------------------------------------------------------------- #
+    for _ in range(10):  # hold again so the nudged plate comes to rest before finishing
+        qpos = env.agent.robot.get_qpos()[0, :-2].cpu().numpy()
+        gripper_state = -1 # open
+        action = np.hstack([qpos, gripper_state])
+        env.step(action)
+        env.render()
+
+    planner.close()
+    return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack2.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack2.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2edc717617f5ef7b1e8e0645a4e103edd2c9dc5
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack2.py
@@ -0,0 +1,177 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat, quat2euler
+
+from mani_skill.envs.tasks import PlacePlateOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():  # Entry point: build the PlacePlateOnRack-v2 env, record every episode, run scripted solver over 100 seeds.
+    env: PlacePlateOnRackEnv = gym.make(
+        "PlacePlateOnRack-v2",
+        obs_mode="none",
+        control_mode="pd_joint_pos",  # solve() asserts this control mode
+        render_mode="human",
+        reward_mode="dense",
+    )
+
+    env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)  # trigger=True: record all episodes
+    for seed in range(100):
+        res = solve(env, seed=seed, debug=False, vis=True)  # result ignored; failures only print inside solve()
+    env.close()
+
+def solve(env: PlacePlateOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ ], env.unwrapped.control_mode
+
+ planner = NoahBiArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.5,
+ joint_acc_limits=0.5,
+ version=env.robot_uids.split("_")[-1],
+ )
+
+ env = env.unwrapped
+
+ # to make the objects settle
+ for _ in range(10):
+ kf = env.agent.keyframes["vertical_grasp"]
+ env.step(env.agent.controller.from_qpos(kf.qpos))
+ env.render()
+
+
+ FINGER_LENGTH = 0.025
+ PLATE_D = env.plate_extents[0]
+ PLATE_Z = env.plate_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = PLATE_D + RACK_Z + FINGER_LENGTH
+
+
+ obb = get_actor_obb(env.plate)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler[:, 0] = -np.pi/2
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+ grasp_pose = sapien.Pose(grasp_pose.p + [0, -PLATE_D/3, PLATE_Z/5], grasp_q)
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = sapien.Pose(grasp_pose.p, grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ #print("Failed to reach pose")
+ return res
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = sapien.Pose(grasp_pose.p + [0, 0, PLATE_D], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ env.render()
+ if res == -1:
+ return res
+
+
+ # # -------------------------------------------------------------------------- #
+ # # Hover on top of the goal
+ # # -------------------------------------------------------------------------- #
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+
+ goal_euler = rotation_conversions.quaternion_to_euler(torch.tensor(pose.q).reshape(1, -1))
+ d0 = (np.pi - torch.abs(goal_euler[:, 0])) * torch.sign(goal_euler[:, 0])
+
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler[:, 0] = -np.pi/2 + np.pi/10 - d0
+ euler[:, 1] = -np.pi/10
+ euler[:, 2] = -np.pi/2
+ hover_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+
+ hover_pose = sapien.Pose(pose.sp.p + [0, 0, 1.6*PLATE_D], hover_q)
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ print("Failed to hover pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower & Release
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(pose.sp.p, hover_q) *\
+ sapien.Pose([PLATE_D/3, -PLATE_D/4, -PLATE_Z],
+ rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ print("Failed to lower pose")
+ return res
+
+ lower_pose = sapien.Pose(pose.sp.p, hover_q) *\
+ sapien.Pose( [PLATE_D/3, 0, -PLATE_Z],
+ rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ print("Failed to lower pose")
+ return res
+
+ planner.open_gripper()
+
+
+ # -------------------------------------------------------------------------- #
+ # stay there for a while
+ # -------------------------------------------------------------------------- #
+ for _ in range(10):
+ qpos = env.agent.robot.get_qpos()[0, :-2].cpu().numpy()
+ gripper_state = -1 # open
+ action = np.hstack([qpos, gripper_state])
+ env.step(action)
+ env.render()
+
+
+ # -------------------------------------------------------------------------- #
+ # Slightly push forward
+ # -------------------------------------------------------------------------- #
+ forward_pose = sapien.Pose(pose.sp.p, hover_q) *\
+ sapien.Pose([PLATE_D/3, 0, -PLATE_Z/5],
+ rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+ res = planner.move_to_pose_with_screw(lower_pose)
+ if res == -1:
+ print("Failed to forward pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # stay there for a while
+ # -------------------------------------------------------------------------- #
+ for _ in range(10):
+ qpos = env.agent.robot.get_qpos()[0, :-2].cpu().numpy()
+ gripper_state = -1 # open
+ action = np.hstack([qpos, gripper_state])
+ env.step(action)
+ env.render()
+
+ planner.close()
+ return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack_from_side.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack_from_side.py
new file mode 100644
index 0000000000000000000000000000000000000000..44c09f44919e4ab78a27166350d3031f707db146
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack_from_side.py
@@ -0,0 +1,183 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat, quat2euler
+
+from mani_skill.envs.tasks import PlacePlateOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlacePlateOnRackEnv = gym.make(
+ "PlacePlateOnRack-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlacePlateOnRackEnv, seed=None, debug=False, vis=False):
+    """Scripted side-grasp pick-and-place: grab the plate off-center, carry it
+    to the rack goal site, lower it in two steps, release, and nudge forward.
+
+    Returns the last planner result, or -1 as soon as any planning stage fails.
+    """
+    env.reset(seed=seed)
+    # The script sends absolute joint targets, so only pd_joint_pos is supported.
+    assert env.unwrapped.control_mode in [
+        "pd_joint_pos",
+    ], env.unwrapped.control_mode
+
+    planner = NoahBiArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+        joint_vel_limits=0.5,
+        joint_acc_limits=0.5,
+        version=env.robot_uids.split("_")[-1],  # robot uid suffix selects the arm version
+    )
+
+    env = env.unwrapped
+
+    # to make the objects settle
+    for _ in range(10):
+        kf = env.agent.keyframes["vertical_grasp"]
+        env.step(env.agent.controller.from_qpos(kf.qpos))
+        env.render()
+
+
+    FINGER_LENGTH = 0.025
+    PLATE_D = env.plate_extents[0]
+    PLATE_Z = env.plate_extents[2]
+    RACK_Z = env.rack_extents[2]
+    # NOTE(review): ENV_Z_OFFSET is computed but never used below.
+    ENV_Z_OFFSET = PLATE_D + RACK_Z + FINGER_LENGTH
+
+
+    # Derive a grasp from the plate's oriented bounding box, approaching from above.
+    obb = get_actor_obb(env.plate)
+    approaching = np.array([0, 0, -1], dtype=np.float32)
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+    # Override the gripper roll to -90 deg and shift the grasp point PLATE_D/3
+    # along -y so the plate is held off-center.
+    euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+    euler[:, 0] = -np.pi/2
+    grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+    grasp_pose = sapien.Pose(grasp_pose.p + [0, -PLATE_D/3, 0], grasp_q)
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = sapien.Pose(grasp_pose.p + [0,0,PLATE_Z], grasp_pose.q)
+    res = planner.move_to_pose_with_RRTConnect(reach_pose)
+    if res == -1:
+        #print("Failed to reach pose")
+        return res
+    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+    if res == -1:
+        #print("Failed to reach pose")
+        return res
+    planner.close_gripper()
+
+    # -------------------------------------------------------------------------- #
+    # Lift
+    # -------------------------------------------------------------------------- #
+    lift_pose = sapien.Pose(grasp_pose.p + [0, 0, PLATE_D], grasp_q)
+    res = planner.move_to_pose_with_RRTConnect(lift_pose)
+    env.render()
+    if res == -1:
+        return res
+
+
+    # # -------------------------------------------------------------------------- #
+    # # Hover on top of the goal
+    # # -------------------------------------------------------------------------- #
+    p, q = env.get_goal_site_pose()
+    pose = Pose.create_from_pq(p=p, q=q)
+
+    # d0 = signed deviation of the goal roll from +/-pi; folded into the
+    # gripper roll below so the plate tilt tracks the rack orientation.
+    goal_euler = rotation_conversions.quaternion_to_euler(torch.tensor(pose.q).reshape(1, -1))
+    d0 = (np.pi - torch.abs(goal_euler[:, 0])) * torch.sign(goal_euler[:, 0])
+
+    euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+    euler[:, 0] = -np.pi/2 + np.pi/10 - d0
+    euler[:, 1] = -np.pi/10
+    euler[:, 2] = -np.pi/2
+    hover_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+
+    # Offsets below are expressed in the goal-site frame (right-multiplied local pose).
+    hover_pose = sapien.Pose(pose.sp.p, hover_q)*\
+        sapien.Pose([PLATE_D/3, -1.7*PLATE_D, -2*PLATE_Z],
+                    rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_RRTConnect(hover_pose)
+    if res == -1:
+        print("Failed to hover pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Lower & Release
+    # -------------------------------------------------------------------------- #
+    lower_pose = sapien.Pose(pose.sp.p, hover_q) *\
+        sapien.Pose([PLATE_D/3, -PLATE_D/2, -2*PLATE_Z],
+                    rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    if res == -1:
+        print("Failed to lower pose")
+        return res
+
+    lower_pose = sapien.Pose(pose.sp.p, hover_q) *\
+        sapien.Pose( [PLATE_D/3, 0, -1.5*PLATE_Z],
+                    rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    if res == -1:
+        print("Failed to lower pose")
+        return res
+
+    planner.open_gripper()
+
+
+    # -------------------------------------------------------------------------- #
+    # stay there for a while
+    # -------------------------------------------------------------------------- #
+    # Hold the current arm configuration with the gripper open so the plate can
+    # settle; the last two joints are the gripper fingers and are excluded.
+    for _ in range(10):
+        qpos = env.agent.robot.get_qpos()[0, :-2].cpu().numpy()
+        gripper_state = -1 # open
+        action = np.hstack([qpos, gripper_state])
+        env.step(action)
+        env.render()
+
+
+    # -------------------------------------------------------------------------- #
+    # Slightly push forward
+    # -------------------------------------------------------------------------- #
+    forward_pose = sapien.Pose(pose.sp.p, hover_q) *\
+        sapien.Pose([PLATE_D/3, 0, -PLATE_Z*1.3],
+                    rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_screw(forward_pose)
+    if res == -1:
+        print("Failed to forward pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # stay there for a while
+    # -------------------------------------------------------------------------- #
+    for _ in range(10):
+        qpos = env.agent.robot.get_qpos()[0, :-2].cpu().numpy()
+        gripper_state = -1 # open
+        action = np.hstack([qpos, gripper_state])
+        env.step(action)
+        env.render()
+
+    planner.close()
+    return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack_from_top.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack_from_top.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f72998feee8eb141bc0af61d11b7487b6c2a975
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack_from_top.py
@@ -0,0 +1,175 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat, quat2euler
+
+from mani_skill.envs.tasks import PlacePlateOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlacePlateOnRackEnv = gym.make(
+ "PlacePlateOnRack-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlacePlateOnRackEnv, seed=None, debug=False, vis=False):
+    """Scripted top-grasp pick-and-place: grab the plate from above, hover over
+    the rack goal site, lower it in two steps, release, and nudge forward.
+
+    Returns the last planner result, or -1 as soon as any planning stage fails.
+    """
+    env.reset(seed=seed)
+    # The script sends absolute joint targets, so only pd_joint_pos is supported.
+    assert env.unwrapped.control_mode in [
+        "pd_joint_pos",
+    ], env.unwrapped.control_mode
+
+    planner = NoahBiArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+        joint_vel_limits=0.5,
+        joint_acc_limits=0.5,
+        version=env.robot_uids.split("_")[-1],  # robot uid suffix selects the arm version
+    )
+
+    env = env.unwrapped
+
+    # to make the objects settle
+    for _ in range(10):
+        kf = env.agent.keyframes["vertical_grasp"]
+        env.step(env.agent.controller.from_qpos(kf.qpos))
+        env.render()
+
+
+    FINGER_LENGTH = 0.025
+    PLATE_D = env.plate_extents[0]
+    PLATE_Z = env.plate_extents[2]
+    RACK_Z = env.rack_extents[2]
+    # NOTE(review): ENV_Z_OFFSET is computed but never used below.
+    ENV_Z_OFFSET = PLATE_D + RACK_Z + FINGER_LENGTH
+
+
+    # Derive a grasp from the plate's oriented bounding box, approaching from above.
+    obb = get_actor_obb(env.plate)
+    approaching = np.array([0, 0, -1], dtype=np.float32)
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+    # Zero the gripper roll and shift the grasp point PLATE_D/3 along +x so the
+    # plate is held off-center.
+    euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+    euler[:, 0] = 0
+    grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+    grasp_pose = sapien.Pose(grasp_pose.p + [+PLATE_D/3, 0, 0], grasp_q)
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = sapien.Pose(grasp_pose.p + [0, 0, PLATE_Z], grasp_pose.q)
+    res = planner.move_to_pose_with_RRTConnect(reach_pose, t=1000)
+    if res == -1:
+        #print("Failed to reach pose")
+        return res
+    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+    if res == -1:
+        #print("Failed to reach pose")
+        return res
+    planner.close_gripper()
+
+    # -------------------------------------------------------------------------- #
+    # Lift
+    # -------------------------------------------------------------------------- #
+    lift_pose = sapien.Pose(grasp_pose.p + [0, 0, 1.2*PLATE_D], grasp_q)
+    res = planner.move_to_pose_with_RRTConnect(lift_pose)
+    env.render()
+    if res == -1:
+        return res
+
+
+    # # -------------------------------------------------------------------------- #
+    # # Hover on top of the goal
+    # # -------------------------------------------------------------------------- #
+    p, q = env.get_goal_site_pose()
+    pose = Pose.create_from_pq(p=p, q=q)
+
+    # NOTE(review): goal_euler and d0 are computed but unused in this variant
+    # (the hover orientation below is fixed to constant euler angles).
+    goal_euler = rotation_conversions.quaternion_to_euler(torch.tensor(pose.q).reshape(1, -1))
+    d0 = (np.pi - torch.abs(goal_euler[:, 0])) * torch.sign(goal_euler[:, 0])
+
+    euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+    euler[:, 0] = -np.pi/2
+    euler[:, 1] = -np.pi/2
+    euler[:, 2] = -np.pi/2
+    hover_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+
+    hover_pose = sapien.Pose(pose.sp.p + [0, 0, 1.5*PLATE_D], hover_q)
+    res = planner.move_to_pose_with_RRTConnect(hover_pose)
+    if res == -1:
+        print("Failed to hover pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Lower & Release
+    # -------------------------------------------------------------------------- #
+    lower_pose = sapien.Pose(pose.sp.p + [-2.4*FINGER_LENGTH, 0, 0.8 *PLATE_D], hover_q)
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    if res == -1:
+        print("Failed to lower pose")
+        return res
+
+    lower_pose = sapien.Pose(pose.sp.p + [-2.0*FINGER_LENGTH, 0, 0.35 * PLATE_D], hover_q)
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    if res == -1:
+        print("Failed to lower pose")
+        return res
+
+    planner.open_gripper()
+
+
+    # -------------------------------------------------------------------------- #
+    # stay there for a while
+    # -------------------------------------------------------------------------- #
+    # Hold the current arm configuration with the gripper open so the plate can
+    # settle; the last two joints are the gripper fingers and are excluded.
+    for _ in range(10):
+        qpos = env.agent.robot.get_qpos()[0, :-2].cpu().numpy()
+        gripper_state = -1 # open
+        action = np.hstack([qpos, gripper_state])
+        env.step(action)
+        env.render()
+
+
+    # -------------------------------------------------------------------------- #
+    # Slightly push forward
+    # -------------------------------------------------------------------------- #
+    forward_pose = sapien.Pose(pose.sp.p + [-1*FINGER_LENGTH, 0, 0.35 * PLATE_D], hover_q)
+    res = planner.move_to_pose_with_RRTConnect(forward_pose)
+    if res == -1:
+        print("Failed to forward pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # stay there for a while
+    # -------------------------------------------------------------------------- #
+    for _ in range(10):
+        qpos = env.agent.robot.get_qpos()[0, :-2].cpu().numpy()
+        gripper_state = -1 # open
+        action = np.hstack([qpos, gripper_state])
+        env.step(action)
+        env.render()
+
+    planner.close()
+    return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack_v3.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack_v3.py
new file mode 100644
index 0000000000000000000000000000000000000000..14f4b958fd4da5416bbab85db8834b038d3a8ce2
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack_v3.py
@@ -0,0 +1,183 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat, quat2euler
+
+from mani_skill.envs.tasks import PlacePlateOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlacePlateOnRackEnv = gym.make(
+ "PlacePlateOnRack-v3",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlacePlateOnRackEnv, seed=None, debug=False, vis=False):
+    """Scripted side-grasp pick-and-place for the v3 task variant: grab the
+    plate off-center, carry it to the rack goal site, lower it in two steps,
+    release, and nudge forward.
+
+    Returns the last planner result, or -1 as soon as any planning stage fails.
+    """
+    env.reset(seed=seed)
+    # The script sends absolute joint targets, so only pd_joint_pos is supported.
+    assert env.unwrapped.control_mode in [
+        "pd_joint_pos",
+    ], env.unwrapped.control_mode
+
+    planner = NoahBiArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+        joint_vel_limits=0.5,
+        joint_acc_limits=0.5,
+        version=env.robot_uids.split("_")[-1],  # robot uid suffix selects the arm version
+    )
+
+    env = env.unwrapped
+
+    # to make the objects settle
+    for _ in range(10):
+        kf = env.agent.keyframes["vertical_grasp"]
+        env.step(env.agent.controller.from_qpos(kf.qpos))
+        env.render()
+
+
+    FINGER_LENGTH = 0.025
+    PLATE_D = env.plate_extents[0]
+    PLATE_Z = env.plate_extents[2]
+    RACK_Z = env.rack_extents[2]
+    # NOTE(review): ENV_Z_OFFSET is computed but never used below.
+    ENV_Z_OFFSET = PLATE_D + RACK_Z + FINGER_LENGTH
+
+
+    # Derive a grasp from the plate's oriented bounding box, approaching from above.
+    obb = get_actor_obb(env.plate)
+    approaching = np.array([0, 0, -1], dtype=np.float32)
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+    # Override the gripper roll to -90 deg and shift the grasp point PLATE_D/3
+    # along -y so the plate is held off-center.
+    euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+    euler[:, 0] = -np.pi/2
+    grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+    grasp_pose = sapien.Pose(grasp_pose.p + [0, -PLATE_D/3, 0], grasp_q)
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = sapien.Pose(grasp_pose.p + [0,0,PLATE_Z], grasp_pose.q)
+    res = planner.move_to_pose_with_RRTConnect(reach_pose)
+    if res == -1:
+        #print("Failed to reach pose")
+        return res
+    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+    if res == -1:
+        #print("Failed to reach pose")
+        return res
+    planner.close_gripper()
+
+    # -------------------------------------------------------------------------- #
+    # Lift
+    # -------------------------------------------------------------------------- #
+    lift_pose = sapien.Pose(grasp_pose.p + [0, 0, PLATE_D], grasp_q)
+    res = planner.move_to_pose_with_RRTConnect(lift_pose)
+    env.render()
+    if res == -1:
+        return res
+
+
+    # # -------------------------------------------------------------------------- #
+    # # Hover on top of the goal
+    # # -------------------------------------------------------------------------- #
+    p, q = env.get_goal_site_pose()
+    pose = Pose.create_from_pq(p=p, q=q)
+
+    # d0 = signed deviation of the goal roll from +/-pi; folded into the
+    # gripper roll below so the plate tilt tracks the rack orientation.
+    goal_euler = rotation_conversions.quaternion_to_euler(torch.tensor(pose.q).reshape(1, -1))
+    d0 = (np.pi - torch.abs(goal_euler[:, 0])) * torch.sign(goal_euler[:, 0])
+
+    euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+    euler[:, 0] = -np.pi/2 + np.pi/10 - d0
+    euler[:, 1] = -np.pi/10
+    euler[:, 2] = -np.pi/2
+    hover_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+
+    # Offsets below are expressed in the goal-site frame (right-multiplied local pose).
+    hover_pose = sapien.Pose(pose.sp.p, hover_q)*\
+        sapien.Pose([PLATE_D/3, -1.7*PLATE_D, -2*PLATE_Z],
+                    rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_RRTConnect(hover_pose)
+    if res == -1:
+        print("Failed to hover pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Lower & Release
+    # -------------------------------------------------------------------------- #
+    lower_pose = sapien.Pose(pose.sp.p, hover_q) *\
+        sapien.Pose([PLATE_D/3, -PLATE_D/2, -2*PLATE_Z],
+                    rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    if res == -1:
+        print("Failed to lower pose")
+        return res
+
+    lower_pose = sapien.Pose(pose.sp.p, hover_q) *\
+        sapien.Pose( [PLATE_D/3, 0, -1.5*PLATE_Z],
+                    rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    if res == -1:
+        print("Failed to lower pose")
+        return res
+
+    planner.open_gripper()
+
+
+    # -------------------------------------------------------------------------- #
+    # stay there for a while
+    # -------------------------------------------------------------------------- #
+    # Hold the current arm configuration with the gripper open so the plate can
+    # settle; the last two joints are the gripper fingers and are excluded.
+    for _ in range(10):
+        qpos = env.agent.robot.get_qpos()[0, :-2].cpu().numpy()
+        gripper_state = -1 # open
+        action = np.hstack([qpos, gripper_state])
+        env.step(action)
+        env.render()
+
+
+    # -------------------------------------------------------------------------- #
+    # Slightly push forward
+    # -------------------------------------------------------------------------- #
+    forward_pose = sapien.Pose(pose.sp.p, hover_q) *\
+        sapien.Pose([PLATE_D/3, 0, -PLATE_Z*1.3],
+                    rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_screw(forward_pose)
+    if res == -1:
+        print("Failed to forward pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # stay there for a while
+    # -------------------------------------------------------------------------- #
+    for _ in range(10):
+        qpos = env.agent.robot.get_qpos()[0, :-2].cpu().numpy()
+        gripper_state = -1 # open
+        action = np.hstack([qpos, gripper_state])
+        env.step(action)
+        env.render()
+
+    planner.close()
+    return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack_v4.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack_v4.py
new file mode 100644
index 0000000000000000000000000000000000000000..e20617a600deb4b179e010281bcfbb87a70885fd
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plate_on_rack_v4.py
@@ -0,0 +1,183 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat, quat2euler
+
+from mani_skill.envs.tasks import PlacePlateOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.noahbiarm.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlacePlateOnRackEnv = gym.make(
+ "PlacePlateOnRack-v4",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlacePlateOnRackEnv, seed=None, debug=False, vis=False):
+    """Scripted side-grasp pick-and-place for the v4 task variant: grab the
+    plate off-center, carry it to the rack goal site, lower it in two steps,
+    release, and nudge forward.
+
+    Returns the last planner result, or -1 as soon as any planning stage fails.
+    """
+    env.reset(seed=seed)
+    # The script sends absolute joint targets, so only pd_joint_pos is supported.
+    assert env.unwrapped.control_mode in [
+        "pd_joint_pos",
+    ], env.unwrapped.control_mode
+
+    planner = NoahBiArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+        joint_vel_limits=0.5,
+        joint_acc_limits=0.5,
+        version=env.robot_uids.split("_")[-1],  # robot uid suffix selects the arm version
+    )
+
+    env = env.unwrapped
+
+    # to make the objects settle
+    for _ in range(10):
+        kf = env.agent.keyframes["vertical_grasp"]
+        env.step(env.agent.controller.from_qpos(kf.qpos))
+        env.render()
+
+
+    FINGER_LENGTH = 0.025
+    PLATE_D = env.plate_extents[0]
+    PLATE_Z = env.plate_extents[2]
+    RACK_Z = env.rack_extents[2]
+    # NOTE(review): ENV_Z_OFFSET is computed but never used below.
+    ENV_Z_OFFSET = PLATE_D + RACK_Z + FINGER_LENGTH
+
+
+    # Derive a grasp from the plate's oriented bounding box, approaching from above.
+    obb = get_actor_obb(env.plate)
+    approaching = np.array([0, 0, -1], dtype=np.float32)
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+    # Override the gripper roll to -90 deg and shift the grasp point PLATE_D/3
+    # along -y so the plate is held off-center.
+    euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+    euler[:, 0] = -np.pi/2
+    grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+    grasp_pose = sapien.Pose(grasp_pose.p + [0, -PLATE_D/3, 0], grasp_q)
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = sapien.Pose(grasp_pose.p + [0,0,PLATE_Z], grasp_pose.q)
+    res = planner.move_to_pose_with_RRTConnect(reach_pose)
+    if res == -1:
+        #print("Failed to reach pose")
+        return res
+    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+    if res == -1:
+        #print("Failed to reach pose")
+        return res
+    planner.close_gripper()
+
+    # -------------------------------------------------------------------------- #
+    # Lift
+    # -------------------------------------------------------------------------- #
+    lift_pose = sapien.Pose(grasp_pose.p + [0, 0, PLATE_D], grasp_q)
+    res = planner.move_to_pose_with_RRTConnect(lift_pose)
+    env.render()
+    if res == -1:
+        return res
+
+
+    # # -------------------------------------------------------------------------- #
+    # # Hover on top of the goal
+    # # -------------------------------------------------------------------------- #
+    p, q = env.get_goal_site_pose()
+    pose = Pose.create_from_pq(p=p, q=q)
+
+    # d0 = signed deviation of the goal roll from +/-pi; folded into the
+    # gripper roll below so the plate tilt tracks the rack orientation.
+    goal_euler = rotation_conversions.quaternion_to_euler(torch.tensor(pose.q).reshape(1, -1))
+    d0 = (np.pi - torch.abs(goal_euler[:, 0])) * torch.sign(goal_euler[:, 0])
+
+    euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+    euler[:, 0] = -np.pi/2 + np.pi/10 - d0
+    euler[:, 1] = -np.pi/10
+    euler[:, 2] = -np.pi/2
+    hover_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+
+    # Offsets below are expressed in the goal-site frame (right-multiplied local pose).
+    hover_pose = sapien.Pose(pose.sp.p, hover_q)*\
+        sapien.Pose([PLATE_D/3, -1.7*PLATE_D, -2*PLATE_Z],
+                    rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_RRTConnect(hover_pose)
+    if res == -1:
+        print("Failed to hover pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Lower & Release
+    # -------------------------------------------------------------------------- #
+    lower_pose = sapien.Pose(pose.sp.p, hover_q) *\
+        sapien.Pose([PLATE_D/3, -PLATE_D/2, -2*PLATE_Z],
+                    rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    if res == -1:
+        print("Failed to lower pose")
+        return res
+
+    lower_pose = sapien.Pose(pose.sp.p, hover_q) *\
+        sapien.Pose( [PLATE_D/3, 0, -1.5*PLATE_Z],
+                    rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    if res == -1:
+        print("Failed to lower pose")
+        return res
+
+    planner.open_gripper()
+
+
+    # -------------------------------------------------------------------------- #
+    # stay there for a while
+    # -------------------------------------------------------------------------- #
+    # Hold the current arm configuration with the gripper open so the plate can
+    # settle; the last two joints are the gripper fingers and are excluded.
+    for _ in range(10):
+        qpos = env.agent.robot.get_qpos()[0, :-2].cpu().numpy()
+        gripper_state = -1 # open
+        action = np.hstack([qpos, gripper_state])
+        env.step(action)
+        env.render()
+
+
+    # -------------------------------------------------------------------------- #
+    # Slightly push forward
+    # -------------------------------------------------------------------------- #
+    forward_pose = sapien.Pose(pose.sp.p, hover_q) *\
+        sapien.Pose([PLATE_D/3, 0, -PLATE_Z*1.3],
+                    rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+    res = planner.move_to_pose_with_screw(forward_pose)
+    if res == -1:
+        print("Failed to forward pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # stay there for a while
+    # -------------------------------------------------------------------------- #
+    for _ in range(10):
+        qpos = env.agent.robot.get_qpos()[0, :-2].cpu().numpy()
+        gripper_state = -1 # open
+        action = np.hstack([qpos, gripper_state])
+        env.step(action)
+        env.render()
+
+    planner.close()
+    return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plug_charger.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plug_charger.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e1806ee72ee1a531816e78082f390a6c0eeef73
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/plug_charger.py
@@ -0,0 +1,105 @@
+import gymnasium as gym
+import numpy as np
+import sapien.core as sapien
+import trimesh
+from tqdm import tqdm
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlugChargerEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import \
+ PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import (
+ compute_grasp_info_by_obb, get_actor_obb)
+
+
+def main():
+ env: PlugChargerEnv = gym.make(
+ "PlugCharger-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="rgb_array",
+ reward_mode="sparse",
+ )
+ for seed in tqdm(range(100)):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ print(res[-1])
+ env.close()
+
+
+def solve(env: PlugChargerEnv, seed=None, debug=False, vis=False):
+    """Scripted solution: grasp the charger base at a slight pitch, align it
+    in front of the receptacle, then insert it straight in.
+
+    Returns the result of the final insertion motion.
+    """
+    env.reset(seed=seed)
+    # Joint-position control modes only; the planner outputs joint targets.
+    assert env.unwrapped.control_mode in [
+        "pd_joint_pos",
+        "pd_joint_pos_vel",
+    ], env.unwrapped.control_mode
+    planner = PandaArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=False,
+        print_env_info=False,
+        joint_vel_limits=0.5,
+        joint_acc_limits=0.5,
+    )
+
+    FINGER_LENGTH = 0.025
+    env = env.unwrapped
+    charger_base_pose = env.charger_base_pose
+    # _base_size stores half-extents; double to get the full box size.
+    charger_base_size = np.array(env.unwrapped._base_size) * 2
+
+    # Model the charger base as a box OBB to compute a top-down grasp.
+    obb = trimesh.primitives.Box(
+        extents=charger_base_size,
+        transform=charger_base_pose.sp.to_transformation_matrix(),
+    )
+
+    approaching = np.array([0, 0, -1])
+    target_closing = env.agent.tcp.pose.sp.to_transformation_matrix()[:3, 1]
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH,
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+
+    # add a angle to grasp: pitch the gripper 15 deg about its local y axis.
+    grasp_angle = np.deg2rad(15)
+    grasp_pose = grasp_pose * sapien.Pose(q=euler2quat(0, grasp_angle, 0))
+
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    # Approach from 5 cm behind the grasp along the gripper's local z axis.
+    reach_pose = grasp_pose * sapien.Pose([0, 0, -0.05])
+    planner.move_to_pose_with_screw(reach_pose)
+
+    # -------------------------------------------------------------------------- #
+    # Grasp
+    # -------------------------------------------------------------------------- #
+    planner.move_to_pose_with_screw(grasp_pose)
+    planner.close_gripper()
+
+    # -------------------------------------------------------------------------- #
+    # Align
+    # -------------------------------------------------------------------------- #
+    # TCP target that places the held charger at (goal * offset): compose
+    # goal pose, a 5 cm back-off along the goal x axis, and the current
+    # charger->TCP transform.
+    pre_insert_pose = (
+        env.goal_pose.sp
+        * sapien.Pose([-0.05, 0.0, 0.0])
+        * env.charger.pose.sp.inv()
+        * env.agent.tcp.pose.sp
+    )
+    insert_pose = env.goal_pose.sp * env.charger.pose.sp.inv() * env.agent.tcp.pose.sp
+    # Two passes at the same pre-insert pose: first without refinement, then
+    # with 5 refine steps to settle any residual alignment error.
+    planner.move_to_pose_with_screw(pre_insert_pose, refine_steps=0)
+    planner.move_to_pose_with_screw(pre_insert_pose, refine_steps=5)
+    # -------------------------------------------------------------------------- #
+    # Insert
+    # -------------------------------------------------------------------------- #
+    res = planner.move_to_pose_with_screw(insert_pose)
+
+    planner.close()
+    return res
+
+
+if __name__ == "__main__":
+ main()
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/pull_cube.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/pull_cube.py
new file mode 100644
index 0000000000000000000000000000000000000000..e640f044075afe5a24422acf146adefa7a1a8ab1
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/pull_cube.py
@@ -0,0 +1,32 @@
+import numpy as np
+import sapien
+
+from mani_skill.envs.tasks import PushCubeEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import \
+ PandaArmMotionPlanningSolver
+
+def solve(env: PushCubeEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ )
+
+ FINGER_LENGTH = 0.025
+ env = env.unwrapped
+ planner.close_gripper()
+ reach_pose = sapien.Pose(p=env.obj.pose.sp.p + np.array([0.05, 0, 0]), q=env.agent.tcp.pose.sp.q)
+ planner.move_to_pose_with_screw(reach_pose)
+
+ # -------------------------------------------------------------------------- #
+ # Move to goal pose
+ # -------------------------------------------------------------------------- #
+ goal_pose = sapien.Pose(p=env.goal_region.pose.sp.p + np.array([0.05, 0, 0]),q=env.agent.tcp.pose.sp.q)
+ res = planner.move_to_pose_with_screw(goal_pose)
+
+ planner.close()
+ return res
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/pull_cube_tool.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/pull_cube_tool.py
new file mode 100644
index 0000000000000000000000000000000000000000..8cc26e3d555b459c30e573b6bbcfc2fef27314f4
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/pull_cube_tool.py
@@ -0,0 +1,97 @@
+import numpy as np
+import sapien
+
+from mani_skill.envs.tasks import PullCubeToolEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+
+def solve(env: PullCubeToolEnv, seed=None, debug=False, vis=False):  # grasp the L-shaped tool, hook it behind the cube, and drag the cube closer
+    env.reset(seed=seed)
+    planner = PandaArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+        joint_vel_limits=0.75,
+        joint_acc_limits=0.75,
+    )
+
+    env = env.unwrapped
+
+    # Get tool OBB and compute grasp pose
+    tool_obb = get_actor_obb(env.l_shape_tool)
+    approaching = np.array([0, 0, -1])  # top-down approach
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy()  # closing axis = column 1 of current TCP rotation
+
+    grasp_info = compute_grasp_info_by_obb(
+        tool_obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=0.03,
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.l_shape_tool.pose.sp.p)  # NOTE(review): OBB `center` is computed but unused; grasp is at the tool origin — confirm intended
+    offset = sapien.Pose([0.02, 0, 0])
+    grasp_pose = grasp_pose * (offset)  # nudge grasp 2 cm along the grasp frame's x axis
+
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = grasp_pose * sapien.Pose([0, 0, -0.05])  # pre-grasp: back off 5 cm along the approach axis
+    res = planner.move_to_pose_with_screw(reach_pose)
+    if res == -1: return res
+
+    # -------------------------------------------------------------------------- #
+    # Grasp
+    # -------------------------------------------------------------------------- #
+    res = planner.move_to_pose_with_screw(grasp_pose)
+    if res == -1: return res
+    planner.close_gripper()
+
+    # -------------------------------------------------------------------------- #
+    # Lift tool to safe height
+    # -------------------------------------------------------------------------- #
+    lift_height = 0.35  # travel height while carrying the tool
+    lift_pose = sapien.Pose(grasp_pose.p + np.array([0, 0, lift_height]))
+    lift_pose.set_q(grasp_pose.q)  # Maintain grasp orientation
+    res = planner.move_to_pose_with_screw(lift_pose)
+    if res == -1: return res
+
+    cube_pos = env.cube.pose.sp.p
+    approach_offset = sapien.Pose(
+        [-(env.hook_length + env.cube_half_size + 0.08),
+        -0.0,
+        lift_height - 0.05]
+    )
+    approach_pose = sapien.Pose(cube_pos) * approach_offset  # position the hook past the cube on -x, still near travel height
+    approach_pose.set_q(grasp_pose.q)
+
+    res = planner.move_to_pose_with_screw(approach_pose)
+    if res == -1: return res
+
+    # -------------------------------------------------------------------------- #
+    # Lower tool behind cube
+    # -------------------------------------------------------------------------- #
+    behind_offset = sapien.Pose(
+        [-(env.hook_length + env.cube_half_size),
+        -0.067,
+        0]
+    )
+    hook_pose = sapien.Pose(cube_pos) * behind_offset  # drop the hook to table level just beyond the cube
+    hook_pose.set_q(grasp_pose.q)
+
+    res = planner.move_to_pose_with_screw(hook_pose)
+    if res == -1: return res
+
+    # -------------------------------------------------------------------------- #
+    # Pull cube
+    # -------------------------------------------------------------------------- #
+    pull_offset = sapien.Pose([-0.35, 0, 0])  # drag 35 cm along -x, toward the robot
+    target_pose = hook_pose * pull_offset
+    res = planner.move_to_pose_with_screw(target_pose)
+    if res == -1: return res
+
+    planner.close()
+    return res
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/push_cube.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/push_cube.py
new file mode 100644
index 0000000000000000000000000000000000000000..4064015dd2c898ba2398a3f52283bfbd519ba903
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/push_cube.py
@@ -0,0 +1,32 @@
+import numpy as np
+import sapien
+
+from mani_skill.envs.tasks import PushCubeEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import \
+ PandaArmMotionPlanningSolver
+
+def solve(env: PushCubeEnv, seed=None, debug=False, vis=False):  # push task: closed-gripper TCP shoves the cube toward the goal region
+    env.reset(seed=seed)  # seed fixes object/goal placement for reproducibility
+    planner = PandaArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+    )
+
+    FINGER_LENGTH = 0.025  # unused here; kept for parity with sibling solutions
+    env = env.unwrapped
+    planner.close_gripper()  # fingers stay closed: the cube is moved by contact, not grasped
+    reach_pose = sapien.Pose(p=env.obj.pose.sp.p + np.array([-0.05, 0, 0]), q=env.agent.tcp.pose.sp.q)  # TCP 5 cm on the -x side of the cube, current orientation kept
+    planner.move_to_pose_with_screw(reach_pose)
+
+    # -------------------------------------------------------------------------- #
+    # Move to goal pose
+    # -------------------------------------------------------------------------- #
+    goal_pose = sapien.Pose(p=env.goal_region.pose.sp.p + np.array([-0.12, 0, 0]),q=env.agent.tcp.pose.sp.q)  # stop 12 cm short of goal center so the cube ends on it
+    res = planner.move_to_pose_with_screw(goal_pose)
+
+    planner.close()
+    return res
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/spoon_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/spoon_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..31631974220ffa41bfdf8375495443e61c26d793
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/spoon_on_rack.py
@@ -0,0 +1,120 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceSpoonOnRackEnv
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import NoahBiArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():  # standalone driver: run the spoon-on-rack solution over 100 seeds with video recording
+    env: PlaceSpoonOnRackEnv = gym.make(
+        "PlaceSpoonOnRack-v1",
+        obs_mode="none",
+        control_mode="pd_joint_pos",
+        render_mode="human",
+        reward_mode="dense",
+    )
+
+    # Wrap the environment with RecordVideo
+    env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)  # trigger always true: record every episode
+    for seed in range(100):  # sweep seeds 0..99
+        res = solve(env, seed=seed, debug=False, vis=True)
+    env.close()
+
+def solve(env: PlaceSpoonOnRackEnv, seed=None, debug=False, vis=False):  # grasp the fork actor and place it on the rack's goal site
+    env.reset(seed=seed)
+    assert env.unwrapped.control_mode in [
+        "pd_joint_pos",
+        "pd_joint_pos_vel",
+    ], env.unwrapped.control_mode
+
+    planner = NoahBiArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+        joint_vel_limits=0.5,
+        joint_acc_limits=0.5,
+    )
+
+    env = env.unwrapped
+
+    FINGER_LENGTH = 0.025  # approximate usable finger depth for grasp computation
+    obb = get_actor_obb(env.fork)  # NOTE(review): actor attribute is `fork` despite spoon naming — confirm
+    approaching = np.array([0, 0, -1], dtype=np.float32)  # top-down approach
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)  # closing axis = column 1 of current TCP rotation
+
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH
+    )
+
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.fork.pose.sp.p)  # NOTE(review): OBB `center` is computed but unused; grasp is at the fork origin — confirm
+    grasp_pose = grasp_pose * sapien.Pose([0, 0, -FINGER_LENGTH*0.6])  # back the grasp off slightly along the gripper z axis
+    FORK_Z = env.fork_extents[2]  # fork bounding-box extent along z
+    RACK_Z = env.rack_extents[2]  # rack bounding-box extent along z
+    ENV_Z_OFFSET = FORK_Z + RACK_Z + FINGER_LENGTH  # vertical clearance used for hover/lower
+
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = grasp_pose * sapien.Pose([0, 0, -FORK_Z-FINGER_LENGTH])  # pre-grasp above the fork along the approach axis
+    res = planner.move_to_pose_with_RRTConnect(reach_pose)
+    if res == -1:
+        # print("Failed to reach pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Grasp
+    # -------------------------------------------------------------------------- #
+    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+    if res == -1:
+        # print("Failed to grasp pose")
+        return res
+    planner.close_gripper()
+
+    # -------------------------------------------------------------------------- #
+    # Lift
+    # -------------------------------------------------------------------------- #
+    lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, 0, FORK_Z/2], q=grasp_pose.q)  # raise by half the fork height before translating
+    res = planner.move_to_pose_with_RRTConnect(lift_pose)
+    if res == -1:
+        # print("Failed to lift pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Hover over goalsite (rack pose)
+    # -------------------------------------------------------------------------- #
+    goal_extents = torch.from_numpy(env.goal_extents)  # NOTE(review): unused below — confirm whether it can be dropped
+    p, q = env.get_goal_site_pose()
+    pose = Pose.create_from_pq(p=p, q=q)
+    hover_pose = sapien.Pose(pose.sp.p, grasp_pose.q) *\
+        sapien.Pose([0, 0, -2*ENV_Z_OFFSET], rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, np.pi/2])))
+    res = planner.move_to_pose_with_RRTConnect(hover_pose)  # hover above the goal site, yawed 90 degrees
+    if res == -1:
+        # print("Failed to lift pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Lower & Release
+    # -------------------------------------------------------------------------- #
+    lower_pose = sapien.Pose(hover_pose.p - [0, 0, 2*ENV_Z_OFFSET -RACK_Z*0.8], hover_pose.q)  # descend most of the hover clearance before release
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    planner.open_gripper()  # release regardless of planner result; failure is reported below
+    if res == -1:
+        print("Failed to lower pose")
+        return res
+
+
+    planner.close()
+    return res
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/stack_bowl.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/stack_bowl.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ffccb9f4cf72b4cea8eb377a906e3c23629a002
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/stack_bowl.py
@@ -0,0 +1,152 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat, quat2euler
+
+from mani_skill.envs.tasks import StackBowlEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+
+def main():  # standalone driver: run the stack-bowl solution over 100 seeds with video recording
+    env: StackBowlEnv = gym.make(
+        "StackBowl-v1",
+        obs_mode="none",
+        control_mode="pd_joint_pos",
+        render_mode="human",
+        reward_mode="dense",
+    )
+
+    # Wrap the environment with RecordVideo
+    env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)  # trigger always true: record every episode
+    for seed in range(100):  # sweep seeds 0..99
+        res = solve(env, seed=seed, debug=False, vis=True)
+        #print(res[-1])
+    env.close()
+
+def solve(env: StackBowlEnv, seed=None, debug=False, vis=False):  # grasp env.bowl by the rim and stack it onto env.bowl2
+    env.reset(seed=seed)
+    assert env.unwrapped.control_mode in [
+        "pd_joint_pos",
+        "pd_joint_pos_vel",
+        "pd_ee_delta_pose",
+    ], env.unwrapped.control_mode
+
+    planner = PandaArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+        joint_vel_limits=0.75,
+        joint_acc_limits=0.75,
+    )
+
+    env = env.unwrapped
+    FINGER_LENGTH = 0.025  # approximate usable finger depth for grasp computation
+    init_tcp_pose = env.agent.tcp.pose.sp
+
+    #rotate the ee for 90 along z axis for panda_wrist_cam
+    if env.robot_uids == "panda_wristcam":
+        res = planner.move_to_pose_with_RRTConnect(init_tcp_pose * sapien.Pose([0, 0, 0], euler2quat(0, 0, np.pi/2)))
+
+    obb = get_actor_obb(env.bowl)
+    approaching = np.array([0, 0, -1], dtype=np.float32)  # top-down approach
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)  # closing axis = column 1 of current TCP rotation
+
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+    grasp_offset = obb.extents[0] * 0.5  # half of the OBB's first extent — presumably shifts the grasp to the bowl rim; TODO confirm
+
+    grasp_pose = sapien.Pose(grasp_pose.p + [0, grasp_offset, 0], grasp_pose.q)
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = sapien.Pose(grasp_pose.p + [0, 0, 0.3], grasp_pose.q)  # pre-grasp 30 cm above
+    res = planner.move_to_pose_with_RRTConnect(reach_pose)
+    env.render()
+    if res == -1:
+        #print("Failed to reach pose")
+        return res
+
+    angles = quat2euler(reach_pose.q)
+
+    # Rotate gripper to make it parallel to y axis
+    res = planner.move_to_pose_with_RRTConnect(sapien.Pose(
+        reach_pose.p,
+        euler2quat(angles[0], angles[1], 0)
+    ))
+
+    env.render()
+
+
+    # -------------------------------------------------------------------------- #
+    # Grasp
+    # -------------------------------------------------------------------------- #
+    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+    env.render()
+    if res == -1:
+        #print("Failed to grasp pose")
+        return res
+    planner.close_gripper(gripper_state=-1)  # NOTE(review): -1 presumably commands a full close — confirm planner semantics
+    env.render()
+
+    # -------------------------------------------------------------------------- #
+    # Lift
+    # -------------------------------------------------------------------------- #
+    lift_pose = sapien.Pose([0, 0, 0.20]) * grasp_pose  # raise 20 cm in world frame
+    res = planner.move_to_pose_with_RRTConnect(lift_pose)
+    env.render()
+    if res == -1:
+        #print("Failed to lift pose")
+        return res
+
+
+    # -------------------------------------------------------------------------- #
+    # Place on bowl2
+    # -------------------------------------------------------------------------- #
+    bowl2_pose = env.bowl2.pose.sp
+
+    place_pose = sapien.Pose(bowl2_pose.p+[0.,grasp_offset,0.2+obb.extents[2]],lift_pose.q)  # hover above bowl2, keeping the rim offset
+    res = planner.move_to_pose_with_RRTConnect(place_pose)
+    env.render()
+    if res == -1:
+        #print("Failed to place on rack")
+        return res
+
+    angles = quat2euler(place_pose.q)
+
+    # Rotate gripper to make it parallel to y axis
+    res = planner.move_to_pose_with_RRTConnect(sapien.Pose(
+        place_pose.p,
+        euler2quat(angles[0], angles[1], 0)
+    ))
+
+    env.render()
+
+    # -------------------------------------------------------------------------- #
+    # Lower
+    # -------------------------------------------------------------------------- #
+    lower_pose = sapien.Pose(place_pose.p+[0,0,-0.2],place_pose.q)  # descend the 20 cm hover margin before release
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    env.render()
+    planner.open_gripper()  # release regardless of planner result; failure is reported below
+    if res == -1:
+        return res
+
+    planner.close()
+
+    env.render()
+
+    return res
+
+if __name__ == "__main__":  # script entry point
+    main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/stack_cube.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/stack_cube.py
new file mode 100644
index 0000000000000000000000000000000000000000..a45b207bf36f08621457773ef8b3c9eefce38a24
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/stack_cube.py
@@ -0,0 +1,86 @@
+import argparse
+import gymnasium as gym
+import numpy as np
+import sapien
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import StackCubeEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import \
+ PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import (
+ compute_grasp_info_by_obb, get_actor_obb)
+from mani_skill.utils.wrappers.record import RecordEpisode
+
+def solve(env: StackCubeEnv, seed=None, debug=False, vis=False):  # grasp cubeA and stack it on top of cubeB
+    env.reset(seed=seed)
+    assert env.unwrapped.control_mode in [
+        "pd_joint_pos",
+        "pd_joint_pos_vel",
+    ], env.unwrapped.control_mode
+    planner = PandaArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+    )
+    FINGER_LENGTH = 0.025  # approximate usable finger depth for grasp computation
+    env = env.unwrapped
+    obb = get_actor_obb(env.cubeA)
+
+    approaching = np.array([0, 0, -1])
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy()  # .cpu() before .numpy(): raises on CUDA tensors otherwise (matches sibling solutions)
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH,
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+
+    # Search a valid pose
+    angles = np.arange(0, np.pi * 2 / 3, np.pi / 2)
+    angles = np.repeat(angles, 2)
+    angles[1::2] *= -1  # try each candidate yaw in both directions: 0, -0, +pi/2, -pi/2
+    for angle in angles:
+        delta_pose = sapien.Pose(q=euler2quat(0, 0, angle))
+        grasp_pose2 = grasp_pose * delta_pose
+        res = planner.move_to_pose_with_screw(grasp_pose2, dry_run=True)  # plan only, do not execute
+        if res == -1:
+            continue
+        grasp_pose = grasp_pose2
+        break
+    else:
+        print("Fail to find a valid grasp pose")
+
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = grasp_pose * sapien.Pose([0, 0, -0.05])  # pre-grasp: back off 5 cm along approach axis
+    planner.move_to_pose_with_screw(reach_pose)
+
+    # -------------------------------------------------------------------------- #
+    # Grasp
+    # -------------------------------------------------------------------------- #
+    planner.move_to_pose_with_screw(grasp_pose)
+    planner.close_gripper()
+
+    # -------------------------------------------------------------------------- #
+    # Lift
+    # -------------------------------------------------------------------------- #
+    lift_pose = sapien.Pose([0, 0, 0.1]) * grasp_pose  # raise 10 cm in world frame
+    planner.move_to_pose_with_screw(lift_pose)
+
+    # -------------------------------------------------------------------------- #
+    # Stack
+    # -------------------------------------------------------------------------- #
+    goal_pose = env.cubeB.pose * sapien.Pose([0, 0, env.cube_half_size[2] * 2])
+    offset = (goal_pose.p - env.cubeA.pose.p).cpu().numpy()[0]  # remember that all data in ManiSkill is batched and a torch tensor; .cpu() needed for CUDA tensors
+    align_pose = sapien.Pose(lift_pose.p + offset, lift_pose.q)
+    planner.move_to_pose_with_screw(align_pose)
+
+    res = planner.open_gripper()
+    planner.close()
+    return res
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/stack_mug_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/stack_mug_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..a10acf33f9192f300065cfcdf12159b2bcabdfc1
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/stack_mug_on_rack.py
@@ -0,0 +1,199 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import StackMugOnRackEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+
+def main():  # standalone driver: run the stack-mug-on-rack solution over 100 seeds with video recording
+    env: StackMugOnRackEnv = gym.make(
+        "StackMugOnRack-v1",
+        obs_mode="none",
+        control_mode="pd_joint_pos",
+        render_mode="human",
+        reward_mode="dense",
+    )
+
+    # Wrap the environment with RecordVideo
+    env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)  # trigger always true: record every episode
+    for seed in range(100):  # sweep seeds 0..99
+        res = solve(env, seed=seed, debug=False, vis=True)
+        # print(res[-1])
+    env.close()
+
+def solve(env: StackMugOnRackEnv, seed=None, debug=False, vis=False):  # hang two mugs (env.mug, then env.mug2) on the rack sequentially
+    env.reset(seed=seed)
+    assert env.unwrapped.control_mode in [
+        "pd_joint_pos",
+        "pd_joint_pos_vel",
+    ], env.unwrapped.control_mode
+
+    planner = PandaArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+        joint_vel_limits=0.75,
+        joint_acc_limits=0.75,
+    )
+
+    env = env.unwrapped
+    FINGER_LENGTH = 0.025  # approximate usable finger depth for grasp computation
+
+    obb = get_actor_obb(env.mug)
+    approaching = np.array([0, 0, -1], dtype=np.float32)  # top-down approach
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)  # closing axis = column 1 of current TCP rotation
+
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+    grasp_pose = sapien.Pose(grasp_pose.p + [0.0, 0, 0.06], grasp_pose.q)  # raise grasp 6 cm above the computed center — presumably the mug rim; TODO confirm
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = sapien.Pose(grasp_pose.p + [0, 0, 0.2], grasp_pose.q)  # pre-grasp 20 cm above
+    res = planner.move_to_pose_with_RRTConnect(reach_pose)
+    env.render()
+    if res == -1:
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Grasp
+    # -------------------------------------------------------------------------- #
+    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+    env.render()
+    if res == -1:
+        # print("Failed to grasp pose")
+        return res
+    planner.close_gripper(gripper_state=-0.6)  # NOTE(review): partial close target — confirm planner semantics
+    env.render()
+
+    # -------------------------------------------------------------------------- #
+    # Lift
+    # -------------------------------------------------------------------------- #
+    lift_pose = sapien.Pose([0, 0, 0.20]) * grasp_pose  # raise 20 cm in world frame
+    res = planner.move_to_pose_with_RRTConnect(lift_pose)
+    env.render()
+    if res == -1:
+        # print("Failed to lift pose")
+        return res
+
+
+    # -------------------------------------------------------------------------- #
+    # Place on rack
+    # -------------------------------------------------------------------------- #
+    rack_pose = env.rack.pose.sp
+
+    place_pose = sapien.Pose(rack_pose.p+[-0.05,0.1,0.3],lift_pose.q)  # hover near the rack with a fixed xy offset
+    res = planner.move_to_pose_with_RRTConnect(place_pose)
+    env.render()
+    if res == -1:
+        # print("Failed to place on rack")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Lower
+    # -------------------------------------------------------------------------- #
+    lower_pose = sapien.Pose(place_pose.p+[0,0,-0.10],place_pose.q)  # descend 10 cm before release
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    env.render()
+    #time.sleep(1)
+    planner.open_gripper()  # release regardless of planner result; failure is reported below
+    if res == -1:
+        # print("Failed to lower pose")
+        return res
+
+    #-------------------------------------------------------------------------- #
+    # raise
+    # #-------------------------------------------------------------------------- #
+    res = planner.move_to_pose_with_RRTConnect(sapien.Pose(lower_pose.p+[0,0,0.2],lower_pose.q))  # retreat upward before the second mug
+    env.render()
+    obb = get_actor_obb(env.mug2)
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+    grasp_pose = sapien.Pose(grasp_pose.p + [0, 0, 0], grasp_pose.q)  # no-op offset; kept for symmetry with the first mug's +0.06 shift
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = sapien.Pose(grasp_pose.p + [0, 0, 0.2], grasp_pose.q)  # pre-grasp 20 cm above
+    res = planner.move_to_pose_with_RRTConnect(reach_pose)
+    env.render()
+    if res == -1:
+        # print("Failed to reach pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Grasp
+    # -------------------------------------------------------------------------- #
+    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+    env.render()
+    if res == -1:
+        # print("Failed to grasp pose")
+        return res
+    planner.close_gripper(gripper_state=-0.6)  # NOTE(review): partial close target — confirm planner semantics
+    env.render()
+
+    # -------------------------------------------------------------------------- #
+    # Lift
+    # -------------------------------------------------------------------------- #
+    lift_pose = sapien.Pose([0, 0, 0.30]) * grasp_pose  # raise 30 cm in world frame
+    res = planner.move_to_pose_with_RRTConnect(lift_pose)
+    env.render()
+    if res == -1:
+        # print("Failed to lift pose")
+        return res
+
+
+    # -------------------------------------------------------------------------- #
+    # Place on rack
+    # -------------------------------------------------------------------------- #
+    rack_pose = env.rack.pose.sp
+    place_pose = sapien.Pose(rack_pose.p+[-0.05,0.1,0.3],lift_pose.q)  # same rack offset as the first mug
+    res = planner.move_to_pose_with_RRTConnect(place_pose)
+    env.render()
+    #time.sleep(0.1)
+    if res == -1:
+        # print("Failed to place on rack")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Lower
+    # -------------------------------------------------------------------------- #
+    lower_pose = sapien.Pose(place_pose.p+[0,0,-0.02],place_pose.q)  # only 2 cm here: second mug hangs higher on the rack — TODO confirm
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    planner.open_gripper()
+    env.render()
+
+    # #-------------------------------------------------------------------------- #
+    # # raise
+    # #-------------------------------------------------------------------------- #
+    res = planner.move_to_pose_with_RRTConnect(sapien.Pose(lower_pose.p+[0,0,0.2],lower_pose.q))  # final upward retreat
+    env.render()
+
+
+    planner.close()
+
+    env.render()
+    #time.sleep(0.1)
+
+    return res
+
+if __name__ == "__main__":  # script entry point
+    main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/stack_plate_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/stack_plate_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..6148550a9dd647006b5becd54e0e611975437d33
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/solutions/stack_plate_on_rack.py
@@ -0,0 +1,231 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat, quat2euler
+
+from mani_skill.envs.tasks import StackPlateOnRackEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+
+def main():  # standalone driver: run the stack-plate-on-rack solution over 100 seeds with video recording
+    env: StackPlateOnRackEnv = gym.make(
+        "StackPlateOnRack-v1",
+        obs_mode="none",
+        control_mode="pd_joint_pos",
+        render_mode="human",
+        reward_mode="dense",
+    )
+
+    # Wrap the environment with RecordVideo
+    env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)  # trigger always true: record every episode
+    for seed in range(100):  # sweep seeds 0..99
+        res = solve(env, seed=seed, debug=False, vis=True)
+        #print(res[-1])
+    env.close()
+
+def solve(env: StackPlateOnRackEnv, seed=None, debug=False, vis=False):  # slot two plates (env.plate, then env.plate1) upright into the rack
+    env.reset(seed=seed)
+    assert env.unwrapped.control_mode in [
+        "pd_joint_pos",
+        "pd_joint_pos_vel",
+    ], env.unwrapped.control_mode
+
+    planner = PandaArmMotionPlanningSolver(
+        env,
+        debug=debug,
+        vis=vis,
+        base_pose=env.unwrapped.agent.robot.pose,
+        visualize_target_grasp_pose=vis,
+        print_env_info=False,
+        joint_vel_limits=0.75,
+        joint_acc_limits=0.75,
+    )
+
+    env = env.unwrapped
+    FINGER_LENGTH = 0.025  # approximate usable finger depth for grasp computation
+
+    init_arm_pose= env.agent.tcp.pose.sp  # NOTE(review): unused below — confirm whether it can be dropped
+    #print(init_arm_pose)
+    #time.sleep(2)
+
+    obb = get_actor_obb(env.plate)
+    approaching = np.array([0, 0, -1], dtype=np.float32)  # top-down approach
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)  # closing axis = column 1 of current TCP rotation
+
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    ##print(center)
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+    #offset = sapien.Pose([0, 0, 0.35])
+    ##print(grasp_pose)
+    grasp_pose = sapien.Pose(grasp_pose.p + [0, -0.09, -0.0199], grasp_pose.q)  # shift toward the plate edge and just below center — tuned constants, TODO confirm
+
+    ##print(grasp_pose)
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = sapien.Pose(grasp_pose.p + [0, 0, 0.1], grasp_pose.q)  # pre-grasp 10 cm above
+    #grasp_pose * sapien.Pose([0, 0, -0.2])
+    #print(f"Reach Pose: {reach_pose}")
+    res = planner.move_to_pose_with_RRTConnect(reach_pose)
+    env.render()
+    #time.sleep(0.1)
+    if res == -1:
+        #print("Failed to reach pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Grasp
+    # -------------------------------------------------------------------------- #
+    #print(f"Grasp Pose: {grasp_pose}")
+    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+    env.render()
+    #time.sleep(0.1)
+    if res == -1:
+        #print("Failed to grasp pose")
+        return res
+    planner.close_gripper(gripper_state=-1)  # NOTE(review): -1 presumably commands a full close — confirm planner semantics
+    env.render()
+    #time.sleep(0.1)
+
+    # -------------------------------------------------------------------------- #
+    # Lift
+    # -------------------------------------------------------------------------- #
+    lift_pose = sapien.Pose([0, 0, 0.30]) * grasp_pose  # raise 30 cm in world frame
+    #print(f"Lift Pose: {lift_pose}")
+    res = planner.move_to_pose_with_RRTConnect(lift_pose)
+    env.render()
+    #time.sleep(0.1)
+    if res == -1:
+        #print("Failed to lift pose")
+        return res
+
+    ##print(env.plate.pose.sp)
+    #print(env.agent.tcp.pose.sp)
+    # -------------------------------------------------------------------------- #
+    # Place on rack
+    # -------------------------------------------------------------------------- #
+    rack_pose = env.rack.pose.sp
+    rotation_quaternion = sapien.Pose([0, 0, 0], euler2quat(-np.pi/2+np.pi/20,0,-np.pi/2))  # tilt plate near-vertical (-81 deg about x) and yaw -90 deg
+    place_pose = (
+        sapien.Pose(rack_pose.p+[-0.147,-0.01,0.3],rotation_quaternion.q)
+        * env.plate.pose.sp.inv()
+        * env.agent.tcp.pose.sp  # maps desired plate pose to required TCP pose via the current plate-to-TCP transform
+    )
+    res = planner.move_to_pose_with_RRTConnect(place_pose)
+    env.render()
+    #time.sleep(0.1)
+    if res == -1:
+        #print("Failed to place on rack")
+        return res
+
+
+    # -------------------------------------------------------------------------- #
+    # Lower
+    # -------------------------------------------------------------------------- #
+    lower_pose = sapien.Pose(place_pose.p+[0,0,-0.2],place_pose.q)  # descend the 20 cm hover margin before release
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    env.render()
+    planner.open_gripper()  # release regardless of planner result; failure is reported below
+    if res == -1:
+        #print("Failed to lower pose")
+        return res
+
+    res = planner.move_to_pose_with_RRTConnect(sapien.Pose(place_pose.p+[0.2,0,0], place_pose.q))  # slide 20 cm away along x to clear the slotted plate
+    env.render()
+    # -------------------------------------------------------------------------- #
+    # Raise and reset the gripper
+    # -------------------------------------------------------------------------- #
+    raise_pose = sapien.Pose(lower_pose.p+[0,0,0.4],[0,1,0,0])  # return to a top-down orientation high above the rack
+
+
+    res = planner.move_to_pose_with_RRTConnect(raise_pose)
+    env.render()
+
+
+    # -------------------------------------------------------------------------- #
+    # Plate 2
+
+    obb = get_actor_obb(env.plate1)
+    approaching = np.array([0, 0, -1], dtype=np.float32)
+    target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+    grasp_info = compute_grasp_info_by_obb(
+        obb,
+        approaching=approaching,
+        target_closing=target_closing,
+        depth=FINGER_LENGTH
+    )
+    closing, center = grasp_info["closing"], grasp_info["center"]
+    grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+    grasp_pose = sapien.Pose(grasp_pose.p + [0, -0.09, -0.0199], grasp_pose.q)  # same edge offset as the first plate
+
+    # -------------------------------------------------------------------------- #
+    # Reach
+    # -------------------------------------------------------------------------- #
+    reach_pose = sapien.Pose(grasp_pose.p + [0, 0, 0.1], grasp_pose.q)
+    res = planner.move_to_pose_with_RRTConnect(reach_pose)
+    env.render()
+    if res == -1:
+        #print("Failed to reach pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Grasp
+    # -------------------------------------------------------------------------- #
+    res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+    env.render()
+    if res == -1:
+        #print("Failed to grasp pose")
+        return res
+    planner.close_gripper(gripper_state=-1)
+    env.render()
+
+    # -------------------------------------------------------------------------- #
+    # Lift
+    # -------------------------------------------------------------------------- #
+    lift_pose = sapien.Pose([0, 0, 0.30]) * grasp_pose
+    res = planner.move_to_pose_with_RRTConnect(lift_pose)
+    env.render()
+    if res == -1:
+        #print("Failed to lift pose")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Place on rack
+    # -------------------------------------------------------------------------- #
+    rack_pose = env.rack.pose.sp
+    rotation_quaternion = sapien.Pose([0, 0, 0], euler2quat(-np.pi/2+np.pi/30,0,-np.pi/2))  # slightly shallower tilt (-84 deg) for the second slot
+    place_pose = (
+        sapien.Pose(rack_pose.p+[-0.120,-0.01,0.3],rotation_quaternion.q)
+        * env.plate1.pose.sp.inv()
+        * env.agent.tcp.pose.sp  # same plate-to-TCP relative-transform trick as above
+    )
+    res = planner.move_to_pose_with_RRTConnect(place_pose)
+    env.render()
+    if res == -1:
+        #print("Failed to place on rack")
+        return res
+
+    # -------------------------------------------------------------------------- #
+    # Lower
+    # -------------------------------------------------------------------------- #
+    lower_pose = sapien.Pose(place_pose.p+[0,0,-0.20],place_pose.q)
+    res = planner.move_to_pose_with_RRTConnect(lower_pose)
+    env.render()
+    planner.open_gripper()  # release regardless of planner result; failure is reported below
+    if res == -1:
+        #print("Failed to lower pose")
+        return res
+
+    return res
+
+if __name__ == "__main__":  # script entry point
+    main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/utils.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb2cb60c4d4bb660daa86eff4468c8f142d88bdf
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/noahbiarm/utils.py
@@ -0,0 +1,116 @@
+import numpy as np
+import sapien
+import sapien.physx as physx
+import sapien.render
+import trimesh
+import torch
+from transforms3d import quaternions
+from mani_skill.utils.structs import Actor
+from mani_skill.utils import common
+from mani_skill.utils.geometry.trimesh_utils import get_component_mesh
+
+
+def get_actor_obb(actor: Actor, to_world_frame=True, vis=False):
+    """Return the oriented bounding box of an actor's collision mesh.
+
+    Uses only the FIRST managed object (``actor._objs[0]``) and its
+    PhysxRigidDynamicComponent; asserts if no mesh can be extracted.
+
+    Args:
+        actor: the Actor whose mesh is queried.
+        to_world_frame: if True, the mesh (and hence OBB) is in world coordinates.
+        vis: if True, shows the mesh with the OBB overlaid (translucent red).
+
+    Returns:
+        trimesh.primitives.Box: the oriented bounding box.
+    """
+    mesh = get_component_mesh(
+        actor._objs[0].find_component_by_type(physx.PhysxRigidDynamicComponent),
+        to_world_frame=to_world_frame,
+    )
+    assert mesh is not None, "can not get actor mesh for {}".format(actor)
+
+    obb: trimesh.primitives.Box = mesh.bounding_box_oriented
+
+    if vis:
+        # Debug view: tint the OBB nearly transparent red over the mesh.
+        obb.visual.vertex_colors = (255, 0, 0, 10)
+        trimesh.Scene([mesh, obb]).show()
+
+    return obb
+
+def get_3d_bbox(actor, to_world_frame=True, vis=False):
+    """
+    Compute the oriented 3D bounding box of an actor and return its representation as batched tensors.
+
+    Args:
+        actor: The Actor object.
+        to_world_frame (bool): Whether to get the bounding box in world coordinates.
+        vis (bool): If True, visualize the bounding box overlaid on the mesh.
+
+    Returns:
+        dict: A dictionary containing:
+            - 'vertices_world': (1,8,3) tensor
+            - 'rotation': (1,3,3) tensor
+            - 'translation': (1,3) tensor
+            - 'extents': (1,3) tensor
+    """
+    # Delegate OBB extraction, then repackage the trimesh box as batched
+    # float32 torch tensors (batch dim of 1 for downstream batched code).
+    obb = get_actor_obb(actor, to_world_frame=to_world_frame, vis=vis)
+
+    obb_data = {
+        "vertices_world": torch.tensor(obb.vertices, dtype=torch.float32).unsqueeze(0),   # (1,8,3)
+        "rotation": torch.tensor(obb.transform[:3, :3], dtype=torch.float32).unsqueeze(0),  # (1,3,3)
+        "translation": torch.tensor(obb.transform[:3, 3], dtype=torch.float32).unsqueeze(0),  # (1,3)
+        "extents": torch.tensor(obb.extents, dtype=torch.float32).unsqueeze(0),           # (1,3)
+    }
+    return obb_data
+
+def compute_grasp_info_by_obb(
+    obb: trimesh.primitives.Box,
+    approaching=(0, 0, -1),
+    target_closing=None,
+    depth=0.0,
+    ortho=True,
+):
+    """Compute grasp info given an oriented bounding box.
+    The grasp info includes axes to define grasp frame, namely approaching, closing, orthogonal directions and center.
+
+    Args:
+        obb: oriented bounding box to grasp
+        approaching: direction to approach the object
+        target_closing: target closing direction, used to select one of multiple solutions
+        depth: displacement from hand to tcp along the approaching vector. Usually finger length.
+        ortho: whether to orthogonalize closing w.r.t. approaching.
+
+    Returns:
+        dict with keys ``approaching``, ``closing``, ``center``, ``extents``;
+        ``extents`` is reordered as (approach axis, closing axis, third axis).
+    """
+    # NOTE(jigu): DO NOT USE `x.extents`, which is inconsistent with `x.primitive.transform`!
+    extents = np.array(obb.primitive.extents)
+    T = np.array(obb.primitive.transform)
+
+    # Assume normalized
+    approaching = np.array(approaching)
+
+    # Find the axis closest to approaching vector
+    angles = approaching @ T[:3, :3]  # [3]
+    inds0 = np.argsort(np.abs(angles))
+    ind0 = inds0[-1]
+
+    # Find the shorter axis as closing vector
+    # inds0[0:-1] are the two OBB axes NOT aligned with approaching.
+    inds1 = np.argsort(extents[inds0[0:-1]])
+    ind1 = inds0[0:-1][inds1[0]]
+    ind2 = inds0[0:-1][inds1[1]]
+
+    # If sizes are close, choose the one closest to the target closing
+    if target_closing is not None and 0.99 < (extents[ind1] / extents[ind2]) < 1.01:
+        vec1 = T[:3, ind1]
+        vec2 = T[:3, ind2]
+        # Swap so the closing axis is the one more aligned with target_closing.
+        if np.abs(target_closing @ vec1) < np.abs(target_closing @ vec2):
+            ind1 = inds0[0:-1][inds1[1]]
+            ind2 = inds0[0:-1][inds1[0]]
+    closing = T[:3, ind1]
+
+    # Flip if far from target
+    if target_closing is not None and target_closing @ closing < 0:
+        closing = -closing
+
+    # Reorder extents
+    extents = extents[[ind0, ind1, ind2]]
+
+    # Find the origin on the surface
+    # Move from the box center toward the approach face, then back in by
+    # min(depth, half_size) so fingers do not overshoot thin objects.
+    center = T[:3, 3].copy()
+    half_size = extents[0] * 0.5
+    center = center + approaching * (-half_size + min(depth, half_size))
+
+    if ortho:
+        # Gram-Schmidt: make closing perpendicular to approaching.
+        closing = closing - (approaching @ closing) * approaching
+        closing = common.np_normalize_vector(closing)
+
+    grasp_info = dict(
+        approaching=approaching, closing=closing, center=center, extents=extents
+    )
+    return grasp_info
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/__init__.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/__pycache__/__init__.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cee0b4bb961060b3caaf8250597c19c4911a3078
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/__pycache__/__init__.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/__pycache__/motionplanner.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/__pycache__/motionplanner.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..5c018666584020aea1eba2fdeeccc835ed9b02f4
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/__pycache__/motionplanner.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/__pycache__/run.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/__pycache__/run.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..76254ab6a98b583be172d5cc7637c640e1c6b296
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/__pycache__/run.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/__pycache__/utils.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/__pycache__/utils.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ff3e3b302c9e6e56ec9a43008380d53d43d1305b
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/__pycache__/utils.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/generate.sh b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/generate.sh
new file mode 100644
index 0000000000000000000000000000000000000000..22d392635b195b01b72978da7474e304a4dda884
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/generate.sh
@@ -0,0 +1,9 @@
+# Generate all motion planning demos for the dataset
+for env_id in PushCube-v1 PickCube-v1 StackCube-v1 PegInsertionSide-v1 PlugCharger-v1
+do
+ python -m mani_skill.examples.motionplanning.panda.run --env-id $env_id \
+ --traj-name="trajectory" --only-count-success --save-video -n 1 \
+ --shader="rt" # generate sample videos
+ mv demos/$env_id/motionplanning/0.mp4 demos/$env_id/motionplanning/sample.mp4
+ python -m mani_skill.examples.motionplanning.panda.run --env-id $env_id --traj-name="trajectory" -n 1000 --only-count-success
+done
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/motionplanner.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/motionplanner.py
new file mode 100644
index 0000000000000000000000000000000000000000..2312a30f0eb4ccda94fe1a3977ca68bcb9512b7f
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/motionplanner.py
@@ -0,0 +1,387 @@
+import mplib
+import numpy as np
+import sapien
+import trimesh
+from functools import partial
+from typing import Callable, Optional
+import torch
+
+from mani_skill.agents.base_agent import BaseAgent
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.envs.scene import ManiSkillScene
+from mani_skill.utils.structs.pose import to_sapien_pose
+from mani_skill.utils.geometry import rotation_conversions
+import sapien.physx as physx
+OPEN = 1
+CLOSED = -1
+
+
+class PandaArmMotionPlanningSolver:
+    """Motion-planning wrapper for a Panda arm inside a ManiSkill env.
+
+    Builds an ``mplib.Planner`` from the agent's URDF/SRDF and exposes
+    pose-reaching primitives (RRTConnect, screw interpolation, constrained
+    RRTConnect) plus gripper open/close helpers.  Planned joint paths are
+    executed by stepping ``env`` directly, so calling these methods
+    advances the simulation and accumulates ``elapsed_steps``.
+    """
+
+    def __init__(
+        self,
+        env: BaseEnv,
+        debug: bool = False,
+        vis: bool = True,
+        base_pose: sapien.Pose = None,  # TODO mplib doesn't support robot base being anywhere but 0
+        visualize_target_grasp_pose: bool = True,
+        print_env_info: bool = True,
+        joint_vel_limits=0.9,
+        joint_acc_limits=0.9,
+    ):
+        self.env = env
+        self.base_env: BaseEnv = env.unwrapped
+        self.env_agent: BaseAgent = self.base_env.agent
+        self.robot = self.env_agent.robot
+        self.tcp = self.env_agent.tcp
+        self.joint_vel_limits = joint_vel_limits
+        self.joint_acc_limits = joint_acc_limits
+
+        self.base_pose = to_sapien_pose(base_pose)
+
+        self.planner = self.setup_planner()
+        self.control_mode = self.base_env.control_mode
+
+        self.debug = debug
+        self.vis = vis
+        self.print_env_info = print_env_info
+        self.visualize_target_grasp_pose = visualize_target_grasp_pose
+        self.gripper_state = OPEN
+        self.grasp_pose_visual = None
+        if self.vis and self.visualize_target_grasp_pose:
+            # Reuse the visual marker if a previous solver already built one
+            # in this scene; otherwise create it.
+            if "grasp_pose_visual" not in self.base_env.scene.actors:
+                self.grasp_pose_visual = build_panda_gripper_grasp_pose_visual(
+                    self.base_env.scene
+                )
+            else:
+                self.grasp_pose_visual = self.base_env.scene.actors["grasp_pose_visual"]
+            self.grasp_pose_visual.set_pose(self.base_env.agent.tcp.pose)
+        self.elapsed_steps = 0
+
+        self.use_point_cloud = False
+        self.collision_pts_changed = False
+        self.all_collision_pts = None
+
+    def render_wait(self):
+        # Debug helper: block the human viewer until the user presses 'c'.
+        # No-op unless both vis and debug are enabled.
+        if not self.vis or not self.debug:
+            return
+        print("Press [c] to continue")
+        viewer = self.base_env.render_human()
+        while True:
+            if viewer.window.key_down("c"):
+                break
+            self.base_env.render_human()
+
+    def setup_planner(self):
+        """Create an mplib Planner for the agent's URDF with the configured joint limits."""
+        link_names = [link.get_name() for link in self.robot.get_links()]
+        joint_names = [joint.get_name() for joint in self.robot.get_active_joints()]
+        planner = mplib.Planner(
+            urdf=self.env_agent.urdf_path,
+            srdf=self.env_agent.urdf_path.replace(".urdf", ".srdf"),
+            user_link_names=link_names,
+            user_joint_names=joint_names,
+            move_group="panda_hand_tcp",
+            # 7 entries: the Panda arm joints; gripper joints are commanded separately.
+            joint_vel_limits=np.ones(7) * self.joint_vel_limits,
+            joint_acc_limits=np.ones(7) * self.joint_acc_limits,
+        )
+        # if mplib version 0.2.1
+        b_pose = mplib.Pose(self.base_pose.p, self.base_pose.q)
+        planner.set_base_pose(b_pose)
+
+        # elif mplib version 0.1.1
+        # planner.set_base_pose(np.hstack([self.base_pose.p, self.base_pose.q]))
+        return planner
+
+    def follow_path(self, result, refine_steps: int = 0):
+        """Execute a planned joint trajectory by stepping the env.
+
+        ``refine_steps`` extra steps repeat the final waypoint so the
+        controller can settle.  Returns the last (obs, reward, terminated,
+        truncated, info) tuple from ``env.step``.
+        """
+        n_step = result["position"].shape[0]
+        for i in range(n_step + refine_steps):
+            qpos = result["position"][min(i, n_step - 1)]
+            if self.control_mode == "pd_joint_pos_vel":
+                qvel = result["velocity"][min(i, n_step - 1)]
+                action = np.hstack([qpos, qvel, self.gripper_state])
+            else:
+                action = np.hstack([qpos, self.gripper_state])
+            obs, reward, terminated, truncated, info = self.env.step(action)
+            self.elapsed_steps += 1
+            if self.print_env_info:
+                print(
+                    f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
+                )
+            if self.vis:
+                self.base_env.render_human()
+        return obs, reward, terminated, truncated, info
+
+    def move_to_pose_with_RRTConnect(
+        self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0, t: int = 100
+    ):
+        """Plan to ``pose`` with sampling-based planning and execute the best plan.
+
+        Planning is attempted ``t`` times; among successes the plan with the
+        smallest reported duration is kept.  Returns -1 if all attempts
+        fail, the raw plan dict if ``dry_run``, otherwise the result of
+        follow_path.
+        """
+        pose = to_sapien_pose(pose)
+        if self.grasp_pose_visual is not None:
+            self.grasp_pose_visual.set_pose(pose)
+        pose = sapien.Pose(p=pose.p, q=pose.q)
+
+        result = None
+        min_result = None
+        min_duration = float('inf')
+        for attempt in range(t):
+            result = self.planner.plan_pose(
+                # np.concatenate([pose.p, pose.q]),
+                mplib.Pose(pose.p, pose.q),
+                self.robot.get_qpos().cpu().numpy()[0],
+                time_step=self.base_env.control_timestep,
+                # use_point_cloud=self.use_point_cloud,
+                wrt_world=True,
+            )
+            if result["status"] == "Success":
+                if result["duration"] < min_duration:
+                    min_result = result
+                    min_duration = result["duration"]
+
+        result = min_result
+
+        if result is None or result["status"] != "Success":
+            return -1
+
+
+        self.render_wait()
+        if dry_run:
+            return result
+        return self.follow_path(result, refine_steps=refine_steps)
+
+    def move_to_pose_with_screw(
+        self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0, trials: int = 10
+    ):
+        """Plan to ``pose`` via screw-motion interpolation.
+
+        If the first attempt fails, retries up to ``trials`` times with a
+        small random perturbation of the target quaternion.  Returns -1 on
+        failure, the plan dict if ``dry_run``, else follow_path's result.
+        """
+        pose = to_sapien_pose(pose)
+        if self.grasp_pose_visual is not None:
+            self.grasp_pose_visual.set_pose(pose)
+        pose = sapien.Pose(p=pose.p, q=pose.q)
+
+        base_q = pose.q  # save the original quaternion
+        noise_level = 1e-2  # small noise scale; adjust as needed
+        result = None
+
+        for attempt in range(trials):
+            # For all trials after the first, add a small random perturbation
+            if attempt > 0:
+                noise = np.random.normal(scale=noise_level, size=base_q.shape)
+                noisy_q = base_q + noise
+                noisy_q = noisy_q / np.linalg.norm(noisy_q)  # re-normalize to ensure a valid quaternion
+            else:
+                noisy_q = base_q
+
+            # Create a new pose for this trial using the potentially noisy quaternion
+            trial_pose = sapien.Pose(p=pose.p, q=noisy_q)
+            result = self.planner.plan_screw(
+                mplib.Pose(trial_pose.p, trial_pose.q),
+                self.robot.get_qpos().cpu().numpy()[0],
+                time_step=self.base_env.control_timestep,
+            )
+            if result["status"] == "Success":
+                break
+
+        if result is None or result["status"] != "Success":
+            print(result["status"] if result is not None else "No result")
+            self.render_wait()
+            return -1
+
+        self.render_wait()
+        if dry_run:
+            return result
+        return self.follow_path(result, refine_steps=refine_steps)
+
+    def make_f(self, f):
+        # Bind self as the first argument of a user constraint function
+        # (returns None when no constraint is given).
+        if f is not None:
+            return partial(f, self)
+
+    def make_j(self, j):
+        # Same binding for the constraint jacobian.
+        if j is not None:
+            return partial(j, self)
+
+    def get_eef_x(self):
+        """Return the end-effector frame's x-axis as a float32 unit vector,
+        computed by forward kinematics on the planner's pinocchio model at
+        the planner robot's current qpos."""
+        move_link_idx = self.planner.link_name_2_idx[self.planner.move_group]
+        move_joint_idx = self.planner.move_group_joint_indices
+        self.planner.pinocchio_model.compute_forward_kinematics(self.planner.robot.get_qpos())
+        new_pose = self.planner.pinocchio_model.get_link_pose(move_link_idx)
+        eef_rot = rotation_conversions.quaternion_to_matrix(torch.tensor(new_pose.q))
+        eef_x = eef_rot[:, 0].cpu().numpy().astype(np.float32).reshape(-1)
+        # eef_y = eef_rot[:, 1].cpu().numpy().astype(np.float32).reshape(-1)
+        # eef_z = eef_rot[:, 2].cpu().numpy().astype(np.float32).reshape(-1)
+        return eef_x
+
+    def move_to_pose_with_CRRTConnect(
+        self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0,
+        f: Optional[Callable] = None, j: Optional[Callable] = None,
+    ):
+        """Constrained planning: reach ``pose`` while satisfying an optional
+        constraint function ``f`` and jacobian ``j`` (each gets this solver
+        bound as its first argument).  Returns -1 on planning failure."""
+
+        pose = to_sapien_pose(pose)
+        if self.grasp_pose_visual is not None:
+            self.grasp_pose_visual.set_pose(pose)
+        pose = sapien.Pose(p=pose.p, q=pose.q)
+        # print(self.get_eef_z())
+        # breakpoint()
+        # print("control time step")
+        # print(self.base_env.control_timestep)
+        result = self.planner.plan_pose(
+            # np.concatenate([pose.p, pose.q]),
+            mplib.Pose(pose.p, pose.q),
+            self.robot.get_qpos().cpu().numpy()[0],
+            time_step=self.base_env.control_timestep,
+            # use_point_cloud=self.use_point_cloud,
+            wrt_world=True,
+            constraint_function=self.make_f(f),
+            constraint_jacobian=self.make_j(j),
+            constraint_tolerance= 1e-2,
+        )
+        if result["status"] != "Success":
+            print(result["status"])
+            self.render_wait()
+            return -1
+        self.render_wait()
+        if dry_run:
+            return result
+        return self.follow_path(result, refine_steps=refine_steps)
+
+    def open_gripper(self):
+        """Command the gripper open and hold the arm pose for 6 env steps.
+
+        NOTE(review): the step count is hardcoded here while close_gripper
+        takes a ``t`` parameter — consider unifying.
+        """
+        self.gripper_state = OPEN
+        # Current arm qpos (drop the 2 gripper joints) used as the hold target.
+        qpos = self.robot.get_qpos()[0, :-2].cpu().numpy()
+        for i in range(6):
+            if self.control_mode == "pd_joint_pos":
+                action = np.hstack([qpos, self.gripper_state])
+            else:
+                action = np.hstack([qpos, qpos * 0, self.gripper_state])
+            obs, reward, terminated, truncated, info = self.env.step(action)
+            self.elapsed_steps += 1
+            if self.print_env_info:
+                print(
+                    f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
+                )
+            if self.vis:
+                self.base_env.render_human()
+        return obs, reward, terminated, truncated, info
+
+    def close_gripper(self, t=6, gripper_state = CLOSED):
+        """Command the gripper to ``gripper_state`` and hold the arm pose for ``t`` env steps."""
+        self.gripper_state = gripper_state
+        qpos = self.robot.get_qpos()[0, :-2].cpu().numpy()
+        for i in range(t):
+            if self.control_mode == "pd_joint_pos":
+                action = np.hstack([qpos, self.gripper_state])
+            else:
+                action = np.hstack([qpos, qpos * 0, self.gripper_state])
+            obs, reward, terminated, truncated, info = self.env.step(action)
+            self.elapsed_steps += 1
+            if self.print_env_info:
+                print(
+                    f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
+                )
+            if self.vis:
+                self.base_env.render_human()
+        return obs, reward, terminated, truncated, info
+
+    def add_box_collision(self, extents: np.ndarray, pose: sapien.Pose):
+        """Sample 256 surface points of a box and register them as planner collision geometry."""
+        self.use_point_cloud = True
+        box = trimesh.creation.box(extents, transform=pose.to_transformation_matrix())
+        pts, _ = trimesh.sample.sample_surface(box, 256)
+        if self.all_collision_pts is None:
+            self.all_collision_pts = pts
+        else:
+            self.all_collision_pts = np.vstack([self.all_collision_pts, pts])
+        self.planner.update_point_cloud(self.all_collision_pts)
+
+    def add_collision_pts(self, pts: np.ndarray):
+        """Append raw collision points and push the accumulated cloud to the planner."""
+        if self.all_collision_pts is None:
+            self.all_collision_pts = pts
+        else:
+            self.all_collision_pts = np.vstack([self.all_collision_pts, pts])
+        self.planner.update_point_cloud(self.all_collision_pts)
+
+    def clear_collisions(self):
+        # Forget all registered collision points; the planner's cloud is
+        # refreshed on the next add_* call.
+        self.all_collision_pts = None
+        self.use_point_cloud = False
+
+    def close(self):
+        # No resources to release; present for API symmetry with env.close().
+        pass
+
+from transforms3d import quaternions
+
+
+def build_panda_gripper_grasp_pose_visual(scene: ManiSkillScene):
+    """Build a kinematic, visual-only marker shaped like a Panda gripper.
+
+    Used to display a target grasp pose in the viewer: a sphere at the TCP,
+    a green "palm" and stem, and red/blue bars where the two fingers would
+    be.  The actor has no collision shapes.
+
+    Returns:
+        The kinematic actor named ``"grasp_pose_visual"``.
+    """
+    builder = scene.create_actor_builder()
+    grasp_pose_visual_width = 0.01
+    grasp_width = 0.05
+
+    # Sphere marking the TCP origin.
+    builder.add_sphere_visual(
+        pose=sapien.Pose(p=[0, 0, 0.0]),
+        radius=grasp_pose_visual_width,
+        material=sapien.render.RenderMaterial(base_color=[0.3, 0.4, 0.8, 0.7])
+    )
+
+    # Stem behind the palm (along -z).
+    builder.add_box_visual(
+        pose=sapien.Pose(p=[0, 0, -0.08]),
+        half_size=[grasp_pose_visual_width, grasp_pose_visual_width, 0.02],
+        material=sapien.render.RenderMaterial(base_color=[0, 1, 0, 0.7]),
+    )
+    # Palm bar spanning the grasp width.
+    builder.add_box_visual(
+        pose=sapien.Pose(p=[0, 0, -0.05]),
+        half_size=[grasp_pose_visual_width, grasp_width, grasp_pose_visual_width],
+        material=sapien.render.RenderMaterial(base_color=[0, 1, 0, 0.7]),
+    )
+    # Two finger bars, rotated 90 deg about y; blue and red distinguish sides.
+    builder.add_box_visual(
+        pose=sapien.Pose(
+            p=[
+                0.03 - grasp_pose_visual_width * 3,
+                grasp_width + grasp_pose_visual_width,
+                0.03 - 0.05,
+            ],
+            q=quaternions.axangle2quat(np.array([0, 1, 0]), theta=np.pi / 2),
+        ),
+        half_size=[0.04, grasp_pose_visual_width, grasp_pose_visual_width],
+        material=sapien.render.RenderMaterial(base_color=[0, 0, 1, 0.7]),
+    )
+    builder.add_box_visual(
+        pose=sapien.Pose(
+            p=[
+                0.03 - grasp_pose_visual_width * 3,
+                -grasp_width - grasp_pose_visual_width,
+                0.03 - 0.05,
+            ],
+            q=quaternions.axangle2quat(np.array([0, 1, 0]), theta=np.pi / 2),
+        ),
+        half_size=[0.04, grasp_pose_visual_width, grasp_pose_visual_width],
+        material=sapien.render.RenderMaterial(base_color=[1, 0, 0, 0.7]),
+    )
+    grasp_pose_visual = builder.build_kinematic(name="grasp_pose_visual")
+    return grasp_pose_visual
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/run.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/run.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb9fd44a37c5f747d97506e222250ff4f01206b6
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/run.py
@@ -0,0 +1,201 @@
+import multiprocessing as mp
+import os
+from copy import deepcopy
+import time
+import argparse
+import gymnasium as gym
+import numpy as np
+from tqdm import tqdm
+import os.path as osp
+import sapien.core as sapien
+import tkinter as tk
+from mani_skill.utils.wrappers.record import RecordEpisode
+from mani_skill.trajectory.merge_trajectory import merge_trajectories
+
+from mani_skill.examples.motionplanning.panda.solutions import (
+ solvePushCube,
+ solvePickCube,
+ solveStackCube,
+ solvePegInsertionSide,
+ solvePlugCharger,
+ solvePullCubeTool,
+ solveLiftPegUpright,
+ solvePullCube,
+ solvePlateOnRack,
+ solveMugOnRack,
+ solveBowlOnRack,
+ solveStackMugOnRack,
+ solveStackBowl,
+ solveForkOnRack,
+ solveStackPlateOnRack,
+ solveMugOnCoffeeMachine,
+ solveMugFromCoffeeMachine,
+ solveSpoonOnRack,
+ solveKnifeOnRack,
+ solveGraspFork_v0,
+ solveGraspBowl_v0,
+ solveGraspPlate_v0,
+ solveGraspCup_v0,
+ )
+
+MP_SOLUTIONS = {
+ "PickCube-v1": solvePickCube,
+ "StackCube-v1": solveStackCube,
+ "PegInsertionSide-v1": solvePegInsertionSide,
+ "PlugCharger-v1": solvePlugCharger,
+ "PushCube-v1": solvePushCube,
+ "PullCubeTool-v1": solvePullCubeTool,
+ "LiftPegUpright-v1": solveLiftPegUpright,
+ "PullCube-v1": solvePullCube,
+ "PlacePlateOnRack-v1": solvePlateOnRack,
+ "PlaceMugOnRack-v1": solveMugOnRack,
+ "PlaceBowlOnRack-v1": solveBowlOnRack,
+ "StackMugOnRack-v1": solveStackMugOnRack,
+ "StackBowl-v1": solveStackBowl,
+ "PlaceForkOnRack-v1": solveForkOnRack,
+ "StackPlateOnRack-v1": solveStackPlateOnRack,
+ "PlaceMugOnCoffeeMachine-v1": solveMugOnCoffeeMachine,
+ "PickMugFromCoffeeMachine-v1": solveMugFromCoffeeMachine,
+ "PlaceSpoonOnRack-v1": solveSpoonOnRack,
+ "PlaceKnifeOnRack-v1": solveKnifeOnRack,
+ "GraspFork-v0": solveGraspFork_v0,
+ "GraspBowl-v0": solveGraspBowl_v0,
+ "GraspPlate-v0": solveGraspPlate_v0,
+ "GraspCup-v0": solveGraspCup_v0
+}
+
+def parse_args(args=None):
+    """Build and parse the CLI for the motion-planning demo generator.
+
+    NOTE(review): the ``args`` parameter is accepted but not forwarded to
+    ``parser.parse_args`` — the real process argv is always used; confirm
+    whether programmatic invocation with explicit args is intended.
+    """
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-e", "--env-id", type=str, default="PickCube-v1", help=f"Environment to run motion planning solver on. Available options are {list(MP_SOLUTIONS.keys())}")
+    parser.add_argument("-o", "--obs-mode", type=str, default="none", help="Observation mode to use. Usually this is kept as 'none' as observations are not necesary to be stored, they can be replayed later via the mani_skill.trajectory.replay_trajectory script.")
+    parser.add_argument("-n", "--num-traj", type=int, default=10, help="Number of trajectories to generate.")
+    parser.add_argument("--only-count-success", action="store_true", help="If true, generates trajectories until num_traj of them are successful and only saves the successful trajectories/videos")
+    parser.add_argument("--reward-mode", type=str)
+    parser.add_argument("-b", "--sim-backend", type=str, default="auto", help="Which simulation backend to use. Can be 'auto', 'cpu', 'gpu'")
+    parser.add_argument("--render-mode", type=str, default="rgb_array", help="can be 'sensors' or 'rgb_array' which only affect what is saved to videos")
+    parser.add_argument("--vis", action="store_true", help="whether or not to open a GUI to visualize the solution live")
+    parser.add_argument("--save-video", action="store_true", help="whether or not to save videos locally")
+    parser.add_argument("--traj-name", type=str, help="The name of the trajectory .h5 file that will be created.")
+    parser.add_argument("--shader", default="default", type=str, help="Change shader used for rendering. Default is 'default' which is very fast. Can also be 'rt' for ray tracing and generating photo-realistic renders. Can also be 'rt-fast' for a faster but lower quality ray-traced renderer")
+    parser.add_argument("--record-dir", type=str, default="demos", help="where to save the recorded trajectories")
+    parser.add_argument("--num-procs", type=int, default=1, help="Number of processes to use to help parallelize the trajectory replay process. This uses CPU multiprocessing and only works with the CPU simulation backend at the moment.")
+    parser.add_argument("--rand_level", type=int, default=0, help="the level of randomization of objects in the task")
+    parser.add_argument("--robot_uids", type=str, default="panda_wristcam", help="set robot uids")
+    return parser.parse_args()
+
+def _main(args, proc_id: int = 0, start_seed: int = 0) -> str:
+ env_id = args.env_id
+ print(env_id)
+ env = gym.make(
+ env_id,
+ obs_mode=args.obs_mode,
+ control_mode="pd_joint_pos",
+ render_mode=args.render_mode,
+ reward_mode="dense" if args.reward_mode is None else args.reward_mode,
+ sensor_configs=dict(shader_pack=args.shader),
+ human_render_camera_configs=dict(
+ shader_pack=args.shader,
+ ),
+ viewer_camera_configs=dict(shader_pack=args.shader),
+ sim_backend=args.sim_backend,
+ rand_level=args.rand_level,
+ robot_uids=args.robot_uids,
+ )
+
+ if env_id not in MP_SOLUTIONS:
+ raise RuntimeError(f"No already written motion planning solutions for {env_id}. Available options are {list(MP_SOLUTIONS.keys())}")
+
+ if not args.traj_name:
+ new_traj_name = time.strftime("%Y%m%d_%H%M%S")
+ else:
+ new_traj_name = args.traj_name
+
+ if args.num_procs > 1:
+ new_traj_name = new_traj_name + "." + str(proc_id)
+ env = RecordEpisode(
+ env,
+ output_dir=osp.join(args.record_dir, env_id, "motionplanning"),
+ trajectory_name=new_traj_name, save_video=args.save_video,
+ source_type="motionplanning",
+ source_desc="official motion planning solution from ManiSkill contributors",
+ video_fps=30,
+ save_on_reset=False
+ )
+ output_h5_path = env._h5_file.filename
+ solve = MP_SOLUTIONS[env_id]
+ print(f"Motion Planning Running on {env_id}")
+ pbar = tqdm(range(args.num_traj), desc=f"proc_id: {proc_id}")
+ seed = start_seed
+ successes = []
+ solution_episode_lengths = []
+ failed_motion_plans = 0
+ passed = 0
+ while True:
+ try:
+ res = solve(env, seed=seed, debug=False, vis=True if args.vis else False)
+ except Exception as e:
+ print(f"Cannot find valid solution because of an error in motion planning solution: {e}")
+ res = -1
+
+ if res == -1:
+ success = False
+ failed_motion_plans += 1
+ else:
+ success = res[-1]["success"].item()
+ elapsed_steps = res[-1]["elapsed_steps"].item()
+ solution_episode_lengths.append(elapsed_steps)
+ successes.append(success)
+ if args.only_count_success and not success:
+ seed += 1
+ env.flush_trajectory(save=False)
+ if args.save_video:
+ env.flush_video(save=False)
+ continue
+ else:
+ env.flush_trajectory()
+ if args.save_video:
+ env.flush_video()
+ pbar.update(1)
+ pbar.set_postfix(
+ dict(
+ success_rate=np.mean(successes),
+ failed_motion_plan_rate=failed_motion_plans / (seed + 1),
+ avg_episode_length=np.mean(solution_episode_lengths),
+ max_episode_length=np.max(solution_episode_lengths),
+ # min_episode_length=np.min(solution_episode_lengths)
+ )
+ )
+ seed += 1
+ passed += 1
+ if passed == args.num_traj:
+ break
+ env.close()
+ return output_h5_path
+
+def main(args):
+ if args.num_procs > 1 and args.num_procs < args.num_traj:
+ if args.num_traj < args.num_procs:
+ raise ValueError("Number of trajectories should be greater than or equal to number of processes")
+ args.num_traj = args.num_traj // args.num_procs
+ seeds = [*range(0, args.num_procs * args.num_traj, args.num_traj)]
+ pool = mp.Pool(args.num_procs)
+ proc_args = [(deepcopy(args), i, seeds[i]) for i in range(args.num_procs)]
+ res = pool.starmap(_main, proc_args)
+ pool.close()
+ # Merge trajectory files
+ output_path = res[0][: -len("0.h5")] + "h5"
+ merge_trajectories(output_path, res)
+ for h5_path in res:
+ tqdm.write(f"Remove {h5_path}")
+ os.remove(h5_path)
+ json_path = h5_path.replace(".h5", ".json")
+ tqdm.write(f"Remove {json_path}")
+ os.remove(json_path)
+ else:
+ _main(args)
+
+if __name__ == "__main__":
+ # start = time.time()
+ mp.set_start_method("spawn")
+ main(parse_args())
+ # print(f"Total time taken: {time.time() - start}")
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__init__.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2ab39b92651a802f794932b22599398d28f9ae1
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__init__.py
@@ -0,0 +1,23 @@
+from .pick_cube import solve as solvePickCube
+from .stack_cube import solve as solveStackCube
+from .peg_insertion_side import solve as solvePegInsertionSide
+from .plug_charger import solve as solvePlugCharger
+from .push_cube import solve as solvePushCube
+from .pull_cube_tool import solve as solvePullCubeTool
+from .lift_peg_upright import solve as solveLiftPegUpright
+from .pull_cube import solve as solvePullCube
+from .plate_on_rack import solve as solvePlateOnRack
+from .mug_on_rack import solve as solveMugOnRack
+from .bowl_on_rack import solve as solveBowlOnRack
+from .stack_mug_on_rack import solve as solveStackMugOnRack
+from .stack_bowl import solve as solveStackBowl
+from .fork_on_rack import solve as solveForkOnRack
+from .stack_plate_on_rack import solve as solveStackPlateOnRack
+from .mug_on_coffee_machine import solve as solveMugOnCoffeeMachine
+from .mug_from_coffee_machine import solve as solveMugFromCoffeeMachine
+from .spoon_on_rack import solve as solveSpoonOnRack
+from .knife_on_rack import solve as solveKnifeOnRack
+from .grasp_fork_v0 import solve as solveGraspFork_v0
+from .grasp_bowl_v0 import solve as solveGraspBowl_v0
+from .grasp_plate_v0 import solve as solveGraspPlate_v0
+from .grasp_cup_v0 import solve as solveGraspCup_v0
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/__init__.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7a71ebc71afad3645ff0c16f9729dd6b6f37835a
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/__init__.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/grasp_cup_v0.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/grasp_cup_v0.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6efe1efc9bff7ab7557aa4bb1ac1cb4aeaebb59
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/grasp_cup_v0.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/lift_peg_upright.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/lift_peg_upright.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d64e685f76235d0bd8e8a1f2bf1d74e43b19c6f9
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/lift_peg_upright.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/plug_charger.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/plug_charger.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..559b9490cda1dc2a3ce3ddf4d878e35e22cd0a5a
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/plug_charger.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/spoon_on_rack.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/spoon_on_rack.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..ff20d0b69e79b32d4ce430fdac9c1b9a32dc8630
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/__pycache__/spoon_on_rack.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/bowl_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/bowl_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebf087fa7c83a787fe1c9616b833b0c2114ff8aa
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/bowl_on_rack.py
@@ -0,0 +1,189 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceBowlOnRackEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+
+def main():
+ env: PlaceBowlOnRackEnv = gym.make(
+ "PlaceBowlOnRack-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ #print(res[-1])
+ env.close()
+
+def solve(env: PlaceBowlOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+ #print("Debug")
+
+ # Check collision shapes
+ # #print(f"Debug: Bowl collision shapes: {env.unwrapped.bowl.get_collision_shapes()}")
+ # #print(f"Debug: Rack collision shapes: {env.unwrapped.rack.get_collision_shapes()}")
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+ FINGER_LENGTH = 0.025
+ #time.sleep(2)
+ #print(env.bowl.pose.sp)
+ obb = get_actor_obb(env.bowl)
+ #print(obb)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ # print(env.bowl.pose.sp.p)
+ # print(center)
+ # print(approaching)
+ # print(target_closing)
+ # print(closing)
+ #print(center)
+ #print(env.bowl.pose.sp.p)
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, center+[0,0.06,0])
+ #offset = sapien.Pose([0, 0, 0.35])
+ #print(grasp_pose)
+ grasp_pose = sapien.Pose(grasp_pose.p, grasp_pose.q)
+ #print(grasp_pose)
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = sapien.Pose(grasp_pose.p + [0, 0, 0.3], grasp_pose.q)
+ #grasp_pose * sapien.Pose([0, 0, -0.2])
+ #print(f"Reach Pose: {reach_pose}")
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ env.render()
+ #time.sleep(0.1)
+ if res == -1:
+ #print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ #print(f"Grasp Pose: {grasp_pose}")
+ res = planner.move_to_pose_with_RRTConnect(sapien.Pose([0,0,0.08])*grasp_pose)
+ env.render()
+ #time.sleep(0.1)
+ if res == -1:
+ #print("Failed to grasp pose")
+ return res
+ planner.close_gripper(gripper_state=-0.6)
+ env.render()
+ #time.sleep(0.1)
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = sapien.Pose([0, 0, 0.40]) * grasp_pose
+ #print(f"Lift Pose: {lift_pose}")
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ env.render()
+ #time.sleep(0.1)
+ if res == -1:
+ #print("Failed to lift pose")
+ return res
+
+
+ # -------------------------------------------------------------------------- #
+ # Pre Place on rack
+ # -------------------------------------------------------------------------- #
+ rack_pose = env.rack.pose.sp
+
+ goal_pose = sapien.Pose(rack_pose.p+[0,-0.2,0.4], euler2quat(13*np.pi/20,0,0))
+ #print(goal_pose)
+ #print(env.agent.tcp.pose.sp)
+ pre_place_pose = (
+ goal_pose
+ * env.bowl.pose.sp.inv()
+ * env.agent.tcp.pose.sp
+ )
+ #print(pre_place_pose)
+ #pre_place_pose.p = rack_pose.p+[0,0.14,0.4]
+ # print(lift_pose)
+ # print(pre_place_pose)
+ # print(rack_pose)
+ # print(env.bowl.pose.sp)
+ # print(env.agent.tcp.pose.sp)
+ # place_pose = sapien.Pose(rack_pose.p+[0,0,0.3], rotation_quaternion.q)
+ # #place_pose = sapien.Pose(rack_pose.p+[0,0,0.3],lift_pose.q)
+ # #* sapien.Pose([0, 0, 0.15])
+ # ##print(f"Rack Pose: {rack_pose}")
+ # ##print(f"Place Pose: {place_pose}")
+ res = planner.move_to_pose_with_RRTConnect(pre_place_pose, refine_steps=5)
+ env.render()
+ # print(rack_pose)
+ # print(env.bowl.pose.sp)
+ # print(env.bowl.pose.sp.inv())
+ # #print(place_pose)
+ # #time.sleep(0.1)
+ # if res == -1:
+ # #print("Failed to place on rack")
+ # return res
+
+ #-------------------------------------------------------------------------- #
+ #Place
+ #-------------------------------------------------------------------------- #
+ #place_pose = goal_pose*sapien.Pose([0,0.1,-0.2]) * env.bowl.pose.sp.inv() * env.agent.tcp.pose.sp
+ #* sapien.Pose([0, 0, -0.15])
+ #print(f"Lower Pose: {lower_pose}")
+ #print(pre_place_pose)
+ place_pose = sapien.Pose([0, 0, -0.13]+pre_place_pose.p, pre_place_pose.q)
+ #euler2quat(0,-np.pi/9,0))
+ res = planner.move_to_pose_with_RRTConnect(place_pose)
+ #print(place_pose)
+ #print(env.bowl.pose.sp)
+ #print(env.rack.pose.sp)
+ env.render()
+ #time.sleep(1)
+ planner.open_gripper()
+ if res == -1:
+ #print("Failed to lower pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Retreat
+ # -------------------------------------------------------------------------- #
+ # print(env.agent.tcp.pose.sp)
+ # retreat_pose = sapien.Pose([0,-0.3,0],euler2quat(0,0,0))
+ # print(retreat_pose)
+ # # #print(f"Retreat Pose: {retreat_pose}")
+ # res = planner.move_to_pose_with_RRTConnect(retreat_pose)
+ # env.render()
+
+ return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/fork_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/fork_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..df0beef502d23d11929f95f41c30c24c78fd1108
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/fork_on_rack.py
@@ -0,0 +1,127 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceForkOnRackEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlaceForkOnRackEnv = gym.make(
+ "PlaceForkOnRack-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlaceForkOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+ #rotate the ee for 90 along z axis for panda_wrist_cam
+ if env.robot_uids == "panda_wristcam":
+ init_tcp_pose = env.agent.tcp.pose.sp
+ q_wrist = rotation_conversions.euler_to_quaternion(torch.tensor([np.pi/2, 0, 0]))
+ res = planner.move_to_pose_with_RRTConnect(init_tcp_pose * sapien.Pose([0, 0, 0], q_wrist))
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+
+ FINGER_LENGTH = 0.025
+ obb = get_actor_obb(env.fork)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.fork.pose.sp.p)
+ FORK_Z = env.fork_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = FORK_Z + RACK_Z + FINGER_LENGTH
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -ENV_Z_OFFSET])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_screw(grasp_pose)
+ if res == -1:
+ # print("Failed to grasp pose")
+ return res
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_screw(reach_pose)
+ if res == -1:
+ # print("Failed to lift pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Hover over goalsite (rack pose)
+ # -------------------------------------------------------------------------- #
+ goal_extents = torch.from_numpy(env.goal_extents)
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ hover_pose = sapien.Pose(pose.sp.p, grasp_pose.q) *\
+ sapien.Pose([0, 0, -ENV_Z_OFFSET], rotation_conversions.euler_to_quaternion(torch.tensor([0, np.pi/2, 0])))
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ # print("Failed to lift pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower & Release
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(hover_pose.p - [0, 0, RACK_Z/2 + FORK_Z], hover_pose.q)
+ res = planner.move_to_pose_with_screw(lower_pose)
+ planner.open_gripper()
+ if res == -1:
+ print("Failed to lower pose")
+ return res
+
+
+ planner.close()
+ return res
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/grasp_bowl_v0.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/grasp_bowl_v0.py
new file mode 100644
index 0000000000000000000000000000000000000000..06b5807f8eeec5481e75a229fe3fdf6d3b75325b
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/grasp_bowl_v0.py
@@ -0,0 +1,140 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+import random
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import GraspBowlEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: GraspBowlEnv = gym.make(
+ "GraspBowl-v0",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ #print(res[-1])
+ env.close()
+
+def solve(env: GraspBowlEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+ #print("Debug")
+
+ # Check collision shapes
+ # #print(f"Debug: Bowl collision shapes: {env.unwrapped.bowl.get_collision_shapes()}")
+ # #print(f"Debug: Rack collision shapes: {env.unwrapped.rack.get_collision_shapes()}")
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+ FINGER_LENGTH = 0.025
+ BOWL_D = env.bowl_extents[0]
+ BOWL_Z = env.bowl_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = BOWL_Z + RACK_Z + FINGER_LENGTH
+
+ obb = get_actor_obb(env.bowl)
+
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+ closing, center = grasp_info["closing"], grasp_info["center"]
+
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.bowl.pose.p)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler[:, 0] = 0
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = Pose.create_from_pq(p=grasp_pose.p + [random.uniform(-0.05, 0.05), BOWL_D*0.28+random.uniform(-0.05, 0.05), BOWL_Z * random.uniform(4.0, 5.0)], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ reach_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, BOWL_Z*0.0], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to grasp pose")
+ return res
+
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = Pose.create_from_pq(p=grasp_pose.p + [random.uniform(-0.1, 0.1), BOWL_D*0.28 + random.uniform(-0.1, 0.1) , BOWL_Z* random.uniform(2.0, 5.0)], q=grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ if res == -1:
+ print("Failed to lift pose")
+ return res
+
+ # lift_pose = Pose.create_from_pq(p=grasp_pose.p + [0, BOWL_D*0.28, 1.5*ENV_Z_OFFSET], q=grasp_q)
+ # res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ # if res == -1:
+ # print("Failed to lift pose")
+ # return res
+
+ # # -------------------------------------------------------------------------- #
+ # # Hover over goalsite (rack pose)
+ # # -------------------------------------------------------------------------- #
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ hover_pose = sapien.Pose(pose.sp.p + [random.uniform(-0.1, 0.1), BOWL_Z+random.uniform(-0.1, 0.1), BOWL_Z*random.uniform(2.0, 4.0)], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ print("Failed to hover pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower & Release
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(pose.sp.p + [0, BOWL_Z, BOWL_Z/10], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ if res == -1:
+ print("Failed to lower pose")
+ return res
+
+ planner.open_gripper()
+
+ return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/grasp_cup_v0.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/grasp_cup_v0.py
new file mode 100644
index 0000000000000000000000000000000000000000..c2cb8c7970a60fc56cf34eb08ff341afff570355
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/grasp_cup_v0.py
@@ -0,0 +1,143 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat,quat2euler
+
+from mani_skill.envs.tasks import GraspCupEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: GraspCupEnv = gym.make(
+ "GraspCup-v0",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: GraspCupEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+
+ FINGER_LENGTH = 0.025
+ MUG_Z = env.mug_extents[2]
+ MUG_D = env.mug_extents[0]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = MUG_Z + RACK_Z + FINGER_LENGTH
+ EPS = 1e-2
+
+ obb = get_actor_obb(env.mug)
+ mug_rot = rotation_conversions.quaternion_to_matrix(env.mug.pose.q)
+ mug_euler = rotation_conversions.quaternion_to_euler(env.mug.pose.q).cpu().numpy().astype(np.float32).reshape(-1)
+ x_new = mug_rot[:, 0].cpu().numpy().astype(np.float32).reshape(-1)
+ y_new = mug_rot[:, 1].cpu().numpy().astype(np.float32).reshape(-1)
+ z_new = mug_rot[:, 2].cpu().numpy().astype(np.float32).reshape(-1)
+
+ tip_p, tip_q = env.get_mug_tip_pose()
+ tip_pose = Pose.create_from_pq(p=tip_p, q=tip_q)
+
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, tip_pose.sp.p)
+ grasp_pose = grasp_pose * sapien.Pose([0, 0, MUG_Z*0.3])
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ # reach_pose = grasp_pose * sapien.Pose([0, 0, -ENV_Z_OFFSET])
+ # res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ # if res == -1:
+ # # print("Failed to reach pose")
+ # return res
+
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -MUG_Z])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_screw(grasp_pose)
+ if res == -1:
+ # print("Failed to grasp pose")
+ return res
+ planner.close_gripper()
+
+
+ # -------------------------------------------------------------------------- #
+    # Lift
+    # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -MUG_Z*2])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # # Hover over goalsite (rack pose)
+ # # -------------------------------------------------------------------------- #
+ # goal_extents = torch.from_numpy(env.goal_extents)
+ # p, q = env.get_goal_site_pose()
+ # pose = Pose.create_from_pq(p=p, q=grasp_pose.q)
+ # euler = [0, 0, 0]
+ # offset = [0, 0, -RACK_Z]
+ # hover_pose = sapien.Pose(pose.sp.p, grasp_pose.q) *\
+ # sapien.Pose(offset, rotation_conversions.euler_to_quaternion(torch.tensor(euler)))
+ # res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ # if res == -1:
+ # # print("Failed to lift pose")
+ # return res
+
+
+ # # -------------------------------------------------------------------------- #
+ # # Lower & Release
+ # # -------------------------------------------------------------------------- #
+ # lower_pose = sapien.Pose(hover_pose.p - [0, 0, MUG_Z], hover_pose.q)
+ # res = planner.move_to_pose_with_screw(lower_pose)
+ # if res == -1:
+ # # print("Failed to lower pose")
+ # return res
+ # planner.open_gripper()
+
+
+ planner.close()
+ return res
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/grasp_fork_v0.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/grasp_fork_v0.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ad79028e834a66e6c83ccaad3590fa865e2a7da
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/grasp_fork_v0.py
@@ -0,0 +1,126 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceForkOnRackEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: GraspForkEnv = gym.make(
+ "GraspFork-v0",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlaceForkOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+ # #rotate the ee for 90 along z axis for panda_wrist_cam
+ # if env.robot_uids == "panda_wristcam":
+ # init_tcp_pose = env.agent.tcp.pose.sp
+ # q_wrist = rotation_conversions.euler_to_quaternion(torch.tensor([np.pi/2, 0, 0]))
+ # res = planner.move_to_pose_with_RRTConnect(init_tcp_pose * sapien.Pose([0, 0, 0], q_wrist))
+ # if res == -1:
+ # return res
+
+
+ FINGER_LENGTH = 0.025
+ obb = get_actor_obb(env.fork)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.fork.pose.sp.p)
+ FORK_Z = env.fork_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = FORK_Z + RACK_Z + FINGER_LENGTH
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -ENV_Z_OFFSET])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+ if res == -1:
+ # print("Failed to grasp pose")
+ return res
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ # print("Failed to lift pose")
+ return res
+
+ # # -------------------------------------------------------------------------- #
+ # # Hover over goalsite (rack pose)
+ # # -------------------------------------------------------------------------- #
+ # goal_extents = torch.from_numpy(env.goal_extents)
+ # p, q = env.get_goal_site_pose()
+ # pose = Pose.create_from_pq(p=p, q=q)
+ # hover_pose = sapien.Pose(pose.sp.p, grasp_pose.q) *\
+ # sapien.Pose([0, 0, -ENV_Z_OFFSET], rotation_conversions.euler_to_quaternion(torch.tensor([0, np.pi/2, 0])))
+ # res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ # if res == -1:
+ # # print("Failed to lift pose")
+ # return res
+
+ # # -------------------------------------------------------------------------- #
+ # # Lower & Release
+ # # -------------------------------------------------------------------------- #
+ # lower_pose = sapien.Pose(hover_pose.p - [0, 0, RACK_Z/2 + FORK_Z], hover_pose.q)
+ # res = planner.move_to_pose_with_screw(lower_pose)
+ # planner.open_gripper()
+ # if res == -1:
+ # print("Failed to lower pose")
+ # return res
+
+
+ planner.close()
+ return res
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/grasp_plate_v0.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/grasp_plate_v0.py
new file mode 100644
index 0000000000000000000000000000000000000000..455b8d256c9bf2f41fee02d23ffef6af10d86661
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/grasp_plate_v0.py
@@ -0,0 +1,176 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat, quat2euler
+
+from mani_skill.envs.tasks import GraspPlateEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: GraspPlateEnv = gym.make(
+ "GraspPlate-v0",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: GraspPlateEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ "pd_ee_delta_pose",
+ "pd_ee_delta_pose_vel",
+ ], env.unwrapped.control_mode
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+
+ FINGER_LENGTH = 0.025
+ PLATE_D = env.plate_extents[0]
+ PLATE_Z = env.plate_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = PLATE_D + RACK_Z + FINGER_LENGTH
+
+ obb = get_actor_obb(env.plate)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+ euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+ euler[:, 0] = 0
+ grasp_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+ grasp_pose = sapien.Pose(grasp_pose.p + [0, -PLATE_D/3, 0], grasp_q)
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = sapien.Pose(grasp_pose.p + [0,0,PLATE_Z*3], grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ #print("Failed to reach pose")
+ return res
+ res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+ if res == -1:
+ #print("Failed to reach pose")
+ return res
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = sapien.Pose(grasp_pose.p + [0, 0, PLATE_D], grasp_q)
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ env.render()
+ if res == -1:
+ return res
+
+# # # -------------------------------------------------------------------------- #
+# # # Hover on top of the goal
+# # # -------------------------------------------------------------------------- #
+# p, q = env.get_goal_site_pose()
+# pose = Pose.create_from_pq(p=p, q=q)
+
+# goal_euler = rotation_conversions.quaternion_to_euler(torch.tensor(pose.q).reshape(1, -1))
+# d0 = (np.pi - torch.abs(goal_euler[:, 0])) * torch.sign(goal_euler[:, 0])
+
+# euler = rotation_conversions.quaternion_to_euler(torch.tensor(grasp_pose.q).reshape(1, -1))
+# euler[:, 0] = -np.pi/2 + np.pi/10 - d0
+# euler[:, 1] = -np.pi/10
+# euler[:, 2] = -np.pi/2
+# hover_q = np.array(rotation_conversions.euler_to_quaternion(euler)).reshape(-1)
+
+# hover_pose = sapien.Pose(pose.sp.p, hover_q)*\
+# sapien.Pose([PLATE_D/3, -1.7*PLATE_D, -2*PLATE_Z],
+# rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+# res = planner.move_to_pose_with_RRTConnect(hover_pose)
+# if res == -1:
+# print("Failed to hover pose")
+# return res
+
+# # -------------------------------------------------------------------------- #
+# # Lower & Release
+# # -------------------------------------------------------------------------- #
+# lower_pose = sapien.Pose(pose.sp.p, hover_q) *\
+# sapien.Pose([PLATE_D/3, -PLATE_D/2, -2*PLATE_Z],
+# rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+# res = planner.move_to_pose_with_RRTConnect(lower_pose)
+# if res == -1:
+# print("Failed to lower pose")
+# return res
+
+# lower_pose = sapien.Pose(pose.sp.p, hover_q) *\
+# sapien.Pose( [PLATE_D/3, 0, -1.5*PLATE_Z],
+# rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+# res = planner.move_to_pose_with_RRTConnect(lower_pose)
+# if res == -1:
+# print("Failed to lower pose")
+# return res
+
+# planner.open_gripper()
+
+
+# # -------------------------------------------------------------------------- #
+# # stay there for a while
+# # -------------------------------------------------------------------------- #
+# for _ in range(10):
+# qpos = env.agent.robot.get_qpos()[0, :-2].cpu().numpy()
+# gripper_state = -1 # open
+# action = np.hstack([qpos, gripper_state])
+# env.step(action)
+# env.render()
+
+
+# # -------------------------------------------------------------------------- #
+# # Slightly push forward
+# # -------------------------------------------------------------------------- #
+# forward_pose = sapien.Pose(pose.sp.p, hover_q) *\
+# sapien.Pose([PLATE_D/3, 0, -PLATE_Z*1.3],
+# rotation_conversions.euler_to_quaternion(torch.tensor([0, 0, 0])))
+# res = planner.move_to_pose_with_screw(forward_pose)
+# if res == -1:
+# print("Failed to forward pose")
+# return res
+
+# # -------------------------------------------------------------------------- #
+# # stay there for a while
+# # -------------------------------------------------------------------------- #
+# for _ in range(10):
+# qpos = env.agent.robot.get_qpos()[0, :-2].cpu().numpy()
+# gripper_state = -1 # open
+# action = np.hstack([qpos, gripper_state])
+# env.step(action)
+# env.render()
+
+ planner.close()
+ return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/knife_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/knife_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..acb3e3b007dbce169f99b108b3b035387a96dd79
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/knife_on_rack.py
@@ -0,0 +1,127 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceKnifeOnRackEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlaceKnifeOnRackEnv = gym.make(
+ "PlaceKnifeOnRack-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlaceKnifeOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+ #rotate the ee for 90 along z axis for panda_wrist_cam
+ if env.robot_uids == "panda_wristcam":
+ init_tcp_pose = env.agent.tcp.pose.sp
+ q_wrist = rotation_conversions.euler_to_quaternion(torch.tensor([np.pi/2, 0, 0]))
+ res = planner.move_to_pose_with_RRTConnect(init_tcp_pose * sapien.Pose([0, 0, 0], q_wrist))
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+
+ FINGER_LENGTH = 0.025
+ obb = get_actor_obb(env.fork)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.fork.pose.sp.p)
+ FORK_Z = env.fork_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = FORK_Z + RACK_Z + FINGER_LENGTH
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -ENV_Z_OFFSET])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_screw(grasp_pose)
+ if res == -1:
+ # print("Failed to grasp pose")
+ return res
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_screw(reach_pose)
+ if res == -1:
+ # print("Failed to lift pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Hover over goalsite (rack pose)
+ # -------------------------------------------------------------------------- #
+ goal_extents = torch.from_numpy(env.goal_extents)
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ hover_pose = sapien.Pose(pose.sp.p, grasp_pose.q) *\
+ sapien.Pose([0, 0, -ENV_Z_OFFSET], rotation_conversions.euler_to_quaternion(torch.tensor([0, np.pi/2, 0])))
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ # print("Failed to lift pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower & Release
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(hover_pose.p - [0, 0, RACK_Z/2 + FORK_Z], hover_pose.q)
+ res = planner.move_to_pose_with_screw(lower_pose)
+ planner.open_gripper()
+ if res == -1:
+ # print("Failed to lower pose")
+ return res
+
+
+ planner.close()
+ return res
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/lift_peg_upright.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/lift_peg_upright.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8a36cbafef569a406613ce27e27f50ba3aa0bf7
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/lift_peg_upright.py
@@ -0,0 +1,106 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+
+from mani_skill.envs.tasks import LiftPegUprightEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+
+def main():
+ env: LiftPegUprightEnv = gym.make(
+ "LiftPegUpright-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="rgb_array",
+ reward_mode="dense",
+ )
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ print(res[-1])
+ env.close()
+
+def solve(env: LiftPegUprightEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+ FINGER_LENGTH = 0.025
+
+ obb = get_actor_obb(env.peg)
+ approaching = np.array([0, 0, -1])
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy()
+ peg_init_pose = env.peg.pose
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+ offset = sapien.Pose([0.10, 0, 0])
+ grasp_pose = grasp_pose * offset
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -0.05])
+ res = planner.move_to_pose_with_screw(reach_pose)
+ if res == -1: return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_screw(grasp_pose)
+ if res == -1: return res
+ planner.close_gripper(gripper_state=-0.6)
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = sapien.Pose([0, 0, 0.30]) * grasp_pose
+ res = planner.move_to_pose_with_screw(lift_pose)
+ if res == -1: return res
+
+ # -------------------------------------------------------------------------- #
+ # Place upright
+ # -------------------------------------------------------------------------- #
+ theta = np.pi/10
+ rotation_quat = np.array([np.cos(theta), 0, np.sin(theta), 0])
+
+ final_pose = lift_pose * sapien.Pose(
+ p=[0, 0, 0],
+ q=rotation_quat
+ )
+ res = planner.move_to_pose_with_screw(final_pose)
+ if res == -1: return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose([0, 0, -0.10]) * final_pose
+ res = planner.move_to_pose_with_screw(lower_pose)
+ if res == -1: return res
+
+ planner.close()
+
+ planner.open_gripper()
+ return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/mug_from_coffee_machine.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/mug_from_coffee_machine.py
new file mode 100644
index 0000000000000000000000000000000000000000..60802b52633ab0562fc170111cffb5eda7327615
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/mug_from_coffee_machine.py
@@ -0,0 +1,228 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat,quat2euler
+
+from mani_skill.envs.tasks import PickMugFromCoffeeMachineEnv
+
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PickMugFromCoffeeMachineEnv = gym.make(
+ "PickMugFromCoffeeMachine-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ sim_config=dict(scene_config=dict(enable_pcm=False)),
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PickMugFromCoffeeMachineEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+
+ # rotate the ee for 90 along z axis for panda_wrist_cam
+ if env.robot_uids == "panda_wristcam":
+ init_tcp_pose = env.agent.tcp.pose.sp
+ q_wrist = rotation_conversions.euler_to_quaternion(torch.tensor([np.pi/2, 0, 0]))
+ res = planner.move_to_pose_with_RRTConnect(init_tcp_pose * sapien.Pose([0, 0, 0], q_wrist))
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+ FINGER_LENGTH = 0.025
+ MUG_Z = env.mug_extents[2]
+ MUG_D = env.mug_extents[0]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = MUG_Z + RACK_Z + FINGER_LENGTH * 2.5
+ EPS = 1e-2
+
+ def f(self, x, out):
+ # breakpoint()
+ # Set the robot's joint configuration to x.
+ # breakpoint()
+ self.planner.robot.set_qpos(x)
+ # For perfect alignment, the dot product with [0, 0, 1] should be 1.
+ out[0] = self.get_eef_x().dot(np.array([0, 0, 1])) - 1
+
+
+ def j(self, x, out):
+
+ # breakpoint()
+ # Pad the joint configuration.
+ full_qpos = self.planner.pad_move_group_qpos(x)
+ # Compute the Jacobian for the last link in the move group.
+ jac = self.planner.robot.get_pinocchio_model().compute_single_link_jacobian(
+ full_qpos, len(self.planner.move_group_joint_indices) - 1
+ )
+ # Extract the rotational part of the Jacobian.
+ rot_jac = jac[3:, self.planner.move_group_joint_indices]
+ # Compute the derivative of the constraint for each joint.
+ for i in range(len(self.planner.move_group_joint_indices)):
+ out[i] = np.cross(rot_jac[:, i], self.get_eef_x()).dot(np.array([0, 0, 1]))
+
+ obb = get_actor_obb(env.mug)
+ mug_rot = rotation_conversions.quaternion_to_matrix(env.mug.pose.q)
+ mug_euler = rotation_conversions.quaternion_to_euler(env.mug.pose.q).cpu().numpy().astype(np.float32).reshape(-1)
+ x_new = mug_rot[:, 0].cpu().numpy().astype(np.float32).reshape(-1)
+ y_new = mug_rot[:, 1].cpu().numpy().astype(np.float32).reshape(-1)
+ z_new = mug_rot[:, 2].cpu().numpy().astype(np.float32).reshape(-1)
+
+ tip_p, tip_q = env.get_mug_tip_pose()
+ tip_pose = Pose.create_from_pq(p=tip_p, q=tip_q)
+
+ mode = None
+ diff = np.linalg.norm(z_new - np.array([0, 0, 1], dtype=np.float32))
+ if diff < EPS:
+ mode = "on_tip"
+ else:
+ diff = np.linalg.norm(z_new - np.array([0, 0, -1], dtype=np.float32))
+ if diff < EPS:
+ mode = "on_tail"
+ else:
+ mode = "on_side"
+ print(mode)
+
+ p, q = env.get_goal_site_pose()
+ side = int((p[0, 1] > 0)*2-1)
+ print(side)
+
+
+ if mode == "on_tip":
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ elif mode == "on_tail":
+ approaching = np.array([0, side, 0], dtype=np.float32)
+ elif mode == "on_side":
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+
+ if mode == "on_tip":
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, tip_pose.sp.p)
+ grasp_pose = grasp_pose * sapien.Pose([0, 0, -2*MUG_Z+FINGER_LENGTH])
+ elif mode == "on_tail":
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, tip_pose.sp.p - np.array([0, 0, MUG_Z*0.5]))
+ # grasp_pose = grasp_pose * sapien.Pose([MUG_Z*0.2, 0, 0])
+ elif mode == "on_side":
+ pose = env.mug.pose.sp * sapien.Pose([0, 0, -FINGER_LENGTH])
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, pose.p)
+ grasp_pose = grasp_pose * sapien.Pose([0, 0, -FINGER_LENGTH])
+
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ if mode=="on_tip":
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -ENV_Z_OFFSET])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+ elif mode=="on_tail":
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -MUG_Z])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+ # reach_pose = grasp_pose * sapien.Pose([0, 0, -MUG_Z/2])
+ # res = planner.move_to_pose_with_CRRTConnect(reach_pose, f=None, j=None)
+ # if res == -1:
+ # print("Failed to reach pose")
+ # return res
+ elif mode=="on_side":
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -ENV_Z_OFFSET])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_screw(grasp_pose)
+ if res == -1:
+ print("Failed to grasp pose")
+ return res
+ planner.close_gripper()
+
+
+
+ # -------------------------------------------------------------------------- #
+ # Hover next to goalsite (rack pose)
+ # -------------------------------------------------------------------------- #
+ p, q = env.final_site.pose.p, grasp_pose.q
+ p[:, 2] = float(grasp_pose.p[2])
+ pose = Pose.create_from_pq(p=p, q=q)
+
+ if mode == "on_tip":
+ euler = [0, 0, 0]
+ offset = [0, 0, -ENV_Z_OFFSET]
+ elif mode == "on_tail":
+ euler = [0, 0, 0]
+ offset = [0, 0, 0]
+ elif mode == "on_side":
+ euler = [0, 0, 0]
+ offset = [0, 0, -ENV_Z_OFFSET]
+ hover_pose = sapien.Pose(pose.sp.p, grasp_pose.q)
+ res = planner.move_to_pose_with_CRRTConnect(hover_pose, f=None, j=None)
+ if res == -1:
+ print("Failed to lift pose")
+ return res
+
+
+ # -------------------------------------------------------------------------- #
+ # move right & Release
+ # -------------------------------------------------------------------------- #
+ if mode == "on_tip":
+ lower_pose = sapien.Pose(hover_pose.p - [0, 0, RACK_Z*0.9 + MUG_Z], hover_pose.q)
+ elif mode == "on_tail":
+ lower_pose = sapien.Pose(hover_pose.p - [0, 0, hover_pose.p[2]-MUG_Z*0.5], hover_pose.q)
+ elif mode == "on_side":
+ lower_pose = sapien.Pose(hover_pose.p - [0, 0, RACK_Z*0.9], hover_pose.q)
+
+ res = planner.move_to_pose_with_CRRTConnect(lower_pose, f=None, j=None)
+ planner.open_gripper()
+ if res == -1:
+ print("Failed to lower pose")
+ return res
+
+
+ planner.close()
+ return res
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/mug_on_coffee_machine.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/mug_on_coffee_machine.py
new file mode 100644
index 0000000000000000000000000000000000000000..079b220befdeeb8f9bd4fa6e70b6d77690dc41df
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/mug_on_coffee_machine.py
@@ -0,0 +1,225 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat,quat2euler
+
+from mani_skill.envs.tasks import PlaceMugOnCoffeeMachineEnv
+
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlaceMugOnCoffeeMachineEnv = gym.make(
+ "PlaceMugOnCoffeeMachine-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ sim_config=dict(scene_config=dict(enable_pcm=False)),
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlaceMugOnCoffeeMachineEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+
+ # rotate the ee for 90 along z axis for panda_wrist_cam
+ if env.robot_uids == "panda_wristcam":
+ init_tcp_pose = env.agent.tcp.pose.sp
+ q_wrist = rotation_conversions.euler_to_quaternion(torch.tensor([np.pi/2, 0, 0]))
+ res = planner.move_to_pose_with_RRTConnect(init_tcp_pose * sapien.Pose([0, 0, 0], q_wrist))
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+ FINGER_LENGTH = 0.025
+ MUG_Z = env.mug_extents[2]
+ MUG_D = env.mug_extents[0]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = MUG_Z + RACK_Z + FINGER_LENGTH * 2.5
+ EPS = 1e-2
+
+ def f(self, x, out):
+ # breakpoint()
+ # Set the robot's joint configuration to x.
+ # breakpoint()
+ self.planner.robot.set_qpos(x)
+ # For perfect alignment, the dot product with [0, 0, 1] should be 1.
+ out[0] = self.get_eef_x().dot(np.array([0, 0, 1])) - 1
+
+
+ def j(self, x, out):
+
+ # breakpoint()
+ # Pad the joint configuration.
+ full_qpos = self.planner.pad_move_group_qpos(x)
+ # Compute the Jacobian for the last link in the move group.
+ jac = self.planner.robot.get_pinocchio_model().compute_single_link_jacobian(
+ full_qpos, len(self.planner.move_group_joint_indices) - 1
+ )
+ # Extract the rotational part of the Jacobian.
+ rot_jac = jac[3:, self.planner.move_group_joint_indices]
+ # Compute the derivative of the constraint for each joint.
+ for i in range(len(self.planner.move_group_joint_indices)):
+ out[i] = np.cross(rot_jac[:, i], self.get_eef_x()).dot(np.array([0, 0, 1]))
+
+ obb = get_actor_obb(env.mug)
+ mug_rot = rotation_conversions.quaternion_to_matrix(env.mug.pose.q)
+ mug_euler = rotation_conversions.quaternion_to_euler(env.mug.pose.q).cpu().numpy().astype(np.float32).reshape(-1)
+ x_new = mug_rot[:, 0].cpu().numpy().astype(np.float32).reshape(-1)
+ y_new = mug_rot[:, 1].cpu().numpy().astype(np.float32).reshape(-1)
+ z_new = mug_rot[:, 2].cpu().numpy().astype(np.float32).reshape(-1)
+
+ tip_p, tip_q = env.get_mug_tip_pose()
+ tip_pose = Pose.create_from_pq(p=tip_p, q=tip_q)
+
+ mode = None
+ diff = np.linalg.norm(z_new - np.array([0, 0, 1], dtype=np.float32))
+ if diff < EPS:
+ mode = "on_tip"
+ else:
+ diff = np.linalg.norm(z_new - np.array([0, 0, -1], dtype=np.float32))
+ if diff < EPS:
+ mode = "on_tail"
+ else:
+ mode = "on_side"
+ print(mode)
+
+
+
+ if mode == "on_tip":
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ elif mode == "on_tail":
+ approaching = np.array([1, 0, 0], dtype=np.float32)
+ elif mode == "on_side":
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+
+ if mode == "on_tip":
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, tip_pose.sp.p)
+ grasp_pose = grasp_pose * sapien.Pose([0, 0, -2*MUG_Z+FINGER_LENGTH])
+ elif mode == "on_tail":
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, tip_pose.sp.p)
+ grasp_pose = grasp_pose * sapien.Pose([-MUG_Z*0.2, 0, 0])
+ elif mode == "on_side":
+ pose = env.mug.pose.sp * sapien.Pose([0, 0, -FINGER_LENGTH])
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, pose.p)
+ grasp_pose = grasp_pose * sapien.Pose([0, 0, -FINGER_LENGTH])
+
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ if mode=="on_tip":
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -ENV_Z_OFFSET])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+ elif mode=="on_tail":
+ reach_pose = grasp_pose * sapien.Pose([MUG_Z*2, 0, -MUG_Z])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+ reach_pose = grasp_pose * sapien.Pose([MUG_Z/2, 0, -MUG_Z])
+ res = planner.move_to_pose_with_screw(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+ elif mode=="on_side":
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -ENV_Z_OFFSET])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ print("Failed to reach pose")
+ return res
+
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_screw(grasp_pose)
+ if res == -1:
+ print("Failed to grasp pose")
+ return res
+ planner.close_gripper()
+
+
+ # -------------------------------------------------------------------------- #
+ # Hover next to goalsite (rack pose)
+ # -------------------------------------------------------------------------- #
+ goal_extents = torch.from_numpy(env.goal_extents)
+ p, q = env.get_goal_site_pose()
+ side = int((p[0, 1] > 0)*2-1)
+ print(side)
+ pose = Pose.create_from_pq(p=p, q=q)
+ if mode == "on_tip":
+ euler = [0, 0, 0]
+ offset = [0, 0, -ENV_Z_OFFSET]
+ elif mode == "on_tail":
+ euler = [0, 0, side * np.pi/2]
+ offset = [MUG_Z/2, side*MUG_Z, 0]
+ elif mode == "on_side":
+ euler = [0, 0, 0]
+ offset = [0, 0, -ENV_Z_OFFSET]
+ hover_pose = sapien.Pose(pose.sp.p, grasp_pose.q) *\
+ sapien.Pose(offset, rotation_conversions.euler_to_quaternion(torch.tensor(euler)))
+ res = planner.move_to_pose_with_CRRTConnect(hover_pose)
+ if res == -1:
+ print("Failed to lift pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # move right & Release
+ # -------------------------------------------------------------------------- #
+ if mode == "on_tip":
+ lower_pose = sapien.Pose(hover_pose.p - [0, 0, RACK_Z*0.9 + MUG_Z], hover_pose.q)
+ elif mode == "on_tail":
+ lower_pose = sapien.Pose(hover_pose.p - [0, -side*MUG_Z, 0], hover_pose.q)
+ elif mode == "on_side":
+ lower_pose = sapien.Pose(hover_pose.p - [0, 0, RACK_Z*0.9], hover_pose.q)
+
+ res = planner.move_to_pose_with_screw(lower_pose)
+ planner.open_gripper()
+ if res == -1:
+ print("Failed to lower pose")
+ return res
+
+
+ planner.close()
+ return res
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/peg_insertion_side.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/peg_insertion_side.py
new file mode 100644
index 0000000000000000000000000000000000000000..cfe632130d0261c0a764f2d58a331dd44d20cfac
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/peg_insertion_side.py
@@ -0,0 +1,99 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+
+from mani_skill.envs.tasks import PegInsertionSideEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import \
+ PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import (
+ compute_grasp_info_by_obb, get_actor_obb)
+
+
+def main():
+ env: PegInsertionSideEnv = gym.make(
+ "PegInsertionSide-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="rgb_array",
+ reward_mode="dense",
+ )
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ print(res[-1])
+ env.close()
+
+
+def solve(env: PegInsertionSideEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+ env = env.unwrapped
+ FINGER_LENGTH = 0.025
+
+ obb = get_actor_obb(env.peg)
+ approaching = np.array([0, 0, -1])
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].numpy()
+
+ peg_init_pose = env.peg.pose
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb, approaching=approaching, target_closing=target_closing, depth=FINGER_LENGTH
+ )
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+ offset = sapien.Pose([-max(0.05, env.peg_half_sizes[0, 0] / 2 + 0.01), 0, 0])
+ grasp_pose = grasp_pose * (offset)
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * (sapien.Pose([0, 0, -0.05]))
+ res = planner.move_to_pose_with_screw(reach_pose)
+ if res == -1: return res
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_screw(grasp_pose)
+ if res == -1: return res
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Align Peg
+ # -------------------------------------------------------------------------- #
+
+ # align the peg with the hole
+ insert_pose = env.goal_pose * peg_init_pose.inv() * grasp_pose
+ offset = sapien.Pose([-0.01 - env.peg_half_sizes[0, 0], 0, 0])
+ pre_insert_pose = insert_pose * (offset)
+ res = planner.move_to_pose_with_screw(pre_insert_pose)
+ if res == -1: return res
+ # refine the insertion pose
+ for i in range(3):
+ delta_pose = env.goal_pose * (offset) * env.peg.pose.inv()
+ pre_insert_pose = delta_pose * pre_insert_pose
+ res = planner.move_to_pose_with_screw(pre_insert_pose)
+ if res == -1: return res
+
+ # -------------------------------------------------------------------------- #
+ # Insert
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_screw(insert_pose * (sapien.Pose([0.05, 0, 0])))
+ if res == -1: return res
+ planner.close()
+ return res
+
+
+if __name__ == "__main__":
+ main()
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/pick_cube.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/pick_cube.py
new file mode 100644
index 0000000000000000000000000000000000000000..6774b9eb7dd997feb9283a0a9dfe358446fb25c3
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/pick_cube.py
@@ -0,0 +1,76 @@
+import numpy as np
+import sapien
+
+from mani_skill.envs.tasks import PickCubeEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import \
+ PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import (
+ compute_grasp_info_by_obb, get_actor_obb)
+
+def main():
+ env: GraspBowlEnv = gym.make(
+ "GraspBowl-v0",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ #print(res[-1])
+ env.close()
+
+def solve(env: PickCubeEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ )
+
+ FINGER_LENGTH = 0.025
+ env = env.unwrapped
+
+ # retrieves the object oriented bounding box (trimesh box object)
+ obb = get_actor_obb(env.cube)
+
+ approaching = np.array([0, 0, -1])
+ # get transformation matrix of the tcp pose, is default batched and on torch
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy()
+ # we can build a simple grasp pose using this information for Panda
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH,
+ )
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.cube.pose.sp.p)
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -0.05])
+ planner.move_to_pose_with_screw(reach_pose)
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ planner.move_to_pose_with_screw(grasp_pose)
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Move to goal pose
+ # -------------------------------------------------------------------------- #
+ goal_pose = sapien.Pose(env.goal_site.pose.sp.p, grasp_pose.q)
+ res = planner.move_to_pose_with_screw(goal_pose)
+
+ planner.close()
+ return res
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/plate_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/plate_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9ca0e422bcc98553db9ce65ee3e6e308983af90
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/plate_on_rack.py
@@ -0,0 +1,132 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat, quat2euler
+
+from mani_skill.envs.tasks import PlacePlateOnRackEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+
+def main():
+ env: PlacePlateOnRackEnv = gym.make(
+ "PlacePlateOnRack-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlacePlateOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ "pd_ee_delta_pose",
+ "pd_ee_delta_pose_vel",
+ ], env.unwrapped.control_mode
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+ FINGER_LENGTH = 0.025
+
+ obb = get_actor_obb(env.plate)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+ grasp_pose = sapien.Pose(grasp_pose.p + [-0.1, 0, 0], grasp_pose.q)
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = sapien.Pose(grasp_pose.p + [0, 0, 0.2], grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ env.render()
+ if res == -1:
+ #print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+ env.render()
+ if res == -1:
+ return res
+ planner.close_gripper(gripper_state=-1)
+ env.render()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = sapien.Pose([0, 0, 0.30]) * grasp_pose
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ env.render()
+ if res == -1:
+ return res
+
+
+ # -------------------------------------------------------------------------- #
+ # Place on rack
+ # -------------------------------------------------------------------------- #
+ rack_pose = env.rack.pose.sp
+ rack_z_rot = quat2euler(env.rack.pose.sp.q)[2]
+ # orient the plate perpendicular to the rack
+ goal_pose_q = euler2quat(np.pi/2,np.pi/2,rack_z_rot+np.pi/2)
+ goal_pose = sapien.Pose(rack_pose.p+[0,0,0.3],q = goal_pose_q)
+ place_pose = (
+ goal_pose
+ * env.plate.pose.sp.inv()
+ * lift_pose
+ )
+ res = planner.move_to_pose_with_RRTConnect(place_pose)
+ env.render()
+ #time.sleep(0.1)
+ if res == -1:
+ #print("Failed to place on rack")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(place_pose.p+[0,0,-0.2],place_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ env.render()
+ planner.open_gripper()
+ if res == -1:
+ #print("Failed to lower pose")
+ return res
+
+ # #-------------------------------------------------------------------------- #
+ # # press down slightly (moves z by -0.03, i.e. downward, despite the old "raise" label)
+ # #-------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_RRTConnect(sapien.Pose(lower_pose.p+[0,0,-0.03],lower_pose.q))
+ env.render()
+
+ return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/plug_charger.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/plug_charger.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e1806ee72ee1a531816e78082f390a6c0eeef73
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/plug_charger.py
@@ -0,0 +1,105 @@
+import gymnasium as gym
+import numpy as np
+import sapien.core as sapien
+import trimesh
+from tqdm import tqdm
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlugChargerEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import \
+ PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import (
+ compute_grasp_info_by_obb, get_actor_obb)
+
+
+def main():
+ env: PlugChargerEnv = gym.make(
+ "PlugCharger-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="rgb_array",
+ reward_mode="sparse",
+ )
+ for seed in tqdm(range(100)):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ print(res[-1])
+ env.close()
+
+
+def solve(env: PlugChargerEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=False,
+ print_env_info=False,
+ joint_vel_limits=0.5,
+ joint_acc_limits=0.5,
+ )
+
+ FINGER_LENGTH = 0.025
+ env = env.unwrapped
+ charger_base_pose = env.charger_base_pose
+ charger_base_size = np.array(env.unwrapped._base_size) * 2
+
+ obb = trimesh.primitives.Box(
+ extents=charger_base_size,
+ transform=charger_base_pose.sp.to_transformation_matrix(),
+ )
+
+ approaching = np.array([0, 0, -1])
+ target_closing = env.agent.tcp.pose.sp.to_transformation_matrix()[:3, 1]
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH,
+ )
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+
+ # add an angle to the grasp pose
+ grasp_angle = np.deg2rad(15)
+ grasp_pose = grasp_pose * sapien.Pose(q=euler2quat(0, grasp_angle, 0))
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -0.05])
+ planner.move_to_pose_with_screw(reach_pose)
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ planner.move_to_pose_with_screw(grasp_pose)
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Align
+ # -------------------------------------------------------------------------- #
+ pre_insert_pose = (
+ env.goal_pose.sp
+ * sapien.Pose([-0.05, 0.0, 0.0])
+ * env.charger.pose.sp.inv()
+ * env.agent.tcp.pose.sp
+ )
+ insert_pose = env.goal_pose.sp * env.charger.pose.sp.inv() * env.agent.tcp.pose.sp
+ planner.move_to_pose_with_screw(pre_insert_pose, refine_steps=0)
+ planner.move_to_pose_with_screw(pre_insert_pose, refine_steps=5)
+ # -------------------------------------------------------------------------- #
+ # Insert
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_screw(insert_pose)
+
+ planner.close()
+ return res
+
+
+if __name__ == "__main__":
+ main()
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/push_cube.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/push_cube.py
new file mode 100644
index 0000000000000000000000000000000000000000..4064015dd2c898ba2398a3f52283bfbd519ba903
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/push_cube.py
@@ -0,0 +1,32 @@
+import numpy as np
+import sapien
+
+from mani_skill.envs.tasks import PushCubeEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import \
+ PandaArmMotionPlanningSolver
+
+def solve(env: PushCubeEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ )
+
+ FINGER_LENGTH = 0.025
+ env = env.unwrapped
+ planner.close_gripper()
+ reach_pose = sapien.Pose(p=env.obj.pose.sp.p + np.array([-0.05, 0, 0]), q=env.agent.tcp.pose.sp.q)
+ planner.move_to_pose_with_screw(reach_pose)
+
+ # -------------------------------------------------------------------------- #
+ # Move to goal pose
+ # -------------------------------------------------------------------------- #
+ goal_pose = sapien.Pose(p=env.goal_region.pose.sp.p + np.array([-0.12, 0, 0]),q=env.agent.tcp.pose.sp.q)
+ res = planner.move_to_pose_with_screw(goal_pose)
+
+ planner.close()
+ return res
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/spoon_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/spoon_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1b9ae847a86996ddb7d23d89258b12ad7a4c981
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/spoon_on_rack.py
@@ -0,0 +1,127 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import PlaceSpoonOnRackEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+from mani_skill.utils.geometry import rotation_conversions
+from mani_skill.utils.structs.pose import Pose
+
+def main():
+ env: PlaceSpoonOnRackEnv = gym.make(
+ "PlaceSpoonOnRack-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ env.close()
+
+def solve(env: PlaceSpoonOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+ # reorient the end-effector for panda_wristcam (NOTE(review): euler [pi/2, 0, 0] suggests an x-axis rotation, not z -- confirm intended axis)
+ if env.robot_uids == "panda_wristcam":
+ init_tcp_pose = env.agent.tcp.pose.sp
+ q_wrist = rotation_conversions.euler_to_quaternion(torch.tensor([np.pi/2, 0, 0]))
+ res = planner.move_to_pose_with_RRTConnect(init_tcp_pose * sapien.Pose([0, 0, 0], q_wrist))
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+
+ FINGER_LENGTH = 0.025
+ obb = get_actor_obb(env.fork)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, env.fork.pose.sp.p)
+ FORK_Z = env.fork_extents[2]
+ RACK_Z = env.rack_extents[2]
+ ENV_Z_OFFSET = FORK_Z + RACK_Z + FINGER_LENGTH
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -ENV_Z_OFFSET])
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ if res == -1:
+ # print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_screw(grasp_pose)
+ if res == -1:
+ # print("Failed to grasp pose")
+ return res
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_screw(reach_pose)
+ if res == -1:
+ # print("Failed to lift pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Hover over goal site (rack pose)
+ # -------------------------------------------------------------------------- #
+ goal_extents = torch.from_numpy(env.goal_extents)
+ p, q = env.get_goal_site_pose()
+ pose = Pose.create_from_pq(p=p, q=q)
+ hover_pose = sapien.Pose(pose.sp.p, grasp_pose.q) *\
+ sapien.Pose([0, 0, -ENV_Z_OFFSET], rotation_conversions.euler_to_quaternion(torch.tensor([0, np.pi/2, 0])))
+ res = planner.move_to_pose_with_RRTConnect(hover_pose)
+ if res == -1:
+ # print("Failed to lift pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower & Release
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(hover_pose.p - [0, 0, RACK_Z/2 + FORK_Z], hover_pose.q)
+ res = planner.move_to_pose_with_screw(lower_pose)
+ planner.open_gripper()
+ if res == -1:
+ # print("Failed to lower pose")
+ return res
+
+
+ planner.close()
+ return res
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/stack_bowl.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/stack_bowl.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ffccb9f4cf72b4cea8eb377a906e3c23629a002
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/stack_bowl.py
@@ -0,0 +1,152 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat, quat2euler
+
+from mani_skill.envs.tasks import StackBowlEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+
+def main():
+ env: StackBowlEnv = gym.make(
+ "StackBowl-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ #print(res[-1])
+ env.close()
+
+def solve(env: StackBowlEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ "pd_ee_delta_pose",
+ ], env.unwrapped.control_mode
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+ FINGER_LENGTH = 0.025
+ init_tcp_pose = env.agent.tcp.pose.sp
+
+ # rotate the end-effector by 90 degrees about the z axis for panda_wristcam
+ if env.robot_uids == "panda_wristcam":
+ res = planner.move_to_pose_with_RRTConnect(init_tcp_pose * sapien.Pose([0, 0, 0], euler2quat(0, 0, np.pi/2)))
+
+ obb = get_actor_obb(env.bowl)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+ grasp_offset = obb.extents[0] * 0.5
+
+ grasp_pose = sapien.Pose(grasp_pose.p + [0, grasp_offset, 0], grasp_pose.q)
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = sapien.Pose(grasp_pose.p + [0, 0, 0.3], grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ env.render()
+ if res == -1:
+ #print("Failed to reach pose")
+ return res
+
+ angles = quat2euler(reach_pose.q)
+
+ # Rotate gripper to make it parallel to y axis
+ res = planner.move_to_pose_with_RRTConnect(sapien.Pose(
+ reach_pose.p,
+ euler2quat(angles[0], angles[1], 0)
+ ))
+
+ env.render()
+
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+ env.render()
+ if res == -1:
+ #print("Failed to grasp pose")
+ return res
+ planner.close_gripper(gripper_state=-1)
+ env.render()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = sapien.Pose([0, 0, 0.20]) * grasp_pose
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ env.render()
+ if res == -1:
+ #print("Failed to lift pose")
+ return res
+
+
+ # -------------------------------------------------------------------------- #
+ # Place on bowl2
+ # -------------------------------------------------------------------------- #
+ bowl2_pose = env.bowl2.pose.sp
+
+ place_pose = sapien.Pose(bowl2_pose.p+[0.,grasp_offset,0.2+obb.extents[2]],lift_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(place_pose)
+ env.render()
+ if res == -1:
+ #print("Failed to place on rack")
+ return res
+
+ angles = quat2euler(place_pose.q)
+
+ # Rotate gripper to make it parallel to y axis
+ res = planner.move_to_pose_with_RRTConnect(sapien.Pose(
+ place_pose.p,
+ euler2quat(angles[0], angles[1], 0)
+ ))
+
+ env.render()
+
+ # -------------------------------------------------------------------------- #
+ # Lower
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(place_pose.p+[0,0,-0.2],place_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ env.render()
+ planner.open_gripper()
+ if res == -1:
+ return res
+
+ planner.close()
+
+ env.render()
+
+ return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/stack_cube.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/stack_cube.py
new file mode 100644
index 0000000000000000000000000000000000000000..a45b207bf36f08621457773ef8b3c9eefce38a24
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/stack_cube.py
@@ -0,0 +1,86 @@
+import argparse
+import gymnasium as gym
+import numpy as np
+import sapien
+from transforms3d.euler import euler2quat
+
+from mani_skill.envs.tasks import StackCubeEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import \
+ PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import (
+ compute_grasp_info_by_obb, get_actor_obb)
+from mani_skill.utils.wrappers.record import RecordEpisode
+
+def solve(env: StackCubeEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ )
+ FINGER_LENGTH = 0.025
+ env = env.unwrapped
+ obb = get_actor_obb(env.cubeA)
+
+ approaching = np.array([0, 0, -1])
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].numpy()
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH,
+ )
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+
+ # Search a valid pose
+ angles = np.arange(0, np.pi * 2 / 3, np.pi / 2)
+ angles = np.repeat(angles, 2)
+ angles[1::2] *= -1
+ for angle in angles:
+ delta_pose = sapien.Pose(q=euler2quat(0, 0, angle))
+ grasp_pose2 = grasp_pose * delta_pose
+ res = planner.move_to_pose_with_screw(grasp_pose2, dry_run=True)
+ if res == -1:
+ continue
+ grasp_pose = grasp_pose2
+ break
+ else:
+ print("Fail to find a valid grasp pose")
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = grasp_pose * sapien.Pose([0, 0, -0.05])
+ planner.move_to_pose_with_screw(reach_pose)
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ planner.move_to_pose_with_screw(grasp_pose)
+ planner.close_gripper()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = sapien.Pose([0, 0, 0.1]) * grasp_pose
+ planner.move_to_pose_with_screw(lift_pose)
+
+ # -------------------------------------------------------------------------- #
+ # Stack
+ # -------------------------------------------------------------------------- #
+ goal_pose = env.cubeB.pose * sapien.Pose([0, 0, env.cube_half_size[2] * 2])
+ offset = (goal_pose.p - env.cubeA.pose.p).numpy()[0] # remember that all data in ManiSkill is batched and a torch tensor
+ align_pose = sapien.Pose(lift_pose.p + offset, lift_pose.q)
+ planner.move_to_pose_with_screw(align_pose)
+
+ res = planner.open_gripper()
+ planner.close()
+ return res
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/stack_plate_on_rack.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/stack_plate_on_rack.py
new file mode 100644
index 0000000000000000000000000000000000000000..6148550a9dd647006b5becd54e0e611975437d33
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/solutions/stack_plate_on_rack.py
@@ -0,0 +1,231 @@
+import gymnasium as gym
+import numpy as np
+import sapien
+import torch
+import time
+from transforms3d.euler import euler2quat, quat2euler
+
+from mani_skill.envs.tasks import StackPlateOnRackEnv
+from mani_skill.examples.motionplanning.panda.motionplanner import PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda.utils import compute_grasp_info_by_obb, get_actor_obb
+
+def main():
+ env: StackPlateOnRackEnv = gym.make(
+ "StackPlateOnRack-v1",
+ obs_mode="none",
+ control_mode="pd_joint_pos",
+ render_mode="human",
+ reward_mode="dense",
+ )
+
+ # Wrap the environment with RecordVideo
+ env = gym.wrappers.RecordVideo(env, video_folder="./videos", episode_trigger=lambda x: True)
+ for seed in range(100):
+ res = solve(env, seed=seed, debug=False, vis=True)
+ #print(res[-1])
+ env.close()
+
+def solve(env: StackPlateOnRackEnv, seed=None, debug=False, vis=False):
+ env.reset(seed=seed)
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=vis,
+ print_env_info=False,
+ joint_vel_limits=0.75,
+ joint_acc_limits=0.75,
+ )
+
+ env = env.unwrapped
+ FINGER_LENGTH = 0.025
+
+ init_arm_pose= env.agent.tcp.pose.sp
+ #print(init_arm_pose)
+ #time.sleep(2)
+
+ obb = get_actor_obb(env.plate)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ ##print(center)
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+ #offset = sapien.Pose([0, 0, 0.35])
+ ##print(grasp_pose)
+ grasp_pose = sapien.Pose(grasp_pose.p + [0, -0.09, -0.0199], grasp_pose.q)
+
+ ##print(grasp_pose)
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = sapien.Pose(grasp_pose.p + [0, 0, 0.1], grasp_pose.q)
+ #grasp_pose * sapien.Pose([0, 0, -0.2])
+ #print(f"Reach Pose: {reach_pose}")
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ env.render()
+ #time.sleep(0.1)
+ if res == -1:
+ #print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ #print(f"Grasp Pose: {grasp_pose}")
+ res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+ env.render()
+ #time.sleep(0.1)
+ if res == -1:
+ #print("Failed to grasp pose")
+ return res
+ planner.close_gripper(gripper_state=-1)
+ env.render()
+ #time.sleep(0.1)
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = sapien.Pose([0, 0, 0.30]) * grasp_pose
+ #print(f"Lift Pose: {lift_pose}")
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ env.render()
+ #time.sleep(0.1)
+ if res == -1:
+ #print("Failed to lift pose")
+ return res
+
+ ##print(env.plate.pose.sp)
+ #print(env.agent.tcp.pose.sp)
+ # -------------------------------------------------------------------------- #
+ # Place on rack
+ # -------------------------------------------------------------------------- #
+ rack_pose = env.rack.pose.sp
+ rotation_quaternion = sapien.Pose([0, 0, 0], euler2quat(-np.pi/2+np.pi/20,0,-np.pi/2))
+ place_pose = (
+ sapien.Pose(rack_pose.p+[-0.147,-0.01,0.3],rotation_quaternion.q)
+ * env.plate.pose.sp.inv()
+ * env.agent.tcp.pose.sp
+ )
+ res = planner.move_to_pose_with_RRTConnect(place_pose)
+ env.render()
+ #time.sleep(0.1)
+ if res == -1:
+ #print("Failed to place on rack")
+ return res
+
+
+ # -------------------------------------------------------------------------- #
+ # Lower
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(place_pose.p+[0,0,-0.2],place_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ env.render()
+ planner.open_gripper()
+ if res == -1:
+ #print("Failed to lower pose")
+ return res
+
+ res = planner.move_to_pose_with_RRTConnect(sapien.Pose(place_pose.p+[0.2,0,0], place_pose.q))
+ env.render()
+ # -------------------------------------------------------------------------- #
+ # Raise and reset the gripper
+ # -------------------------------------------------------------------------- #
+ raise_pose = sapien.Pose(lower_pose.p+[0,0,0.4],[0,1,0,0])
+
+
+ res = planner.move_to_pose_with_RRTConnect(raise_pose)
+ env.render()
+
+
+ # -------------------------------------------------------------------------- #
+ # Plate 2
+
+ obb = get_actor_obb(env.plate1)
+ approaching = np.array([0, 0, -1], dtype=np.float32)
+ target_closing = env.agent.tcp.pose.to_transformation_matrix()[0, :3, 1].cpu().numpy().astype(np.float32)
+
+ grasp_info = compute_grasp_info_by_obb(
+ obb,
+ approaching=approaching,
+ target_closing=target_closing,
+ depth=FINGER_LENGTH
+ )
+ closing, center = grasp_info["closing"], grasp_info["center"]
+ grasp_pose = env.agent.build_grasp_pose(approaching, closing, center)
+ grasp_pose = sapien.Pose(grasp_pose.p + [0, -0.09, -0.0199], grasp_pose.q)
+
+ # -------------------------------------------------------------------------- #
+ # Reach
+ # -------------------------------------------------------------------------- #
+ reach_pose = sapien.Pose(grasp_pose.p + [0, 0, 0.1], grasp_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(reach_pose)
+ env.render()
+ if res == -1:
+ #print("Failed to reach pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Grasp
+ # -------------------------------------------------------------------------- #
+ res = planner.move_to_pose_with_RRTConnect(grasp_pose)
+ env.render()
+ if res == -1:
+ #print("Failed to grasp pose")
+ return res
+ planner.close_gripper(gripper_state=-1)
+ env.render()
+
+ # -------------------------------------------------------------------------- #
+ # Lift
+ # -------------------------------------------------------------------------- #
+ lift_pose = sapien.Pose([0, 0, 0.30]) * grasp_pose
+ res = planner.move_to_pose_with_RRTConnect(lift_pose)
+ env.render()
+ if res == -1:
+ #print("Failed to lift pose")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Place on rack
+ # -------------------------------------------------------------------------- #
+ rack_pose = env.rack.pose.sp
+ rotation_quaternion = sapien.Pose([0, 0, 0], euler2quat(-np.pi/2+np.pi/30,0,-np.pi/2))
+ place_pose = (
+ sapien.Pose(rack_pose.p+[-0.120,-0.01,0.3],rotation_quaternion.q)
+ * env.plate1.pose.sp.inv()
+ * env.agent.tcp.pose.sp
+ )
+ res = planner.move_to_pose_with_RRTConnect(place_pose)
+ env.render()
+ if res == -1:
+ #print("Failed to place on rack")
+ return res
+
+ # -------------------------------------------------------------------------- #
+ # Lower
+ # -------------------------------------------------------------------------- #
+ lower_pose = sapien.Pose(place_pose.p+[0,0,-0.20],place_pose.q)
+ res = planner.move_to_pose_with_RRTConnect(lower_pose)
+ env.render()
+ planner.open_gripper()
+ if res == -1:
+ #print("Failed to lower pose")
+ return res
+
+ return res
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/utils.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc65734a821fe14f251ec9a959a72decac5d4c69
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda/utils.py
@@ -0,0 +1,90 @@
+import numpy as np
+import sapien
+import sapien.physx as physx
+import sapien.render
+import trimesh
+from transforms3d import quaternions
+from mani_skill.utils.structs import Actor
+from mani_skill.utils import common
+from mani_skill.utils.geometry.trimesh_utils import get_component_mesh
+
+
+def get_actor_obb(actor: Actor, to_world_frame=True, vis=False):
+ mesh = get_component_mesh(
+ actor._objs[0].find_component_by_type(physx.PhysxRigidDynamicComponent),
+ to_world_frame=to_world_frame,
+ )
+ assert mesh is not None, "can not get actor mesh for {}".format(actor)
+
+ obb: trimesh.primitives.Box = mesh.bounding_box_oriented
+
+ if vis:
+ obb.visual.vertex_colors = (255, 0, 0, 10)
+ trimesh.Scene([mesh, obb]).show()
+
+ return obb
+
+
+def compute_grasp_info_by_obb(
+ obb: trimesh.primitives.Box,
+ approaching=(0, 0, -1),
+ target_closing=None,
+ depth=0.0,
+ ortho=True,
+):
+ """Compute grasp info given an oriented bounding box.
+ The grasp info includes axes to define grasp frame, namely approaching, closing, orthogonal directions and center.
+
+ Args:
+ obb: oriented bounding box to grasp
+ approaching: direction to approach the object
+ target_closing: target closing direction, used to select one of multiple solutions
+ depth: displacement from hand to tcp along the approaching vector. Usually finger length.
+ ortho: whether to orthogonalize closing w.r.t. approaching.
+ """
+ # NOTE(jigu): DO NOT USE `x.extents`, which is inconsistent with `x.primitive.transform`!
+ extents = np.array(obb.primitive.extents)
+ T = np.array(obb.primitive.transform)
+
+ # Assume normalized
+ approaching = np.array(approaching)
+
+ # Find the axis closest to approaching vector
+ angles = approaching @ T[:3, :3] # [3]
+ inds0 = np.argsort(np.abs(angles))
+ ind0 = inds0[-1]
+
+ # Find the shorter axis as closing vector
+ inds1 = np.argsort(extents[inds0[0:-1]])
+ ind1 = inds0[0:-1][inds1[0]]
+ ind2 = inds0[0:-1][inds1[1]]
+
+ # If sizes are close, choose the one closest to the target closing
+ if target_closing is not None and 0.99 < (extents[ind1] / extents[ind2]) < 1.01:
+ vec1 = T[:3, ind1]
+ vec2 = T[:3, ind2]
+ if np.abs(target_closing @ vec1) < np.abs(target_closing @ vec2):
+ ind1 = inds0[0:-1][inds1[1]]
+ ind2 = inds0[0:-1][inds1[0]]
+ closing = T[:3, ind1]
+
+ # Flip if far from target
+ if target_closing is not None and target_closing @ closing < 0:
+ closing = -closing
+
+ # Reorder extents
+ extents = extents[[ind0, ind1, ind2]]
+
+ # Find the origin on the surface
+ center = T[:3, 3].copy()
+ half_size = extents[0] * 0.5
+ center = center + approaching * (-half_size + min(depth, half_size))
+
+ if ortho:
+ closing = closing - (approaching @ closing) * approaching
+ closing = common.np_normalize_vector(closing)
+
+ grasp_info = dict(
+ approaching=approaching, closing=closing, center=center, extents=extents
+ )
+ return grasp_info
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda_stick/__init__.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda_stick/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda_stick/motionplanner.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda_stick/motionplanner.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd4de25d541d467bbbc50cb6ebbd7712e7277f62
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/motionplanning/panda_stick/motionplanner.py
@@ -0,0 +1,162 @@
+import mplib
+import numpy as np
+import sapien
+import trimesh
+
+from mani_skill.agents.base_agent import BaseAgent
+from mani_skill.envs.sapien_env import BaseEnv
+from mani_skill.envs.scene import ManiSkillScene
+from mani_skill.utils.structs.pose import to_sapien_pose
+
+class PandaStickMotionPlanningSolver:
+ def __init__(
+ self,
+ env: BaseEnv,
+ debug: bool = False,
+ vis: bool = True,
+ base_pose: sapien.Pose = None, # TODO mplib doesn't support robot base being anywhere but 0
+ visualize_target_grasp_pose: bool = True,
+ print_env_info: bool = True,
+ joint_vel_limits=0.9,
+ joint_acc_limits=0.9,
+ ):
+ self.env = env
+ self.base_env: BaseEnv = env.unwrapped
+ self.env_agent: BaseAgent = self.base_env.agent
+ self.robot = self.env_agent.robot
+ self.joint_vel_limits = joint_vel_limits
+ self.joint_acc_limits = joint_acc_limits
+
+ self.base_pose = to_sapien_pose(base_pose)
+
+ self.planner = self.setup_planner()
+ self.control_mode = self.base_env.control_mode
+
+ self.debug = debug
+ self.vis = vis
+ self.print_env_info = print_env_info
+ self.visualize_target_grasp_pose = visualize_target_grasp_pose
+ self.elapsed_steps = 0
+
+ self.use_point_cloud = False
+ self.collision_pts_changed = False
+ self.all_collision_pts = None
+
+ def render_wait(self):
+ if not self.vis or not self.debug:
+ return
+ print("Press [c] to continue")
+ viewer = self.base_env.render_human()
+ while True:
+ if viewer.window.key_down("c"):
+ break
+ self.base_env.render_human()
+
+ def setup_planner(self):
+ link_names = [link.get_name() for link in self.robot.get_links()]
+ joint_names = [joint.get_name() for joint in self.robot.get_active_joints()]
+ planner = mplib.Planner(
+ urdf=self.env_agent.urdf_path,
+ srdf=self.env_agent.urdf_path.replace(".urdf", ".srdf"),
+ user_link_names=link_names,
+ user_joint_names=joint_names,
+ move_group="panda_hand_tcp",
+ joint_vel_limits=np.ones(7) * self.joint_vel_limits,
+ joint_acc_limits=np.ones(7) * self.joint_acc_limits,
+ )
+ planner.set_base_pose(np.hstack([self.base_pose.p, self.base_pose.q]))
+ return planner
+
+ def follow_path(self, result, refine_steps: int = 0):
+ n_step = result["position"].shape[0]
+ for i in range(n_step + refine_steps):
+ qpos = result["position"][min(i, n_step - 1)]
+ if self.control_mode == "pd_joint_pos_vel":
+ qvel = result["velocity"][min(i, n_step - 1)]
+ action = np.hstack([qpos, qvel])
+ else:
+ action = np.hstack([qpos])
+ obs, reward, terminated, truncated, info = self.env.step(action)
+ self.elapsed_steps += 1
+ if self.print_env_info:
+ print(
+ f"[{self.elapsed_steps:3}] Env Output: reward={reward} info={info}"
+ )
+ if self.vis:
+ self.base_env.render_human()
+ return obs, reward, terminated, truncated, info
+
+ def move_to_pose_with_RRTConnect(
+ self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0
+ ):
+ pose = to_sapien_pose(pose)
+ if self.grasp_pose_visual is not None:
+ self.grasp_pose_visual.set_pose(pose)
+ pose = sapien.Pose(p=pose.p, q=pose.q)
+ result = self.planner.plan_qpos_to_pose(
+ np.concatenate([pose.p, pose.q]),
+ self.robot.get_qpos().cpu().numpy()[0],
+ time_step=self.base_env.control_timestep,
+ use_point_cloud=self.use_point_cloud,
+ wrt_world=True,
+ )
+ if result["status"] != "Success":
+ print(result["status"])
+ self.render_wait()
+ return -1
+ self.render_wait()
+ if dry_run:
+ return result
+ return self.follow_path(result, refine_steps=refine_steps)
+
+ def move_to_pose_with_screw(
+ self, pose: sapien.Pose, dry_run: bool = False, refine_steps: int = 0
+ ):
+ pose = to_sapien_pose(pose)
+ # try screw two times before giving up
+ pose = sapien.Pose(p=pose.p , q=pose.q)
+ result = self.planner.plan_screw(
+ np.concatenate([pose.p, pose.q]),
+ self.robot.get_qpos().cpu().numpy()[0],
+ time_step=self.base_env.control_timestep,
+ use_point_cloud=self.use_point_cloud,
+ )
+ if result["status"] != "Success":
+ result = self.planner.plan_screw(
+ np.concatenate([pose.p, pose.q]),
+ self.robot.get_qpos().cpu().numpy()[0],
+ time_step=self.base_env.control_timestep,
+ use_point_cloud=self.use_point_cloud,
+ )
+ if result["status"] != "Success":
+ print(result["status"])
+ self.render_wait()
+ return -1
+ self.render_wait()
+ if dry_run:
+ return result
+ return self.follow_path(result, refine_steps=refine_steps)
+
+ def add_box_collision(self, extents: np.ndarray, pose: sapien.Pose):
+ self.use_point_cloud = True
+ box = trimesh.creation.box(extents, transform=pose.to_transformation_matrix())
+ pts, _ = trimesh.sample.sample_surface(box, 256)
+ if self.all_collision_pts is None:
+ self.all_collision_pts = pts
+ else:
+ self.all_collision_pts = np.vstack([self.all_collision_pts, pts])
+ self.planner.update_point_cloud(self.all_collision_pts)
+
+ def add_collision_pts(self, pts: np.ndarray):
+ if self.all_collision_pts is None:
+ self.all_collision_pts = pts
+ else:
+ self.all_collision_pts = np.vstack([self.all_collision_pts, pts])
+ self.planner.update_point_cloud(self.all_collision_pts)
+
+ def clear_collisions(self):
+ self.all_collision_pts = None
+ self.use_point_cloud = False
+
+ def close(self):
+ pass
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/teleoperation/__init__.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/teleoperation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/teleoperation/interactive_noahbiarm.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/teleoperation/interactive_noahbiarm.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c7fa74cb8f4c9a256d191fc392c13ea7b7f67b8
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/teleoperation/interactive_noahbiarm.py
@@ -0,0 +1,238 @@
+import argparse
+from ast import parse
+from typing import Annotated
+import gymnasium as gym
+import numpy as np
+import sapien.core as sapien
+from mani_skill.envs.sapien_env import BaseEnv
+
+from mani_skill.examples.motionplanning.noahbiarm.motionplanner import \
+ NoahBiArmMotionPlanningSolver
+
+import sapien.utils.viewer
+import h5py
+import json
+import mani_skill.trajectory.utils as trajectory_utils
+from mani_skill.utils import sapien_utils
+from mani_skill.utils.wrappers.record import RecordEpisode
+import tyro
+from dataclasses import dataclass
+
+@dataclass
+class Args:
+ env_id: Annotated[str, tyro.conf.arg(aliases=["-e"])] = "PickCube-v1"
+ obs_mode: str = "none"
+ robot_uid: Annotated[str, tyro.conf.arg(aliases=["-r"])] = "noahbiarm_r"
+ """The robot to use. Robot setups supported for teleop in this script are panda and panda_stick"""
+ record_dir: str = "demos"
+ """directory to record the demonstration data and optionally videos"""
+ save_video: bool = False
+ """whether to save the videos of the demonstrations after collecting them all"""
+ viewer_shader: str = "rt-fast"
+ """the shader to use for the viewer. 'default' is fast but lower-quality shader, 'rt' and 'rt-fast' are the ray tracing shaders"""
+ video_saving_shader: str = "rt-fast"
+ """the shader to use for the videos of the demonstrations. 'minimal' is the fast shader, 'rt' and 'rt-fast' are the ray tracing shaders"""
+
+def parse_args() -> Args:
+ return tyro.cli(Args)
+
+def main(args: Args):
+ output_dir = f"{args.record_dir}/{args.env_id}/teleop/"
+ env = gym.make(
+ args.env_id,
+ obs_mode=args.obs_mode,
+ control_mode="pd_joint_pos",
+ render_mode="rgb_array",
+ reward_mode="none",
+ enable_shadow=True,
+ viewer_camera_configs=dict(shader_pack=args.viewer_shader),
+ robot_uids="noahbiarm_r"
+ )
+ env = RecordEpisode(
+ env,
+ output_dir=output_dir,
+ trajectory_name="trajectory",
+ save_video=False,
+ info_on_video=False,
+ source_type="teleoperation",
+ source_desc="teleoperation via the click+drag system"
+ )
+ num_trajs = 0
+ seed = 0
+ env.reset(seed=seed)
+ while True:
+ print(f"Collecting trajectory {num_trajs+1}, seed={seed}")
+ code = solve(env, debug=False, vis=True)
+ if code == "quit":
+ num_trajs += 1
+ break
+ elif code == "continue":
+ seed += 1
+ num_trajs += 1
+ env.reset(seed=seed)
+ continue
+ elif code == "restart":
+ env.reset(seed=seed, options=dict(save_trajectory=False))
+ h5_file_path = env._h5_file.filename
+ json_file_path = env._json_path
+ env.close()
+ del env
+ print(f"Trajectories saved to {h5_file_path}")
+ if args.save_video:
+ print(f"Saving videos to {output_dir}")
+
+ trajectory_data = h5py.File(h5_file_path)
+ with open(json_file_path, "r") as f:
+ json_data = json.load(f)
+ env = gym.make(
+ args.env_id,
+ obs_mode=args.obs_mode,
+ control_mode="pd_joint_pos",
+ render_mode="rgb_array",
+ reward_mode="none",
+ human_render_camera_configs=dict(shader_pack=args.video_saving_shader),
+ )
+ env = RecordEpisode(
+ env,
+ output_dir=output_dir,
+ trajectory_name="trajectory",
+ save_video=True,
+ info_on_video=False,
+ save_trajectory=False,
+ video_fps=30
+ )
+ for episode in json_data["episodes"]:
+ traj_id = f"traj_{episode['episode_id']}"
+ data = trajectory_data[traj_id]
+ env.reset(**episode["reset_kwargs"])
+ env_states_list = trajectory_utils.dict_to_list_of_dicts(data["env_states"])
+
+ env.base_env.set_state_dict(env_states_list[0])
+ for action in np.array(data["actions"]):
+ env.step(action)
+
+ trajectory_data.close()
+ env.close()
+ del env
+
+
+
+def solve(env: BaseEnv, debug=False, vis=False):
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+ robot_has_gripper = False
+ planner = NoahBiArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=False,
+ print_env_info=False,
+ joint_acc_limits=0.5,
+ joint_vel_limits=0.5,
+ )
+ viewer = env.render_human()
+
+ last_checkpoint_state = None
+ gripper_open = True
+ def select_panda_hand():
+ viewer.select_entity(sapien_utils.get_obj_by_name(env.agent.robot.links, "Right_Link_Gripper_Down")._objs[0].entity)
+ select_panda_hand()
+ for plugin in viewer.plugins:
+ if isinstance(plugin, sapien.utils.viewer.viewer.TransformWindow):
+ transform_window = plugin
+ while True:
+
+ transform_window.enabled = True
+ # transform_window.update_ghost_objects
+ # print(transform_window.ghost_objects, transform_window._gizmo_pose)
+ # planner.grasp_pose_visual.set_pose(transform_window._gizmo_pose)
+
+ env.render_human()
+ execute_current_pose = False
+ if viewer.window.key_press("h"):
+ print("""Available commands:
+ h: print this help menu
+ g: toggle gripper to close/open (if there is a gripper)
+ u: move the panda hand up
+ j: move the panda hand down
+ arrow_keys: move the panda hand in the direction of the arrow keys
+ n: execute command via motion planning to make the robot move to the target pose indicated by the ghost panda arm
+ c: stop this episode and record the trajectory and move on to a new episode
+ q: quit the script and stop collecting data. Save trajectories and optionally videos.
+ """)
+ pass
+ # elif viewer.window.key_press("k"):
+ # print("Saving checkpoint")
+ # last_checkpoint_state = env.get_state_dict()
+ # elif viewer.window.key_press("l"):
+ # if last_checkpoint_state is not None:
+ # print("Loading previous checkpoint")
+ # env.set_state_dict(last_checkpoint_state)
+ # else:
+ # print("Could not find previous checkpoint")
+ elif viewer.window.key_press("q"):
+ return "quit"
+ elif viewer.window.key_press("c"):
+ return "continue"
+ # elif viewer.window.key_press("r"):
+ # viewer.select_entity(None)
+ # return "restart"
+ # elif viewer.window.key_press("t"):
+ # # TODO (stao): change from position transform to rotation transform
+ # pass
+ elif viewer.window.key_press("n"):
+ execute_current_pose = True
+ elif viewer.window.key_press("g") and robot_has_gripper:
+ if gripper_open:
+ gripper_open = False
+ _, reward, _ ,_, info = planner.close_gripper()
+ else:
+ gripper_open = True
+ _, reward, _ ,_, info = planner.open_gripper()
+ print(f"Reward: {reward}, Info: {info}")
+ elif viewer.window.key_press("u"):
+ select_panda_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, 0, -0.01])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("j"):
+ select_panda_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, 0, +0.01])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("down"):
+ select_panda_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[+0.01, 0, 0])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("up"):
+ select_panda_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[-0.01, 0, 0])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("right"):
+ select_panda_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, -0.01, 0])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("left"):
+ select_panda_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, +0.01, 0])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ if execute_current_pose:
+ # z-offset of end-effector gizmo to TCP position is hardcoded for the panda robot here
+ if env.unwrapped.robot_uids == "panda" or env.unwrapped.robot_uids == "panda_wristcam":
+ result = planner.move_to_pose_with_screw(transform_window._gizmo_pose * sapien.Pose([0, 0, 0.1]), dry_run=True)
+ elif env.unwrapped.robot_uids == "panda_stick":
+ result = planner.move_to_pose_with_screw(transform_window._gizmo_pose * sapien.Pose([0, 0, 0.15]), dry_run=True)
+ if result != -1 and len(result["position"]) < 150:
+ _, reward, _ ,_, info = planner.follow_path(result)
+ print(f"Reward: {reward}, Info: {info}")
+ else:
+ if result == -1: print("Plan failed")
+ else: print("Generated motion plan was too long. Try a closer sub-goal")
+ execute_current_pose = False
+
+
+
+ return args
+if __name__ == "__main__":
+ main(parse_args())
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/teleoperation/interactive_panda.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/teleoperation/interactive_panda.py
new file mode 100644
index 0000000000000000000000000000000000000000..00fffdd21d87012f962ebd2c9fe848a21a18a12c
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/teleoperation/interactive_panda.py
@@ -0,0 +1,251 @@
+import argparse
+from ast import parse
+from typing import Annotated
+import gymnasium as gym
+import numpy as np
+import sapien.core as sapien
+from mani_skill.envs.sapien_env import BaseEnv
+
+from mani_skill.examples.motionplanning.panda.motionplanner import \
+ PandaArmMotionPlanningSolver
+from mani_skill.examples.motionplanning.panda_stick.motionplanner import \
+ PandaStickMotionPlanningSolver
+import sapien.utils.viewer
+import h5py
+import json
+import mani_skill.trajectory.utils as trajectory_utils
+from mani_skill.utils import sapien_utils
+from mani_skill.utils.wrappers.record import RecordEpisode
+import tyro
+from dataclasses import dataclass
+
+@dataclass
+class Args:
+ env_id: Annotated[str, tyro.conf.arg(aliases=["-e"])] = "PickCube-v1"
+ obs_mode: str = "none"
+ robot_uid: Annotated[str, tyro.conf.arg(aliases=["-r"])] = "panda"
+ """The robot to use. Robot setups supported for teleop in this script are panda and panda_stick"""
+ record_dir: str = "demos"
+ """directory to record the demonstration data and optionally videos"""
+ save_video: bool = False
+ """whether to save the videos of the demonstrations after collecting them all"""
+ viewer_shader: str = "rt-fast"
+ """the shader to use for the viewer. 'default' is fast but lower-quality shader, 'rt' and 'rt-fast' are the ray tracing shaders"""
+ video_saving_shader: str = "rt-fast"
+ """the shader to use for the videos of the demonstrations. 'minimal' is the fast shader, 'rt' and 'rt-fast' are the ray tracing shaders"""
+
+def parse_args() -> Args:
+ return tyro.cli(Args)
+
+def main(args: Args):
+ output_dir = f"{args.record_dir}/{args.env_id}/teleop/"
+ env = gym.make(
+ args.env_id,
+ obs_mode=args.obs_mode,
+ control_mode="pd_joint_pos",
+ render_mode="rgb_array",
+ reward_mode="none",
+ enable_shadow=True,
+ viewer_camera_configs=dict(shader_pack=args.viewer_shader)
+ )
+ env = RecordEpisode(
+ env,
+ output_dir=output_dir,
+ trajectory_name="trajectory",
+ save_video=False,
+ info_on_video=False,
+ source_type="teleoperation",
+ source_desc="teleoperation via the click+drag system"
+ )
+ num_trajs = 0
+ seed = 0
+ env.reset(seed=seed)
+ while True:
+ print(f"Collecting trajectory {num_trajs+1}, seed={seed}")
+ code = solve(env, debug=False, vis=True)
+ if code == "quit":
+ num_trajs += 1
+ break
+ elif code == "continue":
+ seed += 1
+ num_trajs += 1
+ env.reset(seed=seed)
+ continue
+ elif code == "restart":
+ env.reset(seed=seed, options=dict(save_trajectory=False))
+ h5_file_path = env._h5_file.filename
+ json_file_path = env._json_path
+ env.close()
+ del env
+ print(f"Trajectories saved to {h5_file_path}")
+ if args.save_video:
+ print(f"Saving videos to {output_dir}")
+
+ trajectory_data = h5py.File(h5_file_path)
+ with open(json_file_path, "r") as f:
+ json_data = json.load(f)
+ env = gym.make(
+ args.env_id,
+ obs_mode=args.obs_mode,
+ control_mode="pd_joint_pos",
+ render_mode="rgb_array",
+ reward_mode="none",
+ human_render_camera_configs=dict(shader_pack=args.video_saving_shader),
+ )
+ env = RecordEpisode(
+ env,
+ output_dir=output_dir,
+ trajectory_name="trajectory",
+ save_video=True,
+ info_on_video=False,
+ save_trajectory=False,
+ video_fps=30
+ )
+ for episode in json_data["episodes"]:
+ traj_id = f"traj_{episode['episode_id']}"
+ data = trajectory_data[traj_id]
+ env.reset(**episode["reset_kwargs"])
+ env_states_list = trajectory_utils.dict_to_list_of_dicts(data["env_states"])
+
+ env.base_env.set_state_dict(env_states_list[0])
+ for action in np.array(data["actions"]):
+ env.step(action)
+
+ trajectory_data.close()
+ env.close()
+ del env
+
+
+
+def solve(env: BaseEnv, debug=False, vis=False):
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+ robot_has_gripper = False
+ if env.unwrapped.robot_uids == "panda_stick":
+ planner = PandaStickMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=False,
+ print_env_info=False,
+ joint_acc_limits=0.5,
+ joint_vel_limits=0.5,
+ )
+ elif env.unwrapped.robot_uids == "panda" or env.unwrapped.robot_uids == "panda_wristcam":
+ robot_has_gripper = True
+ planner = PandaArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=False,
+ print_env_info=False,
+ joint_acc_limits=0.5,
+ joint_vel_limits=0.5,
+ )
+ viewer = env.render_human()
+
+ last_checkpoint_state = None
+ gripper_open = True
+ def select_panda_hand():
+ viewer.select_entity(sapien_utils.get_obj_by_name(env.agent.robot.links, "panda_hand")._objs[0].entity)
+ select_panda_hand()
+ for plugin in viewer.plugins:
+ if isinstance(plugin, sapien.utils.viewer.viewer.TransformWindow):
+ transform_window = plugin
+ while True:
+
+ transform_window.enabled = True
+ # transform_window.update_ghost_objects
+ # print(transform_window.ghost_objects, transform_window._gizmo_pose)
+ # planner.grasp_pose_visual.set_pose(transform_window._gizmo_pose)
+
+ env.render_human()
+ execute_current_pose = False
+ if viewer.window.key_press("h"):
+ print("""Available commands:
+ h: print this help menu
+ g: toggle gripper to close/open (if there is a gripper)
+ u: move the panda hand up
+ j: move the panda hand down
+ arrow_keys: move the panda hand in the direction of the arrow keys
+ n: execute command via motion planning to make the robot move to the target pose indicated by the ghost panda arm
+ c: stop this episode and record the trajectory and move on to a new episode
+ q: quit the script and stop collecting data. Save trajectories and optionally videos.
+ """)
+ pass
+ # elif viewer.window.key_press("k"):
+ # print("Saving checkpoint")
+ # last_checkpoint_state = env.get_state_dict()
+ # elif viewer.window.key_press("l"):
+ # if last_checkpoint_state is not None:
+ # print("Loading previous checkpoint")
+ # env.set_state_dict(last_checkpoint_state)
+ # else:
+ # print("Could not find previous checkpoint")
+ elif viewer.window.key_press("q"):
+ return "quit"
+ elif viewer.window.key_press("c"):
+ return "continue"
+ # elif viewer.window.key_press("r"):
+ # viewer.select_entity(None)
+ # return "restart"
+ # elif viewer.window.key_press("t"):
+ # # TODO (stao): change from position transform to rotation transform
+ # pass
+ elif viewer.window.key_press("n"):
+ execute_current_pose = True
+ elif viewer.window.key_press("g") and robot_has_gripper:
+ if gripper_open:
+ gripper_open = False
+ _, reward, _ ,_, info = planner.close_gripper()
+ else:
+ gripper_open = True
+ _, reward, _ ,_, info = planner.open_gripper()
+ print(f"Reward: {reward}, Info: {info}")
+ elif viewer.window.key_press("u"):
+ select_panda_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, 0, -0.01])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("j"):
+ select_panda_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, 0, +0.01])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("down"):
+ select_panda_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[+0.01, 0, 0])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("up"):
+ select_panda_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[-0.01, 0, 0])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("right"):
+ select_panda_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, -0.01, 0])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("left"):
+ select_panda_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, +0.01, 0])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ if execute_current_pose:
+ # z-offset of end-effector gizmo to TCP position is hardcoded for the panda robot here
+ if env.unwrapped.robot_uids == "panda" or env.unwrapped.robot_uids == "panda_wristcam":
+ result = planner.move_to_pose_with_screw(transform_window._gizmo_pose * sapien.Pose([0, 0, 0.1]), dry_run=True)
+ elif env.unwrapped.robot_uids == "panda_stick":
+ result = planner.move_to_pose_with_screw(transform_window._gizmo_pose * sapien.Pose([0, 0, 0.15]), dry_run=True)
+ if result != -1 and len(result["position"]) < 150:
+ _, reward, _ ,_, info = planner.follow_path(result)
+ print(f"Reward: {reward}, Info: {info}")
+ else:
+ if result == -1: print("Plan failed")
+ else: print("Generated motion plan was too long. Try a closer sub-goal")
+ execute_current_pose = False
+
+
+
+ return args
+if __name__ == "__main__":
+ main(parse_args())
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/teleoperation/interactive_piper.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/teleoperation/interactive_piper.py
new file mode 100644
index 0000000000000000000000000000000000000000..0dcea48b209538ca9f35d590b1ddda1e876669cb
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/examples/teleoperation/interactive_piper.py
@@ -0,0 +1,206 @@
+import argparse
+from typing import Annotated
+import gymnasium as gym
+import numpy as np
+import sapien.core as sapien
+from mani_skill.envs.sapien_env import BaseEnv
+
+from mani_skill.examples.motionplanning.agilex.motionplanner import PiperArmMotionPlanningSolver
+import sapien.utils.viewer
+import h5py
+import json
+import mani_skill.trajectory.utils as trajectory_utils
+from mani_skill.utils import sapien_utils
+from mani_skill.utils.wrappers.record import RecordEpisode
+import tyro
+from dataclasses import dataclass
+
+@dataclass
+class Args:
+ env_id: Annotated[str, tyro.conf.arg(aliases=["-e"])] = "PickCube-v1"
+ obs_mode: str = "none"
+ robot_uid: Annotated[str, tyro.conf.arg(aliases=["-r"])] = "piper"
+ """The robot to use. Robot setups supported for teleop in this script are piper"""
+ record_dir: str = "demos"
+ """directory to record the demonstration data and optionally videos"""
+ save_video: bool = False
+ """whether to save the videos of the demonstrations after collecting them all"""
+ viewer_shader: str = "rt-fast"
+ """the shader to use for the viewer. 'default' is fast but lower-quality shader, 'rt' and 'rt-fast' are the ray tracing shaders"""
+ video_saving_shader: str = "rt-fast"
+ """the shader to use for the videos of the demonstrations. 'minimal' is the fast shader, 'rt' and 'rt-fast' are the ray tracing shaders"""
+
+def parse_args() -> Args:
+ return tyro.cli(Args)
+
+def main(args: Args):
+ output_dir = f"{args.record_dir}/{args.env_id}/teleop/"
+ env = gym.make(
+ args.env_id,
+ obs_mode=args.obs_mode,
+ control_mode="pd_joint_pos",
+ render_mode="rgb_array",
+ reward_mode="none",
+ enable_shadow=True,
+ viewer_camera_configs=dict(shader_pack=args.viewer_shader)
+ )
+ env = RecordEpisode(
+ env,
+ output_dir=output_dir,
+ trajectory_name="trajectory",
+ save_video=False,
+ info_on_video=False,
+ source_type="teleoperation",
+ source_desc="teleoperation via the click+drag system"
+ )
+ num_trajs = 0
+ seed = 0
+ env.reset(seed=seed)
+ while True:
+ print(f"Collecting trajectory {num_trajs+1}, seed={seed}")
+ code = solve(env, debug=False, vis=True)
+ if code == "quit":
+ num_trajs += 1
+ break
+ elif code == "continue":
+ seed += 1
+ num_trajs += 1
+ env.reset(seed=seed)
+ continue
+ elif code == "restart":
+ env.reset(seed=seed, options=dict(save_trajectory=False))
+ h5_file_path = env._h5_file.filename
+ json_file_path = env._json_path
+ env.close()
+ del env
+ print(f"Trajectories saved to {h5_file_path}")
+ if args.save_video:
+ print(f"Saving videos to {output_dir}")
+
+ trajectory_data = h5py.File(h5_file_path)
+ with open(json_file_path, "r") as f:
+ json_data = json.load(f)
+ env = gym.make(
+ args.env_id,
+ obs_mode=args.obs_mode,
+ control_mode="pd_joint_pos",
+ render_mode="rgb_array",
+ reward_mode="none",
+ human_render_camera_configs=dict(shader_pack=args.video_saving_shader),
+ )
+ env = RecordEpisode(
+ env,
+ output_dir=output_dir,
+ trajectory_name="trajectory",
+ save_video=True,
+ info_on_video=False,
+ save_trajectory=False,
+ video_fps=30
+ )
+ for episode in json_data["episodes"]:
+ traj_id = f"traj_{episode['episode_id']}"
+ data = trajectory_data[traj_id]
+ env.reset(**episode["reset_kwargs"])
+ env_states_list = trajectory_utils.dict_to_list_of_dicts(data["env_states"])
+
+ env.base_env.set_state_dict(env_states_list[0])
+ for action in np.array(data["actions"]):
+ env.step(action)
+
+ trajectory_data.close()
+ env.close()
+ del env
+
+def solve(env: BaseEnv, debug=False, vis=False):
+ assert env.unwrapped.control_mode in [
+ "pd_joint_pos",
+ "pd_joint_pos_vel",
+ ], env.unwrapped.control_mode
+ robot_has_gripper = True
+ planner = PiperArmMotionPlanningSolver(
+ env,
+ debug=debug,
+ vis=vis,
+ base_pose=env.unwrapped.agent.robot.pose,
+ visualize_target_grasp_pose=False,
+ print_env_info=False,
+ joint_acc_limits=0.5,
+ joint_vel_limits=0.5,
+ )
+ viewer = env.render_human()
+
+ last_checkpoint_state = None
+ gripper_open = True
+ def select_piper_hand():
+ viewer.select_entity(sapien_utils.get_obj_by_name(env.agent.robot.links, "gripper_base")._objs[0].entity)
+ select_piper_hand()
+ for plugin in viewer.plugins:
+ if isinstance(plugin, sapien.utils.viewer.viewer.TransformWindow):
+ transform_window = plugin
+ while True:
+
+ transform_window.enabled = True
+ env.render_human()
+ execute_current_pose = False
+ if viewer.window.key_press("h"):
+ print("""Available commands:
+ h: print this help menu
+ g: toggle gripper to close/open (if there is a gripper)
+ u: move the piper hand up
+ j: move the piper hand down
+ arrow_keys: move the piper hand in the direction of the arrow keys
+ n: execute command via motion planning to make the robot move to the target pose indicated by the ghost piper arm
+ c: stop this episode and record the trajectory and move on to a new episode
+ q: quit the script and stop collecting data. Save trajectories and optionally videos.
+ """)
+ pass
+ elif viewer.window.key_press("q"):
+ return "quit"
+ elif viewer.window.key_press("c"):
+ return "continue"
+ elif viewer.window.key_press("n"):
+ execute_current_pose = True
+ elif viewer.window.key_press("g") and robot_has_gripper:
+ if gripper_open:
+ gripper_open = False
+ _, reward, _ ,_, info = planner.close_gripper()
+ else:
+ gripper_open = True
+ _, reward, _ ,_, info = planner.open_gripper()
+ print(f"Reward: {reward}, Info: {info}")
+ elif viewer.window.key_press("u"):
+ select_piper_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, 0, -0.01])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("j"):
+ select_piper_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, 0, +0.01])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("down"):
+ select_piper_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[+0.01, 0, 0])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("up"):
+ select_piper_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[-0.01, 0, 0])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("right"):
+ select_piper_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, -0.01, 0])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ elif viewer.window.key_press("left"):
+ select_piper_hand()
+ transform_window.gizmo_matrix = (transform_window._gizmo_pose * sapien.Pose(p=[0, +0.01, 0])).to_transformation_matrix()
+ transform_window.update_ghost_objects()
+ if execute_current_pose:
+ result = planner.move_to_pose_with_screw(transform_window._gizmo_pose * sapien.Pose([0, 0, 0.1]), dry_run=True)
+ if result != -1 and len(result["position"]) < 150:
+ _, reward, _ ,_, info = planner.follow_path(result)
+ print(f"Reward: {reward}, Info: {info}")
+ else:
+ if result == -1: print("Plan failed")
+ else: print("Generated motion plan was too long. Try a closer sub-goal")
+ execute_current_pose = False
+
+if __name__ == "__main__":
+ main(parse_args())
\ No newline at end of file
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/__init__.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a1568442983d290291d0ad69edc80b4585afe3d5
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/__init__.py
@@ -0,0 +1,2 @@
+from .shaders import PREBUILT_SHADER_CONFIGS, ShaderConfig, set_shader_pack
+from .version import SAPIEN_RENDER_SYSTEM
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/__pycache__/__init__.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c58adfb418cc2af107fbefd2e5e9999d8081b8b7
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/__pycache__/__init__.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/__pycache__/shaders.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/__pycache__/shaders.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e636cc316b52ac2ac780adf55647e7c14ad1987a
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/__pycache__/shaders.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/__pycache__/version.cpython-310.pyc b/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/__pycache__/version.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9c9fc7c17f590f8e7f7d7af80110c509640de62f
Binary files /dev/null and b/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/__pycache__/version.cpython-310.pyc differ
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/shaders.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/shaders.py
new file mode 100644
index 0000000000000000000000000000000000000000..e2248dcd0d8958e0b64a5a06406227ed587d58c2
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/shaders.py
@@ -0,0 +1,165 @@
+from dataclasses import dataclass, field
+from typing import Any, Callable, Dict, List
+
+import sapien
+import torch
+
+from mani_skill.render.version import SAPIEN_RENDER_SYSTEM
+
+
+@dataclass
+class ShaderConfig:
+ """simple shader config dataclass to determine which shader pack to use, textures to render, and any possible configurations for the shader pack. Can be used as part of the CameraConfig
+ to further customize the camera output.
+
+ A shader config must define which shader pack to use, and which textures to consider rendering. Additional shader pack configs can be passed which are specific to the shader config itself
+ and can modify shader settings.
+
+ Texture transforms must be defined and are used to process the texture data into more standard formats for use. Some textures might be combined textures (e.g. depth+segmentation together)
+ due to shader optimizations. texture transforms must then split these combined textures back into their component parts.
+
+ The standard image modalities and expected dtypes/shapes are:
+ - rgb (torch.uint8, shape: [H, W, 3])
+ - depth (torch.int16, shape: [H, W])
+ - segmentation (torch.int16, shape: [H, W])
+ - position (torch.float32, shape: [H, W, 3]) (infinite points have segmentation == 0)
+ """
+
+ shader_pack: str
+ texture_names: Dict[str, List[str]] = field(default_factory=dict)
+ """dictionary mapping shader texture names to the image modalities that are rendered. e.g. Color, Depth, Segmentation, etc."""
+ shader_pack_config: Dict[str, Any] = field(default_factory=dict)
+ """configs for the shader pack. for e.g. the ray tracing shader you can configure the denoiser, samples per pixel, etc."""
+
+ texture_transforms: Dict[
+ str, Callable[[torch.Tensor], Dict[str, torch.Tensor]]
+ ] = field(default_factory=dict)
+ """texture transform functions that map each texture name to a function that converts the texture data into one or more standard image modalities. The return type should be a
+ dictionary with keys equal to the names of standard image modalities and values equal to the transformed data"""
+
+
+def default_position_texture_transform(data: torch.Tensor):
+ position = (data[..., :3] * 1000).to(torch.int16)
+ depth = -position[..., [2]]
+ return {
+ "depth": depth,
+ "position": position,
+ }
+
+
+rt_texture_transforms = {
+ "Color": lambda data: {"rgb": (data[..., :3] * 255).to(torch.uint8)},
+ "Position": default_position_texture_transform,
+ # note in default shader pack, 0 is visual shape / mesh, 1 is actor/link level, 2 is parallel scene ID, 3 is unused
+ "Segmentation": lambda data: {"segmentation": data[..., 1][..., None]},
+ "Normal": lambda data: {"normal": data[..., :3]},
+ "Albedo": lambda data: {"albedo": (data[..., :3] * 255).to(torch.uint8)},
+}
+rt_texture_names = {
+ "Color": ["rgb"],
+ "Position": ["position", "depth"],
+ "Segmentation": ["segmentation"],
+ "Normal": ["normal"],
+ "Albedo": ["albedo"],
+}
+
+
+PREBUILT_SHADER_CONFIGS = {
+ "minimal": ShaderConfig(
+ shader_pack="minimal",
+ texture_names={
+ "Color": ["rgb"],
+ "PositionSegmentation": ["position", "depth", "segmentation"],
+ },
+ texture_transforms={
+ "Color": lambda data: {"rgb": data[..., :3]},
+ "PositionSegmentation": lambda data: {
+ "position": data[
+ ..., :3
+ ], # position for minimal is in millimeters and is uint16
+ "depth": -data[..., [2]],
+ "segmentation": data[..., [3]],
+ },
+ },
+ ),
+ "default": ShaderConfig(
+ shader_pack="default",
+ texture_names={
+ "Color": ["rgb"],
+ "Position": ["position", "depth"],
+ "Segmentation": ["segmentation"],
+ "Normal": ["normal"],
+ "Albedo": ["albedo"],
+ },
+ texture_transforms={
+ "Color": lambda data: {"rgb": (data[..., :3] * 255).to(torch.uint8)},
+ "Position": default_position_texture_transform,
+ # note in default shader pack, 0 is visual shape / mesh, 1 is actor/link level, 2 is parallel scene ID, 3 is unused
+ "Segmentation": lambda data: {"segmentation": data[..., 1][..., None]},
+ "Normal": lambda data: {"normal": data[..., :3]},
+ "Albedo": lambda data: {"albedo": (data[..., :3] * 255).to(torch.uint8)},
+ },
+ ),
+ "rt": ShaderConfig(
+ shader_pack="rt",
+ texture_names=rt_texture_names,
+ shader_pack_config={
+ "ray_tracing_samples_per_pixel": 32,
+ "ray_tracing_path_depth": 16,
+ "ray_tracing_denoiser": "optix",
+ },
+ texture_transforms=rt_texture_transforms,
+ ),
+ "rt-med": ShaderConfig(
+ shader_pack="rt",
+ texture_names=rt_texture_names,
+ shader_pack_config={
+ "ray_tracing_samples_per_pixel": 4,
+ "ray_tracing_path_depth": 3,
+ "ray_tracing_denoiser": "optix",
+ },
+ texture_transforms=rt_texture_transforms,
+ ),
+ "rt-fast": ShaderConfig(
+ shader_pack="rt",
+ texture_names=rt_texture_names,
+ shader_pack_config={
+ "ray_tracing_samples_per_pixel": 2,
+ "ray_tracing_path_depth": 1,
+ "ray_tracing_denoiser": "optix",
+ },
+ texture_transforms=rt_texture_transforms,
+ ),
+}
+"""pre-defined shader configs"""
+
+
+def set_shader_pack(shader_config: ShaderConfig):
+ """sets a global shader pack for cameras. Used only for the 3.0 SAPIEN rendering system"""
+ if SAPIEN_RENDER_SYSTEM == "3.0":
+ sapien.render.set_camera_shader_dir(shader_config.shader_pack)
+ if shader_config.shader_pack == "minimal":
+ sapien.render.set_camera_shader_dir("minimal")
+ sapien.render.set_picture_format("Color", "r8g8b8a8unorm")
+ sapien.render.set_picture_format("ColorRaw", "r8g8b8a8unorm")
+ sapien.render.set_picture_format("PositionSegmentation", "r16g16b16a16sint")
+ if shader_config.shader_pack == "default":
+ sapien.render.set_camera_shader_dir("default")
+ sapien.render.set_picture_format("Color", "r32g32b32a32sfloat")
+ sapien.render.set_picture_format("ColorRaw", "r32g32b32a32sfloat")
+ sapien.render.set_picture_format(
+ "PositionSegmentation", "r32g32b32a32sfloat"
+ )
+ if shader_config.shader_pack[:2] == "rt":
+ sapien.render.set_ray_tracing_samples_per_pixel(
+ shader_config.shader_pack_config["ray_tracing_samples_per_pixel"]
+ )
+ sapien.render.set_ray_tracing_path_depth(
+ shader_config.shader_pack_config["ray_tracing_path_depth"]
+ )
+ sapien.render.set_ray_tracing_denoiser(
+ shader_config.shader_pack_config["ray_tracing_denoiser"]
+ )
+ elif SAPIEN_RENDER_SYSTEM == "3.1":
+ # sapien.render.set_camera_shader_pack_name would set a global default
+ pass
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/version.py b/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/version.py
new file mode 100644
index 0000000000000000000000000000000000000000..37b16733a99334acb0e6849127f04b30913af0d6
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/render/version.py
@@ -0,0 +1,8 @@
+SAPIEN_RENDER_SYSTEM = "3.0"
+try:
+ # NOTE (stao): hacky way to determine which render system in sapien 3 is being used for testing purposes
+ from sapien.wrapper.scene import get_camera_shader_pack
+
+ SAPIEN_RENDER_SYSTEM = "3.1"
+except:
+ pass
diff --git a/project/ManiSkill3/src/maniskill3_environment/mani_skill/shaders/postprocessing.comp b/project/ManiSkill3/src/maniskill3_environment/mani_skill/shaders/postprocessing.comp
new file mode 100644
index 0000000000000000000000000000000000000000..d5366281989b437fae073a4bc92a7bfa694ceeae
--- /dev/null
+++ b/project/ManiSkill3/src/maniskill3_environment/mani_skill/shaders/postprocessing.comp
@@ -0,0 +1,81 @@
+#version 450
+
+#include "push_constant.glsl"
+#include "grain.glsl"
+
+layout(set = 0, binding = 0, rgba32f) uniform readonly image2D HdrColor;
+layout(set = 0, binding = 1, rgba32f) uniform writeonly image2D Color;
+
+vec3 Gamma(vec3 x) {
+ return clamp(pow(x, vec3(1/2.2)), 0.0, 1.0);
+}
+
+vec3 sRGB(vec3 x) {
+ bvec3 cutoff = lessThan(x, vec3(0.0031308));
+ vec3 higher = vec3(1.055) * pow(x, vec3(1.0/2.4)) - vec3(0.055);
+ vec3 lower = x * vec3(12.92);
+ return clamp(mix(higher, lower, cutoff), 0.0, 1.0);
+}
+
+const mat3 ACESInputMat = mat3(
+ 0.59719, 0.35458, 0.04823,
+ 0.07600, 0.90834, 0.01566,
+ 0.02840, 0.13383, 0.83777
+);
+
+const mat3 ACESOutputMat = mat3(
+ 1.60475, -0.53108, -0.07367,
+ -0.10208, 1.10813, -0.00605,
+ -0.00327, -0.07276, 1.07602
+);
+
+vec3 RRTAndODTFit(vec3 v)
+{
+ vec3 a = v * (v + 0.0245786) - 0.000090537;
+ vec3 b = v * (0.983729 * v + 0.4329510) + 0.238081;
+ return a / b;
+}
+
+vec3 ACESsRGB(vec3 color) {
+ color = color * ACESInputMat;
+ color = RRTAndODTFit(color);
+ color = color * ACESOutputMat;
+ color = clamp(color, 0.0, 1.0);
+ return sRGB(color);
+}
+
+void main() {
+ uint x = gl_GlobalInvocationID.x;
+ uint y = gl_GlobalInvocationID.y;
+ vec4 color = imageLoad(HdrColor, ivec2(gl_GlobalInvocationID.xy));
+
+ // make nan visible for debugging
+ if (isnan(color.x + color.y + color.z)) {
+ color = vec4(1000.0, 0, 1000.0, 1);
+ }
+
+
+ if (toneMapper == 1) {
+ color = vec4(sRGB(color.rgb * exposure), color.a);
+ } else if (toneMapper == 2) {
+ color = vec4(ACESsRGB(color.rgb * exposure), color.a);
+ } else {
+ color = vec4(Gamma(color.rgb * exposure), color.a);
+ }
+
+ //imageStore(Color, ivec2(gl_GlobalInvocationID.xy), color);
+
+ // vec3 backgroundColor = color.rgb;
+ // vec2 resolution = vec2(gl_NumWorkGroups.xy);
+ // vec2 texCoord = vec2(gl_GlobalInvocationID.xy) / resolution;
+
+ // float grainSize = 2.0;
+ // vec3 g = vec3(grain(texCoord, resolution / grainSize));
+ // vec3 color2 = blendSoftLight(backgroundColor, g);
+ // float luminance = luma(backgroundColor);
+ // float response = smoothstep(0.05, 0.5, luminance);
+ // color2.rgb = mix(color2, backgroundColor, pow(response, 2.0));
+ color = mix(vec4(0.282, 0.294, 0.322, 1), color, color.a);
+ imageStore(Color, ivec2(gl_GlobalInvocationID.xy), color);
+ //imageStore(Color, ivec2(gl_GlobalInvocationID.xy), vec4(backgroundColor, color.a));
+}