Chelsea707 committed on
Commit 1f0781b · verified · 1 Parent(s): 991a313

MinerU Batch 8f46ae6c-07be-4c29-a893-b2f0e7aaabad (Part 2/8)

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. .gitattributes +8 -0
  2. data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_content_list.json +1748 -0
  3. data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_model.json +2460 -0
  4. data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_origin.pdf +3 -0
  5. data/2025/2504_06xxx/2504.06156/full.md +359 -0
  6. data/2025/2504_06xxx/2504.06156/images/00f85838005136fdca15b5fe4bb78ee82f7340dea6f3e5a9b2e65bd76936a94c.jpg +3 -0
  7. data/2025/2504_06xxx/2504.06156/images/0d76e53a8c6ce1241acff7eeef8a2fcf95cd1b9821ab719fc786e6b8b40a1ec2.jpg +3 -0
  8. data/2025/2504_06xxx/2504.06156/images/145b0ff91ccc913134917d84f27ec9288b5cc0e7a4e6ccca1027d09a1eb5522f.jpg +3 -0
  9. data/2025/2504_06xxx/2504.06156/images/20f9fa3b2ed644154b3075e0c925e50cd264ba0e5235b21294c9f2bd1334e309.jpg +3 -0
  10. data/2025/2504_06xxx/2504.06156/images/2a3bf65b454c923e9b5bb949a3116a5dfef20d4712dfc9369a08f9361b171127.jpg +3 -0
  11. data/2025/2504_06xxx/2504.06156/images/2b3f5d9d3857ac4bc064a25dd8f846478edc8d3d4acf76854100e1224e09f2e4.jpg +3 -0
  12. data/2025/2504_06xxx/2504.06156/images/2eb0d57179fb5c021a773de17ac4443e984ccc352e0dc3e5d824297b87a58824.jpg +3 -0
  13. data/2025/2504_06xxx/2504.06156/images/2ec98aac269313a4a3cc98c76d6cba7f37ecc7b2a02ed422fa6eb8b07c3cd183.jpg +3 -0
  14. data/2025/2504_06xxx/2504.06156/images/3c482c5b0658f8f048d2ebe3c95889ab774372793412480b02727eafd0de414c.jpg +3 -0
  15. data/2025/2504_06xxx/2504.06156/images/3ce4769ee6b1bde42a17eee61d58d48bb5431619637f3963972110f5eafc4433.jpg +3 -0
  16. data/2025/2504_06xxx/2504.06156/images/3e856f4f3818833cac9099e64bb7f58858c535a342f9000c483c2c4ffb29e705.jpg +3 -0
  17. data/2025/2504_06xxx/2504.06156/images/41e27ceecac4c9235a249ac029abee0e7fc30124d187cd6077bf037a65e93fd4.jpg +3 -0
  18. data/2025/2504_06xxx/2504.06156/images/45234ed6e963ca64aacca0aeebac163393943ee8f94523b029c03b09faa1b450.jpg +3 -0
  19. data/2025/2504_06xxx/2504.06156/images/4b5c950d25456db7d2d940404eb103086fe309d96067fca9478d24545376c057.jpg +3 -0
  20. data/2025/2504_06xxx/2504.06156/images/4bd4ba61306b1059468cbd4655f3e43c112065002e75360f7ec6cebdc5cb4ee4.jpg +3 -0
  21. data/2025/2504_06xxx/2504.06156/images/4d0acbed64c8854765c2e9c80aa8c7e2abb13d31d7b57777b70dad4d3e6a981a.jpg +3 -0
  22. data/2025/2504_06xxx/2504.06156/images/4dc08d4271758a4aabed3f5e31b55c1d21ebc7da0ae86c7b523043f7db6cbe93.jpg +3 -0
  23. data/2025/2504_06xxx/2504.06156/images/512cac75dfa1a461ecf945a565d55ee173bc79056728365d8bbabcdff20497f1.jpg +3 -0
  24. data/2025/2504_06xxx/2504.06156/images/56817bcfa892e233432daadca6888f4cef9f11efeeb525983604173fded63e17.jpg +3 -0
  25. data/2025/2504_06xxx/2504.06156/images/5a72a662adc1c1ba0bfd167d4f4af69842d450e5c116e4daa0ea7c7387c99b10.jpg +3 -0
  26. data/2025/2504_06xxx/2504.06156/images/5e5f3b7ca4ef1ce5b7a8ef47b005c756ed1fc850e06dd280623fc0528eb1a89d.jpg +3 -0
  27. data/2025/2504_06xxx/2504.06156/images/66ceb8edcaeb309670260252767cb93432455bba815e9dc4a4ca645ef94a855b.jpg +3 -0
  28. data/2025/2504_06xxx/2504.06156/images/67d2c9e7967010da05736086d3a0fca8814cf40da4a222bb5e6737e56f406e1e.jpg +3 -0
  29. data/2025/2504_06xxx/2504.06156/images/6e0490d1099f2e64b741b9dd1f95e5ae865168537ab2ca60ee6fd37e533eacdb.jpg +3 -0
  30. data/2025/2504_06xxx/2504.06156/images/6fd39913d482519aa7b6f7a9a91a5fd878297b9f18bd5d9df7c2afe47a5f641f.jpg +3 -0
  31. data/2025/2504_06xxx/2504.06156/images/7445085cbc517fd3cd93fbb3a2bd9f6db8580e6c84d599414a68d9405529f3b0.jpg +3 -0
  32. data/2025/2504_06xxx/2504.06156/images/8105d743b48c767516e10ef93cc71f7fc5122df736e327dea3f051cc7bfb6c47.jpg +3 -0
  33. data/2025/2504_06xxx/2504.06156/images/827a914e84c52597b4da5d6a8593513b04ba1f5b5f8f15324c3d07e8a040904d.jpg +3 -0
  34. data/2025/2504_06xxx/2504.06156/images/8ffaab5c8e792fcf6faad355dd789f84bac2ae3b63606a4eac5401f023e77b6c.jpg +3 -0
  35. data/2025/2504_06xxx/2504.06156/images/94c8d59ccbd0cb0de5a5b3206f5de9360ecd718d47d6d0d3ca249decbf1ffc98.jpg +3 -0
  36. data/2025/2504_06xxx/2504.06156/images/99c0a32a6a7ff267400458289cc0fbf487ba3fbe191ce416aad8bac7243d1355.jpg +3 -0
  37. data/2025/2504_06xxx/2504.06156/images/a285013001ef5630297fdcd051b65b0ca161561fb3c9bcad3b85e8b0d8170ccc.jpg +3 -0
  38. data/2025/2504_06xxx/2504.06156/images/a7c742b59dc541a88a3273a380335073e194e7444064c0f55bc20ace7ec82882.jpg +3 -0
  39. data/2025/2504_06xxx/2504.06156/images/a9d59bd91c7d90305bf0363e93a7675127b8a317c02f04e3257aa1fddbcebcc7.jpg +3 -0
  40. data/2025/2504_06xxx/2504.06156/images/ad52e3e1fffe97ce097f5acd4e97f9d17c9f5a5940fed40ac9f7275aebb29b3d.jpg +3 -0
  41. data/2025/2504_06xxx/2504.06156/images/b83fcb747acca716d74ef5c58839df6114300388b8d0e6ee2f936782a0e64c43.jpg +3 -0
  42. data/2025/2504_06xxx/2504.06156/images/bad020bfaf946a16dcc60d68034d145033f7b7475137443f7b26c1c2e7ca1978.jpg +3 -0
  43. data/2025/2504_06xxx/2504.06156/images/bafd32d27c34e33981964ae485e4dfba8fcf84c249727c41a5a866d6121787e8.jpg +3 -0
  44. data/2025/2504_06xxx/2504.06156/images/c1639d459b6280e0d616c0b61ca5027d7312dc27193311d49fc82c533e5e3614.jpg +3 -0
  45. data/2025/2504_06xxx/2504.06156/images/c67f8d30bed7d5e78cd491b955ffbcc6ad2890cd3244a3c6ba5862292c5ec665.jpg +3 -0
  46. data/2025/2504_06xxx/2504.06156/images/c6980fdc3266252190f984cdc73c9a2bab1431c731bc03e678d86b4b54eeb2be.jpg +3 -0
  47. data/2025/2504_06xxx/2504.06156/images/c76f0cbfcc0157a2fd0ea97bdb5758c11c6fb5d419f1af561dacdd15498d9a0f.jpg +3 -0
  48. data/2025/2504_06xxx/2504.06156/images/c7ca6a14e745ecd4ed221682030a13fc964ba2820d452880f11c7800ce40073a.jpg +3 -0
  49. data/2025/2504_06xxx/2504.06156/images/ce63d3a7c6dc3449fc08f0a14ed53567368fbb562685332477dc26bd0e8072a3.jpg +3 -0
  50. data/2025/2504_06xxx/2504.06156/images/d26514e671a1cabe35615727660c95426ee9c01df609ba34f6407ddd70a97fc4.jpg +3 -0
.gitattributes CHANGED
@@ -1276,3 +1276,11 @@ data/2025/2504_07xxx/2504.07052/ecb0ea5a-e806-4206-b96f-d3cf7b2ea5b5_origin.pdf
1276
  data/2025/2504_07xxx/2504.07053/5069c415-7c54-431e-b348-92054587ecc3_origin.pdf filter=lfs diff=lfs merge=lfs -text
1277
  data/2025/2504_07xxx/2504.07079/119db10f-4b86-43b8-bde3-42f792f5a6e7_origin.pdf filter=lfs diff=lfs merge=lfs -text
1278
  data/2025/2504_08xxx/2504.08813/af01798f-8993-43b5-b109-47e5f260520d_origin.pdf filter=lfs diff=lfs merge=lfs -text
1279
+ data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_origin.pdf filter=lfs diff=lfs merge=lfs -text
1280
+ data/2025/2504_06xxx/2504.06201/43b7dceb-7067-4bc0-81f9-4f968ea096bb_origin.pdf filter=lfs diff=lfs merge=lfs -text
1281
+ data/2025/2504_06xxx/2504.06225/48347d42-40fb-4979-b798-617f024e9b22_origin.pdf filter=lfs diff=lfs merge=lfs -text
1282
+ data/2025/2504_06xxx/2504.06256/805981bf-d643-4b2e-955e-6bcd5ca89984_origin.pdf filter=lfs diff=lfs merge=lfs -text
1283
+ data/2025/2504_06xxx/2504.06261/000d1d7e-ab84-4037-a349-69f333ac45e9_origin.pdf filter=lfs diff=lfs merge=lfs -text
1284
+ data/2025/2504_06xxx/2504.06263/30e417a2-2609-4ff1-95ae-cf0382220f6f_origin.pdf filter=lfs diff=lfs merge=lfs -text
1285
+ data/2025/2504_06xxx/2504.06397/d704b2e6-2c04-4966-b818-dc796c22634f_origin.pdf filter=lfs diff=lfs merge=lfs -text
1286
+ data/2025/2504_06xxx/2504.06632/6418d473-80e2-437f-be9d-f7a58bd3474e_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_content_list.json ADDED
@@ -0,0 +1,1748 @@
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "ViTaMIn: Learning Contact-Rich Tasks Through Robot-Free Visuo-Tactile Manipulation Interface",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 148,
8
+ 87,
9
+ 851,
10
+ 137
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Fangchen Liu\\*,2, Chuanyu Li\\*,1, Yihua Qin\\*, Jing Xu\\*, Pieter Abbeel\\*, Rui Chen\\*,1",
17
+ "bbox": [
18
+ 181,
19
+ 157,
20
+ 807,
21
+ 174
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{1}$ Tsinghua University, $^{2}$ University of California, Berkeley",
28
+ "bbox": [
29
+ 285,
30
+ 175,
31
+ 712,
32
+ 191
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "* Equal contribution, † Corresponding author",
39
+ "bbox": [
40
+ 357,
41
+ 193,
42
+ 635,
43
+ 208
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "https://chuanyune.github.io/ViTaMIN_page",
50
+ "bbox": [
51
+ 279,
52
+ 210,
53
+ 714,
54
+ 226
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "image",
60
+ "img_path": "images/2b3f5d9d3857ac4bc064a25dd8f846478edc8d3d4acf76854100e1224e09f2e4.jpg",
61
+ "image_caption": [
62
+ "Demonstrations"
63
+ ],
64
+ "image_footnote": [],
65
+ "bbox": [
66
+ 96,
67
+ 282,
68
+ 218,
69
+ 376
70
+ ],
71
+ "page_idx": 0
72
+ },
73
+ {
74
+ "type": "image",
75
+ "img_path": "images/c76f0cbfcc0157a2fd0ea97bdb5758c11c6fb5d419f1af561dacdd15498d9a0f.jpg",
76
+ "image_caption": [],
77
+ "image_footnote": [],
78
+ "bbox": [
79
+ 236,
80
+ 282,
81
+ 341,
82
+ 375
83
+ ],
84
+ "page_idx": 0
85
+ },
86
+ {
87
+ "type": "image",
88
+ "img_path": "images/a7c742b59dc541a88a3273a380335073e194e7444064c0f55bc20ace7ec82882.jpg",
89
+ "image_caption": [],
90
+ "image_footnote": [],
91
+ "bbox": [
92
+ 354,
93
+ 282,
94
+ 473,
95
+ 376
96
+ ],
97
+ "page_idx": 0
98
+ },
99
+ {
100
+ "type": "image",
101
+ "img_path": "images/4d0acbed64c8854765c2e9c80aa8c7e2abb13d31d7b57777b70dad4d3e6a981a.jpg",
102
+ "image_caption": [
103
+ "Real-World Tasks"
104
+ ],
105
+ "image_footnote": [],
106
+ "bbox": [
107
+ 486,
108
+ 282,
109
+ 611,
110
+ 376
111
+ ],
112
+ "page_idx": 0
113
+ },
114
+ {
115
+ "type": "image",
116
+ "img_path": "images/c67f8d30bed7d5e78cd491b955ffbcc6ad2890cd3244a3c6ba5862292c5ec665.jpg",
117
+ "image_caption": [],
118
+ "image_footnote": [],
119
+ "bbox": [
120
+ 622,
121
+ 282,
122
+ 756,
123
+ 376
124
+ ],
125
+ "page_idx": 0
126
+ },
127
+ {
128
+ "type": "image",
129
+ "img_path": "images/3c482c5b0658f8f048d2ebe3c95889ab774372793412480b02727eafd0de414c.jpg",
130
+ "image_caption": [],
131
+ "image_footnote": [],
132
+ "bbox": [
133
+ 767,
134
+ 282,
135
+ 903,
136
+ 377
137
+ ],
138
+ "page_idx": 0
139
+ },
140
+ {
141
+ "type": "image",
142
+ "img_path": "images/56817bcfa892e233432daadca6888f4cef9f11efeeb525983604173fded63e17.jpg",
143
+ "image_caption": [
144
+ "Fig. 1: ViTaMIn overview. Our system comprises a portable data collection device that integrates visual and tactile sensing, a multimodal representation learning framework for fusing visual and tactile information, and demonstrations of various contact-rich manipulation tasks. This system facilitates efficient collection of manipulation data without requiring complex robot setups. (*Backgrounds in the images are blurred.)"
145
+ ],
146
+ "image_footnote": [],
147
+ "bbox": [
148
+ 88,
149
+ 380,
150
+ 452,
151
+ 545
152
+ ],
153
+ "page_idx": 0
154
+ },
155
+ {
156
+ "type": "image",
157
+ "img_path": "images/d3b473412a83550b31125af282bc865a7484b0a11b3fe4b684aa09dfa0912134.jpg",
158
+ "image_caption": [],
159
+ "image_footnote": [],
160
+ "bbox": [
161
+ 468,
162
+ 383,
163
+ 625,
164
+ 542
165
+ ],
166
+ "page_idx": 0
167
+ },
168
+ {
169
+ "type": "image",
170
+ "img_path": "images/bafd32d27c34e33981964ae485e4dfba8fcf84c249727c41a5a866d6121787e8.jpg",
171
+ "image_caption": [],
172
+ "image_footnote": [],
173
+ "bbox": [
174
+ 627,
175
+ 383,
176
+ 764,
177
+ 542
178
+ ],
179
+ "page_idx": 0
180
+ },
181
+ {
182
+ "type": "image",
183
+ "img_path": "images/4bd4ba61306b1059468cbd4655f3e43c112065002e75360f7ec6cebdc5cb4ee4.jpg",
184
+ "image_caption": [],
185
+ "image_footnote": [],
186
+ "bbox": [
187
+ 767,
188
+ 383,
189
+ 903,
190
+ 542
191
+ ],
192
+ "page_idx": 0
193
+ },
194
+ {
195
+ "type": "text",
196
+ "text": "Abstract—Tactile information plays a crucial role for humans and robots to interact effectively with their environment, particularly for tasks requiring the understanding of contact properties. Solving such dexterous manipulation tasks often relies on imitation learning from demonstration datasets, which are typically collected via teleoperation systems and often demand substantial time and effort. To address these challenges, we present ViTaMIn, an embodiment-free manipulation interface that integrates visual and tactile sensing into a hand-held gripper, enabling multi-modality data collection without the need for teleoperation. Our design employs a compliant Fin Ray gripper with tactile sensing, allowing operators to perceive force feedback during manipulation for more intuitive operation. Additionally, we propose a multi-modal representation learning strategy to obtain pre-trained tactile representations, improving data efficiency and policy robustness. Experiments on 5 contact-rich manipulation tasks demonstrate that our system is more scalable, efficient, and effective than baseline methods.",
197
+ "bbox": [
198
+ 81,
199
+ 630,
200
+ 488,
201
+ 857
202
+ ],
203
+ "page_idx": 0
204
+ },
205
+ {
206
+ "type": "text",
207
+ "text": "I. INTRODUCTION",
208
+ "text_level": 1,
209
+ "bbox": [
210
+ 218,
211
+ 872,
212
+ 352,
213
+ 886
214
+ ],
215
+ "page_idx": 0
216
+ },
217
+ {
218
+ "type": "text",
219
+ "text": "Humans rely on both visual and tactile modalities to perform a diverse range of manipulation tasks in daily",
220
+ "bbox": [
221
+ 81,
222
+ 896,
223
+ 488,
224
+ 926
225
+ ],
226
+ "page_idx": 0
227
+ },
228
+ {
229
+ "type": "text",
230
+ "text": "life. For instance, when inserting a plug into a socket or tightening a screw, vision helps with identifying and aligning components, while tactile signals enable precise force control during contact. This seamless integration of vision and touch enhances human dexterity, particularly in tasks that require contact-rich control, handling visual occlusions, or performing in-hand manipulations.",
231
+ "bbox": [
232
+ 504,
233
+ 628,
234
+ 913,
235
+ 734
236
+ ],
237
+ "page_idx": 0
238
+ },
239
+ {
240
+ "type": "text",
241
+ "text": "Recent progress in learning from demonstrations [1], [2], [3], [4] has shown significant potential for advancing general-purpose robots, enabling them to efficiently acquire complex skills from human demonstrations. Consequently, developing systems to collect high-quality demonstration data has been a recent key focus. Prior works have explored real-world data collection methods, including joint-mapped devices and exoskeletons [5], [6], [7], [8], and vision-based teleoperation frameworks [9], [10]. Nevertheless, these techniques require real-time teleoperation of a physical robot during data collection, which constrains efficiency and flexibility. In contrast, portable devices [11], [12], [13], [14] present",
242
+ "bbox": [
243
+ 504,
244
+ 744,
245
+ 913,
246
+ 926
247
+ ],
248
+ "page_idx": 0
249
+ },
250
+ {
251
+ "type": "aside_text",
252
+ "text": "arXiv:2504.06156v2 [cs.RO] 1 Sep 2025",
253
+ "bbox": [
254
+ 22,
255
+ 282,
256
+ 60,
257
+ 710
258
+ ],
259
+ "page_idx": 0
260
+ },
261
+ {
262
+ "type": "text",
263
+ "text": "a more scalable and cost-effective alternative to collect demonstration without teleoperation. Moreover, they can be seamlessly integrated into various embodiments, providing a more flexible data collection approach. However, these portable devices primarily focus on capturing vision-only demonstration data, limiting their usage for contact-rich and dexterous manipulation tasks where tactile feedback plays a crucial role.",
264
+ "bbox": [
265
+ 81,
266
+ 65,
267
+ 488,
268
+ 184
269
+ ],
270
+ "page_idx": 1
271
+ },
272
+ {
273
+ "type": "text",
274
+ "text": "In this work, we aim to address both the challenge of efficient data collection and the need for learning more dexterous tasks using visuo-tactile demonstrations. To this end, we introduce ViTaMIn, a novel and effective visuotactile manipulation interface designed to capture high-quality demonstrations with enhanced efficiency and flexibility. Unlike conventional approaches that rely on rigid tactile sensors, ViTaMIn leverages an omnidirectional compliant Fin Ray gripper with customized tactile sensing, which can detect contact from all directions as an expressive tactile signal for robot manipulation. We integrate the tactile-aware Fin Ray gripper [15] with UMI [14], enhancing the collected data with rich multimodal information and improving policy learning performance while maintaining the core advantages of portable devices. Additionally, our system enables operators to perceive force feedback during manipulation, facilitating more intuitive and seamless operation.",
275
+ "bbox": [
276
+ 81,
277
+ 186,
278
+ 488,
279
+ 443
280
+ ],
281
+ "page_idx": 1
282
+ },
283
+ {
284
+ "type": "text",
285
+ "text": "Pre-trained visual representations have shown improved performance in robotic manipulation [16], [17], [18], [19], [20], benefiting from large-scale visual pre-training. To fully leverage the visuo-tactile datasets collected with ViTaMIn, we adopt a multimodal representation learning strategy to pre-train tactile representations, enhancing the robustness and generalizability of our sensor-based policies. Our pretraining objective integrates masked autoencoding [21] and contrastive learning for multimodal alignment [22], where future image observations are aligned with masked current images and tactile signals. Through extensive experiments on five challenging contact-rich manipulation tasks, our visuotactile policy, enhanced by multimodal pre-training, exhibits superior data and training efficiency while demonstrating strong generalization across diverse objects and environmental conditions.",
286
+ "bbox": [
287
+ 81,
288
+ 443,
289
+ 488,
290
+ 684
291
+ ],
292
+ "page_idx": 1
293
+ },
294
+ {
295
+ "type": "text",
296
+ "text": "In conclusion, our contributions are:",
297
+ "bbox": [
298
+ 99,
299
+ 686,
300
+ 346,
301
+ 699
302
+ ],
303
+ "page_idx": 1
304
+ },
305
+ {
306
+ "type": "list",
307
+ "sub_type": "text",
308
+ "list_items": [
309
+ "- ViTaMIn provides a portable and scalable visuo-tactile data collection system.",
310
+ "- ViTaMIn proposes an effective multimodal representation learning strategy, which significantly improves the data efficiency, robustness and generalization capabilities.",
311
+ "- ViTaMIn achieves superior performance over vision-only baselines across five manipulation tasks by leveraging visuo-tactile demonstrations."
312
+ ],
313
+ "bbox": [
314
+ 99,
315
+ 703,
316
+ 486,
317
+ 838
318
+ ],
319
+ "page_idx": 1
320
+ },
321
+ {
322
+ "type": "text",
323
+ "text": "II. RELATED WORK",
324
+ "text_level": 1,
325
+ "bbox": [
326
+ 212,
327
+ 842,
328
+ 357,
329
+ 854
330
+ ],
331
+ "page_idx": 1
332
+ },
333
+ {
334
+ "type": "text",
335
+ "text": "A. Visuo-Tactile Manipulation",
336
+ "text_level": 1,
337
+ "bbox": [
338
+ 81,
339
+ 862,
340
+ 290,
341
+ 876
342
+ ],
343
+ "page_idx": 1
344
+ },
345
+ {
346
+ "type": "text",
347
+ "text": "Tactile sensing is essential for robotic manipulation as it provides signals about physical contact in addition to visual observation. Early works [23], [24], [25] use RGB cameras",
348
+ "bbox": [
349
+ 81,
350
+ 881,
351
+ 488,
352
+ 926
353
+ ],
354
+ "page_idx": 1
355
+ },
356
+ {
357
+ "type": "text",
358
+ "text": "and force/torque sensors to infer contact status for making decisions. However, the information from force/torque sensors is low-dimensional and insufficient for more dexterous manipulation tasks.",
359
+ "bbox": [
360
+ 504,
361
+ 65,
362
+ 911,
363
+ 125
364
+ ],
365
+ "page_idx": 1
366
+ },
367
+ {
368
+ "type": "text",
369
+ "text": "More recently, vision-based tactile sensors have gained attention for their ability to capture high-resolution contact information [26], [27], [28]. Despite these advances, the rigid design of these sensors restricts the compliance of the end effector, where alternative approaches like uncalibrated tactile skins [29] and plug-and-play sensing systems [30] have improved adaptability and flexibility. In our work, we use a Fin-Ray-shaped compliant and all-directional tactile sensor, which can detect contacts from all directions and also support safe and robust contact-rich manipulation.",
370
+ "bbox": [
371
+ 504,
372
+ 127,
373
+ 911,
374
+ 279
375
+ ],
376
+ "page_idx": 1
377
+ },
378
+ {
379
+ "type": "text",
380
+ "text": "B. Data Collection System for Robot Manipulation",
381
+ "text_level": 1,
382
+ "bbox": [
383
+ 506,
384
+ 297,
385
+ 854,
386
+ 313
387
+ ],
388
+ "page_idx": 1
389
+ },
390
+ {
391
+ "type": "text",
392
+ "text": "Recent advancements in learning from demonstrations [1], [2], [3], [4] have shown promising results in developing general-purpose robots. Therefore, efficiently collecting high-quality demonstrations has become a key research focus.",
393
+ "bbox": [
394
+ 504,
395
+ 321,
396
+ 911,
397
+ 395
398
+ ],
399
+ "page_idx": 1
400
+ },
401
+ {
402
+ "type": "text",
403
+ "text": "Recently works have focused on efficient real-world data collection systems, such as devices or exoskeletons with joint-mapping [5], [6], [7], exoskeletons [8], or vision-based systems [9], [10]. However, these approaches require a physical robot during data collection, which limits efficiency and flexibility. In contrast, portable devices [11], [12], [13], [14], [31], [32] offer several advantages: they are low-cost, flexible, and do not depend on a specific physical robot. Additionally, they can be seamlessly integrated into various embodiments and provide a more user-friendly experience for data collection. We extend the UMI data collection system [14] by integrating tactile sensing, which enriches the demonstrations with multimodal information, improving policy learning performance while preserving the key benefits of portable devices.",
404
+ "bbox": [
405
+ 504,
406
+ 398,
407
+ 913,
408
+ 625
409
+ ],
410
+ "page_idx": 1
411
+ },
412
+ {
413
+ "type": "text",
414
+ "text": "C. Multimodal Pre-training for Robotics",
415
+ "text_level": 1,
416
+ "bbox": [
417
+ 506,
418
+ 643,
419
+ 784,
420
+ 657
421
+ ],
422
+ "page_idx": 1
423
+ },
424
+ {
425
+ "type": "text",
426
+ "text": "Pre-trained visual representations have shown improved performance and generalization in robotic manipulation [16], [17], [18], [19], [20] with self-supervised learning techniques [21], [22]. This can be extended to multimodal representation learning [33], [34], [35] by integrating visual, tactile, and proprioceptive modalities, allowing robots to perceive object properties beyond visual appearance.",
427
+ "bbox": [
428
+ 504,
429
+ 667,
430
+ 911,
431
+ 773
432
+ ],
433
+ "page_idx": 1
434
+ },
435
+ {
436
+ "type": "text",
437
+ "text": "Aligning heterogeneous sensory modalities is a key challenge in multimodal learning, as different sensors have varying data structures, sampling rates, and noise characteristics [36]. Inspired by CLIP [22], researchers have developed contrastive learning techniques to align tactile and visual representations for manipulation tasks [37], [38].",
438
+ "bbox": [
439
+ 504,
440
+ 773,
441
+ 911,
442
+ 864
443
+ ],
444
+ "page_idx": 1
445
+ },
446
+ {
447
+ "type": "text",
448
+ "text": "Our work extends these efforts by introducing masked contrastive pre-training, where the tactile encoder learns to reconstruct future occluded visual information, further enhancing multimodal understanding.",
449
+ "bbox": [
450
+ 504,
451
+ 866,
452
+ 911,
453
+ 926
454
+ ],
455
+ "page_idx": 1
456
+ },
457
+ {
458
+ "type": "image",
459
+ "img_path": "images/eaeb262e8e8de2cc9c2c5f6bd946acaa4ad560a3e6122d16fbd8e4f0a08cfc1a.jpg",
460
+ "image_caption": [
461
+ "Fig. 2: ViTaMIn's hardware system overview. The handheld device integrates a GoPro camera, two tactile sensors and a synchronization camera to align visual and tactile information. During data collection, the two tactile sensors and the synchronization camera are connected to the Raspberry Pi in the backbox. The total weight of the gripper is approximately $1960\\mathrm{g}$ . Left: Side view of the ViTaMIn system. Right: Top view of the ViTaMIn system with the backbox cover removed."
462
+ ],
463
+ "image_footnote": [],
464
+ "bbox": [
465
+ 94,
466
+ 61,
467
+ 519,
468
+ 364
469
+ ],
470
+ "page_idx": 2
471
+ },
472
+ {
473
+ "type": "image",
474
+ "img_path": "images/4dc08d4271758a4aabed3f5e31b55c1d21ebc7da0ae86c7b523043f7db6cbe93.jpg",
475
+ "image_caption": [],
476
+ "image_footnote": [],
477
+ "bbox": [
478
+ 529,
479
+ 64,
480
+ 901,
481
+ 363
482
+ ],
483
+ "page_idx": 2
484
+ },
485
+ {
486
+ "type": "text",
487
+ "text": "III. VISUO-TACTILE MANIPULATION INTERFACE",
488
+ "text_level": 1,
489
+ "bbox": [
490
+ 112,
491
+ 445,
492
+ 460,
493
+ 460
494
+ ],
495
+ "page_idx": 2
496
+ },
497
+ {
498
+ "type": "text",
499
+ "text": "A. System Overview",
500
+ "text_level": 1,
501
+ "bbox": [
502
+ 81,
503
+ 467,
504
+ 225,
505
+ 482
506
+ ],
507
+ "page_idx": 2
508
+ },
509
+ {
510
+ "type": "text",
511
+ "text": "We design a handheld gripper to collect visuo-tactile demonstrations without requiring teleoperation on physical robots. Our gripper design is illustrated in Figure 2. The gripper consists of an RGB fisheye wrist camera (GoPro 10) for image observation, two AllTact finger [15], a synchronization camera for observation temporal alignment, and a Raspberry Pi 5 with a battery for data recording.",
512
+ "bbox": [
513
+ 81,
514
+ 487,
515
+ 490,
516
+ 593
517
+ ],
518
+ "page_idx": 2
519
+ },
520
+ {
521
+ "type": "text",
522
+ "text": "Image Observation To capture comprehensive visual information, we employ a GoPro 10 camera with a $155^{\\circ}$ field-of-view (FoV) fisheye lens. The camera operates at 60 FPS with a resolution of $2704 \\times 2028$ pixels and is mounted at the end-effector of our ViTaMIn to ensure consistent visual coverage of the manipulation workspace during demonstration collection and policy deployment.",
523
+ "bbox": [
524
+ 81,
525
+ 594,
526
+ 488,
527
+ 698
528
+ ],
529
+ "page_idx": 2
530
+ },
531
+ {
532
+ "type": "text",
533
+ "text": "Tactile Observation In UMI [14], two TPU-printed Fin Ray grippers are used to provide compliance and enhance grasping stability. However, these grippers lack tactile sensing capabilities. In our ViTaMIn, we employ AllTact [15], a compliant Fin Ray gripper with omnidirectional tactile sensing ability. During manipulation, the embedded camera in AllTact captures both the global deformation of the entire finger and the local deformation of the contact surface as a single image. The tactile sensor operates at 30 FPS with a resolution of $640 \\times 480$ pixels.",
534
+ "bbox": [
535
+ 81,
536
+ 699,
537
+ 488,
538
+ 849
539
+ ],
540
+ "page_idx": 2
541
+ },
542
+ {
543
+ "type": "text",
544
+ "text": "Other Observations To enhance the robustness and accuracy of SLAM, we utilize the IMU data provided by the GoPro, which is synchronized with the visual observations. Gripper width is also critical for precise manipulation. Following UMI [14], we attach two ArUco markers to the",
545
+ "bbox": [
546
+ 81,
547
+ 851,
548
+ 490,
549
+ 926
550
+ ],
551
+ "page_idx": 2
552
+ },
553
+ {
554
+ "type": "text",
555
+ "text": "gripper's fingers and compute the gripper width from the visual observations.",
556
+ "bbox": [
557
+ 504,
558
+ 445,
559
+ 911,
560
+ 474
561
+ ],
562
+ "page_idx": 2
563
+ },
564
+ {
565
+ "type": "text",
566
+ "text": "B. Data Processing",
567
+ "text_level": 1,
568
+ "bbox": [
569
+ 506,
570
+ 484,
571
+ 643,
572
+ 498
573
+ ],
574
+ "page_idx": 2
575
+ },
576
+ {
577
+ "type": "text",
578
+ "text": "Sensor Synchronization To synchronize the tactile sensors and GoPro camera, we use an additional low-cost camera which is connected to the Raspberry Pi and is naturally synchronized with the tactile sensors. Before data collection, both the GoPro and the synchronization camera simultaneously capture a sequence of ArUco markers displayed on a computer screen. The ArUco IDs are detected in both video streams, and when an identical ID appears in both, the corresponding timestamps are used for synchronization. Since the framereates of the GoPro and the synchronization camera are $60\\mathrm{Hz}$ and $30\\mathrm{Hz}$ respectively, the temporal alignment error is below $1/60 + 1/30 = 0.05$ seconds, which is sufficient for our tasks. Once the two videos are synchronized, they are cropped by the starting and ending signals triggered by the control button.",
579
+ "bbox": [
580
+ 504,
581
+ 503,
582
+ 913,
583
+ 728
584
+ ],
585
+ "page_idx": 2
586
+ },
587
+ {
588
+ "type": "text",
589
+ "text": "Data Collection and Filtering We adopt a similar data collection pipeline to UMI [14]. We also utilize Simultaneous Localization and Mapping (SLAM) to capture the end-effector trajectories. While SLAM may fail in low-texture environments, it achieves a success rate of approximately $80\\%$ in our tasks, allowing the majority of collected data to be used for imitation learning.",
590
+ "bbox": [
591
+ 504,
592
+ 729,
593
+ 913,
594
+ 835
595
+ ],
596
+ "page_idx": 2
597
+ },
598
+ {
599
+ "type": "text",
600
+ "text": "IV. VISUO-TACTILE POLICY LEARNING",
601
+ "text_level": 1,
602
+ "bbox": [
603
+ 566,
604
+ 843,
605
+ 852,
606
+ 857
607
+ ],
608
+ "page_idx": 2
609
+ },
610
+ {
611
+ "type": "text",
612
+ "text": "A. Visuo-Tactile Representation Learning",
613
+ "text_level": 1,
614
+ "bbox": [
615
+ 504,
616
+ 862,
617
+ 790,
618
+ 877
619
+ ],
620
+ "page_idx": 2
621
+ },
622
+ {
623
+ "type": "text",
624
+ "text": "UMI uses a pre-trained CLIP [22] encoder to extract visual representations. However, the tactile images in ViTaMIn are very different from the CLIP's training distribution, which",
625
+ "bbox": [
626
+ 504,
627
+ 881,
628
+ 913,
629
+ 926
630
+ ],
631
+ "page_idx": 2
632
+ },
633
+ {
634
+ "type": "text",
635
+ "text": "can lead to suboptimal representation. To tackle this, we pretrain an effective tactile encoder using the collected action-free datasets, which doesn't rely on the SLAM success.",
636
+ "bbox": [
637
+ 81,
638
+ 65,
639
+ 488,
640
+ 111
641
+ ],
642
+ "page_idx": 3
643
+ },
644
+ {
645
+ "type": "text",
646
+ "text": "Taking the tactile image in Figure 3 as an example, we want the encoder to capture the essential contact properties, such as the object's in-hand pose and gripper's deformation. These signals are complementary information from pixel observations, and are crucial for making future decisions.",
647
+ "bbox": [
648
+ 81,
649
+ 112,
650
+ 488,
651
+ 186
652
+ ],
653
+ "page_idx": 3
654
+ },
655
+ {
656
+ "type": "text",
657
+ "text": "To achieve this, we employ a multimodal contrastive learning approach as illustrated in Figure 3. Given the current masked image $\\tilde{I}_V^k$ and current full tactile observation $I_T^k$ of step $k$ , we want the combination of $\\tilde{I}_V^k$ and $I_T^k$ align with the future full image observation $I_V^{k + 1}$ in the CLIP embedding space. The intuition behind this is to make the tactile encoder focus on the contact information to predict future images based on the current corrupted image.",
658
+ "bbox": [
659
+ 81,
660
+ 188,
661
+ 490,
662
+ 309
663
+ ],
664
+ "page_idx": 3
665
+ },
666
+ {
667
+ "type": "image",
668
+ "img_path": "images/a9d59bd91c7d90305bf0363e93a7675127b8a317c02f04e3257aa1fddbcebcc7.jpg",
669
+ "image_caption": [],
670
+ "image_footnote": [],
671
+ "bbox": [
672
+ 101,
673
+ 333,
674
+ 465,
675
+ 579
676
+ ],
677
+ "page_idx": 3
678
+ },
679
+ {
680
+ "type": "image",
681
+ "img_path": "images/dfac30919f736ceede8d6fefd2d847d22d809cb8f4e923239b4620efb4776ebf.jpg",
682
+ "image_caption": [
683
+ "Fig. 3: The illustration of the multimodal contrastive representation pre-training phase. The tactile encoder is trained to capture complementary information to predict the missing content for the future image."
684
+ ],
685
+ "image_footnote": [],
686
+ "bbox": [
687
+ 148,
688
+ 583,
689
+ 416,
690
+ 785
691
+ ],
692
+ "page_idx": 3
693
+ },
694
+ {
695
+ "type": "text",
696
+ "text": "To ensure stable training, we freeze the image CLIP encoder $\\phi_V(\\cdot)$ but only fine-tune the tactile encoder $\\phi_T(\\cdot)$ . We first obtain the tactile embedding $T_{k}$ from $\\phi_T(I_T^k)$ , and $V_{k}$ from $\\phi_V(\\tilde{I}_V^k)$ . These embeddings are concatenated and",
697
+ "bbox": [
698
+ 81,
699
+ 864,
700
+ 490,
701
+ 926
702
+ ],
703
+ "page_idx": 3
704
+ },
705
+ {
706
+ "type": "text",
707
+ "text": "passed through a fully connected projection layer, mapping them back to the original 512-dimensional CLIP embedding space as a fused feature $F_{k}$ . Finally, we train the tactile encoder using the standard CLIP loss on $F_{k}$ and $V_{k + 1}$ :",
708
+ "bbox": [
709
+ 504,
710
+ 65,
711
+ 911,
712
+ 127
713
+ ],
714
+ "page_idx": 3
715
+ },
716
+ {
717
+ "type": "equation",
718
+ "text": "\n$$\n\\mathcal {L} _ {\\mathrm {C L I P}} = \\frac {1}{2} \\left(\\mathcal {L} _ {\\mathrm {f - v}} + \\mathcal {L} _ {\\mathrm {v - f}}\\right) \\tag {1}\n$$\n",
719
+ "text_format": "latex",
720
+ "bbox": [
721
+ 627,
722
+ 131,
723
+ 911,
724
+ 160
725
+ ],
726
+ "page_idx": 3
727
+ },
728
+ {
729
+ "type": "text",
730
+ "text": "where",
731
+ "bbox": [
732
+ 506,
733
+ 164,
734
+ 552,
735
+ 176
736
+ ],
737
+ "page_idx": 3
738
+ },
739
+ {
740
+ "type": "equation",
741
+ "text": "\n$$\n\\mathcal {L} _ {\\mathrm {v - f}} = - \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp \\left(\\cos \\left(V _ {i + 1} , F _ {i}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {N} \\exp \\left(\\cos \\left(V _ {i + 1} , F _ {j}\\right) / \\tau\\right)} \\tag {2}\n$$\n",
742
+ "text_format": "latex",
743
+ "bbox": [
744
+ 544,
745
+ 181,
746
+ 911,
747
+ 224
748
+ ],
749
+ "page_idx": 3
750
+ },
751
+ {
752
+ "type": "equation",
753
+ "text": "\n$$\n\\mathcal {L} _ {\\mathrm {f - v}} = - \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp \\left(\\cos \\left(F _ {i} , V _ {i + 1}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {N} \\exp \\left(\\cos \\left(F _ {i} , V _ {j + 1}\\right) / \\tau\\right)} \\tag {3}\n$$\n",
754
+ "text_format": "latex",
755
+ "bbox": [
756
+ 544,
757
+ 237,
758
+ 911,
759
+ 279
760
+ ],
761
+ "page_idx": 3
762
+ },
763
+ {
764
+ "type": "text",
765
+ "text": "here $\\tau$ is a learnable temperature parameter.",
766
+ "bbox": [
767
+ 504,
768
+ 282,
769
+ 807,
770
+ 297
771
+ ],
772
+ "page_idx": 3
773
+ },
774
+ {
775
+ "type": "text",
776
+ "text": "Different from [39], where they directly apply the CLIP loss on the time-aligned visuo-tactile images, we instead fuse the tactile observation with a masked current image to predict the future image. We make this choice for two main reasons. First, in [39], the tactile representation is conditioned on proprioceptive states, which are unavailable in our dataset before the success of SLAM. Second, since different tasks may have varying images but similar tactile observations, fusing a masked current image helps the network learn a more expressive tactile representation. Without sufficient masking, the alignment becomes trivial.",
777
+ "bbox": [
778
+ 504,
779
+ 297,
780
+ 913,
781
+ 464
782
+ ],
783
+ "page_idx": 3
784
+ },
785
+ {
786
+ "type": "text",
787
+ "text": "After pre-training, we train a Diffusion Policy [4] on the SLAM-filtered data. Following [4], we use a U-Net [40] as the noise prediction network and apply DDIM [41] to accelerate the inference for action prediction.",
788
+ "bbox": [
789
+ 504,
790
+ 464,
791
+ 913,
792
+ 525
793
+ ],
794
+ "page_idx": 3
795
+ },
796
+ {
797
+ "type": "image",
798
+ "img_path": "images/66ceb8edcaeb309670260252767cb93432455bba815e9dc4a4ca645ef94a855b.jpg",
799
+ "image_caption": [
800
+ "V. EXPERIMENTS",
801
+ "Fig. 4: Hardware setup for policy deployment."
802
+ ],
803
+ "image_footnote": [],
804
+ "bbox": [
805
+ 513,
806
+ 561,
807
+ 908,
808
+ 672
809
+ ],
810
+ "page_idx": 3
811
+ },
812
+ {
813
+ "type": "text",
814
+ "text": "A. Experimental Setup",
815
+ "text_level": 1,
816
+ "bbox": [
817
+ 504,
818
+ 710,
819
+ 665,
820
+ 726
821
+ ],
822
+ "page_idx": 3
823
+ },
824
+ {
825
+ "type": "text",
826
+ "text": "Hardware Figure 4 shows the policy deployment setup. Our system consists of a Rokae xMate ER3PRO robotic arm equipped with a PGI-140-80-W-S parallel gripper. The 7-DOF robotic arm provides flexible manipulation capabilities, while the gripper features an 8cm stroke range from fully open to closed position. The system is implemented using ROS Noetic on Ubuntu 20.04. The control loop operates at $10\\mathrm{Hz}$ , with separate threads handling robot control, visual and tactile sensing. The system architecture is designed to minimize latency while maintaining reliable real-time performance.",
827
+ "bbox": [
828
+ 504,
829
+ 729,
830
+ 913,
831
+ 895
832
+ ],
833
+ "page_idx": 3
834
+ },
835
+ {
836
+ "type": "text",
837
+ "text": "Similar to UMI [14], our system compensates for various sources of latency in the perception-action loop through",
838
+ "bbox": [
839
+ 504,
840
+ 896,
841
+ 913,
842
+ 926
843
+ ],
844
+ "page_idx": 3
845
+ },
846
+ {
847
+ "type": "text",
848
+ "text": "predictive buffering and timestamp-based synchronization between visual and tactile feedback streams. The policy generates 16 consecutive trajectories at each inference step, with 10 trajectories being executed based on our temporal compensation strategy.",
849
+ "bbox": [
850
+ 81,
851
+ 65,
852
+ 486,
853
+ 141
854
+ ],
855
+ "page_idx": 4
856
+ },
857
+ {
858
+ "type": "text",
859
+ "text": "Manipulation Tasks As shown in Figure 5, we propose diverse contact-rich manipulation tasks to evaluate the effectiveness of ViTaMIn. These tasks are specifically crafted to demonstrate the following key capabilities: (1) Robust pick-and-place of diverse objects, including fragile and small objects; (2) Dexterous manipulation, such as in-hand reorientation; (3) Task success determination, allowing the robot to repeat attempts until success; (4) Dynamic and precise manipulation.",
860
+ "bbox": [
861
+ 81,
862
+ 141,
863
+ 486,
864
+ 277
865
+ ],
866
+ "page_idx": 4
867
+ },
868
+ {
869
+ "type": "text",
870
+ "text": "We design the following 5 manipulation tasks:",
871
+ "bbox": [
872
+ 99,
873
+ 277,
874
+ 416,
875
+ 292
876
+ ],
877
+ "page_idx": 4
878
+ },
879
+ {
880
+ "type": "list",
881
+ "sub_type": "text",
882
+ "list_items": [
883
+ "- Orange Placement: Put a fragile orange from a randomized position to a randomized plate.",
884
+ "- Dynamic Peg Insertion: Grasp a peg and approach a hole, which is moving at a constant speed of $10\\mathrm{mm / s}$ . And precisely insert the peg to the hole.",
885
+ "- Test Tube Reorientation: Grasp a transparent test tube from a shelf and adjust its pose through extrinsic dexterity based on tactile feedback.",
886
+ "- Scissor Hanging: Grasp a pair of scissors and hang them on a hook. Adjust the pose and keep attempting until it succeeds.",
887
+ "- Dual-Arm Knife Pulling: The left arm first grasps a knife from a cup, orients it horizontally. The right arm grasps and pulls it out with a constrained prismatic motion. This task requires tactile feedback to grasp the thin object and perform the correct pulling motion."
888
+ ],
889
+ "bbox": [
890
+ 99,
891
+ 291,
892
+ 486,
893
+ 530
894
+ ],
895
+ "page_idx": 4
896
+ },
897
+ {
898
+ "type": "table",
899
+ "img_path": "images/eae2bd4dd37d3b7a1249a136a2e4e36453a4e27aed461598963d0c97dc63fade.jpg",
900
+ "table_caption": [
901
+ "TABLE I: Data Collection Statistics for Different Tasks"
902
+ ],
903
+ "table_footnote": [
904
+ "*Valid data refers to demonstrations with successful SLAM tracking"
905
+ ],
906
+ "table_body": "<table><tr><td>Task</td><td>Raw Data</td><td>Valid Data*</td><td>Avg. Length</td></tr><tr><td>Orange Placement</td><td>87</td><td>73</td><td>435</td></tr><tr><td>Dynamic Peg Insertion</td><td>201</td><td>141</td><td>321</td></tr><tr><td>Test Tube Reorientation</td><td>150</td><td>125</td><td>619</td></tr><tr><td>Scissor Hanging</td><td>172</td><td>137</td><td>642</td></tr><tr><td>Knife Pulling (Left)</td><td>188</td><td>131</td><td>403</td></tr><tr><td>Knife Pulling (Right)</td><td>180</td><td>134</td><td>254</td></tr></table>",
907
+ "bbox": [
908
+ 89,
909
+ 569,
910
+ 483,
911
+ 671
912
+ ],
913
+ "page_idx": 4
914
+ },
915
+ {
916
+ "type": "text",
917
+ "text": "Table I shows the statistics of the demonstration data. We collect demonstrations for both single-arm and dual-arm manipulation tasks. For single-arm tasks, we gather between 87 and 172 raw demonstrations per task according to the task difficulty, with successful SLAM tracking achieved in approximately $80\\%$ of the trajectories. The dual-arm knife pulling task requires coordinated motion between both arms, with similar data collection volumes but slightly different average demonstration lengths for left and right arm movements.",
918
+ "bbox": [
919
+ 81,
920
+ 699,
921
+ 486,
922
+ 848
923
+ ],
924
+ "page_idx": 4
925
+ },
926
+ {
927
+ "type": "text",
928
+ "text": "We compare our approach against the following methods: (1) Vision: the policy only takes visual observation from the GoPro camera, which is encoded by the pre-trained CLIP model (identical to the original UMI [14] paper); (2) Ours w/o Pre-training: This baseline simply concatenate visual and",
929
+ "bbox": [
930
+ 81,
931
+ 849,
932
+ 486,
933
+ 926
934
+ ],
935
+ "page_idx": 4
936
+ },
937
+ {
938
+ "type": "text",
939
+ "text": "tactile observations after separate CLIP ViT-B/16 encoders, and fine-tuned with behavior cloning.",
940
+ "bbox": [
941
+ 504,
942
+ 65,
943
+ 911,
944
+ 95
945
+ ],
946
+ "page_idx": 4
947
+ },
948
+ {
949
+ "type": "table",
950
+ "img_path": "images/2a3bf65b454c923e9b5bb949a3116a5dfef20d4712dfc9369a08f9361b171127.jpg",
951
+ "table_caption": [],
952
+ "table_footnote": [
953
+ "TABLE II: Comparisons on 5 tasks with baselines. Our approach improves the performance on 5 tasks through multimodal sensing and pre-training."
954
+ ],
955
+ "table_body": "<table><tr><td>Task</td><td>Vision</td><td>w/o Pre-training</td><td>Ours</td></tr><tr><td colspan=\"4\">Single-Arm Tasks</td></tr><tr><td>Orange placement</td><td>0.85</td><td>0.9</td><td>1</td></tr><tr><td>Test Tube Reorientation</td><td>0.4</td><td>0.7</td><td>0.9</td></tr><tr><td>Scissor Hanging</td><td>0.1</td><td>0.45</td><td>0.7</td></tr><tr><td>Dynamic Peg Insertion</td><td>0.45</td><td>0.8</td><td>0.9</td></tr><tr><td colspan=\"4\">Dual-Arm Task</td></tr><tr><td>Knife Pulling</td><td>0.6</td><td>0.8</td><td>0.9</td></tr></table>",
956
+ "bbox": [
957
+ 531,
958
+ 111,
959
+ 888,
960
+ 239
961
+ ],
962
+ "page_idx": 4
963
+ },
964
+ {
965
+ "type": "text",
966
+ "text": "The results are presented in Table II. For each task, we conduct 20 trials with randomized initial conditions and report the average performance. The vision-only policy performs the worst across all five tasks, particularly in contact-rich tasks like test tube reorientation and scissor hanging, where tactile feedback is crucial for success. Across all tasks, pre-training enhances the performance, highlighting the importance of learning effective tactile representations.",
967
+ "bbox": [
968
+ 504,
969
+ 299,
970
+ 911,
971
+ 419
972
+ ],
973
+ "page_idx": 4
974
+ },
975
+ {
976
+ "type": "text",
977
+ "text": "B. Failure Analysis",
978
+ "text_level": 1,
979
+ "bbox": [
980
+ 506,
981
+ 438,
982
+ 640,
983
+ 453
984
+ ],
985
+ "page_idx": 4
986
+ },
987
+ {
988
+ "type": "text",
989
+ "text": "In the Orange placement task, the robot picks up an orange from a random position within a $50\\mathrm{cm} \\times 50\\mathrm{cm}$ workspace and places it on a plate. Failures stem from table collisions, unstable placement, or motion planning errors despite correct object detection. In Dynamic peg insertion, the robot inserts a grasped peg into a moving hole. Vision-only methods often fail due to imprecise localization and alignment.",
990
+ "bbox": [
991
+ 504,
992
+ 460,
993
+ 911,
994
+ 566
995
+ ],
996
+ "page_idx": 4
997
+ },
998
+ {
999
+ "type": "text",
1000
+ "text": "In Test tube reorientation, the robot must pick up a tube from a random rack location and reorient it vertically, with success defined by less than $10^{\\circ}$ orientation error. Failures include rack collisions, over-lifting, and incorrect final orientation. Scissor hanging requires picking up scissors and hanging them on a narrow hook, where common issues include misdetection, misalignment, and failure to release. In Knife pulling, a dual-arm policy reorients the knife with one arm while the other pulls it out of a holder. Failures often result from poor coordination, weak grasps, or incomplete pulling. Overall, vision-only policies struggle with contact-rich tasks, highlighting the limitations of unimodal sensing.",
1001
+ "bbox": [
1002
+ 504,
1003
+ 568,
1004
+ 911,
1005
+ 750
1006
+ ],
1007
+ "page_idx": 4
1008
+ },
1009
+ {
1010
+ "type": "text",
1011
+ "text": "C. Compliant Articulated Object Manipulation",
1012
+ "text_level": 1,
1013
+ "bbox": [
1014
+ 506,
1015
+ 767,
1016
+ 825,
1017
+ 782
1018
+ ],
1019
+ "page_idx": 4
1020
+ },
1021
+ {
1022
+ "type": "text",
1023
+ "text": "To demonstrate the compliance capabilities of ViTaMIn, we designed a compliant-controlled articulated object manipulation task. The robotic arm needs to grasp a handle (connected to a force gauge) and rotate it 90 degrees to open a switch. During the rotation process, the arm must minimize axial forces to ensure smooth operation. We conduct 10 experiments for each condition and calculate the average forces. The results show that ViTaMIn achieves significantly lower average forces compared to using pure vision as input.",
1024
+ "bbox": [
1025
+ 504,
1026
+ 789,
1027
+ 911,
1028
+ 926
1029
+ ],
1030
+ "page_idx": 4
1031
+ },
1032
+ {
1033
+ "type": "image",
1034
+ "img_path": "images/ff5d1182fc87c0d6043cdc51c2604c67d7dd26e1c42f06dddaec7cbdb5b6fff2.jpg",
1035
+ "image_caption": [
1036
+ "Task 1. Orange Placement"
1037
+ ],
1038
+ "image_footnote": [],
1039
+ "bbox": [
1040
+ 99,
1041
+ 85,
1042
+ 202,
1043
+ 183
1044
+ ],
1045
+ "page_idx": 5
1046
+ },
1047
+ {
1048
+ "type": "image",
1049
+ "img_path": "images/512cac75dfa1a461ecf945a565d55ee173bc79056728365d8bbabcdff20497f1.jpg",
1050
+ "image_caption": [],
1051
+ "image_footnote": [],
1052
+ "bbox": [
1053
+ 207,
1054
+ 88,
1055
+ 308,
1056
+ 183
1057
+ ],
1058
+ "page_idx": 5
1059
+ },
1060
+ {
1061
+ "type": "image",
1062
+ "img_path": "images/8ffaab5c8e792fcf6faad355dd789f84bac2ae3b63606a4eac5401f023e77b6c.jpg",
1063
+ "image_caption": [],
1064
+ "image_footnote": [],
1065
+ "bbox": [
1066
+ 313,
1067
+ 88,
1068
+ 415,
1069
+ 183
1070
+ ],
1071
+ "page_idx": 5
1072
+ },
1073
+ {
1074
+ "type": "image",
1075
+ "img_path": "images/145b0ff91ccc913134917d84f27ec9288b5cc0e7a4e6ccca1027d09a1eb5522f.jpg",
1076
+ "image_caption": [
1077
+ "Task 2. Dynamic Peg Insertion"
1078
+ ],
1079
+ "image_footnote": [],
1080
+ "bbox": [
1081
+ 419,
1082
+ 87,
1083
+ 542,
1084
+ 183
1085
+ ],
1086
+ "page_idx": 5
1087
+ },
1088
+ {
1089
+ "type": "image",
1090
+ "img_path": "images/c1639d459b6280e0d616c0b61ca5027d7312dc27193311d49fc82c533e5e3614.jpg",
1091
+ "image_caption": [],
1092
+ "image_footnote": [],
1093
+ "bbox": [
1094
+ 542,
1095
+ 87,
1096
+ 653,
1097
+ 183
1098
+ ],
1099
+ "page_idx": 5
1100
+ },
1101
+ {
1102
+ "type": "image",
1103
+ "img_path": "images/ed9be295452bb2b609707999c0d7ce53274abf084feefa571723224f2e442fef.jpg",
1104
+ "image_caption": [],
1105
+ "image_footnote": [],
1106
+ "bbox": [
1107
+ 653,
1108
+ 87,
1109
+ 776,
1110
+ 183
1111
+ ],
1112
+ "page_idx": 5
1113
+ },
1114
+ {
1115
+ "type": "image",
1116
+ "img_path": "images/5a72a662adc1c1ba0bfd167d4f4af69842d450e5c116e4daa0ea7c7387c99b10.jpg",
1117
+ "image_caption": [],
1118
+ "image_footnote": [],
1119
+ "bbox": [
1120
+ 777,
1121
+ 87,
1122
+ 898,
1123
+ 183
1124
+ ],
1125
+ "page_idx": 5
1126
+ },
1127
+ {
1128
+ "type": "image",
1129
+ "img_path": "images/2eb0d57179fb5c021a773de17ac4443e984ccc352e0dc3e5d824297b87a58824.jpg",
1130
+ "image_caption": [
1131
+ "Task 3. Test Tube Reorientation",
1132
+ "Stage I"
1133
+ ],
1134
+ "image_footnote": [],
1135
+ "bbox": [
1136
+ 99,
1137
+ 196,
1138
+ 254,
1139
+ 277
1140
+ ],
1141
+ "page_idx": 5
1142
+ },
1143
+ {
1144
+ "type": "image",
1145
+ "img_path": "images/d9d86998bcb7355813c2ec3771bc9be86562ca597b9726d312f20d51db3d0713.jpg",
1146
+ "image_caption": [],
1147
+ "image_footnote": [],
1148
+ "bbox": [
1149
+ 259,
1150
+ 196,
1151
+ 413,
1152
+ 277
1153
+ ],
1154
+ "page_idx": 5
1155
+ },
1156
+ {
1157
+ "type": "image",
1158
+ "img_path": "images/ad52e3e1fffe97ce097f5acd4e97f9d17c9f5a5940fed40ac9f7275aebb29b3d.jpg",
1159
+ "image_caption": [],
1160
+ "image_footnote": [],
1161
+ "bbox": [
1162
+ 421,
1163
+ 196,
1164
+ 575,
1165
+ 294
1166
+ ],
1167
+ "page_idx": 5
1168
+ },
1169
+ {
1170
+ "type": "image",
1171
+ "img_path": "images/6e0490d1099f2e64b741b9dd1f95e5ae865168537ab2ca60ee6fd37e533eacdb.jpg",
1172
+ "image_caption": [],
1173
+ "image_footnote": [],
1174
+ "bbox": [
1175
+ 581,
1176
+ 196,
1177
+ 735,
1178
+ 294
1179
+ ],
1180
+ "page_idx": 5
1181
+ },
1182
+ {
1183
+ "type": "image",
1184
+ "img_path": "images/41e27ceecac4c9235a249ac029abee0e7fc30124d187cd6077bf037a65e93fd4.jpg",
1185
+ "image_caption": [],
1186
+ "image_footnote": [],
1187
+ "bbox": [
1188
+ 743,
1189
+ 196,
1190
+ 898,
1191
+ 294
1192
+ ],
1193
+ "page_idx": 5
1194
+ },
1195
+ {
1196
+ "type": "image",
1197
+ "img_path": "images/00f85838005136fdca15b5fe4bb78ee82f7340dea6f3e5a9b2e65bd76936a94c.jpg",
1198
+ "image_caption": [
1199
+ "Task 4. Scissor Hanging"
1200
+ ],
1201
+ "image_footnote": [],
1202
+ "bbox": [
1203
+ 99,
1204
+ 309,
1205
+ 254,
1206
+ 388
1207
+ ],
1208
+ "page_idx": 5
1209
+ },
1210
+ {
1211
+ "type": "image",
1212
+ "img_path": "images/67d2c9e7967010da05736086d3a0fca8814cf40da4a222bb5e6737e56f406e1e.jpg",
1213
+ "image_caption": [],
1214
+ "image_footnote": [],
1215
+ "bbox": [
1216
+ 259,
1217
+ 309,
1218
+ 413,
1219
+ 405
1220
+ ],
1221
+ "page_idx": 5
1222
+ },
1223
+ {
1224
+ "type": "image",
1225
+ "img_path": "images/7445085cbc517fd3cd93fbb3a2bd9f6db8580e6c84d599414a68d9405529f3b0.jpg",
1226
+ "image_caption": [],
1227
+ "image_footnote": [],
1228
+ "bbox": [
1229
+ 421,
1230
+ 309,
1231
+ 575,
1232
+ 405
1233
+ ],
1234
+ "page_idx": 5
1235
+ },
1236
+ {
1237
+ "type": "image",
1238
+ "img_path": "images/5e5f3b7ca4ef1ce5b7a8ef47b005c756ed1fc850e06dd280623fc0528eb1a89d.jpg",
1239
+ "image_caption": [],
1240
+ "image_footnote": [],
1241
+ "bbox": [
1242
+ 581,
1243
+ 309,
1244
+ 735,
1245
+ 405
1246
+ ],
1247
+ "page_idx": 5
1248
+ },
1249
+ {
1250
+ "type": "image",
1251
+ "img_path": "images/6fd39913d482519aa7b6f7a9a91a5fd878297b9f18bd5d9df7c2afe47a5f641f.jpg",
1252
+ "image_caption": [],
1253
+ "image_footnote": [],
1254
+ "bbox": [
1255
+ 743,
1256
+ 309,
1257
+ 897,
1258
+ 405
1259
+ ],
1260
+ "page_idx": 5
1261
+ },
1262
+ {
1263
+ "type": "image",
1264
+ "img_path": "images/99c0a32a6a7ff267400458289cc0fbf487ba3fbe191ce416aad8bac7243d1355.jpg",
1265
+ "image_caption": [
1266
+ "Task 5. Knife Pulling (Bimanual)",
1267
+ "Fig. 5: We test ViTaMIn on 5 contact-rich manipulation tasks, including precise and dynamic insertion, object hanging with multimodal feedback, and transparent in-hand object manipulation."
1268
+ ],
1269
+ "image_footnote": [],
1270
+ "bbox": [
1271
+ 99,
1272
+ 422,
1273
+ 254,
1274
+ 518
1275
+ ],
1276
+ "page_idx": 5
1277
+ },
1278
+ {
1279
+ "type": "image",
1280
+ "img_path": "images/3ce4769ee6b1bde42a17eee61d58d48bb5431619637f3963972110f5eafc4433.jpg",
1281
+ "image_caption": [],
1282
+ "image_footnote": [],
1283
+ "bbox": [
1284
+ 261,
1285
+ 422,
1286
+ 413,
1287
+ 518
1288
+ ],
1289
+ "page_idx": 5
1290
+ },
1291
+ {
1292
+ "type": "image",
1293
+ "img_path": "images/d26514e671a1cabe35615727660c95426ee9c01df609ba34f6407ddd70a97fc4.jpg",
1294
+ "image_caption": [],
1295
+ "image_footnote": [],
1296
+ "bbox": [
1297
+ 421,
1298
+ 422,
1299
+ 575,
1300
+ 518
1301
+ ],
1302
+ "page_idx": 5
1303
+ },
1304
+ {
1305
+ "type": "image",
1306
+ "img_path": "images/45234ed6e963ca64aacca0aeebac163393943ee8f94523b029c03b09faa1b450.jpg",
1307
+ "image_caption": [],
1308
+ "image_footnote": [],
1309
+ "bbox": [
1310
+ 581,
1311
+ 422,
1312
+ 735,
1313
+ 518
1314
+ ],
1315
+ "page_idx": 5
1316
+ },
1317
+ {
1318
+ "type": "image",
1319
+ "img_path": "images/b83fcb747acca716d74ef5c58839df6114300388b8d0e6ee2f936782a0e64c43.jpg",
1320
+ "image_caption": [],
1321
+ "image_footnote": [],
1322
+ "bbox": [
1323
+ 743,
1324
+ 422,
1325
+ 897,
1326
+ 518
1327
+ ],
1328
+ "page_idx": 5
1329
+ },
1330
+ {
1331
+ "type": "image",
1332
+ "img_path": "images/ce63d3a7c6dc3449fc08f0a14ed53567368fbb562685332477dc26bd0e8072a3.jpg",
1333
+ "image_caption": [
1334
+ "Fig. 6: The robot needs to flip open a switch (fixed to a force gauge) by rotating it 90 degrees. During the rotation, the robot must minimize axial forces to ensure smooth operation."
1335
+ ],
1336
+ "image_footnote": [],
1337
+ "bbox": [
1338
+ 98,
1339
+ 592,
1340
+ 282,
1341
+ 714
1342
+ ],
1343
+ "page_idx": 5
1344
+ },
1345
+ {
1346
+ "type": "image",
1347
+ "img_path": "images/c6980fdc3266252190f984cdc73c9a2bab1431c731bc03e678d86b4b54eeb2be.jpg",
1348
+ "image_caption": [
1349
+ "Maximum Force Comparison: Vision vs. Ours"
1350
+ ],
1351
+ "image_footnote": [],
1352
+ "bbox": [
1353
+ 287,
1354
+ 604,
1355
+ 460,
1356
+ 712
1357
+ ],
1358
+ "page_idx": 5
1359
+ },
1360
+ {
1361
+ "type": "image",
1362
+ "img_path": "images/94c8d59ccbd0cb0de5a5b3206f5de9360ecd718d47d6d0d3ca249decbf1ffc98.jpg",
1363
+ "image_caption": [
1364
+ "Novel Objects",
1365
+ "Fig. 7: Showcase of novel objects and different lighting in the generalization tasks. The right columns demonstrate colored flashlight/high-power/normal lighting conditions."
1366
+ ],
1367
+ "image_footnote": [],
1368
+ "bbox": [
1369
+ 516,
1370
+ 593,
1371
+ 658,
1372
+ 713
1373
+ ],
1374
+ "page_idx": 5
1375
+ },
1376
+ {
1377
+ "type": "image",
1378
+ "img_path": "images/c7ca6a14e745ecd4ed221682030a13fc964ba2820d452880f11c7800ce40073a.jpg",
1379
+ "image_caption": [
1380
+ "Different Lighting"
1381
+ ],
1382
+ "image_footnote": [],
1383
+ "bbox": [
1384
+ 661,
1385
+ 593,
1386
+ 898,
1387
+ 713
1388
+ ],
1389
+ "page_idx": 5
1390
+ },
1391
+ {
1392
+ "type": "text",
1393
+ "text": "D. Ablation Studies",
1394
+ "text_level": 1,
1395
+ "bbox": [
1396
+ 83,
1397
+ 810,
1398
+ 220,
1399
+ 824
1400
+ ],
1401
+ "page_idx": 5
1402
+ },
1403
+ {
1404
+ "type": "text",
1405
+ "text": "a) Data Efficiency: We evaluate the performance of policies trained on different amounts (25%, 50%, and 100%) of demonstrations. All the models are evaluated in 20 real-world trials with different initializations. For a more in-depth analysis, we calculate the success rates of each stage separately, as illustrated in Figure 8. With the pre-trained",
1406
+ "bbox": [
1407
+ 81,
1408
+ 835,
1409
+ 488,
1410
+ 926
1411
+ ],
1412
+ "page_idx": 5
1413
+ },
1414
+ {
1415
+ "type": "list",
1416
+ "sub_type": "text",
1417
+ "list_items": [
1418
+ "tactile representations, our method can achieve consistently higher success rates on all the tasks across different amounts of data, and can even master the task with limited data (25%) for test tube reorientation.",
1419
+ "b) Training Efficiency: We further evaluate the policies trained with different numbers of epochs to understand its training efficiency under the same evaluation protocol. The"
1420
+ ],
1421
+ "bbox": [
1422
+ 504,
1423
+ 816,
1424
+ 913,
1425
+ 926
1426
+ ],
1427
+ "page_idx": 5
1428
+ },
1429
+ {
1430
+ "type": "image",
1431
+ "img_path": "images/827a914e84c52597b4da5d6a8593513b04ba1f5b5f8f15324c3d07e8a040904d.jpg",
1432
+ "image_caption": [
1433
+ "Stage I"
1434
+ ],
1435
+ "image_footnote": [],
1436
+ "bbox": [
1437
+ 86,
1438
+ 90,
1439
+ 289,
1440
+ 252
1441
+ ],
1442
+ "page_idx": 6
1443
+ },
1444
+ {
1445
+ "type": "image",
1446
+ "img_path": "images/0d76e53a8c6ce1241acff7eeef8a2fcf95cd1b9821ab719fc786e6b8b40a1ec2.jpg",
1447
+ "image_caption": [
1448
+ "Tube Reorientation"
1449
+ ],
1450
+ "image_footnote": [],
1451
+ "bbox": [
1452
+ 295,
1453
+ 78,
1454
+ 495,
1455
+ 252
1456
+ ],
1457
+ "page_idx": 6
1458
+ },
1459
+ {
1460
+ "type": "image",
1461
+ "img_path": "images/a285013001ef5630297fdcd051b65b0ca161561fb3c9bcad3b85e8b0d8170ccc.jpg",
1462
+ "image_caption": [
1463
+ "Stage I",
1464
+ "Fig. 8: Ablation study on the effect of pre-training on data efficiency. The performance of the policy improves as the quantity of data increases. After pre-training on the action-free, task-ignorant dataset, our method can achieve a high success rate even with limited data (25%)."
1465
+ ],
1466
+ "image_footnote": [],
1467
+ "bbox": [
1468
+ 501,
1469
+ 90,
1470
+ 702,
1471
+ 252
1472
+ ],
1473
+ "page_idx": 6
1474
+ },
1475
+ {
1476
+ "type": "image",
1477
+ "img_path": "images/4b5c950d25456db7d2d940404eb103086fe309d96067fca9478d24545376c057.jpg",
1478
+ "image_caption": [
1479
+ "Scissor Hanging",
1480
+ "Stage II"
1481
+ ],
1482
+ "image_footnote": [],
1483
+ "bbox": [
1484
+ 707,
1485
+ 88,
1486
+ 908,
1487
+ 252
1488
+ ],
1489
+ "page_idx": 6
1490
+ },
1491
+ {
1492
+ "type": "image",
1493
+ "img_path": "images/d8dcc28916f7268aa5ffb965d055ef3eb9daf033798758dca22c4625f78d2473.jpg",
1494
+ "image_caption": [
1495
+ "Tube Reorientation",
1496
+ "Stage I",
1497
+ "Fig. 9: Ablation study on the effect of pre-training on training efficiency. Policies with pre-training are able to learn to complete the first-stage task at a remarkably early stage of training (within 10 epochs). Additionally, when the policy network is pre-trained, the overall success rates increase more rapidly."
1498
+ ],
1499
+ "image_footnote": [],
1500
+ "bbox": [
1501
+ 86,
1502
+ 343,
1503
+ 287,
1504
+ 503
1505
+ ],
1506
+ "page_idx": 6
1507
+ },
1508
+ {
1509
+ "type": "image",
1510
+ "img_path": "images/20f9fa3b2ed644154b3075e0c925e50cd264ba0e5235b21294c9f2bd1334e309.jpg",
1511
+ "image_caption": [
1512
+ "Stage II"
1513
+ ],
1514
+ "image_footnote": [],
1515
+ "bbox": [
1516
+ 294,
1517
+ 343,
1518
+ 493,
1519
+ 503
1520
+ ],
1521
+ "page_idx": 6
1522
+ },
1523
+ {
1524
+ "type": "image",
1525
+ "img_path": "images/bad020bfaf946a16dcc60d68034d145033f7b7475137443f7b26c1c2e7ca1978.jpg",
1526
+ "image_caption": [
1527
+ "Scissor Hanging",
1528
+ "Stage I"
1529
+ ],
1530
+ "image_footnote": [],
1531
+ "bbox": [
1532
+ 500,
1533
+ 342,
1534
+ 700,
1535
+ 503
1536
+ ],
1537
+ "page_idx": 6
1538
+ },
1539
+ {
1540
+ "type": "image",
1541
+ "img_path": "images/3e856f4f3818833cac9099e64bb7f58858c535a342f9000c483c2c4ffb29e705.jpg",
1542
+ "image_caption": [
1543
+ "Stage II"
1544
+ ],
1545
+ "image_footnote": [],
1546
+ "bbox": [
1547
+ 707,
1548
+ 342,
1549
+ 908,
1550
+ 503
1551
+ ],
1552
+ "page_idx": 6
1553
+ },
1554
+ {
1555
+ "type": "text",
1556
+ "text": "results are illustrated in Figure 9. We also observe consistent task performance improvements with pre-training. The policy can complete the first stage of the task at a remarkably early training stage (within 10 epochs).",
1557
+ "bbox": [
1558
+ 81,
1559
+ 575,
1560
+ 488,
1561
+ 638
1562
+ ],
1563
+ "page_idx": 6
1564
+ },
1565
+ {
1566
+ "type": "table",
1567
+ "img_path": "images/8105d743b48c767516e10ef93cc71f7fc5122df736e327dea3f051cc7bfb6c47.jpg",
1568
+ "table_caption": [],
1569
+ "table_footnote": [],
1570
+ "table_body": "<table><tr><td>Task</td><td>Method</td><td>Original</td><td>Novel Objects</td><td>Different Lighting</td></tr><tr><td rowspan=\"3\">Orange Placement</td><td>Vision</td><td>0.85</td><td>0.7</td><td>0.55</td></tr><tr><td>Ours w/o Pre-training</td><td>0.9</td><td>0.8</td><td>0.6</td></tr><tr><td>Ours</td><td>1.0</td><td>1.0</td><td>0.85</td></tr><tr><td rowspan=\"3\">Scissor Hanging</td><td>Vision</td><td>0.0</td><td>0.0</td><td>0.0</td></tr><tr><td>Ours w/o Pre-training</td><td>0.45</td><td>0.4</td><td>0.4</td></tr><tr><td>Ours</td><td>0.7</td><td>0.7</td><td>0.5</td></tr></table>",
1571
+ "bbox": [
1572
+ 84,
1573
+ 651,
1574
+ 488,
1575
+ 765
1576
+ ],
1577
+ "page_idx": 6
1578
+ },
1579
+ {
1580
+ "type": "text",
1581
+ "text": "TABLE III: Generalization under different objects and scenes. The results demonstrate that our multi-modal policy is more robust to novel objects and different lighting conditions.",
1582
+ "bbox": [
1583
+ 81,
1584
+ 770,
1585
+ 488,
1586
+ 830
1587
+ ],
1588
+ "page_idx": 6
1589
+ },
1590
+ {
1591
+ "type": "text",
1592
+ "text": "E. Generalization Capability",
1593
+ "text_level": 1,
1594
+ "bbox": [
1595
+ 83,
1596
+ 861,
1597
+ 284,
1598
+ 876
1599
+ ],
1600
+ "page_idx": 6
1601
+ },
1602
+ {
1603
+ "type": "text",
1604
+ "text": "We also evaluate our policy's generalizability to unseen objects and environments. As shown in Figure 7, beyond the training orange and scissor, we introduce 6 unseen small",
1605
+ "bbox": [
1606
+ 81,
1607
+ 880,
1608
+ 488,
1609
+ 926
1610
+ ],
1611
+ "page_idx": 6
1612
+ },
1613
+ {
1614
+ "type": "text",
1615
+ "text": "objects and 3 unseen scissors to assess object generalization. Additionally, we modify lighting conditions by increasing brightness and introducing colored disco ball lighting. Table III presents results on the tasks of orange placement and scissor hanging. Our method with pre-training achieves consistent better performance across various generalization settings.",
1616
+ "bbox": [
1617
+ 504,
1618
+ 575,
1619
+ 911,
1620
+ 681
1621
+ ],
1622
+ "page_idx": 6
1623
+ },
1624
+ {
1625
+ "type": "text",
1626
+ "text": "VI. CONCLUSION",
1627
+ "text_level": 1,
1628
+ "bbox": [
1629
+ 643,
1630
+ 694,
1631
+ 774,
1632
+ 707
1633
+ ],
1634
+ "page_idx": 6
1635
+ },
1636
+ {
1637
+ "type": "text",
1638
+ "text": "In this paper, we present ViTaMIn, a portable visuo-tactile manipulation interface designed for efficiently collecting high-quality demonstrations by capturing both visual and tactile signals. Furthermore, ViTaMIn introduces an effective pre-training strategy that leverages all the collected action-free data to learn a robust and generalizable tactile representation through multimodal contrastive learning. Our approach significantly outperforms vision-only policies across 5 real-world contact-rich manipulation tasks and demonstrates improved data efficiency, robustness, and generalizability with pre-trained visuo-tactile representations.",
1639
+ "bbox": [
1640
+ 504,
1641
+ 714,
1642
+ 913,
1643
+ 880
1644
+ ],
1645
+ "page_idx": 6
1646
+ },
1647
+ {
1648
+ "type": "text",
1649
+ "text": "Our method primarily focuses on fixed-base single-arm and dual-arm tasks with parallel-jaw grippers. While this setup is suitable for a wide range of manipulation tasks,",
1650
+ "bbox": [
1651
+ 504,
1652
+ 881,
1653
+ 913,
1654
+ 926
1655
+ ],
1656
+ "page_idx": 6
1657
+ },
1658
+ {
1659
+ "type": "text",
1660
+ "text": "future work could extend our approach to dexterous hands, enabling richer and more versatile manipulation skills that better approximate human-level dexterity.",
1661
+ "bbox": [
1662
+ 81,
1663
+ 66,
1664
+ 488,
1665
+ 111
1666
+ ],
1667
+ "page_idx": 7
1668
+ },
1669
+ {
1670
+ "type": "text",
1671
+ "text": "REFERENCES",
1672
+ "text_level": 1,
1673
+ "bbox": [
1674
+ 238,
1675
+ 121,
1676
+ 334,
1677
+ 133
1678
+ ],
1679
+ "page_idx": 7
1680
+ },
1681
+ {
1682
+ "type": "list",
1683
+ "sub_type": "ref_text",
1684
+ "list_items": [
1685
+ "[1] S. Levine, C. Finn, T. Darrell, and P. Abbeel, \"End-to-end training of deep visuomotor policies,\" Journal of Machine Learning Research, vol. 17, no. 39, pp. 1-40, 2016.",
1686
+ "[2] A. Brohan, N. Brown, J. Carbajal, Y. Chebotar, J. Dabis, C. Finn, K. Gopalakrishnan, K. Hausman, A. Herzog, J. Hsu et al., \"Rt-1: Robotics transformer for real-world control at scale,\" arXiv preprint arXiv:2212.06817, 2022.",
1687
+ "[3] A. Brohan, N. Brown, J. Carbajal, Y. Chebotar, X. Chen, K. Choromanski, T. Ding, D. Driess, A. Dubey, C. Finn et al., \"Rt-2: Vision-language-action models transfer web knowledge to robotic control,\" arXiv preprint arXiv:2307.15818, 2023.",
1688
+ "[4] C. Chi, S. Feng, Y. Du, Z. Xu, E. Cousineau, B. Burchfiel, and S. Song, \"Diffusion policy: Visuomotor policy learning via action diffusion,\" arXiv preprint arXiv:2303.04137, 2023.",
1689
+ "[5] J. Aldaco, T. Armstrong, R. Baruch, J. Bingham, S. Chan, K. Draper, D. Dwibedi, C. Finn, P. Florence, S. Goodrich et al., \"Aloha 2: An enhanced low-cost hardware for bimanual teleoperation,\" arXiv preprint arXiv:2405.02292, 2024.",
1690
+ "[6] Z. Fu, T. Z. Zhao, and C. Finn, \"Mobile aloha: Learning bimanual mobile manipulation with low-cost whole-body teleoperation,\" arXiv preprint arXiv:2401.02117, 2024.",
1691
+ "[7] T. Z. Zhao, V. Kumar, S. Levine, and C. Finn, “Learning fine-grained bimanual manipulation with low-cost hardware,” arXiv preprint arXiv:2304.13705, 2023.",
1692
+ "[8] H. Fang, H.-S. Fang, Y. Wang, J. Ren, J. Chen, R. Zhang, W. Wang, and C. Lu, \"Airexo: Low-cost exoskeletons for learning whole-arm manipulation in the wild,\" in 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2024, pp. 15031-15038.",
1693
+ "[9] X. Cheng, J. Li, S. Yang, G. Yang, and X. Wang, “Open-television: Teleoperation with immersive active visual feedback,” arXiv preprint arXiv:2407.01512, 2024.",
1694
+ "[10] Y. Qin, W. Yang, B. Huang, K. Van Wyk, H. Su, X. Wang, Y.-W. Chao, and D. Fox, \"Anyteleop: A general vision-based dexterous robot arm-hand teleoperation system,\" arXiv preprint arXiv:2307.04577, 2023.",
1695
+ "[11] F. Sanches, G. Gao, N. Elangovan, R. V. Godoy, J. Chapman, K. Wang, P. Jarvis, and M. Liarokapis, \"Scalable. intuitive human to robot skill transfer with wearable human machine interfaces: On complex, dexterous tasks,\" in 2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2023, pp. 6318-6325.",
1696
+ "[12] K. Doshi, Y. Huang, and S. Coros, \"On hand-held grippers and the morphological gap in human manipulation demonstration,\" arXiv preprint arXiv:2311.01832, 2023.",
1697
+ "[13] N. M. M. Shafiullah, A. Rai, H. Etukuru, Y. Liu, I. Misra, S. Chintala, and L. Pinto, \"On bringing robots home,\" arXiv preprint arXiv:2311.16098, 2023.",
1698
+ "[14] C. Chi, Z. Xu, C. Pan, E. Cousineau, B. Burchfiel, S. Feng, R. Tedrake, and S. Song, \"Universal manipulation interface: In-the-wild robot teaching without in-the-wild robots,\" arXiv preprint arXiv:2402.10329, 2024.",
1699
+ "[15] S. Liang, Y. Guan, J. Xu, H. Qian, X. Zhang, D. Wu, W. Ding, and R. Chen, \"Alltact fin ray: A compliant robot gripper with omnidirectional tactile sensing,\" arXiv preprint arXiv:2504.18064, 2025.",
1700
+ "[16] S. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta, “R3m: A universal visual representation for robot manipulation,” in Proceedings of The 6th Conference on Robot Learning (CoRL), vol. 205. PMLR, 2022, pp. 892–909.",
1701
+ "[17] Y. J. Ma, S. Sodhani, D. Jayaraman, O. Bastani, V. Kumar, and A. Zhang, “VIP: Towards universal visual reward and representation via value-implicit pre-training,” in The Eleventh International Conference on Learning Representations, 2023.",
1702
+ "[18] T. Xiao, I. Radosavovic, T. Darrell, and J. Malik, “Masked visual pretraining for motor control,” arXiv:2203.06173, 2022.",
1703
+ "[19] I. Radosavovic, T. Xiao, S. James, P. Abbeel, J. Malik, and T. Darrell, “Real-world robot learning with masked visual pre-training,” in Conference on Robot Learning. PMLR, 2023, pp. 416–426.",
1704
+ "[20] A. Majumdar, K. Yadav, S. Arnaud, J. Ma, C. Chen, S. Silwal, A. Jain, V.-P. Berges, T. Wu, J. Vakil et al., \"Where are we in the search for an artificial visual cortex for embodied intelligence?\" Advances in Neural Information Processing Systems, vol. 36, pp. 655-677, 2023."
1705
+ ],
1706
+ "bbox": [
1707
+ 86,
1708
+ 142,
1709
+ 488,
1710
+ 926
1711
+ ],
1712
+ "page_idx": 7
1713
+ },
1714
+ {
1715
+ "type": "list",
1716
+ "sub_type": "ref_text",
1717
+ "list_items": [
1718
+ "[21] K. He, X. Chen, S. Xie, Y. Li, P. Dollar, and R. Girshick, “Masked autoencoders are scalable vision learners,” in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2022, pp. 16000-16009.",
1719
+ "[22] A. Radford, J. W. Kim, C. Hallacy, A. Ramesh, G. Goh, S. Agarwal, G. Sastry, A. Askell, P. Mishkin, J. Clark et al., \"Learning transferable visual models from natural language supervision,\" in International conference on machine learning. PMLR, 2021, pp. 8748-8763.",
1720
+ "[23] K. Hosoda, K. Igarashi, and M. Asada, \"Adaptive hybrid visual servoing/force control in unknown environment,\" in Proceedings of IEEE/RSJ International Conference on Intelligent Robots and Systems. IROS'96, vol. 3. IEEE, 1996, pp. 1097-1103.",
1721
+ "[24] H. Nakagaki, K. Kitagaki, T. Ogasawara, and H. Tsukune, \"Study of deformation and insertion tasks of a flexible wire,\" in Proceedings of International Conference on Robotics and Automation, vol. 3. IEEE, 1997, pp. 2397-2402.",
1722
+ "[25] P. Miller and P. Leibowitz, \"Integration of vision, force and tactile sensing for grasping,\" Int. J. Intell. Mach, vol. 4, pp. 129-149, 1999.",
1723
+ "[26] H. Qi, B. Yi, S. Suresh, M. Lambeta, Y. Ma, R. Calandra, and J. Malik, \"General in-hand object rotation with vision and touch,\" in Conference on Robot Learning. PMLR, 2023, pp. 2549-2564.",
1724
+ "[27] S. Li, H. Yu, W. Ding, H. Liu, L. Ye, C. Xia, X. Wang, and X.-P. Zhang, “Visual-tactile fusion for transparent object grasping in complex backgrounds,” IEEE Transactions on Robotics, 2023.",
1725
+ "[28] Y. Han, K. Yu, R. Batra, N. Boyd, C. Mehta, T. Zhao, Y. She, S. Hutchinson, and Y. Zhao, “Learning generalizable vision-tactile robotic grasping strategy for deformable objects via transformer,” IEEE/ASME Transactions on Mechatronics, 2024.",
1726
+ "[29] R. Bhirangi, V. Pattabiraman, E. Erciyes, Y. Cao, T. Hellebrekers, and L. Pinto, “Anyskin: Plug-and-play skin sensing for robotic touch,” arXiv preprint arXiv:2409.08276, 2024.",
1727
+ "[30] V. Pattabiraman, Y. Cao, S. Haldar, L. Pinto, and R. Bhirangi, “Learning precise, contact-rich manipulation through uncalibrated tactile skins,” arXiv preprint arXiv:2410.17246, 2024.",
1728
+ "[31] Liu, Guan, Jia, Wu, Liu, Wang, Liang, Chen, Zhang, Song et al., \"Fastumi: A scalable and hardware-independent universal manipulation interface with dataset,\" arXiv e-prints, pp. arXiv-2409, 2024.",
1729
+ "[32] Liu, Chi, Cousineau, Kuppuswamy, Burchfiel, and Song, \"Maniwav: Learning robot manipulation from in-the-wild audio-visual data,\" in CoRL, 2024.",
1730
+ "[33] C. Sferrazza, Y. Seo, H. Liu, Y. Lee, and P. Abbeel, \"The power of the senses: Generalizable manipulation from vision and touch through masked multimodal learning,\" in 2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2024, pp. 9698-9705.",
1731
+ "[34] Z. Xu, R. Uppuluri, X. Zhang, C. Fitch, P. G. Crandall, W. Shou, D. Wang, and Y. She, \"UniT: Unified tactile representation for robot learning,\" 2024. [Online]. Available: https://arxiv.org/abs/2408.06481",
1732
+ "[35] X. Zhang and et al., “Fusing multimodal sensory data for robotic perception,” IEEE Transactions on Robotics, 2022.",
1733
+ "[36] A. Nagabandi, G. Kahn, S. Levine, and C. Finn, \"Deep reinforcement learning for vision-based robotic control with multimodal inputs,\" in Conference on Robot Learning (CoRL), 2020.",
1734
+ "[37] L. Fu, G. Datta, H. Huang, W. C.-H. Panitch, J. Drake, J. Ortiz, M. Mukadam, M. Lambeta, R. Calandra, and K. Goldberg, \"A touch, vision, and language dataset for multimodal alignment,\" in Forty-first International Conference on Machine Learning, 2024. [Online]. Available: https://openreview.net/forum?id=tFEOOH9eH0",
1735
+ "[38] F. Yang, C. Feng, Z. Chen, H. Park, D. Wang, Y. Dou, Z. Zeng, X. Chen, R. Gangopadhyay, A. Owens, and A. Wong, \"Binding touch to everything: Learning unified multimodal tactile representations,\" arXiv:2401.18084, 2024.",
1736
+ "[39] A. George, S. Gano, P. Katragadda, and A. Farimani, “Vital pretraining: Visuo-tactile pretraining for tactile and non-tactile manipulation policies,” arXiv preprint arXiv:2403.11898, 2024.",
1737
+ "[40] O. Ronneberger, P. Fischer, and T. Brox, “U-net: Convolutional networks for biomedical image segmentation,” in Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference, Munich, Germany, October 5-9, 2015, proceedings, part III 18. Springer, 2015, pp. 234-241.",
1738
+ "[41] J. Song, C. Meng, and S. Ermon, “Denoising diffusion implicit models,” arXiv preprint arXiv:2010.02502, 2020."
1739
+ ],
1740
+ "bbox": [
1741
+ 509,
1742
+ 66,
1743
+ 911,
1744
+ 883
1745
+ ],
1746
+ "page_idx": 7
1747
+ }
1748
+ ]
data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_model.json ADDED
@@ -0,0 +1,2460 @@
1
+ [
2
+ [
3
+ {
4
+ "type": "title",
5
+ "bbox": [
6
+ 0.149,
7
+ 0.088,
8
+ 0.852,
9
+ 0.138
10
+ ],
11
+ "angle": 0,
12
+ "content": "ViTaMIn: Learning Contact-Rich Tasks Through Robot-Free Visuo-Tactile Manipulation Interface"
13
+ },
14
+ {
15
+ "type": "text",
16
+ "bbox": [
17
+ 0.182,
18
+ 0.158,
19
+ 0.808,
20
+ 0.175
21
+ ],
22
+ "angle": 0,
23
+ "content": "Fangchen Liu\\*,2, Chuanyu Li\\*,1, Yihua Qin\\*, Jing Xu\\*, Pieter Abbeel\\*, Rui Chen\\*,1"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.286,
29
+ 0.176,
30
+ 0.714,
31
+ 0.193
32
+ ],
33
+ "angle": 0,
34
+ "content": "\\(^{1}\\)Tsinghua University, \\(^{2}\\)University of California, Berkeley"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.358,
40
+ 0.194,
41
+ 0.636,
42
+ 0.209
43
+ ],
44
+ "angle": 0,
45
+ "content": "* Equal contribution, † Corresponding author"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.281,
51
+ 0.211,
52
+ 0.715,
53
+ 0.227
54
+ ],
55
+ "angle": 0,
56
+ "content": "https://chuanyune.github.io/ViTaMIN_page"
57
+ },
58
+ {
59
+ "type": "aside_text",
60
+ "bbox": [
61
+ 0.023,
62
+ 0.283,
63
+ 0.061,
64
+ 0.712
65
+ ],
66
+ "angle": 270,
67
+ "content": "arXiv:2504.06156v2 [cs.RO] 1 Sep 2025"
68
+ },
69
+ {
70
+ "type": "image_caption",
71
+ "bbox": [
72
+ 0.094,
73
+ 0.271,
74
+ 0.221,
75
+ 0.283
76
+ ],
77
+ "angle": 0,
78
+ "content": "Demonstrations"
79
+ },
80
+ {
81
+ "type": "image",
82
+ "bbox": [
83
+ 0.098,
84
+ 0.284,
85
+ 0.22,
86
+ 0.377
87
+ ],
88
+ "angle": 0,
89
+ "content": null
90
+ },
91
+ {
92
+ "type": "image",
93
+ "bbox": [
94
+ 0.238,
95
+ 0.283,
96
+ 0.343,
97
+ 0.376
98
+ ],
99
+ "angle": 0,
100
+ "content": null
101
+ },
102
+ {
103
+ "type": "image",
104
+ "bbox": [
105
+ 0.355,
106
+ 0.283,
107
+ 0.474,
108
+ 0.377
109
+ ],
110
+ "angle": 0,
111
+ "content": null
112
+ },
113
+ {
114
+ "type": "image_caption",
115
+ "bbox": [
116
+ 0.48,
117
+ 0.269,
118
+ 0.617,
119
+ 0.282
120
+ ],
121
+ "angle": 0,
122
+ "content": "Real-World Tasks"
123
+ },
124
+ {
125
+ "type": "image",
126
+ "bbox": [
127
+ 0.487,
128
+ 0.283,
129
+ 0.612,
130
+ 0.377
131
+ ],
132
+ "angle": 0,
133
+ "content": null
134
+ },
135
+ {
136
+ "type": "image",
137
+ "bbox": [
138
+ 0.623,
139
+ 0.283,
140
+ 0.758,
141
+ 0.377
142
+ ],
143
+ "angle": 0,
144
+ "content": null
145
+ },
146
+ {
147
+ "type": "image",
148
+ "bbox": [
149
+ 0.769,
150
+ 0.283,
151
+ 0.905,
152
+ 0.378
153
+ ],
154
+ "angle": 0,
155
+ "content": null
156
+ },
157
+ {
158
+ "type": "image",
159
+ "bbox": [
160
+ 0.089,
161
+ 0.381,
162
+ 0.454,
163
+ 0.546
164
+ ],
165
+ "angle": 0,
166
+ "content": null
167
+ },
168
+ {
169
+ "type": "image",
170
+ "bbox": [
171
+ 0.469,
172
+ 0.384,
173
+ 0.627,
174
+ 0.543
175
+ ],
176
+ "angle": 0,
177
+ "content": null
178
+ },
179
+ {
180
+ "type": "image",
181
+ "bbox": [
182
+ 0.629,
183
+ 0.384,
184
+ 0.766,
185
+ 0.543
186
+ ],
187
+ "angle": 0,
188
+ "content": null
189
+ },
190
+ {
191
+ "type": "image",
192
+ "bbox": [
193
+ 0.768,
194
+ 0.384,
195
+ 0.904,
196
+ 0.543
197
+ ],
198
+ "angle": 0,
199
+ "content": null
200
+ },
201
+ {
202
+ "type": "image_caption",
203
+ "bbox": [
204
+ 0.082,
205
+ 0.557,
206
+ 0.913,
207
+ 0.618
208
+ ],
209
+ "angle": 0,
210
+ "content": "Fig. 1: ViTaMIn overview. Our system comprises a portable data collection device that integrates visual and tactile sensing, a multimodal representation learning framework for fusing visual and tactile information, and demonstrations of various contact-rich manipulation tasks. This system facilitates efficient collection of manipulation data without requiring complex robot setups. (*Backgrounds in the images are blurred.)"
211
+ },
212
+ {
213
+ "type": "text",
214
+ "bbox": [
215
+ 0.082,
216
+ 0.631,
217
+ 0.49,
218
+ 0.858
219
+ ],
220
+ "angle": 0,
221
+ "content": "Abstract—Tactile information plays a crucial role for humans and robots to interact effectively with their environment, particularly for tasks requiring the understanding of contact properties. Solving such dexterous manipulation tasks often relies on imitation learning from demonstration datasets, which are typically collected via teleoperation systems and often demand substantial time and effort. To address these challenges, we present ViTaMIn, an embodiment-free manipulation interface that integrates visual and tactile sensing into a hand-held gripper, enabling multi-modality data collection without the need for teleoperation. Our design employs a compliant Fin Ray gripper with tactile sensing, allowing operators to perceive force feedback during manipulation for more intuitive operation. Additionally, we propose a multi-modal representation learning strategy to obtain pre-trained tactile representations, improving data efficiency and policy robustness. Experiments on 5 contact-rich manipulation tasks demonstrate that our system is more scalable, efficient, and effective than baseline methods."
222
+ },
223
+ {
224
+ "type": "title",
225
+ "bbox": [
226
+ 0.219,
227
+ 0.873,
228
+ 0.353,
229
+ 0.887
230
+ ],
231
+ "angle": 0,
232
+ "content": "I. INTRODUCTION"
233
+ },
234
+ {
235
+ "type": "text",
236
+ "bbox": [
237
+ 0.083,
238
+ 0.897,
239
+ 0.49,
240
+ 0.927
241
+ ],
242
+ "angle": 0,
243
+ "content": "Humans rely on both visual and tactile modalities to perform a diverse range of manipulation tasks in daily"
244
+ },
245
+ {
246
+ "type": "text",
247
+ "bbox": [
248
+ 0.505,
249
+ 0.63,
250
+ 0.914,
251
+ 0.736
252
+ ],
253
+ "angle": 0,
254
+ "content": "life. For instance, when inserting a plug into a socket or tightening a screw, vision helps with identifying and aligning components, while tactile signals enable precise force control during contact. This seamless integration of vision and touch enhances human dexterity, particularly in tasks that require contact-rich control, handling visual occlusions, or performing in-hand manipulations."
255
+ },
256
+ {
257
+ "type": "text",
258
+ "bbox": [
259
+ 0.506,
260
+ 0.746,
261
+ 0.914,
262
+ 0.927
263
+ ],
264
+ "angle": 0,
265
+ "content": "Recent progress in learning from demonstrations [1], [2], [3], [4] has shown significant potential for advancing general-purpose robots, enabling them to efficiently acquire complex skills from human demonstrations. Consequently, developing systems to collect high-quality demonstration data has been a recent key focus. Prior works have explored real-world data collection methods, including joint-mapped devices and exoskeletons [5], [6], [7], [8], and vision-based teleoperation frameworks [9], [10]. Nevertheless, these techniques require real-time teleoperation of a physical robot during data collection, which constrains efficiency and flexibility. In contrast, portable devices [11], [12], [13], [14] present"
266
+ }
267
+ ],
268
+ [
269
+ {
270
+ "type": "text",
271
+ "bbox": [
272
+ 0.082,
273
+ 0.066,
274
+ 0.49,
275
+ 0.185
276
+ ],
277
+ "angle": 0,
278
+ "content": "a more scalable and cost-effective alternative to collect demonstration without teleoperation. Moreover, they can be seamlessly integrated into various embodiments, providing a more flexible data collection approach. However, these portable devices primarily focus on capturing vision-only demonstration data, limiting their usage for contact-rich and dexterous manipulation tasks where tactile feedback plays a crucial role."
279
+ },
280
+ {
281
+ "type": "text",
282
+ "bbox": [
283
+ 0.082,
284
+ 0.187,
285
+ 0.49,
286
+ 0.444
287
+ ],
288
+ "angle": 0,
289
+ "content": "In this work, we aim to address both the challenge of efficient data collection and the need for learning more dexterous tasks using visuo-tactile demonstrations. To this end, we introduce ViTaMIn, a novel and effective visuotactile manipulation interface designed to capture high-quality demonstrations with enhanced efficiency and flexibility. Unlike conventional approaches that rely on rigid tactile sensors, ViTaMIn leverages an omnidirectional compliant Fin Ray gripper with customized tactile sensing, which can detect contact from all directions as an expressive tactile signal for robot manipulation. We integrate the tactile-aware Fin Ray gripper [15] with UMI [14], enhancing the collected data with rich multimodal information and improving policy learning performance while maintaining the core advantages of portable devices. Additionally, our system enables operators to perceive force feedback during manipulation, facilitating more intuitive and seamless operation."
290
+ },
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.082,
295
+ 0.444,
296
+ 0.49,
297
+ 0.685
298
+ ],
299
+ "angle": 0,
300
+ "content": "Pre-trained visual representations have shown improved performance in robotic manipulation [16], [17], [18], [19], [20], benefiting from large-scale visual pre-training. To fully leverage the visuo-tactile datasets collected with ViTaMIn, we adopt a multimodal representation learning strategy to pre-train tactile representations, enhancing the robustness and generalizability of our sensor-based policies. Our pretraining objective integrates masked autoencoding [21] and contrastive learning for multimodal alignment [22], where future image observations are aligned with masked current images and tactile signals. Through extensive experiments on five challenging contact-rich manipulation tasks, our visuotactile policy, enhanced by multimodal pre-training, exhibits superior data and training efficiency while demonstrating strong generalization across diverse objects and environmental conditions."
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.1,
306
+ 0.687,
307
+ 0.348,
308
+ 0.7
309
+ ],
310
+ "angle": 0,
311
+ "content": "In conclusion, our contributions are:"
312
+ },
313
+ {
314
+ "type": "text",
315
+ "bbox": [
316
+ 0.1,
317
+ 0.704,
318
+ 0.488,
319
+ 0.733
320
+ ],
321
+ "angle": 0,
322
+ "content": "- ViTaMIn provides a portable and scalable visuo-tactile data collection system."
323
+ },
324
+ {
325
+ "type": "text",
326
+ "bbox": [
327
+ 0.1,
328
+ 0.735,
329
+ 0.488,
330
+ 0.792
331
+ ],
332
+ "angle": 0,
333
+ "content": "- ViTaMIn proposes an effective multimodal representation learning strategy, which significantly improves the data efficiency, robustness and generalization capabilities."
334
+ },
335
+ {
336
+ "type": "text",
337
+ "bbox": [
338
+ 0.1,
339
+ 0.795,
340
+ 0.488,
341
+ 0.839
342
+ ],
343
+ "angle": 0,
344
+ "content": "- ViTaMIn achieves superior performance over vision-only baselines across five manipulation tasks by leveraging visuo-tactile demonstrations."
345
+ },
346
+ {
347
+ "type": "list",
348
+ "bbox": [
349
+ 0.1,
350
+ 0.704,
351
+ 0.488,
352
+ 0.839
353
+ ],
354
+ "angle": 0,
355
+ "content": null
356
+ },
357
+ {
358
+ "type": "title",
359
+ "bbox": [
360
+ 0.214,
361
+ 0.843,
362
+ 0.359,
363
+ 0.856
364
+ ],
365
+ "angle": 0,
366
+ "content": "II. RELATED WORK"
367
+ },
368
+ {
369
+ "type": "title",
370
+ "bbox": [
371
+ 0.082,
372
+ 0.863,
373
+ 0.292,
374
+ 0.877
375
+ ],
376
+ "angle": 0,
377
+ "content": "A. Visuo-Tactile Manipulation"
378
+ },
379
+ {
380
+ "type": "text",
381
+ "bbox": [
382
+ 0.082,
383
+ 0.882,
384
+ 0.49,
385
+ 0.927
386
+ ],
387
+ "angle": 0,
388
+ "content": "Tactile sensing is essential for robotic manipulation as it provides signals about physical contact in addition to visual observation. Early works [23], [24], [25] use RGB cameras"
389
+ },
390
+ {
391
+ "type": "text",
392
+ "bbox": [
393
+ 0.505,
394
+ 0.066,
395
+ 0.913,
396
+ 0.126
397
+ ],
398
+ "angle": 0,
399
+ "content": "and force/torque sensors to infer contact status for making decisions. However, the information from force/torque sensors is low-dimensional and insufficient for more dexterous manipulation tasks."
400
+ },
401
+ {
402
+ "type": "text",
403
+ "bbox": [
404
+ 0.505,
405
+ 0.128,
406
+ 0.913,
407
+ 0.28
408
+ ],
409
+ "angle": 0,
410
+ "content": "More recently, vision-based tactile sensors have gained attention for their ability to capture high-resolution contact information [26], [27], [28]. Despite these advances, the rigid design of these sensors restricts the compliance of the end effector, where alternative approaches like uncalibrated tactile skins [29] and plug-and-play sensing systems [30] have improved adaptability and flexibility. In our work, we use a Fin-Ray-shaped compliant and all-directional tactile sensor, which can detect contacts from all directions and also support safe and robust contact-rich manipulation."
411
+ },
412
+ {
413
+ "type": "title",
414
+ "bbox": [
415
+ 0.507,
416
+ 0.298,
417
+ 0.855,
418
+ 0.314
419
+ ],
420
+ "angle": 0,
421
+ "content": "B. Data Collection System for Robot Manipulation"
422
+ },
423
+ {
424
+ "type": "text",
425
+ "bbox": [
426
+ 0.505,
427
+ 0.322,
428
+ 0.913,
429
+ 0.396
430
+ ],
431
+ "angle": 0,
432
+ "content": "Recent advancements in learning from demonstrations [1], [2], [3], [4] have shown promising results in developing general-purpose robots. Therefore, efficiently collecting high-quality demonstrations has become a key research focus."
433
+ },
434
+ {
435
+ "type": "text",
436
+ "bbox": [
437
+ 0.505,
438
+ 0.399,
439
+ 0.914,
440
+ 0.626
441
+ ],
442
+ "angle": 0,
443
+ "content": "Recently works have focused on efficient real-world data collection systems, such as devices or exoskeletons with joint-mapping [5], [6], [7], exoskeletons [8], or vision-based systems [9], [10]. However, these approaches require a physical robot during data collection, which limits efficiency and flexibility. In contrast, portable devices [11], [12], [13], [14], [31], [32] offer several advantages: they are low-cost, flexible, and do not depend on a specific physical robot. Additionally, they can be seamlessly integrated into various embodiments and provide a more user-friendly experience for data collection. We extend the UMI data collection system [14] by integrating tactile sensing, which enriches the demonstrations with multimodal information, improving policy learning performance while preserving the key benefits of portable devices."
444
+ },
445
+ {
446
+ "type": "title",
447
+ "bbox": [
448
+ 0.507,
449
+ 0.644,
450
+ 0.785,
451
+ 0.659
452
+ ],
453
+ "angle": 0,
454
+ "content": "C. Multimodal Pre-training for Robotics"
455
+ },
456
+ {
457
+ "type": "text",
458
+ "bbox": [
459
+ 0.505,
460
+ 0.668,
461
+ 0.913,
462
+ 0.774
463
+ ],
464
+ "angle": 0,
465
+ "content": "Pre-trained visual representations have shown improved performance and generalization in robotic manipulation [16], [17], [18], [19], [20] with self-supervised learning techniques [21], [22]. This can be extended to multimodal representation learning [33], [34], [35] by integrating visual, tactile, and proprioceptive modalities, allowing robots to perceive object properties beyond visual appearance."
466
+ },
467
+ {
468
+ "type": "text",
469
+ "bbox": [
470
+ 0.505,
471
+ 0.775,
472
+ 0.913,
473
+ 0.866
474
+ ],
475
+ "angle": 0,
476
+ "content": "Aligning heterogeneous sensory modalities is a key challenge in multimodal learning, as different sensors have varying data structures, sampling rates, and noise characteristics [36]. Inspired by CLIP [22], researchers have developed contrastive learning techniques to align tactile and visual representations for manipulation tasks [37], [38]."
477
+ },
478
+ {
479
+ "type": "text",
480
+ "bbox": [
481
+ 0.505,
482
+ 0.867,
483
+ 0.913,
484
+ 0.927
485
+ ],
486
+ "angle": 0,
487
+ "content": "Our work extends these efforts by introducing masked contrastive pre-training, where the tactile encoder learns to reconstruct future occluded visual information, further enhancing multimodal understanding."
488
+ }
489
+ ],
490
+ [
491
+ {
492
+ "type": "image",
493
+ "bbox": [
494
+ 0.095,
495
+ 0.063,
496
+ 0.52,
497
+ 0.365
498
+ ],
499
+ "angle": 0,
500
+ "content": null
501
+ },
502
+ {
503
+ "type": "image",
504
+ "bbox": [
505
+ 0.53,
506
+ 0.065,
507
+ 0.903,
508
+ 0.364
509
+ ],
510
+ "angle": 0,
511
+ "content": null
512
+ },
513
+ {
514
+ "type": "image_caption",
515
+ "bbox": [
516
+ 0.082,
517
+ 0.373,
518
+ 0.916,
519
+ 0.437
520
+ ],
521
+ "angle": 0,
522
+ "content": "Fig. 2: ViTaMIn's hardware system overview. The handheld device integrates a GoPro camera, two tactile sensors and a synchronization camera to align visual and tactile information. During data collection, the two tactile sensors and the synchronization camera are connected to the Raspberry Pi in the backbox. The total weight of the gripper is approximately \\(1960\\mathrm{g}\\). Left: Side view of the ViTaMIn system. Right: Top view of the ViTaMIn system with the backbox cover removed."
523
+ },
524
+ {
525
+ "type": "title",
526
+ "bbox": [
527
+ 0.113,
528
+ 0.446,
529
+ 0.462,
530
+ 0.461
531
+ ],
532
+ "angle": 0,
533
+ "content": "III. VISUO-TACTILE MANIPULATION INTERFACE"
534
+ },
535
+ {
536
+ "type": "title",
537
+ "bbox": [
538
+ 0.083,
539
+ 0.468,
540
+ 0.226,
541
+ 0.483
542
+ ],
543
+ "angle": 0,
544
+ "content": "A. System Overview"
545
+ },
546
+ {
547
+ "type": "text",
548
+ "bbox": [
549
+ 0.082,
550
+ 0.488,
551
+ 0.491,
552
+ 0.594
553
+ ],
554
+ "angle": 0,
555
+ "content": "We design a handheld gripper to collect visuo-tactile demonstrations without requiring teleoperation on physical robots. Our gripper design is illustrated in Figure 2. The gripper consists of an RGB fisheye wrist camera (GoPro 10) for image observation, two AllTact finger [15], a synchronization camera for observation temporal alignment, and a Raspberry Pi 5 with a battery for data recording."
556
+ },
557
+ {
558
+ "type": "text",
559
+ "bbox": [
560
+ 0.082,
561
+ 0.595,
562
+ 0.49,
563
+ 0.699
564
+ ],
565
+ "angle": 0,
566
+ "content": "Image Observation To capture comprehensive visual information, we employ a GoPro 10 camera with a \\(155^{\\circ}\\) field-of-view (FoV) fisheye lens. The camera operates at 60 FPS with a resolution of \\(2704 \\times 2028\\) pixels and is mounted at the end-effector of our ViTaMIn to ensure consistent visual coverage of the manipulation workspace during demonstration collection and policy deployment."
567
+ },
568
+ {
569
+ "type": "text",
570
+ "bbox": [
571
+ 0.082,
572
+ 0.7,
573
+ 0.49,
574
+ 0.851
575
+ ],
576
+ "angle": 0,
577
+ "content": "Tactile Observation In UMI [14], two TPU-printed Fin Ray grippers are used to provide compliance and enhance grasping stability. However, these grippers lack tactile sensing capabilities. In our ViTaMIn, we employ AllTact [15], a compliant Fin Ray gripper with omnidirectional tactile sensing ability. During manipulation, the embedded camera in AllTact captures both the global deformation of the entire finger and the local deformation of the contact surface as a single image. The tactile sensor operates at 30 FPS with a resolution of \\(640 \\times 480\\) pixels."
578
+ },
579
+ {
580
+ "type": "text",
581
+ "bbox": [
582
+ 0.082,
583
+ 0.852,
584
+ 0.491,
585
+ 0.928
586
+ ],
587
+ "angle": 0,
588
+ "content": "Other Observations To enhance the robustness and accuracy of SLAM, we utilize the IMU data provided by the GoPro, which is synchronized with the visual observations. Gripper width is also critical for precise manipulation. Following UMI [14], we attach two ArUco markers to the"
589
+ },
590
+ {
591
+ "type": "text",
592
+ "bbox": [
593
+ 0.506,
594
+ 0.446,
595
+ 0.913,
596
+ 0.476
597
+ ],
598
+ "angle": 0,
599
+ "content": "gripper's fingers and compute the gripper width from the visual observations."
600
+ },
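The extracted passage above describes recovering gripper width from two ArUco markers attached to the fingers. A minimal sketch of that idea, assuming OpenCV >= 4.7 with the ArUco module; the marker dictionary and the pixel-to-meter scale are illustrative assumptions, not values from the paper:

```python
import cv2
import numpy as np

def gripper_width(gray, px_to_m):
    # Detect the two finger-mounted markers (dictionary is assumed here).
    detector = cv2.aruco.ArucoDetector(
        cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50))
    corners, ids, _ = detector.detectMarkers(gray)
    if ids is None or len(ids) < 2:
        return None  # markers not visible in this frame
    # Use the centers of the first two detected markers.
    centers = [c.reshape(-1, 2).mean(axis=0) for c in corners[:2]]
    # Pixel distance between the finger markers, scaled by an assumed
    # calibration constant (fisheye undistortion is omitted here).
    return float(np.linalg.norm(centers[0] - centers[1])) * px_to_m
```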
601
+ {
602
+ "type": "title",
603
+ "bbox": [
604
+ 0.507,
605
+ 0.485,
606
+ 0.645,
607
+ 0.499
608
+ ],
609
+ "angle": 0,
610
+ "content": "B. Data Processing"
611
+ },
612
+ {
613
+ "type": "text",
614
+ "bbox": [
615
+ 0.505,
616
+ 0.504,
617
+ 0.914,
618
+ 0.729
619
+ ],
620
+ "angle": 0,
621
+ "content": "Sensor Synchronization To synchronize the tactile sensors and GoPro camera, we use an additional low-cost camera which is connected to the Raspberry Pi and is naturally synchronized with the tactile sensors. Before data collection, both the GoPro and the synchronization camera simultaneously capture a sequence of ArUco markers displayed on a computer screen. The ArUco IDs are detected in both video streams, and when an identical ID appears in both, the corresponding timestamps are used for synchronization. Since the framereates of the GoPro and the synchronization camera are \\(60\\mathrm{Hz}\\) and \\(30\\mathrm{Hz}\\) respectively, the temporal alignment error is below \\(1/60 + 1/30 = 0.05\\) seconds, which is sufficient for our tasks. Once the two videos are synchronized, they are cropped by the starting and ending signals triggered by the control button."
622
+ },
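The synchronization procedure described above amounts to finding a frame in each stream that shows the same ArUco ID and differencing the two timestamps. A minimal sketch of that matching step; all function and variable names are assumptions for illustration, not the authors' implementation:

```python
def estimate_offset(gopro_frames, sync_frames):
    """Each element is a (timestamp_seconds, detected_aruco_id) pair.

    Returns the GoPro-minus-sync-camera clock offset. The residual error
    is bounded by one frame period per stream:
    1/60 s (GoPro) + 1/30 s (sync camera) = 0.05 s.
    """
    sync_by_id = {marker_id: t for t, marker_id in sync_frames}
    for t_gopro, marker_id in gopro_frames:
        if marker_id in sync_by_id:
            return t_gopro - sync_by_id[marker_id]
    raise ValueError("no common ArUco ID detected in both streams")
```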
623
+ {
624
+ "type": "text",
625
+ "bbox": [
626
+ 0.506,
627
+ 0.73,
628
+ 0.914,
629
+ 0.837
630
+ ],
631
+ "angle": 0,
632
+ "content": "Data Collection and Filtering We adopt a similar data collection pipeline to UMI [14]. We also utilize Simultaneous Localization and Mapping (SLAM) to capture the end-effector trajectories. While SLAM may fail in low-texture environments, it achieves a success rate of approximately \\(80\\%\\) in our tasks, allowing the majority of collected data to be used for imitation learning."
633
+ },
634
+ {
635
+ "type": "title",
636
+ "bbox": [
637
+ 0.567,
638
+ 0.844,
639
+ 0.853,
640
+ 0.858
641
+ ],
642
+ "angle": 0,
643
+ "content": "IV. VISUO-TACTILE POLICY LEARNING"
644
+ },
645
+ {
646
+ "type": "title",
647
+ "bbox": [
648
+ 0.506,
649
+ 0.863,
650
+ 0.791,
651
+ 0.878
652
+ ],
653
+ "angle": 0,
654
+ "content": "A. Visuo-Tactile Representation Learning"
655
+ },
656
+ {
657
+ "type": "text",
658
+ "bbox": [
659
+ 0.506,
660
+ 0.882,
661
+ 0.914,
662
+ 0.928
663
+ ],
664
+ "angle": 0,
665
+ "content": "UMI uses a pre-trained CLIP [22] encoder to extract visual representations. However, the tactile images in ViTaMIn are very different from the CLIP's training distribution, which"
666
+ }
667
+ ],
668
+ [
669
+ {
670
+ "type": "text",
671
+ "bbox": [
672
+ 0.082,
673
+ 0.066,
674
+ 0.49,
675
+ 0.112
676
+ ],
677
+ "angle": 0,
678
+ "content": "can lead to suboptimal representation. To tackle this, we pretrain an effective tactile encoder using the collected action-free datasets, which doesn't rely on the SLAM success."
679
+ },
680
+ {
681
+ "type": "text",
682
+ "bbox": [
683
+ 0.082,
684
+ 0.113,
685
+ 0.49,
686
+ 0.188
687
+ ],
688
+ "angle": 0,
689
+ "content": "Taking the tactile image in Figure 3 as an example, we want the encoder to capture the essential contact properties, such as the object's in-hand pose and gripper's deformation. These signals are complementary information from pixel observations, and are crucial for making future decisions."
690
+ },
691
+ {
692
+ "type": "text",
693
+ "bbox": [
694
+ 0.082,
695
+ 0.189,
696
+ 0.491,
697
+ 0.31
698
+ ],
699
+ "angle": 0,
700
+ "content": "To achieve this, we employ a multimodal contrastive learning approach as illustrated in Figure 3. Given the current masked image \\(\\tilde{I}_V^k\\) and current full tactile observation \\(I_T^k\\) of step \\(k\\), we want the combination of \\(\\tilde{I}_V^k\\) and \\(I_T^k\\) align with the future full image observation \\(I_V^{k + 1}\\) in the CLIP embedding space. The intuition behind this is to make the tactile encoder focus on the contact information to predict future images based on the current corrupted image."
701
+ },
702
+ {
703
+ "type": "image",
704
+ "bbox": [
705
+ 0.102,
706
+ 0.334,
707
+ 0.466,
708
+ 0.58
709
+ ],
710
+ "angle": 0,
711
+ "content": null
712
+ },
713
+ {
714
+ "type": "image",
715
+ "bbox": [
716
+ 0.149,
717
+ 0.584,
718
+ 0.418,
719
+ 0.786
720
+ ],
721
+ "angle": 0,
722
+ "content": null
723
+ },
724
+ {
725
+ "type": "image_caption",
726
+ "bbox": [
727
+ 0.082,
728
+ 0.797,
729
+ 0.49,
730
+ 0.858
731
+ ],
732
+ "angle": 0,
733
+ "content": "Fig. 3: The illustration of the multimodal contrastive representation pre-training phase. The tactile encoder is trained to capture complementary information to predict the missing content for the future image."
734
+ },
735
+ {
736
+ "type": "text",
737
+ "bbox": [
738
+ 0.083,
739
+ 0.866,
740
+ 0.491,
741
+ 0.928
742
+ ],
743
+ "angle": 0,
744
+ "content": "To ensure stable training, we freeze the image CLIP encoder \\(\\phi_V(\\cdot)\\) but only fine-tune the tactile encoder \\(\\phi_T(\\cdot)\\). We first obtain the tactile embedding \\(T_{k}\\) from \\(\\phi_T(I_T^k)\\), and \\(V_{k}\\) from \\(\\phi_V(\\tilde{I}_V^k)\\). These embeddings are concatenated and"
745
+ },
746
+ {
747
+ "type": "text",
748
+ "bbox": [
749
+ 0.505,
750
+ 0.066,
751
+ 0.913,
752
+ 0.128
753
+ ],
754
+ "angle": 0,
755
+ "content": "passed through a fully connected projection layer, mapping them back to the original 512-dimensional CLIP embedding space as a fused feature \\( F_{k} \\). Finally, we train the tactile encoder using the standard CLIP loss on \\( F_{k} \\) and \\( V_{k + 1} \\):"
756
+ },
757
+ {
758
+ "type": "equation",
759
+ "bbox": [
760
+ 0.629,
761
+ 0.132,
762
+ 0.913,
763
+ 0.161
764
+ ],
765
+ "angle": 0,
766
+ "content": "\\[\n\\mathcal {L} _ {\\mathrm {C L I P}} = \\frac {1}{2} \\left(\\mathcal {L} _ {\\mathrm {f - v}} + \\mathcal {L} _ {\\mathrm {v - f}}\\right) \\tag {1}\n\\]"
767
+ },
768
+ {
769
+ "type": "text",
770
+ "bbox": [
771
+ 0.508,
772
+ 0.165,
773
+ 0.553,
774
+ 0.178
775
+ ],
776
+ "angle": 0,
777
+ "content": "where"
778
+ },
779
+ {
780
+ "type": "equation",
781
+ "bbox": [
782
+ 0.545,
783
+ 0.183,
784
+ 0.913,
785
+ 0.226
786
+ ],
787
+ "angle": 0,
788
+ "content": "\\[\n\\mathcal {L} _ {\\mathrm {v - f}} = - \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp \\left(\\cos \\left(V _ {i + 1} , F _ {i}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {N} \\exp \\left(\\cos \\left(V _ {i + 1} , F _ {j}\\right) / \\tau\\right)} \\tag {2}\n\\]"
789
+ },
790
+ {
791
+ "type": "equation",
792
+ "bbox": [
793
+ 0.545,
794
+ 0.238,
795
+ 0.913,
796
+ 0.28
797
+ ],
798
+ "angle": 0,
799
+ "content": "\\[\n\\mathcal {L} _ {\\mathrm {f - v}} = - \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\log \\frac {\\exp \\left(\\cos \\left(F _ {i} , V _ {i + 1}\\right) / \\tau\\right)}{\\sum_ {j = 1} ^ {N} \\exp \\left(\\cos \\left(F _ {i} , V _ {j + 1}\\right) / \\tau\\right)} \\tag {3}\n\\]"
800
+ },
801
+ {
802
+ "type": "text",
803
+ "bbox": [
804
+ 0.506,
805
+ 0.284,
806
+ 0.808,
807
+ 0.299
808
+ ],
809
+ "angle": 0,
810
+ "content": "here \\(\\tau\\) is a learnable temperature parameter."
811
+ },
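Equations (1)-(3) above are the standard symmetric InfoNCE objective between the fused feature F_k and the future image embedding V_{k+1}. A minimal PyTorch sketch of that loss, assuming both inputs are already 512-dimensional embeddings; the tensor names and the temperature parameterization are illustrative, not the authors' code:

```python
import torch
import torch.nn.functional as F

def clip_loss(fused, future_img, log_tau):
    # fused:      (N, 512) fused masked-image + tactile features F_k
    # future_img: (N, 512) frozen-CLIP embeddings V_{k+1}
    # log_tau:    scalar torch.nn.Parameter (learnable temperature tau)
    fused = F.normalize(fused, dim=-1)
    future_img = F.normalize(future_img, dim=-1)
    # Pairwise cosine similarities scaled by 1/tau.
    logits = fused @ future_img.t() / log_tau.exp()
    labels = torch.arange(fused.size(0), device=fused.device)
    loss_f_v = F.cross_entropy(logits, labels)      # Eq. (3): F_i -> V_{i+1}
    loss_v_f = F.cross_entropy(logits.t(), labels)  # Eq. (2): V_{i+1} -> F_i
    return 0.5 * (loss_f_v + loss_v_f)              # Eq. (1)
```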
812
+ {
813
+ "type": "text",
814
+ "bbox": [
815
+ 0.505,
816
+ 0.299,
817
+ 0.914,
818
+ 0.465
819
+ ],
820
+ "angle": 0,
821
+ "content": "Different from [39], where they directly apply the CLIP loss on the time-aligned visuo-tactile images, we instead fuse the tactile observation with a masked current image to predict the future image. We make this choice for two main reasons. First, in [39], the tactile representation is conditioned on proprioceptive states, which are unavailable in our dataset before the success of SLAM. Second, since different tasks may have varying images but similar tactile observations, fusing a masked current image helps the network learn a more expressive tactile representation. Without sufficient masking, the alignment becomes trivial."
822
+ },
823
+ {
824
+ "type": "text",
825
+ "bbox": [
826
+ 0.506,
827
+ 0.465,
828
+ 0.914,
829
+ 0.526
830
+ ],
831
+ "angle": 0,
832
+ "content": "After pre-training, we train a Diffusion Policy [4] on the SLAM-filtered data. Following [4], we use a U-Net [40] as the noise prediction network and apply DDIM [41] to accelerate the inference for action prediction."
833
+ },
834
+ {
835
+ "type": "image_caption",
836
+ "bbox": [
837
+ 0.647,
838
+ 0.533,
839
+ 0.774,
840
+ 0.547
841
+ ],
842
+ "angle": 0,
843
+ "content": "V. EXPERIMENTS"
844
+ },
845
+ {
846
+ "type": "image",
847
+ "bbox": [
848
+ 0.514,
849
+ 0.562,
850
+ 0.91,
851
+ 0.673
852
+ ],
853
+ "angle": 0,
854
+ "content": null
855
+ },
856
+ {
857
+ "type": "image_caption",
858
+ "bbox": [
859
+ 0.55,
860
+ 0.681,
861
+ 0.868,
862
+ 0.697
863
+ ],
864
+ "angle": 0,
865
+ "content": "Fig. 4: Hardware setup for policy deployment."
866
+ },
867
+ {
868
+ "type": "title",
869
+ "bbox": [
870
+ 0.506,
871
+ 0.712,
872
+ 0.666,
873
+ 0.727
874
+ ],
875
+ "angle": 0,
876
+ "content": "A. Experimental Setup"
877
+ },
878
+ {
879
+ "type": "text",
880
+ "bbox": [
881
+ 0.505,
882
+ 0.731,
883
+ 0.914,
884
+ 0.896
885
+ ],
886
+ "angle": 0,
887
+ "content": "Hardware Figure 4 shows the policy deployment setup. Our system consists of a Rokae xMate ER3PRO robotic arm equipped with a PGI-140-80-W-S parallel gripper. The 7-DOF robotic arm provides flexible manipulation capabilities, while the gripper features an 8cm stroke range from fully open to closed position. The system is implemented using ROS Noetic on Ubuntu 20.04. The control loop operates at \\(10\\mathrm{Hz}\\), with separate threads handling robot control, visual and tactile sensing. The system architecture is designed to minimize latency while maintaining reliable real-time performance."
888
+ },
889
+ {
890
+ "type": "text",
891
+ "bbox": [
892
+ 0.506,
893
+ 0.897,
894
+ 0.914,
895
+ 0.928
896
+ ],
897
+ "angle": 0,
898
+ "content": "Similar to UMI [14], our system compensates for various sources of latency in the perception-action loop through"
899
+ }
900
+ ],
901
+ [
902
+ {
903
+ "type": "text",
904
+ "bbox": [
905
+ 0.082,
906
+ 0.066,
907
+ 0.488,
908
+ 0.142
909
+ ],
910
+ "angle": 0,
911
+ "content": "predictive buffering and timestamp-based synchronization between visual and tactile feedback streams. The policy generates 16 consecutive trajectories at each inference step, with 10 trajectories being executed based on our temporal compensation strategy."
912
+ },
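The deployment description above (a 10 Hz control loop, 16 predicted steps per inference, of which 10 are executed) corresponds to a standard receding-horizon execution loop. A minimal sketch under those numbers; `policy` and `robot` are assumed interfaces, not the actual ROS implementation:

```python
import time

HORIZON_STEPS = 16   # actions predicted per inference step
EXECUTE_STEPS = 10   # prefix executed before re-planning
CONTROL_DT = 0.1     # 10 Hz control loop

def control_loop(policy, robot):
    while not robot.done():
        obs = robot.get_observation()           # synced visual + tactile frames
        actions = policy.predict(obs)           # shape: (HORIZON_STEPS, action_dim)
        for action in actions[:EXECUTE_STEPS]:  # execute a prefix, then re-plan
            robot.apply_action(action)
            time.sleep(CONTROL_DT)
```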
913
+ {
914
+ "type": "text",
915
+ "bbox": [
916
+ 0.082,
917
+ 0.142,
918
+ 0.488,
919
+ 0.278
920
+ ],
921
+ "angle": 0,
922
+ "content": "Manipulation Tasks As shown in Figure 5, we propose diverse contact-rich manipulation tasks to evaluate the effectiveness of ViTaMIn. These tasks are specifically crafted to demonstrate the following key capabilities: (1) Robust pick-and-place of diverse objects, including fragile and small objects; (2) Dexterous manipulation, such as in-hand reorientation; (3) Task success determination, allowing the robot to repeat attempts until success; (4) Dynamic and precise manipulation."
923
+ },
924
+ {
925
+ "type": "text",
926
+ "bbox": [
927
+ 0.1,
928
+ 0.279,
929
+ 0.417,
930
+ 0.293
931
+ ],
932
+ "angle": 0,
933
+ "content": "We design the following 5 manipulation tasks:"
934
+ },
935
+ {
936
+ "type": "text",
937
+ "bbox": [
938
+ 0.1,
939
+ 0.292,
940
+ 0.488,
941
+ 0.319
942
+ ],
943
+ "angle": 0,
944
+ "content": "- Orange Placement: Put a fragile orange from a randomized position to a randomized plate."
945
+ },
946
+ {
947
+ "type": "text",
948
+ "bbox": [
949
+ 0.1,
950
+ 0.321,
951
+ 0.488,
952
+ 0.365
953
+ ],
954
+ "angle": 0,
955
+ "content": "- Dynamic Peg Insertion: Grasp a peg and approach a hole, which is moving at a constant speed of \\(10\\mathrm{mm / s}\\). And precisely insert the peg to the hole."
956
+ },
957
+ {
958
+ "type": "text",
959
+ "bbox": [
960
+ 0.1,
961
+ 0.366,
962
+ 0.488,
963
+ 0.41
964
+ ],
965
+ "angle": 0,
966
+ "content": "- Test Tube Reorientation: Grasp a transparent test tube from a shelf and adjust its pose through extrinsic dexterity based on tactile feedback."
967
+ },
968
+ {
969
+ "type": "text",
970
+ "bbox": [
971
+ 0.1,
972
+ 0.411,
973
+ 0.488,
974
+ 0.453
975
+ ],
976
+ "angle": 0,
977
+ "content": "- Scissor Hanging: Grasp a pair of scissors and hang them on a hook. Adjust the pose and keep attempting until it succeeds."
978
+ },
979
+ {
980
+ "type": "text",
981
+ "bbox": [
982
+ 0.1,
983
+ 0.456,
984
+ 0.488,
985
+ 0.531
986
+ ],
987
+ "angle": 0,
988
+ "content": "- Dual-Arm Knife Pulling: The left arm first grasps a knife from a cup, orients it horizontally. The right arm grasps and pulls it out with a constrained prismatic motion. This task requires tactile feedback to grasp the thin object and perform the correct pulling motion."
989
+ },
990
+ {
991
+ "type": "list",
992
+ "bbox": [
993
+ 0.1,
994
+ 0.292,
995
+ 0.488,
996
+ 0.531
997
+ ],
998
+ "angle": 0,
999
+ "content": null
1000
+ },
1001
+ {
1002
+ "type": "table_caption",
1003
+ "bbox": [
1004
+ 0.098,
1005
+ 0.548,
1006
+ 0.476,
1007
+ 0.563
1008
+ ],
1009
+ "angle": 0,
1010
+ "content": "TABLE I: Data Collection Statistics for Different Tasks"
1011
+ },
1012
+ {
1013
+ "type": "table",
1014
+ "bbox": [
1015
+ 0.09,
1016
+ 0.57,
1017
+ 0.484,
1018
+ 0.672
1019
+ ],
1020
+ "angle": 0,
1021
+ "content": "<table><tr><td>Task</td><td>Raw Data</td><td>Valid Data*</td><td>Avg. Length</td></tr><tr><td>Orange Placement</td><td>87</td><td>73</td><td>435</td></tr><tr><td>Dynamic Peg Insertion</td><td>201</td><td>141</td><td>321</td></tr><tr><td>Test Tube Reorientation</td><td>150</td><td>125</td><td>619</td></tr><tr><td>Scissor Hanging</td><td>172</td><td>137</td><td>642</td></tr><tr><td>Knife Pulling (Left)</td><td>188</td><td>131</td><td>403</td></tr><tr><td>Knife Pulling (Right)</td><td>180</td><td>134</td><td>254</td></tr></table>"
1022
+ },
1023
+ {
1024
+ "type": "table_footnote",
1025
+ "bbox": [
1026
+ 0.099,
1027
+ 0.672,
1028
+ 0.472,
1029
+ 0.683
1030
+ ],
1031
+ "angle": 0,
1032
+ "content": "*Valid data refers to demonstrations with successful SLAM tracking"
1033
+ },
1034
+ {
1035
+ "type": "text",
1036
+ "bbox": [
1037
+ 0.082,
1038
+ 0.7,
1039
+ 0.488,
1040
+ 0.849
1041
+ ],
1042
+ "angle": 0,
1043
+ "content": "Table I shows the statistics of the demonstration data. We collect demonstrations for both single-arm and dual-arm manipulation tasks. For single-arm tasks, we gather between 87 and 172 raw demonstrations per task according to the task difficulty, with successful SLAM tracking achieved in approximately \\(80\\%\\) of the trajectories. The dual-arm knife pulling task requires coordinated motion between both arms, with similar data collection volumes but slightly different average demonstration lengths for left and right arm movements."
1044
+ },
1045
+ {
1046
+ "type": "text",
1047
+ "bbox": [
1048
+ 0.082,
1049
+ 0.851,
1050
+ 0.488,
1051
+ 0.927
1052
+ ],
1053
+ "angle": 0,
1054
+ "content": "We compare our approach against the following methods: (1) Vision: the policy only takes visual observation from the GoPro camera, which is encoded by the pre-trained CLIP model (identical to the original UMI [14] paper); (2) Ours w/o Pre-training: This baseline simply concatenate visual and"
1055
+ },
1056
+ {
1057
+ "type": "text",
1058
+ "bbox": [
1059
+ 0.505,
1060
+ 0.066,
1061
+ 0.912,
1062
+ 0.097
1063
+ ],
1064
+ "angle": 0,
1065
+ "content": "tactile observations after separate CLIP ViT-B/16 encoders, and fine-tuned with behavior cloning."
1066
+ },
1067
+ {
1068
+ "type": "table",
1069
+ "bbox": [
1070
+ 0.532,
1071
+ 0.112,
1072
+ 0.89,
1073
+ 0.24
1074
+ ],
1075
+ "angle": 0,
1076
+ "content": "<table><tr><td>Task</td><td>Vision</td><td>w/o Pre-training</td><td>Ours</td></tr><tr><td colspan=\"4\">Single-Arm Tasks</td></tr><tr><td>Orange placement</td><td>0.85</td><td>0.9</td><td>1</td></tr><tr><td>Test Tube Reorientation</td><td>0.4</td><td>0.7</td><td>0.9</td></tr><tr><td>Scissor Hanging</td><td>0.1</td><td>0.45</td><td>0.7</td></tr><tr><td>Dynamic Peg Insertion</td><td>0.45</td><td>0.8</td><td>0.9</td></tr><tr><td colspan=\"4\">Dual-Arm Task</td></tr><tr><td>Knife Pulling</td><td>0.6</td><td>0.8</td><td>0.9</td></tr></table>"
1077
+ },
1078
+ {
1079
+ "type": "table_footnote",
1080
+ "bbox": [
1081
+ 0.506,
1082
+ 0.245,
1083
+ 0.912,
1084
+ 0.291
1085
+ ],
1086
+ "angle": 0,
1087
+ "content": "TABLE II: Comparisons on 5 tasks with baselines. Our approach improves the performance on 5 tasks through multimodal sensing and pre-training."
1088
+ },
1089
+ {
1090
+ "type": "text",
1091
+ "bbox": [
1092
+ 0.505,
1093
+ 0.3,
1094
+ 0.912,
1095
+ 0.42
1096
+ ],
1097
+ "angle": 0,
1098
+ "content": "The results are presented in Table II. For each task, we conduct 20 trials with randomized initial conditions and report the average performance. The vision-only policy performs the worst across all five tasks, particularly in contact-rich tasks like test tube reorientation and scissor hanging, where tactile feedback is crucial for success. Across all tasks, pre-training enhances the performance, highlighting the importance of learning effective tactile representations."
1099
+ },
1100
+ {
1101
+ "type": "title",
1102
+ "bbox": [
1103
+ 0.508,
1104
+ 0.439,
1105
+ 0.642,
1106
+ 0.454
1107
+ ],
1108
+ "angle": 0,
1109
+ "content": "B. Failure Analysis"
1110
+ },
1111
+ {
1112
+ "type": "text",
1113
+ "bbox": [
1114
+ 0.505,
1115
+ 0.462,
1116
+ 0.912,
1117
+ 0.568
1118
+ ],
1119
+ "angle": 0,
1120
+ "content": "In the Orange placement task, the robot picks up an orange from a random position within a \\(50\\mathrm{cm} \\times 50\\mathrm{cm}\\) workspace and places it on a plate. Failures stem from table collisions, unstable placement, or motion planning errors despite correct object detection. In Dynamic peg insertion, the robot inserts a grasped peg into a moving hole. Vision-only methods often fail due to imprecise localization and alignment."
1121
+ },
1122
+ {
1123
+ "type": "text",
1124
+ "bbox": [
1125
+ 0.505,
1126
+ 0.569,
1127
+ 0.912,
1128
+ 0.75
1129
+ ],
1130
+ "angle": 0,
1131
+ "content": "In Test tube reorientation, the robot must pick up a tube from a random rack location and reorient it vertically, with success defined by less than \\(10^{\\circ}\\) orientation error. Failures include rack collisions, over-lifting, and incorrect final orientation. Scissor hanging requires picking up scissors and hanging them on a narrow hook, where common issues include misdetection, misalignment, and failure to release. In Knife pulling, a dual-arm policy reorients the knife with one arm while the other pulls it out of a holder. Failures often result from poor coordination, weak grasps, or incomplete pulling. Overall, vision-only policies struggle with contact-rich tasks, highlighting the limitations of unimodal sensing."
1132
+ },
1133
+ {
1134
+ "type": "title",
1135
+ "bbox": [
1136
+ 0.507,
1137
+ 0.768,
1138
+ 0.826,
1139
+ 0.784
1140
+ ],
1141
+ "angle": 0,
1142
+ "content": "C. Compliant Articulated Object Manipulation"
1143
+ },
1144
+ {
1145
+ "type": "text",
1146
+ "bbox": [
1147
+ 0.505,
1148
+ 0.79,
1149
+ 0.912,
1150
+ 0.927
1151
+ ],
1152
+ "angle": 0,
1153
+ "content": "To demonstrate the compliance capabilities of ViTaMIn, we designed a compliant-controlled articulated object manipulation task. The robotic arm needs to grasp a handle (connected to a force gauge) and rotate it 90 degrees to open a switch. During the rotation process, the arm must minimize axial forces to ensure smooth operation. We conduct 10 experiments for each condition and calculate the average forces. The results show that ViTaMIn achieves significantly lower average forces compared to using pure vision as input."
1154
+ }
1155
+ ],
1156
+ [
1157
+ {
1158
+ "type": "image_caption",
1159
+ "bbox": [
1160
+ 0.102,
1161
+ 0.07,
1162
+ 0.308,
1163
+ 0.086
1164
+ ],
1165
+ "angle": 0,
1166
+ "content": "Task 1. Orange Placement"
1167
+ },
1168
+ {
1169
+ "type": "image",
1170
+ "bbox": [
1171
+ 0.1,
1172
+ 0.087,
1173
+ 0.203,
1174
+ 0.184
1175
+ ],
1176
+ "angle": 0,
1177
+ "content": null
1178
+ },
1179
+ {
1180
+ "type": "image",
1181
+ "bbox": [
1182
+ 0.208,
1183
+ 0.089,
1184
+ 0.309,
1185
+ 0.184
1186
+ ],
1187
+ "angle": 0,
1188
+ "content": null
1189
+ },
1190
+ {
1191
+ "type": "image",
1192
+ "bbox": [
1193
+ 0.315,
1194
+ 0.089,
1195
+ 0.416,
1196
+ 0.184
1197
+ ],
1198
+ "angle": 0,
1199
+ "content": null
1200
+ },
1201
+ {
1202
+ "type": "image_caption",
1203
+ "bbox": [
1204
+ 0.419,
1205
+ 0.07,
1206
+ 0.658,
1207
+ 0.086
1208
+ ],
1209
+ "angle": 0,
1210
+ "content": "Task 2. Dynamic Peg Insertion"
1211
+ },
1212
+ {
1213
+ "type": "image",
1214
+ "bbox": [
1215
+ 0.42,
1216
+ 0.088,
1217
+ 0.543,
1218
+ 0.184
1219
+ ],
1220
+ "angle": 0,
1221
+ "content": null
1222
+ },
1223
+ {
1224
+ "type": "image",
1225
+ "bbox": [
1226
+ 0.543,
1227
+ 0.088,
1228
+ 0.655,
1229
+ 0.184
1230
+ ],
1231
+ "angle": 0,
1232
+ "content": null
1233
+ },
1234
+ {
1235
+ "type": "image",
1236
+ "bbox": [
1237
+ 0.655,
1238
+ 0.088,
1239
+ 0.777,
1240
+ 0.184
1241
+ ],
1242
+ "angle": 0,
1243
+ "content": null
1244
+ },
1245
+ {
1246
+ "type": "image",
1247
+ "bbox": [
1248
+ 0.778,
1249
+ 0.088,
1250
+ 0.9,
1251
+ 0.184
1252
+ ],
1253
+ "angle": 0,
1254
+ "content": null
1255
+ },
1256
+ {
1257
+ "type": "image_caption",
1258
+ "bbox": [
1259
+ 0.102,
1260
+ 0.185,
1261
+ 0.35,
1262
+ 0.198
1263
+ ],
1264
+ "angle": 0,
1265
+ "content": "Task 3. Test Tube Reorientation"
1266
+ },
1267
+ {
1268
+ "type": "image",
1269
+ "bbox": [
1270
+ 0.101,
1271
+ 0.198,
1272
+ 0.255,
1273
+ 0.279
1274
+ ],
1275
+ "angle": 0,
1276
+ "content": null
1277
+ },
1278
+ {
1279
+ "type": "image",
1280
+ "bbox": [
1281
+ 0.261,
1282
+ 0.198,
1283
+ 0.415,
1284
+ 0.279
1285
+ ],
1286
+ "angle": 0,
1287
+ "content": null
1288
+ },
1289
+ {
1290
+ "type": "image",
1291
+ "bbox": [
1292
+ 0.422,
1293
+ 0.198,
1294
+ 0.576,
1295
+ 0.295
1296
+ ],
1297
+ "angle": 0,
1298
+ "content": null
1299
+ },
1300
+ {
1301
+ "type": "image",
1302
+ "bbox": [
1303
+ 0.582,
1304
+ 0.198,
1305
+ 0.736,
1306
+ 0.295
1307
+ ],
1308
+ "angle": 0,
1309
+ "content": null
1310
+ },
1311
+ {
1312
+ "type": "image",
1313
+ "bbox": [
1314
+ 0.744,
1315
+ 0.198,
1316
+ 0.899,
1317
+ 0.295
1318
+ ],
1319
+ "angle": 0,
1320
+ "content": null
1321
+ },
1322
+ {
1323
+ "type": "image_caption",
1324
+ "bbox": [
1325
+ 0.229,
1326
+ 0.28,
1327
+ 0.285,
1328
+ 0.293
1329
+ ],
1330
+ "angle": 0,
1331
+ "content": "Stage I"
1332
+ },
1333
+ {
1334
+ "type": "image_caption",
1335
+ "bbox": [
1336
+ 0.102,
1337
+ 0.295,
1338
+ 0.293,
1339
+ 0.31
1340
+ ],
1341
+ "angle": 0,
1342
+ "content": "Task 4. Scissor Hanging"
1343
+ },
1344
+ {
1345
+ "type": "image",
1346
+ "bbox": [
1347
+ 0.101,
1348
+ 0.31,
1349
+ 0.256,
1350
+ 0.389
1351
+ ],
1352
+ "angle": 0,
1353
+ "content": null
1354
+ },
1355
+ {
1356
+ "type": "image",
1357
+ "bbox": [
1358
+ 0.261,
1359
+ 0.31,
1360
+ 0.415,
1361
+ 0.406
1362
+ ],
1363
+ "angle": 0,
1364
+ "content": null
1365
+ },
1366
+ {
1367
+ "type": "image",
1368
+ "bbox": [
1369
+ 0.422,
1370
+ 0.31,
1371
+ 0.576,
1372
+ 0.406
1373
+ ],
1374
+ "angle": 0,
1375
+ "content": null
1376
+ },
1377
+ {
1378
+ "type": "image",
1379
+ "bbox": [
1380
+ 0.582,
1381
+ 0.31,
1382
+ 0.736,
1383
+ 0.406
1384
+ ],
1385
+ "angle": 0,
1386
+ "content": null
1387
+ },
1388
+ {
1389
+ "type": "image",
1390
+ "bbox": [
1391
+ 0.744,
1392
+ 0.31,
1393
+ 0.898,
1394
+ 0.406
1395
+ ],
1396
+ "angle": 0,
1397
+ "content": null
1398
+ },
1399
+ {
1400
+ "type": "image_caption",
1401
+ "bbox": [
1402
+ 0.102,
1403
+ 0.409,
1404
+ 0.353,
1405
+ 0.424
1406
+ ],
1407
+ "angle": 0,
1408
+ "content": "Task 5. Knife Pulling (Bimanual)"
1409
+ },
1410
+ {
1411
+ "type": "image",
1412
+ "bbox": [
1413
+ 0.101,
1414
+ 0.424,
1415
+ 0.256,
1416
+ 0.519
1417
+ ],
1418
+ "angle": 0,
1419
+ "content": null
1420
+ },
1421
+ {
1422
+ "type": "image",
1423
+ "bbox": [
1424
+ 0.262,
1425
+ 0.424,
1426
+ 0.415,
1427
+ 0.519
1428
+ ],
1429
+ "angle": 0,
1430
+ "content": null
1431
+ },
1432
+ {
1433
+ "type": "image",
1434
+ "bbox": [
1435
+ 0.422,
1436
+ 0.424,
1437
+ 0.576,
1438
+ 0.519
1439
+ ],
1440
+ "angle": 0,
1441
+ "content": null
1442
+ },
1443
+ {
1444
+ "type": "image",
1445
+ "bbox": [
1446
+ 0.582,
1447
+ 0.424,
1448
+ 0.736,
1449
+ 0.519
1450
+ ],
1451
+ "angle": 0,
1452
+ "content": null
1453
+ },
1454
+ {
1455
+ "type": "image",
1456
+ "bbox": [
1457
+ 0.744,
1458
+ 0.424,
1459
+ 0.898,
1460
+ 0.519
1461
+ ],
1462
+ "angle": 0,
1463
+ "content": null
1464
+ },
1465
+ {
1466
+ "type": "image_caption",
1467
+ "bbox": [
1468
+ 0.082,
1469
+ 0.54,
1470
+ 0.913,
1471
+ 0.571
1472
+ ],
1473
+ "angle": 0,
1474
+ "content": "Fig. 5: We test ViTaMIn on 5 contact-rich manipulation tasks, including precise and dynamic insertion, object hanging with multimodal feedback, and transparent in-hand object manipulation."
1475
+ },
1476
+ {
1477
+ "type": "image",
1478
+ "bbox": [
1479
+ 0.099,
1480
+ 0.593,
1481
+ 0.283,
1482
+ 0.715
1483
+ ],
1484
+ "angle": 0,
1485
+ "content": null
1486
+ },
1487
+ {
1488
+ "type": "image_caption",
1489
+ "bbox": [
1490
+ 0.289,
1491
+ 0.595,
1492
+ 0.485,
1493
+ 0.605
1494
+ ],
1495
+ "angle": 0,
1496
+ "content": "Maximum Force Comparison: Vision vs. Ours"
1497
+ },
1498
+ {
1499
+ "type": "image",
1500
+ "bbox": [
1501
+ 0.289,
1502
+ 0.606,
1503
+ 0.461,
1504
+ 0.713
1505
+ ],
1506
+ "angle": 0,
1507
+ "content": null
1508
+ },
1509
+ {
1510
+ "type": "image",
1511
+ "bbox": [
1512
+ 0.517,
1513
+ 0.594,
1514
+ 0.66,
1515
+ 0.714
1516
+ ],
1517
+ "angle": 0,
1518
+ "content": null
1519
+ },
1520
+ {
1521
+ "type": "image_caption",
1522
+ "bbox": [
1523
+ 0.579,
1524
+ 0.715,
1525
+ 0.652,
1526
+ 0.726
1527
+ ],
1528
+ "angle": 0,
1529
+ "content": "Novel Objects"
1530
+ },
1531
+ {
1532
+ "type": "image",
1533
+ "bbox": [
1534
+ 0.662,
1535
+ 0.594,
1536
+ 0.899,
1537
+ 0.714
1538
+ ],
1539
+ "angle": 0,
1540
+ "content": null
1541
+ },
1542
+ {
1543
+ "type": "image_caption",
1544
+ "bbox": [
1545
+ 0.739,
1546
+ 0.715,
1547
+ 0.822,
1548
+ 0.727
1549
+ ],
1550
+ "angle": 0,
1551
+ "content": "Different Lighting"
1552
+ },
1553
+ {
1554
+ "type": "image_caption",
1555
+ "bbox": [
1556
+ 0.082,
1557
+ 0.731,
1558
+ 0.49,
1559
+ 0.777
1560
+ ],
1561
+ "angle": 0,
1562
+ "content": "Fig. 6: The robot needs to flip open a switch (fixed to a force gauge) by rotating it 90 degrees. During the rotation, the robot must minimize axial forces to ensure smooth operation."
1563
+ },
1564
+ {
1565
+ "type": "image_caption",
1566
+ "bbox": [
1567
+ 0.506,
1568
+ 0.737,
1569
+ 0.913,
1570
+ 0.782
1571
+ ],
1572
+ "angle": 0,
1573
+ "content": "Fig. 7: Showcase of novel objects and different lighting in the generalization tasks. The right columns demonstrate colored flashlight/high-power/normal lighting conditions."
1574
+ },
1575
+ {
1576
+ "type": "title",
1577
+ "bbox": [
1578
+ 0.084,
1579
+ 0.811,
1580
+ 0.222,
1581
+ 0.825
1582
+ ],
1583
+ "angle": 0,
1584
+ "content": "D. Ablation Studies"
1585
+ },
1586
+ {
1587
+ "type": "text",
1588
+ "bbox": [
1589
+ 0.082,
1590
+ 0.836,
1591
+ 0.49,
1592
+ 0.928
1593
+ ],
1594
+ "angle": 0,
1595
+ "content": "a) Data Efficiency: We evaluate the performance of policies trained on different amounts (25%, 50%, and 100%) of demonstrations. All the models are evaluated in 20 real-world trials with different initializations. For a more in-depth analysis, we calculate the success rates of each stage separately, as illustrated in Figure 8. With the pre-trained"
1596
+ },
1597
+ {
1598
+ "type": "text",
1599
+ "bbox": [
1600
+ 0.506,
1601
+ 0.818,
1602
+ 0.913,
1603
+ 0.877
1604
+ ],
1605
+ "angle": 0,
1606
+ "content": "tactile representations, our method can achieve consistently higher success rates on all the tasks across different amounts of data, and can even master the task with limited data (25%) for test tube reorientation."
1607
+ },
1608
+ {
1609
+ "type": "text",
1610
+ "bbox": [
1611
+ 0.506,
1612
+ 0.882,
1613
+ 0.914,
1614
+ 0.928
1615
+ ],
1616
+ "angle": 0,
1617
+ "content": "b) Training Efficiency: We further evaluate the policies trained with different numbers of epochs to understand its training efficiency under the same evaluation protocol. The"
1618
+ },
1619
+ {
1620
+ "type": "list",
1621
+ "bbox": [
1622
+ 0.506,
1623
+ 0.818,
1624
+ 0.914,
1625
+ 0.928
1626
+ ],
1627
+ "angle": 0,
1628
+ "content": null
1629
+ }
1630
+ ],
1631
+ [
1632
+ {
1633
+ "type": "image_caption",
1634
+ "bbox": [
1635
+ 0.234,
1636
+ 0.064,
1637
+ 0.364,
1638
+ 0.076
1639
+ ],
1640
+ "angle": 0,
1641
+ "content": "Tube Reorientation"
1642
+ },
1643
+ {
1644
+ "type": "image_caption",
1645
+ "bbox": [
1646
+ 0.659,
1647
+ 0.064,
1648
+ 0.771,
1649
+ 0.077
1650
+ ],
1651
+ "angle": 0,
1652
+ "content": "Scissor Hanging"
1653
+ },
1654
+ {
1655
+ "type": "image_caption",
1656
+ "bbox": [
1657
+ 0.179,
1658
+ 0.078,
1659
+ 0.221,
1660
+ 0.09
1661
+ ],
1662
+ "angle": 0,
1663
+ "content": "Stage I"
1664
+ },
1665
+ {
1666
+ "type": "image",
1667
+ "bbox": [
1668
+ 0.088,
1669
+ 0.092,
1670
+ 0.29,
1671
+ 0.253
1672
+ ],
1673
+ "angle": 0,
1674
+ "content": null
1675
+ },
1676
+ {
1677
+ "type": "image",
1678
+ "bbox": [
1679
+ 0.296,
1680
+ 0.079,
1681
+ 0.496,
1682
+ 0.253
1683
+ ],
1684
+ "angle": 0,
1685
+ "content": null
1686
+ },
1687
+ {
1688
+ "type": "image_caption",
1689
+ "bbox": [
1690
+ 0.593,
1691
+ 0.079,
1692
+ 0.635,
1693
+ 0.089
1694
+ ],
1695
+ "angle": 0,
1696
+ "content": "Stage I"
1697
+ },
1698
+ {
1699
+ "type": "image",
1700
+ "bbox": [
1701
+ 0.503,
1702
+ 0.092,
1703
+ 0.703,
1704
+ 0.253
1705
+ ],
1706
+ "angle": 0,
1707
+ "content": null
1708
+ },
1709
+ {
1710
+ "type": "image_caption",
1711
+ "bbox": [
1712
+ 0.8,
1713
+ 0.079,
1714
+ 0.845,
1715
+ 0.089
1716
+ ],
1717
+ "angle": 0,
1718
+ "content": "Stage II"
1719
+ },
1720
+ {
1721
+ "type": "image",
1722
+ "bbox": [
1723
+ 0.708,
1724
+ 0.089,
1725
+ 0.91,
1726
+ 0.253
1727
+ ],
1728
+ "angle": 0,
1729
+ "content": null
1730
+ },
1731
+ {
1732
+ "type": "image_caption",
1733
+ "bbox": [
1734
+ 0.082,
1735
+ 0.259,
1736
+ 0.915,
1737
+ 0.306
1738
+ ],
1739
+ "angle": 0,
1740
+ "content": "Fig. 8: Ablation study on the effect of pre-training on data efficiency. The performance of the policy improves as the quantity of data increases. After pre-training on the action-free, task-ignorant dataset, our method can achieve a high success rate even with limited data (25%)."
1741
+ },
1742
+ {
1743
+ "type": "image_caption",
1744
+ "bbox": [
1745
+ 0.232,
1746
+ 0.315,
1747
+ 0.362,
1748
+ 0.327
1749
+ ],
1750
+ "angle": 0,
1751
+ "content": "Tube Reorientation"
1752
+ },
1753
+ {
1754
+ "type": "image_caption",
1755
+ "bbox": [
1756
+ 0.178,
1757
+ 0.332,
1758
+ 0.22,
1759
+ 0.343
1760
+ ],
1761
+ "angle": 0,
1762
+ "content": "Stage I"
1763
+ },
1764
+ {
1765
+ "type": "image",
1766
+ "bbox": [
1767
+ 0.087,
1768
+ 0.344,
1769
+ 0.288,
1770
+ 0.505
1771
+ ],
1772
+ "angle": 0,
1773
+ "content": null
1774
+ },
1775
+ {
1776
+ "type": "image_caption",
1777
+ "bbox": [
1778
+ 0.385,
1779
+ 0.332,
1780
+ 0.43,
1781
+ 0.343
1782
+ ],
1783
+ "angle": 0,
1784
+ "content": "Stage II"
1785
+ },
1786
+ {
1787
+ "type": "image",
1788
+ "bbox": [
1789
+ 0.295,
1790
+ 0.344,
1791
+ 0.495,
1792
+ 0.505
1793
+ ],
1794
+ "angle": 0,
1795
+ "content": null
1796
+ },
1797
+ {
1798
+ "type": "image_caption",
1799
+ "bbox": [
1800
+ 0.659,
1801
+ 0.315,
1802
+ 0.771,
1803
+ 0.328
1804
+ ],
1805
+ "angle": 0,
1806
+ "content": "Scissor Hanging"
1807
+ },
1808
+ {
1809
+ "type": "image_caption",
1810
+ "bbox": [
1811
+ 0.591,
1812
+ 0.332,
1813
+ 0.635,
1814
+ 0.342
1815
+ ],
1816
+ "angle": 0,
1817
+ "content": "Stage I"
1818
+ },
1819
+ {
1820
+ "type": "image",
1821
+ "bbox": [
1822
+ 0.501,
1823
+ 0.343,
1824
+ 0.702,
1825
+ 0.505
1826
+ ],
1827
+ "angle": 0,
1828
+ "content": null
1829
+ },
1830
+ {
1831
+ "type": "image_caption",
1832
+ "bbox": [
1833
+ 0.8,
1834
+ 0.332,
1835
+ 0.844,
1836
+ 0.342
1837
+ ],
1838
+ "angle": 0,
1839
+ "content": "Stage II"
1840
+ },
1841
+ {
1842
+ "type": "image",
1843
+ "bbox": [
1844
+ 0.708,
1845
+ 0.343,
1846
+ 0.909,
1847
+ 0.504
1848
+ ],
1849
+ "angle": 0,
1850
+ "content": null
1851
+ },
1852
+ {
1853
+ "type": "image_caption",
1854
+ "bbox": [
1855
+ 0.082,
1856
+ 0.512,
1857
+ 0.913,
1858
+ 0.558
1859
+ ],
1860
+ "angle": 0,
1861
+ "content": "Fig. 9: Ablation study on the effect of pre-training on training efficiency. Policies with pre-training are able to learn to complete the first-stage task at a remarkably early stage of training (within 10 epochs). Additionally, when the policy network is pre-trained, the overall success rates increase more rapidly."
1862
+ },
1863
+ {
1864
+ "type": "text",
1865
+ "bbox": [
1866
+ 0.082,
1867
+ 0.577,
1868
+ 0.49,
1869
+ 0.639
1870
+ ],
1871
+ "angle": 0,
1872
+ "content": "results are illustrated in Figure 9. We also observe consistent task performance improvements with pre-training. The policy can complete the first stage of the task at a remarkably early training stage (within 10 epochs)."
1873
+ },
1874
+ {
1875
+ "type": "table",
1876
+ "bbox": [
1877
+ 0.085,
1878
+ 0.652,
1879
+ 0.49,
1880
+ 0.766
1881
+ ],
1882
+ "angle": 0,
1883
+ "content": "<table><tr><td>Task</td><td>Method</td><td>Original</td><td>Novel Objects</td><td>Different Lighting</td></tr><tr><td rowspan=\"3\">Orange Placement</td><td>Vision</td><td>0.85</td><td>0.7</td><td>0.55</td></tr><tr><td>Ours w/o Pre-training</td><td>0.9</td><td>0.8</td><td>0.6</td></tr><tr><td>Ours</td><td>1.0</td><td>1.0</td><td>0.85</td></tr><tr><td rowspan=\"3\">Scissor Hanging</td><td>Vision</td><td>0.0</td><td>0.0</td><td>0.0</td></tr><tr><td>Ours w/o Pre-training</td><td>0.45</td><td>0.4</td><td>0.4</td></tr><tr><td>Ours</td><td>0.7</td><td>0.7</td><td>0.5</td></tr></table>"
1884
+ },
1885
+ {
1886
+ "type": "table_caption",
1887
+ "bbox": [
1888
+ 0.082,
1889
+ 0.771,
1890
+ 0.489,
1891
+ 0.831
1892
+ ],
1893
+ "angle": 0,
1894
+ "content": "TABLE III: Generalization under different objects and scenes. The results demonstrate that our multi-modal policy is more robust to novel objects and different lighting conditions."
1895
+ },
1896
+ {
1897
+ "type": "title",
1898
+ "bbox": [
1899
+ 0.084,
1900
+ 0.862,
1901
+ 0.285,
1902
+ 0.877
1903
+ ],
1904
+ "angle": 0,
1905
+ "content": "E. Generalization Capability"
1906
+ },
1907
+ {
1908
+ "type": "text",
1909
+ "bbox": [
1910
+ 0.082,
1911
+ 0.881,
1912
+ 0.49,
1913
+ 0.927
1914
+ ],
1915
+ "angle": 0,
1916
+ "content": "We also evaluate our policy's generalizability to unseen objects and environments. As shown in Figure 7, beyond the training orange and scissor, we introduce 6 unseen small"
1917
+ },
1918
+ {
1919
+ "type": "text",
1920
+ "bbox": [
1921
+ 0.505,
1922
+ 0.577,
1923
+ 0.913,
1924
+ 0.682
1925
+ ],
1926
+ "angle": 0,
1927
+ "content": "objects and 3 unseen scissors to assess object generalization. Additionally, we modify lighting conditions by increasing brightness and introducing colored disco ball lighting. Table III presents results on the tasks of orange placement and scissor hanging. Our method with pre-training achieves consistent better performance across various generalization settings."
1928
+ },
1929
+ {
1930
+ "type": "title",
1931
+ "bbox": [
1932
+ 0.645,
1933
+ 0.695,
1934
+ 0.776,
1935
+ 0.708
1936
+ ],
1937
+ "angle": 0,
1938
+ "content": "VI. CONCLUSION"
1939
+ },
1940
+ {
1941
+ "type": "text",
1942
+ "bbox": [
1943
+ 0.505,
1944
+ 0.715,
1945
+ 0.914,
1946
+ 0.881
1947
+ ],
1948
+ "angle": 0,
1949
+ "content": "In this paper, we present ViTaMIn, a portable visuo-tactile manipulation interface designed for efficiently collecting high-quality demonstrations by capturing both visual and tactile signals. Furthermore, ViTaMIn introduces an effective pre-training strategy that leverages all the collected action-free data to learn a robust and generalizable tactile representation through multimodal contrastive learning. Our approach significantly outperforms vision-only policies across 5 real-world contact-rich manipulation tasks and demonstrates improved data efficiency, robustness, and generalizability with pre-trained visuo-tactile representations."
1950
+ },
1951
+ {
1952
+ "type": "text",
1953
+ "bbox": [
1954
+ 0.506,
1955
+ 0.882,
1956
+ 0.914,
1957
+ 0.927
1958
+ ],
1959
+ "angle": 0,
1960
+ "content": "Our method primarily focuses on fixed-base single-arm and dual-arm tasks with parallel-jaw grippers. While this setup is suitable for a wide range of manipulation tasks,"
1961
+ }
1962
+ ],
1963
+ [
1964
+ {
1965
+ "type": "text",
1966
+ "bbox": [
1967
+ 0.083,
1968
+ 0.067,
1969
+ 0.49,
1970
+ 0.112
1971
+ ],
1972
+ "angle": 0,
1973
+ "content": "future work could extend our approach to dexterous hands, enabling richer and more versatile manipulation skills that better approximate human-level dexterity."
1974
+ },
1975
+ {
1976
+ "type": "title",
1977
+ "bbox": [
1978
+ 0.239,
1979
+ 0.122,
1980
+ 0.335,
1981
+ 0.134
1982
+ ],
1983
+ "angle": 0,
1984
+ "content": "REFERENCES"
1985
+ },
1986
+ {
1987
+ "type": "ref_text",
1988
+ "bbox": [
1989
+ 0.093,
1990
+ 0.143,
1991
+ 0.49,
1992
+ 0.178
1993
+ ],
1994
+ "angle": 0,
1995
+ "content": "[1] S. Levine, C. Finn, T. Darrell, and P. Abbeel, \"End-to-end training of deep visuomotor policies,\" Journal of Machine Learning Research, vol. 17, no. 39, pp. 1-40, 2016."
1996
+ },
1997
+ {
1998
+ "type": "ref_text",
1999
+ "bbox": [
2000
+ 0.093,
2001
+ 0.178,
2002
+ 0.49,
2003
+ 0.222
2004
+ ],
2005
+ "angle": 0,
2006
+ "content": "[2] A. Brohan, N. Brown, J. Carbajal, Y. Chebotar, J. Dabis, C. Finn, K. Gopalakrishnan, K. Hausman, A. Herzog, J. Hsu et al., \"Rt-1: Robotics transformer for real-world control at scale,\" arXiv preprint arXiv:2212.06817, 2022."
2007
+ },
2008
+ {
2009
+ "type": "ref_text",
2010
+ "bbox": [
2011
+ 0.093,
2012
+ 0.223,
2013
+ 0.49,
2014
+ 0.268
2015
+ ],
2016
+ "angle": 0,
2017
+ "content": "[3] A. Brohan, N. Brown, J. Carbajal, Y. Chebotar, X. Chen, K. Choromanski, T. Ding, D. Driess, A. Dubey, C. Finn et al., \"Rt-2: Vision-language-action models transfer web knowledge to robotic control,\" arXiv preprint arXiv:2307.15818, 2023."
2018
+ },
2019
+ {
2020
+ "type": "ref_text",
2021
+ "bbox": [
2022
+ 0.093,
2023
+ 0.268,
2024
+ 0.49,
2025
+ 0.302
2026
+ ],
2027
+ "angle": 0,
2028
+ "content": "[4] C. Chi, S. Feng, Y. Du, Z. Xu, E. Cousineau, B. Burchfiel, and S. Song, \"Diffusion policy: Visuomotor policy learning via action diffusion,\" arXiv preprint arXiv:2303.04137, 2023."
2029
+ },
2030
+ {
2031
+ "type": "ref_text",
2032
+ "bbox": [
2033
+ 0.093,
2034
+ 0.302,
2035
+ 0.49,
2036
+ 0.348
2037
+ ],
2038
+ "angle": 0,
2039
+ "content": "[5] J. Aldaco, T. Armstrong, R. Baruch, J. Bingham, S. Chan, K. Draper, D. Dwibedi, C. Finn, P. Florence, S. Goodrich et al., \"Aloha 2: An enhanced low-cost hardware for bimanual teleoperation,\" arXiv preprint arXiv:2405.02292, 2024."
2040
+ },
2041
+ {
2042
+ "type": "ref_text",
2043
+ "bbox": [
2044
+ 0.093,
2045
+ 0.348,
2046
+ 0.49,
2047
+ 0.381
2048
+ ],
2049
+ "angle": 0,
2050
+ "content": "[6] Z. Fu, T. Z. Zhao, and C. Finn, \"Mobile aloha: Learning bimanual mobile manipulation with low-cost whole-body teleoperation,\" arXiv preprint arXiv:2401.02117, 2024."
2051
+ },
2052
+ {
2053
+ "type": "ref_text",
2054
+ "bbox": [
2055
+ 0.093,
2056
+ 0.381,
2057
+ 0.49,
2058
+ 0.415
2059
+ ],
2060
+ "angle": 0,
2061
+ "content": "[7] T. Z. Zhao, V. Kumar, S. Levine, and C. Finn, “Learning fine-grained bimanual manipulation with low-cost hardware,” arXiv preprint arXiv:2304.13705, 2023."
2062
+ },
2063
+ {
2064
+ "type": "ref_text",
2065
+ "bbox": [
2066
+ 0.093,
2067
+ 0.416,
2068
+ 0.49,
2069
+ 0.461
2070
+ ],
2071
+ "angle": 0,
2072
+ "content": "[8] H. Fang, H.-S. Fang, Y. Wang, J. Ren, J. Chen, R. Zhang, W. Wang, and C. Lu, \"Airexo: Low-cost exoskeletons for learning whole-arm manipulation in the wild,\" in 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2024, pp. 15031-15038."
2073
+ },
2074
+ {
2075
+ "type": "ref_text",
2076
+ "bbox": [
2077
+ 0.093,
2078
+ 0.461,
2079
+ 0.49,
2080
+ 0.495
2081
+ ],
2082
+ "angle": 0,
2083
+ "content": "[9] X. Cheng, J. Li, S. Yang, G. Yang, and X. Wang, “Open-television: Teleoperation with immersive active visual feedback,” arXiv preprint arXiv:2407.01512, 2024."
2084
+ },
2085
+ {
2086
+ "type": "ref_text",
2087
+ "bbox": [
2088
+ 0.087,
2089
+ 0.495,
2090
+ 0.49,
2091
+ 0.529
2092
+ ],
2093
+ "angle": 0,
2094
+ "content": "[10] Y. Qin, W. Yang, B. Huang, K. Van Wyk, H. Su, X. Wang, Y.-W. Chao, and D. Fox, \"Anyteleop: A general vision-based dexterous robot arm-hand teleoperation system,\" arXiv preprint arXiv:2307.04577, 2023."
2095
+ },
2096
+ {
2097
+ "type": "ref_text",
2098
+ "bbox": [
2099
+ 0.087,
2100
+ 0.529,
2101
+ 0.49,
2102
+ 0.585
2103
+ ],
2104
+ "angle": 0,
2105
+ "content": "[11] F. Sanches, G. Gao, N. Elangovan, R. V. Godoy, J. Chapman, K. Wang, P. Jarvis, and M. Liarokapis, \"Scalable. intuitive human to robot skill transfer with wearable human machine interfaces: On complex, dexterous tasks,\" in 2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2023, pp. 6318-6325."
2106
+ },
2107
+ {
2108
+ "type": "ref_text",
2109
+ "bbox": [
2110
+ 0.087,
2111
+ 0.586,
2112
+ 0.49,
2113
+ 0.62
2114
+ ],
2115
+ "angle": 0,
2116
+ "content": "[12] K. Doshi, Y. Huang, and S. Coros, \"On hand-held grippers and the morphological gap in human manipulation demonstration,\" arXiv preprint arXiv:2311.01832, 2023."
2117
+ },
2118
+ {
2119
+ "type": "ref_text",
2120
+ "bbox": [
2121
+ 0.087,
2122
+ 0.62,
2123
+ 0.49,
2124
+ 0.653
2125
+ ],
2126
+ "angle": 0,
2127
+ "content": "[13] N. M. M. Shafiullah, A. Rai, H. Etukuru, Y. Liu, I. Misra, S. Chintala, and L. Pinto, \"On bringing robots home,\" arXiv preprint arXiv:2311.16098, 2023."
2128
+ },
2129
+ {
2130
+ "type": "ref_text",
2131
+ "bbox": [
2132
+ 0.087,
2133
+ 0.654,
2134
+ 0.49,
2135
+ 0.698
2136
+ ],
2137
+ "angle": 0,
2138
+ "content": "[14] C. Chi, Z. Xu, C. Pan, E. Cousineau, B. Burchfiel, S. Feng, R. Tedrake, and S. Song, \"Universal manipulation interface: In-the-wild robot teaching without in-the-wild robots,\" arXiv preprint arXiv:2402.10329, 2024."
2139
+ },
2140
+ {
2141
+ "type": "ref_text",
2142
+ "bbox": [
2143
+ 0.087,
2144
+ 0.699,
2145
+ 0.49,
2146
+ 0.734
2147
+ ],
2148
+ "angle": 0,
2149
+ "content": "[15] S. Liang, Y. Guan, J. Xu, H. Qian, X. Zhang, D. Wu, W. Ding, and R. Chen, \"Alltact fin ray: A compliant robot gripper with omnidirectional tactile sensing,\" arXiv preprint arXiv:2504.18064, 2025."
2150
+ },
2151
+ {
2152
+ "type": "ref_text",
2153
+ "bbox": [
2154
+ 0.087,
2155
+ 0.734,
2156
+ 0.49,
2157
+ 0.779
2158
+ ],
2159
+ "angle": 0,
2160
+ "content": "[16] S. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta, “R3m: A universal visual representation for robot manipulation,” in Proceedings of The 6th Conference on Robot Learning (CoRL), vol. 205. PMLR, 2022, pp. 892–909."
2161
+ },
2162
+ {
2163
+ "type": "ref_text",
2164
+ "bbox": [
2165
+ 0.087,
2166
+ 0.779,
2167
+ 0.49,
2168
+ 0.824
2169
+ ],
2170
+ "angle": 0,
2171
+ "content": "[17] Y. J. Ma, S. Sodhani, D. Jayaraman, O. Bastani, V. Kumar, and A. Zhang, “VIP: Towards universal visual reward and representation via value-implicit pre-training,” in The Eleventh International Conference on Learning Representations, 2023."
2172
+ },
2173
+ {
2174
+ "type": "ref_text",
2175
+ "bbox": [
2176
+ 0.087,
2177
+ 0.824,
2178
+ 0.49,
2179
+ 0.846
2180
+ ],
2181
+ "angle": 0,
2182
+ "content": "[18] T. Xiao, I. Radosavovic, T. Darrell, and J. Malik, “Masked visual pretraining for motor control,” arXiv:2203.06173, 2022."
2183
+ },
2184
+ {
2185
+ "type": "ref_text",
2186
+ "bbox": [
2187
+ 0.087,
2188
+ 0.846,
2189
+ 0.49,
2190
+ 0.881
2191
+ ],
2192
+ "angle": 0,
2193
+ "content": "[19] I. Radosavovic, T. Xiao, S. James, P. Abbeel, J. Malik, and T. Darrell, “Real-world robot learning with masked visual pre-training,” in Conference on Robot Learning. PMLR, 2023, pp. 416–426."
2194
+ },
2195
+ {
2196
+ "type": "ref_text",
2197
+ "bbox": [
2198
+ 0.087,
2199
+ 0.881,
2200
+ 0.49,
2201
+ 0.927
2202
+ ],
2203
+ "angle": 0,
2204
+ "content": "[20] A. Majumdar, K. Yadav, S. Arnaud, J. Ma, C. Chen, S. Silwal, A. Jain, V.-P. Berges, T. Wu, J. Vakil et al., \"Where are we in the search for an artificial visual cortex for embodied intelligence?\" Advances in Neural Information Processing Systems, vol. 36, pp. 655-677, 2023."
2205
+ },
2206
+ {
2207
+ "type": "list",
2208
+ "bbox": [
2209
+ 0.087,
2210
+ 0.143,
2211
+ 0.49,
2212
+ 0.927
2213
+ ],
2214
+ "angle": 0,
2215
+ "content": null
2216
+ },
2217
+ {
2218
+ "type": "ref_text",
2219
+ "bbox": [
2220
+ 0.51,
2221
+ 0.068,
2222
+ 0.913,
2223
+ 0.114
2224
+ ],
2225
+ "angle": 0,
2226
+ "content": "[21] K. He, X. Chen, S. Xie, Y. Li, P. Dollar, and R. Girshick, “Masked autoencoders are scalable vision learners,” in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2022, pp. 16000-16009."
2227
+ },
2228
+ {
2229
+ "type": "ref_text",
2230
+ "bbox": [
2231
+ 0.51,
2232
+ 0.115,
2233
+ 0.913,
2234
+ 0.16
2235
+ ],
2236
+ "angle": 0,
2237
+ "content": "[22] A. Radford, J. W. Kim, C. Hallacy, A. Ramesh, G. Goh, S. Agarwal, G. Sastry, A. Askell, P. Mishkin, J. Clark et al., \"Learning transferable visual models from natural language supervision,\" in International conference on machine learning. PMLR, 2021, pp. 8748-8763."
2238
+ },
2239
+ {
2240
+ "type": "ref_text",
2241
+ "bbox": [
2242
+ 0.51,
2243
+ 0.16,
2244
+ 0.913,
2245
+ 0.205
2246
+ ],
2247
+ "angle": 0,
2248
+ "content": "[23] K. Hosoda, K. Igarashi, and M. Asada, \"Adaptive hybrid visual servoing/force control in unknown environment,\" in Proceedings of IEEE/RSJ International Conference on Intelligent Robots and Systems. IROS'96, vol. 3. IEEE, 1996, pp. 1097-1103."
2249
+ },
2250
+ {
2251
+ "type": "ref_text",
2252
+ "bbox": [
2253
+ 0.51,
2254
+ 0.205,
2255
+ 0.913,
2256
+ 0.25
2257
+ ],
2258
+ "angle": 0,
2259
+ "content": "[24] H. Nakagaki, K. Kitagaki, T. Ogasawara, and H. Tsukune, \"Study of deformation and insertion tasks of a flexible wire,\" in Proceedings of International Conference on Robotics and Automation, vol. 3. IEEE, 1997, pp. 2397-2402."
2260
+ },
2261
+ {
2262
+ "type": "ref_text",
2263
+ "bbox": [
2264
+ 0.51,
2265
+ 0.25,
2266
+ 0.913,
2267
+ 0.274
2268
+ ],
2269
+ "angle": 0,
2270
+ "content": "[25] P. Miller and P. Leibowitz, \"Integration of vision, force and tactile sensing for grasping,\" Int. J. Intell. Mach, vol. 4, pp. 129-149, 1999."
2271
+ },
2272
+ {
2273
+ "type": "ref_text",
2274
+ "bbox": [
2275
+ 0.51,
2276
+ 0.274,
2277
+ 0.913,
2278
+ 0.307
2279
+ ],
2280
+ "angle": 0,
2281
+ "content": "[26] H. Qi, B. Yi, S. Suresh, M. Lambeta, Y. Ma, R. Calandra, and J. Malik, \"General in-hand object rotation with vision and touch,\" in Conference on Robot Learning. PMLR, 2023, pp. 2549-2564."
2282
+ },
2283
+ {
2284
+ "type": "ref_text",
2285
+ "bbox": [
2286
+ 0.51,
2287
+ 0.307,
2288
+ 0.913,
2289
+ 0.341
2290
+ ],
2291
+ "angle": 0,
2292
+ "content": "[27] S. Li, H. Yu, W. Ding, H. Liu, L. Ye, C. Xia, X. Wang, and X.-P. Zhang, “Visual-tactile fusion for transparent object grasping in complex backgrounds,” IEEE Transactions on Robotics, 2023."
2293
+ },
2294
+ {
2295
+ "type": "ref_text",
2296
+ "bbox": [
2297
+ 0.51,
2298
+ 0.341,
2299
+ 0.913,
2300
+ 0.386
2301
+ ],
2302
+ "angle": 0,
2303
+ "content": "[28] Y. Han, K. Yu, R. Batra, N. Boyd, C. Mehta, T. Zhao, Y. She, S. Hutchinson, and Y. Zhao, “Learning generalizable vision-tactile robotic grasping strategy for deformable objects via transformer,” IEEE/ASME Transactions on Mechatronics, 2024."
2304
+ },
2305
+ {
2306
+ "type": "ref_text",
2307
+ "bbox": [
2308
+ 0.51,
2309
+ 0.386,
2310
+ 0.913,
2311
+ 0.42
2312
+ ],
2313
+ "angle": 0,
2314
+ "content": "[29] R. Bhirangi, V. Pattabiraman, E. Erciyes, Y. Cao, T. Hellebrekers, and L. Pinto, “Anyskin: Plug-and-play skin sensing for robotic touch,” arXiv preprint arXiv:2409.08276, 2024."
2315
+ },
2316
+ {
2317
+ "type": "ref_text",
2318
+ "bbox": [
2319
+ 0.51,
2320
+ 0.421,
2321
+ 0.913,
2322
+ 0.453
2323
+ ],
2324
+ "angle": 0,
2325
+ "content": "[30] V. Pattabiraman, Y. Cao, S. Haldar, L. Pinto, and R. Bhirangi, “Learning precise, contact-rich manipulation through uncalibrated tactile skins,” arXiv preprint arXiv:2410.17246, 2024."
2326
+ },
2327
+ {
2328
+ "type": "ref_text",
2329
+ "bbox": [
2330
+ 0.51,
2331
+ 0.453,
2332
+ 0.913,
2333
+ 0.489
2334
+ ],
2335
+ "angle": 0,
2336
+ "content": "[31] Liu, Guan, Jia, Wu, Liu, Wang, Liang, Chen, Zhang, Song et al., \"Fastumi: A scalable and hardware-independent universal manipulation interface with dataset,\" arXiv e-prints, pp. arXiv-2409, 2024."
2337
+ },
2338
+ {
2339
+ "type": "ref_text",
2340
+ "bbox": [
2341
+ 0.51,
2342
+ 0.489,
2343
+ 0.913,
2344
+ 0.522
2345
+ ],
2346
+ "angle": 0,
2347
+ "content": "[32] Liu, Chi, Cousineau, Kuppuswamy, Burchfiel, and Song, \"Maniwav: Learning robot manipulation from in-the-wild audio-visual data,\" in CoRL, 2024."
2348
+ },
2349
+ {
2350
+ "type": "ref_text",
2351
+ "bbox": [
2352
+ 0.51,
2353
+ 0.522,
2354
+ 0.913,
2355
+ 0.579
2356
+ ],
2357
+ "angle": 0,
2358
+ "content": "[33] C. Sferrazza, Y. Seo, H. Liu, Y. Lee, and P. Abbeel, \"The power of the senses: Generalizable manipulation from vision and touch through masked multimodal learning,\" in 2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2024, pp. 9698-9705."
2359
+ },
2360
+ {
2361
+ "type": "ref_text",
2362
+ "bbox": [
2363
+ 0.51,
2364
+ 0.579,
2365
+ 0.913,
2366
+ 0.613
2367
+ ],
2368
+ "angle": 0,
2369
+ "content": "[34] Z. Xu, R. Uppuluri, X. Zhang, C. Fitch, P. G. Crandall, W. Shou, D. Wang, and Y. She, \"UniT: Unified tactile representation for robot learning,\" 2024. [Online]. Available: https://arxiv.org/abs/2408.06481"
2370
+ },
2371
+ {
2372
+ "type": "ref_text",
2373
+ "bbox": [
2374
+ 0.51,
2375
+ 0.613,
2376
+ 0.913,
2377
+ 0.636
2378
+ ],
2379
+ "angle": 0,
2380
+ "content": "[35] X. Zhang and et al., “Fusing multimodal sensory data for robotic perception,” IEEE Transactions on Robotics, 2022."
2381
+ },
2382
+ {
2383
+ "type": "ref_text",
2384
+ "bbox": [
2385
+ 0.51,
2386
+ 0.636,
2387
+ 0.913,
2388
+ 0.669
2389
+ ],
2390
+ "angle": 0,
2391
+ "content": "[36] A. Nagabandi, G. Kahn, S. Levine, and C. Finn, \"Deep reinforcement learning for vision-based robotic control with multimodal inputs,\" in Conference on Robot Learning (CoRL), 2020."
2392
+ },
2393
+ {
2394
+ "type": "ref_text",
2395
+ "bbox": [
2396
+ 0.51,
2397
+ 0.669,
2398
+ 0.913,
2399
+ 0.727
2400
+ ],
2401
+ "angle": 0,
2402
+ "content": "[37] L. Fu, G. Datta, H. Huang, W. C.-H. Panitch, J. Drake, J. Ortiz, M. Mukadam, M. Lambeta, R. Calandra, and K. Goldberg, \"A touch, vision, and language dataset for multimodal alignment,\" in Forty-first International Conference on Machine Learning, 2024. [Online]. Available: https://openreview.net/forum?id=tFEOOH9eH0"
2403
+ },
2404
+ {
2405
+ "type": "ref_text",
2406
+ "bbox": [
2407
+ 0.51,
2408
+ 0.727,
2409
+ 0.913,
2410
+ 0.771
2411
+ ],
2412
+ "angle": 0,
2413
+ "content": "[38] F. Yang, C. Feng, Z. Chen, H. Park, D. Wang, Y. Dou, Z. Zeng, X. Chen, R. Gangopadhyay, A. Owens, and A. Wong, \"Binding touch to everything: Learning unified multimodal tactile representations,\" arXiv:2401.18084, 2024."
2414
+ },
2415
+ {
2416
+ "type": "ref_text",
2417
+ "bbox": [
2418
+ 0.51,
2419
+ 0.771,
2420
+ 0.913,
2421
+ 0.805
2422
+ ],
2423
+ "angle": 0,
2424
+ "content": "[39] A. George, S. Gano, P. Katragadda, and A. Farimani, “Vital pretraining: Visuo-tactile pretraining for tactile and non-tactile manipulation policies,” arXiv preprint arXiv:2403.11898, 2024."
2425
+ },
2426
+ {
2427
+ "type": "ref_text",
2428
+ "bbox": [
2429
+ 0.51,
2430
+ 0.805,
2431
+ 0.913,
2432
+ 0.863
2433
+ ],
2434
+ "angle": 0,
2435
+ "content": "[40] O. Ronneberger, P. Fischer, and T. Brox, “U-net: Convolutional networks for biomedical image segmentation,” in Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference, Munich, Germany, October 5-9, 2015, proceedings, part III 18. Springer, 2015, pp. 234-241."
2436
+ },
2437
+ {
2438
+ "type": "ref_text",
2439
+ "bbox": [
2440
+ 0.51,
2441
+ 0.863,
2442
+ 0.913,
2443
+ 0.885
2444
+ ],
2445
+ "angle": 0,
2446
+ "content": "[41] J. Song, C. Meng, and S. Ermon, “Denoising diffusion implicit models,” arXiv preprint arXiv:2010.02502, 2020."
2447
+ },
2448
+ {
2449
+ "type": "list",
2450
+ "bbox": [
2451
+ 0.51,
2452
+ 0.068,
2453
+ 0.913,
2454
+ 0.885
2455
+ ],
2456
+ "angle": 0,
2457
+ "content": null
2458
+ }
2459
+ ]
2460
+ ]
data/2025/2504_06xxx/2504.06156/fdf7ba1e-e3e9-411b-99db-249127183d1d_origin.pdf ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:9cf4ffb32b8c332a6005f607064abc7cedea7b129b5afa50f833807501d927fb
3
+ size 4293227
data/2025/2504_06xxx/2504.06156/full.md ADDED
@@ -0,0 +1,359 @@
1
+ # ViTaMIn: Learning Contact-Rich Tasks Through Robot-Free Visuo-Tactile Manipulation Interface
2
+
3
+ Fangchen Liu\*,2, Chuanyu Li\*,1, Yihua Qin\*, Jing Xu\*, Pieter Abbeel\*, Rui Chen\*,1
4
+
5
+ $^{1}$ Tsinghua University, $^{2}$ University of California, Berkeley
6
+
7
+ * Equal contribution, † Corresponding author
8
+
9
+ https://chuanyune.github.io/ViTaMIN_page
10
+
11
+ ![](images/2b3f5d9d3857ac4bc064a25dd8f846478edc8d3d4acf76854100e1224e09f2e4.jpg)
12
+ Demonstrations
13
+
14
+ ![](images/c76f0cbfcc0157a2fd0ea97bdb5758c11c6fb5d419f1af561dacdd15498d9a0f.jpg)
15
+
16
+ ![](images/a7c742b59dc541a88a3273a380335073e194e7444064c0f55bc20ace7ec82882.jpg)
17
+
18
+ ![](images/4d0acbed64c8854765c2e9c80aa8c7e2abb13d31d7b57777b70dad4d3e6a981a.jpg)
19
+ Real-World Tasks
20
+
21
+ ![](images/c67f8d30bed7d5e78cd491b955ffbcc6ad2890cd3244a3c6ba5862292c5ec665.jpg)
22
+
23
+ ![](images/3c482c5b0658f8f048d2ebe3c95889ab774372793412480b02727eafd0de414c.jpg)
24
+
25
+ ![](images/56817bcfa892e233432daadca6888f4cef9f11efeeb525983604173fded63e17.jpg)
26
+ Fig. 1: ViTaMIn overview. Our system comprises a portable data collection device that integrates visual and tactile sensing, a multimodal representation learning framework for fusing visual and tactile information, and demonstrations of various contact-rich manipulation tasks. This system facilitates efficient collection of manipulation data without requiring complex robot setups. (*Backgrounds in the images are blurred.)
27
+
28
+ ![](images/d3b473412a83550b31125af282bc865a7484b0a11b3fe4b684aa09dfa0912134.jpg)
29
+
30
+ ![](images/bafd32d27c34e33981964ae485e4dfba8fcf84c249727c41a5a866d6121787e8.jpg)
31
+
32
+ ![](images/4bd4ba61306b1059468cbd4655f3e43c112065002e75360f7ec6cebdc5cb4ee4.jpg)
33
+
34
+ Abstract—Tactile information plays a crucial role for humans and robots to interact effectively with their environment, particularly for tasks requiring the understanding of contact properties. Solving such dexterous manipulation tasks often relies on imitation learning from demonstration datasets, which are typically collected via teleoperation systems and often demand substantial time and effort. To address these challenges, we present ViTaMIn, an embodiment-free manipulation interface that integrates visual and tactile sensing into a hand-held gripper, enabling multi-modality data collection without the need for teleoperation. Our design employs a compliant Fin Ray gripper with tactile sensing, allowing operators to perceive force feedback during manipulation for more intuitive operation. Additionally, we propose a multi-modal representation learning strategy to obtain pre-trained tactile representations, improving data efficiency and policy robustness. Experiments on 5 contact-rich manipulation tasks demonstrate that our system is more scalable, efficient, and effective than baseline methods.
35
+
36
+ # I. INTRODUCTION
37
+
38
+ Humans rely on both visual and tactile modalities to perform a diverse range of manipulation tasks in daily
39
+
40
+ life. For instance, when inserting a plug into a socket or tightening a screw, vision helps with identifying and aligning components, while tactile signals enable precise force control during contact. This seamless integration of vision and touch enhances human dexterity, particularly in tasks that require contact-rich control, handling visual occlusions, or performing in-hand manipulations.
41
+
42
+ Recent progress in learning from demonstrations [1], [2], [3], [4] has shown significant potential for advancing general-purpose robots, enabling them to efficiently acquire complex skills from human demonstrations. Consequently, developing systems to collect high-quality demonstration data has been a recent key focus. Prior works have explored real-world data collection methods, including joint-mapped devices and exoskeletons [5], [6], [7], [8], and vision-based teleoperation frameworks [9], [10]. Nevertheless, these techniques require real-time teleoperation of a physical robot during data collection, which constrains efficiency and flexibility. In contrast, portable devices [11], [12], [13], [14] present
43
+
44
+ a more scalable and cost-effective alternative to collect demonstrations without teleoperation. Moreover, they can be seamlessly integrated into various embodiments, providing a more flexible data collection approach. However, these portable devices primarily focus on capturing vision-only demonstration data, limiting their usage for contact-rich and dexterous manipulation tasks where tactile feedback plays a crucial role.
45
+
46
+ In this work, we aim to address both the challenge of efficient data collection and the need for learning more dexterous tasks using visuo-tactile demonstrations. To this end, we introduce ViTaMIn, a novel and effective visuotactile manipulation interface designed to capture high-quality demonstrations with enhanced efficiency and flexibility. Unlike conventional approaches that rely on rigid tactile sensors, ViTaMIn leverages an omnidirectional compliant Fin Ray gripper with customized tactile sensing, which can detect contact from all directions as an expressive tactile signal for robot manipulation. We integrate the tactile-aware Fin Ray gripper [15] with UMI [14], enhancing the collected data with rich multimodal information and improving policy learning performance while maintaining the core advantages of portable devices. Additionally, our system enables operators to perceive force feedback during manipulation, facilitating more intuitive and seamless operation.
47
+
48
+ Pre-trained visual representations have shown improved performance in robotic manipulation [16], [17], [18], [19], [20], benefiting from large-scale visual pre-training. To fully leverage the visuo-tactile datasets collected with ViTaMIn, we adopt a multimodal representation learning strategy to pre-train tactile representations, enhancing the robustness and generalizability of our sensor-based policies. Our pretraining objective integrates masked autoencoding [21] and contrastive learning for multimodal alignment [22], where future image observations are aligned with masked current images and tactile signals. Through extensive experiments on five challenging contact-rich manipulation tasks, our visuotactile policy, enhanced by multimodal pre-training, exhibits superior data and training efficiency while demonstrating strong generalization across diverse objects and environmental conditions.
49
+
50
+ In conclusion, our contributions are:
51
+
52
+ - ViTaMIn provides a portable and scalable visuo-tactile data collection system.
53
+ - ViTaMIn proposes an effective multimodal representation learning strategy, which significantly improves the data efficiency, robustness and generalization capabilities.
54
+ - ViTaMIn achieves superior performance over vision-only baselines across five manipulation tasks by leveraging visuo-tactile demonstrations.
55
+
56
+ # II. RELATED WORK
57
+
58
+ # A. Visuo-Tactile Manipulation
59
+
60
+ Tactile sensing is essential for robotic manipulation as it provides signals about physical contact in addition to visual observation. Early works [23], [24], [25] use RGB cameras
61
+
62
+ and force/torque sensors to infer contact status for making decisions. However, the information from force/torque sensors is low-dimensional and insufficient for more dexterous manipulation tasks.
63
+
64
+ More recently, vision-based tactile sensors have gained attention for their ability to capture high-resolution contact information [26], [27], [28]. Despite these advances, the rigid design of these sensors restricts the compliance of the end effector, whereas alternative approaches like uncalibrated tactile skins [29] and plug-and-play sensing systems [30] have improved adaptability and flexibility. In our work, we use a Fin-Ray-shaped compliant and all-directional tactile sensor, which can detect contacts from all directions and also support safe and robust contact-rich manipulation.
65
+
66
+ # B. Data Collection System for Robot Manipulation
67
+
68
+ Recent advancements in learning from demonstrations [1], [2], [3], [4] have shown promising results in developing general-purpose robots. Therefore, efficiently collecting high-quality demonstrations has become a key research focus.
69
+
70
+ Recent works have focused on efficient real-world data collection systems, such as joint-mapped devices [5], [6], [7], exoskeletons [8], or vision-based systems [9], [10]. However, these approaches require a physical robot during data collection, which limits efficiency and flexibility. In contrast, portable devices [11], [12], [13], [14], [31], [32] offer several advantages: they are low-cost, flexible, and do not depend on a specific physical robot. Additionally, they can be seamlessly integrated into various embodiments and provide a more user-friendly experience for data collection. We extend the UMI data collection system [14] by integrating tactile sensing, which enriches the demonstrations with multimodal information, improving policy learning performance while preserving the key benefits of portable devices.
71
+
72
+ # C. Multimodal Pre-training for Robotics
73
+
74
+ Pre-trained visual representations have shown improved performance and generalization in robotic manipulation [16], [17], [18], [19], [20] with self-supervised learning techniques [21], [22]. This can be extended to multimodal representation learning [33], [34], [35] by integrating visual, tactile, and proprioceptive modalities, allowing robots to perceive object properties beyond visual appearance.
75
+
76
+ Aligning heterogeneous sensory modalities is a key challenge in multimodal learning, as different sensors have varying data structures, sampling rates, and noise characteristics [36]. Inspired by CLIP [22], researchers have developed contrastive learning techniques to align tactile and visual representations for manipulation tasks [37], [38].
77
+
78
+ Our work extends these efforts by introducing masked contrastive pre-training, where the tactile encoder learns to reconstruct future occluded visual information, further enhancing multimodal understanding.
79
+
80
+ ![](images/eaeb262e8e8de2cc9c2c5f6bd946acaa4ad560a3e6122d16fbd8e4f0a08cfc1a.jpg)
81
+ Fig. 2: ViTaMIn's hardware system overview. The handheld device integrates a GoPro camera, two tactile sensors and a synchronization camera to align visual and tactile information. During data collection, the two tactile sensors and the synchronization camera are connected to the Raspberry Pi in the backbox. The total weight of the gripper is approximately $1960\mathrm{g}$ . Left: Side view of the ViTaMIn system. Right: Top view of the ViTaMIn system with the backbox cover removed.
82
+
83
+ ![](images/4dc08d4271758a4aabed3f5e31b55c1d21ebc7da0ae86c7b523043f7db6cbe93.jpg)
84
+
85
+ # III. VISUO-TACTILE MANIPULATION INTERFACE
86
+
87
+ # A. System Overview
88
+
89
+ We design a handheld gripper to collect visuo-tactile demonstrations without requiring teleoperation on physical robots. Our gripper design is illustrated in Figure 2. The gripper consists of an RGB fisheye wrist camera (GoPro 10) for image observation, two AllTact fingers [15], a synchronization camera for observation temporal alignment, and a Raspberry Pi 5 with a battery for data recording.
90
+
91
+ Image Observation To capture comprehensive visual information, we employ a GoPro 10 camera with a $155^{\circ}$ field-of-view (FoV) fisheye lens. The camera operates at 60 FPS with a resolution of $2704 \times 2028$ pixels and is mounted at the end-effector of our ViTaMIn to ensure consistent visual coverage of the manipulation workspace during demonstration collection and policy deployment.
92
+
93
+ Tactile Observation In UMI [14], two TPU-printed Fin Ray grippers are used to provide compliance and enhance grasping stability. However, these grippers lack tactile sensing capabilities. In our ViTaMIn, we employ AllTact [15], a compliant Fin Ray gripper with omnidirectional tactile sensing ability. During manipulation, the embedded camera in AllTact captures both the global deformation of the entire finger and the local deformation of the contact surface as a single image. The tactile sensor operates at 30 FPS with a resolution of $640 \times 480$ pixels.
94
+
95
+ Other Observations To enhance the robustness and accuracy of SLAM, we utilize the IMU data provided by the GoPro, which is synchronized with the visual observations. Gripper width is also critical for precise manipulation. Following UMI [14], we attach two ArUco markers to the
96
+
97
+ gripper's fingers and compute the gripper width from the visual observations.
98
+
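+ As a rough illustration of this width computation, the sketch below uses OpenCV's aruco module; the marker dictionary, the left/right marker IDs, and the pixel-to-metre scale are placeholder assumptions (a real pipeline would map distances through the calibrated fisheye intrinsics), so it is an outline rather than the released implementation.
+
+ ```python
+ import cv2
+ import numpy as np
+ from typing import Optional
+
+ LEFT_ID, RIGHT_ID = 0, 1          # hypothetical marker IDs on the two fingers
+ METERS_PER_PIXEL = 1.5e-4         # placeholder scale at the finger plane
+
+ aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
+ detector = cv2.aruco.ArucoDetector(aruco_dict, cv2.aruco.DetectorParameters())
+
+ def gripper_width(frame: np.ndarray) -> Optional[float]:
+     """Estimate the finger separation (in metres) from one wrist-camera frame."""
+     corners, ids, _ = detector.detectMarkers(frame)
+     if ids is None:
+         return None
+     centers = {int(i): c.reshape(4, 2).mean(axis=0)
+                for i, c in zip(ids.flatten(), corners)}
+     if LEFT_ID not in centers or RIGHT_ID not in centers:
+         return None
+     pixel_dist = np.linalg.norm(centers[LEFT_ID] - centers[RIGHT_ID])
+     return float(pixel_dist * METERS_PER_PIXEL)
+ ```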
99
+ # B. Data Processing
100
+
101
+ Sensor Synchronization To synchronize the tactile sensors and GoPro camera, we use an additional low-cost camera which is connected to the Raspberry Pi and is naturally synchronized with the tactile sensors. Before data collection, both the GoPro and the synchronization camera simultaneously capture a sequence of ArUco markers displayed on a computer screen. The ArUco IDs are detected in both video streams, and when an identical ID appears in both, the corresponding timestamps are used for synchronization. Since the frame rates of the GoPro and the synchronization camera are $60\mathrm{Hz}$ and $30\mathrm{Hz}$ respectively, the temporal alignment error is below $1/60 + 1/30 = 0.05$ seconds, which is sufficient for our tasks. Once the two videos are synchronized, they are cropped by the starting and ending signals triggered by the control button.
102
+
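+ A minimal sketch of this alignment is given below, assuming the ArUco IDs visible in each frame have already been detected for both streams; the helper names (`estimate_offset`, `align_tactile_to_gopro`) are illustrative, not the authors' code.
+
+ ```python
+ import numpy as np
+
+ GOPRO_FPS, SYNC_FPS = 60.0, 30.0  # frame rates stated above
+
+ def estimate_offset(gopro_ids, sync_ids):
+     """Start time (s) of the sync-camera stream relative to the GoPro stream,
+     using the first ArUco ID observed in both streams."""
+     for sync_idx, marker in enumerate(sync_ids):
+         if marker is None:
+             continue
+         for gopro_idx, g_marker in enumerate(gopro_ids):
+             if g_marker == marker:
+                 return gopro_idx / GOPRO_FPS - sync_idx / SYNC_FPS
+     raise RuntimeError("no common ArUco ID found in the two streams")
+
+ def align_tactile_to_gopro(num_tactile_frames, offset):
+     """Index of the temporally nearest GoPro frame for each tactile/sync frame."""
+     t_tactile = np.arange(num_tactile_frames) / SYNC_FPS + offset
+     return np.round(t_tactile * GOPRO_FPS).astype(int)
+ ```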
103
+ Data Collection and Filtering We adopt a similar data collection pipeline to UMI [14]. We also utilize Simultaneous Localization and Mapping (SLAM) to capture the end-effector trajectories. While SLAM may fail in low-texture environments, it achieves a success rate of approximately $80\%$ in our tasks, allowing the majority of collected data to be used for imitation learning.
104
+
105
+ # IV. VISUO-TACTILE POLICY LEARNING
106
+
107
+ # A. Visuo-Tactile Representation Learning
108
+
109
+ UMI uses a pre-trained CLIP [22] encoder to extract visual representations. However, the tactile images in ViTaMIn are very different from CLIP's training distribution, which
110
+
111
+ can lead to suboptimal representations. To tackle this, we pre-train an effective tactile encoder using the collected action-free datasets, which does not rely on SLAM success.
112
+
113
+ Taking the tactile image in Figure 3 as an example, we want the encoder to capture the essential contact properties, such as the object's in-hand pose and the gripper's deformation. These signals are complementary to pixel observations and are crucial for making future decisions.
114
+
115
+ To achieve this, we employ a multimodal contrastive learning approach as illustrated in Figure 3. Given the current masked image $\tilde{I}_V^k$ and current full tactile observation $I_T^k$ of step $k$, we want the combination of $\tilde{I}_V^k$ and $I_T^k$ to align with the future full image observation $I_V^{k + 1}$ in the CLIP embedding space. The intuition behind this is to make the tactile encoder focus on the contact information to predict future images based on the current corrupted image.
116
+
117
+ ![](images/a9d59bd91c7d90305bf0363e93a7675127b8a317c02f04e3257aa1fddbcebcc7.jpg)
118
+
119
+ ![](images/dfac30919f736ceede8d6fefd2d847d22d809cb8f4e923239b4620efb4776ebf.jpg)
120
+ Fig. 3: The illustration of the multimodal contrastive representation pre-training phase. The tactile encoder is trained to capture complementary information to predict the missing content for the future image.
121
+
122
+ To ensure stable training, we freeze the CLIP image encoder $\phi_V(\cdot)$ and only fine-tune the tactile encoder $\phi_T(\cdot)$. We first obtain the tactile embedding $T_{k}$ from $\phi_T(I_T^k)$, and $V_{k}$ from $\phi_V(\tilde{I}_V^k)$. These embeddings are concatenated and passed through a fully connected projection layer, mapping them back to the original 512-dimensional CLIP embedding space as a fused feature $F_{k}$. Finally, we train the tactile encoder using the standard CLIP loss on $F_{k}$ and $V_{k + 1}$:
125
+
126
+ $$
127
+ \mathcal{L}_{\mathrm{CLIP}} = \frac{1}{2}\left(\mathcal{L}_{\mathrm{f-v}} + \mathcal{L}_{\mathrm{v-f}}\right) \tag{1}
128
+ $$
129
+
130
+ where
131
+
132
+ $$
133
+ \mathcal{L}_{\mathrm{v-f}} = -\frac{1}{N}\sum_{i=1}^{N}\log\frac{\exp\left(\cos\left(V_{i+1}, F_{i}\right)/\tau\right)}{\sum_{j=1}^{N}\exp\left(\cos\left(V_{i+1}, F_{j}\right)/\tau\right)} \tag{2}
134
+ $$
135
+
136
+ $$
137
+ \mathcal{L}_{\mathrm{f-v}} = -\frac{1}{N}\sum_{i=1}^{N}\log\frac{\exp\left(\cos\left(F_{i}, V_{i+1}\right)/\tau\right)}{\sum_{j=1}^{N}\exp\left(\cos\left(F_{i}, V_{j+1}\right)/\tau\right)} \tag{3}
138
+ $$
139
+
140
+ Here, $\tau$ is a learnable temperature parameter.
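+ 
+ A condensed PyTorch sketch of Eqs. (1)-(3) is given below. The encoder forward passes are abstracted into pre-computed embeddings, and the module and variable names are illustrative rather than our exact implementation.
+ 
+ ```python
+ # Sketch: fuse tactile and masked-image embeddings, project back to the 512-d
+ # CLIP space, and apply the symmetric contrastive loss against V_{k+1}.
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+ 
+ class FusionHead(nn.Module):
+     def __init__(self, dim=512):
+         super().__init__()
+         self.proj = nn.Linear(2 * dim, dim)              # concat -> CLIP space
+         self.log_tau = nn.Parameter(torch.tensor(0.0))   # learnable temperature
+ 
+     def forward(self, tactile_emb, masked_img_emb):
+         return self.proj(torch.cat([tactile_emb, masked_img_emb], dim=-1))
+ 
+ def clip_loss(fused, next_img_emb, log_tau):
+     """Symmetric contrastive loss between fused features F_k and V_{k+1}."""
+     f = F.normalize(fused, dim=-1)
+     v = F.normalize(next_img_emb, dim=-1)
+     logits = f @ v.t() / log_tau.exp()               # cosine similarities / tau
+     targets = torch.arange(f.size(0), device=f.device)
+     loss_fv = F.cross_entropy(logits, targets)       # Eq. (3), L_{f-v}
+     loss_vf = F.cross_entropy(logits.t(), targets)   # Eq. (2), L_{v-f}
+     return 0.5 * (loss_fv + loss_vf)
+ ```
+ 
+ In training, `fused` corresponds to $F_k$ computed from $\phi_T(I_T^k)$ and $\phi_V(\tilde{I}_V^k)$, `next_img_emb` corresponds to $V_{k+1}$, and only the tactile encoder and the fusion head receive gradients.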
141
+
142
+ Different from [39], which directly applies the CLIP loss to time-aligned visuo-tactile images, we instead fuse the tactile observation with a masked current image to predict the future image. We make this choice for two main reasons. First, in [39], the tactile representation is conditioned on proprioceptive states, which are unavailable in our dataset before SLAM succeeds. Second, since different tasks may have varying images but similar tactile observations, fusing a masked current image helps the network learn a more expressive tactile representation; without sufficient masking, the alignment becomes trivial.
143
+
144
+ After pre-training, we train a Diffusion Policy [4] on the SLAM-filtered data. Following [4], we use a U-Net [40] as the noise prediction network and apply DDIM [41] to accelerate the inference for action prediction.
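+ 
+ For concreteness, a condensed sketch of DDIM-based action sampling is shown below using the `diffusers` scheduler; the conditional U-Net interface (`noise_pred_net`, `global_cond`) and the horizon, action dimension, and step counts are illustrative placeholders, not our exact configuration.
+ 
+ ```python
+ # Sketch: sample an action sequence with a conditional U-Net denoiser and DDIM.
+ import torch
+ from diffusers import DDIMScheduler
+ 
+ @torch.no_grad()
+ def sample_actions(noise_pred_net, obs_cond, horizon=16, action_dim=10,
+                    num_train_steps=100, num_inference_steps=16, device="cuda"):
+     scheduler = DDIMScheduler(num_train_timesteps=num_train_steps)
+     scheduler.set_timesteps(num_inference_steps)
+     # Start from Gaussian noise over the whole action horizon.
+     actions = torch.randn(1, horizon, action_dim, device=device)
+     for t in scheduler.timesteps:
+         noise_pred = noise_pred_net(actions, t, global_cond=obs_cond)
+         actions = scheduler.step(noise_pred, t, actions).prev_sample
+     return actions
+ ```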
145
+
146
+ ![](images/66ceb8edcaeb309670260252767cb93432455bba815e9dc4a4ca645ef94a855b.jpg)
+ Fig. 4: Hardware setup for policy deployment.
+ 
+ # V. EXPERIMENTS
149
+
150
+ # A. Experimental Setup
151
+
152
+ Hardware Figure 4 shows the policy deployment setup. Our system consists of a Rokae xMate ER3PRO robotic arm equipped with a PGI-140-80-W-S parallel gripper. The 7-DOF robotic arm provides flexible manipulation capabilities, while the gripper offers an 8 cm stroke from the fully open to the fully closed position. The system is implemented with ROS Noetic on Ubuntu 20.04. The control loop operates at $10\mathrm{Hz}$, with separate threads handling robot control, visual sensing, and tactile sensing. The system architecture is designed to minimize latency while maintaining reliable real-time performance.
153
+
154
+ Similar to UMI [14], our system compensates for various sources of latency in the perception-action loop through predictive buffering and timestamp-based synchronization between the visual and tactile feedback streams. The policy predicts a sequence of 16 consecutive actions at each inference step, of which 10 are executed according to our temporal compensation strategy.
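+ 
+ A minimal sketch of this receding-horizon execution is shown below: each predicted action carries an absolute target time, stale steps are dropped, and the remaining steps of the 10-step chunk are sent to the 10 Hz controller. The latency constant and function names are illustrative assumptions.
+ 
+ ```python
+ # Sketch: latency-compensated execution of a predicted action chunk.
+ import time
+ 
+ CONTROL_DT = 0.1      # 10 Hz control loop
+ EXECUTE_STEPS = 10    # execute 10 of the 16 predicted steps, then replan
+ LATENCY = 0.05        # assumed sensing + inference latency budget (seconds)
+ 
+ def execute_chunk(actions, obs_timestamp, send_command):
+     """`actions` is the prediction made for observations captured at
+     `obs_timestamp` (monotonic clock); `send_command` pushes one command."""
+     for i, action in enumerate(actions[:EXECUTE_STEPS]):
+         target_time = obs_timestamp + LATENCY + (i + 1) * CONTROL_DT
+         if target_time < time.monotonic():
+             continue              # too late; skip stale steps
+         time.sleep(max(0.0, target_time - time.monotonic()))
+         send_command(action)
+ ```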
157
+
158
+ Manipulation Tasks As shown in Figure 5, we propose diverse contact-rich manipulation tasks to evaluate the effectiveness of ViTaMIn. These tasks are specifically crafted to demonstrate the following key capabilities: (1) Robust pick-and-place of diverse objects, including fragile and small objects; (2) Dexterous manipulation, such as in-hand reorientation; (3) Task success determination, allowing the robot to repeat attempts until success; (4) Dynamic and precise manipulation.
159
+
160
+ We design the following 5 manipulation tasks:
161
+
162
+ - Orange Placement: Pick up a fragile orange from a randomized position and place it on a randomly positioned plate.
163
+ - Dynamic Peg Insertion: Grasp a peg, approach a hole moving at a constant speed of $10\mathrm{mm / s}$, and precisely insert the peg into the hole.
164
+ - Test Tube Reorientation: Grasp a transparent test tube from a shelf and adjust its pose through extrinsic dexterity based on tactile feedback.
165
+ - Scissor Hanging: Grasp a pair of scissors and hang them on a hook. Adjust the pose and keep attempting until it succeeds.
166
+ - Dual-Arm Knife Pulling: The left arm first grasps a knife from a cup and orients it horizontally. The right arm then grasps the knife and pulls it out with a constrained prismatic motion. This task requires tactile feedback to grasp the thin object and perform the correct pulling motion.
167
+
168
+ TABLE I: Data Collection Statistics for Different Tasks
169
+
170
+ <table><tr><td>Task</td><td>Raw Data</td><td>Valid Data*</td><td>Avg. Length</td></tr><tr><td>Orange Placement</td><td>87</td><td>73</td><td>435</td></tr><tr><td>Dynamic Peg Insertion</td><td>201</td><td>141</td><td>321</td></tr><tr><td>Test Tube Reorientation</td><td>150</td><td>125</td><td>619</td></tr><tr><td>Scissor Hanging</td><td>172</td><td>137</td><td>642</td></tr><tr><td>Knife Pulling (Left)</td><td>188</td><td>131</td><td>403</td></tr><tr><td>Knife Pulling (Right)</td><td>180</td><td>134</td><td>254</td></tr></table>
171
+
172
+ *Valid data refers to demonstrations with successful SLAM tracking
173
+
174
+ Table I shows the statistics of the demonstration data. We collect demonstrations for both single-arm and dual-arm manipulation tasks. For single-arm tasks, we gather between 87 and 201 raw demonstrations per task depending on task difficulty, with successful SLAM tracking achieved in approximately $80\%$ of the trajectories. The dual-arm knife pulling task requires coordinated motion between both arms, with similar data collection volumes but slightly different average demonstration lengths for the left and right arms.
175
+
176
+ We compare our approach against the following methods: (1) Vision: the policy takes only the visual observation from the GoPro camera, encoded by the pre-trained CLIP model (identical to the original UMI [14] paper); (2) Ours w/o Pre-training: this baseline simply concatenates the visual and tactile features from separate CLIP ViT-B/16 encoders and is fine-tuned with behavior cloning.
179
+
180
+ <table><tr><td>Task</td><td>Vision</td><td>w/o Pre-training</td><td>Ours</td></tr><tr><td colspan="4">Single-Arm Tasks</td></tr><tr><td>Orange placement</td><td>0.85</td><td>0.9</td><td>1</td></tr><tr><td>Test Tube Reorientation</td><td>0.4</td><td>0.7</td><td>0.9</td></tr><tr><td>Scissor Hanging</td><td>0.1</td><td>0.45</td><td>0.7</td></tr><tr><td>Dynamic Peg Insertion</td><td>0.45</td><td>0.8</td><td>0.9</td></tr><tr><td colspan="4">Dual-Arm Task</td></tr><tr><td>Knife Pulling</td><td>0.6</td><td>0.8</td><td>0.9</td></tr></table>
181
+
182
+ TABLE II: Comparison with baselines on 5 tasks. Our approach improves performance on all 5 tasks through multimodal sensing and pre-training.
183
+
184
+ The results are presented in Table II. For each task, we conduct 20 trials with randomized initial conditions and report the average success rate. The vision-only policy performs the worst across all five tasks, particularly in contact-rich tasks like test tube reorientation and scissor hanging, where tactile feedback is crucial for success. Across all tasks, pre-training improves performance, highlighting the importance of learning effective tactile representations.
185
+
186
+ # B. Failure Analysis
187
+
188
+ In the Orange placement task, the robot picks up an orange from a random position within a $50\mathrm{cm} \times 50\mathrm{cm}$ workspace and places it on a plate. Failures stem from table collisions, unstable placement, or motion planning errors despite correct object detection. In Dynamic peg insertion, the robot inserts a grasped peg into a moving hole. Vision-only methods often fail due to imprecise localization and alignment.
189
+
190
+ In Test tube reorientation, the robot must pick up a tube from a random rack location and reorient it vertically, with success defined by less than $10^{\circ}$ orientation error. Failures include rack collisions, over-lifting, and incorrect final orientation. Scissor hanging requires picking up scissors and hanging them on a narrow hook, where common issues include misdetection, misalignment, and failure to release. In Knife pulling, a dual-arm policy reorients the knife with one arm while the other pulls it out of a holder. Failures often result from poor coordination, weak grasps, or incomplete pulling. Overall, vision-only policies struggle with contact-rich tasks, highlighting the limitations of unimodal sensing.
191
+
192
+ # C. Compliant Articulated Object Manipulation
193
+
194
+ To demonstrate the compliance capabilities of ViTaMIn, we design a compliance-controlled articulated-object manipulation task. The robotic arm needs to grasp a handle (connected to a force gauge) and rotate it 90 degrees to open a switch. During the rotation, the arm must minimize axial forces to ensure smooth operation. We conduct 10 experiments for each condition and report the average forces. The results show that ViTaMIn achieves significantly lower average forces than the vision-only policy.
195
+
196
+ ![](images/ff5d1182fc87c0d6043cdc51c2604c67d7dd26e1c42f06dddaec7cbdb5b6fff2.jpg)
197
+ Task 1. Orange Placement
198
+
199
+ ![](images/512cac75dfa1a461ecf945a565d55ee173bc79056728365d8bbabcdff20497f1.jpg)
200
+
201
+ ![](images/8ffaab5c8e792fcf6faad355dd789f84bac2ae3b63606a4eac5401f023e77b6c.jpg)
202
+
203
+ ![](images/145b0ff91ccc913134917d84f27ec9288b5cc0e7a4e6ccca1027d09a1eb5522f.jpg)
204
+ Task 2. Dynamic Peg Insertion
205
+
206
+ ![](images/c1639d459b6280e0d616c0b61ca5027d7312dc27193311d49fc82c533e5e3614.jpg)
207
+
208
+ ![](images/ed9be295452bb2b609707999c0d7ce53274abf084feefa571723224f2e442fef.jpg)
209
+
210
+ ![](images/5a72a662adc1c1ba0bfd167d4f4af69842d450e5c116e4daa0ea7c7387c99b10.jpg)
211
+
212
+ ![](images/2eb0d57179fb5c021a773de17ac4443e984ccc352e0dc3e5d824297b87a58824.jpg)
213
+ Task 3. Test Tube Reorientation
214
+ Stage I
215
+
216
+ ![](images/d9d86998bcb7355813c2ec3771bc9be86562ca597b9726d312f20d51db3d0713.jpg)
217
+
218
+ ![](images/ad52e3e1fffe97ce097f5acd4e97f9d17c9f5a5940fed40ac9f7275aebb29b3d.jpg)
219
+
220
+ ![](images/6e0490d1099f2e64b741b9dd1f95e5ae865168537ab2ca60ee6fd37e533eacdb.jpg)
221
+
222
+ ![](images/41e27ceecac4c9235a249ac029abee0e7fc30124d187cd6077bf037a65e93fd4.jpg)
223
+
224
+ ![](images/00f85838005136fdca15b5fe4bb78ee82f7340dea6f3e5a9b2e65bd76936a94c.jpg)
225
+ Task 4. Scissor Hanging
226
+
227
+ ![](images/67d2c9e7967010da05736086d3a0fca8814cf40da4a222bb5e6737e56f406e1e.jpg)
228
+
229
+ ![](images/7445085cbc517fd3cd93fbb3a2bd9f6db8580e6c84d599414a68d9405529f3b0.jpg)
230
+
231
+ ![](images/5e5f3b7ca4ef1ce5b7a8ef47b005c756ed1fc850e06dd280623fc0528eb1a89d.jpg)
232
+
233
+ ![](images/6fd39913d482519aa7b6f7a9a91a5fd878297b9f18bd5d9df7c2afe47a5f641f.jpg)
234
+
235
+ ![](images/99c0a32a6a7ff267400458289cc0fbf487ba3fbe191ce416aad8bac7243d1355.jpg)
236
+ Task 5. Knife Pulling (Bimanual)
237
+ Fig. 5: We test ViTaMIn on 5 contact-rich manipulation tasks, including precise and dynamic insertion, object hanging with multimodal feedback, and transparent in-hand object manipulation.
238
+
239
+ ![](images/3ce4769ee6b1bde42a17eee61d58d48bb5431619637f3963972110f5eafc4433.jpg)
240
+
241
+ ![](images/d26514e671a1cabe35615727660c95426ee9c01df609ba34f6407ddd70a97fc4.jpg)
242
+
243
+ ![](images/45234ed6e963ca64aacca0aeebac163393943ee8f94523b029c03b09faa1b450.jpg)
244
+
245
+ ![](images/b83fcb747acca716d74ef5c58839df6114300388b8d0e6ee2f936782a0e64c43.jpg)
246
+
247
+ ![](images/ce63d3a7c6dc3449fc08f0a14ed53567368fbb562685332477dc26bd0e8072a3.jpg)
248
+ Fig. 6: The robot needs to flip open a switch (fixed to a force gauge) by rotating it 90 degrees. During the rotation, the robot must minimize axial forces to ensure smooth operation.
249
+
250
+ ![](images/c6980fdc3266252190f984cdc73c9a2bab1431c731bc03e678d86b4b54eeb2be.jpg)
251
+ Maximum Force Comparison: Vision vs. Ours
252
+
253
+ ![](images/94c8d59ccbd0cb0de5a5b3206f5de9360ecd718d47d6d0d3ca249decbf1ffc98.jpg)
254
+ Novel Objects
255
+ Fig. 7: Showcase of novel objects and different lighting in the generalization tasks. The right columns demonstrate colored flashlight/high-power/normal lighting conditions.
256
+
257
+ ![](images/c7ca6a14e745ecd4ed221682030a13fc964ba2820d452880f11c7800ce40073a.jpg)
258
+ Different Lighting
259
+
260
+ # D. Ablation Studies
261
+
262
+ a) Data Efficiency: We evaluate the performance of policies trained on different amounts (25%, 50%, and 100%) of demonstrations. All models are evaluated in 20 real-world trials with different initializations. For a more in-depth analysis, we calculate the success rate of each stage separately, as illustrated in Figure 8. With the pre-trained tactile representations, our method achieves consistently higher success rates on all tasks across different amounts of data, and can even master the test tube reorientation task with limited data (25%).
265
+ b) Training Efficiency: We further evaluate policies trained for different numbers of epochs to assess training efficiency under the same evaluation protocol.
266
+
267
+ ![](images/827a914e84c52597b4da5d6a8593513b04ba1f5b5f8f15324c3d07e8a040904d.jpg)
268
+ Stage I
269
+
270
+ ![](images/0d76e53a8c6ce1241acff7eeef8a2fcf95cd1b9821ab719fc786e6b8b40a1ec2.jpg)
271
+ Tube Reorientation
272
+
273
+ ![](images/a285013001ef5630297fdcd051b65b0ca161561fb3c9bcad3b85e8b0d8170ccc.jpg)
274
+ Stage I
275
+ Fig. 8: Ablation study on the effect of pre-training on data efficiency. The performance of the policy improves as the quantity of data increases. After pre-training on the action-free, task-ignorant dataset, our method can achieve a high success rate even with limited data (25%).
276
+
277
+ ![](images/4b5c950d25456db7d2d940404eb103086fe309d96067fca9478d24545376c057.jpg)
278
+ Scissor Hanging
279
+ Stage II
280
+
281
+ ![](images/d8dcc28916f7268aa5ffb965d055ef3eb9daf033798758dca22c4625f78d2473.jpg)
282
+ Tube Reorientation
283
+ Stage I
284
+ Fig. 9: Ablation study on the effect of pre-training on training efficiency. Policies with pre-training are able to learn to complete the first-stage task at a remarkably early stage of training (within 10 epochs). Additionally, when the policy network is pre-trained, the overall success rates increase more rapidly.
285
+
286
+ ![](images/20f9fa3b2ed644154b3075e0c925e50cd264ba0e5235b21294c9f2bd1334e309.jpg)
287
+ Stage II
288
+
289
+ ![](images/bad020bfaf946a16dcc60d68034d145033f7b7475137443f7b26c1c2e7ca1978.jpg)
290
+ Scissor Hanging
291
+ Stage I
292
+
293
+ ![](images/3e856f4f3818833cac9099e64bb7f58858c535a342f9000c483c2c4ffb29e705.jpg)
294
+ Stage II
295
+
296
+ The results are illustrated in Figure 9. We again observe consistent task performance improvements with pre-training: the policy can complete the first stage of the task remarkably early in training (within 10 epochs).
297
+
298
+ <table><tr><td>Task</td><td>Method</td><td>Original</td><td>Novel Objects</td><td>Different Lighting</td></tr><tr><td rowspan="3">Orange Placement</td><td>Vision</td><td>0.85</td><td>0.7</td><td>0.55</td></tr><tr><td>Ours w/o Pre-training</td><td>0.9</td><td>0.8</td><td>0.6</td></tr><tr><td>Ours</td><td>1.0</td><td>1.0</td><td>0.85</td></tr><tr><td rowspan="3">Scissor Hanging</td><td>Vision</td><td>0.0</td><td>0.0</td><td>0.0</td></tr><tr><td>Ours w/o Pre-training</td><td>0.45</td><td>0.4</td><td>0.4</td></tr><tr><td>Ours</td><td>0.7</td><td>0.7</td><td>0.5</td></tr></table>
299
+
300
+ TABLE III: Generalization under different objects and scenes. The results demonstrate that our multi-modal policy is more robust to novel objects and different lighting conditions.
301
+
302
+ # E. Generalization Capability
303
+
304
+ We also evaluate our policy's generalizability to unseen objects and environments. As shown in Figure 7, beyond the training orange and scissors, we introduce 6 unseen small objects and 3 unseen pairs of scissors to assess object generalization. Additionally, we modify the lighting conditions by increasing the brightness and introducing colored disco-ball lighting. Table III presents results on the orange placement and scissor hanging tasks. Our method with pre-training achieves consistently better performance across the various generalization settings.
307
+
308
+ # VI. CONCLUSION
309
+
310
+ In this paper, we present ViTaMIn, a portable visuo-tactile manipulation interface designed for efficiently collecting high-quality demonstrations by capturing both visual and tactile signals. Furthermore, ViTaMIn introduces an effective pre-training strategy that leverages all the collected action-free data to learn a robust and generalizable tactile representation through multimodal contrastive learning. Our approach significantly outperforms vision-only policies across 5 real-world contact-rich manipulation tasks and demonstrates improved data efficiency, robustness, and generalizability with pre-trained visuo-tactile representations.
311
+
312
+ Our method primarily focuses on fixed-base single-arm and dual-arm tasks with parallel-jaw grippers. While this setup is suitable for a wide range of manipulation tasks, future work could extend our approach to dexterous hands, enabling richer and more versatile manipulation skills that better approximate human-level dexterity.
315
+
316
+ # REFERENCES
317
+
318
+ [1] S. Levine, C. Finn, T. Darrell, and P. Abbeel, "End-to-end training of deep visuomotor policies," Journal of Machine Learning Research, vol. 17, no. 39, pp. 1-40, 2016.
319
+ [2] A. Brohan, N. Brown, J. Carbajal, Y. Chebotar, J. Dabis, C. Finn, K. Gopalakrishnan, K. Hausman, A. Herzog, J. Hsu et al., "Rt-1: Robotics transformer for real-world control at scale," arXiv preprint arXiv:2212.06817, 2022.
320
+ [3] A. Brohan, N. Brown, J. Carbajal, Y. Chebotar, X. Chen, K. Choromanski, T. Ding, D. Driess, A. Dubey, C. Finn et al., "Rt-2: Vision-language-action models transfer web knowledge to robotic control," arXiv preprint arXiv:2307.15818, 2023.
321
+ [4] C. Chi, S. Feng, Y. Du, Z. Xu, E. Cousineau, B. Burchfiel, and S. Song, "Diffusion policy: Visuomotor policy learning via action diffusion," arXiv preprint arXiv:2303.04137, 2023.
322
+ [5] J. Aldaco, T. Armstrong, R. Baruch, J. Bingham, S. Chan, K. Draper, D. Dwibedi, C. Finn, P. Florence, S. Goodrich et al., "Aloha 2: An enhanced low-cost hardware for bimanual teleoperation," arXiv preprint arXiv:2405.02292, 2024.
323
+ [6] Z. Fu, T. Z. Zhao, and C. Finn, "Mobile aloha: Learning bimanual mobile manipulation with low-cost whole-body teleoperation," arXiv preprint arXiv:2401.02117, 2024.
324
+ [7] T. Z. Zhao, V. Kumar, S. Levine, and C. Finn, “Learning fine-grained bimanual manipulation with low-cost hardware,” arXiv preprint arXiv:2304.13705, 2023.
325
+ [8] H. Fang, H.-S. Fang, Y. Wang, J. Ren, J. Chen, R. Zhang, W. Wang, and C. Lu, "Airexo: Low-cost exoskeletons for learning whole-arm manipulation in the wild," in 2024 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2024, pp. 15031-15038.
326
+ [9] X. Cheng, J. Li, S. Yang, G. Yang, and X. Wang, “Open-television: Teleoperation with immersive active visual feedback,” arXiv preprint arXiv:2407.01512, 2024.
327
+ [10] Y. Qin, W. Yang, B. Huang, K. Van Wyk, H. Su, X. Wang, Y.-W. Chao, and D. Fox, "Anyteleop: A general vision-based dexterous robot arm-hand teleoperation system," arXiv preprint arXiv:2307.04577, 2023.
328
+ [11] F. Sanches, G. Gao, N. Elangovan, R. V. Godoy, J. Chapman, K. Wang, P. Jarvis, and M. Liarokapis, "Scalable, intuitive human to robot skill transfer with wearable human machine interfaces: On complex, dexterous tasks," in 2023 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2023, pp. 6318-6325.
329
+ [12] K. Doshi, Y. Huang, and S. Coros, "On hand-held grippers and the morphological gap in human manipulation demonstration," arXiv preprint arXiv:2311.01832, 2023.
330
+ [13] N. M. M. Shafiullah, A. Rai, H. Etukuru, Y. Liu, I. Misra, S. Chintala, and L. Pinto, "On bringing robots home," arXiv preprint arXiv:2311.16098, 2023.
331
+ [14] C. Chi, Z. Xu, C. Pan, E. Cousineau, B. Burchfiel, S. Feng, R. Tedrake, and S. Song, "Universal manipulation interface: In-the-wild robot teaching without in-the-wild robots," arXiv preprint arXiv:2402.10329, 2024.
332
+ [15] S. Liang, Y. Guan, J. Xu, H. Qian, X. Zhang, D. Wu, W. Ding, and R. Chen, "Alltact fin ray: A compliant robot gripper with omnidirectional tactile sensing," arXiv preprint arXiv:2504.18064, 2025.
333
+ [16] S. Nair, A. Rajeswaran, V. Kumar, C. Finn, and A. Gupta, “R3m: A universal visual representation for robot manipulation,” in Proceedings of The 6th Conference on Robot Learning (CoRL), vol. 205. PMLR, 2022, pp. 892–909.
334
+ [17] Y. J. Ma, S. Sodhani, D. Jayaraman, O. Bastani, V. Kumar, and A. Zhang, “VIP: Towards universal visual reward and representation via value-implicit pre-training,” in The Eleventh International Conference on Learning Representations, 2023.
335
+ [18] T. Xiao, I. Radosavovic, T. Darrell, and J. Malik, “Masked visual pretraining for motor control,” arXiv:2203.06173, 2022.
336
+ [19] I. Radosavovic, T. Xiao, S. James, P. Abbeel, J. Malik, and T. Darrell, “Real-world robot learning with masked visual pre-training,” in Conference on Robot Learning. PMLR, 2023, pp. 416–426.
337
+ [20] A. Majumdar, K. Yadav, S. Arnaud, J. Ma, C. Chen, S. Silwal, A. Jain, V.-P. Berges, T. Wu, J. Vakil et al., "Where are we in the search for an artificial visual cortex for embodied intelligence?" Advances in Neural Information Processing Systems, vol. 36, pp. 655-677, 2023.
338
+
339
+ [21] K. He, X. Chen, S. Xie, Y. Li, P. Dollar, and R. Girshick, “Masked autoencoders are scalable vision learners,” in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2022, pp. 16000-16009.
340
+ [22] A. Radford, J. W. Kim, C. Hallacy, A. Ramesh, G. Goh, S. Agarwal, G. Sastry, A. Askell, P. Mishkin, J. Clark et al., "Learning transferable visual models from natural language supervision," in International conference on machine learning. PMLR, 2021, pp. 8748-8763.
341
+ [23] K. Hosoda, K. Igarashi, and M. Asada, "Adaptive hybrid visual servoing/force control in unknown environment," in Proceedings of IEEE/RSJ International Conference on Intelligent Robots and Systems. IROS'96, vol. 3. IEEE, 1996, pp. 1097-1103.
342
+ [24] H. Nakagaki, K. Kitagaki, T. Ogasawara, and H. Tsukune, "Study of deformation and insertion tasks of a flexible wire," in Proceedings of International Conference on Robotics and Automation, vol. 3. IEEE, 1997, pp. 2397-2402.
343
+ [25] P. Miller and P. Leibowitz, "Integration of vision, force and tactile sensing for grasping," Int. J. Intell. Mach, vol. 4, pp. 129-149, 1999.
344
+ [26] H. Qi, B. Yi, S. Suresh, M. Lambeta, Y. Ma, R. Calandra, and J. Malik, "General in-hand object rotation with vision and touch," in Conference on Robot Learning. PMLR, 2023, pp. 2549-2564.
345
+ [27] S. Li, H. Yu, W. Ding, H. Liu, L. Ye, C. Xia, X. Wang, and X.-P. Zhang, “Visual-tactile fusion for transparent object grasping in complex backgrounds,” IEEE Transactions on Robotics, 2023.
346
+ [28] Y. Han, K. Yu, R. Batra, N. Boyd, C. Mehta, T. Zhao, Y. She, S. Hutchinson, and Y. Zhao, “Learning generalizable vision-tactile robotic grasping strategy for deformable objects via transformer,” IEEE/ASME Transactions on Mechatronics, 2024.
347
+ [29] R. Bhirangi, V. Pattabiraman, E. Erciyes, Y. Cao, T. Hellebrekers, and L. Pinto, “Anyskin: Plug-and-play skin sensing for robotic touch,” arXiv preprint arXiv:2409.08276, 2024.
348
+ [30] V. Pattabiraman, Y. Cao, S. Haldar, L. Pinto, and R. Bhirangi, “Learning precise, contact-rich manipulation through uncalibrated tactile skins,” arXiv preprint arXiv:2410.17246, 2024.
349
+ [31] Liu, Guan, Jia, Wu, Liu, Wang, Liang, Chen, Zhang, Song et al., "Fastumi: A scalable and hardware-independent universal manipulation interface with dataset," arXiv e-prints, arXiv:2409, 2024.
350
+ [32] Liu, Chi, Cousineau, Kuppuswamy, Burchfiel, and Song, "Maniwav: Learning robot manipulation from in-the-wild audio-visual data," in CoRL, 2024.
351
+ [33] C. Sferrazza, Y. Seo, H. Liu, Y. Lee, and P. Abbeel, "The power of the senses: Generalizable manipulation from vision and touch through masked multimodal learning," in 2024 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2024, pp. 9698-9705.
352
+ [34] Z. Xu, R. Uppuluri, X. Zhang, C. Fitch, P. G. Crandall, W. Shou, D. Wang, and Y. She, "UniT: Unified tactile representation for robot learning," 2024. [Online]. Available: https://arxiv.org/abs/2408.06481
353
+ [35] X. Zhang et al., "Fusing multimodal sensory data for robotic perception," IEEE Transactions on Robotics, 2022.
354
+ [36] A. Nagabandi, G. Kahn, S. Levine, and C. Finn, "Deep reinforcement learning for vision-based robotic control with multimodal inputs," in Conference on Robot Learning (CoRL), 2020.
355
+ [37] L. Fu, G. Datta, H. Huang, W. C.-H. Panitch, J. Drake, J. Ortiz, M. Mukadam, M. Lambeta, R. Calandra, and K. Goldberg, "A touch, vision, and language dataset for multimodal alignment," in Forty-first International Conference on Machine Learning, 2024. [Online]. Available: https://openreview.net/forum?id=tFEOOH9eH0
356
+ [38] F. Yang, C. Feng, Z. Chen, H. Park, D. Wang, Y. Dou, Z. Zeng, X. Chen, R. Gangopadhyay, A. Owens, and A. Wong, "Binding touch to everything: Learning unified multimodal tactile representations," arXiv:2401.18084, 2024.
357
+ [39] A. George, S. Gano, P. Katragadda, and A. Farimani, “Vital pretraining: Visuo-tactile pretraining for tactile and non-tactile manipulation policies,” arXiv preprint arXiv:2403.11898, 2024.
358
+ [40] O. Ronneberger, P. Fischer, and T. Brox, “U-net: Convolutional networks for biomedical image segmentation,” in Medical image computing and computer-assisted intervention-MICCAI 2015: 18th international conference, Munich, Germany, October 5-9, 2015, proceedings, part III 18. Springer, 2015, pp. 234-241.
359
+ [41] J. Song, C. Meng, and S. Ermon, “Denoising diffusion implicit models,” arXiv preprint arXiv:2010.02502, 2020.
data/2025/2504_06xxx/2504.06156/images/00f85838005136fdca15b5fe4bb78ee82f7340dea6f3e5a9b2e65bd76936a94c.jpg ADDED

Git LFS Details

  • SHA256: 4b436f402adc0ab5774c0684738d89e4b583d9e89624bd108497db1ce395a195
  • Pointer size: 129 Bytes
  • Size of remote file: 7.42 kB
data/2025/2504_06xxx/2504.06156/images/0d76e53a8c6ce1241acff7eeef8a2fcf95cd1b9821ab719fc786e6b8b40a1ec2.jpg ADDED

Git LFS Details

  • SHA256: 31e536af065a7b46d05cd54b004c9d9041a7f3e26db91aa5ef4580982732b6e2
  • Pointer size: 130 Bytes
  • Size of remote file: 17.4 kB
data/2025/2504_06xxx/2504.06156/images/145b0ff91ccc913134917d84f27ec9288b5cc0e7a4e6ccca1027d09a1eb5522f.jpg ADDED

Git LFS Details

  • SHA256: 34449953d2e6c67da933dc9992c831a30ac30ea6fab023f11b32c6d995bf9402
  • Pointer size: 129 Bytes
  • Size of remote file: 6.97 kB
data/2025/2504_06xxx/2504.06156/images/20f9fa3b2ed644154b3075e0c925e50cd264ba0e5235b21294c9f2bd1334e309.jpg ADDED

Git LFS Details

  • SHA256: a25357b4084acae27c36b21bbd6dd3550c5db25b45445d00ba102d95562577c9
  • Pointer size: 130 Bytes
  • Size of remote file: 16.7 kB
data/2025/2504_06xxx/2504.06156/images/2a3bf65b454c923e9b5bb949a3116a5dfef20d4712dfc9369a08f9361b171127.jpg ADDED

Git LFS Details

  • SHA256: 4ad181baef3b014a5cd1caf05f22d37c9c55f0094a2f5dd7512da3c2b7ee14a1
  • Pointer size: 130 Bytes
  • Size of remote file: 33.1 kB
data/2025/2504_06xxx/2504.06156/images/2b3f5d9d3857ac4bc064a25dd8f846478edc8d3d4acf76854100e1224e09f2e4.jpg ADDED

Git LFS Details

  • SHA256: ba589db43b6bef224dfa66bcee97fabda6586b7781f32ee805e53f853b587b0c
  • Pointer size: 129 Bytes
  • Size of remote file: 7.73 kB
data/2025/2504_06xxx/2504.06156/images/2eb0d57179fb5c021a773de17ac4443e984ccc352e0dc3e5d824297b87a58824.jpg ADDED

Git LFS Details

  • SHA256: 8e316c1f2935f0f39f642e41536b645fc986344a5ec4531bd83d26e2725c9163
  • Pointer size: 129 Bytes
  • Size of remote file: 7.49 kB
data/2025/2504_06xxx/2504.06156/images/2ec98aac269313a4a3cc98c76d6cba7f37ecc7b2a02ed422fa6eb8b07c3cd183.jpg ADDED

Git LFS Details

  • SHA256: b6a07551dfaa3b22035e461486ff21e9dd9566ef2899dc6fe7a8d2bc485aec06
  • Pointer size: 130 Bytes
  • Size of remote file: 10.7 kB
data/2025/2504_06xxx/2504.06156/images/3c482c5b0658f8f048d2ebe3c95889ab774372793412480b02727eafd0de414c.jpg ADDED

Git LFS Details

  • SHA256: b8589473b3d02bf5867f02e9bd38b828bcadaece288c4493f066f66d8ab80a14
  • Pointer size: 129 Bytes
  • Size of remote file: 9.08 kB
data/2025/2504_06xxx/2504.06156/images/3ce4769ee6b1bde42a17eee61d58d48bb5431619637f3963972110f5eafc4433.jpg ADDED

Git LFS Details

  • SHA256: 3072f50e93846c3b2bf8ed1ca75d0f84d43dd8214622575a6efc18d153050c69
  • Pointer size: 129 Bytes
  • Size of remote file: 8.74 kB
data/2025/2504_06xxx/2504.06156/images/3e856f4f3818833cac9099e64bb7f58858c535a342f9000c483c2c4ffb29e705.jpg ADDED

Git LFS Details

  • SHA256: 2edc72430532a5d2cd85af4be88834bb355cb77cf2491289243e2cd38e76c105
  • Pointer size: 130 Bytes
  • Size of remote file: 15.8 kB
data/2025/2504_06xxx/2504.06156/images/41e27ceecac4c9235a249ac029abee0e7fc30124d187cd6077bf037a65e93fd4.jpg ADDED

Git LFS Details

  • SHA256: 987ef9e0b72599473463220ccefbe20f8198dc7afd125c4462011a0cc44f64e7
  • Pointer size: 129 Bytes
  • Size of remote file: 9.19 kB
data/2025/2504_06xxx/2504.06156/images/45234ed6e963ca64aacca0aeebac163393943ee8f94523b029c03b09faa1b450.jpg ADDED

Git LFS Details

  • SHA256: fa273679d1637158a49f8f28bc2008bf1645acbe73e2cf73d99da7d13aa3082d
  • Pointer size: 129 Bytes
  • Size of remote file: 9.08 kB
data/2025/2504_06xxx/2504.06156/images/4b5c950d25456db7d2d940404eb103086fe309d96067fca9478d24545376c057.jpg ADDED

Git LFS Details

  • SHA256: 7f11faf46fb1fa2cb0ec205634b2b890b45793c43ba5e6dfa595b0d6c5882904
  • Pointer size: 130 Bytes
  • Size of remote file: 15.3 kB
data/2025/2504_06xxx/2504.06156/images/4bd4ba61306b1059468cbd4655f3e43c112065002e75360f7ec6cebdc5cb4ee4.jpg ADDED

Git LFS Details

  • SHA256: 9db5514abc30c2d497bfe2bc5a439156db9bed0db55da137869459cb2ac23581
  • Pointer size: 130 Bytes
  • Size of remote file: 11.6 kB
data/2025/2504_06xxx/2504.06156/images/4d0acbed64c8854765c2e9c80aa8c7e2abb13d31d7b57777b70dad4d3e6a981a.jpg ADDED

Git LFS Details

  • SHA256: d065bf984a506ad6a8e29e268b08d2380d639e3b729866b2fe7a6905860964b6
  • Pointer size: 129 Bytes
  • Size of remote file: 8.23 kB
data/2025/2504_06xxx/2504.06156/images/4dc08d4271758a4aabed3f5e31b55c1d21ebc7da0ae86c7b523043f7db6cbe93.jpg ADDED

Git LFS Details

  • SHA256: da0b6128ccb90f4ef776005a41ad59efbd882862e40d147646228097d5ce0d77
  • Pointer size: 130 Bytes
  • Size of remote file: 56.9 kB
data/2025/2504_06xxx/2504.06156/images/512cac75dfa1a461ecf945a565d55ee173bc79056728365d8bbabcdff20497f1.jpg ADDED

Git LFS Details

  • SHA256: ef2f951c2f96cdcc180bf03606dd75ebb9e97b28afb7de083e84c1f37eb4d1b6
  • Pointer size: 129 Bytes
  • Size of remote file: 7.3 kB
data/2025/2504_06xxx/2504.06156/images/56817bcfa892e233432daadca6888f4cef9f11efeeb525983604173fded63e17.jpg ADDED

Git LFS Details

  • SHA256: f8324ec92d19a50b0a2a48ace6a1942fb8ab8187bd6d78e27b7b1b2a3d851ae6
  • Pointer size: 130 Bytes
  • Size of remote file: 25.2 kB
data/2025/2504_06xxx/2504.06156/images/5a72a662adc1c1ba0bfd167d4f4af69842d450e5c116e4daa0ea7c7387c99b10.jpg ADDED

Git LFS Details

  • SHA256: 9993f26466543d760921cf9b191b8b09d8f3235e246036aa95c625f47ca190d0
  • Pointer size: 129 Bytes
  • Size of remote file: 7.05 kB
data/2025/2504_06xxx/2504.06156/images/5e5f3b7ca4ef1ce5b7a8ef47b005c756ed1fc850e06dd280623fc0528eb1a89d.jpg ADDED

Git LFS Details

  • SHA256: a28689252d7fa04baceff0d0ed4b7ea7c2837c4a36db81c70a90a411cdc2db2a
  • Pointer size: 129 Bytes
  • Size of remote file: 9.18 kB
data/2025/2504_06xxx/2504.06156/images/66ceb8edcaeb309670260252767cb93432455bba815e9dc4a4ca645ef94a855b.jpg ADDED

Git LFS Details

  • SHA256: 7244062666863e4244819c1b59854f74e3f5b33d26035bb96fff689c8d66b301
  • Pointer size: 130 Bytes
  • Size of remote file: 24.6 kB
data/2025/2504_06xxx/2504.06156/images/67d2c9e7967010da05736086d3a0fca8814cf40da4a222bb5e6737e56f406e1e.jpg ADDED

Git LFS Details

  • SHA256: 29aa3922186d5317618134a58e4a604ecd791d34f175dbc9393b2e0146dcf143
  • Pointer size: 129 Bytes
  • Size of remote file: 9.13 kB
data/2025/2504_06xxx/2504.06156/images/6e0490d1099f2e64b741b9dd1f95e5ae865168537ab2ca60ee6fd37e533eacdb.jpg ADDED

Git LFS Details

  • SHA256: 9c085fb8a6f344947695f20b0b99de177fe62a015f3c5dc9bbf75cd245a6ac2b
  • Pointer size: 129 Bytes
  • Size of remote file: 9.56 kB
data/2025/2504_06xxx/2504.06156/images/6fd39913d482519aa7b6f7a9a91a5fd878297b9f18bd5d9df7c2afe47a5f641f.jpg ADDED

Git LFS Details

  • SHA256: 93d19361e14980380c2006849e4c45c2345cd416705710b1e4a4f1ed631ed9fe
  • Pointer size: 129 Bytes
  • Size of remote file: 9.28 kB
data/2025/2504_06xxx/2504.06156/images/7445085cbc517fd3cd93fbb3a2bd9f6db8580e6c84d599414a68d9405529f3b0.jpg ADDED

Git LFS Details

  • SHA256: e78fc283bdf8f867068b1e353d29cdf4ec83acdae81bac06bfa05d1eb4da40e7
  • Pointer size: 129 Bytes
  • Size of remote file: 9.69 kB
data/2025/2504_06xxx/2504.06156/images/8105d743b48c767516e10ef93cc71f7fc5122df736e327dea3f051cc7bfb6c47.jpg ADDED

Git LFS Details

  • SHA256: 06f5d13e714678b26f49ce32ac04d0760c7a82d49c8035847d88af77ba57814f
  • Pointer size: 130 Bytes
  • Size of remote file: 28 kB
data/2025/2504_06xxx/2504.06156/images/827a914e84c52597b4da5d6a8593513b04ba1f5b5f8f15324c3d07e8a040904d.jpg ADDED

Git LFS Details

  • SHA256: 5396d613369681309f245453c60490dd12fb689ef66ffab20a2d2280abf7da32
  • Pointer size: 130 Bytes
  • Size of remote file: 15.1 kB
data/2025/2504_06xxx/2504.06156/images/8ffaab5c8e792fcf6faad355dd789f84bac2ae3b63606a4eac5401f023e77b6c.jpg ADDED

Git LFS Details

  • SHA256: 0cae3f4e6866d890380bf0a0f3c1e51cef41f74a24ce764bec12f0651667db9a
  • Pointer size: 129 Bytes
  • Size of remote file: 6.56 kB
data/2025/2504_06xxx/2504.06156/images/94c8d59ccbd0cb0de5a5b3206f5de9360ecd718d47d6d0d3ca249decbf1ffc98.jpg ADDED

Git LFS Details

  • SHA256: 400dec9d08dd4e7717df91bd8bad2302c44e18ebb5a83ca1ea69d2a12197a5ac
  • Pointer size: 130 Bytes
  • Size of remote file: 12.2 kB
data/2025/2504_06xxx/2504.06156/images/99c0a32a6a7ff267400458289cc0fbf487ba3fbe191ce416aad8bac7243d1355.jpg ADDED

Git LFS Details

  • SHA256: 21084ef370053b4b4db325e791e88225d6d0006e2345d8da1f9d691a0bed3337
  • Pointer size: 129 Bytes
  • Size of remote file: 8.88 kB
data/2025/2504_06xxx/2504.06156/images/a285013001ef5630297fdcd051b65b0ca161561fb3c9bcad3b85e8b0d8170ccc.jpg ADDED

Git LFS Details

  • SHA256: 1b0a87e99927cd3a199f5ec4b4798e1d992a1e02a31ea7e8fcc398c0d1f91997
  • Pointer size: 130 Bytes
  • Size of remote file: 14.4 kB
data/2025/2504_06xxx/2504.06156/images/a7c742b59dc541a88a3273a380335073e194e7444064c0f55bc20ace7ec82882.jpg ADDED

Git LFS Details

  • SHA256: 5bd3e32de0acb08a5e2f775bd60272286b49a4c6cc65f5694c0f844b216d5834
  • Pointer size: 129 Bytes
  • Size of remote file: 8.52 kB
data/2025/2504_06xxx/2504.06156/images/a9d59bd91c7d90305bf0363e93a7675127b8a317c02f04e3257aa1fddbcebcc7.jpg ADDED

Git LFS Details

  • SHA256: ee44e5c22be73f627dc6e99dc2bdbd2f30e1cd6bc8b95db31344b9e97982cf4a
  • Pointer size: 130 Bytes
  • Size of remote file: 41.1 kB
data/2025/2504_06xxx/2504.06156/images/ad52e3e1fffe97ce097f5acd4e97f9d17c9f5a5940fed40ac9f7275aebb29b3d.jpg ADDED

Git LFS Details

  • SHA256: af092cb8553acdbb92a3d946c02d33c21ae0a890f2daced05ff58c5becf7ddf8
  • Pointer size: 129 Bytes
  • Size of remote file: 9.05 kB
data/2025/2504_06xxx/2504.06156/images/b83fcb747acca716d74ef5c58839df6114300388b8d0e6ee2f936782a0e64c43.jpg ADDED

Git LFS Details

  • SHA256: 44daae7eee4dcadfdddfc24d43c374e58f651952fd8451abe07157b43f9ef890
  • Pointer size: 129 Bytes
  • Size of remote file: 9.03 kB
data/2025/2504_06xxx/2504.06156/images/bad020bfaf946a16dcc60d68034d145033f7b7475137443f7b26c1c2e7ca1978.jpg ADDED

Git LFS Details

  • SHA256: bcb7adddbef4c83f90ba8d12f079e46ef046ebea1875f8e83118ef54ead56e4c
  • Pointer size: 130 Bytes
  • Size of remote file: 15 kB
data/2025/2504_06xxx/2504.06156/images/bafd32d27c34e33981964ae485e4dfba8fcf84c249727c41a5a866d6121787e8.jpg ADDED

Git LFS Details

  • SHA256: 9b37257d96b21fd0476df6f9c83595534dddb8ff713bc6b7f5098d4246373751
  • Pointer size: 130 Bytes
  • Size of remote file: 11.4 kB
data/2025/2504_06xxx/2504.06156/images/c1639d459b6280e0d616c0b61ca5027d7312dc27193311d49fc82c533e5e3614.jpg ADDED

Git LFS Details

  • SHA256: b1c1a7cb55ad01837ce26d88d0958818ba2dab2512b8c949ac26553e732976ad
  • Pointer size: 129 Bytes
  • Size of remote file: 6.68 kB
data/2025/2504_06xxx/2504.06156/images/c67f8d30bed7d5e78cd491b955ffbcc6ad2890cd3244a3c6ba5862292c5ec665.jpg ADDED

Git LFS Details

  • SHA256: e167813473813b1dc164bd6a33753ba2340499ddadaa1131ec96166531bdedb5
  • Pointer size: 129 Bytes
  • Size of remote file: 9.6 kB
data/2025/2504_06xxx/2504.06156/images/c6980fdc3266252190f984cdc73c9a2bab1431c731bc03e678d86b4b54eeb2be.jpg ADDED

Git LFS Details

  • SHA256: 94908be63e466d8c0639ee03b4eb42d2d48bfae299f22f40bd3b7f440b8c1a34
  • Pointer size: 129 Bytes
  • Size of remote file: 9.96 kB
data/2025/2504_06xxx/2504.06156/images/c76f0cbfcc0157a2fd0ea97bdb5758c11c6fb5d419f1af561dacdd15498d9a0f.jpg ADDED

Git LFS Details

  • SHA256: 05b1aa42a27a30c9f5b4be2a60d430f413c4220793fb1145e4cbdd362bc97314
  • Pointer size: 129 Bytes
  • Size of remote file: 7.81 kB
data/2025/2504_06xxx/2504.06156/images/c7ca6a14e745ecd4ed221682030a13fc964ba2820d452880f11c7800ce40073a.jpg ADDED

Git LFS Details

  • SHA256: 60b754ca9a382213fb1d741e4ada0d5c315d0897ce024e5d1741164fb866f285
  • Pointer size: 130 Bytes
  • Size of remote file: 25.6 kB
data/2025/2504_06xxx/2504.06156/images/ce63d3a7c6dc3449fc08f0a14ed53567368fbb562685332477dc26bd0e8072a3.jpg ADDED

Git LFS Details

  • SHA256: fc393c088d5d1a4c7c6e9df0dcdb8854f0bc14f5e12273d0dc22b766551ea7d5
  • Pointer size: 130 Bytes
  • Size of remote file: 14.4 kB
data/2025/2504_06xxx/2504.06156/images/d26514e671a1cabe35615727660c95426ee9c01df609ba34f6407ddd70a97fc4.jpg ADDED

Git LFS Details

  • SHA256: 7b3eddd74626fe24996f4d9ab8d4d28e3589aac0757a15e2225d5744faa9a325
  • Pointer size: 129 Bytes
  • Size of remote file: 9.47 kB