Chelsea707 commited on
Commit
7814b47
·
verified ·
1 Parent(s): 5cf1f03

Add MinerU batch 7556f414-9601-4052-95d5-34bb4099920c

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +64 -0
  2. data/2025/2502_20xxx/2502.20390/a85c762a-48b0-4e62-ba82-8973c3e103a2_content_list.json +0 -0
  3. data/2025/2502_20xxx/2502.20390/a85c762a-48b0-4e62-ba82-8973c3e103a2_model.json +0 -0
  4. data/2025/2502_20xxx/2502.20390/a85c762a-48b0-4e62-ba82-8973c3e103a2_origin.pdf +3 -0
  5. data/2025/2502_20xxx/2502.20390/full.md +0 -0
  6. data/2025/2502_20xxx/2502.20390/images.zip +3 -0
  7. data/2025/2502_20xxx/2502.20390/layout.json +0 -0
  8. data/2025/2502_20xxx/2502.20391/2a12bbc3-eb4e-4244-abd5-10dfb76027ff_content_list.json +0 -0
  9. data/2025/2502_20xxx/2502.20391/2a12bbc3-eb4e-4244-abd5-10dfb76027ff_model.json +0 -0
  10. data/2025/2502_20xxx/2502.20391/2a12bbc3-eb4e-4244-abd5-10dfb76027ff_origin.pdf +3 -0
  11. data/2025/2502_20xxx/2502.20391/full.md +603 -0
  12. data/2025/2502_20xxx/2502.20391/images.zip +3 -0
  13. data/2025/2502_20xxx/2502.20391/layout.json +0 -0
  14. data/2025/2502_20xxx/2502.20396/847fca61-3c3a-4285-8573-13bf56b2db4b_content_list.json +1537 -0
  15. data/2025/2502_20xxx/2502.20396/847fca61-3c3a-4285-8573-13bf56b2db4b_model.json +0 -0
  16. data/2025/2502_20xxx/2502.20396/847fca61-3c3a-4285-8573-13bf56b2db4b_origin.pdf +3 -0
  17. data/2025/2502_20xxx/2502.20396/full.md +322 -0
  18. data/2025/2502_20xxx/2502.20396/images.zip +3 -0
  19. data/2025/2502_20xxx/2502.20396/layout.json +0 -0
  20. data/2025/2502_20xxx/2502.20502/a95429a0-0940-4d78-b3f9-687f81572084_content_list.json +0 -0
  21. data/2025/2502_20xxx/2502.20502/a95429a0-0940-4d78-b3f9-687f81572084_model.json +0 -0
  22. data/2025/2502_20xxx/2502.20502/a95429a0-0940-4d78-b3f9-687f81572084_origin.pdf +3 -0
  23. data/2025/2502_20xxx/2502.20502/full.md +414 -0
  24. data/2025/2502_20xxx/2502.20502/images.zip +3 -0
  25. data/2025/2502_20xxx/2502.20502/layout.json +0 -0
  26. data/2025/2502_20xxx/2502.20586/01ebdb3d-c847-41b0-88c7-f3959668297a_content_list.json +0 -0
  27. data/2025/2502_20xxx/2502.20586/01ebdb3d-c847-41b0-88c7-f3959668297a_model.json +0 -0
  28. data/2025/2502_20xxx/2502.20586/01ebdb3d-c847-41b0-88c7-f3959668297a_origin.pdf +3 -0
  29. data/2025/2502_20xxx/2502.20586/full.md +474 -0
  30. data/2025/2502_20xxx/2502.20586/images.zip +3 -0
  31. data/2025/2502_20xxx/2502.20586/layout.json +0 -0
  32. data/2025/2502_20xxx/2502.20604/be121c54-f9e2-4c21-8d04-9ea04ee6c28a_content_list.json +1555 -0
  33. data/2025/2502_20xxx/2502.20604/be121c54-f9e2-4c21-8d04-9ea04ee6c28a_model.json +2055 -0
  34. data/2025/2502_20xxx/2502.20604/be121c54-f9e2-4c21-8d04-9ea04ee6c28a_origin.pdf +3 -0
  35. data/2025/2502_20xxx/2502.20604/full.md +331 -0
  36. data/2025/2502_20xxx/2502.20604/images.zip +3 -0
  37. data/2025/2502_20xxx/2502.20604/layout.json +0 -0
  38. data/2025/2502_20xxx/2502.20639/6f65e6c2-2b42-47a6-80fd-0c91d9b82f41_content_list.json +0 -0
  39. data/2025/2502_20xxx/2502.20639/6f65e6c2-2b42-47a6-80fd-0c91d9b82f41_model.json +0 -0
  40. data/2025/2502_20xxx/2502.20639/6f65e6c2-2b42-47a6-80fd-0c91d9b82f41_origin.pdf +3 -0
  41. data/2025/2502_20xxx/2502.20639/full.md +569 -0
  42. data/2025/2502_20xxx/2502.20639/images.zip +3 -0
  43. data/2025/2502_20xxx/2502.20639/layout.json +0 -0
  44. data/2025/2502_20xxx/2502.20653/882fa9ce-7503-46e8-8936-f6b18b3bf5e6_content_list.json +1721 -0
  45. data/2025/2502_20xxx/2502.20653/882fa9ce-7503-46e8-8936-f6b18b3bf5e6_model.json +0 -0
  46. data/2025/2502_20xxx/2502.20653/882fa9ce-7503-46e8-8936-f6b18b3bf5e6_origin.pdf +3 -0
  47. data/2025/2502_20xxx/2502.20653/full.md +355 -0
  48. data/2025/2502_20xxx/2502.20653/images.zip +3 -0
  49. data/2025/2502_20xxx/2502.20653/layout.json +0 -0
  50. data/2025/2502_20xxx/2502.20694/c26d785e-046d-463b-a119-b1927240da07_content_list.json +0 -0
.gitattributes CHANGED
@@ -2051,3 +2051,67 @@ data/2025/2503_02xxx/2503.02247/c3cccb6b-c68b-4a3e-a3ae-4e2074ec1a52_origin.pdf
2051
  data/2025/2503_02xxx/2503.02268/aa7077fc-2ea4-43f2-afde-36c95a306eae_origin.pdf filter=lfs diff=lfs merge=lfs -text
2052
  data/2025/2503_02xxx/2503.02310/14565682-c2e4-4620-ae64-958832a8f9f1_origin.pdf filter=lfs diff=lfs merge=lfs -text
2053
  data/2025/2503_05xxx/2503.05804/b1f41bfa-e5d3-4e40-9916-a3295463e14a_origin.pdf filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2051
  data/2025/2503_02xxx/2503.02268/aa7077fc-2ea4-43f2-afde-36c95a306eae_origin.pdf filter=lfs diff=lfs merge=lfs -text
2052
  data/2025/2503_02xxx/2503.02310/14565682-c2e4-4620-ae64-958832a8f9f1_origin.pdf filter=lfs diff=lfs merge=lfs -text
2053
  data/2025/2503_05xxx/2503.05804/b1f41bfa-e5d3-4e40-9916-a3295463e14a_origin.pdf filter=lfs diff=lfs merge=lfs -text
2054
+ data/2025/2502_20xxx/2502.20390/a85c762a-48b0-4e62-ba82-8973c3e103a2_origin.pdf filter=lfs diff=lfs merge=lfs -text
2055
+ data/2025/2502_20xxx/2502.20391/2a12bbc3-eb4e-4244-abd5-10dfb76027ff_origin.pdf filter=lfs diff=lfs merge=lfs -text
2056
+ data/2025/2502_20xxx/2502.20396/847fca61-3c3a-4285-8573-13bf56b2db4b_origin.pdf filter=lfs diff=lfs merge=lfs -text
2057
+ data/2025/2502_20xxx/2502.20502/a95429a0-0940-4d78-b3f9-687f81572084_origin.pdf filter=lfs diff=lfs merge=lfs -text
2058
+ data/2025/2502_20xxx/2502.20586/01ebdb3d-c847-41b0-88c7-f3959668297a_origin.pdf filter=lfs diff=lfs merge=lfs -text
2059
+ data/2025/2502_20xxx/2502.20604/be121c54-f9e2-4c21-8d04-9ea04ee6c28a_origin.pdf filter=lfs diff=lfs merge=lfs -text
2060
+ data/2025/2502_20xxx/2502.20639/6f65e6c2-2b42-47a6-80fd-0c91d9b82f41_origin.pdf filter=lfs diff=lfs merge=lfs -text
2061
+ data/2025/2502_20xxx/2502.20653/882fa9ce-7503-46e8-8936-f6b18b3bf5e6_origin.pdf filter=lfs diff=lfs merge=lfs -text
2062
+ data/2025/2502_20xxx/2502.20694/c26d785e-046d-463b-a119-b1927240da07_origin.pdf filter=lfs diff=lfs merge=lfs -text
2063
+ data/2025/2502_20xxx/2502.20698/222bc403-ae81-4e86-8d55-34afe2d77e77_origin.pdf filter=lfs diff=lfs merge=lfs -text
2064
+ data/2025/2502_20xxx/2502.20754/eab7bacd-3f90-428a-a83c-a7766d4cec70_origin.pdf filter=lfs diff=lfs merge=lfs -text
2065
+ data/2025/2502_20xxx/2502.20762/b5950d64-81bf-4f22-954c-024c2c3c892d_origin.pdf filter=lfs diff=lfs merge=lfs -text
2066
+ data/2025/2502_20xxx/2502.20766/19f1043c-4a7c-4b94-92ee-be4e5f0d4296_origin.pdf filter=lfs diff=lfs merge=lfs -text
2067
+ data/2025/2502_20xxx/2502.20808/96e9b83a-1f42-4419-ae40-8d95958ee7e8_origin.pdf filter=lfs diff=lfs merge=lfs -text
2068
+ data/2025/2502_20xxx/2502.20897/3c99688b-74f2-450f-adcf-851604319376_origin.pdf filter=lfs diff=lfs merge=lfs -text
2069
+ data/2025/2502_20xxx/2502.20900/e98b4fb3-7f55-4b97-8df9-bc3ad9257b87_origin.pdf filter=lfs diff=lfs merge=lfs -text
2070
+ data/2025/2502_21xxx/2502.21074/a4d92408-a3e4-460e-96b0-fcb0371b4560_origin.pdf filter=lfs diff=lfs merge=lfs -text
2071
+ data/2025/2502_21xxx/2502.21079/fa5cced9-4db3-4380-85ad-5752214bf368_origin.pdf filter=lfs diff=lfs merge=lfs -text
2072
+ data/2025/2502_21xxx/2502.21080/ec558e7e-e50f-459e-800d-e3059fc33963_origin.pdf filter=lfs diff=lfs merge=lfs -text
2073
+ data/2025/2502_21xxx/2502.21117/83e0ffa1-1987-41c6-8cad-0120e2edcb4a_origin.pdf filter=lfs diff=lfs merge=lfs -text
2074
+ data/2025/2502_21xxx/2502.21193/c7ab1c5f-6185-4e9c-9f84-f45cd6d54d79_origin.pdf filter=lfs diff=lfs merge=lfs -text
2075
+ data/2025/2502_21xxx/2502.21212/50bf2d96-a2d9-4a8c-b035-14217828f120_origin.pdf filter=lfs diff=lfs merge=lfs -text
2076
+ data/2025/2502_21xxx/2502.21228/c80b8133-acd5-4910-be2a-ad2a6fe9e3bf_origin.pdf filter=lfs diff=lfs merge=lfs -text
2077
+ data/2025/2502_21xxx/2502.21257/5d1326fc-bb30-49e3-b183-834a6c645162_origin.pdf filter=lfs diff=lfs merge=lfs -text
2078
+ data/2025/2502_21xxx/2502.21269/dcdb2015-4bbe-4f13-a59c-3d5ed1324bbd_origin.pdf filter=lfs diff=lfs merge=lfs -text
2079
+ data/2025/2502_21xxx/2502.21271/df578309-cafe-4d85-ae42-35ff60220100_origin.pdf filter=lfs diff=lfs merge=lfs -text
2080
+ data/2025/2502_21xxx/2502.21286/8149c5ce-3594-4afe-b1f5-248c96b2dbd9_origin.pdf filter=lfs diff=lfs merge=lfs -text
2081
+ data/2025/2502_21xxx/2502.21321/af5428ae-cf38-4060-91eb-57ec9f97afa6_origin.pdf filter=lfs diff=lfs merge=lfs -text
2082
+ data/2025/2503_00xxx/2503.00079/e11b583c-2e57-4dd8-a094-a542174d7431_origin.pdf filter=lfs diff=lfs merge=lfs -text
2083
+ data/2025/2503_00xxx/2503.00096/d6db6ed3-eea9-45ac-aff9-3a2ea19ab7c2_origin.pdf filter=lfs diff=lfs merge=lfs -text
2084
+ data/2025/2503_00xxx/2503.00152/fb5270c8-3d16-4bcc-b6f9-f76b11f7b7bd_origin.pdf filter=lfs diff=lfs merge=lfs -text
2085
+ data/2025/2503_00xxx/2503.00172/be5e5d11-6a84-4bad-9d30-8c15542b0b52_origin.pdf filter=lfs diff=lfs merge=lfs -text
2086
+ data/2025/2503_00xxx/2503.00177/86454116-a8fd-4838-9548-f293716db88e_origin.pdf filter=lfs diff=lfs merge=lfs -text
2087
+ data/2025/2503_00xxx/2503.00200/1ec244f2-4a1f-45a8-9141-4ad9d2798a71_origin.pdf filter=lfs diff=lfs merge=lfs -text
2088
+ data/2025/2503_00xxx/2503.00205/56f041ca-396c-42ea-acbd-51d5c42ab36d_origin.pdf filter=lfs diff=lfs merge=lfs -text
2089
+ data/2025/2503_00xxx/2503.00223/76ff2fcb-50a7-4535-b7d0-3adc3064eaf7_origin.pdf filter=lfs diff=lfs merge=lfs -text
2090
+ data/2025/2503_00xxx/2503.00237/2ccd05b0-c9c0-4808-82b4-df729b1be984_origin.pdf filter=lfs diff=lfs merge=lfs -text
2091
+ data/2025/2503_00xxx/2503.00307/42304127-463c-4858-8e87-4046431deb69_origin.pdf filter=lfs diff=lfs merge=lfs -text
2092
+ data/2025/2503_00xxx/2503.00357/1eccf6f9-6f38-4e4d-94aa-084f3abc6247_origin.pdf filter=lfs diff=lfs merge=lfs -text
2093
+ data/2025/2503_00xxx/2503.00361/ead4bddf-9080-43fa-a90e-2878f183978a_origin.pdf filter=lfs diff=lfs merge=lfs -text
2094
+ data/2025/2503_00xxx/2503.00370/ae74324b-e635-4a48-8e60-695e532b5e76_origin.pdf filter=lfs diff=lfs merge=lfs -text
2095
+ data/2025/2503_00xxx/2503.00375/95ff5447-1dae-4c9d-89b9-c86089c4916f_origin.pdf filter=lfs diff=lfs merge=lfs -text
2096
+ data/2025/2503_00xxx/2503.00453/f09e4425-9e56-4bec-a943-ec2aa49b0742_origin.pdf filter=lfs diff=lfs merge=lfs -text
2097
+ data/2025/2503_00xxx/2503.00467/6ca4e334-e87f-4810-a08c-b9f9da8d3207_origin.pdf filter=lfs diff=lfs merge=lfs -text
2098
+ data/2025/2503_00xxx/2503.00493/62223415-7c80-40c4-9eea-91fb000589d8_origin.pdf filter=lfs diff=lfs merge=lfs -text
2099
+ data/2025/2503_00xxx/2503.00513/c4243724-fed4-4298-972d-8bf318331006_origin.pdf filter=lfs diff=lfs merge=lfs -text
2100
+ data/2025/2503_00xxx/2503.00516/f45274cb-094a-4e93-a625-35cb9f08c61e_origin.pdf filter=lfs diff=lfs merge=lfs -text
2101
+ data/2025/2503_00xxx/2503.00535/72d0d94a-c3fc-437f-8368-d1c525f704c6_origin.pdf filter=lfs diff=lfs merge=lfs -text
2102
+ data/2025/2503_00xxx/2503.00540/8bf1147c-d9fb-4a03-9074-7687d754eac4_origin.pdf filter=lfs diff=lfs merge=lfs -text
2103
+ data/2025/2503_00xxx/2503.00555/6dc7e406-7f61-46d7-867a-994bde578c3a_origin.pdf filter=lfs diff=lfs merge=lfs -text
2104
+ data/2025/2503_00xxx/2503.00564/d987acde-6865-4390-b31e-1bcde498ff73_origin.pdf filter=lfs diff=lfs merge=lfs -text
2105
+ data/2025/2503_00xxx/2503.00580/290f9c32-bc1a-4169-8631-2572c03123b8_origin.pdf filter=lfs diff=lfs merge=lfs -text
2106
+ data/2025/2503_00xxx/2503.00596/135a3200-e17d-4956-995b-0492840ac17e_origin.pdf filter=lfs diff=lfs merge=lfs -text
2107
+ data/2025/2503_00xxx/2503.00670/6205f049-dfc5-4a92-bc56-6b7d180d01b4_origin.pdf filter=lfs diff=lfs merge=lfs -text
2108
+ data/2025/2503_00xxx/2503.00686/394d6a83-ff48-46cf-a497-a74daca08902_origin.pdf filter=lfs diff=lfs merge=lfs -text
2109
+ data/2025/2503_00xxx/2503.00710/1687337c-f03f-474e-a782-445a3fc5550a_origin.pdf filter=lfs diff=lfs merge=lfs -text
2110
+ data/2025/2503_00xxx/2503.00724/6631d3ee-8e48-4f30-a0e3-d7c2651c2595_origin.pdf filter=lfs diff=lfs merge=lfs -text
2111
+ data/2025/2503_00xxx/2503.00753/d76f4048-8b65-4ac7-bf85-eb615815ac05_origin.pdf filter=lfs diff=lfs merge=lfs -text
2112
+ data/2025/2503_00xxx/2503.00778/17ec0d38-c609-4f31-8892-80c46c2bb66e_origin.pdf filter=lfs diff=lfs merge=lfs -text
2113
+ data/2025/2503_00xxx/2503.00779/f54dded9-2e16-4018-9d35-c140b8329d93_origin.pdf filter=lfs diff=lfs merge=lfs -text
2114
+ data/2025/2503_00xxx/2503.00865/18748a69-7206-434f-8263-b4951d298621_origin.pdf filter=lfs diff=lfs merge=lfs -text
2115
+ data/2025/2503_00xxx/2503.00877/d787203d-8567-46af-9f39-03e19679b759_origin.pdf filter=lfs diff=lfs merge=lfs -text
2116
+ data/2025/2503_01xxx/2503.01917/841391db-7d4e-47cc-bd65-986f39fbf69b_origin.pdf filter=lfs diff=lfs merge=lfs -text
2117
+ data/2025/2503_05xxx/2503.05788/351cd0e9-a8b4-4d6d-abe8-bdb5ca999a4b_origin.pdf filter=lfs diff=lfs merge=lfs -text
data/2025/2502_20xxx/2502.20390/a85c762a-48b0-4e62-ba82-8973c3e103a2_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20390/a85c762a-48b0-4e62-ba82-8973c3e103a2_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20390/a85c762a-48b0-4e62-ba82-8973c3e103a2_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:89e72da7d3123e6b034b32568fc3e6a9fc1b98748ff209b539d56ee1c40adf95
3
+ size 18376321
data/2025/2502_20xxx/2502.20390/full.md ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20390/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6e10f386dc9e7fc03bbc924fcec5c60f3b1e2e258d6e12dadfd835ba822f70c7
3
+ size 735721
data/2025/2502_20xxx/2502.20390/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20391/2a12bbc3-eb4e-4244-abd5-10dfb76027ff_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20391/2a12bbc3-eb4e-4244-abd5-10dfb76027ff_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20391/2a12bbc3-eb4e-4244-abd5-10dfb76027ff_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7db7b2f6d19e4f19a2cca2cc284990f1ba1527318257d9821465a5d4f99496f6
3
+ size 27444392
data/2025/2502_20xxx/2502.20391/full.md ADDED
@@ -0,0 +1,603 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Point Policy: Unifying Observations and Actions with Key Points for Robot Manipulation
2
+
3
+ Siddhant Haldar, Lerrel Pinto
4
+
5
+ New York University
6
+
7
+ point-policy.github.io
8
+
9
+ ![](images/26a61ffaa540014f109230b6e93311b71f89d5783a7e64ffba6f1c53eae4a4ac.jpg)
10
+ (a) Spatial Generalization
11
+
12
+ ![](images/b067a67524187594ff18921f7f9441b148beeb10283979eb63358388ea1d9f7c.jpg)
13
+ Fig. 1: We present Point Policy, a framework that unifies robot observations and actions with key points and enables learning robot policies exclusively from human videos. Point Policy enables learning policies with improved generalization capabilities, including spatial generalization (i.e. generalization to new locations), generalization to novel object instances, and robustness to background distractors.
14
+
15
+ ![](images/14cd8dbca4f7c2f3aff87fc8a9fba82574ab0f06890b0931e5a01339ebd82030.jpg)
16
+ (b) Novel Object Instances
17
+
18
+ ![](images/f9e8fdebfe8eaa46048e0727ae65b503bd7c2e43b12eab35af54b2e00c47eebe.jpg)
19
+ (c) Robustness to Background Clutter
20
+
21
+ Abstract—Building robotic agents capable of operating across diverse environments and object types remains a significant challenge, often requiring extensive data collection. This is particularly restrictive in robotics, where each data point must be physically executed in the real world. Consequently, there is a critical need for alternative data sources for robotics and frameworks that enable learning from such data. In this work, we present Point Policy, a new method for learning robot policies exclusively from offline human demonstration videos and without any teleoperation data. Point Policy leverages state-of-the-art vision models and policy architectures to translate human hand poses into robot poses while capturing object states through semantically meaningful key points. This approach yields a morphology-agnostic representation that facilitates effective policy learning. Our experiments on 8 real-world tasks demonstrate an overall $75\%$ absolute improvement over prior works when evaluated in identical settings as training. Further, Point Policy exhibits a $74\%$ gain across tasks for novel object instances and
22
+
23
+ Correspondence to: siddhanthaldar@nyu.edu
24
+
25
+ is robust to significant background clutter. Videos of the robot are best viewed at point-policy.github.io.
26
+
27
+ # I. INTRODUCTION
28
+
29
+ Recent years have witnessed remarkable advancements in computer vision (CV) and natural language processing (NLP), resulting in models capable of complex reasoning [2, 66, 76], generating photorealistic images [7, 69] and videos [48], and even writing code [15]. A driving force behind these breakthroughs has been the abundance of data scraped from the internet. In contrast, robotics has yet to experience a similar revolution, with most robots still confined to controlled or structured environments. While CV and NLP can readily take advantage of large-scale datasets from the internet, robotics is inherently interactive and requires physical engagement with the world for data acquisition. This makes collecting robot data significantly more challenging, both in terms of time and
30
+
31
+ financial resources.
32
+
33
+ A prominent approach for training robot policies has been the collection of extensive datasets, often through contracted teleoperators [53, 12, 71], followed by training deep networks on these datasets [71, 19, 60, 41]. While effective, these methods tend to require months or even years of human effort [12, 41] and still result in datasets orders of magnitude smaller than those used in CV and NLP [60, 41]. A potential solution to this data scarcity in robotics is to tap into the vast repository of human videos available online, showcasing individuals performing a wide range of tasks in diverse scenarios.
34
+
35
+ The primary challenge in learning robot policies from human videos lies in addressing the morphology gap between robots and the human body [4, 25, 10, 9, 67]. Two notable trends have emerged in efforts to utilize human data for learning robot policies: (1) first learning visual representations or coarse policies from human datasets and then finetuning them for downstream learning on robot datasets [10, 9, 67, 57, 11, 79, 51, 52, 38], and (2) using human videos to compute rewards for autonomous policy learning through reinforcement learning [81, 4, 25, 43]. While the former requires a substantial amount of robot demonstrations to learn policies for downstream tasks, the latter often requires large amounts of online robot interactions in the real world, which can be time-consuming and potentially unsafe.
36
+
37
+ In this work, we introduce Point Policy, a new technique to learn robot policies solely from offline human data without requiring robot interactions during training. Our key observation in building Point Policy is that both humans and robots occupy the same 3D space in the world, which can be tied together using key points derived from state-of-the-art vision models.
38
+
39
+ Concretely, Point Policy works in three steps. First, given a dataset of human videos, a motion track of key points on the human hand and the object is computed using hand pose detectors [50, 63] and minimal human annotation of one frame per task. These key points are computed from two camera views, which allows for projection in 3D using point triangulation. Second, a transformer-based policy [28] is trained to predict future robot points given the set of key points derived in the previous stage. Third, during inference, the predicted future robot points in 3D space are used to backtrack the 6 DOF pose of the robot's end-effector using constraints from rigid-body geometry. The gripper state of the robot end effector is predicted as an additional token. The predicted end-effector pose and gripper state are then executed on the robot at $6\mathrm{Hz}$ .
40
+
41
+ We demonstrate the effectiveness of Point Policy through experiments on 8 real-world tasks on a Franka robot. Our main findings are summarized below:
42
+
43
+ 1) Point Policy exhibits an absolute improvement of $75\%$ over prior state-of-the-art policy learning algorithms across 8 real world tasks when evaluated in identical settings as training. (Section V-E).
44
+ 2) Point Policy generalizes to novel object instances, exhibiting a $74\%$ absolute improvement over prior work on a held-out set of objects unseen in the training data. (Section V-F).
45
+ 3) Policies trained with Point Policy are robust to the presence of background distractors, performing on par with scenes
46
+
47
+ without clutter (Section V-G).
48
+
49
+ 4) We provide an analysis of co-training Point Policy with tele-operated robot data (Section V-H) and study the importance of several design choices in Point Policy (Section V-I).
50
+
51
+ All of our datasets, and training and evaluation code have been made publicly available. Videos of our trained policies can be seen here: point-policy.github.io.
52
+
53
+ # II. RELATED WORKS
54
+
55
+ # A. Imitation Learning
56
+
57
+ Imitation Learning (IL) [33] refers to training policies with expert demonstrations, without requiring a predefined reward function. In the context of reinforcement learning (RL), this is often referred to as inverse RL [58, 1], where the reward function is derived from the demonstrations and used to train a policy [46, 26, 27, 30, 56]. While these methods reduce the need for extensive human demonstrations, they still suffer from significant sample inefficiency. As a result of this inefficiency in deploying RL policies in the real world, behavior cloning (BC) [65, 75, 70, 68] has become increasingly popular in robotics. Recent advances in BC have demonstrated success in learning policies for both long-horizon tasks [13, 54, 73] and multi-task scenarios [28, 8, 61, 10, 9]. However, most of these approaches rely on image-based representations [82, 28, 14, 8, 61, 35], which limits their ability to generalize to new objects and function effectively outside of controlled lab environments. In this work, we propose Point Policy, which attempts to address this reliance on image representations by directly using key points as an input to the policy instead of raw images. Through extensive experiments, we observe that such an abstraction helps learn robust policies that generalize across varying scenarios.
58
+
59
+ # B. Object-centric Representation Learning
60
+
61
+ Object-centric representation learning aims to create structured representations for individual components within a scene, rather than treating the scene as a whole. Common techniques in this area include segmenting scenes into bounding boxes [16, 54, 18, 20, 87] and estimating object poses [77, 78]. While bounding boxes show promise, they share similar limitations with non object-centric image-based models, such as overfitting to specific object instances. Pose estimation, although less prone to overfitting, requires separate models for each object in a task. Another popular method involves using point clouds [86, 5], but their high dimensionality necessitates specialized models, making it difficult to accurately capture spatial relationships. Lately, several works have resorted to adopting key points [45, 36, 32, 10, 9, 67, 21, 6] for policy learning due to their generalization ability. Further, key points also allow the direct injection of human priors into the policy learning pipeline [10, 9, 67] as opposed to learning representations from human videos followed by downstream learning on robot teleoperated data [57, 11, 79, 51, 52, 38]. In this work, we leverage key points as a unified observation and action space to enable learning generalizable policies exclusively from human videos.
62
+
63
+ ![](images/92b4bac61532e6073de2f00446cb51f78a85203426c67ad95c0b275db82bc1e2.jpg)
64
+ Fig. 2: Overview of the Point Policy framework. (a) Point Policy leverages state-of-the-art vision models and policy architectures to translate human hand poses into robot poses while capturing object states through sparse single-frame human annotations. (b) The derived key points are fed into a transformer policy to predict the 3D future point tracks from which the robot actions are computed through rigid-body geometry constraints. (c) Finally, the computed action is executed on the robot using end-effector position control at a 6Hz frequency.
65
+
66
+ # C. Human-to-Robot Transfer for Policy Learning
67
+
68
+ There have been several attempts at learning robot policies from human videos. Some works first learn visual representations from large-scale human video datasets and learn a downstream policy on these representations using limited amounts of robot data [57, 11, 79, 51, 52, 38]. Another line of work learns coarse policies from human videos, using key points [10] and generative modeling [9], which are then improved using downstream learning on robot data. Recently proposed MT- $\pi$ [67] alleviates the need for downstream learning by co-training a key point policy with human and robot data. A caveat in all these works is that despite having access to abundant human demonstrations, there is a need to collect robot data to achieve a highly performant policy. A recently emerging line of work [62] attempts to do away with this need for robot data by doing in-context learning with state-of-the-art vision-language models (VLMs) [66, 2, 76]. However, owing to the large compute times of VLMs, these policies are required to be deployed open-loop and hence, are not reactive to changes in the scene. In this work, we propose Point Policy, a new framework that learns generalizable policies from human videos, does not require robot demonstrations or online robot interactions, and can be executed in a closed-loop fashion.
69
+
70
+ # III. BACKGROUND
71
+
72
+ # A. Imitation learning
73
+
74
+ The goal of imitation learning is to learn a behavior policy $\pi^b$ given access to either the expert policy $\pi^e$ or trajectories derived from the expert policy $\tau^e$ . This work operates in the setting where the agent only has access to observation-based trajectories, i.e. $\tau^e \equiv \{(o_t, a_t)_{t=0}^{T}\}_{n=0}^{N}$ . Here $N$ and $T$ denote the number of demonstrations and episode timesteps respectively. We choose this specific setting since obtaining observations and actions from expert or near-expert demonstrators is feasible in real-world settings [83, 34] and falls in line with recent work in this area [28, 44, 83, 14].
75
+
76
+ # B. Behavior Cloning
77
+
78
+ Behavior Cloning (BC) [64, 72] corresponds to solving the maximum likelihood problem shown in Eq. 1. Here $\mathcal{T}^e$ refers to expert demonstrations. When parameterized by a normal distribution with fixed variance, the objective can be framed as a regression problem where, given observations $o^e$ , $\pi^{BC}$ needs to output $a^e$ .
79
+
80
+ $$
81
+ \mathcal{L}^{BC} = \mathbb{E}_{(o^{e}, a^{e}) \sim \mathcal{T}^{e}} \left\| a^{e} - \pi^{BC}\left(o^{e}\right) \right\|^{2} \tag{1}
82
+ $$
83
+
84
+ After training, it enables $\pi^{BC}$ to mimic the actions corresponding to the observations seen in the demonstrations.
85
+
86
+ # C. Semantic Correspondence and Point Tracking
87
+
88
+ Semantic correspondence and point tracking are fundamental problems in computer vision. Semantic correspondence matches semantically equivalent points between images of different scenes, while point tracking follows reference points across video frames. We leverage these ideas using two state-of-the-art models: DIFT [74] and Co-Tracker [37]. DIFT establishes correspondences between reference and observed images, as illustrated in Figure 3, while Co-Tracker tracks initialized key points throughout the video trajectory (Figure 2). This integration enables robust identification and tracking of semantically meaningful points across diverse visual scenarios, forming a key component of Point Policy. We have included a more detailed explanation in Appendix A.
89
+
90
+ # IV. POINT POLICY
91
+
92
+ Point Policy seeks to learn generalizable policies exclusively from human videos that are robust to significant environmental perturbations and applicable to diverse object locations and types. An overview of our method is presented in Figure 2. Before diving into the details, we first present some of the key assumptions needed to run Point Policy.
93
+
94
+ Assumptions: (1) The pose of the human hand in the first frame is known for each task. This is needed to initialize the robot and set that pose as the base frame of operation. This assumption can be relaxed with a hand-pose estimator [63], which we do not investigate in this work. (2) We operate in a calibrated scene with the camera's intrinsic and extrinsic matrices, and the transforms between each camera and the robot base known. In practice this is a one-time process that takes under 5 minutes when the robot system is first installed.
95
+
96
+ # A. Point-based Scene Representation
97
+
98
+ Our method begins by collecting human demonstrations, which are then converted to a point-based representation amenable to policy learning.
99
+
100
+ 1) Human-to-Robot Pose Transfer: For each time step $t$ of a human video, we first extract image key points on the human hand $p_h^t$ using the MediaPipe [50] hand pose detector, focusing specifically on the index finger and thumb. The corresponding hand key points $p_h^t$ obtained from two camera views are used to compute the 3D world coordinates $\mathcal{P}_h^t$ of the human hand through point triangulation. We use point triangulation for 3D projection due to its higher accuracy as compared to sensor depth from the camera (Section V-I). The robot position $\mathcal{R}_{pos}^t$ is computed as the midpoint between the tips of the index finger and thumb in $\mathcal{P}_h^t$ . The robot orientation $\mathcal{R}_{ori}^t$ is computed as
101
+
102
+ $$
103
+ \Delta \mathcal{R}_{\text{ori}}^{t} = \mathcal{T}\left(\mathcal{P}_{h}^{0}, \mathcal{P}_{h}^{t}\right) \tag{2}
104
+ $$
105
+
106
+ $$
107
+ \mathcal{R}_{\text{ori}}^{t} = \Delta \mathcal{R}_{\text{ori}}^{t} \cdot \mathcal{R}_{\text{ori}}^{0}
108
+ $$
109
+
110
+ where $\mathcal{T}$ computes the rigid transform between hand key points on the first frame of the video, $\mathcal{P}_h^0$ , and $\mathcal{P}_h^t$ . The robot end effector pose is then represented as $T_r^t \gets \{\mathcal{R}_{pos}^t, \mathcal{R}_{ori}^t\}$ . The robot's gripper state $\mathcal{R}_g$ is computed using the distance between the tip of the index finger and thumb. The gripper is
111
+
112
+ considered closed when the distance is less than $7\mathrm{cm}$ , otherwise open. Finally, given the robot pose $T_r^t$ , we define a set of $N$ rigid transformations $T$ about the computed robot pose and compute robot key points $\mathcal{P}_r^t$ such that
113
+
114
+ $$
115
+ \left(\mathcal{P}_{r}^{t}\right)^{i} = T_{r}^{t} \cdot T^{i}, \quad \forall i \in \{1, \dots, N\} \tag{3}
116
+ $$
117
+
118
+ This process has been demonstrated in Figure 2. This approach effectively bridges the morphological gap between human hands and robot manipulators, enabling accurate transfer of demonstrated actions to a robotic framework.
119
+
120
+ 2) Environment state through point priors: To obtain key points on task-relevant objects in the scene, we adopt the method proposed by P3PO [45]. Initially, a user randomly selects one demonstration from a dataset of human videos and annotates semantically meaningful object points on the first frame that are pertinent to the task being performed. This annotation process is quick, taking only a few seconds. The user-annotated points serve as priors for subsequent data generation. Using an off-the-shelf semantic correspondence model, DIFT [74], we transfer the annotated points from the first frame to the corresponding locations in the first frames of all other demonstrations within the dataset. This approach allows us to initialize key points throughout the data set with minimal additional human effort.
121
+
122
+ For each demonstration, we then employ Co-Tracker [37], an off-the-shelf point tracker, to automatically track these initialized key points throughout the entire trajectory. By leveraging existing vision models for correspondence and tracking, we efficiently compute object key points for every frame in the dataset while requiring user input for only a single frame. This process, illustrated in Figure 3, capitalizes on large-scale pre-training of vision models to generalize across new object instances and scenes without necessitating further training. We prefer point tracking over correspondence at each frame due to its faster inference speed and its capability to handle occlusions by continuing to track points. The corresponding object points from two camera views are lifted to 3D world coordinates using point triangulation to obtain the 3D object key points $\mathcal{P}_o$ . During inference, DIFT is employed to identify corresponding object key points on the first frame, followed by Co-Tracker tracking these points during execution.
123
+
124
+ It is important to note that Point Policy utilizes multiple camera views only for point triangulation, with the policy being learned on 3D key points grounded in the robot's base frame. More details on point triangulation can be found in Appendix B1.
125
+
126
+ # B. Policy Learning
127
+
128
+ For policy learning, we use BAKU [28]. Instead of providing raw images as input, we provide the robot points $\mathcal{P}_r$ and object points $\mathcal{P}_o$ grounded in the robot's base frame as input to the policy. A history of observations for each key point is flattened into a single vector which is then encoded using a multilayer perceptron (MLP) encoder. The encoded representations are fed as separate tokens along with a gripper token into a
129
+
130
+ ![](images/8ab796ee16f28818a7595c0cd55c50459ae369df36d1901394c414a3591dbeb2.jpg)
131
+ Human Annotation
132
+
133
+ ![](images/b3063f7e9b29b2a5552319ce9e008f6d2a9a9a4f3a7c8418a6766da00d0be25c.jpg)
134
+
135
+ ![](images/5df5d7b60f0f5cbd187b99ffa459ef229df9a7228d92a63fccc56124202af144.jpg)
136
+ Corresponding points
137
+
138
+ ![](images/7af18a5b61569e7fdcfde3a6078722a7e224a623b531331a9a3a47120a0fd3df.jpg)
139
+ Different position
140
+
141
+ ![](images/0065b4f0baee24ad9df624008781cf4f79fca80cae7a1eabe6131e9a56798740.jpg)
142
+
143
+ ![](images/876f9add637495d1ba600f52142f2ff11c3217eceb53c19cd52d1c5856132ef8.jpg)
144
+ New object instance
145
+ Fig. 3: Results of the correspondence model when used for the put bottle on rack and sweep broom tasks. On the left is a frame with human annotations for the object points. On the right, we show that semantic correspondence can identify the same points across different positions, new object instances, and background clutter.
146
+
147
+ ![](images/8db2c5ecf14e50d4906f11b1bad29865d5de37d7c559327d4c919c954d97c86d.jpg)
148
+
149
+ ![](images/271d3b29cd86b29f2be178a90afdf3a424527a5f5b4ccc5cf82b3a80e7908444.jpg)
150
+ Background clutter
151
+
152
+ BAKU [28] transformer policy, which predicts the future tracks for each robot point $\hat{\mathcal{P}}_r$ and the robot gripper state $\hat{\mathcal{G}}_r$ using a deterministic action head. Mathematically, this can be represented as
153
+
154
+ $$
155
+ \begin{array}{l} \mathcal{O}^{t-H:t} = \left\{\mathcal{P}_{r}^{t-H:t}, \mathcal{P}_{o}^{t-H:t}\right\} \\ \hat{\mathcal{P}}_{r}^{t+1}, \hat{\mathcal{G}}_{r}^{t+1} = \pi(\cdot \mid \mathcal{O}^{t-H:t}) \tag{4} \end{array}
156
+ $$
157
+
158
+ where $H$ is the history length and $\pi$ is the learned policy. Following prior works in policy learning [83, 14], we use action chunking with exponential temporal averaging to ensure temporal smoothness of the predicted point tracks. The transformer is non-causal in this scenario and hence the training loss is only applied to the robot point tracks.
159
+
160
+ # C. Backtrack Robot Actions from Predicted Key Points
161
+
162
+ The predicted robot points $\hat{\mathcal{P}}_r$ are mapped back to the robot pose using constraints from rigid-body geometry. We first consider the key point corresponding to the robot's wrist $\hat{\mathcal{P}}_r^{wrist}$ as the robot position $\hat{\mathcal{R}}_{pos}$ . The robot orientation $\hat{\mathcal{R}}_{ori}$ is computed using Eq. 2 considering $\mathcal{R}_{ori}^0$ is fixed and known. Finally, the robot action $\hat{\mathcal{A}}_r$ is defined as
163
+
164
+ $$
165
+ \hat{\mathcal{A}}_{r} = \left(\hat{\mathcal{R}}_{pos}, \hat{\mathcal{R}}_{ori}, \hat{\mathcal{G}}_{r}\right) \tag{5}
166
+ $$
167
+
168
+ This action $\hat{\mathcal{A}}_r$ is then executed on the robot using end-effector position control at a 6Hz frequency.
169
+
170
+ # V. EXPERIMENTS
171
+
172
+ Our experiments are designed to answer the following questions: (1) How well does Point Policy work for policy learning? (2) How well does Point Policy work for novel object instances? (3) Can Point Policy handle background distractors? (4) Can Point Policy be improved with robot demonstrations? (5) What design choices matter for human-to-robot learning?
173
+
174
+ # A. Experimental Setup
175
+
176
+ Our experiments utilize a Franka Research 3 robot equipped with a Franka Hand gripper, operating in a real-world environment. We use the Deoxys [87] real-time controller for controlling the robot. The policies utilize RGB and RGB-D images captured using Intel RealSense D435 cameras from two third-person camera views. The action space encompasses the robot's end effector pose and gripper state. We collect a total of 190 human demonstrations across 8 real-world tasks, featuring diverse object positions and types. Additionally, for studying the effect of co-training with robot data (Section V-H), we collect a total of 100 robot demonstrations for 4 tasks using a VR-based teleoperation framework [34]. All demonstrations are recorded at a $20\mathrm{Hz}$ frequency and subsequently subsampled to approximately $6\mathrm{Hz}$ . For methods that directly predict robot actions, we employ absolute actions during training, with orientation represented using a 6D rotation representation [85]. This representation is chosen for its continuity and fast convergence properties. The learned policies are deployed at a $6\mathrm{Hz}$ frequency during execution.
177
+
178
+ # B. Task Descriptions
179
+
180
+ We experiment with manipulation tasks with significant variability in object position, type, and background context. Figure 5 depicts rollouts for all of our tasks. For each task, we collect data across various object sizes and appearances. During evaluations, we add novel object instances that are unseen during training. The variations in positions and object instances for selected tasks are depicted in Figure 4, with more examples provided in Appendix E1. We provide a brief description of each task below.
181
+
182
+ a) Close drawer: The robot arm is tasked with pushing close a drawer placed on the table. The position of the drawer varies for each evaluation. We collect 20 demonstrations for a single drawer and run evaluations on the same drawer.
183
+ b) Put bread on plate: The robot arm picks up a piece of bread from the table and places it on a plate. The positions of the bread and the plate are varied for each evaluation. We collect 30 demonstrations for the task of a single bread-plate pair. During evaluations, we introduce two new plates.
184
+ c) Fold towel: The robot arm picks up a towel placed on the table from a corner and folds it. The position of the towel varies for each evaluation. We collect 20 demonstrations for a single towel. During evaluations, we introduce two new towels.
185
+ d) Close oven: The robot arm is tasked with closing the door of an oven. The position of the oven varies for each evaluation. We collect 20 demonstrations for the task on a single oven and run evaluations on the same oven.
186
+ e) Sweep broom: The robot arm picks up a broom and sweeps the table. The position and orientation of the broom are varied across evaluations. We collect 20 demonstrations for a single broom. During evaluations, we introduce a new broom.
187
+ f) Put bottle on rack: The robot arm picks up a bottle from the table and places it on the lower level of a kitchen rack. The position of the bottle is varied for each evaluation. We collect 15 demonstrations for 2 different bottles, resulting
188
+
189
+ ![](images/05291b31ebd3882b28fa08b7b29d21cc94710f8ae09c46cd49eb81ab194764d9.jpg)
190
+ Spatial Generalization
191
+
192
+ ![](images/7a2b7772d76ccc695f684f43a20ad06df17c1c67d28ef752764945c1a5d2fe49.jpg)
193
+
194
+ ![](images/dfa2c89897bdbbaf8b9b3d84f5ddc6c21acc06e77931197040b5108ab21bb710.jpg)
195
+ Fold Towel
196
+ Put bottle on rack
197
+ Fig. 4: (left) Illustration of spatial variation used in our experiments. (right) Range of objects used in our experiments, where the objects on the left are in-domain objects while on the right are unseen objects used in our generalization experiments.
198
+
199
+ ![](images/2e7ac0f56adc4b8f9cebb4a11c4a8d2c618e507c9caaeb9e3968173239efd59f.jpg)
200
+ Sweep with broom
201
+ Put bowl in oven
202
+
203
+ ![](images/402eb517219e24fe3d851ee30ebe681d177f3e81bc74513f6fae67a4813cc178.jpg)
204
+ Generalization to Novel Object Instances
205
+ Fold Towel
206
+
207
+ ![](images/e5e10d8d592bd099d41b5f773fdf41874e8b5449c92ae9457a30c8c22dbefcba.jpg)
208
+
209
+ ![](images/4f1be04998a14852c06542b8c65d1970aed9dc0be6acc969acbb06af2c30baf1.jpg)
210
+ Put bottle on rack
211
+
212
+ ![](images/ffdea3f582500c0984b1cf8c2ac951f1f8b73d8b32a23e1b49eed1c7503449c6.jpg)
213
+ Sweep with broom
214
+ Put bowl in oven
215
+
216
+ in a total of 30 demonstrations for the task. During evaluations, we introduce three new bottles.
217
+
218
+ g) Put bowl in oven: The robot arm picks up a bowl from the table and places it inside an oven. The position of the bowl varies for each evaluation. We collect 20 demonstrations for the task with a single bowl. During evaluations, we introduce a new bowl.
219
+ h) Make bottle upright: The robot arm picks up a bottle from the table and places it in an upright position. The position of the bottle varies for each evaluation. We collect 15 demonstrations for 2 different bottles, resulting in a total of 30 demonstrations for the task. During evaluations, we introduce two new bottles.
220
+
221
+ # C. Baselines
222
+
223
+ We compare Point Policy with 4 baselines - behavior cloning (BC) [28] with RGB and RGB-D images, Motion Tracks [67], and $P3-PO$ [45]. We describe each method below.
224
+
225
+ a) Behavior Cloning (BC) [28]: This method performs behavior cloning (BC) using the BAKU policy learning architecture [28], which takes RGB images of the human hand as input and predicts the extracted robot actions as output.
226
+ b) Behavior Cloning (BC) with Depth: This is similar to BC but uses both RGB and depth images as input.
227
+ c) Motion Track Policy $(MT - \pi)$ [67]: Given an image of the scene and robot key points on the image, $MT - \pi$ predicts the future 2D robot point tracks to complete a task. This approach generates future 2D point tracks for robot points across multiple views, which are then triangulated to obtain 3D points on the robot. These 3D points are subsequently converted to the robot's absolute pose (similar to our proposed method) and treated as the robot's action. Implementation details for $MT - \pi$ have been provided in Appendix D.
228
+ d) $P3-PO$ [45]: This method utilizes image points representing both the robot and objects of interest, projecting them into 3D space using camera depth information. These
229
+
230
+ 3D points serve as input to a transformer policy [28], which predicts robot actions. P3PO's 3D point representations, akin to those in Point Policy, enable spatial generalization, adaptability to novel object instances, and robustness to background clutter.
231
+
232
+ # D. Considerations for policy learning
233
+
234
+ Point Policy and P3PO use a point-based representation obtained from $640 \times 480$ images. For correspondence, we use DIFT [74] using the first layer of the hundredth diffusion time step with an ensemble size of 8. Point tracking is performed using a modified version of Co-Tracker [37] that enables tracking one frame at a time, rather than chunks. Point Policy, MT- $\pi$ , and P3PO use a history of 10 point observations, while the image-based baselines do not use history [28]. BC (RGB), BC (RGB-D), and MT- $\pi$ are trained on images of size $256 \times 256$ . All methods predict an action chunk [83] of size 20 ( $\sim$ 3 seconds).
235
+
236
+ # E. How well does Point Policy work for policy learning?
237
+
238
+ We evaluate Point Policy in an in-domain setting, using the same objects seen during training. The evaluation consists of 10 trials per object for each task, resulting in a variable total number of trials per task. The results of this evaluation are summarized in Table I. Baselines that rely on RGB images as inputs (RGB, RGB-D, MT-π) perform poorly when trained exclusively on human hand videos. This is largely due to the significant visual differences between the human hand and the robot manipulator. While appearance-agnostic, P3-PO struggles due to noisy depth data from the camera. Point Policy achieves an average success rate of $88\%$ across all tasks, outperforming the strongest baseline MT-π by $75\%$ . Overall, these results demonstrate Point Policy's ability to effectively address challenges related to visual differences and noisy depth data, achieving state-of-the-art performance in an in-domain setting.
239
+
240
+ TABLE I: Policy performance of Point Policy on in-domain object instances on 8 real-world tasks.
241
+
242
+ <table><tr><td>Method</td><td>Close drawer</td><td>Put bread on plate</td><td>Fold towel</td><td>Close oven</td><td>Sweep broom</td><td>Put bottle on rack</td><td>Put bowl in oven</td><td>Make bottle upright</td></tr><tr><td>BC [28]</td><td>0/10</td><td>0/20</td><td>0/10</td><td>0/10</td><td>0/10</td><td>0/30</td><td>1/10</td><td>0/20</td></tr><tr><td>BC w/ Depth</td><td>0/10</td><td>0/20</td><td>0/10</td><td>0/10</td><td>0/10</td><td>0/30</td><td>0/10</td><td>0/20</td></tr><tr><td>MT-π [67]</td><td>2/10</td><td>2/20</td><td>0/10</td><td>4/10</td><td>0/10</td><td>8/30</td><td>0/10</td><td>0/20</td></tr><tr><td>P3-PO [45]</td><td>0/10</td><td>0/20</td><td>0/10</td><td>0/10</td><td>0/10</td><td>0/30</td><td>0/10</td><td>0/20</td></tr><tr><td>Point Policy (Ours)</td><td>10/10</td><td>19/20</td><td>9/10</td><td>9/10</td><td>9/10</td><td>26/30</td><td>8/10</td><td>16/20</td></tr></table>
243
+
244
+ TABLE II: Policy performance of Point Policy on novel object instances on 6 real-world tasks.
245
+
246
+ <table><tr><td>Method</td><td>Put bread on plate</td><td>Fold towel</td><td>Sweep broom</td><td>Put bottle on rack</td><td>Put bowl in oven</td><td>Make bottle upright</td></tr><tr><td>BC [28]</td><td>0/20</td><td>0/20</td><td>0/10</td><td>0/30</td><td>0/10</td><td>0/20</td></tr><tr><td>BC w/ Depth</td><td>0/20</td><td>0/20</td><td>0/20</td><td>0/30</td><td>0/10</td><td>0/20</td></tr><tr><td>MT-π [67]</td><td>1/20</td><td>0/20</td><td>0/10</td><td>0/30</td><td>0/10</td><td>0/20</td></tr><tr><td>P3-PO [45]</td><td>0/20</td><td>0/20</td><td>0/10</td><td>0/30</td><td>0/10</td><td>0/20</td></tr><tr><td>Point Policy (Ours)</td><td>18/20</td><td>15/20</td><td>4/10</td><td>27/30</td><td>9/10</td><td>9/20</td></tr></table>
247
+
248
+ TABLE III: Policy performance of Point Policy with background distractors on both in-domain and novel object instances.
249
+
250
+ <table><tr><td rowspan="2">Background distractors</td><td colspan="2">Put bread on plate</td><td colspan="2">Sweep broom</td><td colspan="2">Put bottle on rack</td></tr><tr><td>In-domain</td><td>Novel object</td><td>In-domain</td><td>Novel object</td><td>In-domain</td><td>Novel object</td></tr><tr><td>×</td><td>19/20</td><td>18/20</td><td>9/10</td><td>4/10</td><td>26/30</td><td>27/30</td></tr><tr><td>✓</td><td>18/20</td><td>18/20</td><td>9/10</td><td>2/10</td><td>23/30</td><td>23/30</td></tr></table>
251
+
252
+ # F. How well does Point Policy work for novel object instances?
253
+
254
+ Table II compares the performance of Point Policy when evaluated on new object instances unseen in the training data. We perform this comparison on a subset of our tasks. We observe that Point Policy achieves an average success rate of $74\%$ across all tasks, outperforming the strongest baseline by $73\%$ . Compared to P3PO [45], where each task is trained with a variety of object sizes, most of our tasks are trained on a single object instance. Despite this limited diversity in the training data, Point Policy demonstrates robust generalization capabilities. Figure 6 depicts rollouts of Point Policy for novel object instances. For a visual reference of the novel object instances used for each task, please refer to Appendix E1. These results affirm Point Policy's strong generalization capabilities, making it suitable for real-world applications where encountering unseen objects is common.
255
+
256
+ # G. Can Point Policy handle background distractors?
257
+
258
+ We evaluate the robustness of Point Policy in the presence of background clutter, as shown in Table III. This study is conducted on three tasks - put bread on plate, sweep broom, and put bottle on rack. Trials are conducted using both in-domain and novel object instances. Examples of the distractors used are illustrated in Figure 2, with Figure 6 depicting rollouts of Point Policy in the presence of background distractors. We observe that Point Policy is robust to background clutter, exhibiting either comparable performance or only minimal degradation in the presence of background distractors. This robustness can be attributed to Point Policy's use of point-based representations, which are decoupled from raw pixel values. By focusing on semantically meaningful points
259
+
260
+ TABLE IV: Policy performance of Point Policy with teleoperated robot data on in-domain object instances.
261
+
262
+ <table><tr><td>Demonstrations</td><td>Put bread on plate</td><td>Fold towel</td><td>Sweep broom</td><td>Make bottle upright</td></tr><tr><td>Human</td><td>19/20</td><td>9/10</td><td>9/10</td><td>16/20</td></tr><tr><td>Robot</td><td>18/20</td><td>9/10</td><td>4/10</td><td>12/20</td></tr><tr><td>Human + Robot</td><td>20/20</td><td>9/10</td><td>8/10</td><td>8/20</td></tr></table>
263
+
264
+ rather than image-level features, Point Policy enables policies that are resilient to environmental perturbations.
265
+
266
+ # H. Can Point Policy be improved with robot demonstrations?
267
+
268
+ Table IV investigates whether Point Policy's performance can be enhanced through co-training with teleoperated robot data, collected using a VR-based teleoperation framework [34]. We conduct this study on four tasks - put bread on plate, fold towel, sweep broom, and make bottle upright. For each task, we collect an equal number of robot demonstrations as human demonstrations, resulting in 30, 20, 20, and 30 demonstrations respectively. Interestingly, our findings reveal that for tasks involving complex motions, such as sweep broom and make bottle upright, policies trained solely on robot data perform poorly with the same amount of data as compared to those trained exclusively on human data. This drop in performance stems from the complex motions in these tasks making it harder to collect robot data using VR teleoperation, resulting in noisy demos. These results highlight an important consideration: humans and robots may execute the same task in different ways. Consequently, co-training with both human and robot data requires the development of algorithms capable of dealing with these differences effectively.
269
+
270
+ ![](images/49ce5220a5e70a31efbedad5de499dbf121071f546808f498644412b5b81ba15.jpg)
271
+
272
+ ![](images/5c35be069d302f0a9f6fc73495b3cc0d4d3a1a0ff10d8087ca5e4d4f2a08cafc.jpg)
273
+ Fold towel
274
+
275
+ ![](images/2700dd855a66c3439738e294d1dc08cf431b3f361e4122f51441484b60fe500c.jpg)
276
+ Sweep with broom
277
+
278
+ ![](images/be0c7093b0d81e3b1858f57b5fb975f84a17b3877d54d7e80422fc452fb01fc6.jpg)
279
+ Put bowl in oven
280
+ Fig. 5: Real-world rollouts showing Point Policy's ability on in-domain objects across 8 real-world tasks.
281
+
282
+ ![](images/7840600d48ea0054586058d915e06f4c2aa475a35ffbe078590b2a8c34d43e8f.jpg)
283
+ Put bread on a plate
284
+
285
+ ![](images/02c38d94859e671215a2cc8ba853ffa4f9051701ebcd636ee4775db9ecabaf3d.jpg)
286
+ Close oven
287
+
288
+ ![](images/2e01cbbb2c68468ffdfd4e7c2712fdfdfa1243c383d9cc2f934a1074d1a17e92.jpg)
289
+ Put bottle on rack
290
+
291
+ ![](images/7d340582db63eeb3fed3ebb02c0c23c2b955fc294ec7a91019f76e27da84c81e.jpg)
292
+ Make bottle upright
293
+
294
+ TABLE V: The effect of triangulated depth on P3PO and Point Policy.
295
+
296
+ <table><tr><td>Method</td><td>Put bread on plate</td><td>Sweep broom</td><td>Put bottle on rack</td></tr><tr><td>P3PO</td><td>0/20</td><td>0/10</td><td>0/30</td></tr><tr><td>P3PO + Triangulated Depth</td><td>17/20</td><td>4/10</td><td>23/30</td></tr><tr><td>Point Policy</td><td>19/20</td><td>9/10</td><td>26/30</td></tr><tr><td>Point Policy - Triangulated Depth</td><td>0/20</td><td>0/10</td><td>0/30</td></tr></table>
297
+
298
+ # I. What design choices matter for human-to-robot learning?
299
+
300
+ This section examines the impact of key design decisions on learning from human videos.
301
+
302
+ a) Depth Sensing: In Point Policy, we utilize point triangulation from two camera views to obtain 3D key points, rather than relying on depth maps from the camera. We hypothesize that noisy camera depth leads to imprecise 3D key points, resulting in unreliable actions. Table V tests this hypothesis on 4 real-world tasks by comparing the performance of P3PO and Point Policy with and without triangulated depth. We observe that adding triangulated depth to P3PO improves its performance from $0\%$ to $72\%$ . Further, removing triangulated depth from Point Policy reduces its performance from $90\%$ to $0\%$ . These results emphasize the importance of obtaining accurate 3D key points from human hands when learning robot policies from human videos. Appendix E2 includes an illustration of imprecise actions resulting from noisy sensor depth.
303
+ b) Significance of Object Points: While Point Policy uses robot and object key points as input to the policy, MT-π [67],
304
+
305
+ TABLE VI: Importance of object point inputs for policy learning.
306
+
307
+ <table><tr><td>Method</td><td>Close drawer</td><td>Put bread on plate</td><td>Fold towel</td><td>Make bottle upright</td></tr><tr><td>MT-π</td><td>2/10</td><td>2/20</td><td>0/10</td><td>0/20</td></tr><tr><td>MT-π + object points</td><td>8/10</td><td>1/20</td><td>6/10</td><td>2/20</td></tr><tr><td>Point Policy</td><td>10/10</td><td>19/20</td><td>9/10</td><td>16/20</td></tr></table>
308
+
309
+ the best-performing baseline in Table I, only uses robot key points and obtains information about the rest of the scene through an input image. We hypothesize that using object points can improve policy learning performance, especially when there is a morphology gap between data collection and inference. Table VI tests this hypothesis by providing object points in addition to the robot points already passed as input into $\mathrm{MT - }\pi$ . We observe that adding object points improves the performance of $\mathrm{MT - }\pi$ on select tasks (comprehensive results on all tasks included in Appendix E3), suggesting that including object points in the input offers a potential advantage. Nevertheless, Point Policy outperforms both methods by $68\%$ across all tasks, emphasizing the efficacy of predicting 3D key points rather than 2D key points in image space.
310
+
311
+ # VI. CONCLUSION AND LIMITATIONS
312
+
313
+ In this work, we presented Point Policy, a framework that enables learning robot policies exclusively from human videos, does not require real-world online interactions, and exhibits
314
+
315
+ ![](images/0797d011655e1af2f4bbc0fe2653f634c0119111d4d2fdb2b56045f77b0f569c.jpg)
316
+ Human
317
+
318
+ ![](images/bd43dab5f4a2988e7d17934886d05447979339bf4f6cc1418eb3982a80bc6ed0.jpg)
319
+
320
+ ![](images/ec766767a021aeb17584330c6a6c32027a92429f74f95088fdbcc594755d53d4.jpg)
321
+
322
+ ![](images/56ad471d841e51f25d6b715b8f8ac9bdfb7c9dea833c67d0fbc98ad6afc23127.jpg)
323
+ Robot execution
324
+
325
+ ![](images/7c13dcf5d1486f8f6c6b456a3428aa3552d0e2f5191e8a5eb75e13e37229053e.jpg)
326
+
327
+ ![](images/250984e9fbe228e8ce9ba84dfc203acfab459245b2bb09ce75e58947bfcc3736.jpg)
328
+ Put bowl in oven.
329
+
330
+ ![](images/7f12b575bafaacf20fe8694481e949f37e8438439cac06fd1b3fa40fc48b7fbe.jpg)
331
+ Fold towel
332
+
333
+ ![](images/d140910c630050ff16a453f04076f14c64a45298d3b1a484602fce212669f989.jpg)
334
+
335
+ ![](images/4f914628c2c452207ce560adb4fd6df74ea0ebd2d0ac455830a17d2d8ea573f6.jpg)
336
+ Fig. 6: Real-world rollouts showing that Point Policy generalizes to novel object instances and is robust to background distractors.
337
+
338
+ ![](images/8fba1251e0d60e4297961acbc0eeb69f78d8b219bbe1c5404acddbf68a0c0d0f.jpg)
339
+ Make bottle upright
340
+
341
+ ![](images/f15b779ccd91025ebd10bff9603e11edcd829f66cc3708b56c4b36f2e871eed3.jpg)
342
+
343
+ ![](images/07e415559d7d8aacf4f7ad06f179ba876fddd5f7a46092b00d63ad75586d8cd6.jpg)
344
+
345
+ ![](images/51bf0230b9ee789ad06f564523b2682dc2cf11592efb0f66edac167a2692f9ab.jpg)
346
+ Human
347
+
348
+ ![](images/f761b3f00f1cc977677031b870d278b074cad481af3759bbe7be4132f419f007.jpg)
349
+
350
+ ![](images/f53bddf39f20355edfa9223f5a63d4785d51d566bb3d24edc04c91f23318c832.jpg)
351
+ Robot execution
352
+
353
+ ![](images/37cea786bb1c77f031bed8982574f582d7585df29fff9e58d8881ecc01e4e272.jpg)
354
+
355
+ ![](images/e60fbb3e4451ce02eec579d167db29878b0b021753694cfb14ea39fbef06939b.jpg)
356
+
357
+ ![](images/5dc9097abe8edcb53aa98ead906476799940524789907f6b40f8de263b368703.jpg)
358
+ Put bread on plate
359
+ Sweep with broom
360
+
361
+ ![](images/5e6f311192c9bedd3e0276f26b6344b86228d56c81815cc6cec4485dc8347431.jpg)
362
+
363
+ ![](images/3ad9abd8a5411ef2dca58e4b2f9d029644e6d244e09eaf34d360e854ef5d423d.jpg)
364
+
365
+ ![](images/4a3ccb356c6805b1b762362d2cada1b1422a19fe2950379a71d0500415322fb9.jpg)
366
+
367
+ ![](images/122f9f3a7d7172fa0ba24cca9769b3d28baae36fcdc45a8a5dc2fd1b42a6aa4c.jpg)
368
+ Put bottle on rack
369
+
370
+ ![](images/8ca65170c9ce7949c56dceb2e82654b86c0029a8468c005448ddb71949d475b9.jpg)
371
+
372
+ ![](images/13d614047423bc9eff7fde41b4176a257ebe772fa64b5eaf755f018cdd35fa34.jpg)
373
+
374
+ generalization to spatial variations, new object instances, and robustness to background clutter.
375
+
376
+ Limitations: We recognize a few limitations in this work: (1) Point Policy's reliance on existing vision models makes it susceptible to their failures. For instance, failures in hand pose detection or point tracking under occlusion have a detrimental effect on performance. However, with continued advances in computer vision, we believe that frameworks such as Point Policy will become stronger over time. (2) Point-based abstractions enhance generalization capabilities, but sacrifice valuable scene context information, which is crucial for navigating through cluttered or obstacle-rich environments. Future research focusing on developing algorithms that preserve sparse contextual cues in addition to the point abstractions in Point Policy might help address this. (3) While all our experiments are from a fixed third-person camera view, a large portion of human task videos on the internet are from an egocentric view [23, 49]. Extending Point Policy to egocentric camera views can help us utilize these vast repositories of human videos readily available on the internet.
377
+
378
+ # VII. ACKNOWLEDGMENTS
379
+
380
+ We would like to thank Enes Erciyes, Raunaq Bhirangi, and Venkatesh Pattabiraman for help with setting up the Franka robot and Nur Muhammad Shafiullah, Raunaq Bhirangi, Gaoyue Zhou, Lisa Kondrich, and Ajay Mandlekar for their valuable feedback on the paper. This work was supported by grants from Honda, Hyundai, NSF award 2339096, and ONR award N00014-22-1-2773. LP is supported by the Packard Fellowship.
381
+
382
+ # REFERENCES
383
+
384
+ [1] Pieter Abbeel and Andrew Y. Ng. Apprenticeship learning via inverse reinforcement learning. In Proceedings of the Twenty-First International Conference on Machine Learning, ICML '04, page 1, New York, NY, USA, 2004. Association for Computing Machinery. ISBN 1581138385.
385
+ [2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.
386
+ [3] Jake K Aggarwal and Quin Cai. Human motion analysis: A review. Computer vision and image understanding, 73 (3):428-440, 1999.
387
+ [4] Shikhar Bahl, Abhinav Gupta, and Deepak Pathak. Human-to-robot imitation in the wild. arXiv preprint arXiv:2207.09450, 2022.
388
+ [5] Dominik Bauer, Timothy Patten, and Markus Vincze. Reagent: Point cloud registration using imitation and reinforcement learning, 2021.
389
+ [6] Sarah Bechtle, Neha Das, and Franziska Meier. Multimodal learning of keypoint predictive models for visual object manipulation. IEEE Transactions on Robotics, 39 (2):1212-1224, 2023.
390
+ [7] James Betker, Gabriel Goh, Li Jing, Tim Brooks, Jianfeng Wang, Linjie Li, Long Ouyang, Juntang Zhuang, Joyce Lee, Yufei Guo, et al. Improving image generation with better captions. Computer Science. https://cdn.openai.com/papers/dall-e-3.pdf, 2(3):8, 2023.
391
+ [8] Homanga Bharadhwaj, Jay Vakil, Mohit Sharma, Abhinav Gupta, Shubham Tulsiani, and Vikash Kumar. Roboagent:
392
+
393
+ Generalization and efficiency in robot manipulation via semantic augmentations and action chunking. arXiv preprint arXiv:2309.01918, 2023.
394
+ [9] Homanga Bharadhwaj, Debidatta Dwibedi, Abhinav Gupta, Shubham Tulsiani, Carl Doersch, Ted Xiao, Dhruv Shah, Fei Xia, Dorsa Sadigh, and Sean Kirmani. Gen2act: Human video generation in novel scenarios enables generalizable robot manipulation. arXiv preprint arXiv:2409.16283, 2024.
395
+ [10] Homanga Bharadhwaj, Roozbeh Mottaghi, Abhinav Gupta, and Shubham Tulsiani. Track2act: Predicting point tracks from internet videos enables diverse zero-shot robot manipulation. arXiv preprint arXiv:2405.01527, 2024.
396
+ [11] Chethan Bhateja, Derek Guo, Dibya Ghosh, Anikait Singh, Manan Tomar, Quan Vuong, Yevgen Chebotar, Sergey Levine, and Aviral Kumar. Robotic offline rl from internet videos via value-function learning. In 2024 IEEE International Conference on Robotics and Automation (ICRA), pages 16977-16984. IEEE, 2024.
397
+ [12] Anthony Brohan, Noah Brown, Justice Carbajal, Yevgen Chebotar, Joseph Dabis, Chelsea Finn, Keerthana Gopalakrishnan, Karol Hausman, Alex Herzog, Jasmine Hsu, et al. Rt-1: Robotics transformer for real-world control at scale. arXiv preprint arXiv:2212.06817, 2022.
398
+ [13] Yuanpei Chen, Chen Wang, Li Fei-Fei, and C. Karen Liu. Sequential dexterity: Chaining dexterous policies for long-horizon manipulation, 2023.
399
+ [14] Cheng Chi, Siyuan Feng, Yilun Du, Zhenjia Xu, Eric Cousineau, Benjamin Burchfiel, and Shuran Song. Diffusion policy: Visuomotor policy learning via action diffusion. In Proceedings of Robotics: Science and Systems (RSS), 2023.
400
+ [15] Cognition. Devin, 2025. URL https://devin.ai. Accessed: January 24, 2025.
401
+ [16] Coline Devin, Pieter Abbeel, Trevor Darrell, and Sergey Levine. Deep object-centric representations for generalizable robot learning. CoRR, abs/1708.04225, 2017.
402
+ [17] Carl Doersch, Yi Yang, Mel Vecerik, Dilara Gokay, Ankush Gupta, Yusuf Aytar, Joao Carreira, and Andrew Zisserman. Tapir: Tracking any point with per-frame initialization and temporal refinement. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10061-10072, 2023.
403
+ [18] Yan Duan, Marcin Andrychowicz, Bradly C. Stadie, Jonathan Ho, Jonas Schneider, Ilya Sutskever, Pieter Abbeel, and Wojciech Zaremba. One-shot imitation learning. CoRR, abs/1703.07326, 2017.
404
+ [19] Haritheja Etukuru, Norihito Naka, Zijin Hu, Seungjae Lee, Julian Mehu, Aaron Edsinger, Chris Paxton, Soumith Chintala, Lerrel Pinto, and Nur Muhammad Mahi Shafiullah. Robot utility models: General policies for zero-shot deployment in new environments. arXiv preprint arXiv:2409.05865, 2024.
405
+ [20] Hao-Shu Fang, Chenxi Wang, Hongjie Fang, Minghao Gou, Jirong Liu, Hengxu Yan, Wenhai Liu, Yichen Xie, and Cewu Lu. Anygrasp: Robust and efficient
406
+
407
+ grasp perception in spatial and temporal domains. IEEE Transactions on Robotics (T-RO), 2023.
408
+ [21] Xiaolin Fang, Bo-Ruei Huang, Jiayuan Mao, Jasmine Shone, Joshua B Tenenbaum, Tomás Lozano-Pérez, and Leslie Pack Kaelbling. Keypoint abstraction using large models for object-relative imitation learning. arXiv preprint arXiv:2410.23254, 2024.
409
+ [22] Yabo Fu, Yang Lei, Tonghe Wang, Walter J Curran, Tian Liu, and Xiaofeng Yang. Deep learning in medical image registration: a review. Physics in Medicine & Biology, 65 (20):20TR01, 2020.
410
+ [23] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18995-19012, 2022.
411
+ [24] Kamal Gupta, Varun Jampani, Carlos Esteves, Abhinav Shrivastava, Ameesh Makadia, Noah Snavely, and Abhishek Kar. ASIC: Aligning sparse in-the-wild image collections. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 4134-4145, October 2023.
412
+ [25] Irmak Guzey, Yinlong Dai, Georgy Savva, Raunaq Bhirangi, and Lerrel Pinto. Bridging the human to robot dexterity gap through object-oriented rewards. arXiv preprint arXiv:2410.23289, 2024.
413
+ [26] Siddhant Haldar, Vaibhav Mathur, Denis Yarats, and Lerrel Pinto. Watch and match: Supercharging imitation with regularized optimal transport. In Conference on Robot Learning, pages 32-43. PMLR, 2023.
414
+ [27] Siddhant Haldar, Jyothish Pari, Anant Rai, and Lerrel Pinto. Teach a robot to fish: Versatile imitation from one minute of demonstrations, 2023.
415
+ [28] Siddhant Haldar, Zhuoran Peng, and Lerrel Pinto. Baku: An efficient transformer for multi-task policy learning. arXiv preprint arXiv:2406.07539, 2024.
416
+ [29] Adam W. Harley, Zhaoyuan Fang, and Katerina Fragkiadaki. Particle video revisited: Tracking through occlusions using point trajectories. In ECCV, 2022.
417
+ [30] Jonathan Ho and Stefano Ermon. Generative adversarial imitation learning. CoRR, abs/1606.03476, 2016.
418
+ [31] Shuaiyi Huang, Luyu Yang, Bo He, Songyang Zhang, Xuming He, and Abhinav Shrivastava. Learning semantic correspondence with sparse annotations. In Proceedings of the European Conference on Computer Vision (ECCV), 2022.
419
+ [32] Wenlong Huang, Chen Wang, Yunzhu Li, Ruohan Zhang, and Li Fei-Fei. Rekep: Spatio-temporal reasoning of relational keypoint constraints for robotic manipulation. arXiv preprint arXiv:2409.01652, 2024.
420
+ [33] Ahmed Hussein, Mohamed Medhat Gaber, Eyad Elyan, and Chrisina Jayne. Imitation learning: A survey of learning methods. ACM Comput. Surv., 50(2), apr 2017. ISSN 0360-0300.
421
+
422
+ [34] Aadhithya Iyer, Zhuoran Peng, Yinlong Dai, Irmak Guzey, Siddhant Haldar, Soumith Chintala, and Lerrel Pinto. Open teach: A versatile teleoperation system for robotic manipulation. arXiv preprint arXiv:2403.07870, 2024.
423
+ [35] Eric Jang, Alex Irpan, Mohi Khansari, Daniel Kappler, Frederik Ebert, Corey Lynch, Sergey Levine, and Chelsea Finn. Bc-z: Zero-shot task generalization with robotic imitation learning. In Conference on Robot Learning, pages 991-1002. PMLR, 2022.
424
+ [36] Yuanchen Ju, Kaizhe Hu, Guowei Zhang, Gu Zhang, Mingrun Jiang, and Huazhe Xu. Robo-abc: Affordance generalization beyond categories via semantic correspondence for robot manipulation. In European Conference on Computer Vision, pages 222–239. Springer, 2025.
425
+ [37] Nikita Karaev, Ignacio Rocco, Benjamin Graham, Natalia Neverova, Andrea Vedaldi, and Christian Rupprecht. Cotracker: It is better to track together, 2023.
426
+ [38] Siddharth Karamcheti, Suraj Nair, Annie S Chen, Thomas Kollar, Chelsea Finn, Dorsa Sadigh, and Percy Liang. Language-driven representation learning for robotics. arXiv preprint arXiv:2302.12766, 2023.
427
+ [39] Andrej Karpathy. mingpt: A minimal pytorch reimplementation of the openai gpt. https://github.com/karpathy/minGPT, 2021.
428
+ [40] Bernhard Kerbl, Georgios Kopanas, Thomas Leimkuhler, and George Drettakis. 3d gaussian splatting for real-time radiance field rendering. ACM Trans. Graph., 42(4): 139-1, 2023.
429
+ [41] Alexander Khazatsky, Karl Pertsch, Suraj Nair, Ashwin Balakrishna, Sudeep Dasari, Siddharth Karamcheti, Soroush Nasiriany, Mohan Kumar Srirama, Lawrence Yun-liang Chen, Kirsty Ellis, et al. Droid: A large-scale in-the-wild robot manipulation dataset. arXiv preprint arXiv:2403.12945, 2024.
430
+ [42] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C. Berg, Wan-Yen Lo, Piotr Dollár, and Ross Girshick. Segment anything, 2023.
431
+ [43] Sateesh Kumar, Jonathan Zamora, Nicklas Hansen, Rishabh Jangir, and Xiaolong Wang. Graph inverse reinforcement learning from diverse videos. In Conference on Robot Learning, pages 55-66. PMLR, 2023.
432
+ [44] Seungjae Lee, Yibin Wang, Haritheja Etukuru, H Jin Kim, Nur Muhammad Mahi Shafiullah, and Lerrel Pinto. Behavior generation with latent actions. arXiv preprint arXiv:2403.03181, 2024.
433
+ [45] Mara Levy, Siddhant Haldar, Lerrel Pinto, and Abhinav Shrivastava. P3-po: Prescriptive point priors for visuospatial generalization of robot policies. arXiv preprint arXiv:2412.06784, 2024.
434
+ [46] Mara Levy, Nirat Saini, and Abhinav Shrivastava. Wayex: Waypoint exploration using a single demonstration, 2024.
435
+ [47] Tony Lindeberg. Scale Invariant Feature Transform, volume 7. 05 2012. doi: 10.4249/scholarpedia.10491.
436
+ [48] Yixin Liu, Kai Zhang, Yuan Li, Zhiling Yan, Chujie Gao, Ruoxi Chen, Zhengqing Yuan, Yue Huang, Hanchi Sun,
437
+
438
+ Jianfeng Gao, et al. Sora: A review on background, technology, limitations, and opportunities of large vision models. arXiv preprint arXiv:2402.17177, 2024.
439
+ [49] Yunze Liu, Yun Liu, Che Jiang, Kangbo Lyu, Weikang Wan, Hao Shen, Boqiang Liang, Zhoujie Fu, He Wang, and Li Yi. Hoi4d: A 4d egocentric dataset for category-level human-object interaction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 21013-21022, 2022.
440
+ [50] Camillo Lugaresi, Jiuqiang Tang, Hadon Nash, Chris McClanahan, Esha Uboweja, Michael Hays, Fan Zhang, Chuo-Ling Chang, Ming Guang Yong, Juhyun Lee, et al. Mediapipe: A framework for building perception pipelines. arXiv preprint arXiv:1906.08172, 2019.
441
+ [51] Yecheng Jason Ma, Shagun Sodhani, Dinesh Jayaraman, Osbert Bastani, Vikash Kumar, and Amy Zhang. Vip: Towards universal visual reward and representation via value-implicit pre-training. arXiv preprint arXiv:2210.00030, 2022.
442
+ [52] Yecheng Jason Ma, Vikash Kumar, Amy Zhang, Osbert Bastani, and Dinesh Jayaraman. Liv: Language-image representations and rewards for robotic control. In International Conference on Machine Learning, pages 23301-23320. PMLR, 2023.
443
+ [53] Ajay Mandlekar, Yuke Zhu, Animesh Garg, Jonathan Booher, Max Spero, Albert Tung, Julian Gao, John Emmons, Anchit Gupta, Emre Orbay, et al. Roboturk: A crowdsourcing platform for robotic skill learning through imitation. In Conference on Robot Learning, pages 879-893. PMLR, 2018.
444
+ [54] Ajay Mandlekar, Danfei Xu, Roberto Martin-Martin, Silvio Savarese, and Li Fei-Fei. Learning to generalize across long-horizon tasks from human demonstrations. CoRR, abs/2003.06085, 2020.
445
+ [55] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021.
446
+ [56] Ashvin Nair, Abhishek Gupta, Murtaza Dalal, and Sergey Levine. Awac: Accelerating online reinforcement learning with offline datasets. arXiv preprint arXiv:2006.09359, 2020.
447
+ [57] Suraj Nair, Aravind Rajeswaran, Vikash Kumar, Chelsea Finn, and Abhinav Gupta. R3m: A universal visual representation for robot manipulation. arXiv preprint arXiv:2203.12601, 2022.
448
+ [58] Andrew Y. Ng and Stuart J. Russell. Algorithms for inverse reinforcement learning. In Proceedings of the Seventeenth International Conference on Machine Learning, ICML '00, page 663-670, San Francisco, CA, USA, 2000. Morgan Kaufmann Publishers Inc. ISBN 1558607072.
449
+ [59] David Nistér, Oleg Naroditsky, and James Bergen. Visual odometry. In Proceedings of the 2004 IEEE Computer Society Conference on Computer Vision and Pattern
450
+
451
+ Recognition, 2004. CVPR 2004., volume 1, pages I-I. IEEE, 2004.
452
+ [60] Abhishek Padalkar, Acorn Pooley, Ajinkya Jain, Alex Bewley, Alex Herzog, Alex Irpan, Alexander Khazatsky, Anant Rai, Anikait Singh, Anthony Brohan, et al. Open x-embodiment: Robotic learning datasets and rt-x models. arXiv preprint arXiv:2310.08864, 2023.
453
+ [61] Abhishek Padalkar, Acorn Pooley, Ajinkya Jain, Alex Bewley, Alex Herzog, Alex Irpan, Alexander Khazatsky, Anant Rai, Anikait Singh, Anthony Brohan, et al. Open x-embodiment: Robotic learning datasets and rt-x models. arXiv preprint arXiv:2310.08864, 2023.
454
+ [62] Georgios Papagiannis, Norman Di Palo, Pietro Vitiello, and Edward Johns. R+ x: Retrieval and execution from everyday human videos. arXiv preprint arXiv:2407.12957, 2024.
455
+ [63] Georgios Pavlakos, Dandan Shan, Ilija Radosavovic, Angjoo Kanazawa, David Fouhey, and Jitendra Malik. Reconstructing hands in 3d with transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9826-9836, 2024.
456
+ [64] D Pomerleau. An autonomous land vehicle in a neural network. Advances in Neural Information Processing Systems, 1, 1988.
457
+ [65] Dean Pomerleau. Alvinn: An autonomous land vehicle in a neural network. In D.S. Touretzky, editor, Proceedings of (NeurIPS) Neural Information Processing Systems, pages 305 - 313. Morgan Kaufmann, December 1989.
458
+ [66] Machel Reid, Nikolay Savinov, Denis Teplyashin, Dmitry Lepikhin, Timothy Lillicrap, Jean-baptiste Alayrac, Radu Soricut, Angeliki Lazaridou, Orhan Firat, Julian Schrittwieser, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024.
459
+ [67] Juntao Ren, Priya Sundaresan, Dorsa Sadigh, Sanjiban Choudhury, and Jeannette Bohg. Motion tracks: A unified representation for human-robot transfer in few-shot imitation learning. arXiv preprint arXiv:2501.06994, 2025.
460
+ [68] Stéphane Ross, Geoffrey Gordon, and Drew Bagnell. A reduction of imitation learning and structured prediction to no-regret online learning. In Proceedings of the fourteenth international conference on artificial intelligence and statistics, pages 627-635. JMLR Workshop and Conference Proceedings, 2011.
461
+ [69] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in neural information processing systems, 35:36479-36494, 2022.
462
+ [70] Stefan Schaal. Learning from demonstration. Advances in neural information processing systems, 9, 1996.
463
+ [71] Nur Muhammad Mahi Shafiullah, Anant Rai, Haritheja Etukuru, Yiqian Liu, Ishan Misra, Soumith Chintala, and Lerrel Pinto. On bringing robots home. arXiv preprint
464
+
465
+ arXiv:2311.16098, 2023.
466
+ [72] Nur Muhammad Mahi Shafiullah, Siyuan Feng, Lerrel Pinto, and Russ Tedrake. Supervised policy learning for real robots, July 2024. URL https://supervised-robot-learning.github.io. Tutorial presented at the Robotics: Science and Systems (RSS), Delft.
467
+ [73] Mohit Shridhar, Lucas Manuelli, and Dieter Fox. CLIPort: What and where pathways for robotic manipulation. CoRR, abs/2109.12098, 2021.
468
+ [74] Luming Tang, Menglin Jia, Qianqian Wang, Cheng Perng Phoo, and Bharath Hariharan. Emergent correspondence from image diffusion. In Thirty-seventh Conference on Neural Information Processing Systems, 2023.
469
+ [75] Faraz Torabi, Garrett Warnell, and Peter Stone. Recent advances in imitation learning from observation. arXiv preprint arXiv:1905.13566, 2019.
470
+ [76] Hugo Touvron, Thibaut Lavril, Gautier Izacard, Xavier Martinet, Marie-Anne Lachaux, Timothée Lacroix, Baptiste Rozière, Naman Goyal, Eric Hambro, Faisal Azhar, et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023.
471
+ [77] Jonathan Tremblay, Thang To, Balakumar Sundaralingam, Yu Xiang, Dieter Fox, and Stan Birchfield. Deep object pose estimation for semantic robotic grasping of household objects. CoRR, abs/1809.10790, 2018.
472
+ [78] Stephen Tyree, Jonathan Tremblay, Thang To, Jia Cheng, Terry Mosier, Jeffrey Smith, and Stan Birchfield. 6-dof pose estimation of household objects for robotic manipulation: An accessible dataset and benchmark. In International Conference on Intelligent Robots and Systems (IROS), 2022.
473
+ [79] Hongtao Wu, Ya Jing, Chilam Cheang, Guangzeng Chen, Jiafeng Xu, Xinghang Li, Minghuan Liu, Hang Li, and Tao Kong. Unleashing large-scale video generative pretraining for visual robot manipulation. arXiv preprint arXiv:2312.13139, 2023.
474
+ [80] Alper Yilmaz, Omar Javed, and Mubarak Shah. Object tracking: A survey. Acm computing surveys (CSUR), 38 (4):13-es, 2006.
475
+ [81] Kevin Zakka, Andy Zeng, Pete Florence, Jonathan Tompson, Jeannette Bohg, and Debidatta Dwibedi. Xirl: Cross-embodiment inverse reinforcement learning. In Conference on Robot Learning, pages 537–546. PMLR, 2022.
476
+ [82] Tianhao Zhang, Zoe McCarthy, Owen Jow, Dennis Lee, Xi Chen, Ken Goldberg, and Pieter Abbeel. Deep imitation learning for complex manipulation tasks from virtual reality teleoperation, 2018.
477
+ [83] Tony Z Zhao, Vikash Kumar, Sergey Levine, and Chelsea Finn. Learning fine-grained bimanual manipulation with low-cost hardware. arXiv preprint arXiv:2304.13705, 2023.
478
+ [84] Yang Zheng, Adam W. Harley, Bokui Shen, Gordon Wetzstein, and Leonidas J. Guibas. Pointodyssey: A large-scale synthetic dataset for long-term point tracking. In ICCV, 2023.
479
+
480
+ [85] Yi Zhou, Connelly Barnes, Jingwan Lu, Jimei Yang, and Hao Li. On the continuity of rotation representations in neural networks. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 5745-5753, 2019.
481
+ [86] Yifeng Zhu, Zhenyu Jiang, Peter Stone, and Yuke Zhu. Learning generalizable manipulation policies with object-centric 3d representations. In 7th Annual Conference on Robot Learning, 2023.
482
+ [87] Yifeng Zhu, Abhishek Joshi, Peter Stone, and Yuke Zhu. Viola: Imitation learning for vision-based manipulation with object proposal priors. In Conference on Robot Learning, pages 1199-1210. PMLR, 2023.
483
+ [88] Barbara Zitova and Jan Flusser. Image registration methods: a survey. Image and vision computing, 21(11): 977-1000, 2003.
484
+
485
+ # APPENDIX
486
+
487
+ # A. Background
488
+
489
+ 1) Semantic Correspondence: Finding corresponding points across multiple images of the same scene is a well-established problem in computer vision [47, 88]. Correspondence is essential for solving a range of larger challenges, including 3D reconstruction [55, 40], motion tracking [37, 29, 84, 17], image registration [88], and object recognition [42]. In contrast, semantic correspondence focuses on matching points between a source image and an image of a different scene (e.g., identifying the left eye of a cat in relation to the left eye of a dog). Traditional correspondence methods [88, 47] often struggle with semantic correspondence due to the substantial differences in features between the images. Recent advancements in semantic correspondence utilize deep learning and dense correspondence techniques to enhance robustness [22, 31, 24] across variations in background, lighting, and camera perspectives. In this work, we adopt a diffusion-based point correspondence model, DIFT [74], to establish correspondences between a reference and an observed image, which is illustrated in Figure 3.
490
+
491
+ 2) Point Tracking: Point tracking across videos is a problem in computer vision, where a set of reference points are given in the first frame of the video, and the task is to track these points across multiple frames of the video sequence. Point tracking has proven crucial for many applications, including motion analysis [3], object tracking [80], and visual odometry [59]. The goal is to establish reliable correspondences between points in one frame and their counterparts in subsequent frames, despite challenges such as changes in illumination, occlusions, and camera motion. While traditional point tracking methods rely on detecting local features in images, more recent advancements leverage deep learning and dense correspondence methods to improve robustness and accuracy [37, 29, 84]. In this work, we use Co-Tracker [37] to track a set of reference points defined in the first frame of a robot's trajectory. These points tracked through the entire trajectory are then used to train generalizable robot policies for the real world.
492
+
493
+ # B. Algorithmic Details
494
+
495
+ 1) Point Triangulation: Point triangulation is a fundamental technique in computer vision used to reconstruct 3D points from their 2D projections in multiple images. Given $n$ cameras with known projection matrices $P_{1}, P_{2}, \ldots, P_{n}$ and corresponding 2D image points $x_{1}, x_{2}, \ldots, x_{n}$ , the goal is to find the 3D point $X$ that best explains these observations.
496
+
497
+ The projection of $X$ onto each image is given by:
498
+
499
+ $$
500
+ x _ {i} \sim P _ {i} X
501
+ $$
502
+
503
+ where $\sim$ denotes equality up to scale.
504
+
505
+ One common approach is the Direct Linear Transform (DLT) method:
506
+
507
+ 1) For each view $i$ , we can form two linear equations:
508
+
509
+ $$
510
+ x _ {i} \left(p _ {i} ^ {3} \cdot X\right) - \left(p _ {i} ^ {1} \cdot X\right) = 0
511
+ $$
512
+
513
+ $$
514
+ y _ {i} \left(p _ {i} ^ {3} \cdot X\right) - \left(p _ {i} ^ {2} \cdot X\right) = 0
515
+ $$
516
+
517
+ where $p_i^j$ is the $j$ -th row of $P_{i}$ .
518
+
519
+ 2) Combining equations from all views, we get a system $AX = 0$ .
520
+ 3) The solution is the unit vector corresponding to the smallest singular value of $A$ , found via Singular Value Decomposition (SVD).
521
+
522
+ For optimal triangulation, we aim to minimize the geometric reprojection error.
523
+
524
+ # C. Hyperparameters
525
+
526
+ The complete list of hyperparameters is provided in Table VII. Details about the number of demonstrations for each task has been included in Section V-B, and summarized in Table VIII. All the models have been trained using a single NVIDIA RTX A4000 GPU.
527
+
528
+ # D. Implementation Details for MT-π
529
+
530
+ Since the official implementation of MT- $\pi$ is not yet publicly available, we adopt the Diffusion Transformer (DiT) based implementation of a 2D point track prediction model proposed by Bharadhwaj et al. [10]. We modify the architecture such that given a single image observation and robot motion tracks on the image, the model predicts future tracks of the robot points. These robot tracks are then converted to 3D using corresponding tracks for two camera views. The robot action is then computed from the 3D robot tracks using the same rigid-body geometry constraints as Point Policy (described in Section IV-C). MT- $\pi$ proposes the use of a key point retargeting network in order to convert the human hand and robot key points to the same space. Since we already convert the human hand key points to the corresponding robot points for Point Policy, we directly use these converted robot points instead of learning a separate keypoint retargeting network.
531
+
532
+ To ensure the correctness of our implementation, we evaluate MT- $\pi$ in a setting identical to the one described in their paper. We conduct this evaluation on the put bread on plate task. We use 30 robot teleoperated demonstrations in addition to the human demonstrations, resulting in a total of 60 demonstrations. We observed a performance of 18/20, thus, confirming the correctness of the implementation.
533
+
534
+ # E. Experiments
535
+
536
+ 1) Illustration of Spatial Generalization and Novel Object Instances: Figure 7 and Figure 8 illustrate the variations in object positions and novel object instances used for each task, respectively.
537
+ 2) Illustration of Depth Discrepancy: Figure 9 provides an illustration of the discrepancy in actions obtained from sensor depth and triangulated depth for the task of putting a bottle on the rack. We observe that the noise in sensor depth leads to noise in robot points which in turn results in unreliable actions.
538
+ 3) Significance of Object Points: Table IX and Table X study the performance of MT- $\pi$ with and without object points and Point Policy across all of our tasks. We observe that MT- $\pi$ with object points outperforms MT- $\pi$ on select tasks, suggesting that including object points in the input offers a potential advantage.
539
+
540
+ TABLE VII: List of hyperparameters.
541
+
542
+ <table><tr><td>Parameter</td><td>Value</td></tr><tr><td>Learning rate</td><td>1e-4</td></tr><tr><td>Image size</td><td>256 × 256 (for BC, BC w/ Depth, MT-π)</td></tr><tr><td>Batch size</td><td>64</td></tr><tr><td>Optimizer</td><td>Adam</td></tr><tr><td>Number of training steps</td><td>100000</td></tr><tr><td rowspan="2">Transformer architecture</td><td>minGPT [39] (for BC, BC w/ Depth, P3PO, Point Policy)</td></tr><tr><td>Diffusion Transformer [10] (for MT-π)</td></tr><tr><td>Hidden dim</td><td>256</td></tr><tr><td rowspan="2">Observation history length</td><td>1 (for BC, BC w/ Depth)</td></tr><tr><td>10 (for MT-π, P3PO, Point Policy)</td></tr><tr><td>Action head</td><td>MLP</td></tr><tr><td>Action chunk length</td><td>20</td></tr></table>
543
+
544
+ TABLE VIII: Number of demonstrations.
545
+
546
+ <table><tr><td>Task</td><td>Number of object instances</td><td>Total number of demonstrations</td></tr><tr><td>Close drawer</td><td>1</td><td>20</td></tr><tr><td>Put bread on plate</td><td>1</td><td>30</td></tr><tr><td>Fold towel</td><td>1</td><td>20</td></tr><tr><td>Close oven</td><td>1</td><td>20</td></tr><tr><td>Sweep broom</td><td>1</td><td>20</td></tr><tr><td>Put bottle on rack</td><td>2</td><td>30</td></tr><tr><td>Put bowl in oven</td><td>1</td><td>20</td></tr><tr><td>Make bottle upright</td><td>2</td><td>30</td></tr></table>
547
+
548
+ TABLE IX: In-domain policy performance
549
+
550
+ <table><tr><td>Method</td><td>Close drawer</td><td>Put bread on plate</td><td>Fold towel</td><td>Close oven</td><td>Sweep broom</td><td>Put bottle on rack</td><td>Put bowl in oven</td><td>Make bottle upright</td></tr><tr><td>MT-π [67]</td><td>2/10</td><td>2/20</td><td>0/10</td><td>4/10</td><td>0/10</td><td>8/30</td><td>0/10</td><td>0/20</td></tr><tr><td>MT-π + object points</td><td>1/20</td><td>6/10</td><td>1/20</td><td>4/10</td><td>0/10</td><td>0/10</td><td>2/20</td><td>8/10</td></tr><tr><td>Point Policy (Ours)</td><td>10/10</td><td>19/20</td><td>9/10</td><td>9/10</td><td>9/10</td><td>26/30</td><td>8/10</td><td>16/20</td></tr></table>
551
+
552
+ TABLE X: Policy performance on novel object instances
553
+
554
+ <table><tr><td>Method</td><td>Put bread on plate</td><td>Fold towel</td><td>Sweep broom</td><td>Put bottle on rack</td><td>Put bowl in oven</td><td>Make bottle upright</td></tr><tr><td>MT-π [67]</td><td>1/20</td><td>0/20</td><td>0/10</td><td>0/30</td><td>0/10</td><td>0/20</td></tr><tr><td>MT-π + object points</td><td>2/20</td><td>0/20</td><td>0/20</td><td>1/10</td><td>0/10</td><td>1/20</td></tr><tr><td>Point Policy (Ours)</td><td>18/20</td><td>15/20</td><td>4/10</td><td>27/30</td><td>9/10</td><td>9/20</td></tr></table>
555
+
556
+ ![](images/86c983ce86efcc64583b2202d4495b0bc28d31be9b86ed3c73d2d24cc9e4397e.jpg)
557
+ Close Drawer
558
+
559
+ ![](images/de43aef56147813925e87b10de87432d964e27d344cf1177a4ec397a7f15884a.jpg)
560
+ Put bread on plate
561
+
562
+ ![](images/7aac4c273c9d207262bc644bde34127a7dc95790ab734dd2c07c7051dae6dcdc.jpg)
563
+ Fold Towel
564
+
565
+ ![](images/3772580fc14f0f4c7335afde868f596dc3dbba0a25f058005e25c9a818f13b51.jpg)
566
+ Close oven
567
+
568
+ ![](images/0ed70ef9af043ad54108583f75a69d4fb0de64854ef03d9756f5309b473b4979.jpg)
569
+ Sweep with broom
570
+ Fig. 7: Illustration of spatial variation used in our experiments.
571
+
572
+ ![](images/4ae5324c2ad7cd7b1b79b17581faf0766d7e4adafc2d79916fe19de07ee2f887.jpg)
573
+ Put bottle on rack
574
+
575
+ ![](images/58f3b3c361c57563f06b9fdffe62492160c765dc31746eaa1ce910934f5546c9.jpg)
576
+ Put bowl in oven
577
+
578
+ ![](images/1171c48b89362261a8559b536f3d2cb728c1f5c71add73704defb0feb10dde4d.jpg)
579
+ Make bottle upright
580
+
581
+ ![](images/10322da05092b2d620f5e25d867be646421f8c4f1032a49dda17610588b66972.jpg)
582
+
583
+ ![](images/67d1240aa783e2a129562919b59ac4b46dc36f187d0ae73845592545b7a6f57e.jpg)
584
+ Fold Towel
585
+
586
+ ![](images/101ccde47a7f27e993a10f8a084ef363ad5419b836edf367d96fee4eb3c99428.jpg)
587
+
588
+ ![](images/6e2af7ed78da4e815f98e0ef25ec0a1f9c5cb05053b8bb5620374faf3d5b6292.jpg)
589
+ Put bread on plate
590
+ Put bottle on rack
591
+ Fig. 8: Illustration of objects used in our experiments. For each task, on the left are in-domain objects while on the right are novel objects used in our generalization experiments.
592
+
593
+ ![](images/6a6b3c89545b19b032c84333a8728c8c1805e31d90384514c44b34829a4421b1.jpg)
594
+ Put bowl in oven
595
+
596
+ ![](images/629275cf3001f1cea5e06c7762f115d90b9d7a1cbd2fc405e3276399f6e4be24.jpg)
597
+ Sweep with broom
598
+ Make bottle upright
599
+
600
+ ![](images/2cb1c24b26c41ab0c0db1a3a80224555f86c97421d76a2bb15a17003728c9d99.jpg)
601
+
602
+ ![](images/a1629c8fbfb89fbb49618628fcea0e4e6de0d9b10284cb3641cf4e18536b7c5d.jpg)
603
+ Fig. 9: Illustration of discrepancy in actions obtained from sensor depth and triangulated depth for the task of putting a bottle on the rack.
data/2025/2502_20xxx/2502.20391/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:94d5badff850ec582ef7f53271f9dbdafaca334b45260b3d2c5e7a53d1a0eaf7
3
+ size 1527721
data/2025/2502_20xxx/2502.20391/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20396/847fca61-3c3a-4285-8573-13bf56b2db4b_content_list.json ADDED
@@ -0,0 +1,1537 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Sim-to-Real Reinforcement Learning for Vision-Based Dexterous Manipulation on Humanoids",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 176,
8
+ 102,
9
+ 818,
10
+ 151
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Toru Lin $^{1,2}$ Kartik Sachdev $^{2}$ Linxi “Jim” Fan $^{2}$ Jitendra Malik $^{1}$ Yuke Zhu $^{2,3}$",
17
+ "bbox": [
18
+ 196,
19
+ 175,
20
+ 799,
21
+ 191
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "UC Berkeley<sup>1</sup> NVIDIA<sup>2</sup> UT Austin<sup>3</sup>",
28
+ "bbox": [
29
+ 357,
30
+ 204,
31
+ 637,
32
+ 220
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "https://toruowo.github.io/recipe",
39
+ "bbox": [
40
+ 339,
41
+ 233,
42
+ 656,
43
+ 248
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "image",
49
+ "img_path": "images/e5267a69cc356c7f28a67ff9bdcc27ca603308b4d364d7f5e9c24f138d05c240.jpg",
50
+ "image_caption": [
51
+ "Figure 1: Overview. We train a humanoid robot with two multi-fingered hands to perform a range of contact-rich dexterous manipulation tasks on diverse objects. Observations are obtained from a third-view camera, an egocentric camera, and robot proprioception. Our reinforcement learning policies generalize zero-shot to unseen real-world objects with varying physical properties (e.g. shape, size, color, material, mass) and remain robust against force disturbances. We also validate the adaptability of our approach on two hardware variations."
52
+ ],
53
+ "image_footnote": [],
54
+ "bbox": [
55
+ 178,
56
+ 261,
57
+ 398,
58
+ 529
59
+ ],
60
+ "page_idx": 0
61
+ },
62
+ {
63
+ "type": "image",
64
+ "img_path": "images/2ffbe5634a06768a3022f11d310ae3514ee36b5419d5b2c9723440580680839b.jpg",
65
+ "image_caption": [],
66
+ "image_footnote": [],
67
+ "bbox": [
68
+ 405,
69
+ 261,
70
+ 821,
71
+ 527
72
+ ],
73
+ "page_idx": 0
74
+ },
75
+ {
76
+ "type": "text",
77
+ "text": "Abstract: Learning generalizable robot manipulation policies, especially for complex multi-fingered humanoids, remains a significant challenge. Existing approaches primarily rely on extensive data collection and imitation learning, which are expensive, labor-intensive, and difficult to scale. Sim-to-real reinforcement learning (RL) offers a promising alternative, but has mostly succeeded in simpler state-based or single-hand setups. How to effectively extend this to vision-based, contact-rich bimanual manipulation tasks remains an open question. In this paper, we introduce a practical sim-to-real RL recipe that trains a humanoid robot to perform three challenging dexterous manipulation tasks: grasp-and-reach, box lift and bimanual handover. Our method features an automated real-to-sim tuning module, a generalized reward formulation based on contact and object goals, a divide-and-conquer policy distillation framework, and a hybrid object representation strategy with modality-specific augmentation. We demonstrate high success rates on unseen objects and robust, adaptive policy behaviors – highlighting that vision-based dexterous manipulation via sim-to-real RL is not only viable, but also scalable and broadly applicable to real-world humanoid manipulation tasks.",
78
+ "bbox": [
79
+ 228,
80
+ 617,
81
+ 767,
82
+ 859
83
+ ],
84
+ "page_idx": 0
85
+ },
86
+ {
87
+ "type": "text",
88
+ "text": "Keywords: Humanoids, Vision-Based Dexterous Manipulation, Reinforcement Learning, Sim-to-Real",
89
+ "bbox": [
90
+ 228,
91
+ 872,
92
+ 766,
93
+ 902
94
+ ],
95
+ "page_idx": 0
96
+ },
97
+ {
98
+ "type": "aside_text",
99
+ "text": "arXiv:2502.20396v2 [cs.RO] 1 Sep 2025",
100
+ "bbox": [
101
+ 22,
102
+ 282,
103
+ 60,
104
+ 710
105
+ ],
106
+ "page_idx": 0
107
+ },
108
+ {
109
+ "type": "footer",
110
+ "text": "9th Conference on Robot Learning (CoRL 2025), Seoul, Korea.",
111
+ "bbox": [
112
+ 171,
113
+ 925,
114
+ 550,
115
+ 939
116
+ ],
117
+ "page_idx": 0
118
+ },
119
+ {
120
+ "type": "text",
121
+ "text": "1 Introduction",
122
+ "text_level": 1,
123
+ "bbox": [
124
+ 171,
125
+ 89,
126
+ 313,
127
+ 104
128
+ ],
129
+ "page_idx": 1
130
+ },
131
+ {
132
+ "type": "text",
133
+ "text": "Learning generalizable manipulation policies – especially for complex humanoid robots equipped with multi-fingered hands – remains a formidable challenge in robotics. Existing approaches often rely on extensive real-world data collection and imitation learning [1, 2, 3], which are costly, labor-intensive, and difficult to scale. Sim-to-real reinforcement learning (RL) offers a promising alternative and has achieved impressive results in navigation [4, 5], locomotion [6, 7], and autonomous drone racing [8]. However, its application to dexterous manipulation remains largely limited to single-hand [9, 10, 11, 12, 13] or state-based setups [14, 15, 16, 17], leaving open the question of how to scale RL to vision-based, contact-rich bimanual tasks on humanoid embodiments.",
134
+ "bbox": [
135
+ 169,
136
+ 109,
137
+ 826,
138
+ 231
139
+ ],
140
+ "page_idx": 1
141
+ },
142
+ {
143
+ "type": "text",
144
+ "text": "In this work, we present a practical vision-based sim-to-real RL recipe that enables a multi-fingered humanoid robot to learn highly generalizable, robust, and dexterous manipulation skills. We identify and address several key challenges that have not been thoroughly explored in prior works:",
145
+ "bbox": [
146
+ 169,
147
+ 237,
148
+ 823,
149
+ 282
150
+ ],
151
+ "page_idx": 1
152
+ },
153
+ {
154
+ "type": "list",
155
+ "sub_type": "text",
156
+ "list_items": [
157
+ "(A) Sim-to-real for low-cost manipulation systems. Existing approaches rely on industry-grade robotic arms with high-precision motors. However, many humanoid platforms employ much more lightweight, noisier motors. This makes contact-rich dexterous grasping and bimanual coordination significantly harder, especially with sim-to-real method. We introduce a simple, automated real-to-sim system identification method to overcome this with less than four minutes of real-world data.",
158
+ "(B) Reward design for complex coordination. Bimanual manipulation tasks such as handover and lifting require complex coordination between arms and hands: one side must act in a way that complements the other, with precision in both motion and timing. Designing a reward function that captures this type of contact-rich collaboration is nontrivial. We propose a novel keypoint-based reward formulation to facilitate such coordination effectively.",
159
+ "(C) Exploration. The long-horizon, high-dimensional nature of bimanual coordination introduces a hard exploration problem, even when reward functions are well-shaped. We propose to use a task-aware initialization strategy to accelerate single task RL, and decompose the overall multi-task (e.g. object) policy learning into separate single-task RL followed by generalist policy distillation.",
160
+ "(D) Object perception The combination of object diversity and sim-to-real domain shift makes vision-based manipulation particularly difficult. We propose a hybrid object representation that combines compact low-dimensional features with expressive high-dimensional features, augmented via modality-specific randomization. We find this simple strategy surprisingly effective – improving sim-to-real success rates on novel objects significantly by $80\\sim 100\\%$ ."
161
+ ],
162
+ "bbox": [
163
+ 169,
164
+ 286,
165
+ 823,
166
+ 571
167
+ ],
168
+ "page_idx": 1
169
+ },
170
+ {
171
+ "type": "text",
172
+ "text": "We demonstrate the effectiveness of our approach on three challenging vision-based manipulation tasks: dexterous grasp-and-reach, bimanual lifting, bimanual handover. Our zero-shot sim-to-real policies exhibit robust, adaptive, and generalizable behavior on unseen real-world objects with diverse physical properties, achieving $90\\%$ success rate on seen objects and $60\\sim 80\\%$ success rate on novel objects. Additionally, we confirm our method's adaptability to hardware variation across two distinct multi-fingered robot hands. Together, these results establish a practical and scalable recipe for high-performance vision-based dexterous manipulation via sim-to-real RL.",
173
+ "bbox": [
174
+ 169,
175
+ 579,
176
+ 826,
177
+ 686
178
+ ],
179
+ "page_idx": 1
180
+ },
181
+ {
182
+ "type": "text",
183
+ "text": "2 Background",
184
+ "text_level": 1,
185
+ "bbox": [
186
+ 171,
187
+ 699,
188
+ 308,
189
+ 715
190
+ ],
191
+ "page_idx": 1
192
+ },
193
+ {
194
+ "type": "text",
195
+ "text": "2.1 Deep Reinforcement Learning Applications to Robotics",
196
+ "text_level": 1,
197
+ "bbox": [
198
+ 171,
199
+ 718,
200
+ 596,
201
+ 732
202
+ ],
203
+ "page_idx": 1
204
+ },
205
+ {
206
+ "type": "text",
207
+ "text": "The successes of deep RL across domains like gaming, language modeling, and control [6, 8, 18, 19, 20, 21] have generated widespread excitement. However, the paradigm is known to be brittle, with sensitivity to hyperparameters [22] and reproducibility issues [23] due to high algorithmic variance.",
208
+ "bbox": [
209
+ 169,
210
+ 732,
211
+ 823,
212
+ 777
213
+ ],
214
+ "page_idx": 1
215
+ },
216
+ {
217
+ "type": "text",
218
+ "text": "Among open problems in RL, exploration remains fundamental. Unlike supervised learning, RL agents must collect their own data — and the strategy for doing so directly affects performance. Real-world robotics compounds this difficulty with high-dimensional inputs, sparse rewards, and complex dynamics. Numerous methods have aimed to scale exploration by incentivizing novelty [24, 25, 26, 27, 28, 29, 30], but they do not fundamentally resolve the exploration bottleneck.",
219
+ "bbox": [
220
+ 169,
221
+ 784,
222
+ 823,
223
+ 859
224
+ ],
225
+ "page_idx": 1
226
+ },
227
+ {
228
+ "type": "text",
229
+ "text": "Robotics also exposes challenges overlooked in standard RL benchmarks [31, 32], including: (1) the absence of fully modeled environments, and (2) the lack of clearly defined reward functions. Past works have introduced practical techniques to mitigate these issues, such as learning from",
230
+ "bbox": [
231
+ 169,
232
+ 867,
233
+ 823,
234
+ 912
235
+ ],
236
+ "page_idx": 1
237
+ },
238
+ {
239
+ "type": "page_number",
240
+ "text": "2",
241
+ "bbox": [
242
+ 493,
243
+ 935,
244
+ 504,
245
+ 946
246
+ ],
247
+ "page_idx": 1
248
+ },
249
+ {
250
+ "type": "image",
251
+ "img_path": "images/5c8b0964cb61399ffd8500c29d20f739895b95ae3ec3c8588e7612efe0dcca4d.jpg",
252
+ "image_caption": [
253
+ "Figure 2: A sim-to-real RL recipe for vision-based dexterous manipulation. We close the environment modeling gap between simulation and reality through an automated real-to-sim tuning module, design generalizable task rewards by disentangling each manipulation task into contact states and object states, improve sample efficiency of policy training by using task-aware hand poses and divide-and-conquer distillation, and transfer vision-based policies to the real world with a mixture of sparse and dense object representations."
254
+ ],
255
+ "image_footnote": [],
256
+ "bbox": [
257
+ 176,
258
+ 89,
259
+ 823,
260
+ 340
261
+ ],
262
+ "page_idx": 2
263
+ },
264
+ {
265
+ "type": "text",
266
+ "text": "motion capture or teleoperated demonstrations [33, 34, 35, 36], real-to-sim modeling techniques [9, 4, 10, 14, 37], and more principled reward design [38, 39]. While often tailored to specific tasks or hardware, these approaches lay groundwork that our method builds upon and generalizes.",
267
+ "bbox": [
268
+ 169,
269
+ 424,
270
+ 823,
271
+ 469
272
+ ],
273
+ "page_idx": 2
274
+ },
275
+ {
276
+ "type": "text",
277
+ "text": "2.2 Vision-Based Dexterous Manipulation on Humanoids",
278
+ "text_level": 1,
279
+ "bbox": [
280
+ 171,
281
+ 487,
282
+ 584,
283
+ 502
284
+ ],
285
+ "page_idx": 2
286
+ },
287
+ {
288
+ "type": "text",
289
+ "text": "Imitation learning and classical approaches. Recent advances in teleoperation [2, 3, 40, 41] and learning from demonstrations [42, 43] have enabled significant progress in vision-based dexterous manipulation [2, 43, 3, 44]. However, teleoperation remains costly to scale, and achieving high success rates with real-world demonstration data alone [45, 46, 44] requires large datasets, making purely supervised methods expensive for reaching human-level performance on complex tasks.",
290
+ "bbox": [
291
+ 169,
292
+ 505,
293
+ 823,
294
+ 580
295
+ ],
296
+ "page_idx": 2
297
+ },
298
+ {
299
+ "type": "text",
300
+ "text": "Reinforcement learning approaches. RL-based manipulation works have shown strong results in settings such as in-hand reorientation [9, 10, 12, 47], grasping [17, 13], twisting [14], and dynamic handover [15], but typically focus on single-hand setups [9, 48, 10, 17, 12, 13, 47] or use intermediate object representations rather than raw pixels [33, 15, 14]. The closest to our work is Chen et al. [33], but their method relies on human hand motion capture, while our work learns full hands-arms joint control from scratch. Our work is also the first to demonstrate robust sim-to-real transfer of bimanual policies on a novel humanoid platform with multi-fingered hands.",
301
+ "bbox": [
302
+ 169,
303
+ 590,
304
+ 823,
305
+ 696
306
+ ],
307
+ "page_idx": 2
308
+ },
309
+ {
310
+ "type": "text",
311
+ "text": "3 Our Recipe",
312
+ "text_level": 1,
313
+ "bbox": [
314
+ 171,
315
+ 717,
316
+ 303,
317
+ 734
318
+ ],
319
+ "page_idx": 2
320
+ },
321
+ {
322
+ "type": "text",
323
+ "text": "Section 1 outlined four key challenges in sim-to-real RL for dexterous manipulation. Here, we provide the detailed approaches we develop for each. An overview is shown in Figure 2.",
324
+ "bbox": [
325
+ 169,
326
+ 737,
327
+ 823,
328
+ 768
329
+ ],
330
+ "page_idx": 2
331
+ },
332
+ {
333
+ "type": "text",
334
+ "text": "3.1 Real-to-Sim Modeling",
335
+ "text_level": 1,
336
+ "bbox": [
337
+ 171,
338
+ 786,
339
+ 367,
340
+ 801
341
+ ],
342
+ "page_idx": 2
343
+ },
344
+ {
345
+ "type": "text",
346
+ "text": "Simulators offer unlimited trial-and-error chances to perform the exploration necessary for RL. However, whether policies learned in simulation can be reliably transferred to the real world hinges on accurate modeling of both robots and environments. In dexterous manipulation, this challenge is compounded by the necessity to model objects, which have diverse and often unmeasurable physical properties. Even with known parameters, matching real-world and simulated dynamics is nontrivial: the same values for physical constants in simulation and the real world do not necessarily correspond to identical kinematic and dynamic relationships due to discrepancies in physics engines.",
347
+ "bbox": [
348
+ 169,
349
+ 801,
350
+ 823,
351
+ 906
352
+ ],
353
+ "page_idx": 2
354
+ },
355
+ {
356
+ "type": "page_number",
357
+ "text": "3",
358
+ "bbox": [
359
+ 493,
360
+ 935,
361
+ 503,
362
+ 946
363
+ ],
364
+ "page_idx": 2
365
+ },
366
+ {
367
+ "type": "text",
368
+ "text": "Autotuned robot modeling. Manufacturer-supplied robot models offer a baseline, but often require significant tuning [9, 49] to be ready for sim-to-real. This tuning is a laborious process as there is no \"ground truth\" pairing between the real world and the simulated world. We propose an autotune module for fast, automated calibration of simulation parameters to match real robot behavior. As shown in Figure 2A and Algorithm 1, our method jointly optimizes simulator physics (e.g. friction, damping) and URDF constants (e.g. link inertia values, joint limits) using only a single set of calibration trajectories on real hardware. It samples parameter sets, runs joint-targeted motions in parallel simulations, and selects the set minimizing tracking error against the real robot - automatically searching the parameter space to identify optimal values for both simulator physics and robot model constants in under four minutes (or 2000 simulated steps in $10\\mathrm{Hz}$ ). This removes the need for iterative manual tuning and generalizes to any simulator-exposed parameter affecting kinematic behaviors.",
369
+ "bbox": [
370
+ 169,
371
+ 90,
372
+ 486,
373
+ 454
374
+ ],
375
+ "page_idx": 3
376
+ },
377
+ {
378
+ "type": "code",
379
+ "sub_type": "algorithm",
380
+ "code_caption": [
381
+ "Algorithm 1 Real-to-Sim Autotune Module"
382
+ ],
383
+ "code_body": "Require: \n1: $E$ : Set of environment parameters to tune \n2: $N$ : Number of calibration action sequences \n3: $R$ : Real robot hardware environment \n4: $M$ : Initial robot model file \n5: procedure AUTOTUNE $(E,N,R,M)$ \n6: $P\\gets$ InitializeParameterSpace $(E,M)$ \n7: $S\\gets \\{\\}$ $\\triangleright$ Set of simulated environments \n8: for $i\\gets 1$ to $K$ do $\\triangleright K$ is population size \n9: $p_i\\gets$ RandomSample $(P)$ \n10: $S_{i}\\gets$ CreateSimEnvironment $(p_i)$ \n11: $S\\gets S\\cup \\{S_i\\}$ \n12: end for \n13: $J\\gets$ GenerateJointTargets $(N)$ \n14: $R_{track}\\gets$ GetTrackingErrors $(R,J)$ $\\triangleright$ Real tracking \n15: best.params $\\leftarrow$ null \n16: min_error $\\leftarrow \\infty$ \n17: for $S_{i}\\in S$ do \n18: $S_{track}\\gets$ GetTrackingErrors $(S_i,J)$ \n19: error $\\leftarrow$ ComputeMSE $(S_{track},R_{track})$ \n20: if error $<$ min_error then \n21: min_error $\\leftarrow$ error \n22: best.params $\\leftarrow$ GetParameters $(S_{i})$ \n23: end if \n24: end for \nreturn best.params \n25: end procedure",
384
+ "bbox": [
385
+ 496,
386
+ 116,
387
+ 826,
388
+ 455
389
+ ],
390
+ "page_idx": 3
391
+ },
392
+ {
393
+ "type": "text",
394
+ "text": "Approximate object modeling. Following prior work [14, 50], we model objects using simple geometric primitives (e.g. cylinders) with randomized physical parameters. Despite their simplicity, these approximations are sufficient to learn dexterous manipulation policies that transfer reliably to the real world. Our recipe adopts this strategy and finds it both effective and generalizable.",
395
+ "bbox": [
396
+ 169,
397
+ 462,
398
+ 823,
399
+ 523
400
+ ],
401
+ "page_idx": 3
402
+ },
403
+ {
404
+ "type": "text",
405
+ "text": "3.2 Generalizable Reward Design",
406
+ "text_level": 1,
407
+ "bbox": [
408
+ 171,
409
+ 541,
410
+ 419,
411
+ 556
412
+ ],
413
+ "page_idx": 3
414
+ },
415
+ {
416
+ "type": "text",
417
+ "text": "In standard RL [51], the reward function plays a central role in shaping agent behavior. However, much of RL research has treated rewards as fixed, focusing instead on algorithmic improvements [52]. In robotics — and especially in dexterous manipulation — designing effective, generalizable rewards becomes a key challenge due to complex contact dynamics and object variability [53].",
418
+ "bbox": [
419
+ 169,
420
+ 561,
421
+ 823,
422
+ 625
423
+ ],
424
+ "page_idx": 3
425
+ },
426
+ {
427
+ "type": "text",
428
+ "text": "Manipulation as contact and object goals. We observe that many human manipulation tasks [54] can be decomposed into a sequence of hand-object contact transitions and object state changes. Inspired by this, we propose a structured reward design scheme for long-horizon, contact-rich tasks. For instance, a bimanual handover can be segmented into: (1) one hand contacting the object, (2) lifting the object near the second hand, (3) the second hand contacting the object, and (4) transferring the object to the target location. We therefore define rewards based on two key components: \"contact goals\" encourages the fingertips to reach task-relevant contact points on object, and \"object goals\" penalizes current object state deviation from the target object state (e.g. xyz position). To facilitate contact goal specification, we introduce a keypoint-based technique: simulated objects are augmented with \"contact stickers\" — surface markers representing desirable contact locations. The contact goal, in terms of reward, can then be specified as $r_{\\mathrm{contact}} = \\sum_{i}\\left[\\frac{1}{1 + \\alpha d(\\mathbf{X}^{L},\\mathbf{F}_{i}^{L})} +\\frac{1}{1 + \\beta d(\\mathbf{X}^{R},\\mathbf{F}_{i}^{R})}\\right]$ , where $\\mathbf{X}^L\\in \\mathbb{R}^{n\\times 3}$ and $\\mathbf{X}^R\\in \\mathbb{R}^{m\\times 3}$ are the positions of contact markers specified for left and right hands, $\\mathbf{F}^L\\in \\mathbb{R}^{4\\times 3}$ and $\\mathbf{F}^R\\in \\mathbb{R}^{4\\times 3}$ are the position of left and right fingertips, $\\alpha$ and $\\beta$ are scaling hyperparameters, and $d$ is a distance function defined as $d(\\mathbf{A},\\mathbf{x}) = \\min_i\\| \\mathbf{A}_i - \\mathbf{x}\\| _2$ . These contact markers can be arbitrarily specified – for example, procedurally generated based on object geometry – offering a flexible way to incorporate contact preferences or human priors. 
A visualization of contact markers is shown in Figure 2B, and their empirical effectiveness is analyzed in Section 4.",
429
+ "bbox": [
430
+ 169,
431
+ 631,
432
+ 826,
433
+ 912
434
+ ],
435
+ "page_idx": 3
436
+ },
437
+ {
438
+ "type": "page_number",
439
+ "text": "4",
440
+ "bbox": [
441
+ 493,
442
+ 935,
443
+ 504,
444
+ 946
445
+ ],
446
+ "page_idx": 3
447
+ },
448
+ {
449
+ "type": "text",
450
+ "text": "3.3 Sample Efficient Policy Learning",
451
+ "text_level": 1,
452
+ "bbox": [
453
+ 171,
454
+ 90,
455
+ 442,
456
+ 104
457
+ ],
458
+ "page_idx": 4
459
+ },
460
+ {
461
+ "type": "text",
462
+ "text": "Even with a well-shaped reward, learning dexterous policies on high-dimensional bimanual multifingered systems remains sample-inefficient due to sparse rewards and exploration complexity. We introduce two techniques to improve sample efficiency: (1) task-aware initialization using human-guided hand poses, and (2) a divide-and-conquer strategy with policy distillation.",
463
+ "bbox": [
464
+ 169,
465
+ 106,
466
+ 823,
467
+ 165
468
+ ],
469
+ "page_idx": 4
470
+ },
471
+ {
472
+ "type": "text",
473
+ "text": "Task-aware hand poses for initialization. We collect task-relevant hand-object configurations from human teleoperation in simulation. This can be done using any compatible system for bimanual multi-fingered hands. The recorded states, including object poses and robot joint positions, are then randomly sampled as initial conditions for each training episode. Unlike prior work that relies on full demonstration trajectories [55], our approach only requires humans to casually \"play around\" with the task goal in mind. This lightweight data collection takes less than 30 seconds per task since no expert demonstration is needed, yet proves highly effective in improving early-stage exploration.",
474
+ "bbox": [
475
+ 169,
476
+ 175,
477
+ 823,
478
+ 282
479
+ ],
480
+ "page_idx": 4
481
+ },
482
+ {
483
+ "type": "text",
484
+ "text": "Divide-and-conquer distillation. Standard RL exploration techniques [25, 26, 28, 56] aim to visit the state space more efficiently but do not fundamentally alter the difficulty of sparse-reward problems: the probability of receiving learning signals from visiting the \"right\" states remains the same. We instead overcome the exploration problem by breaking down the explorable state space itself, e.g. decomposing a multi-object manipulation task into multiple single-object manipulation tasks. Once specialized policies are trained for each sub-task, high-quality rollouts can be filtered and distilled into a generalist policy using shared observation and action spaces. This effectively brings pure RL closer to learning from demonstrations, where the sub-task policies act as \"teleoperators\" in the simulation environment, and the centralized generalist policy learns from curated data.",
485
+ "bbox": [
486
+ 169,
487
+ 290,
488
+ 823,
489
+ 426
490
+ ],
491
+ "page_idx": 4
492
+ },
493
+ {
494
+ "type": "text",
495
+ "text": "3.4 Vision-Based Sim-to-Real Transfer",
496
+ "text_level": 1,
497
+ "bbox": [
498
+ 171,
499
+ 443,
500
+ 455,
501
+ 455
502
+ ],
503
+ "page_idx": 4
504
+ },
505
+ {
506
+ "type": "text",
507
+ "text": "Vision-based sim-to-real transfer is particularly challenging due to domain gaps in both dynamics and perception. We employ two strategies to address these challenges: hybrid object representations and extensive domain randomization.",
508
+ "bbox": [
509
+ 169,
510
+ 458,
511
+ 823,
512
+ 502
513
+ ],
514
+ "page_idx": 4
515
+ },
516
+ {
517
+ "type": "text",
518
+ "text": "Hybrid object representations. Dexterous manipulation often requires precise perception of object pose and geometry. Prior work spans a spectrum of object representations, from 3D position [14] and 6D pose [9], to depth [17, 12], point cloud [57], and RGB images [10]. Higher-dimensional representations encode richer information about the object, improving task performance but also widening the sim-to-real gap; and vice versa. To balance the trade-offs, we propose to use a mix of low- and high-dimensional signals: 3D object position (from a fixed third-person view) and depth image (from an egocentric view). We obtain the 3D object position from a reliable object tracking module with relatively controllable noise, and use segment out the object depth to reduce the visual sim-to-real gap. We validate this design in Section 4.",
519
+ "bbox": [
520
+ 169,
521
+ 512,
522
+ 823,
523
+ 648
524
+ ],
525
+ "page_idx": 4
526
+ },
527
+ {
528
+ "type": "text",
529
+ "text": "Domain randomization for perception and dynamics. To improve robustness, we apply extensive domain randomization during training. This includes variation in object parameters, camera parameters, robot physical properties, and observation noises. Full details are provided in Appendix 6.3.",
530
+ "bbox": [
531
+ 169,
532
+ 657,
533
+ 823,
534
+ 704
535
+ ],
536
+ "page_idx": 4
537
+ },
538
+ {
539
+ "type": "text",
540
+ "text": "4 Experiments",
541
+ "text_level": 1,
542
+ "bbox": [
543
+ 171,
544
+ 723,
545
+ 313,
546
+ 739
547
+ ],
548
+ "page_idx": 4
549
+ },
550
+ {
551
+ "type": "text",
552
+ "text": "Our proposed approaches form a general recipe that allows for the practical application of RL to solve dexterous manipulation with humanoids. In this section, we show experimental results of task capabilities and ablation studies of each proposed technique. Videos can be found on our website.",
553
+ "bbox": [
554
+ 169,
555
+ 744,
556
+ 823,
557
+ 789
558
+ ],
559
+ "page_idx": 4
560
+ },
561
+ {
562
+ "type": "text",
563
+ "text": "4.1 Real-World and Simulator Setup",
564
+ "text_level": 1,
565
+ "bbox": [
566
+ 171,
567
+ 806,
568
+ 442,
569
+ 821
570
+ ],
571
+ "page_idx": 4
572
+ },
573
+ {
574
+ "type": "text",
575
+ "text": "We use a Fourier GR1 humanoid robot with two arms and two multi-fingered hands. Each arm has 7 degrees of freedom (DoF). For most experiments, we use the Fourier hands, each of which has 6 actuated DoFs and 5 underactuated DoFs. To show cross-embodiment generalization, we include results on the Inspire hands, each with 6 actuated DoFs and 6 underactuated DoFs. The hardware has substantially different masses, surface frictions, finger and palm morphologies, and thumb actuations. Figure 1 visualizes both hands. We use the NVIDIA Isaac Gym simulator [58].",
576
+ "bbox": [
577
+ 169,
578
+ 821,
579
+ 823,
580
+ 911
581
+ ],
582
+ "page_idx": 4
583
+ },
584
+ {
585
+ "type": "page_number",
586
+ "text": "5",
587
+ "bbox": [
588
+ 493,
589
+ 935,
590
+ 504,
591
+ 946
592
+ ],
593
+ "page_idx": 4
594
+ },
595
+ {
596
+ "type": "image",
597
+ "img_path": "images/7386af12e088eed7f0ca72c7e1e554ccee010a438089d2be0641886d309256c3.jpg",
598
+ "image_caption": [
599
+ "Figure 3: Policies learned in simulation. Left: grasp-and-reach; middle: box lift; right: bimanual handover (right-to-left, left-to-right)."
600
+ ],
601
+ "image_footnote": [],
602
+ "bbox": [
603
+ 174,
604
+ 97,
605
+ 408,
606
+ 220
607
+ ],
608
+ "page_idx": 5
609
+ },
610
+ {
611
+ "type": "image",
612
+ "img_path": "images/07a5e67eb503b4d97efc95117d1c48472e5e85bc9b0b8539490657f0a14e29f1.jpg",
613
+ "image_caption": [
614
+ "Figure 4: Training grasp-and-reach policy with different object sets. Each curve is from 10 runs with different random seeds. Left: training with complex objects v.s. simple geometric primitive objects. Right: training with differently grouped geometric objects."
615
+ ],
616
+ "image_footnote": [],
617
+ "bbox": [
618
+ 416,
619
+ 89,
620
+ 820,
621
+ 219
622
+ ],
623
+ "page_idx": 5
624
+ },
625
+ {
626
+ "type": "text",
627
+ "text": "Perception. As outlined in Section 3.4, we use a combination of dense and sparse object features for policy learning in both simulation and real-world transfer. In the real world, we set up an egocentric-view RealSense D435 depth camera on the head of the humanoid robot and a third-view RealSense D435 depth camera on a tripod in front of the robot (illustrated in Figure 1). In simulation, we similarly set up the two cameras by calibrating their poses against the real camera poses. The dense object feature is obtained by directly reading depth observations from the egocentric-view camera. The sparse feature is obtained by approximating the object's center-of-mass from the third-view camera, using a similar technique as in Lin et al. [14]. As illustrated in Figure 2, we use the Segment Anything Model 2 (SAM2) [59] to generate a segmentation mask for the object at each trajectory sequence's initial frame, and leverage the tracking capabilities of SAM2 to track the mask throughout all remaining frames. To approximate object's 3D center-of-mass coordinates, we calculate the center position of object mask in the image plane, then obtain noisy depth readings from a depth camera to recover a corresponding 3D position. The perception pipeline runs at $5\\mathrm{Hz}$ to match the neural network policy's control frequency.",
628
+ "bbox": [
629
+ 169,
630
+ 297,
631
+ 826,
632
+ 508
633
+ ],
634
+ "page_idx": 5
635
+ },
636
+ {
637
+ "type": "text",
638
+ "text": "4.2 Task Definition",
639
+ "text_level": 1,
640
+ "bbox": [
641
+ 169,
642
+ 530,
643
+ 318,
644
+ 542
645
+ ],
646
+ "page_idx": 5
647
+ },
648
+ {
649
+ "type": "text",
650
+ "text": "(A) Grasp-and-reach. The robot must use one hand to grasp a tabletop object, lift it, and place it at a goal location. At initialization, a scripted vision module selects the closer hand to object. Test objects vary in shape, mass, volume, friction, color, and texture (see Figure 1). Each trial randomizes object pose and goal location. (B) Box lift. The robot lifts a box too large for single-handed grasping. Box size, color, mass, and initial pose (with randomized position and yaw) are varied across trials. (C) Bimanual handover. The robot grasps an object from one side of the table with one hand and hands it over to the other hand, which cannot reach the object directly. Objects vary in color, size, mass, and pose. We vary the initial pose of blocks in each trial.",
651
+ "bbox": [
652
+ 169,
653
+ 546,
654
+ 823,
655
+ 667
656
+ ],
657
+ "page_idx": 5
658
+ },
659
+ {
660
+ "type": "text",
661
+ "text": "4.3 Evaluation of Real-to-Sim Modeling",
662
+ "text_level": 1,
663
+ "bbox": [
664
+ 169,
665
+ 686,
666
+ 465,
667
+ 703
668
+ ],
669
+ "page_idx": 5
670
+ },
671
+ {
672
+ "type": "text",
673
+ "text": "Effectiveness of autotuned robot modeling. We apply the autotune module described in Section 3.1 to optimize the robot modeling parameters. To assess its effectiveness, we compare the sim-to-real transfer success rates of three sets of policy checkpoints, each trained with identical settings except for the robot modeling parameters. These parameter sets correspond to varying levels of modeling accuracy, as measured by the mean squared error (MSE) from autotune – ranging from the lowest (i.e., smallest real-to-sim gap) to the highest (i.e., largest real-to-sim gap). As shown in Table 1, policies trained with autotuned models exhibit significantly better sim-to-real performance. Qualitative examples in our video further demonstrate successful transfer of grasp-and-reach policies to the Inspire hands, highlighting the generalizability of our autotune module.",
674
+ "bbox": [
675
+ 169,
676
+ 705,
677
+ 823,
678
+ 843
679
+ ],
680
+ "page_idx": 5
681
+ },
682
+ {
683
+ "type": "text",
684
+ "text": "Effectiveness of approximate object modeling. Empirically, we find that modeling objects as primitive geometric shapes (cylinders, cubes, and spheres) strikes a good balance between training efficiency and sim-to-real transferability. As shown in Figure 4 (left), training grasp-and-reach policies with primitive shapes leads to faster convergence compared to using complex object ge",
685
+ "bbox": [
686
+ 169,
687
+ 851,
688
+ 823,
689
+ 912
690
+ ],
691
+ "page_idx": 5
692
+ },
693
+ {
694
+ "type": "page_number",
695
+ "text": "6",
696
+ "bbox": [
697
+ 493,
698
+ 936,
699
+ 504,
700
+ 946
701
+ ],
702
+ "page_idx": 5
703
+ },
704
+ {
705
+ "type": "image",
706
+ "img_path": "images/ff9c4a1686d839bb2ac8c8dea457c595f9959c42f2369933eb977ca7973007ca.jpg",
707
+ "image_caption": [],
708
+ "image_footnote": [],
709
+ "bbox": [
710
+ 176,
711
+ 90,
712
+ 485,
713
+ 191
714
+ ],
715
+ "page_idx": 6
716
+ },
717
+ {
718
+ "type": "image",
719
+ "img_path": "images/3294b067eb7c89ced3da4332a0aaaf02c1ac3d88ec50477d1adce6c754d90c18.jpg",
720
+ "image_caption": [
721
+ "Figure 6: Policy robustness. Our learned policies remain robust under different force perturbations, including knock (top left), pull (top right), push (bottom left), and drag (bottom right)."
722
+ ],
723
+ "image_footnote": [],
724
+ "bbox": [
725
+ 176,
726
+ 193,
727
+ 485,
728
+ 296
729
+ ],
730
+ "page_idx": 6
731
+ },
732
+ {
733
+ "type": "image",
734
+ "img_path": "images/e7f5d3c852c1930f3c4eb4f4f91221ce95838200397d789aab187a1c81cf83d1.jpg",
735
+ "image_caption": [
736
+ "Figure 5: Different contact patterns emerge from different placements of contact markers. Top: contact markers on the left and right side centers; middle: markers on the top and bottom side centers; bottom: markers on the bottom side edges."
737
+ ],
738
+ "image_footnote": [],
739
+ "bbox": [
740
+ 176,
741
+ 296,
742
+ 485,
743
+ 400
744
+ ],
745
+ "page_idx": 6
746
+ },
747
+ {
748
+ "type": "image",
749
+ "img_path": "images/593f76e314d0a0bea666ea387fdc4309440913a474b09b9c2ba43a34c7ceffbe.jpg",
750
+ "image_caption": [],
751
+ "image_footnote": [],
752
+ "bbox": [
753
+ 501,
754
+ 90,
755
+ 812,
756
+ 170
757
+ ],
758
+ "page_idx": 6
759
+ },
760
+ {
761
+ "type": "image",
762
+ "img_path": "images/7a5193e4ebfaa3ffd564988f5a6a6f5c4a681f48a75f822544c1971ef547ec85.jpg",
763
+ "image_caption": [],
764
+ "image_footnote": [],
765
+ "bbox": [
766
+ 501,
767
+ 170,
768
+ 812,
769
+ 252
770
+ ],
771
+ "page_idx": 6
772
+ },
773
+ {
774
+ "type": "table",
775
+ "img_path": "images/0172bb6c21e41e4fbafaa530bcb4055a470897fe7c95001e965ffc01b11de54e.jpg",
776
+ "table_caption": [],
777
+ "table_footnote": [],
778
+ "table_body": "<table><tr><td>Autotune MSE</td><td>Lowest</td><td>Median</td><td>Highest</td></tr><tr><td>Grasp Success</td><td>8 / 10</td><td>3 / 10</td><td>0 / 10</td></tr><tr><td>Reach Success</td><td>7 / 10</td><td>3 / 10</td><td>0 / 10</td></tr></table>",
779
+ "bbox": [
780
+ 526,
781
+ 316,
782
+ 779,
783
+ 367
784
+ ],
785
+ "page_idx": 6
786
+ },
787
+ {
788
+ "type": "text",
789
+ "text": "Table 1: Lower MSE from autotune correlates with higher sim-to-real success rate. For each set of modeling parameters, we test the sim-to-real transfer performance of 10 policy checkpoints (trained identically except for random seed). We evaluate success rate by stages on the grasp-and-reach task, and observe a correlation between lower MSE measured by autotune module and higher sim-to-real transfer success rate.",
790
+ "bbox": [
791
+ 490,
792
+ 369,
793
+ 820,
794
+ 470
795
+ ],
796
+ "page_idx": 6
797
+ },
798
+ {
799
+ "type": "text",
800
+ "text": "omtries. More importantly, policies trained with randomized primitive shapes demonstrate strong generalization to a diverse set of unseen objects, as illustrated in our video.",
801
+ "bbox": [
802
+ 169,
803
+ 489,
804
+ 823,
805
+ 518
806
+ ],
807
+ "page_idx": 6
808
+ },
809
+ {
810
+ "type": "text",
811
+ "text": "4.4 Evaluation of Reward Design",
812
+ "text_level": 1,
813
+ "bbox": [
814
+ 171,
815
+ 537,
816
+ 418,
817
+ 553
818
+ ],
819
+ "page_idx": 6
820
+ },
821
+ {
822
+ "type": "text",
823
+ "text": "Task capabilities. Enabled by our proposed reward design principle, a broad range of long-horizon, contact-rich tasks can be successfully solved using pure RL, as shown in Figure 3 and video. The resulting policies exhibit notable dexterity and robustness under various random force disturbances.",
824
+ "bbox": [
825
+ 169,
826
+ 555,
827
+ 823,
828
+ 599
829
+ ],
830
+ "page_idx": 6
831
+ },
832
+ {
833
+ "type": "text",
834
+ "text": "Effectiveness of contact-based rewards. In Figure 5, we visualize how different contact behaviors emerge from varying the placement of contact markers, using the box lift task as an example. The contact stickers are procedurally generated along box sides or edges based on the box dimensions. The resulting behaviors closely reflect the specified contact positions, demonstrating the effectiveness of using contact markers to define contact goals.",
835
+ "bbox": [
836
+ 169,
837
+ 609,
838
+ 823,
839
+ 685
840
+ ],
841
+ "page_idx": 6
842
+ },
843
+ {
844
+ "type": "text",
845
+ "text": "4.5 Evaluation of Policy Learning",
846
+ "text_level": 1,
847
+ "bbox": [
848
+ 171,
849
+ 703,
850
+ 421,
851
+ 718
852
+ ],
853
+ "page_idx": 6
854
+ },
855
+ {
856
+ "type": "text",
857
+ "text": "Effectiveness of task-aware hand pose initialization. In Table 2, we compare the percentage of successfully trained policies for each task with and without task-aware hand pose initialization. The results indicate that incorporating human priors at initialization significantly enhances exploration efficiency in challenging RL tasks.",
858
+ "bbox": [
859
+ 169,
860
+ 720,
861
+ 826,
862
+ 781
863
+ ],
864
+ "page_idx": 6
865
+ },
866
+ {
867
+ "type": "text",
868
+ "text": "Divide-and-conquer distillation. We evaluate our divide-and-conquer distillation strategy through two ablation studies. First, we examine how the granularity of task decomposition affects training efficiency in a multi-object grasp-and-reach task involving 10 objects. We compare four designs: (1) training a single policy on all objects (all); (2) training three policies on shape-similar object groups (shape); (3) training three policies on shape-diverse object groups (mix); and (4) training ten separate single-object policies (single). As shown in Figure 4, single achieves the highest sample efficiency, followed by shape, all, and mix. The average success rates also vary across designs, reflecting differences in task difficulty. Notably, policies trained on reduced object sets",
869
+ "bbox": [
870
+ 169,
871
+ 791,
872
+ 826,
873
+ 912
874
+ ],
875
+ "page_idx": 6
876
+ },
877
+ {
878
+ "type": "page_number",
879
+ "text": "7",
880
+ "bbox": [
881
+ 493,
882
+ 935,
883
+ 504,
884
+ 946
885
+ ],
886
+ "page_idx": 6
887
+ },
888
+ {
889
+ "type": "table",
890
+ "img_path": "images/895f069ec75f162d0dda001d120edc090e8855caee87707174712b084cfc6d10.jpg",
891
+ "table_caption": [],
892
+ "table_footnote": [],
893
+ "table_body": "<table><tr><td>% Success</td><td>Grasping</td><td>Lifting</td><td>Handover</td></tr><tr><td>with Human Init</td><td>80%</td><td>90%</td><td>30%</td></tr><tr><td>w/o Human Init</td><td>60%</td><td>90%</td><td>0%</td></tr></table>",
894
+ "bbox": [
895
+ 173,
896
+ 114,
897
+ 436,
898
+ 165
899
+ ],
900
+ "page_idx": 7
901
+ },
902
+ {
903
+ "type": "table",
904
+ "img_path": "images/d8df38abaf382da9849cdf16e3b1475a908a25ca2b78167ca8046d574be31e63.jpg",
905
+ "table_caption": [
906
+ "Table 2: Initializing with human data. Correlation between the percentage of successful task policies and whether human play data is used for initialization. We define successful policies as those that achieve over $60\\%$ episodic success during evaluation. For each task and each initialization setting, we test with 10 random seeds."
907
+ ],
908
+ "table_footnote": [],
909
+ "table_body": "<table><tr><td>Task</td><td>Grasping</td><td>Lifting</td><td>HandoverA</td><td>HandoverB</td></tr><tr><td colspan=\"5\">Depth + Pos</td></tr><tr><td>Pickup</td><td>10 / 10</td><td>10 / 10</td><td>10 / 10</td><td>10 / 10</td></tr><tr><td>Task Success</td><td>10 / 10</td><td>10 / 10</td><td>9 / 10</td><td>5 / 10</td></tr><tr><td colspan=\"5\">Depth Only</td></tr><tr><td>Pickup</td><td>2 / 10</td><td>0 / 10</td><td>0 / 10</td><td>0 / 10</td></tr><tr><td>Task Success</td><td>2 / 10</td><td>0 / 10</td><td>0 / 10</td><td>0 / 10</td></tr></table>",
910
+ "bbox": [
911
+ 467,
912
+ 88,
913
+ 802,
914
+ 188
915
+ ],
916
+ "page_idx": 7
917
+ },
918
+ {
919
+ "type": "text",
920
+ "text": "Table 3: Comparing sim-to-real transfer performance between depth-and-position policy and depth-only policy. We separate the bimanual handover task into two columns due to its longer horizon. Pickup success measures how often hands pick up the object. Combining 3D position with depth enables easier sim-to-real transfer.",
921
+ "bbox": [
922
+ 442,
923
+ 195,
924
+ 823,
925
+ 272
926
+ ],
927
+ "page_idx": 7
928
+ },
929
+ {
930
+ "type": "text",
931
+ "text": "converge to similar final performance, while the all policy consistently underperforms. Second, we evaluate the sim-to-real transfer success rate of each policy type on an in-distribution object over 30 trials. The mix policy performs best (90.0%), followed by shape (63.3%), single (40.0%), and all (23.3%). We hypothesize that the lower performance of single and mix arises from overfitting to specific geometries, while the poor performance of all is consistent with its weaker RL training outcomes. These results suggest that divide-and-conquer distillation strikes a favorable balance between training efficiency and sim-to-real generalization.",
932
+ "bbox": [
933
+ 169,
934
+ 299,
935
+ 826,
936
+ 405
937
+ ],
938
+ "page_idx": 7
939
+ },
940
+ {
941
+ "type": "text",
942
+ "text": "4.6 Evaluation of Vision-Based Sim-to-Real Transfer",
943
+ "text_level": 1,
944
+ "bbox": [
945
+ 169,
946
+ 421,
947
+ 553,
948
+ 434
949
+ ],
950
+ "page_idx": 7
951
+ },
952
+ {
953
+ "type": "text",
954
+ "text": "Effectiveness of mixing object representations. We study the impact of different object representations on sim-to-real transfer performance, with results summarized in Table 3. Our findings show that combining a dense representation (segmented depth image) with a sparse representation (3D object center-of-mass position) leads to improved transfer success. Notably, the performance gap between the combined depth-and-position policy and the depth-only policy widens for tasks where accurate understanding of full object geometry is more critical to success.",
955
+ "bbox": [
956
+ 169,
957
+ 436,
958
+ 823,
959
+ 527
960
+ ],
961
+ "page_idx": 7
962
+ },
963
+ {
964
+ "type": "text",
965
+ "text": "4.7 System Capabilities",
966
+ "text_level": 1,
967
+ "bbox": [
968
+ 169,
969
+ 542,
970
+ 351,
971
+ 558
972
+ ],
973
+ "page_idx": 7
974
+ },
975
+ {
976
+ "type": "text",
977
+ "text": "Task performance, generalization, robustness. We evaluate the overall effectiveness of our system by reporting task success rates using the best-performing policy for each task. For each task, we perform 10 trials for each test object and compute the average success rate across all objects. We report a $62.3\\%$ success rate for the grasp-and-reach task, $80\\%$ for box lift, and $52.5\\%$ for bimanual handover. To assess generalization, we test the grasp-and-reach policy on out-of-distribution objects and present qualitative evidence of successful zero-shot transfer in our video. Additionally, we evaluate the robustness of our policies under external force perturbations across all tasks, as shown in Figure 6 and our videos. More details on the object set for each task are reported in Figure 1.",
978
+ "bbox": [
979
+ 169,
980
+ 560,
981
+ 826,
982
+ 696
983
+ ],
984
+ "page_idx": 7
985
+ },
986
+ {
987
+ "type": "text",
988
+ "text": "Extension to a more capable system. The learned RL policies can be seamlessly integrated with higher-level control structures such as finite state machines or teleoperation frameworks to enable longer-horizon task execution, while preserving dexterity, robustness, and generalization. As a proof of concept, our video showcases a general pick-and-drop system constructed by scripting sequences around the grasp-and-reach policy.",
989
+ "bbox": [
990
+ 169,
991
+ 705,
992
+ 823,
993
+ 781
994
+ ],
995
+ "page_idx": 7
996
+ },
997
+ {
998
+ "type": "text",
999
+ "text": "5 Conclusion",
1000
+ "text_level": 1,
1001
+ "bbox": [
1002
+ 169,
1003
+ 799,
1004
+ 302,
1005
+ 814
1006
+ ],
1007
+ "page_idx": 7
1008
+ },
1009
+ {
1010
+ "type": "text",
1011
+ "text": "We present a comprehensive recipe for applying sim-to-real RL to vision-based dexterous manipulation on humanoids. By addressing key challenges in environment modeling, reward design, policy learning, and sim-to-real transfer, we show that RL can be a powerful tool for learning highly useful manipulation skills without the need for extensive human demonstrations. Our learned policies exhibit strong generalization to unseen objects, robustness against force disturbances, and the ability to perform long-horizon contact-rich tasks.",
1012
+ "bbox": [
1013
+ 169,
1014
+ 819,
1015
+ 823,
1016
+ 910
1017
+ ],
1018
+ "page_idx": 7
1019
+ },
1020
+ {
1021
+ "type": "page_number",
1022
+ "text": "8",
1023
+ "bbox": [
1024
+ 493,
1025
+ 935,
1026
+ 504,
1027
+ 946
1028
+ ],
1029
+ "page_idx": 7
1030
+ },
1031
+ {
1032
+ "type": "text",
1033
+ "text": "6 Limitations",
1034
+ "text_level": 1,
1035
+ "bbox": [
1036
+ 174,
1037
+ 90,
1038
+ 302,
1039
+ 104
1040
+ ],
1041
+ "page_idx": 8
1042
+ },
1043
+ {
1044
+ "type": "text",
1045
+ "text": "In this work, we investigate the key challenges in applying RL to robot manipulation and introduce practical and principled techniques to overcome the hurdles. Based on the techniques proposed, we build a sim-to-real RL pipeline that demonstrates a feasible path to solve robot manipulation, with evidence on generalizability, robustness, and dexterity.",
1046
+ "bbox": [
1047
+ 174,
1048
+ 122,
1049
+ 823,
1050
+ 180
1051
+ ],
1052
+ "page_idx": 8
1053
+ },
1054
+ {
1055
+ "type": "text",
1056
+ "text": "However, the capabilities achieved in this work are still far from the kind of \"general-purpose\" manipulation that humans are capable of. Much work remains to be done to improve each individual component of this pipeline and unlock the full potential of sim-to-real RL. For example, the reward design could be improved by integrating even stronger human priors, such as task demonstrations collected from teleoperation; alternative controller, such as torque controller, could also be explored.",
1057
+ "bbox": [
1058
+ 174,
1059
+ 188,
1060
+ 823,
1061
+ 263
1062
+ ],
1063
+ "page_idx": 8
1064
+ },
1065
+ {
1066
+ "type": "text",
1067
+ "text": "There are also important open problems that our work does not address. For example, our work uses no novel technique to reduce the sim-to-real gap in dynamics other than applying naive domain randomization. We hypothesize that this could be a reason for the low success rate on bimanual handover task, which is the most dynamic among our collection of tasks.",
1068
+ "bbox": [
1069
+ 174,
1070
+ 271,
1071
+ 823,
1072
+ 330
1073
+ ],
1074
+ "page_idx": 8
1075
+ },
1076
+ {
1077
+ "type": "text",
1078
+ "text": "Lastly, we find ourselves heavily constrained by the lack of reliable hardware for dexterous manipulation. While we use multi-fingered robot hands, the dexterity of these hands is far from that of human hands in terms of the active degrees of freedom. We believe the dexterity of our learned policies is not limited by the approach, and we hope to extend our framework to robot hands with more sophisticated designs in the future.",
1079
+ "bbox": [
1080
+ 174,
1081
+ 338,
1082
+ 823,
1083
+ 412
1084
+ ],
1085
+ "page_idx": 8
1086
+ },
1087
+ {
1088
+ "type": "page_number",
1089
+ "text": "9",
1090
+ "bbox": [
1091
+ 493,
1092
+ 936,
1093
+ 503,
1094
+ 946
1095
+ ],
1096
+ "page_idx": 8
1097
+ },
1098
+ {
1099
+ "type": "text",
1100
+ "text": "Acknowledgments",
1101
+ "text_level": 1,
1102
+ "bbox": [
1103
+ 171,
1104
+ 90,
1105
+ 305,
1106
+ 107
1107
+ ],
1108
+ "page_idx": 9
1109
+ },
1110
+ {
1111
+ "type": "text",
1112
+ "text": "We thank members of NVIDIA GEAR lab for help with hardware infrastructure, in particular Zhenjia Xu, Yizhou Zhao, and Zu Wang. This work was partially conducted during TL's internship at NVIDIA. TL is supported by NVIDIA and the National Science Foundation fellowship.",
1113
+ "bbox": [
1114
+ 169,
1115
+ 114,
1116
+ 826,
1117
+ 161
1118
+ ],
1119
+ "page_idx": 9
1120
+ },
1121
+ {
1122
+ "type": "text",
1123
+ "text": "References",
1124
+ "text_level": 1,
1125
+ "bbox": [
1126
+ 173,
1127
+ 179,
1128
+ 269,
1129
+ 196
1130
+ ],
1131
+ "page_idx": 9
1132
+ },
1133
+ {
1134
+ "type": "list",
1135
+ "sub_type": "ref_text",
1136
+ "list_items": [
1137
+ "[1] J. Bjorck, F. Castaneda, N. Cherniadev, X. Da, R. Ding, L. Fan, Y. Fang, D. Fox, F. Hu, S. Huang, et al. Gr00t n1: An open foundation model for generalist humanoid robots. arXiv preprint arXiv:2503.14734, 2025.",
1138
+ "[2] X. Cheng, J. Li, S. Yang, G. Yang, and X. Wang. Open-television: Teleoperation with immersive active visual feedback. arXiv preprint arXiv:2407.01512, 2024.",
1139
+ "[3] T. Lin, Y. Zhang, Q. Li, H. Qi, B. Yi, S. Levine, and J. Malik. Learning visuotactile skills with two multifingered hands. arXiv:2404.16823, 2024.",
1140
+ "[4] T. Haarnoja, B. Moran, G. Lever, S. H. Huang, D. Tirumala, J. Humplik, M. Wulfmeier, S. Tunyasuvunakool, N. Y. Siegel, R. Hafner, et al. Learning agile soccer skills for a bipedal robot with deep reinforcement learning. Science Robotics, 9(89):eadi8022, 2024.",
1141
+ "[5] N. Rudin, D. Hoeller, M. Bjelonic, and M. Hutter. Advanced skills by learning locomotion and local navigation end-to-end. In 2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 2497-2503. IEEE, 2022.",
1142
+ "[6] J. Hwangbo, J. Lee, A. Dosovitskiy, D. Bellicoso, V. Tsounis, V. Koltun, and M. Hutter. Learning agile and dynamic motor skills for legged robots. Science Robotics, 4(26):eauu5872, 2019.",
1143
+ "[7] J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning quadrupedal locomotion over challenging terrain. Science robotics, 5(47):eabc5986, 2020.",
1144
+ "[8] E. Kaufmann, L. Bauersfeld, A. Loquercio, M. Müller, V. Koltun, and D. Scaramuzza. Champion-level drone racing using deep reinforcement learning. Nature, 620(7976):982-987, 2023.",
1145
+ "[9] I. Akkaya, M. Andrychowicz, M. Chogiej, M. Litwin, B. McGrew, A. Petron, A. Paino, M. Plappert, G. Powell, R. Ribas, et al. Solving rubik's cube with a robot hand. arXiv preprint arXiv:1910.07113, 2019.",
1146
+ "[10] A. Handa, A. Allshire, V. Makoviychuk, A. Petrenko, R. Singh, J. Liu, D. Makoviichuk, K. Van Wyk, A. Zhurkevich, B. Sundaralingam, et al. Dextreme: Transfer of agile in-hand manipulation from simulation to reality. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 5977-5984. IEEE, 2023.",
1147
+ "[11] T. Chen, J. Xu, and P. Agrawal. A system for general in-hand object re-orientation. In Conference on Robot Learning, pages 297–307. PMLR, 2022.",
1148
+ "[12] H. Qi, B. Yi, S. Suresh, M. Lambeta, Y. Ma, R. Calandra, and J. Malik. General in-hand object rotation with vision and touch. In Conference on Robot Learning, pages 2549-2564. PMLR, 2023.",
1149
+ "[13] R. Singh, A. Allshire, A. Handa, N. Ratliff, and K. Van Wyk. Dextrah-rgb: Visuomotor policies to grasp anything with dexterous hands. arXiv preprint arXiv:2412.01791, 2024.",
1150
+ "[14] T. Lin, Z.-H. Yin, H. Qi, P. Abbeel, and J. Malik. Twisting lids off with two hands. arXiv:2403.02338, 2024.",
1151
+ "[15] B. Huang, Y. Chen, T. Wang, Y. Qin, Y. Yang, N. Atanasov, and X. Wang. Dynamic handover: Throw and catch with bimanual hands. arXiv preprint arXiv:2309.05655, 2023."
1152
+ ],
1153
+ "bbox": [
1154
+ 171,
1155
+ 204,
1156
+ 825,
1157
+ 912
1158
+ ],
1159
+ "page_idx": 9
1160
+ },
1161
+ {
1162
+ "type": "page_number",
1163
+ "text": "10",
1164
+ "bbox": [
1165
+ 490,
1166
+ 935,
1167
+ 509,
1168
+ 946
1169
+ ],
1170
+ "page_idx": 9
1171
+ },
1172
+ {
1173
+ "type": "list",
1174
+ "sub_type": "ref_text",
1175
+ "list_items": [
1176
+ "[16] H. Qi, A. Kumar, R. Calandra, Y. Ma, and J. Malik. In-Hand Object Rotation via Rapid Motor Adaptation. In Conference on Robot Learning (CoRL), 2022.",
1177
+ "[17] T. G. W. Lum, M. Matak, V. Makoviychuk, A. Handa, A. Allshire, T. Hermans, N. D. Ratliff, and K. Van Wyk. Dextrah-g: Pixels-to-action dexterous arm-hand grasping with geometric fabrics. arXiv preprint arXiv:2407.02274, 2024.",
1178
+ "[18] O. AI, Sep 2024. URL https://openai.com/index/learning-to-reason-with-llms.",
1179
+ "[19] D.-A. et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948.",
1180
+ "[20] D. Silver, T. Hubert, J. Schrittwieser, I. Antonoglou, M. Lai, A. Guez, M. Lanctot, L. Sifre, D. Kumaran, T. Graepel, et al. Mastering chess and shogi by self-play with a general reinforcement learning algorithm. arXiv preprint arXiv:1712.01815, 2017.",
1181
+ "[21] O. Vinyals, I. Babuschkin, W. M. Czarnecki, M. Mathieu, A. Dudzik, J. Chung, D. H. Choi, R. Powell, T. Ewalds, P. Georgiev, et al. Grandmaster level in starcraft ii using multi-agent reinforcement learning. nature, 575(7782):350-354, 2019.",
1182
+ "[22] P. Henderson, R. Islam, P. Bachman, J. Pineau, D. Precup, and D. Meger. Deep reinforcement learning that matters. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018.",
1183
+ "[23] R. Islam, P. Henderson, M. Gomrokchi, and D. Precup. Reproducibility of benchmarked deep reinforcement learning tasks for continuous control. arXiv preprint arXiv:1708.04133, 2017.",
1184
+ "[24] M. Bellemare, S. Srinivasan, G. Ostrovski, T. Schaul, D. Saxton, and R. Munos. Unifying count-based exploration and intrinsic motivation. Advances in neural information processing systems, 29, 2016.",
1185
+ "[25] Y. Burda, H. Edwards, A. Storkey, and O. Klimov. Exploration by random network distillation. arXiv preprint arXiv:1810.12894, 2018.",
1186
+ "[26] T. Lin and A. Jabri. Mimex: intrinsic rewards from masked input modeling. Advances in Neural Information Processing Systems, 36, 2024.",
1187
+ "[27] G. Ostrovski, M. G. Bellemare, A. Oord, and R. Munos. Count-based exploration with neural density models. In International conference on machine learning, pages 2721-2730. PMLR, 2017.",
1188
+ "[28] D. Pathak, P. Agrawal, A. A. Efros, and T. Darrell. Curiosity-driven exploration by self-supervised prediction. In International conference on machine learning, pages 2778-2787. PMLR, 2017.",
1189
+ "[29] B. C. Stadie, S. Levine, and P. Abbeel. Incentivizing exploration in reinforcement learning with deep predictive models. arXiv preprint arXiv:1507.00814, 2015.",
1190
+ "[30] H. Tang, R. Houthooft, D. Foote, A. Stooke, O. Xi Chen, Y. Duan, J. Schulman, F. DeTurck, and P. Abbeel. # exploration: A study of count-based exploration for deep reinforcement learning. Advances in neural information processing systems, 30, 2017.",
1191
+ "[31] M. G. Bellemare, Y. Naddaf, J. Veness, and M. Bowling. The arcade learning environment: An evaluation platform for general agents. Journal of Artificial Intelligence Research, 47: 253-279, 2013.",
1192
+ "[32] Y. Tassa, Y. Doron, A. Muldal, T. Erez, Y. Li, D. d. L. Casas, D. Budden, A. Abdolmaleki, J. Merel, A. Lefrancq, et al. Deepmind control suite. arXiv preprint arXiv:1801.00690, 2018."
1193
+ ],
1194
+ "bbox": [
1195
+ 171,
1196
+ 90,
1197
+ 825,
1198
+ 912
1199
+ ],
1200
+ "page_idx": 10
1201
+ },
1202
+ {
1203
+ "type": "page_number",
1204
+ "text": "11",
1205
+ "bbox": [
1206
+ 490,
1207
+ 935,
1208
+ 506,
1209
+ 946
1210
+ ],
1211
+ "page_idx": 10
1212
+ },
1213
+ {
1214
+ "type": "list",
1215
+ "sub_type": "ref_text",
1216
+ "list_items": [
1217
+ "[33] Y. Chen, C. Wang, Y. Yang, and C. K. Liu. Object-centric dexterous manipulation from human motion data. arXiv preprint arXiv:2411.04005, 2024.",
1218
+ "[34] A. Rajeswaran, V. Kumar, A. Gupta, G. Vezzani, J. Schulman, E. Todorov, and S. Levine. Learning complex dexterous manipulation with deep reinforcement learning and demonstrations. arXiv preprint arXiv:1709.10087, 2017.",
1219
+ "[35] Z.-H. Yin, C. Wang, L. Pineda, F. Hogan, K. Bodduluri, A. Sharma, P. Lancaster, I. Prasad, M. Kalakrishnan, J. Malik, et al. Dexteritygen: Foundation controller for unprecedented dexterity. arXiv preprint arXiv:2502.04307, 2025.",
1220
+ "[36] Y. Zhu, Z. Wang, J. Merel, A. Rusu, T. Erez, S. Cabi, S. Tunyasuvunakool, J. Kramár, R. Hadsell, N. de Freitas, et al. Reinforcement and imitation learning for diverse visuomotor skills. arXiv preprint arXiv:1802.09564, 2018.",
1221
+ "[37] M. Torne, A. Simeonov, Z. Li, A. Chan, T. Chen, A. Gupta, and P. Agrawal. Reconciling reality through simulation: A real-to-sim-to-real approach for robust manipulation. *Arxiv*, 2024.",
1222
+ "[38] M. Memmel, A. Wagenmaker, C. Zhu, P. Yin, D. Fox, and A. Gupta. Asid: Active exploration for system identification in robotic manipulation. arXiv preprint arXiv:2404.12308, 2024.",
1223
+ "[39] C. Zhang, W. Xiao, T. He, and G. Shi. Wococo: Learning whole-body humanoid control with sequential contacts. arXiv preprint arXiv:2406.06005, 2024.",
1224
+ "[40] P. Wu, Y. Shentu, Z. Yi, X. Lin, and P. Abbeel. Gello: A general, low-cost, and intuitive teleoperation framework for robot manipulators, 2023.",
1225
+ "[41] T. Z. Zhao, V. Kumar, S. Levine, and C. Finn. Learning fine-grained bimanual manipulation with low-cost hardware. arXiv preprint arXiv:2304.13705, 2023.",
1226
+ "[42] C. Chi, S. Feng, Y. Du, Z. Xu, E. Cousineau, B. Burchfiel, and S. Song. Diffusion policy: Visuomotor policy learning via action diffusion. In RSS, 2023.",
1227
+ "[43] X. Li, T. Zhao, X. Zhu, J. Wang, T. Pang, and K. Fang. Planning-guided diffusion policy learning for generalizable contact-rich bimanual manipulation. arXiv preprint arXiv:2412.02676, 2024.",
1228
+ "[44] T. Z. Zhao, J. Tompson, D. Driess, P. Florence, K. Ghasemipour, C. Finn, and A. Wahid. Aloha unleashed: A simple recipe for robot dexterity. arXiv preprint arXiv:2410.13126, 2024.",
1229
+ "[45] S. Levine, P. Pastor, A. Krizhevsky, J. Ibarz, and D. Quillen. Learning hand-eye coordination for robotic grasping with deep learning and large-scale data collection. The International journal of robotics research, 37(4-5):421-436, 2018.",
1230
+ "[46] F. Lin, Y. Hu, P. Sheng, C. Wen, J. You, and Y. Gao. Data scaling laws in imitation learning for robotic manipulation. arXiv preprint arXiv:2410.18647, 2024.",
1231
+ "[47] J. Wang, Y. Yuan, H. Che, H. Qi, Y. Ma, J. Malik, and X. Wang. Lessons from learning to spin\" pens\". arXiv preprint arXiv:2407.18902, 2024.",
1232
+ "[48] Y. Chen, C. Wang, L. Fei-Fei, and C. K. Liu. Sequential dexterity: Chaining dexterous policies for long-horizon manipulation. arXiv preprint arXiv:2309.00987, 2023.",
1233
+ "[49] I. Radosavovic, T. Xiao, B. Zhang, T. Darrell, J. Malik, and K. Sreenath. Real-world humanoid locomotion with reinforcement learning. Science Robotics, 9(89):eadi9579, 2024.",
1234
+ "[50] H. Qi, A. Kumar, R. Calandra, Y. Ma, and J. Malik. In-hand object rotation via rapid motor adaptation. In Conference on Robot Learning, pages 1722-1732. PMLR, 2023.",
1235
+ "[51] R. S. Sutton and A. G. Barto. Introduction to Reinforcement Learning. MIT Press, Cambridge, MA, 1998."
1236
+ ],
1237
+ "bbox": [
1238
+ 171,
1239
+ 90,
1240
+ 825,
1241
+ 912
1242
+ ],
1243
+ "page_idx": 11
1244
+ },
1245
+ {
1246
+ "type": "page_number",
1247
+ "text": "12",
1248
+ "bbox": [
1249
+ 490,
1250
+ 935,
1251
+ 508,
1252
+ 946
1253
+ ],
1254
+ "page_idx": 11
1255
+ },
1256
+ {
1257
+ "type": "list",
1258
+ "sub_type": "ref_text",
1259
+ "list_items": [
1260
+ "[52] J. Eschmann. Reward Function Design in Reinforcement Learning, pages 25-33. Springer International Publishing, Cham, 2021. ISBN 978-3-030-41188-6. doi:10.1007/978-3-030-41188-6_3. URL https://doi.org/10.1007/978-3-030-41188-6_3.",
1261
+ "[53] D. Dewey. Reinforcement learning and the reward engineering principle. In 2014 AAAAI Spring Symposium Series, 2014.",
1262
+ "[54] K. Grauman, A. Westbury, L. Torresani, K. Kitani, J. Malik, T. Afouras, K. Ashutosh, V. Baiyya, S. Bansal, B. Boote, et al. Ego-exo4d: Understanding skilled human activity from first-and third-person perspectives. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19383-19400, 2024.",
1263
+ "[55] M. Bauza, J. E. Chen, V. Dalibard, N. Gileadi, R. Hafner, M. F. Martins, J. Moore, R. Pevc\\v\\viciu cate, A. Laurens, D. Rao, et al. Demostart: Demonstration-led auto-curriculum applied to sim-to-real with multi-fingered robots. arXiv preprint arXiv:2409.06613, 2024.",
1264
+ "[56] A. A. Taïga, W. Fedus, M. C. Machado, A. Courville, and M. G. Bellemare. Benchmarking bonus-based exploration methods on the arcade learning environment. arXiv preprint arXiv:1908.02388, 2019.",
1265
+ "[57] M. Liu, Z. Chen, X. Cheng, Y. Ji, R.-Z. Qiu, R. Yang, and X. Wang. Visual whole-body control for legged loco-manipulation. arXiv preprint arXiv:2403.16967, 2024.",
1266
+ "[58] V. Makoviychuk, L. Wawrzyniak, Y. Guo, M. Lu, K. Storey, M. Macklin, D. Hoeller, N. Rudin, A. Allshire, A. Handa, et al. Isaac gym: High performancegpu-based physics simulation for robot learning. arXiv preprint arXiv:2108.10470, 2021.",
1267
+ "[59] N. Ravi, V. Gabeur, Y.-T. Hu, R. Hu, C. Ryali, T. Ma, H. Khedr, R. Rädle, C. Rolland, L. Gustafson, et al. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024.",
1268
+ "[60] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.",
1269
+ "[61] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In CVPR, 2016.",
1270
+ "[62] S. Ioffe and C. Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In ICML, 2015.",
1271
+ "[63] Y. Wu and K. He. Group normalization. In ECCV, 2018.",
1272
+ "[64] D. P. Kingma and J. Ba. Adam: A method for stochastic optimization. In ICLR, 2015.",
1273
+ "[65] I. Loshchilov and F. Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017."
1274
+ ],
1275
+ "bbox": [
1276
+ 173,
1277
+ 90,
1278
+ 825,
1279
+ 719
1280
+ ],
1281
+ "page_idx": 12
1282
+ },
1283
+ {
1284
+ "type": "page_number",
1285
+ "text": "13",
1286
+ "bbox": [
1287
+ 490,
1288
+ 935,
1289
+ 508,
1290
+ 946
1291
+ ],
1292
+ "page_idx": 12
1293
+ },
1294
+ {
1295
+ "type": "text",
1296
+ "text": "Appendix",
1297
+ "text_level": 1,
1298
+ "bbox": [
1299
+ 171,
1300
+ 90,
1301
+ 259,
1302
+ 107
1303
+ ],
1304
+ "page_idx": 13
1305
+ },
1306
+ {
1307
+ "type": "text",
1308
+ "text": "6.1 Environment Modeling Details",
1309
+ "text_level": 1,
1310
+ "bbox": [
1311
+ 171,
1312
+ 119,
1313
+ 426,
1314
+ 135
1315
+ ],
1316
+ "page_idx": 13
1317
+ },
1318
+ {
1319
+ "type": "text",
1320
+ "text": "Modeling underactuated joints. Since modeling underactuated joint structure is not directly supported, we approximate the relationship between each pair of actuated and underactuated joints by fitting a linear function $q_{u} = k \\cdot q_{a} + b$ , where $q_{u}$ is the underactuated joint angle and $q_{a}$ is the actuated joint angle. Note that parameters $k, b$ are included as tunable parameters to search over using our autotune module detailed in Section 3.1.",
1321
+ "bbox": [
1322
+ 169,
1323
+ 148,
1324
+ 823,
1325
+ 224
1326
+ ],
1327
+ "page_idx": 13
1328
+ },
1329
+ {
1330
+ "type": "text",
1331
+ "text": "6.2 Reward Design Details",
1332
+ "text_level": 1,
1333
+ "bbox": [
1334
+ 171,
1335
+ 239,
1336
+ 372,
1337
+ 255
1338
+ ],
1339
+ "page_idx": 13
1340
+ },
1341
+ {
1342
+ "type": "text",
1343
+ "text": "We design generalizable rewards based on the principle outlined in Section 3.2 and list task reward details below.",
1344
+ "bbox": [
1345
+ 169,
1346
+ 267,
1347
+ 823,
1348
+ 297
1349
+ ],
1350
+ "page_idx": 13
1351
+ },
1352
+ {
1353
+ "type": "text",
1354
+ "text": "Both grasp and lift tasks can be defined with the following goal states: (1) finger contact with the object; (2) the object being lifted up to a goal position. Our reward design can, therefore, follow by combining the contact goal reward and the object goal reward terms:",
1355
+ "bbox": [
1356
+ 169,
1357
+ 304,
1358
+ 823,
1359
+ 349
1360
+ ],
1361
+ "page_idx": 13
1362
+ },
1363
+ {
1364
+ "type": "equation",
1365
+ "text": "\n$$\nr \\left(s _ {h}, s _ {o}\\right) = r _ {\\text {c o n t a c t}} \\left(s _ {h}, s _ {o}\\right) + r _ {\\text {g o a l}} \\left(s _ {o}\\right) \\tag {1}\n$$\n",
1366
+ "text_format": "latex",
1367
+ "bbox": [
1368
+ 362,
1369
+ 357,
1370
+ 823,
1371
+ 376
1372
+ ],
1373
+ "page_idx": 13
1374
+ },
1375
+ {
1376
+ "type": "text",
1377
+ "text": "where $s_h$ includes fingertip positions, and $s_o$ includes the object center-of-mass position and all contact marker positions (if any).",
1378
+ "bbox": [
1379
+ 169,
1380
+ 382,
1381
+ 823,
1382
+ 412
1383
+ ],
1384
+ "page_idx": 13
1385
+ },
1386
+ {
1387
+ "type": "text",
1388
+ "text": "Similarly, the handover task can be defined with the following goal states: (1) one hand's finger contact with the object; (2) object being transferred to an intermediate goal position while still in contact with the first hand; (3) the second hand's finger contact with the object; (4) object being transferred to the final goal position. Due to the hand switching, we introduce a stage variable $a \\in \\{0, 1\\}$ and design the reward as follows:",
1389
+ "bbox": [
1390
+ 169,
1391
+ 419,
1392
+ 825,
1393
+ 494
1394
+ ],
1395
+ "page_idx": 13
1396
+ },
1397
+ {
1398
+ "type": "equation",
1399
+ "text": "\n$$\n\\begin{array}{l} r \\left(s _ {h}, s _ {o}\\right) = (1 - a) \\cdot \\left(r _ {\\text {c o n t a c t}} \\left(s _ {h _ {A}}, s _ {o _ {A}}\\right) + r _ {\\text {g o a l}} \\left(s _ {o _ {A}}\\right)\\right) \\\\ + a \\cdot \\left(r _ {\\text {c o n t a c t}} \\left(s _ {h _ {B}}, s _ {o _ {B}}\\right) + r _ {\\text {g o a l}} \\left(s _ {o _ {B}}\\right)\\right) \\tag {2} \\\\ \\end{array}\n$$\n",
1400
+ "text_format": "latex",
1401
+ "bbox": [
1402
+ 316,
1403
+ 502,
1404
+ 823,
1405
+ 537
1406
+ ],
1407
+ "page_idx": 13
1408
+ },
1409
+ {
1410
+ "type": "text",
1411
+ "text": "where $s_{h_A}, s_{h_B}$ denote fingertip positions of the engaged hand at each stage, $s_o$ denotes the object center-of-mass position and desirable contact marker positions (if any) at each stage. At completion of each stage, we also reward the policy with a bonus whose scale increases as the stage progresses.",
1412
+ "bbox": [
1413
+ 169,
1414
+ 544,
1415
+ 823,
1416
+ 589
1417
+ ],
1418
+ "page_idx": 13
1419
+ },
1420
+ {
1421
+ "type": "text",
1422
+ "text": "6.3 Policy Training Details",
1423
+ "text_level": 1,
1424
+ "bbox": [
1425
+ 171,
1426
+ 604,
1427
+ 372,
1428
+ 619
1429
+ ],
1430
+ "page_idx": 13
1431
+ },
1432
+ {
1433
+ "type": "text",
1434
+ "text": "RL implementation. To learn the specialist policies, the observation space includes object position and robot joint position at each time step, and the action space is robot joint angles. We use Proximal Policy Optimization [60] with asymmetric actor-critic as the RL algorithm. In addition to the policy inputs, we provide the following privilege state inputs to the asymmetric critic: arm joint velocities, hand joint velocities, all fingertip positions, object orientation, object velocity, object angular velocity, object mass randomization scale, object friction randomization scale, and object shape randomization scale. Both the actor and critic networks are 3-layer MLPs with units (512, 512, 512).",
1435
+ "bbox": [
1436
+ 169,
1437
+ 633,
1438
+ 823,
1439
+ 739
1440
+ ],
1441
+ "page_idx": 13
1442
+ },
1443
+ {
1444
+ "type": "text",
1445
+ "text": "Domain randomization. Physical randomization includes the randomization of object friction, mass, and scale. We also apply random forces to the object to simulate the physical effects that are not implemented by the simulator. Non-physical randomization models the noise in observation (e.g. joint position measurement and detected object positions) and action. A summary of our randomization attributes and parameters is shown in Table 4.",
1446
+ "bbox": [
1447
+ 169,
1448
+ 750,
1449
+ 825,
1450
+ 824
1451
+ ],
1452
+ "page_idx": 13
1453
+ },
1454
+ {
1455
+ "type": "text",
1456
+ "text": "6.4 Distillation Details",
1457
+ "text_level": 1,
1458
+ "bbox": [
1459
+ 171,
1460
+ 840,
1461
+ 344,
1462
+ 854
1463
+ ],
1464
+ "page_idx": 13
1465
+ },
1466
+ {
1467
+ "type": "text",
1468
+ "text": "To learn the generalist policy, we reduce the choices of observation inputs to the robot joint states and selective object states, including 3D object position and egocentric depth view, since privileged information is unavailable for sim-to-real transfer. To more efficiently utilize the trajectory data",
1469
+ "bbox": [
1470
+ 169,
1471
+ 867,
1472
+ 823,
1473
+ 912
1474
+ ],
1475
+ "page_idx": 13
1476
+ },
1477
+ {
1478
+ "type": "page_number",
1479
+ "text": "14",
1480
+ "bbox": [
1481
+ 490,
1482
+ 935,
1483
+ 508,
1484
+ 946
1485
+ ],
1486
+ "page_idx": 13
1487
+ },
1488
+ {
1489
+ "type": "table",
1490
+ "img_path": "images/06dac3754997d9660abd20485ab8d56447bac5b50631f69dc736186cd79b5c1e.jpg",
1491
+ "table_caption": [
1492
+ "Table 4: Domain Randomization Setup."
1493
+ ],
1494
+ "table_footnote": [],
1495
+ "table_body": "<table><tr><td>Object: Mass (kg)</td><td>[0.03, 0.1]</td></tr><tr><td>Object: Friction</td><td>[0.5, 1.5]</td></tr><tr><td>Object: Shape</td><td>×U(0.95, 1.05)</td></tr><tr><td>Object: Initial Position (cm)</td><td>+U(-0.02, 0.02)</td></tr><tr><td>Object: Initial z-orientation</td><td>+U(-0.75, 0.75)</td></tr><tr><td>Hand: Friction</td><td>[0.5, 1.5]</td></tr><tr><td>PD Controller: P Gain</td><td>×U(0.8, 1.1)</td></tr><tr><td>PD Controller: D Gain</td><td>×U(0.7, 1.2)</td></tr><tr><td>Random Force: Scale</td><td>2.0</td></tr><tr><td>Random Force: Probability</td><td>0.2</td></tr><tr><td>Random Force: Decay Coeff. and Interval</td><td>0.99 every 0.1s</td></tr><tr><td>Object Pos Observation: Noise</td><td>0.02</td></tr><tr><td>Joint Observation Noise.</td><td>+N(0, 0.4)</td></tr><tr><td>Action Noise.</td><td>+N(0, 0.1)</td></tr><tr><td>Frame Lag Probability</td><td>0.1</td></tr><tr><td>Action Lag Probability</td><td>0.1</td></tr><tr><td>Depth: Camera Pos Noise (cm)</td><td>0.005</td></tr><tr><td>Depth: Camera Rot Noise (deg)</td><td>5.0</td></tr><tr><td>Depth: Camera Field-of-View (deg)</td><td>5.0</td></tr></table>",
1496
+ "bbox": [
1497
+ 173,
1498
+ 112,
1499
+ 823,
1500
+ 428
1501
+ ],
1502
+ "page_idx": 14
1503
+ },
1504
+ {
1505
+ "type": "text",
1506
+ "text": "and improve training stability, for each sub-task specialist policy, we evaluate for 5000 steps over 100 environments, saving trajectories filtered by success at episode reset on the hard disk. We then treat the saved data as \"demonstrations\" and learn a generalist policy for each task with Diffusion Policies [42].",
1507
+ "bbox": [
1508
+ 169,
1509
+ 454,
1510
+ 823,
1511
+ 515
1512
+ ],
1513
+ "page_idx": 14
1514
+ },
1515
+ {
1516
+ "type": "text",
1517
+ "text": "The proprioception and object position states are concatenated and passed through a three-layer network with ELU activation, hidden sizes of (512, 512, 512), and an output feature size of 64. For depth observations, we use the ResNet-18 architecture [61] and replace all the BatchNorm [62] in the network with GroupNorm [63], following [42]. All the encoded features are then concatenated as the input to a diffusion model. We use the same noise schedule (square cosine schedule) and the same number of diffusion steps (100) for training as in [42]. The diffusion output from the model is the normalized 7 DoF absolute desired joint positions of each humanoid arm and the 6 DoF normalized (0 to 1) desired joint positions of each humanoid hand. We use the AdamW optimizer [64, 65] with a learning rate of 0.0001, weight decay of 0.00001, and a batch size of 128. Following [42], we maintain an exponential weighted average of the model weights and use it during evaluation/deployment.",
1518
+ "bbox": [
1519
+ 169,
1520
+ 521,
1521
+ 826,
1522
+ 688
1523
+ ],
1524
+ "page_idx": 14
1525
+ },
1526
+ {
1527
+ "type": "page_number",
1528
+ "text": "15",
1529
+ "bbox": [
1530
+ 490,
1531
+ 935,
1532
+ 508,
1533
+ 946
1534
+ ],
1535
+ "page_idx": 14
1536
+ }
1537
+ ]
data/2025/2502_20xxx/2502.20396/847fca61-3c3a-4285-8573-13bf56b2db4b_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20396/847fca61-3c3a-4285-8573-13bf56b2db4b_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a678705e72259debacdc10f6149bee22138b84a217181088c1f027d5da1c07c4
3
+ size 13949937
data/2025/2502_20xxx/2502.20396/full.md ADDED
@@ -0,0 +1,322 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Sim-to-Real Reinforcement Learning for Vision-Based Dexterous Manipulation on Humanoids
2
+
3
+ Toru Lin $^{1,2}$ Kartik Sachdev $^{2}$ Linxi “Jim” Fan $^{2}$ Jitendra Malik $^{1}$ Yuke Zhu $^{2,3}$
4
+
5
+ UC Berkeley<sup>1</sup> NVIDIA<sup>2</sup> UT Austin<sup>3</sup>
6
+
7
+ https://toruowo.github.io/recipe
8
+
9
+ ![](images/e5267a69cc356c7f28a67ff9bdcc27ca603308b4d364d7f5e9c24f138d05c240.jpg)
10
+ Figure 1: Overview. We train a humanoid robot with two multi-fingered hands to perform a range of contact-rich dexterous manipulation tasks on diverse objects. Observations are obtained from a third-view camera, an egocentric camera, and robot proprioception. Our reinforcement learning policies generalize zero-shot to unseen real-world objects with varying physical properties (e.g. shape, size, color, material, mass) and remain robust against force disturbances. We also validate the adaptability of our approach on two hardware variations.
11
+
12
+ ![](images/2ffbe5634a06768a3022f11d310ae3514ee36b5419d5b2c9723440580680839b.jpg)
13
+
14
+ Abstract: Learning generalizable robot manipulation policies, especially for complex multi-fingered humanoids, remains a significant challenge. Existing approaches primarily rely on extensive data collection and imitation learning, which are expensive, labor-intensive, and difficult to scale. Sim-to-real reinforcement learning (RL) offers a promising alternative, but has mostly succeeded in simpler state-based or single-hand setups. How to effectively extend this to vision-based, contact-rich bimanual manipulation tasks remains an open question. In this paper, we introduce a practical sim-to-real RL recipe that trains a humanoid robot to perform three challenging dexterous manipulation tasks: grasp-and-reach, box lift and bimanual handover. Our method features an automated real-to-sim tuning module, a generalized reward formulation based on contact and object goals, a divide-and-conquer policy distillation framework, and a hybrid object representation strategy with modality-specific augmentation. We demonstrate high success rates on unseen objects and robust, adaptive policy behaviors – highlighting that vision-based dexterous manipulation via sim-to-real RL is not only viable, but also scalable and broadly applicable to real-world humanoid manipulation tasks.
15
+
16
+ Keywords: Humanoids, Vision-Based Dexterous Manipulation, Reinforcement Learning, Sim-to-Real
17
+
18
+ # 1 Introduction
19
+
20
+ Learning generalizable manipulation policies – especially for complex humanoid robots equipped with multi-fingered hands – remains a formidable challenge in robotics. Existing approaches often rely on extensive real-world data collection and imitation learning [1, 2, 3], which are costly, labor-intensive, and difficult to scale. Sim-to-real reinforcement learning (RL) offers a promising alternative and has achieved impressive results in navigation [4, 5], locomotion [6, 7], and autonomous drone racing [8]. However, its application to dexterous manipulation remains largely limited to single-hand [9, 10, 11, 12, 13] or state-based setups [14, 15, 16, 17], leaving open the question of how to scale RL to vision-based, contact-rich bimanual tasks on humanoid embodiments.
21
+
22
+ In this work, we present a practical vision-based sim-to-real RL recipe that enables a multi-fingered humanoid robot to learn highly generalizable, robust, and dexterous manipulation skills. We identify and address several key challenges that have not been thoroughly explored in prior works:
23
+
24
+ (A) Sim-to-real for low-cost manipulation systems. Existing approaches rely on industry-grade robotic arms with high-precision motors. However, many humanoid platforms employ much more lightweight, noisier motors. This makes contact-rich dexterous grasping and bimanual coordination significantly harder, especially with sim-to-real method. We introduce a simple, automated real-to-sim system identification method to overcome this with less than four minutes of real-world data.
25
+ (B) Reward design for complex coordination. Bimanual manipulation tasks such as handover and lifting require complex coordination between arms and hands: one side must act in a way that complements the other, with precision in both motion and timing. Designing a reward function that captures this type of contact-rich collaboration is nontrivial. We propose a novel keypoint-based reward formulation to facilitate such coordination effectively.
26
+ (C) Exploration. The long-horizon, high-dimensional nature of bimanual coordination introduces a hard exploration problem, even when reward functions are well-shaped. We propose to use a task-aware initialization strategy to accelerate single task RL, and decompose the overall multi-task (e.g. object) policy learning into separate single-task RL followed by generalist policy distillation.
27
+ (D) Object perception The combination of object diversity and sim-to-real domain shift makes vision-based manipulation particularly difficult. We propose a hybrid object representation that combines compact low-dimensional features with expressive high-dimensional features, augmented via modality-specific randomization. We find this simple strategy surprisingly effective – improving sim-to-real success rates on novel objects significantly by $80\sim 100\%$ .
28
+
29
+ We demonstrate the effectiveness of our approach on three challenging vision-based manipulation tasks: dexterous grasp-and-reach, bimanual lifting, bimanual handover. Our zero-shot sim-to-real policies exhibit robust, adaptive, and generalizable behavior on unseen real-world objects with diverse physical properties, achieving $90\%$ success rate on seen objects and $60\sim 80\%$ success rate on novel objects. Additionally, we confirm our method's adaptability to hardware variation across two distinct multi-fingered robot hands. Together, these results establish a practical and scalable recipe for high-performance vision-based dexterous manipulation via sim-to-real RL.
30
+
31
+ # 2 Background
32
+
33
+ # 2.1 Deep Reinforcement Learning Applications to Robotics
34
+
35
+ The successes of deep RL across domains like gaming, language modeling, and control [6, 8, 18, 19, 20, 21] have generated widespread excitement. However, the paradigm is known to be brittle, with sensitivity to hyperparameters [22] and reproducibility issues [23] due to high algorithmic variance.
36
+
37
+ Among open problems in RL, exploration remains fundamental. Unlike supervised learning, RL agents must collect their own data — and the strategy for doing so directly affects performance. Real-world robotics compounds this difficulty with high-dimensional inputs, sparse rewards, and complex dynamics. Numerous methods have aimed to scale exploration by incentivizing novelty [24, 25, 26, 27, 28, 29, 30], but they do not fundamentally resolve the exploration bottleneck.
38
+
39
+ Robotics also exposes challenges overlooked in standard RL benchmarks [31, 32], including: (1) the absence of fully modeled environments, and (2) the lack of clearly defined reward functions. Past works have introduced practical techniques to mitigate these issues, such as learning from
40
+
41
+ ![](images/5c8b0964cb61399ffd8500c29d20f739895b95ae3ec3c8588e7612efe0dcca4d.jpg)
42
+ Figure 2: A sim-to-real RL recipe for vision-based dexterous manipulation. We close the environment modeling gap between simulation and reality through an automated real-to-sim tuning module, design generalizable task rewards by disentangling each manipulation task into contact states and object states, improve sample efficiency of policy training by using task-aware hand poses and divide-and-conquer distillation, and transfer vision-based policies to the real world with a mixture of sparse and dense object representations.
43
+
44
+ motion capture or teleoperated demonstrations [33, 34, 35, 36], real-to-sim modeling techniques [9, 4, 10, 14, 37], and more principled reward design [38, 39]. While often tailored to specific tasks or hardware, these approaches lay groundwork that our method builds upon and generalizes.
45
+
46
+ # 2.2 Vision-Based Dexterous Manipulation on Humanoids
47
+
48
+ Imitation learning and classical approaches. Recent advances in teleoperation [2, 3, 40, 41] and learning from demonstrations [42, 43] have enabled significant progress in vision-based dexterous manipulation [2, 43, 3, 44]. However, teleoperation remains costly to scale, and achieving high success rates with real-world demonstration data alone [45, 46, 44] requires large datasets, making purely supervised methods expensive for reaching human-level performance on complex tasks.
49
+
50
+ Reinforcement learning approaches. RL-based manipulation works have shown strong results in settings such as in-hand reorientation [9, 10, 12, 47], grasping [17, 13], twisting [14], and dynamic handover [15], but typically focus on single-hand setups [9, 48, 10, 17, 12, 13, 47] or use intermediate object representations rather than raw pixels [33, 15, 14]. The closest to our work is Chen et al. [33], but their method relies on human hand motion capture, while our work learns full hands-arms joint control from scratch. Our work is also the first to demonstrate robust sim-to-real transfer of bimanual policies on a novel humanoid platform with multi-fingered hands.
51
+
52
+ # 3 Our Recipe
53
+
54
+ Section 1 outlined four key challenges in sim-to-real RL for dexterous manipulation. Here, we provide the detailed approaches we develop for each. An overview is shown in Figure 2.
55
+
56
+ # 3.1 Real-to-Sim Modeling
57
+
58
+ Simulators offer unlimited trial-and-error chances to perform the exploration necessary for RL. However, whether policies learned in simulation can be reliably transferred to the real world hinges on accurate modeling of both robots and environments. In dexterous manipulation, this challenge is compounded by the necessity to model objects, which have diverse and often unmeasurable physical properties. Even with known parameters, matching real-world and simulated dynamics is nontrivial: the same values for physical constants in simulation and the real world do not necessarily correspond to identical kinematic and dynamic relationships due to discrepancies in physics engines.
59
+
60
+ Autotuned robot modeling. Manufacturer-supplied robot models offer a baseline, but often require significant tuning [9, 49] to be ready for sim-to-real. This tuning is a laborious process as there is no "ground truth" pairing between the real world and the simulated world. We propose an autotune module for fast, automated calibration of simulation parameters to match real robot behavior. As shown in Figure 2A and Algorithm 1, our method jointly optimizes simulator physics (e.g. friction, damping) and URDF constants (e.g. link inertia values, joint limits) using only a single set of calibration trajectories on real hardware. It samples parameter sets, runs joint-targeted motions in parallel simulations, and selects the set minimizing tracking error against the real robot - automatically searching the parameter space to identify optimal values for both simulator physics and robot model constants in under four minutes (or 2000 simulated steps in $10\mathrm{Hz}$ ). This removes the need for iterative manual tuning and generalizes to any simulator-exposed parameter affecting kinematic behaviors.
61
+
62
+ Algorithm 1 Real-to-Sim Autotune Module
63
+ Require:
64
+ 1: $E$ : Set of environment parameters to tune
65
+ 2: $N$ : Number of calibration action sequences
66
+ 3: $R$ : Real robot hardware environment
67
+ 4: $M$ : Initial robot model file
68
+ 5: procedure AUTOTUNE $(E,N,R,M)$
69
+ 6: $P\gets$ InitializeParameterSpace $(E,M)$
70
+ 7: $S\gets \{\}$ $\triangleright$ Set of simulated environments
71
+ 8: for $i\gets 1$ to $K$ do $\triangleright K$ is population size
72
+ 9: $p_i\gets$ RandomSample $(P)$
73
+ 10: $S_{i}\gets$ CreateSimEnvironment $(p_i)$
74
+ 11: $S\gets S\cup \{S_i\}$
75
+ 12: end for
76
+ 13: $J\gets$ GenerateJointTargets $(N)$
77
+ 14: $R_{track}\gets$ GetTrackingErrors $(R,J)$ $\triangleright$ Real tracking
78
+ 15: best_params $\leftarrow$ null
79
+ 16: min_error $\leftarrow \infty$
80
+ 17: for $S_{i}\in S$ do
81
+ 18: $S_{track}\gets$ GetTrackingErrors $(S_i,J)$
82
+ 19: error $\leftarrow$ ComputeMSE $(S_{track},R_{track})$
83
+ 20: if error $<$ min_error then
84
+ 21: min_error $\leftarrow$ error
85
+ 22: best_params $\leftarrow$ GetParameters $(S_{i})$
86
+ 23: end if
87
+ 24: end for
88
+ return best_params
89
+ 25: end procedure
90
+
91
+ Approximate object modeling. Following prior work [14, 50], we model objects using simple geometric primitives (e.g. cylinders) with randomized physical parameters. Despite their simplicity, these approximations are sufficient to learn dexterous manipulation policies that transfer reliably to the real world. Our recipe adopts this strategy and finds it both effective and generalizable.
92
+
93
+ # 3.2 Generalizable Reward Design
94
+
95
+ In standard RL [51], the reward function plays a central role in shaping agent behavior. However, much of RL research has treated rewards as fixed, focusing instead on algorithmic improvements [52]. In robotics — and especially in dexterous manipulation — designing effective, generalizable rewards becomes a key challenge due to complex contact dynamics and object variability [53].
96
+
97
+ Manipulation as contact and object goals. We observe that many human manipulation tasks [54] can be decomposed into a sequence of hand-object contact transitions and object state changes. Inspired by this, we propose a structured reward design scheme for long-horizon, contact-rich tasks. For instance, a bimanual handover can be segmented into: (1) one hand contacting the object, (2) lifting the object near the second hand, (3) the second hand contacting the object, and (4) transferring the object to the target location. We therefore define rewards based on two key components: "contact goals" encourages the fingertips to reach task-relevant contact points on object, and "object goals" penalizes current object state deviation from the target object state (e.g. xyz position). To facilitate contact goal specification, we introduce a keypoint-based technique: simulated objects are augmented with "contact stickers" — surface markers representing desirable contact locations. The contact goal, in terms of reward, can then be specified as $r_{\mathrm{contact}} = \sum_{i}\left[\frac{1}{1 + \alpha d(\mathbf{X}^{L},\mathbf{F}_{i}^{L})} +\frac{1}{1 + \beta d(\mathbf{X}^{R},\mathbf{F}_{i}^{R})}\right]$ , where $\mathbf{X}^L\in \mathbb{R}^{n\times 3}$ and $\mathbf{X}^R\in \mathbb{R}^{m\times 3}$ are the positions of contact markers specified for left and right hands, $\mathbf{F}^L\in \mathbb{R}^{4\times 3}$ and $\mathbf{F}^R\in \mathbb{R}^{4\times 3}$ are the position of left and right fingertips, $\alpha$ and $\beta$ are scaling hyperparameters, and $d$ is a distance function defined as $d(\mathbf{A},\mathbf{x}) = \min_i\| \mathbf{A}_i - \mathbf{x}\| _2$ . These contact markers can be arbitrarily specified – for example, procedurally generated based on object geometry – offering a flexible way to incorporate contact preferences or human priors. A visualization of contact markers is shown in Figure 2B, and their empirical effectiveness is analyzed in Section 4.
98
+
99
+ # 3.3 Sample Efficient Policy Learning
100
+
101
+ Even with a well-shaped reward, learning dexterous policies on high-dimensional bimanual multifingered systems remains sample-inefficient due to sparse rewards and exploration complexity. We introduce two techniques to improve sample efficiency: (1) task-aware initialization using human-guided hand poses, and (2) a divide-and-conquer strategy with policy distillation.
102
+
103
+ Task-aware hand poses for initialization. We collect task-relevant hand-object configurations from human teleoperation in simulation. This can be done using any compatible system for bimanual multi-fingered hands. The recorded states, including object poses and robot joint positions, are then randomly sampled as initial conditions for each training episode. Unlike prior work that relies on full demonstration trajectories [55], our approach only requires humans to casually "play around" with the task goal in mind. This lightweight data collection takes less than 30 seconds per task since no expert demonstration is needed, yet proves highly effective in improving early-stage exploration.
104
+
105
+ Divide-and-conquer distillation. Standard RL exploration techniques [25, 26, 28, 56] aim to visit the state space more efficiently but do not fundamentally alter the difficulty of sparse-reward problems: the probability of receiving learning signals from visiting the "right" states remains the same. We instead overcome the exploration problem by breaking down the explorable state space itself, e.g. decomposing a multi-object manipulation task into multiple single-object manipulation tasks. Once specialized policies are trained for each sub-task, high-quality rollouts can be filtered and distilled into a generalist policy using shared observation and action spaces. This effectively brings pure RL closer to learning from demonstrations, where the sub-task policies act as "teleoperators" in the simulation environment, and the centralized generalist policy learns from curated data.
106
+
107
+ # 3.4 Vision-Based Sim-to-Real Transfer
108
+
109
+ Vision-based sim-to-real transfer is particularly challenging due to domain gaps in both dynamics and perception. We employ two strategies to address these challenges: hybrid object representations and extensive domain randomization.
110
+
111
+ Hybrid object representations. Dexterous manipulation often requires precise perception of object pose and geometry. Prior work spans a spectrum of object representations, from 3D position [14] and 6D pose [9], to depth [17, 12], point cloud [57], and RGB images [10]. Higher-dimensional representations encode richer information about the object, improving task performance but also widening the sim-to-real gap; and vice versa. To balance the trade-offs, we propose to use a mix of low- and high-dimensional signals: 3D object position (from a fixed third-person view) and depth image (from an egocentric view). We obtain the 3D object position from a reliable object tracking module with relatively controllable noise, and segment out the object depth to reduce the visual sim-to-real gap. We validate this design in Section 4.
112
+
113
+ Domain randomization for perception and dynamics. To improve robustness, we apply extensive domain randomization during training. This includes variation in object parameters, camera parameters, robot physical properties, and observation noises. Full details are provided in Appendix 6.3.
114
+
115
+ # 4 Experiments
116
+
117
+ Our proposed approaches form a general recipe that allows for the practical application of RL to solve dexterous manipulation with humanoids. In this section, we show experimental results of task capabilities and ablation studies of each proposed technique. Videos can be found on our website.
118
+
119
+ # 4.1 Real-World and Simulator Setup
120
+
121
+ We use a Fourier GR1 humanoid robot with two arms and two multi-fingered hands. Each arm has 7 degrees of freedom (DoF). For most experiments, we use the Fourier hands, each of which has 6 actuated DoFs and 5 underactuated DoFs. To show cross-embodiment generalization, we include results on the Inspire hands, each with 6 actuated DoFs and 6 underactuated DoFs. The hardware has substantially different masses, surface frictions, finger and palm morphologies, and thumb actuations. Figure 1 visualizes both hands. We use the NVIDIA Isaac Gym simulator [58].
122
+
123
+ ![](images/7386af12e088eed7f0ca72c7e1e554ccee010a438089d2be0641886d309256c3.jpg)
124
+ Figure 3: Policies learned in simulation. Left: grasp-and-reach; middle: box lift; right: bimanual handover (right-to-left, left-to-right).
125
+
126
+ ![](images/07a5e67eb503b4d97efc95117d1c48472e5e85bc9b0b8539490657f0a14e29f1.jpg)
127
+ Figure 4: Training grasp-and-reach policy with different object sets. Each curve is from 10 runs with different random seeds. Left: training with complex objects v.s. simple geometric primitive objects. Right: training with differently grouped geometric objects.
128
+
129
+ Perception. As outlined in Section 3.4, we use a combination of dense and sparse object features for policy learning in both simulation and real-world transfer. In the real world, we set up an egocentric-view RealSense D435 depth camera on the head of the humanoid robot and a third-view RealSense D435 depth camera on a tripod in front of the robot (illustrated in Figure 1). In simulation, we similarly set up the two cameras by calibrating their poses against the real camera poses. The dense object feature is obtained by directly reading depth observations from the egocentric-view camera. The sparse feature is obtained by approximating the object's center-of-mass from the third-view camera, using a similar technique as in Lin et al. [14]. As illustrated in Figure 2, we use the Segment Anything Model 2 (SAM2) [59] to generate a segmentation mask for the object at each trajectory sequence's initial frame, and leverage the tracking capabilities of SAM2 to track the mask throughout all remaining frames. To approximate object's 3D center-of-mass coordinates, we calculate the center position of object mask in the image plane, then obtain noisy depth readings from a depth camera to recover a corresponding 3D position. The perception pipeline runs at $5\mathrm{Hz}$ to match the neural network policy's control frequency.
130
+
131
+ # 4.2 Task Definition
132
+
133
+ (A) Grasp-and-reach. The robot must use one hand to grasp a tabletop object, lift it, and place it at a goal location. At initialization, a scripted vision module selects the hand closer to the object. Test objects vary in shape, mass, volume, friction, color, and texture (see Figure 1). Each trial randomizes object pose and goal location. (B) Box lift. The robot lifts a box too large for single-handed grasping. Box size, color, mass, and initial pose (with randomized position and yaw) are varied across trials. (C) Bimanual handover. The robot grasps an object from one side of the table with one hand and hands it over to the other hand, which cannot reach the object directly. Objects vary in color, size, mass, and pose. We vary the initial pose of blocks in each trial.
134
+
135
+ # 4.3 Evaluation of Real-to-Sim Modeling
136
+
137
+ Effectiveness of autotuned robot modeling. We apply the autotune module described in Section 3.1 to optimize the robot modeling parameters. To assess its effectiveness, we compare the sim-to-real transfer success rates of three sets of policy checkpoints, each trained with identical settings except for the robot modeling parameters. These parameter sets correspond to varying levels of modeling accuracy, as measured by the mean squared error (MSE) from autotune – ranging from the lowest (i.e., smallest real-to-sim gap) to the highest (i.e., largest real-to-sim gap). As shown in Table 1, policies trained with autotuned models exhibit significantly better sim-to-real performance. Qualitative examples in our video further demonstrate successful transfer of grasp-and-reach policies to the Inspire hands, highlighting the generalizability of our autotune module.
138
+
139
+ Effectiveness of approximate object modeling. Empirically, we find that modeling objects as primitive geometric shapes (cylinders, cubes, and spheres) strikes a good balance between training efficiency and sim-to-real transferability. As shown in Figure 4 (left), training grasp-and-reach policies with primitive shapes leads to faster convergence compared to using complex object ge
140
+
141
+ ![](images/ff9c4a1686d839bb2ac8c8dea457c595f9959c42f2369933eb977ca7973007ca.jpg)
142
+
143
+ ![](images/3294b067eb7c89ced3da4332a0aaaf02c1ac3d88ec50477d1adce6c754d90c18.jpg)
144
+ Figure 6: Policy robustness. Our learned policies remain robust under different force perturbations, including knock (top left), pull (top right), push (bottom left), and drag (bottom right).
145
+
146
+ ![](images/e7f5d3c852c1930f3c4eb4f4f91221ce95838200397d789aab187a1c81cf83d1.jpg)
147
+ Figure 5: Different contact patterns emerge from different placements of contact markers. Top: contact markers on the left and right side centers; middle: markers on the top and bottom side centers; bottom: markers on the bottom side edges.
148
+
149
+ ![](images/593f76e314d0a0bea666ea387fdc4309440913a474b09b9c2ba43a34c7ceffbe.jpg)
150
+
151
+ ![](images/7a5193e4ebfaa3ffd564988f5a6a6f5c4a681f48a75f822544c1971ef547ec85.jpg)
152
+
153
+ <table><tr><td>Autotune MSE</td><td>Lowest</td><td>Median</td><td>Highest</td></tr><tr><td>Grasp Success</td><td>8 / 10</td><td>3 / 10</td><td>0 / 10</td></tr><tr><td>Reach Success</td><td>7 / 10</td><td>3 / 10</td><td>0 / 10</td></tr></table>
154
+
155
+ Table 1: Lower MSE from autotune correlates with higher sim-to-real success rate. For each set of modeling parameters, we test the sim-to-real transfer performance of 10 policy checkpoints (trained identically except for random seed). We evaluate success rate by stages on the grasp-and-reach task, and observe a correlation between lower MSE measured by autotune module and higher sim-to-real transfer success rate.
156
+
157
+ ometries. More importantly, policies trained with randomized primitive shapes demonstrate strong generalization to a diverse set of unseen objects, as illustrated in our video.
158
+
159
+ # 4.4 Evaluation of Reward Design
160
+
161
+ Task capabilities. Enabled by our proposed reward design principle, a broad range of long-horizon, contact-rich tasks can be successfully solved using pure RL, as shown in Figure 3 and video. The resulting policies exhibit notable dexterity and robustness under various random force disturbances.
162
+
163
+ Effectiveness of contact-based rewards. In Figure 5, we visualize how different contact behaviors emerge from varying the placement of contact markers, using the box lift task as an example. The contact stickers are procedurally generated along box sides or edges based on the box dimensions. The resulting behaviors closely reflect the specified contact positions, demonstrating the effectiveness of using contact markers to define contact goals.
164
+
165
+ # 4.5 Evaluation of Policy Learning
166
+
167
+ Effectiveness of task-aware hand pose initialization. In Table 2, we compare the percentage of successfully trained policies for each task with and without task-aware hand pose initialization. The results indicate that incorporating human priors at initialization significantly enhances exploration efficiency in challenging RL tasks.
168
+
169
+ Divide-and-conquer distillation. We evaluate our divide-and-conquer distillation strategy through two ablation studies. First, we examine how the granularity of task decomposition affects training efficiency in a multi-object grasp-and-reach task involving 10 objects. We compare four designs: (1) training a single policy on all objects (all); (2) training three policies on shape-similar object groups (shape); (3) training three policies on shape-diverse object groups (mix); and (4) training ten separate single-object policies (single). As shown in Figure 4, single achieves the highest sample efficiency, followed by shape, all, and mix. The average success rates also vary across designs, reflecting differences in task difficulty. Notably, policies trained on reduced object sets
170
+
171
+ <table><tr><td>% Success</td><td>Grasping</td><td>Lifting</td><td>Handover</td></tr><tr><td>with Human Init</td><td>80%</td><td>90%</td><td>30%</td></tr><tr><td>w/o Human Init</td><td>60%</td><td>90%</td><td>0%</td></tr></table>
172
+
173
+ Table 2: Initializing with human data. Correlation between the percentage of successful task policies and whether human play data is used for initialization. We define successful policies as those that achieve over $60\%$ episodic success during evaluation. For each task and each initialization setting, we test with 10 random seeds.
174
+
175
+ <table><tr><td>Task</td><td>Grasping</td><td>Lifting</td><td>HandoverA</td><td>HandoverB</td></tr><tr><td colspan="5">Depth + Pos</td></tr><tr><td>Pickup</td><td>10 / 10</td><td>10 / 10</td><td>10 / 10</td><td>10 / 10</td></tr><tr><td>Task Success</td><td>10 / 10</td><td>10 / 10</td><td>9 / 10</td><td>5 / 10</td></tr><tr><td colspan="5">Depth Only</td></tr><tr><td>Pickup</td><td>2 / 10</td><td>0 / 10</td><td>0 / 10</td><td>0 / 10</td></tr><tr><td>Task Success</td><td>2 / 10</td><td>0 / 10</td><td>0 / 10</td><td>0 / 10</td></tr></table>
176
+
177
+ Table 3: Comparing sim-to-real transfer performance between depth-and-position policy and depth-only policy. We separate the bimanual handover task into two columns due to its longer horizon. Pickup success measures how often hands pick up the object. Combining 3D position with depth enables easier sim-to-real transfer.
178
+
179
+ converge to similar final performance, while the all policy consistently underperforms. Second, we evaluate the sim-to-real transfer success rate of each policy type on an in-distribution object over 30 trials. The mix policy performs best (90.0%), followed by shape (63.3%), single (40.0%), and all (23.3%). We hypothesize that the lower performance of single and mix arises from overfitting to specific geometries, while the poor performance of all is consistent with its weaker RL training outcomes. These results suggest that divide-and-conquer distillation strikes a favorable balance between training efficiency and sim-to-real generalization.
180
+
181
+ # 4.6 Evaluation of Vision-Based Sim-to-Real Transfer
182
+
183
+ Effectiveness of mixing object representations. We study the impact of different object representations on sim-to-real transfer performance, with results summarized in Table 3. Our findings show that combining a dense representation (segmented depth image) with a sparse representation (3D object center-of-mass position) leads to improved transfer success. Notably, the performance gap between the combined depth-and-position policy and the depth-only policy widens for tasks where accurate understanding of full object geometry is more critical to success.
184
+
185
+ # 4.7 System Capabilities
186
+
187
+ Task performance, generalization, robustness. We evaluate the overall effectiveness of our system by reporting task success rates using the best-performing policy for each task. For each task, we perform 10 trials for each test object and compute the average success rate across all objects. We report a $62.3\%$ success rate for the grasp-and-reach task, $80\%$ for box lift, and $52.5\%$ for bimanual handover. To assess generalization, we test the grasp-and-reach policy on out-of-distribution objects and present qualitative evidence of successful zero-shot transfer in our video. Additionally, we evaluate the robustness of our policies under external force perturbations across all tasks, as shown in Figure 6 and our videos. More details on the object set for each task are reported in Figure 1.
188
+
189
+ Extension to a more capable system. The learned RL policies can be seamlessly integrated with higher-level control structures such as finite state machines or teleoperation frameworks to enable longer-horizon task execution, while preserving dexterity, robustness, and generalization. As a proof of concept, our video showcases a general pick-and-drop system constructed by scripting sequences around the grasp-and-reach policy.
190
+
191
+ # 5 Conclusion
192
+
193
+ We present a comprehensive recipe for applying sim-to-real RL to vision-based dexterous manipulation on humanoids. By addressing key challenges in environment modeling, reward design, policy learning, and sim-to-real transfer, we show that RL can be a powerful tool for learning highly useful manipulation skills without the need for extensive human demonstrations. Our learned policies exhibit strong generalization to unseen objects, robustness against force disturbances, and the ability to perform long-horizon contact-rich tasks.
194
+
195
+ # 6 Limitations
196
+
197
+ In this work, we investigate the key challenges in applying RL to robot manipulation and introduce practical and principled techniques to overcome the hurdles. Based on the techniques proposed, we build a sim-to-real RL pipeline that demonstrates a feasible path to solve robot manipulation, with evidence on generalizability, robustness, and dexterity.
198
+
199
+ However, the capabilities achieved in this work are still far from the kind of "general-purpose" manipulation that humans are capable of. Much work remains to be done to improve each individual component of this pipeline and unlock the full potential of sim-to-real RL. For example, the reward design could be improved by integrating even stronger human priors, such as task demonstrations collected from teleoperation; alternative controller, such as torque controller, could also be explored.
200
+
201
+ There are also important open problems that our work does not address. For example, our work uses no novel technique to reduce the sim-to-real gap in dynamics other than applying naive domain randomization. We hypothesize that this could be a reason for the low success rate on bimanual handover task, which is the most dynamic among our collection of tasks.
202
+
203
+ Lastly, we find ourselves heavily constrained by the lack of reliable hardware for dexterous manipulation. While we use multi-fingered robot hands, the dexterity of these hands is far from that of human hands in terms of the active degrees of freedom. We believe the dexterity of our learned policies is not limited by the approach, and we hope to extend our framework to robot hands with more sophisticated designs in the future.
204
+
205
+ # Acknowledgments
206
+
207
+ We thank members of NVIDIA GEAR lab for help with hardware infrastructure, in particular Zhenjia Xu, Yizhou Zhao, and Zu Wang. This work was partially conducted during TL's internship at NVIDIA. TL is supported by NVIDIA and the National Science Foundation fellowship.
208
+
209
+ # References
210
+
211
+ [1] J. Bjorck, F. Castaneda, N. Cherniadev, X. Da, R. Ding, L. Fan, Y. Fang, D. Fox, F. Hu, S. Huang, et al. Gr00t n1: An open foundation model for generalist humanoid robots. arXiv preprint arXiv:2503.14734, 2025.
212
+ [2] X. Cheng, J. Li, S. Yang, G. Yang, and X. Wang. Open-television: Teleoperation with immersive active visual feedback. arXiv preprint arXiv:2407.01512, 2024.
213
+ [3] T. Lin, Y. Zhang, Q. Li, H. Qi, B. Yi, S. Levine, and J. Malik. Learning visuotactile skills with two multifingered hands. arXiv:2404.16823, 2024.
214
+ [4] T. Haarnoja, B. Moran, G. Lever, S. H. Huang, D. Tirumala, J. Humplik, M. Wulfmeier, S. Tunyasuvunakool, N. Y. Siegel, R. Hafner, et al. Learning agile soccer skills for a bipedal robot with deep reinforcement learning. Science Robotics, 9(89):eadi8022, 2024.
215
+ [5] N. Rudin, D. Hoeller, M. Bjelonic, and M. Hutter. Advanced skills by learning locomotion and local navigation end-to-end. In 2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 2497-2503. IEEE, 2022.
216
+ [6] J. Hwangbo, J. Lee, A. Dosovitskiy, D. Bellicoso, V. Tsounis, V. Koltun, and M. Hutter. Learning agile and dynamic motor skills for legged robots. Science Robotics, 4(26):eaau5872, 2019.
217
+ [7] J. Lee, J. Hwangbo, L. Wellhausen, V. Koltun, and M. Hutter. Learning quadrupedal locomotion over challenging terrain. Science robotics, 5(47):eabc5986, 2020.
218
+ [8] E. Kaufmann, L. Bauersfeld, A. Loquercio, M. Müller, V. Koltun, and D. Scaramuzza. Champion-level drone racing using deep reinforcement learning. Nature, 620(7976):982-987, 2023.
219
+ [9] I. Akkaya, M. Andrychowicz, M. Chociej, M. Litwin, B. McGrew, A. Petron, A. Paino, M. Plappert, G. Powell, R. Ribas, et al. Solving Rubik's cube with a robot hand. arXiv preprint arXiv:1910.07113, 2019.
220
+ [10] A. Handa, A. Allshire, V. Makoviychuk, A. Petrenko, R. Singh, J. Liu, D. Makoviichuk, K. Van Wyk, A. Zhurkevich, B. Sundaralingam, et al. Dextreme: Transfer of agile in-hand manipulation from simulation to reality. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pages 5977-5984. IEEE, 2023.
221
+ [11] T. Chen, J. Xu, and P. Agrawal. A system for general in-hand object re-orientation. In Conference on Robot Learning, pages 297–307. PMLR, 2022.
222
+ [12] H. Qi, B. Yi, S. Suresh, M. Lambeta, Y. Ma, R. Calandra, and J. Malik. General in-hand object rotation with vision and touch. In Conference on Robot Learning, pages 2549-2564. PMLR, 2023.
223
+ [13] R. Singh, A. Allshire, A. Handa, N. Ratliff, and K. Van Wyk. Dextrah-rgb: Visuomotor policies to grasp anything with dexterous hands. arXiv preprint arXiv:2412.01791, 2024.
224
+ [14] T. Lin, Z.-H. Yin, H. Qi, P. Abbeel, and J. Malik. Twisting lids off with two hands. arXiv:2403.02338, 2024.
225
+ [15] B. Huang, Y. Chen, T. Wang, Y. Qin, Y. Yang, N. Atanasov, and X. Wang. Dynamic handover: Throw and catch with bimanual hands. arXiv preprint arXiv:2309.05655, 2023.
226
+
227
+ [16] H. Qi, A. Kumar, R. Calandra, Y. Ma, and J. Malik. In-Hand Object Rotation via Rapid Motor Adaptation. In Conference on Robot Learning (CoRL), 2022.
228
+ [17] T. G. W. Lum, M. Matak, V. Makoviychuk, A. Handa, A. Allshire, T. Hermans, N. D. Ratliff, and K. Van Wyk. Dextrah-g: Pixels-to-action dexterous arm-hand grasping with geometric fabrics. arXiv preprint arXiv:2407.02274, 2024.
229
+ [18] O. AI, Sep 2024. URL https://openai.com/index/learning-to-reason-with-llms.
230
+ [19] DeepSeek-AI et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948.
231
+ [20] D. Silver, T. Hubert, J. Schrittwieser, I. Antonoglou, M. Lai, A. Guez, M. Lanctot, L. Sifre, D. Kumaran, T. Graepel, et al. Mastering chess and shogi by self-play with a general reinforcement learning algorithm. arXiv preprint arXiv:1712.01815, 2017.
232
+ [21] O. Vinyals, I. Babuschkin, W. M. Czarnecki, M. Mathieu, A. Dudzik, J. Chung, D. H. Choi, R. Powell, T. Ewalds, P. Georgiev, et al. Grandmaster level in starcraft ii using multi-agent reinforcement learning. nature, 575(7782):350-354, 2019.
233
+ [22] P. Henderson, R. Islam, P. Bachman, J. Pineau, D. Precup, and D. Meger. Deep reinforcement learning that matters. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 32, 2018.
234
+ [23] R. Islam, P. Henderson, M. Gomrokchi, and D. Precup. Reproducibility of benchmarked deep reinforcement learning tasks for continuous control. arXiv preprint arXiv:1708.04133, 2017.
235
+ [24] M. Bellemare, S. Srinivasan, G. Ostrovski, T. Schaul, D. Saxton, and R. Munos. Unifying count-based exploration and intrinsic motivation. Advances in neural information processing systems, 29, 2016.
236
+ [25] Y. Burda, H. Edwards, A. Storkey, and O. Klimov. Exploration by random network distillation. arXiv preprint arXiv:1810.12894, 2018.
237
+ [26] T. Lin and A. Jabri. Mimex: intrinsic rewards from masked input modeling. Advances in Neural Information Processing Systems, 36, 2024.
238
+ [27] G. Ostrovski, M. G. Bellemare, A. Oord, and R. Munos. Count-based exploration with neural density models. In International conference on machine learning, pages 2721-2730. PMLR, 2017.
239
+ [28] D. Pathak, P. Agrawal, A. A. Efros, and T. Darrell. Curiosity-driven exploration by self-supervised prediction. In International conference on machine learning, pages 2778-2787. PMLR, 2017.
240
+ [29] B. C. Stadie, S. Levine, and P. Abbeel. Incentivizing exploration in reinforcement learning with deep predictive models. arXiv preprint arXiv:1507.00814, 2015.
241
+ [30] H. Tang, R. Houthooft, D. Foote, A. Stooke, X. Chen, Y. Duan, J. Schulman, F. De Turck, and P. Abbeel. # exploration: A study of count-based exploration for deep reinforcement learning. Advances in neural information processing systems, 30, 2017.
242
+ [31] M. G. Bellemare, Y. Naddaf, J. Veness, and M. Bowling. The arcade learning environment: An evaluation platform for general agents. Journal of Artificial Intelligence Research, 47: 253-279, 2013.
243
+ [32] Y. Tassa, Y. Doron, A. Muldal, T. Erez, Y. Li, D. d. L. Casas, D. Budden, A. Abdolmaleki, J. Merel, A. Lefrancq, et al. Deepmind control suite. arXiv preprint arXiv:1801.00690, 2018.
244
+
245
+ [33] Y. Chen, C. Wang, Y. Yang, and C. K. Liu. Object-centric dexterous manipulation from human motion data. arXiv preprint arXiv:2411.04005, 2024.
246
+ [34] A. Rajeswaran, V. Kumar, A. Gupta, G. Vezzani, J. Schulman, E. Todorov, and S. Levine. Learning complex dexterous manipulation with deep reinforcement learning and demonstrations. arXiv preprint arXiv:1709.10087, 2017.
247
+ [35] Z.-H. Yin, C. Wang, L. Pineda, F. Hogan, K. Bodduluri, A. Sharma, P. Lancaster, I. Prasad, M. Kalakrishnan, J. Malik, et al. Dexteritygen: Foundation controller for unprecedented dexterity. arXiv preprint arXiv:2502.04307, 2025.
248
+ [36] Y. Zhu, Z. Wang, J. Merel, A. Rusu, T. Erez, S. Cabi, S. Tunyasuvunakool, J. Kramár, R. Hadsell, N. de Freitas, et al. Reinforcement and imitation learning for diverse visuomotor skills. arXiv preprint arXiv:1802.09564, 2018.
249
+ [37] M. Torne, A. Simeonov, Z. Li, A. Chan, T. Chen, A. Gupta, and P. Agrawal. Reconciling reality through simulation: A real-to-sim-to-real approach for robust manipulation. *Arxiv*, 2024.
250
+ [38] M. Memmel, A. Wagenmaker, C. Zhu, P. Yin, D. Fox, and A. Gupta. Asid: Active exploration for system identification in robotic manipulation. arXiv preprint arXiv:2404.12308, 2024.
251
+ [39] C. Zhang, W. Xiao, T. He, and G. Shi. Wococo: Learning whole-body humanoid control with sequential contacts. arXiv preprint arXiv:2406.06005, 2024.
252
+ [40] P. Wu, Y. Shentu, Z. Yi, X. Lin, and P. Abbeel. Gello: A general, low-cost, and intuitive teleoperation framework for robot manipulators, 2023.
253
+ [41] T. Z. Zhao, V. Kumar, S. Levine, and C. Finn. Learning fine-grained bimanual manipulation with low-cost hardware. arXiv preprint arXiv:2304.13705, 2023.
254
+ [42] C. Chi, S. Feng, Y. Du, Z. Xu, E. Cousineau, B. Burchfiel, and S. Song. Diffusion policy: Visuomotor policy learning via action diffusion. In RSS, 2023.
255
+ [43] X. Li, T. Zhao, X. Zhu, J. Wang, T. Pang, and K. Fang. Planning-guided diffusion policy learning for generalizable contact-rich bimanual manipulation. arXiv preprint arXiv:2412.02676, 2024.
256
+ [44] T. Z. Zhao, J. Tompson, D. Driess, P. Florence, K. Ghasemipour, C. Finn, and A. Wahid. Aloha unleashed: A simple recipe for robot dexterity. arXiv preprint arXiv:2410.13126, 2024.
257
+ [45] S. Levine, P. Pastor, A. Krizhevsky, J. Ibarz, and D. Quillen. Learning hand-eye coordination for robotic grasping with deep learning and large-scale data collection. The International journal of robotics research, 37(4-5):421-436, 2018.
258
+ [46] F. Lin, Y. Hu, P. Sheng, C. Wen, J. You, and Y. Gao. Data scaling laws in imitation learning for robotic manipulation. arXiv preprint arXiv:2410.18647, 2024.
259
+ [47] J. Wang, Y. Yuan, H. Che, H. Qi, Y. Ma, J. Malik, and X. Wang. Lessons from learning to spin" pens". arXiv preprint arXiv:2407.18902, 2024.
260
+ [48] Y. Chen, C. Wang, L. Fei-Fei, and C. K. Liu. Sequential dexterity: Chaining dexterous policies for long-horizon manipulation. arXiv preprint arXiv:2309.00987, 2023.
261
+ [49] I. Radosavovic, T. Xiao, B. Zhang, T. Darrell, J. Malik, and K. Sreenath. Real-world humanoid locomotion with reinforcement learning. Science Robotics, 9(89):eadi9579, 2024.
262
+ [50] H. Qi, A. Kumar, R. Calandra, Y. Ma, and J. Malik. In-hand object rotation via rapid motor adaptation. In Conference on Robot Learning, pages 1722-1732. PMLR, 2023.
263
+ [51] R. S. Sutton and A. G. Barto. Introduction to Reinforcement Learning. MIT Press, Cambridge, MA, 1998.
264
+
265
+ [52] J. Eschmann. Reward Function Design in Reinforcement Learning, pages 25-33. Springer International Publishing, Cham, 2021. ISBN 978-3-030-41188-6. doi:10.1007/978-3-030-41188-6_3. URL https://doi.org/10.1007/978-3-030-41188-6_3.
266
+ [53] D. Dewey. Reinforcement learning and the reward engineering principle. In 2014 AAAI Spring Symposium Series, 2014.
267
+ [54] K. Grauman, A. Westbury, L. Torresani, K. Kitani, J. Malik, T. Afouras, K. Ashutosh, V. Baiyya, S. Bansal, B. Boote, et al. Ego-exo4d: Understanding skilled human activity from first-and third-person perspectives. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 19383-19400, 2024.
268
+ [55] M. Bauza, J. E. Chen, V. Dalibard, N. Gileadi, R. Hafner, M. F. Martins, J. Moore, R. Pevceviciute, A. Laurens, D. Rao, et al. Demostart: Demonstration-led auto-curriculum applied to sim-to-real with multi-fingered robots. arXiv preprint arXiv:2409.06613, 2024.
269
+ [56] A. A. Taïga, W. Fedus, M. C. Machado, A. Courville, and M. G. Bellemare. Benchmarking bonus-based exploration methods on the arcade learning environment. arXiv preprint arXiv:1908.02388, 2019.
270
+ [57] M. Liu, Z. Chen, X. Cheng, Y. Ji, R.-Z. Qiu, R. Yang, and X. Wang. Visual whole-body control for legged loco-manipulation. arXiv preprint arXiv:2403.16967, 2024.
271
+ [58] V. Makoviychuk, L. Wawrzyniak, Y. Guo, M. Lu, K. Storey, M. Macklin, D. Hoeller, N. Rudin, A. Allshire, A. Handa, et al. Isaac gym: High performance GPU-based physics simulation for robot learning. arXiv preprint arXiv:2108.10470, 2021.
272
+ [59] N. Ravi, V. Gabeur, Y.-T. Hu, R. Hu, C. Ryali, T. Ma, H. Khedr, R. Rädle, C. Rolland, L. Gustafson, et al. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024.
273
+ [60] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.
274
+ [61] K. He, X. Zhang, S. Ren, and J. Sun. Deep residual learning for image recognition. In CVPR, 2016.
275
+ [62] S. Ioffe and C. Szegedy. Batch normalization: Accelerating deep network training by reducing internal covariate shift. In ICML, 2015.
276
+ [63] Y. Wu and K. He. Group normalization. In ECCV, 2018.
277
+ [64] D. P. Kingma and J. Ba. Adam: A method for stochastic optimization. In ICLR, 2015.
278
+ [65] I. Loshchilov and F. Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017.
279
+
280
+ # Appendix
281
+
282
+ # 6.1 Environment Modeling Details
283
+
284
+ Modeling underactuated joints. Since modeling underactuated joint structure is not directly supported, we approximate the relationship between each pair of actuated and underactuated joints by fitting a linear function $q_{u} = k \cdot q_{a} + b$ , where $q_{u}$ is the underactuated joint angle and $q_{a}$ is the actuated joint angle. Note that parameters $k, b$ are included as tunable parameters to search over using our autotune module detailed in Section 3.1.
285
+
286
+ # 6.2 Reward Design Details
287
+
288
+ We design generalizable rewards based on the principle outlined in Section 3.2 and list task reward details below.
289
+
290
+ Both grasp and lift tasks can be defined with the following goal states: (1) finger contact with the object; (2) the object being lifted up to a goal position. Our reward design can, therefore, follow by combining the contact goal reward and the object goal reward terms:
291
+
292
+ $$
293
+ r(s_h, s_o) = r_{\text{contact}}(s_h, s_o) + r_{\text{goal}}(s_o) \tag{1}
294
+ $$
295
+
296
+ where $s_h$ includes fingertip positions, $s_o$ includes object center-of-mass position, and all contact marker positions (if any).
297
+
298
+ Similarly, the handover task can be defined with the following goal states: (1) one hand's finger contact with the object; (2) object being transferred to an intermediate goal position while still in contact with the first hand; (3) the second hand's finger contact with the object; (4) object being transferred to the final goal position. Due to the hand switching, we introduce a stage variable $a \in \{0, 1\}$ and design the reward as follows:
299
+
300
+ $$
301
+ \begin{array}{rl} r(s_h, s_o) = & (1 - a) \cdot \left(r_{\text{contact}}(s_{h_A}, s_{o_A}) + r_{\text{goal}}(s_{o_A})\right) \\ & + a \cdot \left(r_{\text{contact}}(s_{h_B}, s_{o_B}) + r_{\text{goal}}(s_{o_B})\right) \end{array} \tag{2}
302
+ $$
303
+
304
+ where $s_{h_A}, s_{h_B}$ denote the fingertip positions of the engaged hand at each stage, and $s_{o_A}, s_{o_B}$ denote the object center-of-mass position and desirable contact marker positions (if any) at each stage. At the completion of each stage, we also reward the policy with a bonus whose scale increases as the stage progresses.
305
+
306
+ # 6.3 Policy Training Details
307
+
308
+ RL implementation. To learn the specialist policies, the observation space includes object position and robot joint position at each time step, and the action space is robot joint angles. We use Proximal Policy Optimization [60] with asymmetric actor-critic as the RL algorithm. In addition to the policy inputs, we provide the following privilege state inputs to the asymmetric critic: arm joint velocities, hand joint velocities, all fingertip positions, object orientation, object velocity, object angular velocity, object mass randomization scale, object friction randomization scale, and object shape randomization scale. Both the actor and critic networks are 3-layer MLPs with units (512, 512, 512).
309
+
310
+ Domain randomization. Physical randomization includes the randomization of object friction, mass, and scale. We also apply random forces to the object to simulate the physical effects that are not implemented by the simulator. Non-physical randomization models the noise in observation (e.g. joint position measurement and detected object positions) and action. A summary of our randomization attributes and parameters is shown in Table 4.
311
+
312
+ # 6.4 Distillation Details
313
+
314
+ To learn the generalist policy, we reduce the choices of observation inputs to the robot joint states and selective object states, including 3D object position and egocentric depth view, since privileged information is unavailable for sim-to-real transfer. To more efficiently utilize the trajectory data
315
+
316
+ Table 4: Domain Randomization Setup.
317
+
318
+ <table><tr><td>Object: Mass (kg)</td><td>[0.03, 0.1]</td></tr><tr><td>Object: Friction</td><td>[0.5, 1.5]</td></tr><tr><td>Object: Shape</td><td>×U(0.95, 1.05)</td></tr><tr><td>Object: Initial Position (cm)</td><td>+U(-0.02, 0.02)</td></tr><tr><td>Object: Initial z-orientation</td><td>+U(-0.75, 0.75)</td></tr><tr><td>Hand: Friction</td><td>[0.5, 1.5]</td></tr><tr><td>PD Controller: P Gain</td><td>×U(0.8, 1.1)</td></tr><tr><td>PD Controller: D Gain</td><td>×U(0.7, 1.2)</td></tr><tr><td>Random Force: Scale</td><td>2.0</td></tr><tr><td>Random Force: Probability</td><td>0.2</td></tr><tr><td>Random Force: Decay Coeff. and Interval</td><td>0.99 every 0.1s</td></tr><tr><td>Object Pos Observation: Noise</td><td>0.02</td></tr><tr><td>Joint Observation Noise.</td><td>+N(0, 0.4)</td></tr><tr><td>Action Noise.</td><td>+N(0, 0.1)</td></tr><tr><td>Frame Lag Probability</td><td>0.1</td></tr><tr><td>Action Lag Probability</td><td>0.1</td></tr><tr><td>Depth: Camera Pos Noise (cm)</td><td>0.005</td></tr><tr><td>Depth: Camera Rot Noise (deg)</td><td>5.0</td></tr><tr><td>Depth: Camera Field-of-View (deg)</td><td>5.0</td></tr></table>
319
+
320
+ and improve training stability, for each sub-task specialist policy, we evaluate for 5000 steps over 100 environments, saving trajectories filtered by success at episode reset on the hard disk. We then treat the saved data as "demonstrations" and learn a generalist policy for each task with Diffusion Policies [42].
321
+
322
+ The proprioception and object position states are concatenated and passed through a three-layer network with ELU activation, hidden sizes of (512, 512, 512), and an output feature size of 64. For depth observations, we use the ResNet-18 architecture [61] and replace all the BatchNorm [62] in the network with GroupNorm [63], following [42]. All the encoded features are then concatenated as the input to a diffusion model. We use the same noise schedule (square cosine schedule) and the same number of diffusion steps (100) for training as in [42]. The diffusion output from the model is the normalized 7 DoF absolute desired joint positions of each humanoid arm and the 6 DoF normalized (0 to 1) desired joint positions of each humanoid hand. We use the AdamW optimizer [64, 65] with a learning rate of 0.0001, weight decay of 0.00001, and a batch size of 128. Following [42], we maintain an exponential weighted average of the model weights and use it during evaluation/deployment.
data/2025/2502_20xxx/2502.20396/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:18b138369423c74469c727ac91f5387fccd7c2dc3e1700051d043750634a6047
3
+ size 440958
data/2025/2502_20xxx/2502.20396/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20502/a95429a0-0940-4d78-b3f9-687f81572084_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20502/a95429a0-0940-4d78-b3f9-687f81572084_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20502/a95429a0-0940-4d78-b3f9-687f81572084_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7606106eec01d7cc6bd6155b577768f63f06f139f1de484598db26e179583a0c
3
+ size 2033136
data/2025/2502_20xxx/2502.20502/full.md ADDED
@@ -0,0 +1,414 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # On Benchmarking Human-Like Intelligence in Machines
2
+
3
+ Lance Ying $^{12}$ Katherine M. Collins $^{3}$ Lionel Wong $^{4}$ Ilia Sucholutsky $^{5}$ Ryan Liu $^{6}$ Adrian Weller $^{3}$ Tianmin Shu $^{7}$ Thomas L. Griffiths $^{6}$ Joshua B. Tenenbaum $^{1}$
4
+
5
+ # Abstract
6
+
7
+ Recent benchmark studies have claimed that AI has approached or even surpassed human-level performances on various cognitive tasks. However, this position paper argues that current AI evaluation paradigms are insufficient for assessing human-like cognitive capabilities. We identify a set of key shortcomings: a lack of human-validated labels, inadequate representation of human response variability and uncertainty, and reliance on simplified and ecologically-invalid tasks. We support our claims by conducting a human evaluation study on ten existing AI benchmarks, suggesting significant biases and flaws in task and label designs. To address these limitations, we propose five concrete recommendations for developing future benchmarks that will enable more rigorous and meaningful evaluations of humanlike cognitive capacities in AI with various implications for such AI applications.
8
+
9
+ # 1. Introduction
10
+
11
+ From the earliest days of artificial intelligence (AI), the vision of creating machines that think and act like humans has captured the imagination of researchers and the public alike (Turing, 1950; Lake et al., 2017; Cave & Dihal, 2023; Weizenbaum, 1966; Anderson et al., 1990). This pursuit is driven not only by scientific curiosity – to better understand intelligence and what it means to be human – but also by the potential of human-like AI to reshape our world, through the ways that we engage with our work and with each other. Furthermore, building AI that mirrors human cognition is crucial for the critical task of AI alignment. Ensuring that these powerful systems understand and share our values will ultimately lead to safer and more benefi-
12
+
13
+ cial interactions (Kasirzadeh & Gabriel, 2023). A deeper understanding of the mechanisms underlying human intelligence can also inform and enhance the development of more robust and adaptable AI systems.
14
+
15
+ Despite the acknowledged importance of building human-like AI, a clear and consistent definition of what constitutes "human-like" performance remains elusive, and we have seen this term inconsistently applied across the literature and public discourse. Recent years have witnessed a surge in claims that AI systems have achieved human-level performance on various tasks. However, the relevance of these results for determining whether AI systems act in a way that is human-"like" is challenged by the limitations of existing evaluation benchmarks.
16
+
17
+ In this paper, we argue that current evaluation paradigms are insufficient for assessing the true extent of human-like capabilities in AI systems. Specifically, we highlight three major shortcomings: the too-frequent absence of human validation in dataset labeling, inadequate representation of human variability in collected human data, and over-reliance on simplified tasks that lack ecological validity and fail to reflect the complexity of real-world scenarios. We support these claims with a human evaluation study on 10 well-known AI benchmark tasks, showcasing potential flaws along these three axes. To address these critical gaps, we propose five concrete recommendations for the development of future benchmarks, derived from best practices in cognitive modeling. We believe these recommendations will pave the way for more rigorous and meaningful evaluations of human-like AI, fostering a more accurate understanding of the current state of the field and guiding its future progress. We close with open questions and challenges of implementing these recommendations.
18
+
19
+ # 2. Building and Evaluating Human-like AI
20
+
21
+ There has been a long history of interest in building and evaluating human-like intelligence in machines. But what do we mean by human-like intelligence? In this paper, we adopt the definition given by Alan Turing (Turing, 1950): an intelligent system that can elicit similar judgments and behaviors "indistinguishable from that of a human being."
22
+
23
+ <table><tr><td>Benchmark</td><td>Task</td><td>Description</td></tr><tr><td rowspan="8">BigBench(Srivastava et al., 2022)</td><td>Fantasy reasoning</td><td>Reason about scenarios that violate the ordinary rules of the world</td></tr><tr><td>Social IQA</td><td>Reason about typical social situations.</td></tr><tr><td>Moral permissibility</td><td>Reason about morally permissible actions in scenarios</td></tr><tr><td>Simple ethical questions</td><td>Give perspectives on a set of hypothetical, consequential, political, and social questions.</td></tr><tr><td>Social support</td><td>Distinguish supportive and unsupportive language uses.</td></tr><tr><td>Irony identification</td><td>Determine whether a text is meant to be ironic or not.</td></tr><tr><td>Dark humor detection</td><td>Detect whether a particular piece of text is intended to be humorous (in a dark way) or not</td></tr><tr><td>Movie dialog same or different</td><td>Determine whether two adjacent &quot;lines&quot; from a movie dialogue were produced by the same or different individuals.</td></tr><tr><td>ToMBench(Chen et al., 2024)</td><td>Ambiguous story task</td><td>Reason and answer questions about ambiguous social situations</td></tr><tr><td>BigToM(Gandhi et al., 2024)</td><td>Theory of Mind Reasoning</td><td>Answer questions about agent&#x27;s beliefs and actions</td></tr></table>
24
+
25
+ Table 1. Benchmark tasks used in our experiment to evaluate human response distributions and levels of agreement.
26
+
27
+ But why may we aim for human-like AI? The pursuit of human-like AI is motivated by both scientific curiosity and practical considerations. From the earliest days of AI, scholars have sought to understand, model, and attempt to replicate the intricacies of human cognition and intelligence (Rosenblatt, 1958; Rumelhart et al., 1988; Minsky, 1988; Mitchell, 2024) and use these cognitively-informed models for practical applications. Building human-like AI offers a powerful lens through which to explore fundamental questions about the philosophy of mind, the nature of human cognition, and the underlying mechanisms driving complex human behavior. This quest not only pushes the boundaries of computer science but also promises to deepen our understanding of human intelligence.
28
+
29
+ Creating AI systems that exhibit human-like thinking and behaviors offers several potential advantages for applications. Human-like AI can think and act in place of humans in many scenarios while ensuring safety and reliability:
30
+
31
+ - Effective Human-AI Interaction: Humans have developed complex social cognitive skills for effective collaboration, which involves simulating other agents' mental states and future actions (Bandura, 2001; Gallese, 2007). AI systems that adhere to human-like patterns of reasoning and behavior can enable human users to easily construct accurate mental models of the AI partner and better simulate and predict the AI partner's future actions (Collins et al., 2024c). This leads to more effective collaboration and coordination between human users and AI agents (Carroll et al., 2019; Ho & Griffiths, 2022; Zhi-Xuan et al., 2024). Additionally, interacting with agents that be
32
+
33
+ have predictably and understandably can reduce cognitive load (Dragan et al., 2013; Fisac et al., 2020). We don't have to expend as much mental effort trying to decipher unfamiliar or unexpected behaviors.
34
+
35
+ - Better simulated agents: AI systems with human-like cognitive capabilities are valuable tools for building simulations of people. This has many benefits, including improving communication (Liu et al., 2023; Shaikh et al., 2024), generating feedback on pilot studies, and even potentially automating human participant responses in social sciences (Ashokkumar et al., 2024; Park et al., 2024; Demszky et al., 2023) or Human Computer Interaction (Hämäläinen et al., 2023). Prior work has also explored the use of LLMs for product testing (Brand et al., 2023) and substituting human subjects in software engineering (Gerosa et al., 2024).
36
+ - Flexible generalization: Humans are often considered the gold standard for generalizing from small data and getting AI systems to replicate the mechanisms that drive the human ability to learn so efficiently may enable AI systems to do so too (Lake et al., 2017; Sucholutsky & Schonlau, 2021; Sucholutsky et al., 2024).
37
+
38
+ # 3. Benchmark Selection and Evaluation
39
+
40
+ To motivate our recommendations, we collected human data on 10 commonly used AI Benchmarks. We selected 8 benchmarks from BigBench (Srivastava et al., 2022) under the common-sense reasoning category and two Theory-of-Mind reasoning benchmarks, BigToM (Gandhi et al., 2024) and ToMBench (Chen et al., 2024). The benchmarks are
41
+
42
+ ![](images/5311e5c2cc8954d7402611440c66b831b76a89ea0cb111ede1f17436c16b388e.jpg)
43
+ Figure 1. Distribution of participants' agreement with benchmark labels across all 300 stimuli. $26.67\%$ of the stimuli have less than $50\%$ agreement with the label (i.e. less than half of the participants selected the label provided by the benchmark).
44
+
45
+ described in Table 1. We chose these benchmarks as they represent a wide range of cognitive tasks and do not require any specialized knowledge. Many focus on language understanding and social cognition, which are particularly pertinent for human-AI interaction. All 10 benchmarks have a single ground truth label for each stimulus.
46
+
47
+ We randomly sampled 30 stimuli from each benchmark and recruited 240 participants from Prolific to label the dataset. Each participant was randomly assigned to a dataset and completed 30 trials in a randomized order. We used the same answer options provided by the benchmarks, but instead of using a multiple choice question we asked participants to drag a slider on a scale from $1 - 100$ (e.g. $1 =$ strongly disagree, $100 =$ strongly agree) for each answer option.
48
+
49
+ We highlight some aggregate statistics and diagnostic examples in the section below to support our arguments. More detailed analysis and examples can be found in the Appendix.
50
+
51
+ # 4. Pitfalls and Recommendations for Benchmarking Human-like AI
52
+
53
+ In this section, we present recommendations for evaluating "human-like" AI. There have been several works emphasizing alternate ways to evaluate AI system performance (Burnell et al., 2023; Shanahan et al., 2023; Beyret et al., 2019). Here, we focus particularly on how insights from decades of computational modeling can inform how we approach AI benchmarking. The recommendations we propose here derive from years of development and debate in cognitive science to determine best practices for designing tasks, richly comparing models to human judgments, and sharpening hypotheses about what aspects of human behavior a computational model is intended to capture in the first place – all cornerstones, we argue, of what it means to make theoretically rich, replicable, and measured claims about the sense in which a given model is and is not comparable to human
54
+
55
+ behavior. We urge developers of AI benchmarks to engage with and capitalize on this history.
56
+
57
+ # 4.1. Recommendation 1: Measure 'human-like AI' against actual humans - and collect robust, replicable sample sizes of human data
58
+
59
+ A surprising number of "cognitively-inspired" benchmark suites and AI evaluations claim to measure human-like AI performance without any human data at all. Rather, tasks derived or sometimes loosely adapted from psychological assays are used to directly evaluate computational model performance, often with ground-truth notions of what it means to "solve" a task (for instance, to identify whether a model can label mental states in simple "false-belief" tasks derived from cognitive theory-of-mind experiments (Wimmer & Perner, 1983)). Our first and perhaps most fundamental recommendation is that the ground-truth labels for measuring whether AI is human-like should be response data collected from humans themselves.
60
+
61
+ Using actual human behavior as the "gold" labels for AI benchmarks, we propose, is important for many structural aspects that have been well documented in cognitive science. First, many AI benchmarks seek to evaluate inherently subjective concepts – such as whether an act is morally permissible – where a single, objectively correct answer (or even any set of "correct answers") may not exist. Rather, computational models of subjective behavior like moral reasoning, have long sought to characterize distributions of human judgments, including to account for known variation across populations, social groups, and cultures (Graham et al., 2009; 2016), while also seeking to explain how these differences arise (Levine et al., 2020).
62
+
63
+ Second, even on tasks that appear to have a single objective "gold label" based on external measures, measuring human behavior may still reveal important variation and disagreement, sometimes with high confidence, that is nonetheless revealing of the internal computations by which humans process particular inputs. The famous visual illusion involving The Dress, for instance, illustrates people's strongly diverging judgments even given a measurable external label, the true color of the dress. These divergent judgments on this single stimulus reveal important, measurable, and modelable facets of human visual processing (Lafer-Sousa et al., 2015). More generally, building systems that are truly human-like or that can well-model human-like behavior requires also modeling human error patterns and uncertainty. Computational cognitive modelers do not shy away from human errors, but rather lean into them; consider Battaglia et al. (2013) which build a model of how humans reason about our physical world. They find, and model, that we humans are not always accurate in our inferences about physics; such errors - as the history of studying visual and other perceptual
64
+
65
+ illusions has emphasized – can help reveal structure in what we do or do not know. Understanding whether a machine is human-like therefore ought to examine such error patterns from the “true” state of the world.
66
+
67
+ In our analysis of a suite of common AI evaluation benchmarks that had previously been annotated with only a single "correct" answer, we found high levels of disagreement in human judgments. Specifically, we found that on average only $63.51\%$ of participants agree with the ground truth label for each stimulus with a standard deviation of 20.99. Notably, we found that $26.67\%$ of the stimuli have a human agreement rate below $50\%$ . Consider the specific example in Figure 2, participants are asked to rate whether the statement "There's nothing wrong with the quotations or discussing her art" is supportive. Absent of the context, most participants find the statement to be more supportive than unsupportive, yet the ground truth label is "unsupportive". We show more such examples in Table 3, 4 and 5 in the Appendix.
68
+
69
+ Taken together, our re-annotation of these benchmarks – with real humans – suggests that there are serious concerns as to the validity of some published ground-truth labels for benchmarking “human-likeness.”
70
+
71
+ # 4.2. Recommendation 2: Evaluate models of human populations against population-level distributions of human judgments
72
+
73
+ Our second recommendation builds more specifically on the inter-annotator variation we discuss above – for many AI models, particularly machine learning models explicitly trained on large distributions of human-generated data, we propose that model evaluations should explicitly collect, analyze, and use population-level distributions of human responses as the “gold” soft labels for evaluating model performance. A fundamental distinction for computational cognitive and psychological models is clarifying which populations of humans one seeks to model, and at what level one seeks to model them – distinguishing, for instance, between a granular model of the algorithms, strategies, and errors that a single human might make across related stimuli on a single domain, with the overall pattern of responses we can expect to find across many subjects. Because many AI models are trained on population-level human data using objectives designed to measure population-level responses, and are often intended for deployment across populations, we argue that it is crucial to collect and evaluate performance explicitly on how well models capture the structure and variation of behavior across sets of human subjects.
74
+
75
+ Nearly all facets of human cognition – perception, decision-making, and commonsense reasoning on any number of inherently subjective tasks – are influenced by a complex set of individual differences and cultural factors. These include differences in underlying cognitive abilities or resources like
76
+
77
+ ![](images/5025f0878e37c26fa0e6649130fa8416ff83de51e79c9aaffcd747599aa4b928.jpg)
78
+ Statement: There's nothing wrong with the quotations or discussing her art.
79
+ Figure 2. Distribution of participants' ratings on one of the stimuli. The ground truth label is "unsupportive".
80
+
81
+ working memory or attention (Boogert et al., 2018); differences in prior experiences, preferences and goals, which can influence how they predict unknowns given limited evidence or choose among a set of options and actions (Ongchoco et al., 2024); and cultural variation in values, expectations, and experiences that systematically influences priors or decision making strategies (Henrich et al., 2010).
82
+
83
+ Many existing benchmarks collect human annotations but rely on majority voting to collapse the human responses to a single "ground-truth" label, effectively discarding valuable information about the range and distribution of human judgments. This may disproportionately lead models to align with the majority view, even if there are important subpopulations that are otherwise underrepresented (Gordon et al., 2022). Additional pitfalls of such information loss in label construction have been raised in the context of image classification systems wherein the labels used to train models were often taken to be the label with the majority vote; several works identified that training and evaluating such models on distributions over annotator uncertainty ("soft labels") revealed and guarded against otherwise fragility in such model predictions (Peterson et al., 2019; Sucholutsky et al., 2023a; Collins et al., 2023b; Uma et al., 2020). These works also highlight the potential benefit of then training on labels that better capture the richness of human beliefs for enhanced generalization and robustness. We advocate for the consideration of distributions over human data in the context of AI evaluation more broadly.
84
+
85
+ Researchers in AI Alignment, specifically "pluralistic alignment", have advocated for similar recommendations (Kirk et al., 2024; Sorensen et al., 2024) but more restricted to alignment to a distribution of values and preferences in decision-making. In our paper, we argue modeling distributions over annotators should extend to all cognitive tasks, including perception, planning and reasoning, and
86
+
87
+ # should be beyond just culture and values.
88
+
89
+ Designing and evaluating population-level metrics Once we collect the distribution of human data, how may we evaluate AI models? As in cognitive modeling, where researchers often deploy a range of evaluation measures on collected data and conduct analyses on subgroups within populations of participants, we recommend being clear and seeking explicitly to measure the following:
90
+
91
+ - Report metrics used to compare distributions of samples from models (with comparable numbers of samples from the model versus samples from a population of participants) to distributions of human judgments, such as measures on probability distributions (e.g., KL divergence or Wasserstein distances). These metrics can ensure that models do not simply report narrow means, with little of the expected distributional diversity shown across populations as a whole.
92
+ - Explain structure within a given distribution of answers. For instance, if distributions have distinct modes, can the model interpretably and consistently explain how these modes arise, or how modes are correlated across related questions?
93
+ - Measure how the model represents individual patterns of answers and explains individual differences across the population – for instance, to what degree can it capture conditional patterns based on personal traits (e.g., how a pluralist would answer a moral value judgment query versus a utilitarian)? Evaluating conditional distributions can help further focus which parts of a population are well-modeled, and which may be more divergent.
94
+
95
+ # 4.3. Recommendation 3: Evaluate model gradedness and uncertainty against gradedness in individual human judgments
96
+
97
+ Just as different people may come to different conclusions about any given task, any single person may be uncertain about what decision they want to make or what plan they want to take. Decades of cognitive science research has shown that graded beliefs and uncertainties are an essential part of human cognition, driving nuanced human perception, reasoning and behaviors (Tversky & Kahneman, 1974; Chater & Manning, 2006; Griffiths et al., 2024). We encourage benchmark builders to consider eliciting, maintaining, and measuring not just judgment over hard labels with multiple choice questions but graded judgments from individual annotators using soft labels. The collection and consideration of soft labels for capturing graded judgments from humans has been standard practice for cognitive modeling and has more recently been advocated for in the context of computer vision (Sucholutsky et al., 2023b), human-AI
98
+
99
+ ![](images/8b5752cb2c45053d944eed60b516acd7608eae0d0f6598daecda53acdccc1571.jpg)
100
+ Figure 3. Distribution of participants' ratings on soft labels across all 300 stimuli. Each rating maps onto a ground-truth label of 0 or 100, except 625 ratings where the underlying label is 50 (Neutral).
101
+
102
+ interaction (Collins et al., 2023a), and the elicitation of knowledge from experts more broadly (O'Hagan et al., 2006; O'Hagan, 2019).
103
+
104
+ Discrete multiple-choice questions that require an annotator to select only one choice are typically too coarse for such measures. In our data collection, we find that $57.69\%$ of the ratings are between 20 to 80, reflecting participants' graded judgments which are not reflected by binary labels (see Figure 3 and Appendix for examples).
105
+
106
+ We call on AI benchmarks to consider collecting and assessing soft labels from annotators to measure their graded judgments for the following reasons. First, graded judgments better reflect the nuances of real-world scenarios. Real-world decision-making rarely involves absolute, binary choices. Consider emotions, which vary in intensity, or moral judgments, where two wrong actions might warrant different levels of reprimand. Graded responses allow benchmarks to capture these crucial distinctions and nuances and can in turn be used to train models for better generalization to new situations (Peterson et al., 2019).
107
+
108
+ Second, soft labels capture the inherent uncertainty prevalent in many tasks. A binary choice often fails to represent the full spectrum of human beliefs and judgment. Individuals may lean towards one option while acknowledging some doubt. This uncertainty is fundamental to real-world reasoning and decision-making. Quantifying uncertainty allows for flexible planning, adaptive strategies, and appropriate risk assessment—essential skills for robust AI systems. While some might argue that large samples with hard labels can approximate uncertainty, this approach hinges on the assumption of independent and identically distributed (i.i.d.) samples. However, this assumption often does not hold in many real-world cases due to individual and group-level variations. Again, consider the example of The Dress. Averaging judgments across all samples would show high uncertainty between the two color labels. However, in fact each person is quite adamant about what they see.
109
+
110
+ To deeply understand whether a model is human-like, we urge finer-grained consideration of the rich, structured beliefs that any single annotator may have. Researchers may fear perceived "messiness" of collecting human uncertainty. An oft heard retort to the collection of uncertainty is that people are "miscalibrated" in their uncertainty. Decades of research in cognitive science, however, have designed studies to examine people's probabilistic judgments in order to study and model human cognition (Keren, 1991; Tenenbaum, 1998; Chater & Manning, 2006; Windschitl & Wells, 1996; O'Hagan et al., 2006; Griffiths et al., 2024). We encourage designers of AI benchmarks to engage with such literature and lean into these uncertainties in humans' judgments in order to assess models' human-like behaviors.
111
+
112
+ # 4.4. Recommendation 4: Situate tasks with respect to meta-reviews of existing cognitive theory
113
+
114
+ Many AI benchmarks focus on testing human and machine judgments on various commonsense reasoning tasks, from object recognition to classifying sentiments in texts. However, the number of tasks in the world is unbounded, and we cannot have infinitely many benchmarks. To draw generalizable conclusions about an AI model, tasks should be carefully designed to measure whether the model's cognitive capabilities are human-like (Hernández-Orallo, 2017). To do so, benchmarks should begin with a theory of the target mental construct, outlining its sub-components and how they manifest in observable behaviors. This theoretical framework then guides the construction of the benchmark, ensuring that tasks effectively probe the specific cognitive capacities of interest and provide meaningful insights into the extent to which AI possesses these mental constructs in a human-like way.
115
+
116
+ Recently, there has been surging interest in probing human-like mental capacities in LLMs, such as personality traits, reasoning, planning, etc. (Hagendorff et al., 2023; Safdari et al., 2023; Coda-Forno et al., 2024). We encourage these investigations, but we highlight two common pitfalls in existing practice.
117
+
118
+ One common pitfall is the use of impoverished theory in guiding benchmark creation. For example, many benchmarks have been created to evaluate a machine's Theory of Mind (ToM), which refers to the human ability to make inferences about other agents' mental states. ToM benchmarks for AI commonly or exclusively use the Sally-Anne test (a.k.a. false-belief test) (e.g. Le et al. 2019), which has traditionally been used in developmental psychology for evaluating the timing of children's developing Theory of Mind. The results from these evaluations have led to claims such as ToM having emerged in LLMs (Kosinski, 2024; Gandhi et al., 2024). However, ToM embodies a wide range of subcomponents beyond those assessed by the
119
+
120
+ Sally-Anne test. In a comprehensive review, Beaudoin et al. (2020) identified 220 ToM tasks and measures previously used by psychological studies. Other authors have also questioned the validity and effectiveness of the Sally-Anne test in assessing children's ToM (Bloom & German, 2000). By exclusively focusing on false-belief tasks, many studies on evaluating AI models' ToM reflect a poor understanding of the meta-theory of ToM as construed in cognitive psychology. Instead, benchmarking intelligent systems should start from a meta-theory of the cognitive construct and design tasks grounded in the cognitive theory, including a comprehensive survey of its subdomains, taxonomies, and measures.
121
+
122
+ Another common pitfall is the naive use and adaptation of psychological tests in evaluating AI models. Passing a few psychological tests is insufficient to claim certain cognitive capacities exist in machines. Again take the Sally-Anne test as an example. Although it may be effective in measuring children's ToM, tests as such are insufficient for evaluating AI's ToM because AI models are trained specifically to do well on these tests while humans are not. Therefore, blindly taking psychological scales and applying them to AI benchmarks to claim an AI is human-like can result in misleading conclusions and the results will be unlikely to generalize to richer tasks in the real world. Instead, we encourage AI benchmark creators to use psychological theories as a guide and psychological tests as inspirations for designing tasks for evaluating AI's cognitive capacity, but the tasks should be richer, more grounded, and more complex. Research in Cognitive Science in the past decades has introduced many rich and interactive paradigms for studying and evaluating models' social cognition, such as the ones used in Baker et al. (2017), Jara-Ettinger et al. (2020) and Ying et al. (2023), which were used to extract sophisticated and graded reasoning patterns from humans (See Fig 4 as an example). In the next section, we discuss some concrete recommendations for designing such tasks.
123
+
124
+ # 4.5. Recommendation 5: Design Ecologically Valid and Cognitively Rich Tasks
125
+
126
+ Benchmark tasks should be ecologically-valid, reflecting the complexity and ambiguity of real-world scenarios, to effectively evaluate AI systems designed for human-like reasoning and interaction. Many existing benchmarks focus on simple, straightforward tasks, often excluding those with low inter-annotator agreement. However, real-world challenges rarely present themselves in such simplified forms. Humans routinely navigate complex situations involving incomplete information, contextual nuances, and ambiguous stimuli. If we want to deeply understand in which ways AI systems are (or are not) human-like in the diversity of settings in which humans engage with the real world, AI benchmarks must move beyond these simplified cases. We
127
+
128
+ ![](images/71c47d71b1ea5b4f3967b548bd1cba0249f6aa96863a6f8b46128a42c8241df4.jpg)
129
+ Figure 4. The Food truck experiment used by Baker et al. (2017) to study human social reasoning. In this domain, a participant watches an agent moving to get food from a foodtruck. There are three kinds of food trucks: Lebanese (L), Mexican (M) and Korean (K). The agent cannot see what foodtruck is behind the wall unless they walk behind it to check. After observing the agent's trajectory, the participant is asked to judge the agent's preference of the foodtrucks and their belief of what foodtruck is behind the wall on a Likert scale. The results show graded judgment in humans across different agent trajectories.
130
+
131
+ next provide several key suggestions for eliciting interesting and rich response patterns in humans and models in more naturalistic settings that paint a broader picture of what it means to be "human-like".
132
+
133
+ Integration of cognitive capacities: Benchmarks should incorporate tasks that require integrating multiple cognitive processes, including multimodal reasoning and interaction. For example, understanding the intent behind a sentence might require considering conversational context, the speaker's tone, and even visual cues. The foodtruck example shown in Fig. 4 requires observers to model the perception and mental states of the agent as well as their goal-directed actions and plans. By incorporating such complexities, benchmarks can better assess an AI's ability to handle nuanced, real-world situations.
134
+
135
+ Naturalistic traces of human behavior: Benchmarks may also consider comparing AI system performance across richer traces of how humans go about solving and creating problems, making decisions, and communicating with each other over potentially many interactions, which may include traces of student-teacher interactions (Wang et al., 2024) or other professionals' workflows, e.g., how mathematicians come up with proofs (Frieder et al., 2024).
136
+
137
+ Systematic Ablation: Ablating tasks by systematically withholding or providing specific information or context can reveal how different factors influence both human and AI judgments and uncertainty. Comparing performance across ablated and full stimuli provides valuable insights into the reasoning processes of both humans and AI systems in settings of varied contextual information, which are common in the real-world.
138
+
139
+ Structured Ambiguity: Tasks involving ambiguous perceptual and reasoning challenges, like the example illustrated in The Dress, can elicit diverse response patterns among humans. While some benchmarks exclude such stimuli due to lower inter-annotator agreement, we argue that these ambiguous cases are crucial for understanding the nuances of human cognition and evaluating an AI's ability to handle uncertainty. Excluding them limits the benchmark's ability to assess real-world applicability. Rather, we encourage leaning into whether tasks are difficult (which could involve collecting new human-derived ratings of expected difficulty (Zhou et al., 2024)) and creating more such tasks; for instance, more ambiguous or challenging tasks can be created iteratively by modifying the task based on previous humans' responses as in Collins et al. (2022) or via other iterative sampling procedures (Harrison et al., 2020; Sanborn & Griffiths, 2007).
140
+
141
+ By incorporating these design principles, we can create benchmarks that assess AI models' capacity for human-like reasoning, interaction, and adaptation to complex, real-world scenarios.
142
+
143
+ # 5. Alternative Views and Open Challenges
144
+
145
+ In this section, we address some challenges and alternative views/arguments on benchmarking Human-like intelligence.
146
+
147
+ # 5.1. Do We Need Human-like AI?
148
+
149
+ We acknowledge that certain highly specialized AI applications, such as protein structure prediction (Jumper et al., 2021) or weather forecasting (Lam et al., 2023; Bodnar et al., 2024), do not require human-like characteristics. Benchmarks for these domains fall outside the scope of this paper. Our focus lies on core cognitive capacities that enable machines to reason, interact, and collaborate with humans in the real world (Collins et al., 2024c).
150
+
151
+ Some might argue that, even in common-sense reasoning tasks, AI systems simply need to perform tasks effectively and be understandable or interpretable, without necessarily mimicking human cognition. We address this perspective in two ways. First, we reiterate the numerous benefits of human-like AI outlined in Section 2, including potentially enhanced model performance (robustness and flexible generalization), predictability by other humans, and potential for applications that warrant human-like cognition (e.g. agent simulations).
152
+
153
+ Second, even when the explicit goal is not to create human-like AI, adhering to the guidelines presented in this paper and looking to best practices from cognitive modeling can provide valuable insights into the AI system. Already, insights from cognitive science are being used to better understand LLMs (Binz & Schulz, 2023). By comparing AI per
154
+
155
+ formance on human-centric benchmarks with actual human responses, we can pinpoint the specific cognitive capacities where AI systems deviate from human-like intelligence. This comparative analysis reveals which aspects of an AI's reasoning and decision-making capabilities align with human thinking and which diverge, providing crucial information for AI safety and governance and informing the ways in which we use these systems. Furthermore, understanding these differences helps AI engineers and system users develop more accurate mental models of their systems (Bansal et al., 2019; Steyvers & Kumar, 2023), facilitating more informed design and effective use.
156
+
157
+ # 5.2. Biases and Errors in Human Responses
158
+
159
+ A critical consideration in using human data for AI benchmarks is the potential for biases and errors in human judgments. Cognitive science research has extensively documented human limitations in rational reasoning and decision-making, due to limited cognitive resources (Griffiths, 2020; Lieder & Griffiths, 2020) or systematic biases (Tversky & Kahneman, 1974). This raises the question: should AI systems replicate these human cognitive limitations?
160
+
161
+ There is no clear answer here. While there are some biases that we want to avoid baking into such models (e.g., harmful racial or gender prejudices), other cognitive biases can be useful for decision making (Lieder & Griffiths, 2020) and essential for accurately modeling human behavior – and early evidence suggests that such patterns of errors are not implicitly learned in some of today's models, which risks hampered human-AI interaction (Liu et al., 2024). For instance, human loss aversion, a well-established cognitive bias, plays a significant role in economic decision-making. Modeling such biases can be crucial for AI systems designed to simulate human behaviors or interact effectively within human economic systems. Conversely, an AI devoid of all cognitive biases might create friction or inefficiencies in collaborative decision-making with humans.
162
+
163
+ Ultimately, the extent to which AI should replicate human cognitive biases must be evaluated on a case-by-case basis, considering the specific objectives and application of the AI system. Nevertheless, to provide maximum flexibility and support diverse research goals, we recommend that benchmark creators provide both human data and "bias-free" labels whenever feasible. This approach empowers researchers to choose the appropriate data for their specific needs, whether it is training AI systems to make highly complex decisions free of bias and errors or accurately modeling human behavior for seamless human-AI collaboration or agent simulation.
164
+
165
+ # 5.3. Scalability and Practicality of Human Data Collection
166
+
167
+ Concerns regarding the scalability and practicality of human data collection for AI benchmarks are valid. Gathering human judgments can be resource-intensive, potentially hindering rapid benchmark development particularly if such collection involves eliciting many attributes per annotator (Wu et al., 2023; Collins et al., 2024b; Chung et al., 2019; Kirk et al., 2024). However, we argue that prioritizing quality over quantity, and leveraging readily available tools, enable us to begin to address these challenges.
168
+
169
+ First, benchmark effectiveness does not necessarily correlate with size. A smaller, carefully curated dataset focusing on challenging and edge cases can be more insightful than a massive dataset filled with redundant or trivial examples. By concentrating on high-quality, diagnostically valuable stimuli, we can maximize the benchmark's ability to reveal interesting and rich response patterns in AI systems and humans while minimizing the required data collection effort.
170
+
171
+ Second, advancements in crowdsourcing platforms, such as Amazon Mechanical Turk and Prolific, have significantly streamlined large-scale data annotation (Griffiths, 2015). These tools provide access to diverse populations, enabling researchers to collect representative samples efficiently. However, maintaining data quality remains crucial. Implementing rigorous exclusion criteria, clear instructions, and attention checks are essential for ensuring the reliability and validity of the collected data. For best practices in data crowdsourcing, we refer readers to Stewart et al. (2017).
172
+
173
+ By focusing on quality over quantity and utilizing available crowdsourcing tools effectively, the challenges of human data collection for benchmark development can be successfully mitigated. However, we urge substantial additional research into ways that we can make evaluation with humans more scalable especially as we consider human-likeness not just in a single decision or reasoning trace but in interactions with others (Lee et al., 2023; Collins et al., 2024a; Lee et al., 2024; Wang et al., 2024).
174
+
175
+ # 6. Conclusion
176
+
177
+ AI systems are increasingly deployed alongside humans. Characterizing the ways in which AI systems are, or are not, like humans is critical for ensuring we can understand where and how we may interact with these AI systems, and help us design systems that themselves may be more robust and flexible - like people. However, to really know whether an AI system is "human-like" demands careful evaluation. In this work, we have encouraged builders of AI evaluation to look to decades of research in cognitive modeling. Cognitive scientists have toiled at the question of how to measure human reasoning and decision-making; AI researchers would
178
+
179
+ be well-positioned to build on this work. Specifically, we encourage AI practitioners to ensure that if they are making claims about a system being "human-like" (or want to understand whether a system is or is not), human labels must be collected. We encourage researchers to lean towards, not away, from variability and uncertainty: looking at the distribution of annotators' responses and capturing graded beliefs from each annotator. Further, the tasks over which AI systems are benchmarked demand careful theory-driven design, as well as development in more ecologically-valid settings. AI systems are growing increasingly powerful; we need more robust and reliable evaluation not only if we want to build more human-compatible AI thought partners that we understand but also if we want to deeply understand ourselves.
180
+
181
+ # 7. Acknowledgments
182
+
183
+ This work was funded in part by Schmidt AI 2050, ONR, the MIT-IBM Watson AI Lab, and gifts from Reid Hoffman and the Siegel Family Foundation.
184
+
185
+ KMC acknowledges support from King's College Cambridge and the Cambridge Trust. AW acknowledges support from a Turing AI Fellowship under grant EP/V025279/1, EPSRC grants EP/V056522/1 and EP/V056883/1, and the Leverhulme Trust via CFI.
186
+
187
+ # References
188
+
189
+ Anderson, J. R., Boyle, C. F., Corbett, A. T., and Lewis, M. W. Cognitive modeling and intelligent tutoring. 1990.
190
+ Ashokkumar, A., Hewitt, L., Ghezae, I., and Willer, R. Predicting results of social science experiments using large language models. Work. Pap., New York Univ., New York, NY, 2024.
191
+ Baker, C. L., Jara-Ettinger, J., Saxe, R., and Tenenbaum, J. B. Rational quantitative attribution of beliefs, desires and percepts in human mentalizing. Nature Human Behaviour, 1(4):0064, 2017.
192
+ Bandura, A. Social cognitive theory: An agentic perspective. Annual review of psychology, 52(1):1-26, 2001.
193
+ Bansal, G., Nushi, B., Kamar, E., Lasecki, W. S., and Weld, D. S. e. a. Beyond accuracy: The role of mental models in human-AI team performance. In Proceedings of the AAAI Conference on Human Computation and Crowdsourcing, volume 7, pp. 2-11, 2019.
194
+ Battaglia, P. W., Hamrick, J. B., and Tenenbaum, J. B. Simulation as an engine of physical scene understanding. Proceedings of the National Academy of Sciences, 110 (45):18327-18332, 2013.
195
+
196
+ Beaudoin, C., Leblanc, É., Gagner, C., and Beauchamp, M. H. Systematic review and inventory of theory of mind measures for young children. Frontiers in psychology, 10: 2905, 2020.
197
+ Beyret, B., Hernández-Orallo, J., Cheke, L., Halina, M., Shanahan, M., and Crosby, M. The animal-ai environment: Training and testing animal-like artificial cognition. arXiv preprint arXiv:1909.07483, 2019.
198
+ Binz, M. and Schulz, E. Using cognitive psychology to understand gpt-3. Proceedings of the National Academy of Sciences, 120(6):e2218523120, 2023.
199
+ Bloom, P. and German, T. P. Two reasons to abandon the false belief task as a test of theory of mind. Cognition, 77 (1):B25-B31, 2000.
200
+ Bodnar, C., Bruinsma, W. P., Lucic, A., Stanley, M., Brandstetter, J., Garvan, P., Riechert, M., Weyn, J., Dong, H., Vaughan, A., et al. Aurora: A foundation model of the atmosphere. arXiv preprint arXiv:2405.13063, 2024.
201
+ Boogert, N. J., Madden, J. R., Morand-Ferron, J., and Thornton, A. Measuring and understanding individual differences in cognition, 2018.
202
+ Brand, J., Israeli, A., and Ngwe, D. Using llms for market research. Harvard Business School Marketing Unit Working Paper, (23-062), 2023.
203
+ Burnell, R., Schellaert, W., Burden, J., Ullman, T. D., and et al, F. M.-P. Rethink reporting of evaluation results in ai. Science, 380(6641):136-138, 2023. doi: 10.1126/science.adf6369.
204
+ Carroll, M., Shah, R., Ho, M. K., Griffiths, T., and Seshia, S. e. a. On the utility of learning about humans for human-ai coordination. Advances in neural information processing systems, 32, 2019.
205
+ Cave, S. and Dihal, K. Imagining AI: how the world sees intelligent machines. Oxford University Press, 2023.
206
+ Chater, N. and Manning, C. D. Probabilistic models of language processing and acquisition. Trends in cognitive sciences, 10(7):335-344, 2006.
207
+ Chen, Z., Wu, J., Zhou, J., Wen, B., Bi, G., Jiang, G., Cao, Y., Hu, M., Lai, Y., Xiong, Z., and Huang, M. ToMBench: Benchmarking theory of mind in large language models, 2024.
208
+ Chung, J. J. Y., Song, J. Y., Kutty, S., Hong, S., Kim, J., and Lasecki, W. S. Efficient elicitation approaches to estimate collective crowd answers. Proceedings of the ACM on Human-Computer Interaction, 3(CSCW):1-25, 2019.
209
+
210
+ Coda-Forno, J., Binz, M., Wang, J. X., and Schulz, E. CogBench: a large language model walks into a psychology lab. arXiv preprint arXiv:2402.18225, 2024.
211
+ Collins, K. M., Wong, C., Feng, J., Wei, M., and Tenenbaum, J. B. Structured, flexible, and robust: benchmarking and improving large language models towards more human-like behavior in out-of-distribution reasoning tasks. arXiv preprint arXiv:2205.05718, 44, 2022.
212
+ Collins, K. M., Barker, M., Espinosa Zarlenga, M., Raman, N., and Bhatt, U. e. a. Human uncertainty in concept-based ai systems. In Proceedings of the 2023 AAAI/ACM Conference on AI, Ethics, and Society, pp. 869-889, 2023a.
213
+ Collins, K. M., Bhatt, U., Liu, W., Piratla, V., Sucholutsky, I., Love, B., and Weller, A. Human-in-the-loop mixup. In Uncertainty in Artificial Intelligence, pp. 454-464. PMLR, 2023b.
214
+ Collins, K. M., Jiang, A. Q., Frieder, S., Wong, L., and Zilka, M. e. a. Evaluating language models for mathematics through interactions. Proceedings of the National Academy of Sciences, 121(24):e2318124121, 2024a.
215
+ Collins, K. M., Kim, N., Bitton, Y., Rieser, V., Omidshafiei, S., Hu, Y., Chen, S., Dutta, S., Chang, M., Lee, K., et al. Beyond thumbs up/down: Untangling challenges of fine-grained feedback for text-to-image generation. In Proceedings of the AAAI/ACM Conference on AI, Ethics, and Society, volume 7, pp. 293-303, 2024b.
216
+ Collins, K. M., Sucholutsky, I., Bhatt, U., Chandra, K., Wong, L., Lee, M., Zhang, C. E., Zhi-Xuan, T., Ho, M., Mansinghka, V., et al. Building machines that learn and think with people. Nature Human Behaviour, 8(10):1851-1863, 2024c.
217
+ Demszky, D., Yang, D., Yeager, D. S., Bryan, C. J., Clapper, M., Chandhok, S., Eichstaedt, J. C., Hecht, C., Jamieson, J., Johnson, M., et al. Using large language models in psychology. Nature Reviews Psychology, 2(11):688-701, 2023.
218
+ Dragan, A. D., Lee, K. C., and Srinivasa, S. S. Legibility and predictability of robot motion. In 2013 8th ACM/IEEE International Conference on Human-Robot Interaction (HRI), pp. 301-308. IEEE, 2013.
219
+ Fisac, J. F., Liu, C., Hamrick, J. B., Sastry, S., and Hedrick, J. K. e. a. Generating plans that predict themselves. In *Algorithmic Foundations of Robotics XII: Proceedings of the Twelfth Workshop on the Algorithmic Foundations of Robotics*, pp. 144–159. Springer, 2020.
220
+
221
+ Frieder, S., Bayer, J., Collins, K. M., Berner, J., Loader, J., Juhász, A., Ruehle, F., Welleck, S., Poesia, G., Griffiths, R.-R., et al. Data for mathematical copilots: Better ways of presenting proofs for machine learning. arXiv preprint arXiv:2412.15184, 2024.
222
+ Gallese, V. Before and below 'theory of mind': embodied simulation and the neural correlates of social cognition. Philosophical Transactions of the Royal Society B: Biological Sciences, 362(1480):659-669, 2007.
223
+ Gandhi, K., Franken, J.-P., Gerstenberg, T., and Goodman, N. Understanding social reasoning in language models with language models. Advances in Neural Information Processing Systems, 36, 2024.
224
+ Gerosa, M., Trinkenreich, B., Steinmacher, I., and Sarma, A. Can ai serve as a substitute for human subjects in software engineering research? Automated Software Engineering, 31(1):13, 2024.
225
+ Gordon, M. L., Lam, M. S., Park, J. S., Patel, K., and Hancock, J. e. a. Jury learning: Integrating dissenting voices into machine learning models. In CHI Conference on Human Factors in Computing Systems, pp. 1-19, 2022.
226
+ Graham, J., Haidt, J., and Nosek, B. A. Liberals and conservatives rely on different sets of moral foundations. Journal of personality and social psychology, 96(5):1029, 2009.
227
+ Graham, J., Meindl, P., Beall, E., Johnson, K. M., and Zhang, L. Cultural differences in moral judgment and behavior, across and within societies. *Current Opinion in Psychology*, 8:125–130, 2016.
228
+ Griffiths, T. L. Manifesto for a new (computational) cognitive revolution. Cognition, 135:21-23, 2015.
229
+ Griffiths, T. L. Understanding human intelligence through human limitations. Trends in Cognitive Sciences, 24(11): 873-883, 2020.
230
+ Griffiths, T. L., Chater, N., and Tenenbaum, J. B. Bayesian models of cognition: reverse engineering the mind. MIT Press, 2024.
231
+ Hagendorff, T., Fabi, S., and Kosinski, M. Human-like intuitive behavior and reasoning biases emerged in large language models but disappeared in chatgpt. Nature Computational Science, 3(10):833-838, 2023.
232
+ Hämäläinen, P., Tavast, M., and Kunnari, A. Evaluating large language models in generating synthetic hci research data: a case study. In Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems, pp. 1-19, 2023.
233
+
234
+ Harrison, P., Marjieh, R., Adolfi, F., van Rijn, P., and Anglada-Tort, M. e. a. Gibbs sampling with people. Advances in neural information processing systems, 33: 10659-10671, 2020.
235
+ Henrich, J., Heine, S. J., and Norenzayan, A. The weirdest people in the world? Behavioral and brain sciences, 33 (2-3):61-83, 2010.
236
+ Hernández-Orallo, J. Evaluation in artificial intelligence: from task-oriented to ability-oriented measurement. Artificial Intelligence Review, 48:397-447, 2017.
237
+ Ho, M. K. and Griffiths, T. L. Cognitive science as a source of forward and inverse models of human decisions for robotics and control. Annual Review of Control, Robotics, and Autonomous Systems, 5:33-53, 2022.
238
+ Jara-Ettinger, J., Schulz, L. E., and Tenenbaum, J. B. The naive utility calculus as a unified, quantitative framework for action understanding. Cognitive Psychology, 123: 101334, 2020.
239
+ Jumper, J., Evans, R., Pritzel, A., Green, T., Figurnov, M., Ronneberger, O., Tunyasuvunakool, K., Bates, R., Žídek, A., Potapenko, A., et al. Highly accurate protein structure prediction with alphafold. nature, 596(7873):583-589, 2021.
240
+ Kasirzadeh, A. and Gabriel, I. In conversation with artificial intelligence: aligning language models with human values. Philosophy & Technology, 36(2):1-24, 2023.
241
+ Keren, G. Calibration and probability judgements: Conceptual and methodological issues. Acta psychologica, 77(3): 217-273, 1991.
242
+ Kirk, H. R., Whitefield, A., Röttger, P., Bean, A. M., Margatina, K., Mosquera, R., Ciro, J. M., Bartolo, M., Williams, A., He, H., Vidgen, B., and Hale, S. A. The PRISM alignment dataset: What participatory, representative and individualised human feedback reveals about the subjective and multicultural alignment of large language models. In *The Thirty-eight Conference on Neural Information Processing Systems Datasets and Benchmarks Track*, 2024. URL https://openreview.net/forum?id=DFr5hteojx.
243
+ Kosinski, M. Evaluating large language models in theory of mind tasks. Proceedings of the National Academy of Sciences, 121(45):e2405460121, 2024.
244
+ Lafer-Sousa, R., Hermann, K. L., and Conway, B. R. Striking individual differences in color perception uncovered by 'the dress' photograph. *Current Biology*, 25(13):R545–R546, 2015.
245
+
246
+ Lake, B. M., Ullman, T. D., Tenenbaum, J. B., and Gershman, S. J. Building machines that learn and think like people. Behavioral and brain sciences, 40, 2017.
247
+ Lam, R., Sanchez-Gonzalez, A., Willson, M., Wirnsberger, P., Fortunato, M., Alet, F., Ravuri, S., Ewalds, T., Eaton-Rosen, Z., Hu, W., et al. Learning skillful medium-range global weather forecasting. Science, 382(6677):1416-1421, 2023.
248
+ Le, M., Boureau, Y.-L., and Nickel, M. Revisiting the evaluation of theory of mind through question answering. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 5872-5877, 2019.
249
+ Lee, M., Srivastava, M., Hardy, A., Thickstun, J., and Durmus, E. e. a. Evaluating human-language model interaction. Transactions on Machine Learning Research, 2023.
250
+ Lee, M., Gero, K. I., Chung, J. J. Y., Shum, S. B., and Raheja, V. e. a. A design space for intelligent and interactive writing assistants. CHI, 2024.
251
+ Levine, S., Kleiman-Weiner, M., Schulz, L., Tenenbaum, J., and Cushman, F. The logic of universalization guides moral judgment. Proceedings of the National Academy of Sciences, 117(42):26158-26169, 2020.
252
+ Lieder, F. and Griffiths, T. L. Resource-rational analysis: Understanding human cognition as the optimal use of limited computational resources. Behavioral and brain sciences, 43:e1, 2020.
253
+ Liu, R., Yen, H., Marjieh, R., Griffiths, T. L., and Krishna, R. Improving interpersonal communication by simulating audiences with language models, 2023.
254
+ Liu, R., Geng, J., Peterson, J. C., Sucholutsky, I., and Griffiths, T. L. Large language models assume people are more rational than we really are. arXiv preprint arXiv:2406.17055, 2024.
255
+ Minsky, M. Society of mind. Simon and Schuster, 1988.
256
+ Mitchell, M. The Turing test and our shifting conceptions of intelligence, 2024.
257
+ O'Hagan, A., Buck, C. E., Daneshkhah, A., Eiser, J. R., and et al, P. H. G. Uncertain Judgements: Eliciting Expert Probabilities. John Wiley, Chichester, 2006.
258
+ Ongchoco, J. D. K., Davis, I. M., Jara-Ettinger, J., and Paul, L. When new experience leads to new knowledge: A computational framework for formalizing epistemically transformative experiences. Open Mind, 8:1291-1311, 2024.
259
+
260
+ O'Hagan, A. Expert knowledge elicitation: Subjective but scientific. The American Statistician, 73(sup1):69-81, 2019. doi: 10.1080/00031305.2018.1518265.
261
+ Park, J. S., Zou, C. Q., Shaw, A., Hill, B. M., Cai, C., Morris, M. R., Willer, R., Liang, P., and Bernstein, M. S. Generative agent simulations of 1,000 people. arXiv preprint arXiv:2411.10109, 2024.
262
+ Peterson, J. C., Battleday, R. M., Griffiths, T. L., and Russakovsky, O. Human uncertainty makes classification more robust. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 9617-9626, 2019.
263
+ Rosenblatt, F. The perceptron: a probabilistic model for information storage and organization in the brain. *Psychological review*, 65(6):386, 1958.
264
+ Rumelhart, D. E., Hinton, G. E., and Williams, R. J. Learning internal representations by error propagation. In Rumelhart, D. E. and McClelland, J. L. (eds.), Parallel Distributed Processing: Explorations in the Microstructure of Cognition, Vol. 1, pp. 318-362. MIT Press, Cambridge, MA, 1986. Reprinted in Neurocomputing, Volume 1: Foundations of Research. The MIT Press, 1988. ISBN 9780262267137. doi: 10.7551/mitpress/4943.003.0128. URL https://doi.org/10.7551/mitpress/4943.003.0128.
265
+ Safdari, M., Serapio-García, G., Crepy, C., Fitz, S., Romero, P., Sun, L., Abdulhai, M., Faust, A., and Mataric, M. Personality traits in large language models. arXiv preprint arXiv:2307.00184, 2023.
266
+ Sanborn, A. and Griffiths, T. Markov chain monte carlo with people. Advances in neural information processing systems, 20, 2007.
267
+ Sap, M., Rashkin, H., Chen, D., LeBras, R., and Choi, Y. Social IQa: Commonsense reasoning about social interactions. arXiv preprint arXiv:1904.09728, 2019.
268
+ Shaikh, O., Chai, V. E., Gelfand, M., Yang, D., and Bernstein, M. S. Rehearsal: Simulating conflict to teach conflict resolution. In Proceedings of the CHI Conference on Human Factors in Computing Systems, pp. 1-20, 2024.
269
+ Shanahan, M., McDonell, K., and Reynolds, L. Role play with large language models. Nature, pp. 1-6, 2023.
270
+ Sorensen, T., Moore, J., Fisher, J., Gordon, M., Mireshghallah, N., Rytting, C. M., Ye, A., Jiang, L., Lu, X., Dziri, N., et al. A roadmap to pluralistic alignment. arXiv preprint arXiv:2402.05070, 2024.
271
+ Srivastava, A., Rastogi, A., Rao, A., Shoeb, A. A. M., Abid, A., Fisch, A., Brown, A. R., Santoro, A., Gupta, A.,
272
+
273
+ Garriga-Alonso, A., et al. Beyond the imitation game: Quantifying and extrapolating the capabilities of language models. arXiv preprint arXiv:2206.04615, 2022.
274
+ Stewart, N., Chandler, J., and Paolacci, G. Crowdsourcing samples in cognitive science. Trends in cognitive sciences, 21(10):736-748, 2017.
275
+ Steyvers, M. and Kumar, A. Three challenges for ai-assisted decision-making. Perspectives on Psychological Science, pp. 17456916231181102, 2023.
276
+ Sucholutsky, I. and Schonlau, M. 'Less than one'-shot learning: Learning n classes from m < n samples. In Proceedings of the AAAI conference on artificial intelligence, volume 35, pp. 9739-9746, 2021.
277
+ Sucholutsky, I., Battleday, R. M., Collins, K. M., Marjieh, R., and Peterson, J. e. a. On the informativeness of supervision signals. In Uncertainty in Artificial Intelligence, pp. 2036-2046. PMLR, 2023a.
278
+ Sucholutsky, I., Muttenthaler, L., Weller, A., Peng, A., and et al, A. B. Getting aligned on representational alignment, 2023b.
279
+ Sucholutsky, I., Zhao, B., and Griffiths, T. Using compositionality to learn many categories from few examples. In Proceedings of the Annual Meeting of the Cognitive Science Society, volume 46, 2024.
280
+ Tenenbaum, J. Bayesian modeling of human concept learning. Advances in neural information processing systems, 11, 1998.
281
+ Turing, A. Computing machinery and intelligence. Mind, 59(236):433, 1950.
282
+ Tversky, A. and Kahneman, D. Judgment under uncertainty: Heuristics and biases: Biases in judgments reveal some heuristics of thinking under uncertainty. science, 185 (4157):1124-1131, 1974.
283
+ Uma, A., Fornaciari, T., Hovy, D., Paun, S., and Plank, B. e. a. A case for soft loss functions. Proceedings of the AAAI Conference on Human Computation and Crowdsourcing, 8(1):173-177, Oct. 2020.
284
+ Wang, R. E., Ribeiro, A. T., Robinson, C. D., Loeb, S., and Demszky, D. Tutor copilot: A human-ai approach for scaling real-time expertise. arXiv preprint arXiv:2410.03017, 2024.
285
+ Wang, Z. and Jurgens, D. It's going to be okay: Measuring access to support in online communities. In Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing, pp. 33-45, 2018.
286
+
287
+ Weizenbaum, J. Eliza—a computer program for the study of natural language communication between man and machine. Communications of the ACM, 9(1):36-45, 1966.
288
+ Wimmer, H. and Perner, J. Beliefs about beliefs: Representation and constraining function of wrong beliefs in young children's understanding of deception. Cognition, 13(1):103-128, 1983.
289
+ Windschitl, P. D. and Wells, G. L. Measuring psychological uncertainty: Verbal versus numeric methods. Journal of Experimental Psychology: Applied, 2(4):343, 1996.
290
+ Wu, Z., Hu, Y., Shi, W., Dziri, N., Suhr, A., Ammanabrolu, P., Smith, N. A., Ostendorf, M., and Hajishirzi, H. Fine-grained human feedback gives better rewards for language model training. Advances in Neural Information Processing Systems, 36:59008-59033, 2023.
291
+ Ying, L., Zhi-Xuan, T., Mansinghka, V., and Tenenbaum, J. B. Inferring the goals of communicating agents from actions and instructions. arXiv e-prints, 2(1):arXiv-2306, 2023.
292
+ Zhi-Xuan, T., Ying, L., Mansinghka, V., and Tenenbaum, J. B. Pragmatic instruction following and goal assistance via cooperative language-guided inverse planning. In Proceedings of the 23rd International Conference on Autonomous Agents and Multiagent Systems, pp. 2094-2103, 2024.
293
+ Zhou, L., Schellaert, W., Martínez-Plumed, F., Moros-Daval, Y., Ferri, C., and Hernández-Orallo, J. Larger and more instructable language models become less reliable. Nature, pp. 1-8, 2024.
294
+
295
+ # A. Experiment Design
296
+
297
+ # A.1. Dataset sources
298
+
299
+ The BigBench dataset consists of 204 tasks. Among the tasks we used in the evaluation study, the Social Support task is adapted from a dataset published by Wang & Jurgens (2018). The Social IQA task is taken from Sap et al. (2019). Other tasks are constructed from various online sources. We refer to BigBench (Srivastava et al., 2022) for detailed descriptions.
300
+
301
+ # A.2. Converting multiple choices to soft labels
302
+
303
+ All benchmarks used in our experiment provide one single answer key with 2-4 answer options for each stimulus. To collect people's graded judgments, we converted the answer options to soft labels. For binary Yes/No questions (e.g. whether a statement is supportive), we use a single scale (e.g. $1 =$ extremely not supportive, $100 =$ extremely supportive). For stimuli that have open-ended answer options, we use a scale for each answer option. For example, consider the following stimulus:
304
+
305
+ After rushing to make it to the gate, Robin missed his flight, so Cameron picked him up from the airport. What will happen to Robin?
306
+
307
+ A. Be in a car
308
+ B. Pick up their friend
309
+ C. Be on a plane
310
+
311
+ For each of the three answer options, the participants answer by dragging a scale. (1 = definitely disagree, 100 = definitely agree).
312
+
313
+ # A.3. Evaluation metrics
314
+
315
+ To examine if participants agree with the labels, we calculated the agreement rate by comparing their responses on the soft label with the ground truth label. For binary Yes/No questions, if the participant rates 50 or above, we count it as Yes, and otherwise No. In one of the benchmarks, the labels are No/Neutral/Yes. In this case, we convert 1-33 as No, 33-66 as Neutral, and 67-100 as Yes. For stimuli with multiple scales, we compare participants' ratings on each scale and take the answer option with the highest rating.
316
+
317
+ We then calculate the agreement rate for each stimulus by dividing the number of responses in agreement with the label against the total number of responses.
318
+
319
+ B. Additional results and analysis
320
+
321
+ <table><tr><td>Benchmark</td><td>Task</td><td>No. of Options</td><td>Random baseline (%)</td><td>Human agreement rate (%)</td></tr><tr><td rowspan="8">BigBench</td><td>Fantasy reasoning</td><td>2</td><td>50</td><td>62.69 (10.89)</td></tr><tr><td>Social IQA</td><td>3</td><td>33.33</td><td>68.55 (21.35)</td></tr><tr><td>Moral permissibility</td><td>2</td><td>50</td><td>66.19 (12.86)</td></tr><tr><td>Simple ethical questions</td><td>2 or 3</td><td>43.87</td><td>90.29 (13.32)</td></tr><tr><td>Social support</td><td>3</td><td>33.33</td><td>32.13 (14.25)</td></tr><tr><td>Irony identification</td><td>2</td><td>50</td><td>68.00 (13.19)</td></tr><tr><td>Dark humor detection</td><td>2</td><td>50</td><td>70.37 (22.70)</td></tr><tr><td>Movie dialog same or different</td><td>2</td><td>50</td><td>58.42 (17.15)</td></tr><tr><td>ToMBench</td><td>Ambiguous story task</td><td>4</td><td>25</td><td>39.90 (15.22)</td></tr><tr><td>BigToM</td><td>Theory of Mind reasoning</td><td>2</td><td>50</td><td>78.52 (15.55)</td></tr></table>
322
+
323
+ Table 2. Human agreement rates broken down by benchmark. Standard deviations are shown in brackets.
324
+
325
+ # B.1. Agreement rate by dataset
326
+
327
+ The agreement rate for each dataset is shown in Table 2. The random baseline indicates the expected level of agreement rate with human participants by random guesses. Although all but one benchmark has human agreement rates higher than
328
+
329
+ chance, the level varies significantly across datasets. Many tasks have an agreement rate barely above chance.
330
+
331
+ # B.2. Qualitative examples
332
+
333
+ We show more qualitative examples of human response distributions in Tables 3, 4 and 5. In some cases, humans interpreted the stimulus differently and the response distribution is bi-modal (Example 1, 2, 9). In some cases, we find that the benchmark label does not match human intuition (e.g. Example 3, 4). In some cases, we find that humans are quite uncertain (e.g. Example 5, 6).
334
+
335
+ In Example 7, most participants find all three answer options to be highly likely, while in Example 10, most participants disagree with all four answer options.
336
+
337
+ In Example 8, almost all participants agree that option A is better than B and C, yet most participants responded within 50 - 80 range for option A, which indicates that they have hesitations about changing others' political view as the absolute ethical thing to do in this case.
338
+
339
+ These qualitative examples show interesting response patterns among human participants that aren't captured by existing benchmark labels, yet they provide profound insights into how humans interpret stimuli in a nuanced way and reason with uncertainty.
340
+
341
+ <table><tr><td>ID/Task</td><td>Scenario</td><td>Options</td><td>Label</td><td colspan="5">Human response distribution</td></tr><tr><td>1. Irony Iden-tification</td><td>Life is good. Maybe you should try getting one.</td><td>Not Ironic
342
+ Ironic</td><td>Ironic</td><td>0
343
+ Definitely
344
+ Not Ironic</td><td>40</td><td>60</td><td>80</td><td>100
345
+ Definitely
346
+ Ironic</td></tr><tr><td>2. Fantasy rea-soning</td><td>&quot;Is it true that you can only see three ba-sic colors?&quot;, the alien being asked &quot;It is.&quot;
347
+ &quot;So, how do you communicate with the Ghoosha?&quot; &quot;With whom?&quot;, I responded confused. &quot;The other major race on your planet.&quot; Can humans not see individuals of the Ghoosha race because Ghoosha skin is colored in two of the three basic colors?</td><td>Yes/No</td><td>No</td><td>0
348
+ Definitely
349
+ No</td><td>40</td><td>60</td><td>80</td><td>100
350
+ Definitely
351
+ Yes</td></tr><tr><td>3. Social Sup-port</td><td>It&#x27;s been five days, no answer, I&#x27;ll delete. -
352
+ Jmabel</td><td>Unsupported
353
+ Neutral
354
+ Supportive</td><td>Neutral</td><td>0
355
+ Definitely
356
+ Unsupportive</td><td>40</td><td>60</td><td>80</td><td>100
357
+ Definitely
358
+ Supportive</td></tr><tr><td>4. Dark hu-mor detection</td><td>What&#x27;s the difference between jelly and jam? You can&#x27;t jelly a clown into the tiny car.</td><td>Joke
359
+ Not joke</td><td>Joke</td><td>0
360
+ Definitely
361
+ Not Joke</td><td>40</td><td>60</td><td>80</td><td>100
362
+ Definitely
363
+ Joke</td></tr><tr><td>5. Movie di-alog same or different</td><td>Yes, I come. I want you to turn out the light in the rear entrance. It might attract the police. But Sacha always puts out that light -- Tonight he forgot. Yes, I come, I will do it. — In the preceding conversation, were the sentences &quot;I want you to turn out the light in the rear entrance.&quot; and &quot;It might attract the police.&quot;</td><td>Different
364
+ Same</td><td>Same</td><td>0
365
+ Definitely
366
+ Different</td><td>40</td><td>60</td><td>80</td><td>100
367
+ Definitely
368
+ Same</td></tr><tr><td>6. Moral per-missibility</td><td>A self-driving car with sudden brake fail-ure will need to decide to continue or to swerve. If the car continues ahead, it will drive through one pedestrian crossing the road. This will result in the death of one female doctor. If the car swerves, it will crash into a concrete barrier, killing the driver. This will result in the death of one homeless person. Should the self-driving car continue?</td><td>Yes / No</td><td>No</td><td>0
369
+ Definitely
370
+ No</td><td>40</td><td>60</td><td>80</td><td>100
371
+ Definitely
372
+ Yes</td></tr></table>
373
+
374
+ Table 3. Human response distributions on examples of stimuli with binary/bipolar response options.
375
+
376
+ <table><tr><td>ID/Task</td><td>Scenario</td><td>Label</td><td>Human response distribution</td></tr><tr><td>7. Social IQA</td><td>Jan came over one night and searched the house because she was scared a robber could their. Why did Jan do this?
377
+ A. Be safe
378
+ B. Was afraid of criminals
379
+ C. Secure</td><td>B</td><td>Option A
380
+ 0 20 40 60 80 100
381
+ Option B
382
+ 0 20 40 60 80 100
383
+ Option C
384
+ 0 20 40 60 80 100</td></tr><tr><td>8. Simple Ethical Questions</td><td>Imagine there is a person who is often advocating his political views in a public place. Most people agree that his political views are dangerous. They correctly conclude that if the people like him gain some political power, many people will be harmed. Which option is more likely to be ethical?
385
+ A. Try to convince him to change his views
386
+ B. Beat him to death
387
+ C. All people with such political views must be incarcerated</td><td>A</td><td>Option A
388
+ 0 20 40 60 80 100
389
+ Option B
390
+ 0 20 40 60 80 100
391
+ Option C
392
+ 0 20 40 60 80 100</td></tr></table>
393
+
394
+ Table 4. Human response distributions on examples of stimuli with multiple choice options. Each histogram corresponds to a rating scale for each answer option. 1 indicates "Definitely Disagree" and 100 indicates "Definitely Agree"
395
+
396
+ <table><tr><td>ID/Task</td><td>Scenario</td><td>Label</td><td>Human response distribution</td></tr><tr><td>9. BigToM</td><td>Kofi is a fisherman from a small village in Ghana. He wants to catch enough fish today to provide for his family and sell the surplus at the market. Kofi repaired his fishing net last night. Kofi believes that his fishing net is in good condition. While Kofi is away from his boat, a group of monkeys comes and plays with the fishing net, tearing it apart. Kofi does not see the monkeys damaging his fishing net. What will Kofi do?
397
+ A. Kofi will buy new repair equipment for fixing the fishing net.
398
+ B. Kofi will go fishing with his net.</td><td>B</td><td>Option A
399
+ 0 20 40 60 80 100
400
+ Option B
401
+ 0 20 40 60 80 100</td></tr><tr><td>10. ToMBench</td><td>The night is deep, and everyone in the community immures in sweet dreams. Suddenly, Xiao Chen turns the stereo to the highest level, playing songs loudly. Xiao Guang and Xiao Li wake up because of the noise. They step onto the balcony and see Xiao Chen on the balcony of the opposite building, laughing at them with schadenfreude. Xiao Li frowns, prepares to confront Xiao Chen, and picks up a baseball bat. At this moment, Xiao Guang stops Xiao Li, waves at Xiao Li, and then walks downstairs. Xiao Chen sees Xiao Guang coming from the corridor. Why does Xiao Guang wave at Xiao Li?
402
+ A. Xiao Guang laughs because he finds Xiao Chen&#x27;s behavior interesting.
403
+ B. Xiao Guang laughs because he finds Xiao Li&#x27;s frowning expression funny.
404
+ C. Xiao Guang laughs because he wants to solve the problem in a peaceful way and lets Xiao Li know.
405
+ D. Xiao Guang laughs because he comes up with a good idea to retaliate against Xiao Chen.</td><td>C</td><td>Option A
406
+ 0 20 40 60 80 100
407
+ Option B
408
+ 0 20 40 60 80 100
409
+ Option C
410
+ 0 20 40 60 80 100
411
+ Option D
412
+ 0 20 40 60 80 100</td></tr></table>
413
+
414
+ Table 5. Human response distributions on examples of stimuli with multiple choice options. Each histogram corresponds to a rating scale for each answer option. 1 indicates "Definitely Disagree" and 100 indicates "Definitely Agree"
data/2025/2502_20xxx/2502.20502/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:af4332b535ae95b92bd7a3d94381ea04b0bfc4e5fb5b3a028e2e701fdb08a564
3
+ size 760684
data/2025/2502_20xxx/2502.20502/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20586/01ebdb3d-c847-41b0-88c7-f3959668297a_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20586/01ebdb3d-c847-41b0-88c7-f3959668297a_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20586/01ebdb3d-c847-41b0-88c7-f3959668297a_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:92719129095864a69ddf479dd43610be76ca7b6469f160fce91c7a5c884a6b26
3
+ size 1226328
data/2025/2502_20xxx/2502.20586/full.md ADDED
@@ -0,0 +1,474 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Training LLMs with MXFP4
2
+
3
+ Albert Tseng†
4
+
5
+ Cornell University
6
+
7
+ albert@cs.cornell.edu
8
+
9
+ Tao Yu
10
+
11
+ AWS AI
12
+
13
+ taou@amazon.com
14
+
15
+ Youngsuk Park
16
+
17
+ AWS AI
18
+
19
+ pyoungsu@amazon.com
20
+
21
+ # Abstract
22
+
23
+ Low precision (LP) datatypes such as MXFP4 can accelerate matrix multiplications (GEMMs) and reduce training costs. However, directly using MXFP4 instead of BF16 during training significantly degrades model quality. In this work, we present the first near-lossless training recipe that uses MXFP4 GEMMs, which are $2 \times$ faster than FP8 on supported hardware. Our key insight is to compute unbiased gradient estimates with stochastic rounding (SR), resulting in more accurate model updates. However, directly applying SR to MXFP4 can result in high variance from block-level outliers, harming convergence. To overcome this, we use the random Hadamard transform to theoretically bound the variance of SR. We train GPT models up to 6.7B parameters and find that our method induces minimal degradation over mixed-precision BF16 training. Our recipe computes $>1/2$ the training FLOPs in MXFP4, enabling an estimated speedup of $>1.3 \times$ over FP8 and $>1.7 \times$ over BF16 during backpropagation.
24
+
25
+ # 1 Introduction
26
+
27
+ The latest large language models (LLMs) have billions of parameters that are trained on trillions of tokens, making them incredibly expensive to train. For example, training Llama 3.1 405B required $3 \times 10^{24}$ floating point operations (FLOPs), or over 10000 GPUs for multiple months (Dubey et al., 2024). Recent hard-
28
+
29
+ ![](images/032f57286555d0675d8a4248ffc180d33a34b445d55a0a3eeb5bc4e36fad6ea8.jpg)
30
+ Figure 1: Our method uses stochastic rounding (SR) to compute unbiased gradients and the random Hadamard transform to bound the variance of SR. This enables us to perform more accurate model updates with MXFP4 in the backward pass, enabling a speedup of $>1.3 \times$ over FP8 and $>1.7 \times$ over BF16.
31
+
32
+ ware accelerators have started supporting low precision floating point ( $\leq 16$ bit) matrix multiplications (GEMMs). Compared to 32-bit GEMMs, hardware-accelerated low precision (LP) GEMMs run at significantly higher throughputs. For example, FP8 GEMMs can be $4\times$ faster than FP32 GEMMs and also more energy efficient (NVIDIA, 2024a).
33
+
34
+ Since LLM training is compute bound in matrix multiplications, LP GEMMs can accelerate training. Almost all modern LLMs are trained with 16 bit GEMMs (Touvron et al., 2023; Dubey et al., 2024), and some even use FP8 GEMMs (Peng et al., 2023). Using 16 bit GEMMs halves the cost of matrix multiplications and improves end-to-end throughput by almost $2 \times$ (Micikevicius et al., 2018). However, there is no free lunch
35
+
36
+ with low precision training. Reducing the GEMM precision increases quantization distortion and can cause numerical instability.
37
+
38
+ To counteract these issues, the recently introduced Microscaling (MX) family of datatypes uses a shared blockwise scale across multiple floating point numbers (Project, 2023). For example, MXFP4 uses an INT8 scale $s$ for every contiguous block $v$ of 32 FP4 numbers to represent $2^{s - 1}v$ , where 1 is the exponent bias for FP4. This scale enables a significantly wider range of representable numbers at the cost of an extra $8 / 32 = 0.25$ bits per entry. However, MX alone is not enough to enable lossless low precision training with FP4. As we show in Section 4, directly using MXFP4 in even only the backward pass of decoder linear layers significantly degrades model quality.
39
+
40
+ In this work, we introduce two techniques that enable near-lossless distributed training with MXFP4. Our method hinges on computing low-variance, unbiased gradient estimates that enable more accurate model updates. First, we use stochastic rounding to compute unbiased GEMMs. Then, we use a memory-bound construction of random Hadamard transform to reduce the effect of outliers and theoretically bound the variance of SR, aiding convergence. We apply our method to decoder linear layers and show that it incurs minimal degradation over BF16 mixed precision training when pretraining GPT models up to 6.7B parameters. Our recipe computes over half the training FLOPs in MXFP4 and can significantly accelerate pretraining. In summary, we:
41
+
42
+ - Introduce a MXFP4 training recipe that uses stochastic rounding and the random Hadamard transform to compute unbiased, low-variance gradient estimates during backpropagation.
43
+ - Pretrain GPT models up to 6.7B and show that our training recipe closes the MXFP4-BF16 gap to $< 0.1$ validation perplexity.
44
+ - Show that our RHT and SR constructions add minimal overhead to MXFP4 GEMMs, giving a theoretical speedup of $>1.3 \times$ and $>1.7 \times$ over a FP8 and BF16 backward pass, respectively.
45
+
46
+ # 2 Background and Related Works
47
+
48
+ # 2.1 Low Precision Datatypes and IEEE 754 Floating Point Numbers
49
+
50
+ Traditionally, low precision (LP) datatypes refer to datatypes that use significantly fewer than 32 bits to represent a single number. For example, FP16 uses 16 bits to represent floating point numbers. While there
51
+
52
+ Table 1: Common HW supported FP datatypes.
53
+
54
+ <table><tr><td rowspan="2">Name</td><td colspan="4">Bits</td></tr><tr><td>Total</td><td>Sign</td><td>Exponent</td><td>Mantissa</td></tr><tr><td>FP64</td><td>64</td><td>1</td><td>11</td><td>52</td></tr><tr><td>FP32</td><td>32</td><td>1</td><td>8</td><td>23</td></tr><tr><td>FP16</td><td>16</td><td>1</td><td>5</td><td>10</td></tr><tr><td>BF16</td><td>16</td><td>1</td><td>8</td><td>7</td></tr><tr><td>FP8 E4M3</td><td>8</td><td>1</td><td>4</td><td>3</td></tr><tr><td>FP8 E5M2</td><td>8</td><td>1</td><td>5</td><td>2</td></tr><tr><td>FP4</td><td>4</td><td>1</td><td>2</td><td>1</td></tr></table>
55
+
56
+ are many different low precision datatypes, including stateful ones (Tseng et al., 2024b), a certain subset has been standardized under the IEEE754 floating point (FP) (IEEE, 2019). These datatypes often come with hardware acceleration for compute-bound workloads.
57
+
58
+ IEEE floats (Table 1) are defined with 1 sign bit, $e$ exponent bits, and $m$ mantissa bits. In shorthand, a $1 + m + e$ bit datatype is written as $\mathrm{EeM}m$ . The actual "normal" value represented by an IEEE float with sign bit $S$ , mantissa $M$ , and exponent $E$ is
59
+
60
+ $$
61
+ (-1)^{S} (1 + M)\, 2^{E - \text{bias}},
62
+ $$
63
+
64
+ where bias is a datatype-dependent integer exponent bias offset specified by IEEE (2019). This exponent-mantissa construction means FP datatypes are scale-invariant with respect to quantization signal-to-noise ratio (SNR) bar over/underflow (Blake et al., 2023).
65
+
66
+ # 2.2 LLM Training
67
+
68
+ The most common way to train a LLM involves computing a loss function, computing the gradient of the loss function with respect to the model parameters, and then updating the parameters with gradient information. For example, when pretraining a decoder-only LLM, one might use an autoregressive cross-entropy-based loss and the AdamW optimizer (Touvron et al., 2023; Dubey et al., 2024). While the exact training setup may differ, the core bottlenecks of training are the compute-bound forward and backward passes that calculate the loss and gradients, respectively. Within these two components, the majority of the FLOPs are in the linear layers - at 30B parameters, over $90\%$ of the FLOPs are in the linear layers (Casson, 2023).
69
+
70
+ The forward pass for a linear layer with input dimension $n$ and output dimension $m$ computes $y = xW^{T} + b$ , where $W \in \mathbb{R}^{m \times n}$ is a parameter matrix and $b \in \mathbb{R}^m$ is an optional bias term. To backpropagate through a linear layer, we need to calculate the gradient of the loss with respect to $x, W$ , and $b$ . These are given by $\frac{dL}{dx} = \frac{dL}{dy} W$ , $\frac{dL}{dW} = \frac{dL}{dy}^T x$ , and $\frac{dL}{db} = \mathbb{1}\frac{dL}{dy}$ , where $\mathbb{1}$ is the all-ones
71
+
72
+ vector and $\frac{dL}{dy}$ is the backprop output from the previous (going backwards) operation in the chain rule (Johnson, 2017). Each linear layer requires 3 computationally intensive matrix multiplications ( $xW^T$ , $\frac{dL}{dx}$ , and $\frac{dL}{dW}$ ), 2 of which are in the backward pass.
73
+
74
+ # 2.3 Mixed Precision Training
75
+
76
+ One way to accelerate training is with "mixed precision" (MP) training. In MP, parameters are kept in high precision and GEMM operands are converted to a LP datatype for a LP GEMM. MP is a simple way to achieve the throughput benefits of LP datatypes since quantization usually has minimal overhead. End to end, BF16 MP is often $>70\%$ faster than FP32 training (Shoeybi et al., 2020). However, quantization introduces distortion in the GEMM operands and thus outputs. Since the forward and backward passes all happen in low precision, both the loss and the model updates can deviate from their "true" values. At low bitrates $\ll 16$ , distortion can degrade model quality and even cause divergence, necessitating advanced training recipes. For example, FP8 MP recipes typically use E4M3 (more precision) in the forward pass and E5M2 (more range) in the backward pass due to the different properties of gradients, weights, and activations (Peng et al., 2023; NVIDIA Transformer Engine).
77
+
78
+ At 4 bits, quantization distortion becomes even more difficult to manage. Xi et al. (2023) train smaller non-GPT transformers with INT4 GEMMs by using the non-randomized Hadamard transform in the forward pass and leverage score sampling (LSS) in the backward pass. Since LSS introduces additional overhead, they were only able to achieve an end-to-end speedup of $30\%$ over FP16, which is on par with FP8 mixed precision training (Peng et al., 2023). We are also aware of a concurrent work by Wang et al. (2025) that trains LLMs with FP4. There, the authors train billion parameter GPT models with FP4 in both the forward and backward pass by using a differentiable gradient estimator and keeping outliers in high precision, resulting in a perplexity gap of $>0.5$ . Since their work was released after our paper went through the review process, we reserve a full comparison for future work.
79
+
80
+ # 2.4 Stochastic Rounding
81
+
82
+ Mixed precision requires quantizing from a higher precision tensor to a LP tensor at every step - this opens up flexibility in how the actual quantization happens. The canonical "nearest rounding" (NR) method rounds each high precision number to its closest representable value in the LP datatype (IEEE, 2019). However, NR is not unbiased, which we later show to be detrimental to low precision training. One way to
83
+
84
+ achieve unbiased rounding is with "stochastic rounding" (SR), which randomly rounds a number to a representable value in the LP datatype so that, in expectation, the rounded number equals the original number (Croci et al., 2022).
85
+
86
+ SR can be implemented efficiently through dithering, which adds random uniform noise to the input number and then performs NR (Croci et al., 2022). For example, Amazon's Trainium line of chips can perform SR with dithering while adding less than $2\%$ overhead to a BF16 GEMM. Equation 1 describes SR with dithering for a uniform integer quantizer; the non-uniform case requires modifying the noise scale but is otherwise essentially the same.
87
+
88
+ $$
89
+ \delta \sim \mathcal{U}(-0.5, 0.5) \tag{1}
90
+ $$
91
+
92
+ $$
93
+ \operatorname{SR}_{\text{dither}}(x) = \begin{cases} \lfloor x \rfloor & x + \delta < \lfloor x \rfloor + \frac{1}{2} \\ \lceil x \rceil & x + \delta \geq \lfloor x \rfloor + \frac{1}{2} \end{cases} \tag{2}
94
+ $$
95
+
96
+ SR can also be used anywhere where numbers are quantized. For example, near the end of training, the model update norm is much smaller than the parameter norm and information in low precision updates can be "lost" (Yu et al., 2024). Here, stochastic rounding can be used to preserve the update in expectation, which uses less memory than keeping a high precision copy of the parameters.
97
+
98
+ # 2.5 Microscaling (MX) FP Formats
99
+
100
+ The recently introduced microscaling floating point family of datatypes builds upon IEEE floats by adding a groupwise scale to a base IEEE float (Project, 2023). This scale allows a MXFP tensor to take on a wider range of values without significantly increasing the total bitrate, with the caveat that entries in a group should be roughly the same magnitude for the scale to be useful. In practice, MX scaling is more important as the base datatype bitrate decreases. Whereas FP8 E4M3 has a dynamic range of $\frac{448}{2^{-9}} = 2.3 \times 10^5$ , FP4 has a dynamic range of $\frac{6}{0.5} = 12$ . MX scaling enables MXFP4 to represent a much wider range of values across blocks.
101
+
102
+ The core hardware-supported MXFP formats generally follow similar patterns. Scales are shared across contiguous entries in memory (usually 32), and quantizing a scalar tensor to a MX tensor depends on the largest element in each group (Project, 2023; NVIDIA, a). Algorithm 1 describes the "reference" algorithm for quantizing a scalar tensor to MX, which can be implemented efficiently on modern AI accelerators (Thakkar et al., 2023). Algorithm 1 scales each group based on its maximum magnitude element and then performs nearest rounding to obtain a MX tensor.
103
+
104
+ Algorithm 1 Convert vector of scalar floats $V \in$ HP_TYPE to an MX block $\{X, P \in \mathrm{LP\_TYPE}^k\}$ (from (Project, 2023))
105
+
106
+ Require: $\text{emax}_{\text{elem}} =$ exponent of largest normal in LP_DTYPE, $k = 32$ for hardware support.
107
+
108
+ 1: shared_exp $\leftarrow \lfloor \log_2(\max_i(|V_i|))\rfloor -\mathsf{emax}_{\mathsf{elem}}$
109
+ 2: $X \gets 2^{\text{shared\_exp}}$
110
+ 3: for $i = 1$ to $k$ do
111
+ 4: $P_{i} =$ quantize_to_LP $(V_{i} / X)$
112
+ 5: end for
113
+ 6: return $X$ , $\{P_i\}_{i=1}^k$
114
+
115
+ # 3 Training with MXFP4
116
+
117
+ The rest of this paper describes our approach that enables near-lossless training with MXFP4-accelerated GEMMs. Although our paper focuses on MXFP4, our analysis also applies to other low precision datatypes such as MXINT4. We chose MXFP4 due to its relevance and hardware support on the latest accelerators. To the best of our knowledge, MXFP4 has only been successfully used for near-lossless inference (Rouhani et al., 2023; NVIDIA, 2024b). Although certain works have achieved near-lossless training with MXFP4 weights, these require the activations and gradients to be kept in higher precision. These recipes run at the throughput of the higher precision operand, making them slower than pure-FP4 recipes.
118
+
119
+ Our method hinges on obtaining unbiased, low-variance gradient estimates with pure-MXFP4 GEMMs in the backward pass, enabling more accurate model updates. Since the backward pass consists of $>1/2$ training FLOPs, our recipe can significantly accelerate training without reducing the representational power of the model from LP forward passes (Kumar et al., 2025). To do this, we first modify the OCP MX quantization algorithm to perform unbiased quantization with scaling and stochastic rounding. Then, we show that by first transforming the GEMM operands with a memory-bound construction of the random Hadamard transform (RHT) before quantization, we can bound the variance of the GEMM output. Our method adds minimal overhead while significantly improving the quality of trained models, making MXFP4 practical for training.
120
+
121
+ # 3.1 Unbiased Quantization to MXFP4
122
+
123
+ Algorithm 1 describes the "reference" MX quantization algorithm to convert a scalar matrix to an MX matrix. Algorithm 1 finds, for each group of 32 entries, the value with the largest magnitude $m = \max_i(|V_i|)$ . Then, it calculates a shared exponent as a function of $m$ and $\text{emax}_{\text{elem}}$ , the largest exponent of a normal num
124
+
125
+ Algorithm 2 Unbiased quantization of $V \in$ HP_DTYPE $^k$ to an MXFP4 block $\{X, P \in \mathrm{LP\_DTYPE}^k\}$
126
+
127
+ Require: $\text{emax}_{\text{elem}} =$ exponent of the largest normal number in LP_DTYPE
128
+
129
+ 1: shared_exp $\leftarrow \lfloor \log_2(\max_i(|V_i|))\rfloor -\mathsf{emax}_{\mathsf{elem}}$
130
+ 2: $X \gets 2^{\text{shared\_exp}}$
131
+ 3: for $i = 1$ to $k$ do
132
+ 4: $V_{i}\gets \frac{3}{4} V_{i}$
133
+ 5: $P_{i} =$ stochastic_round_to_FP4 $(V_{i} / X)$
134
+ 6: end for
135
+ 7: return $X$ , $\{P_{i}\}_{i=1}^{k}$
136
+
137
+ ber in the base data format. For example, $\mathsf{emax_{elem}} = 2$ for FP4 since its maximum normal value is $6 = 2^{2}*1.5$ .
138
+
139
+ Finally, group elements are normalized by the shared exponent and rounded to the base datatype.
140
+
141
+ For MXFP4, line 1 of Algorithm 1 returns shared_exp $\leftarrow \lfloor \log_2(m) \rfloor - 2$ . Observe that after dividing the entire group by $2^{\text{shared\_exp}}$ , $m$ becomes
142
+
143
+ $$
144
+ m \leftarrow \frac{m}{2^{\text{shared\_exp}}} < \frac{m}{2^{\log_2(m) - 3}} = 8 \tag{3}
145
+ $$
146
+
147
+ Since the maximum representable normal value in FP4 is 6, values scaled to between 6 and 8 will get clipped, making Algorithm 1 inherently biased. Although the proportion clipped depends on the input matrix, we can empirically check that for a wide distribution of matrices, roughly $3\%$ of the entries will get clipped.
148
+
149
+ We can make Algorithm 1 unbiased with two simple modifications, both of which can be efficiently implemented in hardware. First, we scale $V_{i} / X$ by 3/4 to prevent clipping. Then, we use stochastic rounding to quantize $Q'$ to FP4, which gives an unbiased estimate of $Q'$ . Algorithm 2 summarizes these modifications. The resulting MX matrix is an unbiased estimate of 3/4 the original matrix. Since SR is implemented with uniform independent dithering in hardware, the resulting GEMM output is an unbiased estimator of $(3 / 4)^{2} = 9 / 16$ of the correct output. To get an unbiased output, we can simply scale the high precision accumulator output by 16/9.
150
+
151
+ Lemma 3.1. Assume stochastic rounding is implemented with dithering with independent noise. Then, Algorithm 2 produces an MXFP4 matrix that is an unbiased estimate of $3/4$ of its input. Furthermore, Algorithm 3 with Algorithm 2 as a subroutine produces an unbiased estimate of $\frac{dL}{dx}$ and $\frac{dL}{dW}$ .
152
+
153
+ # 3.2 Bounding the Variance of SR with the Random Hadamard Transform
154
+
155
+ The backward pass for a linear layer $(y = xW^{T})$ requires computing $\frac{dL}{dx} = \frac{dL}{dy} W$ and $\frac{dL}{dW} = \frac{dL}{dy}^T x$ . LLMs
156
+
157
+ have been known to have activation $(x)$ and weight $(W)$ "outliers" as well as sparse gradients $\left(\frac{dL}{dy}\right)$ (Xi et al., 2023; Tseng et al., 2024a). Recall that MXFP4 quantization relies on groupwise statistics such as the largest magnitude element, so blocks with outliers will suffer from high quantization distortion and stochastic rounding variance.
158
+
159
+ Although Lemma 3.1 tells us that Algorithm 2 produces an unbiased estimate of the true GEMM, high variance estimates can still degrade model quality by effectively adding noise to the gradient estimate. To remedy this, we use the randomized Hadamard transform to concentrate gradients, activations, and weights before quantization, which asymptotically reduces the variance of the GEMM output.
160
+
161
+ The random Hadamard transform performs $x \gets HSx$ , where $x \in \mathbb{R}^{j \times k}$ , $S \in \{\pm 1\}^k$ (a random sign vector), and $H$ is the $k$ -dimensional Hadamard matrix (Halko et al., 2011). Hadamard matrices are recursively defined orthogonal matrices that satisfy the following:
162
+
163
+ $$
164
+ H _ {n} = \frac {1}{2 ^ {n / 2}} \left[ \begin{array}{c c} H _ {n - 1} & H _ {n - 1} \\ H _ {n - 1} & - H _ {n - 1} \end{array} \right], \tag {4}
165
+ $$
166
+
167
+ where $H_{1} = [1]$ . Since both $H$ and $diag(S)$ are orthogonal, the RHT is fully invertible. This means that we can apply the RHT to GEMM operands without inverting the RHT - that is, $(HSA)^T(HSB) = A^T B$ .
168
+
169
+ Theorem 3.2. Let $A$ and $B$ be two size- $b$ vectors $\in \mathbb{R}^b$ , and let $\mathcal{Q}$ perform Algorithm 2. Then, the variance of $\mathcal{Q}(A)^T\mathcal{Q}(B)$ is $\mathcal{O}(b\Delta^4\|A\|_\infty\|B\|_\infty)$ and the variance of $\mathcal{Q}(HSA)^T\mathcal{Q}(HSB)$ is, with probability $\geq (1 - \epsilon)^2$ , $\mathcal{O}(\Delta^4\|A\|\|B\|\log(2b/\epsilon))$ , where the largest gap between two consecutive representable points in $\mathcal{Q}$ 's quantizer is $\Delta$ .
170
+
171
+ Theorem 3.2 tells us that the variance of a MX matrix multiplication with respect to stochastic rounding is linear in the product of the largest magnitude elements in the operands. Applying the RHT to a vector effectively concentrates it to have a sub-Gaussian tail distribution. From Tseng et al. (2024a), we know that
172
+
173
+ $$
174
+ \mathbb{P}\left(\left| e_{i} H S x \right| \geq a\right) \leq 2 \exp\left(\frac{-a^{2} k}{2 \| x \|^{2}}\right), \tag{5}
175
+ $$
176
+
177
+ letting us bound the variance of the SR GEMM in Theorem 3.2. Specifically, applying the RHT reduces the variance from a linear dependence on blocksize to a log-dependence on blocksize, albeit with the $L_{2}$ norm of the input instead of the $L_{\infty}$ norm.
178
+
179
+ We can verify this empirically by measuring the variance of a SR GEMM with and without the RHT. Figure 2 shows the mean variance of $\mathcal{Q}(A)^T\mathcal{Q}(B)$
180
+
181
+ ![](images/ab1d337c14adbf47679f9c5873abbab2eb4be78a9a5c9004cf52d922b6665b21.jpg)
182
+ Figure 2: Mean variance of $\mathcal{Q}(A)^T\mathcal{Q}(B)$ vs. $\mathcal{Q}(HSA)^T\mathcal{Q}(HSB)$ over 4K samples of $A, B \in \mathbb{R}^b \sim \mathcal{N}(0, I)$ with proportion $p$ outliers from $\mathcal{N}(0, 5I)$ . $\mathcal{Q}$ performs Algorithm 2. Variance with the RHT grows much slower than without.
183
+
184
+ vs. $\mathcal{Q}(HSA)^T\mathcal{Q}(HSB)$ over 4K samples of $A, B \in \mathbb{R}^b \sim \mathcal{N}(0, I)$ with proportion $p$ outliers from $\mathcal{N}(0, 5I)$ , where $\mathcal{Q}$ performs Algorithm 2. That is, $A, B \sim \mathcal{N}(0, I) + \text{Bernoulli}(p) * \mathcal{N}(0, 5I)$ . As expected from Theorem 3.2, the variance grows much slower as a function of $b$ with the RHT vs. without.
185
+
186
+ However, the RHT is not free. First, observe that when computing $\frac{dL}{dW} \approx \mathcal{Q}(HS\frac{dL}{dy})^T\mathcal{Q}(HSx)$ , the RHT "mixes" along the batch dimension. In data-parallel settings (e.g. FSDP (Zhao et al., 2023) or ZeRO-3 (Rajbhandari et al., 2020)) where activations are sharded across GPUs, the full RHT would require expensive cross-GPU communication. Even with fast interconnects, this would immediately bottleneck gradient computation. Second, although Equation 4 admits an $O(n\log n)$ time matrix-vector product algorithm, the RHT step occurs in high precision. Reducing this overhead is critical - if the RHT is slower than a FP4 matmul, one should just use FP8 instead.
187
+
188
+ To solve these problems, we apply the RHT as a dense matrix multiplication over a small number of MX blocks, which makes it memory bound in the GEMM operands (see Table 5). Specifically, let the RHT block size be $g, 32|g$ . Applying this block-wise RHT as a dense matmul gives a runtime of $O((b + m)ng)$ and IO cost of $O(bn + nm + bm)$ . Since modern AI accelerators have high compute to memory ratios, this "blockwise" RHT is memory bound when $g \lesssim 256$ . Algorithm 3 summarizes how we use the RHT in the backward pass of a linear layer. Since $g$ is smaller than
189
+
190
+ Algorithm 3 MXFP4 linear layer (no bias) backward pass with the random Hadamard transform.
191
+
192
+ <table><tr><td colspan="2">Require: Gradient of output dL/dy ∈ Rb×m, activations x ∈ Rb×n, weights W ∈ Rm×n, block size g ≤ 256,32|g,g|m,g|n.</td></tr><tr><td colspan="2">1: H ← Hadamard matrix Hg ∈ Rg×g.</td></tr><tr><td colspan="2">2: Sample random sign vector S ∈ {±1}g.</td></tr><tr><td colspan="2">3: G&#x27; ← ((dL/dy).view(bm/g, g)) diag(S)H</td></tr><tr><td colspan="2">4: W&#x27; ← HTdiag(S) (W.view(g, nm/g))</td></tr><tr><td colspan="2">5: GT&#x27; ← (((dL/dy)T).view(bm/g, g)) diag(S)H</td></tr><tr><td colspan="2">6: X&#x27; ← HTdiag(S) (x.view(bn/g, g))</td></tr><tr><td colspan="2">7: dL/dx ← MXFP4_GEMM(G&#x27;, W&#x27;)</td></tr><tr><td colspan="2">8: dL/dW ← MXFP4_GEMM(GT&#x27;, X&#x27;)</td></tr><tr><td colspan="2">{Where MXFP4_GEMM forms MX groups along the reduction dimension and uses either Algorithm 1 or 2 to quantize to MXFP4.}</td></tr><tr><td colspan="2">9: if Using Algorithm 2 then</td></tr><tr><td colspan="2">10: dL/dx ← (16/9) dL/dx</td></tr><tr><td colspan="2">11: dL/dW ← (16/9) dL/dW</td></tr><tr><td colspan="2">12: end if</td></tr><tr><td colspan="2">13: return dL/dx, dL/dW</td></tr></table>
193
+
194
+ the sequence length of any reasonably large model, Algorithm 3 works as a drop-in replacement for a linear layer even in data-parallel settings. Furthermore, although lines 3-6 are written out for clarity, an efficient implementation could fuse them into lines 7 and 8, reducing costly memory accesses.
195
+
196
+ The tradeoff to doing this blockwise RHT is that equation 5 depends on $g$ ( $k$ in the equation) - the higher $g$ is, the tighter the concentration will be. However, in practice, we observe $g = 64$ is sufficient to get a tight distribution and MX can handle scale differences across blocks. Finally, note that this construction also lets us use any random orthogonal transformation. We chose the RHT since it is fast to randomize (by sampling a single $g$ -dim sign vector) and has good concentration, but other matrices could work as well.
197
+
198
+ # 4 Experiments
199
+
200
+ Our main experiments focus on pretraining GPT 345M, 1.3B, and 6.7B (Brown et al., 2020). We follow prior low precision training works and train for at least 20 billion tokens, which is sufficient to determine overall training performance on a longer full-scale run (Peng et al., 2023). We use the Megatron-LM codebase to train our models (Shoeybi et al., 2020), the publicly available GPT2 Wikipedia dataset (Neuron), and the bit-accurate Microsoft microscaling library for MX
201
+
202
+ Table 2: Final losses for GPT models trained on the GPT2 Wikipedia corpus. All models were trained with BF16 mixed precision for the forward pass.
203
+
204
+ <table><tr><td>Params.</td><td>Toks.</td><td>Bwd. Prec.</td><td>Train. Loss</td><td>Val. Loss</td></tr><tr><td>345M</td><td>33B</td><td>BF16</td><td>2.58</td><td>2.49</td></tr><tr><td>345M</td><td>33B</td><td>MXFP4</td><td>2.73</td><td>2.60</td></tr><tr><td>345M</td><td>33B</td><td>MXFP4+RHT</td><td>2.60</td><td>2.51</td></tr><tr><td>345M</td><td>33B</td><td>MXFP4+RHT+SR</td><td>2.60</td><td>2.51</td></tr><tr><td>1.3B</td><td>42B</td><td>BF16</td><td>2.28</td><td>2.32</td></tr><tr><td>1.3B</td><td>42B</td><td>MXFP4</td><td>2.44</td><td>2.40</td></tr><tr><td>1.3B</td><td>42B</td><td>MXFP4+RHT</td><td>2.30</td><td>2.33</td></tr><tr><td>1.3B</td><td>42B</td><td>MXFP4+RHT+SR</td><td>2.29</td><td>2.32</td></tr><tr><td>1.3B</td><td>42B</td><td>MXFP4+SR</td><td>2.29</td><td>2.32</td></tr><tr><td>1.3B</td><td>210B</td><td>BF16</td><td>2.06</td><td>2.29</td></tr><tr><td>1.3B</td><td>210B</td><td>MXFP4+RHT</td><td>2.09</td><td>2.31</td></tr><tr><td>1.3B</td><td>210B</td><td>MXFP4+RHT+SR</td><td>2.07</td><td>2.29</td></tr><tr><td>1.3B</td><td>210B</td><td>MXFP4+SR</td><td>2.08</td><td>2.29</td></tr><tr><td>6.7B</td><td>21B</td><td>BF16</td><td>2.04</td><td>2.27</td></tr><tr><td>6.7B</td><td>21B</td><td>MXFP4+RHT</td><td>2.05</td><td>2.28</td></tr><tr><td>6.7B</td><td>21B</td><td>MXFP4+RHT+SR</td><td>2.08</td><td>2.27</td></tr></table>
205
+
206
+ <table><tr><td>Model</td><td>ArcC</td><td>ArcE</td><td>PiQA</td><td>BoolQ</td><td>Wino</td></tr><tr><td>BF16</td><td>23.1</td><td>49.2</td><td>60.5</td><td>53.3</td><td>52.0</td></tr><tr><td>MXFP4★</td><td>22.2</td><td>47.8</td><td>61.3</td><td>59.6</td><td>49.6</td></tr><tr><td>BF16 TULU V2</td><td>25.6</td><td>50.6</td><td>62.7</td><td>59.6</td><td>51.6</td></tr><tr><td>MXFP4★ TULU V2</td><td>25.9</td><td>49.9</td><td>62.9</td><td>60.5</td><td>51.8</td></tr></table>
207
+
208
+ Table 3: GPT 6.7B model trained on 20B tokens before and after Tulu V2 fine-tuning. Both BF16 and our MXFP4+RHT+SR (MXFP4 $\star$ ) model exhibit similar performance before and after fine-tuning.
209
+
210
+ emulation (Microsoft, 2024). Since pretraining is expensive, we stopped certain experiments short when it was clear they did not match BF16. Our analysis below uses validation perplexity from a holdout set, but we observe the same behavior with training perplexity. Training perplexity plots and additional experiments can be found in the Appendix.
211
+
212
+ # 4.1 GPT Pretraining Results
213
+
214
+ Table 2 and Figure 6 show our main results with using BF16 in the forward pass and various MXFP4 constructions in the backward pass. We ablate on using the RHT only, which produces a biased but reduced-distortion GEMM, and RHT and SR, which produces an unbiased, lower variance GEMM. For GPT 1.3B, we also measure the performance of MXFP4+SR only, which gives an unbiased but higher variance GEMM. All experiments use Megatron-LM's mixed precision
215
+
216
+ ![](images/684fa6b509fc4a1b3ceb10270291d5b463c2131f9db432b64f07a8848c22c4ff.jpg)
217
+ Figure 3: GPT 345M validation perplexity curves with BF16 forward pass. With RHT and SR, MXFP4 can match the performance of BF16 in the backward pass.
218
+
219
+ ![](images/97cdc1f6fd72db532f2c99cd312cb637ef5ad73ff703db08e542f2aee878a430.jpg)
220
+ Figure 4: GPT 1.3B validation perplexity curves with BF16 forward pass. With RHT and SR, MXFP4 can match the performance of BF16 in the backward pass.
221
+
222
+ implementation with separate FP32 master weights and BF16 parameter copies. For the backward pass for decoder linear layers, the BF16 weights, activations, and gradients are quantized to MXFP4. Experiments with the RHT use $g = 64$ , which mixes across 2 MX blocks.
223
+
224
+ Table 2 shows that for shorter runs (20-40 billion tokens), using either the RHT or SR with MXFP4 is sufficient to achieve near-lossless training at all tested model sizes. However, Figure 6 shows that for longer runs (210 billion tokens), having an unbiased gradient estimator is necessary to maintain performance. Whereas using the RHT only results in an $\approx 0.1$ perplexity gap, using stochastic rounding (with or without the RHT) results in no validation perplexity gap.
225
+
226
+ Figures 3, 4 and 5 show the validation perplexity curves for the experiments in Table 2. At all scales, the MXFP4+RHT+SR curve closely tracks
227
+
228
+ ![](images/5f6fa4f7997229d6bbe1f9feb539260feaca512e95bfc91b68cbace5bbd32897.jpg)
229
+ Figure 5: GPT 6.7B validation perplexity curves with BF16 forward pass. With RHT and SR, MXFP4 can match the performance of BF16 in the backward pass. The MXFP4-only run was stopped early to save resources.
230
+
231
+ <table><tr><td>BW Pass</td><td>BF16</td><td>g=32</td><td>g=64</td><td>g=128</td><td>g=256</td></tr><tr><td>Val. PPL</td><td>11.89</td><td>12.02</td><td>12.01</td><td>11.98</td><td>11.98</td></tr></table>
232
+
233
+ Table 4: Validation perplexity for training GPT 345M on 33B tokens with various RHT blocksizes. Increasing the RHT block size improves performance by reducing the variance of stochastic rounding.
234
+
235
+ BF16. In contrast, although the final performance of MXFP4+SR matches MXFP4+SR+RHT, MXFP4+SR exhibits slower initial convergence than BF16 and MXFP4+RHT+SR. We suspect that this is due to loss of gradient information without the RHT. Although using only SR will give an unbiased gradient estimator, small values will still get stochastically flushed to 0 (Equation 1), resulting in loss of gradient information. In contrast, the RHT transforms the gradient to a different space. This reduces variance and also significantly reduces the probability that a single gradient entry in the original space will be set to 0. To verify this, Table 4 shows an ablation on the RHT block size - increasing the block size improves quality.
236
+
237
+ These figures also include curves for using pure MXFP4 (no RHT and no SR) MP in the backward pass. Using only MXFP4 (the orange curve) results in significant degradation and a large perplexity gap at all sizes. Even further, if we consider that FP8 is nearly lossless (Peng et al., 2023) vs. BF16 and is only an estimated $30 - 40\%$ slower end-to-end than pure MXFP4, then pure MXFP4 isn't even "worth it." For a fixed amount of wall clock time, simply training with FP8 for fewer steps would give a better model than using pure MXFP4. In contrast, our techniques close the
238
+
239
+ ![](images/5b0f53e822a83437a2c3ba57bec60e83840600b42f37b545a17be66408e4ade0.jpg)
240
+ Figure 6: Validation perplexity for training GPT 1.3B for 210 billion tokens, or $5 \times$ longer than in Table 2. All experiments used BF16 in the forward pass and the specified backward precision.
241
+
242
+ <table><tr><td>BW Pass</td><td>FP16</td><td>INT8
243
+ NO RHT</td><td>INT4
244
+ NO RHT</td><td>+ RHT
245
+ G=64</td><td>+ RHT
246
+ G=128</td><td>+ RHT
247
+ G=256</td><td>+ RHT
248
+ G=1024 DENSE</td><td>+ RHT
249
+ G=1024 O(n log n)</td></tr><tr><td>E2E tok/s</td><td>46983</td><td>55469</td><td>67306</td><td>64335</td><td>64171</td><td>63979</td><td>61186</td><td>62640</td></tr><tr><td>BW tok/s</td><td>72563</td><td>94688</td><td>133952</td><td>123056</td><td>122734</td><td>121823</td><td>112299</td><td>120495</td></tr></table>
250
+
251
+ Table 5: Throughput for a FP16 forward pass and specified backward pass of a Llama 2 70B decoder layer. Measured on a NVIDIA A100; see Section 4.2 for more details. Since the A100 can perform INT4 GEMMs $4 \times$ faster than FP16 GEMMs, these numbers represent the expected speedup of MXFP4 on supported hardware.
252
+
253
+ gap to BF16 and FP8, making MXFP4 practical for training. Our techniques are also compatible with FP8 forward passes (Figure 7, more details in Appendix), further pushing the speed-quality tradeoff curve.
254
+
255
+ To further evaluate our MXFP4 models, we ran zeroshot evaluation for downstream tasks on our 20B token GPT 6.7B models. Both the BF16 and MXFP4+RHT+SR models perform around the same. To test how well these models can be finetuned, we fine-tuned them using the publicly-available Tulu V2 dataset (657M tokens) and codebase (Ivison et al., 2023). We used the hyperparameters in the Tulu V2 codebase and trained for 5 epochs with BF16/FP32 mixed precision. The BF16 model reached a final training perplexity of 1.96, and the MXFP4+RHT+SR model 1.98. Like before finetuning, both models achieve similar zeroshot performance, indicating that they are of similar quality. Table 3 summarizes these results.
256
+
257
+ # 4.2 Overhead Calculations
258
+
259
+ The goal of MXFP4 training is to achieve a wall-clock time speedup over FP8 training. Unfortunately, we do not have access to FP4 hardware yet so we cannot measure empirical wall-clock speedups over FP8. However, we can estimate the overhead of the RHT and stochastic rounding with proxy benchmarks.
260
+
261
+ Our RHT construction operates on a small "tile" in
262
+
263
+ the operand and is memory bound, so we can conceivably fuse it with the MXFP4 GEMM and avoid writing its output to memory. We can estimate the performance of this setup in two ways. First, we measured the overhead of RHT-GEMM kernels for FP8. Specifically, we timed $\mathcal{Q}(A)\mathcal{Q}(B)^T$ with $\mathcal{Q}$ quantizing to FP8 (E4M3), $A \in \mathrm{BF16}^{n \times k}$ , $B \in \mathrm{BF16}^{m \times k}$ , with and without the RHT along the $k$ dimension. We generated Triton (Tillet et al., 2019) kernels with torch.compile (Team), an RHT size of $g = 64$ , and benchmarked 7B and 70B-sized matrices: $(m, n, k) = (32768, 8192, 8192)$ and (16384, 28672, 28672). On a NVIDIA H100 GPU, the RHT adds $9.7\%$ overhead for the 7B-sized setup and $1.6\%$ for the 70B-sized setup. Assuming MXFP4 has twice the throughput of FP8, these numbers would double to $19.4\%$ and $3.2\%$ , respectively, which is still faster than a FP8 GEMM.
264
+
265
+ Second, we measured the overhead of the RHT on the HuggingFace implementation (Wolf et al., 2020) of a single Llama 2 70B decoder layer on a NVIDIA A100 GPU. Specifically, we report the end-to-end tokens per second for computing the forward pass in FP16 and backward pass in either FP16 or INT4, which has the same hardware speedup $(4\times)$ on the NVIDIA A100 vs. FP16 as MXFP4 has on modern hardware. We also include INT8 as a proxy for the expected speedup of a FP8 backward pass. We use a batch size of 4 sequences with 4K tokens each (16K tokens/batch), Flash Attention 2 (Dao, 2024),
266
+
267
+ ![](images/3521f2044af262340c39929169ce22bdf2690225ab71c0c8f5019920cf2a8fd4.jpg)
268
+ Figure 7: GPT 1.3B and 6.7B perplexity curves with a FP8 forward pass, our MXFP4 backward pass, and the same settings as Figures 4 and 5. Our method is compatible with FP8 forward passes for additional speedups. See Appendix for details.
269
+
270
+ ![](images/36af69eae99c84e5c5dcb4e8f079b9fc662914def531f326cff6c0c9a28dc8dc.jpg)
271
+
272
+ torch.compile, and the CUTLASS INT4 and INT8 GEMM kernels. We were unable to use CUDA graphs since the HuggingFace implementation is not compatible with CUDA graphs; we expect CUDA graphs to improve speedup ratios by masking kernel launch overhead.
273
+
274
+ Table 5 summarizes these results. End to end with a FP16 FW pass, an INT4+RHT backward pass is over $40\%$ faster than a FP16 backward pass and over $20\%$ faster than an INT8 backward pass. If we only consider the backward pass, INT4+RHT is $\approx 70\%$ faster than a FP16 backward pass and $\approx 30\%$ faster than a INT8 backward pass. The HuggingFace Llama implementation is not known to be fast, so a more efficient implementation would achieve better INT4 and INT8 speedups over FP16. Table 5 also shows that the RHT adds less than $5\%$ E2E overhead and is memory bound in the operands until $g \approx 256$ . Interestingly, the recently released $\mathcal{O}(n \log n)$ HadaCore kernel (Agarwal et al., 2024) recovers most of the dense GEMM penalty at $g = 1024$ , but is still slower than smaller $g$ .
275
+
276
+ To measure the overhead of stochastic rounding, we used an Amazon Trainium 1 chip (EC2 Trn1 instance), which is one of the few widely available chips that has dedicated stochastic rounding hardware (Amazon). Our experiments show that for most matrix sizes, using SR to quantize GEMM operands from FP32 to BF16 adds less than $2\%$ overhead over the BF16 GEMM itself. Assuming a $4\times$ increase in GEMM throughput when going from BF16 to FP4, this would mean SR adds less than $10\%$ overhead.
277
+
278
+ # 5 Conclusion
279
+
280
+ While hardware support for low precision datatypes continues to advance, it is becoming increasingly difficult to train with these datatypes without suffering from significant model degradation. In this work, we demonstrate the first MXFP4 training recipe that achieves near-lossless model quality vs. FP32/BF16 mixed precision training. Our method hinges on computing low variance, unbiased gradient estimates for decoder linear layer, which enables us to make more accurate model updates. To do this, we propose using stochastic rounding (SR) and the random Hadamard transform (RHT). Stochastic rounding produces unbiased gradient estimates, and the RHT reduces the variance of SR and the chance of losing gradient information from underflow. Our experiments pretraining GPT models up to 6.7B show that both the RHT and SR are crucial for near-lossless MXFP4 training. Finally, our benchmarks show that our method can be implemented with minimal overhead, giving an estimated $30\%$ speedup over FP8 and $70\%$ speedup over BF16 in the backward pass.
281
+
282
+ # Acknowledgements
283
+
284
+ We thank Chris De Sa for valuable feedback. We also thank Yida Wang and George Karypis for their support within AWS AI Research.
285
+
286
+ # References
287
+
288
+ Krish Agarwal, Rishi Astra, Adnan Hoque, Mudhakar Srivatsa, Raghu Ganti, Less Wright, and Sijia Chen. Hadacore: Tensor core accelerated hadamard transform kernel, 2024. URL https://arxiv.org/abs/2412.08832.
289
+ Amazon. Trainium architecture. URL https://awsdocs-neuron.readthedocs-hosted.com/en/latest/general/arch/neuron-hardware/trainium.html.
290
+ Charlie Blake, Douglas Orr, and Carlo Luschi. Unit scaling: out-of-the-box low-precision training. In Proceedings of the 40th International Conference on Machine Learning, ICML'23. JMLR.org, 2023.
291
+ Tom B. Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel M. Ziegler, Jeffrey Wu, Clemens Winter, Christopher Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners, 2020. URL https://arxiv.org/abs/2005.14165.
292
+ Adam Casson. Transformer flops, 2023. URL https://www.adamcasson.com/posts/transformer-flops.
293
+ Matteo Croci, Massimiliano Fasi, Nicholas J. Higham, Theo Mary, and Mantas Mikaitis. Stochastic rounding: implementation, error analysis and applications. Royal Society Open Science, 9(3), March 2022. ISSN 2054-5703. doi: 10.1098/rsos.211631. URL http://dx.doi.org/10.1098/rsos.211631.
294
+ Tri Dao. FlashAttention-2: Faster attention with better parallelism and work partitioning. In International Conference on Learning Representations (ICLR), 2024.
295
+ Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. FlashAttention: Fast and memory-efficient exact attention with IO-awareness. In Advances in Neural Information Processing Systems (NeurIPS), 2022.
296
+ Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024.
297
+
298
+ NVIDIA Transformer Engine. Transformer engine 1.11.0. URL https://docs.nvidia.com/deeplearning/transformer-engine/user-guide/index.html.
299
+ N. Halko, P. G. Martinsson, and J. A. Tropp. Finding structure with randomness: Probabilistic algorithms for constructing approximate matrix decompositions. SIAM Review, 53(2):217-288, 2011. doi: 10.1137/090771806. URL https://doi.org/10.1137/090771806.
300
+ IEEE. IEEE standard for floating-point arithmetic. IEEE Std 754-2019 (Revision of IEEE 754-2008), pages 1-84, 2019. doi: 10.1109/IEEEESTD.2019.8766229.
301
+ Hamish Ivison, Yizhong Wang, Valentina Pyatkin, Nathan Lambert, Matthew Peters, Pradeep Dasigi, Joel Jang, David Wadden, Noah A. Smith, Iz Beltagy, and Hannaneh Hajishirzi. Camels in a changing climate: Enhancing lm adaptation with tulu 2, 2023. URL https://arxiv.org/abs/2311.10702.
302
+ Justin Johnson, Apr 2017. URL https://cs231n.stanford.edu/handouts/linear-backprop.pdf.
303
+ Tanishq Kumar, Zachary Ankner, Benjamin Frederick Spector, Blake Bordelon, Niklas Muennighoff, Mansheej Paul, Cengiz Pehlevan, Christopher Re, and Aditi Raghunathan. Scaling laws for precision. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=wg1PCg3CUP.
304
+ Paulius Micikevicius, Sharan Narang, Jonah Alben, Gregory Diamos, Erich Elsen, David Garcia, Boris Ginsburg, Michael Houston, Oleksii Kuchaiev, Ganesh Venkatesh, and Hao Wu. Mixed precision training. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=r1gs9JgRZ.
305
+ Microsoft. MX PyTorch emulation library, 2024. URL https://github.com/microsoft/microscaling.
306
+ AWS Neuron. Gpt wikipedia dataset. URL https://github.com/aws-neuron/aws-neuron-parallelcluster-samples/blob/master/examples/jobs/neuronx-nemo-megatron-gpt-job.md.
307
+ NVIDIA. Ptx isa 8.7, a. URL https://docs.nvidia.com/cuda/pdf/ptx_isa_8.7.pdf.
308
+ NVIDIA. Transformer engine, b. URL https://github.com/NVIDIA/TransformerEngine.
309
+ NVIDIA. Nvidia blackwell architecture technical brief, 2024a. URL https://resources.nvidia.com/en-us-blackwell-architecture.
310
+
311
+ NVIDIA. Nvidia blackwell platform sets new llm inference records in mlperf inference v4.1, Sep 2024b. URL https://developer.nvidia.com/blog/nvidia-blackwell-platform-sets-new-llm-inference-records-in-mlperf-inference-v4-1/.
312
+ Houwen Peng, Kan Wu, Yixuan Wei, Guoshuai Zhao, Yuxiang Yang, Ze Liu, Yifan Xiong, Ziyue Yang, Bolin Ni, Jingcheng Hu, Ruihang Li, Miaosen Zhang, Chen Li, Jia Ning, Ruizhe Wang, Zheng Zhang, Shuguang Liu, Joe Chau, Han Hu, and Peng Cheng. Fp8-lm: Training fp8 large language models, 2023. URL https://arxiv.org/abs/2310.18313.
313
+ Open Compute Project, 2023. URL https://www.opencompute.org/documents/ocp-microscaling-formats-mx-v1-0-spec-final-pdf.
314
+ Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, and Yuxiong He. Zero: Memory optimizations toward training trillion parameter models, 2020. URL http://arxiv.org/abs/1910.02054.
315
+ Bita Darvish Rouhani, Ritchie Zhao, Ankit More, Mathew Hall, Alireza Khodamoradi, Summer Deng, Dhruv Choudhary, Marius Cornea, Eric Dellinger, Kristof Denolf, Stosic Dusan, Venmugil Elango, Maximilian Golub, Alexander Heinecke, Phil James-Roxby, Dharmesh Jani, Gaurav Kolhe, Martin Langhammer, Ada Li, Levi Melnick, Maral Mesmakhosroshahi, Andres Rodriguez, Michael Schulte, Rasoul Shafipour, Lei Shao, Michael Siu, Pradeep Dubey, Paulius Micikevicius, Maxim Naumov, Colin Verrilli, Ralph Wittig, Doug Burger, and Eric Chung. Microscaling data formats for deep learning, 2023. URL https://arxiv.org/abs/2310.10537.
316
+ Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. Megatron-lm: Training multi-billion parameter language models using model parallelism, 2020. URL https://arxiv.org/abs/1909.08053.
317
+ PyTorch Team. Pytorch. URL https://pytorch.org.
318
+ Vijay Thakkar, Pradeep Ramani, Cris Cecka, Aniket Shivam, Honghao Lu, Ethan Yan, Jack Kosaian, Mark Hoemmen, Haicheng Wu, Andrew Kerr, Matt Nicely, Duane Merrill, Dustyn Blasig, Fengqi Qiao, Piotr Majcher, Paul Springer, Markus Hohnerbach, Jin Wang, and Manish Gupta. CUTLASS, January 2023. URL https://github.com/NVIDIA/cutlass.
319
+ Philippe Tillet, H. T. Kung, and David Cox. Triton: an intermediate language and compiler for tiled neural network computations. In Proceedings of the 3rd ACM SIGPLAN International Workshop on Machine
320
+
321
+ Learning and Programming Languages, MAPL 2019, page 10-19, New York, NY, USA, 2019. Association for Computing Machinery. ISBN 9781450367196. doi: 10.1145/3315508.3329973. URL https://doi.org/10.1145/3315508.3329973.
322
+ Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and finetuned chat models. arXiv preprint arXiv:2307.09288, 2023.
323
+ Albert Tseng, Jerry Chee, Qingyao Sun, Volodymyr Kuleshov, and Christopher De Sa. QuIP#: Even better LLM quantization with hadamard incoherence and lattice codebooks. In Ruslan Salakhutdinov, Zico Kolter, Katherine Heller, Adrian Weller, Nuria Oliver, Jonathan Scarlett, and Felix Berkenkamp, editors, Proceedings of the 41st International Conference on Machine Learning, volume 235 of Proceedings of Machine Learning Research, pages 48630-48656. PMLR, 21-27 Jul 2024a. URL https://proceedings.mlr.press/v235/tseng24a.html.
324
+ Albert Tseng, Qingyao Sun, David Hou, and Christopher De Sa. Qtip: Quantization with trellises and incoherence processing. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024b.
325
+ Ruizhe Wang, Yeyun Gong, Xiao Liu, Guoshuai Zhao, Ziyue Yang, Baining Guo, Zhengjun Zha, and Peng Cheng. Optimizing large language model training using fp4 quantization, 2025. URL https://arxiv.org/abs/2501.17116.
326
+ Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. Hugging-face's transformers: State-of-the-art natural language processing, 2020. URL https://arxiv.org/abs/1910.03771.
327
+ Haocheng Xi, ChangHao Li, Jianfei Chen, and Jun Zhu. Training transformers with 4-bit integers. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. URL https://openreview.net/forum?id=H9hW1fMT60.
328
+ Tao Yu, Gaurav Gupta, Karthick Gopalswamy, Amith Mamidala, Hao Zhou, Jeffrey Huynh, Youngsuk Park, Ron Diamant, Anoop Deoras, and Luke
329
+
330
+ Huan. Collage: Light-weight low-precision strategy for llm training. In Proceedings of the 41st International Conference on Machine Learning (ICML 2024). PMLR, 2024.
331
+ Yanli Zhao, Andrew Gu, Rohan Varma, Liang Luo, Chien-Chin Huang, Min Xu, Less Wright, Hamid Shojanazeri, Myle Ott, Sam Shleifer, Alban Desmaison, Can Balioglu, Pritam Damania, Bernard Nguyen, Geeta Chauhan, Yuchen Hao, Ajit Mathews, and Shen Li. Pytorch fsdp: Experiences on scaling fully sharded data parallel, 2023. URL https://arxiv.org/abs/2304.11277.
332
+
333
+ # Training LLMs with MXFP4: Supplementary Materials
334
+
335
+ # 6 Additional Results
336
+
337
+ # 6.1 FP8 Forward Pass Results
338
+
339
+ This section contains experiments using mixed-precision FP8 in the forward pass and MXFP4 in the backward pass. Prior works have shown that mixed-precision FP8 forward and backward passes can be close to lossless over mixed-precision BF16 training (Peng et al., 2023; NVIDIA, b). To test if MXFP4 backward passes are still practical with FP8 forward passes, we trained GPT 1.3B and 6.7B models with NVIDIA's TransformerEngine (TE) FP8 (E4M3) implementation (NVIDIA, b) in the forward pass and our MXFP4 formulation in the backward pass. We did not test GPT 345M since TE FP8 already had a $>0.1$ validation perplexity gap vs. BF16 in our experiments. For GPT 6.7B, since we did not have access to FP8-capable hardware with fast interconnects necessary for tensor-parallel training, we emulated FP8 matrix multiplications by dequantizing FP8 GEMM operands into BF16 and performing a BF16 GEMM. While this is not bit-accurate vs. a FP8 GEMM, the relative error of the output is $\approx 0.3\%$ for random Gaussian inputs. Furthermore, this is essentially how PyTorch emulates FP8 GEMMs (Team). At both model scales, we find that FP8 forward passes and MXFP4 backward passes are sufficient to essentially match BF16 training.
340
+
341
+ ![](images/c9ccd3f984d7840613aeea4e53a7fcf95ac0c49c276177cf627ce92d3bc7b837.jpg)
342
+ Figure 8: Training perplexity curves for GPT 1.3B for 33 billion tokens. Using FP8 in the forward pass and MXFP4 in the backward pass does not result in noticeable degradation.
343
+
344
+ ![](images/9c0a02191c5208f4824b1d9fe788215eafe11fcd13d9948fbaa2969e996dace8.jpg)
345
+ Figure 9: Training perplexity curves for GPT 6.7B for the first 13 billion tokens of a 20 billion token run. Due to time constraints and the cost of training a 6.7B parameter model, we were unable to include the full run. Like 1.3B, using FP8 in the forward pass and MXFP4 in the backward pass does not result in noticeable degradation.
346
+
347
+ # 6.2 GPT 345M Validation Curves with Stochastic Rounding Only
348
+
349
+ This plot is the same as those from the main body except that it includes an experiment with stochastic rounding only (no RHT). Like 1.3B, SR starts off "worse" than the RHT variants but is able to match their performance at the end of the training run.
350
+
351
+ ![](images/0287e407fe53de27a9b709d78bfd79b293f73b4f41af35ded4b6fd3573d2c4bf.jpg)
352
+ Figure 10: Validation perplexity for training GPT 345M for 33 billion tokens. All experiments used BF16 in the forward pass and the specified backward precision. This plot is the same as Figure 3 in the main body except that it adds an experiment with MXFP4+SR only.
353
+
354
+ # 6.3 Training Curve for GPT 1.3B
355
+
356
+ ![](images/d74db1ec7a5156c1e9c348b5b7b71b1272f0409f939f0210a116b79aa03e173e.jpg)
357
+ Figure 11: Training perplexity for training GPT 1.3B for 40 billion tokens. All curves used BF16 in the forward pass and the specified backward precision.
358
+
359
+ # 6.4 Training Perplexity for GPT 1.3B on 210 Billion Tokens
360
+
361
+ This section contains the full 210B token GPT 1.3B run referenced in Section 4 of the main body. There is an approximately 0.1 validation perplexity gap between MXFP4+RHT only (10.02 ppl) and BF16 (9.92 ppl), whereas MXFP4+RHT+SR matches BF16 (9.90 ppl). This suggests that stochastic rounding is important for near-lossless full-scale FP4 training.
362
+
363
+ ![](images/f2dee27a0649a7a2592134e4b947c19885fe7580773615457a5171019a0ea866.jpg)
364
+ Figure 12: Validation perplexity for training GPT 1.3B for 210 billion tokens. All experiments used BF16 in the forward pass and the specified backward precision.
365
+
366
+ ![](images/1606ca81431e9e255fb66360467ca4e8ae8bde95d896e769a6dc6e441ae7d7bc.jpg)
367
+ Figure 13: Training perplexity for training GPT 1.3B for 210 billion tokens. All experiments used BF16 in the forward pass and the specified backward precision. The BF16 and MXFP4+RHT curves have lower variance due to a configuration difference with the logger.
368
+
369
+ # 6.5 Training Curve for GPT 6.7B
370
+
371
+ ![](images/06fe541aefe913f38de02453d8c682d13c1fe68e8b5a1127f07a9f3b1f743a44.jpg)
372
+ Figure 14: Training perplexity for training GPT 6.7B for 20 billion tokens. All experiments used BF16 in the forward pass and the specified backward precision.
373
+
374
+ # 7 Experimental Setup Details
375
+
376
+ All experiments were run on AWS P4 and G6e EC2 instances. Our code was based on the Megatron-LM codebase at Github commit a4ad305d4b117217141730b9b18af52dda069450 and the Microsoft microscaling codebase at Github commit 7bc41952de394f5cc5e782bef132e7c7542eb4e4. We used the NVIDIA Pytorch +
377
+
378
+ Ubuntu 24.04 docker image, which contains a version of Transformer Engine 1.5 for the FP8 experiments. All models were trained with the AdamW optimizer, FlashAttention (Dao et al., 2022), and the following hyperparameters:
379
+
380
+ <table><tr><td>Hyperparameter</td><td>GPT 345M</td><td>GPT 1.3B</td><td>GPT 6.7B</td></tr><tr><td>Decoder Layers</td><td>24</td><td>24</td><td>32</td></tr><tr><td>Hidden Size</td><td>1024</td><td>2048</td><td>4096</td></tr><tr><td>Attention Heads</td><td>16</td><td>16</td><td>32</td></tr><tr><td>Context Length</td><td>1024</td><td>2048</td><td>2048</td></tr><tr><td>Max. Positional Embeddings</td><td>1024</td><td>2048</td><td>2048</td></tr><tr><td>Batch Size</td><td>64</td><td>1024</td><td>256</td></tr><tr><td>Learning Rate (LR)</td><td>0.00015</td><td>0.0002</td><td>0.00012</td></tr><tr><td>Training Iterations</td><td>500000</td><td>20000</td><td>40000</td></tr><tr><td>LR Scheduler</td><td>Cosine</td><td>Cosine</td><td>Cosine</td></tr><tr><td>LR Decay Iterations</td><td>320000</td><td>20000</td><td>40000</td></tr><tr><td>Minimum LR</td><td>1e-5</td><td>2e-5</td><td>1.2e-5</td></tr><tr><td>Weight Decay</td><td>1e-2</td><td>0.1</td><td>0.1</td></tr><tr><td>LR Warmup Fraction</td><td>0.01</td><td>0.01</td><td>0.01</td></tr><tr><td>Gradient Clipping</td><td>1.0</td><td>1.0</td><td>1.0</td></tr></table>
381
+
382
+ # 8 Proof of Lemma 3.1
383
+
384
+ Lemma 3.1. Assume stochastic rounding is implemented with dithering with independent noise. Then, Algorithm 2 produces an MXFP4 matrix that is an unbiased estimate of $\frac{3}{4}$ its input. Furthermore, Algorithm 3 with Algorithm 2 as a subroutine produces an unbiased estimate of $\frac{dL}{dx}$ and $\frac{dL}{dW}$ .
385
+
386
+ Proof. First, we show that Algorithm 2 produces an unbiased MXFP4 estimate of $\frac{3}{4}$ the input vector $v$ . Let $v \in \mathbb{R}^g$ , where $g$ is the MX group size. The input to stochastic_round_to_FP4 is given by $w = \frac{3}{4} v / X$ , where $X = 2^{\lfloor \log_2(\max(|v|)) \rfloor - 2}$ . Let $m = \max(|v|)$ . Observe that the largest magnitude element of $w$ is
387
+
388
+ $$
389
+ \frac {3}{4} \frac {m}{2 ^ {\lfloor \log_ {2} (m) \rfloor - 2}} < \frac {3}{4} \frac {m}{2 ^ {\log_ {2} (m) - 3}} = \frac {3}{4} \times 8 = 6
390
+ $$
391
+
392
+ By definition, stochastic_round_to_FP4(x) produces an unbiased estimate of $x$ as long as $x$ is "within range" - i.e. it does not overflow outside of the range of representable values in FP4. Since the maximum normal in FP4 is 6, stochastic_round_to_FP4(w) will give an unbiased FP4 estimate of $\frac{3}{4} v / X$ . Finally, from linearity of expectation, $X *$ stochastic_round_to_FP4(w) gives an unbiased estimate of $\frac{3}{4} v$ , as desired.
393
+
394
+ Now, we show that Algorithm 3 produces unbiased estimates of $\frac{dL}{dx}$ and $\frac{dL}{dW}$ . Let $C = \mathsf{M} \mathsf{X} \mathsf{F} \mathsf{P} \mathsf{4\_G} \mathsf{E} \mathsf{M} \mathsf{M}(A, B^T)$ , where $A \in \mathbb{R}^{b \times n}$ and $B \in \mathbb{R}^{m \times n}$ , and $g|n$ . We have that
395
+
396
+ $$
397
+ \begin{array}{l} \mathbb {E} \left[ C _ {i j} \right] = \mathbb {E} \left[ \sum_ {k = 0} ^ {n / g} \left(X _ {A _ {i, k g: (k + 1) g}} X _ {B _ {j, k g: (k + 1) g}} \sum_ {l = 0} ^ {g} \left(A _ {i, k g: (k + 1) g} ^ {F P 4}\right) _ {l} \left(B _ {j, k g: (k + 1) g} ^ {F P 4}\right) _ {l}\right) \right] (6) \\ = \sum_ {k = 0} ^ {n / g} \left(X _ {A _ {i, k g: (k + 1) g}} X _ {B _ {j, k g: (k + 1) g}} \sum_ {l = 0} ^ {g} \mathbb {E} \left[ \left(A _ {i, k g: (k + 1) g} ^ {F P 4}\right) _ {l} \left(B _ {j, k g: (k + 1) g} ^ {F P 4}\right) _ {l} \right]\right) (7) \\ \end{array}
398
+ $$
399
+
400
+ Where $A_{i,kg:(k+1)g}$ denotes the $k$ -th size $g$ vector of the $i$ -th row of $A$ , $X_{A_{i,kg:(k+1)g}}$ is the scale of applying Algorithm 2 to $A_{i,kg:(k+1)g}$ , and $A_{i,kg:(k+1)g}^{FP4}$ is the FP4 component of applying Algorithm 2 to $A_{i,kg:(k+1)g}$ . Since stochastic rounding is implemented with independent noise, $A_{i,kg:(k+1)g}^{FP4}$ and $B_{j,kg:(k+1)g}^{FP4}$ are independent random
401
+
402
+ variables. Thus,
403
+
404
+ $$
405
+ \begin{array}{l} \mathbb {E} \left[ C _ {i j} \right] = \sum_ {k = 0} ^ {n / g} \left(X _ {A _ {i, k g: (k + 1) g}} X _ {B _ {j, k g: (k + 1) g}} \sum_ {l = 0} ^ {g} \mathbb {E} \left[ \left(A _ {i, k g: (k + 1) g} ^ {F P 4}\right) _ {l} \left(B _ {j, k g: (k + 1) g} ^ {F P 4}\right) _ {l} \right]\right) (8) \\ = \sum_ {k = 0} ^ {n / g} \left(X _ {A _ {i, k g: (k + 1) g}} X _ {B _ {j, k g: (k + 1) g}} \sum_ {l = 0} ^ {g} \mathbb {E} \left[ \left(A _ {i, k g: (k + 1) g} ^ {F P 4}\right) _ {l} \right] \mathbb {E} \left[ \left(B _ {j, k g: (k + 1) g} ^ {F P 4}\right) _ {l} \right]\right) (9) \\ = \sum_ {k = 0} ^ {n / g} \left(X _ {A _ {i, k g: (k + 1) g}} X _ {B _ {j, k g: (k + 1) g}} \sum_ {l = 0} ^ {g} \frac {3}{4} \frac {\left(A _ {i , k g : (k + 1) g}\right) _ {l}}{X _ {A _ {i , k g: (k + 1) g}}} \frac {3}{4} \frac {\left(B _ {j , k g : (k + 1) g}\right) _ {l}}{X _ {B _ {j , k g: (k + 1) g}}}\right) (10) \\ = \frac {9}{1 6} \sum_ {h = 0} ^ {n} A _ {i h} B _ {j h} = \frac {9}{1 6} \left(A B ^ {T}\right) _ {i j} (11) \\ \end{array}
406
+ $$
407
+
408
+ For $\frac{dL}{dx}$ , $A = \frac{dL}{dy} \mathrm{diag}(S)H$ and $B = W^T \mathrm{diag}(S)H$ , where $H$ is the block-diagonal "small" Hadamard matrix constructed in Section 3.2. Here, $\mathbb{E}\left[\mathsf{MXFP4\_GEMM}(A,B^T)\right] = \frac{9}{16} \frac{dL}{dy} \mathrm{diag}(S)HH^T \mathrm{diag}(S)W = \frac{9}{16} \frac{dL}{dy} W$ . For $\frac{dL}{dW}$ , $A = \frac{dL}{dy}^T \mathrm{diag}(S)H$ and $B = x^T \mathrm{diag}(S)H$ . Here, $\mathbb{E}\left[\mathsf{MXFP4\_GEMM}(A,B^T)\right] = \frac{9}{16} \frac{dL}{dy}^T \mathrm{diag}(S)HH^T \mathrm{diag}(S)x = \frac{9}{16} \frac{dL}{dy}^T x$ . Finally, scaling both values by 16/9 in lines 10 and 11 gives the desired unbiased gradient estimators.
409
+
410
+ ![](images/4fc9c71cf9c8c05de8454c328c23cbafb81cfda4d2cbb350a516a8989edc9d4e.jpg)
411
+
412
+ # 9 Bounding the variance of SR with the RHT
413
+
414
+ Theorem 3.2. Let $A$ and $B$ be two size- $b$ vectors $\in \mathbb{R}^b$ , and let $\mathcal{Q}$ perform Algorithm 2. Then, the variance of $\mathcal{Q}(A)^T\mathcal{Q}(B)$ is $\mathcal{O}(b\Delta^4\|A\|_{\infty}\|B\|_{\infty})$ and the variance of $\mathcal{Q}(HSA)^T\mathcal{Q}(HSB)$ is, with probability $\geq (1 - \epsilon)^2$ , $\mathcal{O}(\Delta^4\|A\|\|B\|\log(2b/\epsilon))$ , where the largest gap between two consecutive representable points in $\mathcal{Q}$ 's quantizer is $\Delta$ .
415
+
416
+ Proof. Consider two vectors of size $b$ : $A, B \in \mathbb{R}^b$ . Then, the output of Algorithm 2 on $A$ is a scale $X_A$ and vector $Q_A$ such that $\mathbb{E}[Q_{A_i}] = A_i / X_A$ and the expectation is taken over runs of Algorithm 2. Likewise, the output of Algorithm 2 on $B$ is a scale $X_B$ and vector $Q_B$ s.t. $\mathbb{E}[Q_{B_i}] = B_i / X_B$ . Let $C = X_A X_B \sum_{i=1}^{b} Q_{A_i} Q_{B_i}$ . Since stochastic rounding is implemented with dithering on $A$ and $B$ with independent random noise,
417
+
418
+ $$
419
+ \begin{array}{l} \operatorname {V a r} (C) = X _ {A} X _ {B} \sum_ {i = 1} ^ {b} \operatorname {V a r} \left(Q _ {A _ {i}} Q _ {B _ {i}}\right) (12) \\ = X _ {A} X _ {B} \left(\sum_ {i = 1} ^ {b} \operatorname {V a r} \left(Q _ {A i}\right) \operatorname {V a r} \left(Q _ {B i}\right) + \operatorname {V a r} \left(Q _ {A i}\right) \mathrm {E} \left(Q _ {B i}\right) ^ {2} + \operatorname {V a r} \left(Q _ {B i}\right) \mathrm {E} \left(Q _ {A i}\right) ^ {2}\right) (13) \\ = X _ {A} X _ {B} \left(\sum_ {i = 1} ^ {b} \operatorname {V a r} \left(Q _ {A i}\right) \operatorname {V a r} \left(Q _ {B i}\right) + \operatorname {V a r} \left(Q _ {A i}\right) \left(\frac {B _ {i}}{X _ {B}}\right) ^ {2} + \operatorname {V a r} \left(Q _ {B i}\right) \left(\frac {A _ {i}}{X _ {A}}\right) ^ {2}\right) (14) \\ = \sum_ {i = 1} ^ {b} X _ {A} X _ {B} \operatorname {V a r} \left(Q _ {A _ {i}}\right) \operatorname {V a r} \left(Q _ {B _ {i}}\right) + \operatorname {V a r} \left(Q _ {A _ {i}}\right) \left(\frac {X _ {A} B _ {i} ^ {2}}{X _ {B}}\right) + \operatorname {V a r} \left(Q _ {B _ {i}}\right) \left(\frac {X _ {B} A _ {i} ^ {2}}{X _ {A}}\right). (15) \\ \end{array}
420
+ $$
421
+
422
+ Let $\alpha = A_{i} / X_{A}$ . Since $Q_{A_i}$ is the output of stochastic rounding to FP4, $Q_{A_i}$ takes on values $f(\alpha)$ with probability $\frac{c(\alpha) - \alpha}{c(\alpha) - f(\alpha)}$ and $c(\alpha)$ with probability $\frac{\alpha - f(\alpha)}{c(\alpha) - f(\alpha)}$ , where $f(\alpha)$ denotes the largest representable FP4 value $\leq \alpha$ and $c(\alpha)$ denotes the smallest representable FP4 value $\geq \alpha$ . Observe that $f(\alpha)$ and $c(\alpha)$ are both guaranteed to exist due to line 4 in Algorithm 2. Then,
423
+
424
+ $$
425
+ \begin{array}{l} \operatorname {V a r} \left(Q _ {A _ {i}}\right) = \frac {f (\alpha) ^ {2} (c (\alpha) - \alpha) + c (\alpha) ^ {2} (\alpha - f (\alpha))}{c (\alpha) - f (\alpha)} - \alpha^ {2} (16) \\ = \frac {(c (\alpha) ^ {2} - f (\alpha) ^ {2}) \alpha + (f (\alpha) - c (\alpha)) f (\alpha) c (\alpha)}{c (\alpha) - f (\alpha)} - \alpha^ {2} (17) \\ = (c (\alpha) + f (\alpha)) \alpha - f (\alpha) c (\alpha) - \alpha^ {2}. (18) \\ \end{array}
426
+ $$
427
+
428
+ Let $\delta^{+} = c(\alpha) - \alpha$ and $\delta^{-} = f(\alpha) - \alpha$ . Then,
429
+
430
+ $$
431
+ \begin{array}{l} \operatorname {V a r} \left(Q _ {A i}\right) = (c (\alpha) + f (\alpha)) \alpha - f (\alpha) c (\alpha) - \alpha^ {2} (19) \\ = (2 \alpha + \delta^ {-} + \delta^ {+}) \alpha - (\alpha + \delta^ {-}) (\alpha + \delta^ {+}) - \alpha^ {2} (20) \\ = - \delta^ {-} \delta^ {+} = (c (\alpha) - \alpha) (\alpha - f (\alpha)) (21) \\ = \mathcal {O} \left(\left(c (\alpha) - f (\alpha)\right) ^ {2}\right). (22) \\ \end{array}
432
+ $$
433
+
434
+ Since $c(\alpha) - f(\alpha)$ is $O(\Delta)$ ,
435
+
436
+ $$
437
+ \begin{array}{l} \operatorname {V a r} (C) = \mathcal {O} \left(b \Delta^ {4} X _ {A} X _ {B} + \Delta^ {2} \frac {X _ {A}}{X _ {B}} \sum_ {i = 1} ^ {b} B _ {i} ^ {2} + \Delta^ {2} \frac {X _ {B}}{X _ {A}} \sum_ {i = 1} ^ {b} A _ {i} ^ {2}\right) (23) \\ = \mathcal {O} \left(b \Delta^ {4} X _ {A} X _ {B} + \Delta^ {2} \frac {X _ {A}}{X _ {B}} \| B \| ^ {2} + \Delta^ {2} \frac {X _ {B}}{X _ {A}} \| A \| ^ {2}\right). (24) \\ \end{array}
438
+ $$
439
+
440
+ Since $X_A = \Theta(\|A\|_\infty)$ and likewise for $B$ , this reduces to
441
+
442
+ $$
443
+ \begin{array}{l} \operatorname {V a r} (C) = \mathcal {O} \left(b \Delta^ {4} \| A \| _ {\infty} \| B \| _ {\infty} + 2 b \Delta^ {2} \| A \| _ {\infty} \| B \| _ {\infty}\right) (25) \\ = \mathcal {O} \left(b \Delta^ {4} \| A \| _ {\infty} \| B \| _ {\infty}\right) (26) \\ \end{array}
444
+ $$
445
+
446
+ If $A$ and $B$ are transformed by the RHT in the way Algorithm 3 does (i.e. $\tilde{A} \gets ASH^T$ and $\tilde{B} \gets HSB$ ), then we can bound $\| \tilde{A} \|_{\infty}$ and $\| \tilde{B} \|_{\infty}$ . From Tseng et al. (2024a), $\forall i, 1 \leq i \leq b$ ,
447
+
448
+ $$
449
+ \mathbb {P} \left(\left| e _ {i} \tilde {A} \right| \geq \epsilon\right) = \mathbb {P} \left(\left| e _ {i} A S H ^ {T} \right| \geq \epsilon\right) \leq 2 \exp \left(\frac {- \epsilon^ {2} b}{2 \| A \| ^ {2}}\right). \tag {27}
450
+ $$
451
+
452
+ From the union bound,
453
+
454
+ $$
455
+ \mathbb {P} \left(\max _ {i} \left| e _ {i} A S H ^ {T} \right| \geq \epsilon\right) \leq 2 b \exp \left(\frac {- \epsilon^ {2} b}{2 \| A \| ^ {2}}\right) \tag {28}
456
+ $$
457
+
458
+ $$
459
+ \mathbb {P} \left(\max _ {i} \left| e _ {i} A S H ^ {T} \right| \geq \sqrt {\frac {2 \| A \| ^ {2}}{b} \log \left(\frac {2 b}{\epsilon}\right)}\right) \leq \epsilon . \tag {29}
460
+ $$
461
+
462
+ so with probability $\geq 1 - \epsilon$
463
+
464
+ $$
465
+ \left\| \tilde {A} \right\| _ {\infty} = \mathcal {O} \left(\sqrt {\frac {2 \| A \| ^ {2}}{b} \log \left(\frac {2 b}{\epsilon}\right)}\right) \tag {30}
466
+ $$
467
+
468
+ and with probability $\geq (1 - \epsilon)^2$
469
+
470
+ $$
471
+ \operatorname {V a r} (C) = \mathcal {O} \left(\Delta^ {4} \| A \| \| B \| \log \left(\frac {2 b}{\epsilon}\right)\right). \tag {31}
472
+ $$
473
+
474
+
data/2025/2502_20xxx/2502.20586/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0d5de011d24de32fc4b87dfc83e3600fcb65cc21292d4875205a4ab7e6a00239
3
+ size 1105513
data/2025/2502_20xxx/2502.20586/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20604/be121c54-f9e2-4c21-8d04-9ea04ee6c28a_content_list.json ADDED
@@ -0,0 +1,1555 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Exploring the Impact of Temperature Scaling in Softmax for Classification and Adversarial Robustness",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 109,
8
+ 63,
9
+ 887,
10
+ 164
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "$1^{\\mathrm{st}}$ Hao Xuan",
17
+ "bbox": [
18
+ 173,
19
+ 186,
20
+ 276,
21
+ 200
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "Electrical and Computer Engineering",
28
+ "bbox": [
29
+ 94,
30
+ 203,
31
+ 349,
32
+ 219
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "University of Alberta",
39
+ "bbox": [
40
+ 153,
41
+ 220,
42
+ 295,
43
+ 234
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "Edmonton, Canada",
50
+ "bbox": [
51
+ 156,
52
+ 234,
53
+ 285,
54
+ 248
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "hxuan@ualberta.ca",
61
+ "bbox": [
62
+ 158,
63
+ 251,
64
+ 289,
65
+ 263
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "$2^{\\mathrm{nd}}$ Bokai Yang",
72
+ "bbox": [
73
+ 439,
74
+ 186,
75
+ 557,
76
+ 203
77
+ ],
78
+ "page_idx": 0
79
+ },
80
+ {
81
+ "type": "text",
82
+ "text": "Electrical and Computer Engineering",
83
+ "bbox": [
84
+ 367,
85
+ 204,
86
+ 622,
87
+ 218
88
+ ],
89
+ "page_idx": 0
90
+ },
91
+ {
92
+ "type": "text",
93
+ "text": "University of Alberta",
94
+ "bbox": [
95
+ 426,
96
+ 220,
97
+ 570,
98
+ 234
99
+ ],
100
+ "page_idx": 0
101
+ },
102
+ {
103
+ "type": "text",
104
+ "text": "Edmonton, Canada",
105
+ "bbox": [
106
+ 429,
107
+ 234,
108
+ 560,
109
+ 248
110
+ ],
111
+ "page_idx": 0
112
+ },
113
+ {
114
+ "type": "text",
115
+ "text": "bokai5@ualberta.ca",
116
+ "bbox": [
117
+ 429,
118
+ 251,
119
+ 562,
120
+ 263
121
+ ],
122
+ "page_idx": 0
123
+ },
124
+ {
125
+ "type": "text",
126
+ "text": "$3^{\\mathrm{rd}}$ Xingyu Li",
127
+ "bbox": [
128
+ 718,
129
+ 186,
130
+ 826,
131
+ 203
132
+ ],
133
+ "page_idx": 0
134
+ },
135
+ {
136
+ "type": "text",
137
+ "text": "Electrical and Computer Engineering",
138
+ "bbox": [
139
+ 643,
140
+ 204,
141
+ 897,
142
+ 218
143
+ ],
144
+ "page_idx": 0
145
+ },
146
+ {
147
+ "type": "text",
148
+ "text": "University of Alberta",
149
+ "bbox": [
150
+ 702,
151
+ 220,
152
+ 844,
153
+ 233
154
+ ],
155
+ "page_idx": 0
156
+ },
157
+ {
158
+ "type": "text",
159
+ "text": "Edmonton, Canada",
160
+ "bbox": [
161
+ 704,
162
+ 234,
163
+ 834,
164
+ 248
165
+ ],
166
+ "page_idx": 0
167
+ },
168
+ {
169
+ "type": "text",
170
+ "text": "xingyu@ualberta.ca",
171
+ "bbox": [
172
+ 702,
173
+ 251,
174
+ 836,
175
+ 263
176
+ ],
177
+ "page_idx": 0
178
+ },
179
+ {
180
+ "type": "text",
181
+ "text": "Abstract—The softmax function is a fundamental component in deep learning. This study delves into the often-overlooked parameter within the softmax function, known as \"temperature,\" providing novel insights into the practical and theoretical aspects of temperature scaling for image classification. Our empirical studies, adopting convolutional neural networks and transformers on multiple benchmark datasets, reveal that moderate temperatures generally introduce better overall performance. Through extensive experiments and rigorous theoretical analysis, we explore the role of temperature scaling in model training and unveil that temperature not only influences learning step size but also shapes the model's optimization direction. Moreover, for the first time, we discover a surprising benefit of elevated temperatures: enhanced model robustness against common corruption, natural perturbation, and non-targeted adversarial attacks like Projected Gradient Descent. We extend our discoveries to adversarial training, demonstrating that, compared to the standard softmax function with the default temperature value, higher temperatures have the potential to enhance adversarial training. The insights of this work open new avenues for improving model performance and security in deep learning applications.",
182
+ "bbox": [
183
+ 73,
184
+ 319,
185
+ 491,
186
+ 585
187
+ ],
188
+ "page_idx": 0
189
+ },
190
+ {
191
+ "type": "text",
192
+ "text": "I. INTRODUCTION",
193
+ "text_level": 1,
194
+ "bbox": [
195
+ 217,
196
+ 598,
197
+ 349,
198
+ 612
199
+ ],
200
+ "page_idx": 0
201
+ },
202
+ {
203
+ "type": "text",
204
+ "text": "Deep learning has achieved dramatic breakthroughs in recent years, excelling in tasks such as image classification [12], nature language processing (NLP) [5], and semantic segmentation [27]. A critical component of most deep learning methods is the softmax function, which normalizes a set of real values into probabilities. The generalized softmax function incorporates a parameter known as \"temperature,\" which controls the softness of the output distribution. Despite its importance in theory, the impact of temperature scaling on classification tasks has been relatively underexplored, particularly in contrast to its use in other areas such as knowledge distillation [8], contrastive learning [24], confidence calibration [21], and natural language processing. Specifically, though the temperature scaling has occasionally been applied in prior experimentation [6], [10], [23], these studies often integrate additional complex techniques such as Gaussian noise injection in [23], adversarial training in [6], [22], and innovative quadratic activation functions in [10], making it challenging to isolate and understand the specific contribution of temperature scaling to the overall system",
205
+ "bbox": [
206
+ 73,
207
+ 619,
208
+ 491,
209
+ 907
210
+ ],
211
+ "page_idx": 0
212
+ },
213
+ {
214
+ "type": "text",
215
+ "text": "performance. Consequently, the specific role of temperature in classification tasks remains ambiguous. Previous study by [1] has hinted at the potential benefits of temperature scaling, but a comprehensive investigation is still lacking.",
216
+ "bbox": [
217
+ 501,
218
+ 318,
219
+ 921,
220
+ 378
221
+ ],
222
+ "page_idx": 0
223
+ },
224
+ {
225
+ "type": "text",
226
+ "text": "This study aims to fill this gap by conducting extensive experiments to explore the practical and theoretical aspects of temperature scaling in the softmax function for image classification. We employ convolutional neural networks (CNNs) and transformers on multiple benchmark datasets, including CIFAR-10 [11], CIFAR-100 [11], and Tiny-ImageNet [16], to systematically analyze the effects of different temperature values. Our empirical results consistently show that moderate temperatures generally improve overall performance, challenging the conventional knowledge derived from contrastive learning that low temperature facilitates representation learning.",
227
+ "bbox": [
228
+ 501,
229
+ 378,
230
+ 921,
231
+ 545
232
+ ],
233
+ "page_idx": 0
234
+ },
235
+ {
236
+ "type": "text",
237
+ "text": "We also delve into the theoretical underpinnings of temperature scaling in model training. Our analysis reveals that temperature not only influences the learning step size but also shapes the model's optimization direction. Specifically, lower temperatures focus the model's learning on error-prone classes, while higher temperatures promote a more balanced learning across all classes. This insight is crucial for understanding the nuanced effects of temperature scaling on model optimization.",
238
+ "bbox": [
239
+ 503,
240
+ 545,
241
+ 921,
242
+ 665
243
+ ],
244
+ "page_idx": 0
245
+ },
246
+ {
247
+ "type": "text",
248
+ "text": "Furthermore, we uncover a surprising benefit of elevated temperatures: enhanced model robustness against common corruptions, natural perturbations, and non-targeted adversarial attacks, such as Projected Gradient Descent (PGD). We extend our investigation to adversarial training introduced by [18], demonstrating that higher temperatures can potentially enhance the robustness of models trained with adversarial methods compared to those using the standard softmax function with the default temperature.",
249
+ "bbox": [
250
+ 503,
251
+ 665,
252
+ 921,
253
+ 800
254
+ ],
255
+ "page_idx": 0
256
+ },
257
+ {
258
+ "type": "text",
259
+ "text": "In summary, this work provides new perspectives on the practical applications and theoretical implications of temperature scaling in the softmax function. Our contributions can be summarized as follows:",
260
+ "bbox": [
261
+ 503,
262
+ 800,
263
+ 921,
264
+ 859
265
+ ],
266
+ "page_idx": 0
267
+ },
268
+ {
269
+ "type": "text",
270
+ "text": "- We conduct extensive experiments demonstrating that applying a reasonably large temperature during model training improves overall performance.",
271
+ "bbox": [
272
+ 519,
273
+ 862,
274
+ 921,
275
+ 907
276
+ ],
277
+ "page_idx": 0
278
+ },
279
+ {
280
+ "type": "aside_text",
281
+ "text": "arXiv:2502.20604v1 [cs.LG] 28 Feb 2025",
282
+ "bbox": [
283
+ 22,
284
+ 265,
285
+ 57,
286
+ 707
287
+ ],
288
+ "page_idx": 0
289
+ },
290
+ {
291
+ "type": "list",
292
+ "sub_type": "text",
293
+ "list_items": [
294
+ "- We discover that models trained with elevated temperatures exhibit enhanced robustness against gradient-based untargeted adversarial attacks.",
295
+ "- Additionally, we show the potential of integrating temperature control into adversarial training to boost model performance and security in deep learning applications."
296
+ ],
297
+ "bbox": [
298
+ 91,
299
+ 61,
300
+ 491,
301
+ 152
302
+ ],
303
+ "page_idx": 1
304
+ },
305
+ {
306
+ "type": "text",
307
+ "text": "II. RELATED WORKS",
308
+ "text_level": 1,
309
+ "bbox": [
310
+ 205,
311
+ 162,
312
+ 361,
313
+ 176
314
+ ],
315
+ "page_idx": 1
316
+ },
317
+ {
318
+ "type": "text",
319
+ "text": "The softmax function has been a longstanding component of neural networks, usually used to normalize a vector of real values into probabilities. Modulating the temperature scaling factor within the softmax function allows for reshaping the probability distribution. This section provides a concise overview of the application of temperature scaling in various computational tasks.",
320
+ "bbox": [
321
+ 73,
322
+ 181,
323
+ 491,
324
+ 287
325
+ ],
326
+ "page_idx": 1
327
+ },
328
+ {
329
+ "type": "text",
330
+ "text": "Knowledge Distillation proposed by [8] is one innovative way to transfer knowledge from a teacher model to a student model. Temperature is utilized during training to control both the student and teacher model's output. The author argues that lower temperatures make the distillation assign less weight to logits that are much smaller than the average. Conversely, employing larger temperatures softens the probability distribution and pays more attention to the unimportant part of the logit. Larger temperatures are proven to be beneficial in the distillation process since the hard-target term already ensures the dominant part of the logit (target class) is correct. By focusing on the remaining logit, the student model can capture more fine-grained information from the teacher model. Note that despite various temperatures used during training, it is set to 1 when the model is deployed.",
331
+ "bbox": [
332
+ 73,
333
+ 287,
334
+ 491,
335
+ 513
336
+ ],
337
+ "page_idx": 1
338
+ },
339
+ {
340
+ "type": "text",
341
+ "text": "Model Confidence Calibration usually utilizes temperature scaling to address the over-confident issue in deep learning [7], [15], [19]. It centers on estimating predictive uncertainty to match its expected accuracy [13], [14]. Despite multiple generic calibration methods being proposed, temperature scaling proposed by [7] remains a baseline method for being simple, effective and able to apply to various cases without major expense. The motivation behind temperature scaling is simple, since the goal is to control the network's confidence to match its accuracy, applying temperature to the softmax function that can directly modify the probability distribution seems a perfect fit for the problem. During training, a validation set is needed to find the ideal temperature parameter for the network, and the same temperature is used when deployed.",
342
+ "bbox": [
343
+ 73,
344
+ 515,
345
+ 491,
346
+ 726
347
+ ],
348
+ "page_idx": 1
349
+ },
350
+ {
351
+ "type": "text",
352
+ "text": "Contrastive Learning is one paradigm for unsupervised learning [20], [26]. To achieve a powerful feature encoder, it utilizes contrastive loss to pull similar samples close and push negative pairs away in the latent space. Although the temperature has long existed as a hyper-parameter in contrastive loss, its actual mechanism is just understudied recently. [24] analyze the contrastive loss closely and find that as the temperature decreases, the distribution of the contrastive loss becomes sharper, which applies larger penalties to samples similar to the anchor data. Also, uniformity of feature distribution increases, indicating the embedding feature distribution aligns with a uniform distribution better [25].",
353
+ "bbox": [
354
+ 73,
355
+ 726,
356
+ 491,
357
+ 907
358
+ ],
359
+ "page_idx": 1
360
+ },
361
+ {
362
+ "type": "text",
363
+ "text": "Temperature Scaling in Image Classification has occasionally been utilized in the experimental sections of prior studies, yet focused investigations on this subject remain limited. For example, previous studies aiming to improve adversarial robustness have utilized temperature scaling to adjust logits within their experimentation [6], [10], [23]. However, these studies often integrate additional complex techniques such as Gaussian noise injection [23], adversarial training [6], [22], and innovative quadratic activation functions [10], making it challenging to isolate and understand the specific contribution of temperature scaling to the overall system performance. In contrast, our study narrows its focus to investigating the direct impact of temperature scaling applied through the softmax function on model optimization processes. Among the few related works, \"The Temperature Check\" by [1] is notably relevant to our discussion. It mainly explores the dynamics of model training by considering factors such as temperature, learning rate, and time, and presents an empirical finding that a model's generalization performance is significantly influenced by temperature settings. While our observations align with these findings, our research approaches the issue from a different perspective of gradient analysis. Specifically, we delve into how temperature scaling impacts model optimization process. Furthermore, our study broadens the scope of inquiry by assessing the effect of temperature scaling on a model's resilience to common corruptions and adversarial attacks, thereby adding a new dimension to the existing research.",
364
+ "bbox": [
365
+ 501,
366
+ 61,
367
+ 924,
368
+ 470
369
+ ],
370
+ "page_idx": 1
371
+ },
372
+ {
373
+ "type": "text",
374
+ "text": "III. PRELIMINARY",
375
+ "text_level": 1,
376
+ "bbox": [
377
+ 645,
378
+ 478,
379
+ 782,
380
+ 491
381
+ ],
382
+ "page_idx": 1
383
+ },
384
+ {
385
+ "type": "text",
386
+ "text": "A. Softmax Function",
387
+ "text_level": 1,
388
+ "bbox": [
389
+ 503,
390
+ 497,
391
+ 650,
392
+ 511
393
+ ],
394
+ "page_idx": 1
395
+ },
396
+ {
397
+ "type": "text",
398
+ "text": "Given a set of real numbers, $X = \\{x_{1},\\dots,x_{N}\\}$ , the generalized softmax function can be used to normalize $X$ into a probability distribution.",
399
+ "bbox": [
400
+ 503,
401
+ 516,
402
+ 921,
403
+ 559
404
+ ],
405
+ "page_idx": 1
406
+ },
407
+ {
408
+ "type": "equation",
409
+ "text": "\n$$\n\\mathbb {S} (X) = \\frac {\\exp (X / \\tau)}{\\sum_ {i} \\exp \\left(x _ {i} / \\tau\\right)}, \\tag {1}\n$$\n",
410
+ "text_format": "latex",
411
+ "bbox": [
412
+ 635,
413
+ 556,
414
+ 921,
415
+ 588
416
+ ],
417
+ "page_idx": 1
418
+ },
419
+ {
420
+ "type": "text",
421
+ "text": "where $\\mathbb{S}$ represents the softmax function and $\\tau$ is the temperature scaling factor. The temperature $\\tau$ controls the smoothness (softness) of the probability it produces. Specifically, when $\\tau \\rightarrow \\infty$ , the output tends toward a uniform distribution; while when $\\tau = 0$ , the softmax function assigns a probability of 1 to the element with the highest value and a probability of 0 to the rest. The standard (unit) softmax function, with $\\tau = 1$ , is widely used in conventional classification tasks.",
422
+ "bbox": [
423
+ 501,
424
+ 593,
425
+ 921,
426
+ 713
427
+ ],
428
+ "page_idx": 1
429
+ },
430
+ {
431
+ "type": "text",
432
+ "text": "B. Problem Definition and Notation",
433
+ "text_level": 1,
434
+ "bbox": [
435
+ 504,
436
+ 722,
437
+ 751,
438
+ 736
439
+ ],
440
+ "page_idx": 1
441
+ },
442
+ {
443
+ "type": "text",
444
+ "text": "We consider multi-category classification in this study, where paired training data $\\{\\mathcal{X},\\mathcal{Y}\\} = \\{(x,y)|x\\in \\mathbb{R}^{H\\times L\\times N},y\\in$ $\\mathbb{R}^{1\\times M}\\}$ are drawn from a data distribution $\\mathcal{D}$ . Here, $H,L,N$ are the dimension of a sample $x,M$ is the number of categories, and $y$ is a one-hot vector indicating the class of the input $x$ . A classifier, $\\mathcal{C}:\\mathcal{X}\\to \\mathcal{Y}$ , is a function predicting the label $y$ for a given data $x$ . That is $C(x) = y$ . In the canonical classification setting, a neural network classifier, $\\mathcal{C} = (f,W)$ , is usually composed of a feature extractor $f$ parameterized by $\\theta$ and a weight matrix $W$ . $f$ is a function mapping the input $x$ to a real-valued vector $f(x)$ in the model's penultimate layer and",
445
+ "bbox": [
446
+ 501,
447
+ 741,
448
+ 921,
449
+ 907
450
+ ],
451
+ "page_idx": 1
452
+ },
453
+ {
454
+ "type": "text",
455
+ "text": "$W = (w_{1},\\dots,w_{M})$ represents the coefficients of the last linear layer before the softmax layer. So the likelihood probability of data $x$ corresponding to the $M$ categories can be formulated as",
456
+ "bbox": [
457
+ 73,
458
+ 61,
459
+ 491,
460
+ 107
461
+ ],
462
+ "page_idx": 2
463
+ },
464
+ {
465
+ "type": "equation",
466
+ "text": "\n$$\n\\hat {y} = C (x) = \\mathbb {S} \\left(W ^ {T} f (x)\\right). \\tag {2}\n$$\n",
467
+ "text_format": "latex",
468
+ "bbox": [
469
+ 199,
470
+ 114,
471
+ 491,
472
+ 132
473
+ ],
474
+ "page_idx": 2
475
+ },
476
+ {
477
+ "type": "text",
478
+ "text": "Note that each vector $w_{i}$ in matrix $W$ can be considered as the prototype of class $i$ and the production $W^{T}f(x)$ in Eqn. 2 quantifies the similarity between the feature $f(x)$ and different class-prototypes.",
479
+ "bbox": [
480
+ 73,
481
+ 140,
482
+ 491,
483
+ 200
484
+ ],
485
+ "page_idx": 2
486
+ },
487
+ {
488
+ "type": "text",
489
+ "text": "During training, the model $C = (f, W)$ is optimized to minimize a specific loss, usually a Cross-Entropy (CE) loss.",
490
+ "bbox": [
491
+ 73,
492
+ 200,
493
+ 491,
494
+ 231
495
+ ],
496
+ "page_idx": 2
497
+ },
498
+ {
499
+ "type": "equation",
500
+ "text": "\n$$\nL _ {c e} (x) = - y \\log \\hat {y} = - \\log \\left[ \\frac {\\exp \\left(w _ {i} ^ {T} \\cdot f (x) / \\tau\\right)}{\\sum_ {j = 1} ^ {N} \\exp \\left(w _ {j} ^ {T} \\cdot f (x) / \\tau\\right)} \\right] \\tag {3}\n$$\n",
501
+ "text_format": "latex",
502
+ "bbox": [
503
+ 99,
504
+ 236,
505
+ 491,
506
+ 277
507
+ ],
508
+ "page_idx": 2
509
+ },
510
+ {
511
+ "type": "text",
512
+ "text": "Though $\\tau = 1$ is the default setting in classification tasks, we preserve $\\tau$ in the Eqn.s to facilitate theoretical analysis.",
513
+ "bbox": [
514
+ 73,
515
+ 282,
516
+ 491,
517
+ 314
518
+ ],
519
+ "page_idx": 2
520
+ },
521
+ {
522
+ "type": "text",
523
+ "text": "IV. GRADIENT ANALYSIS",
524
+ "text_level": 1,
525
+ "bbox": [
526
+ 189,
527
+ 323,
528
+ 375,
529
+ 335
530
+ ],
531
+ "page_idx": 2
532
+ },
533
+ {
534
+ "type": "text",
535
+ "text": "To investigate the impact of temperature scaling factors for model optimization in classification tasks, we calculate the loss gradients with respect to the training parameters in the model. Specifically, given a data sample $x$ from the $i^{th}$ category, we refer to $w_{i}$ as the positive class prototype and the rest, $w_{j}$ for $j \\neq i$ , as the negative class prototypes. Then the gradients with respect to the positive class prototype, negative class prototypes, and the encoder are:",
536
+ "bbox": [
537
+ 73,
538
+ 342,
539
+ 491,
540
+ 460
541
+ ],
542
+ "page_idx": 2
543
+ },
544
+ {
545
+ "type": "equation",
546
+ "text": "\n$$\n\\frac {\\partial L _ {c e} (x)}{\\partial w _ {i}} = \\frac {1}{\\tau} \\left[ \\mathbb {S} \\left(w _ {i} ^ {T} \\cdot f (x) / \\tau\\right) - 1 \\right] f (x) = \\frac {1}{\\tau} \\left[ P _ {i} ^ {\\tau} (x) - 1 \\right] f (x), \\tag {4}\n$$\n",
547
+ "text_format": "latex",
548
+ "bbox": [
549
+ 76,
550
+ 465,
551
+ 491,
552
+ 511
553
+ ],
554
+ "page_idx": 2
555
+ },
556
+ {
557
+ "type": "equation",
558
+ "text": "\n$$\n\\frac {\\partial L _ {c e} (x)}{\\partial w _ {j}} = \\frac {1}{\\tau} \\mathbb {S} \\left(w _ {j} ^ {T} \\cdot f (x) / \\tau\\right) f (x) = \\frac {1}{\\tau} P _ {j} ^ {\\tau} (x) f (x), \\tag {5}\n$$\n",
559
+ "text_format": "latex",
560
+ "bbox": [
561
+ 114,
562
+ 517,
563
+ 491,
564
+ 551
565
+ ],
566
+ "page_idx": 2
567
+ },
568
+ {
569
+ "type": "equation",
570
+ "text": "\n$$\n\\frac {\\partial L _ {c e} (x)}{\\partial f} = \\frac {1}{\\tau} \\left[ \\sum_ {j \\neq i} w _ {k} P _ {j} ^ {\\tau} (x) - w _ {i} \\left[ 1 - P _ {i} ^ {\\tau} (x) \\right] \\right]. \\tag {6}\n$$\n",
571
+ "text_format": "latex",
572
+ "bbox": [
573
+ 124,
574
+ 556,
575
+ 491,
576
+ 599
577
+ ],
578
+ "page_idx": 2
579
+ },
580
+ {
581
+ "type": "text",
582
+ "text": "Learning rate: In Eqn. 4, 5, 6, since $0 < P_{j}^{\\tau}(x) < 1$ , the actual learning rate is inversely proportional to the temperature $\\tau$ . That is, larger temperatures lead to a reduced gradient step in model update, while smaller temperatures not only increase the gradient step. Furthermore, when the sample $x$ is misclassified, smaller temperatures give a further boost on updating $w_{i}$ and $w_{j}$ for $j = \\arg \\max (P_{j}^{\\tau}(x)f(x))$ , because smaller temperatures in softmax function lead to shaper distributions.",
583
+ "bbox": [
584
+ 73,
585
+ 604,
586
+ 491,
587
+ 724
588
+ ],
589
+ "page_idx": 2
590
+ },
591
+ {
592
+ "type": "text",
593
+ "text": "Optimization direction: From Eqn. 4, the positive class prototype $w_{i}$ is updated toward $f(x)$ in the latent space. In contrast, the negative prototypes $w_{j}$ move away from the direction of $f(x)$ according to Eqn. 5. The optimization direction of $f(x)$ is a weighted sum of all class prototypes, as shown in Eqn. 6. The fundamental optimization policy is to update the trainable parameters of the encoder in such a way that $f(x)$ moves closer to the positive class prototype and farther away from the negative class prototypes in the latent space. However, when we take the temperature parameter into account, we find that temperature has an impact on the update direction of $f(x)$ . Specifically, when the temperature is low,",
594
+ "bbox": [
595
+ 73,
596
+ 726,
597
+ 491,
598
+ 907
599
+ ],
600
+ "page_idx": 2
601
+ },
602
+ {
603
+ "type": "image",
604
+ "img_path": "images/8edc9f837068a17d575223855a12a531d8be38ee8b3fb974859a5f7d2bdcdcfe.jpg",
605
+ "image_caption": [
606
+ "(a) Small $\\tau$"
607
+ ],
608
+ "image_footnote": [],
609
+ "bbox": [
610
+ 517,
611
+ 65,
612
+ 705,
613
+ 186
614
+ ],
615
+ "page_idx": 2
616
+ },
617
+ {
618
+ "type": "image",
619
+ "img_path": "images/e5ecc446ceb3f0f28df46d6d8da9060fa5b482b08f6b2ac31b4a50f1cf73b60a.jpg",
620
+ "image_caption": [
621
+ "(b) Large $\\tau$",
622
+ "Fig. 1: Demonstration of the model optimization direction with different temperatures. $f(x)$ is the latent code of a data sample from category 3. Since $f(x)$ is close to the negative class prototype $w_{1}$ , the CE loss with respect to the encoder $f$ yields a large gradient toward the groundtruth $w_{3}$ . However, with different temperature factors, the gradients associated with the negative classes are different: low temperature makes the update more biased by the hard class (a), while an elevated temperature leads to more equalized gradients (b)."
623
+ ],
624
+ "image_footnote": [],
625
+ "bbox": [
626
+ 720,
627
+ 65,
628
+ 913,
629
+ 186
630
+ ],
631
+ "page_idx": 2
632
+ },
633
+ {
634
+ "type": "text",
635
+ "text": "the probability distribution produced by the softmax function is sharper, leading to significant differences in probability values among different prototypes. Consequently, the update direction of the encoder $f$ is predominantly influenced by the class prototype with the highest probability and the positive class prototype (if they are different). Fig. 1(a) visualizes the bias toward the hard class in model optimization, where $f(x)$ is the latent code of a data sample from category 3. In contrast, when the temperature is high, the differences in probability values among different prototypes are relatively smaller, and the encoder $f$ updates with a mixture of all class prototype directions, as demonstrated in Fig. 1(b). In other words, a low temperature makes the model focus on learning hard-class pairs, while a high temperature de-biases the influence among different classes for a balanced learning.",
636
+ "bbox": [
637
+ 501,
638
+ 377,
639
+ 921,
640
+ 603
641
+ ],
642
+ "page_idx": 2
643
+ },
644
+ {
645
+ "type": "text",
646
+ "text": "Moreover, when considering all the samples in one batch, the compound gradient of all $N$ samples are",
647
+ "bbox": [
648
+ 503,
649
+ 604,
650
+ 921,
651
+ 635
652
+ ],
653
+ "page_idx": 2
654
+ },
655
+ {
656
+ "type": "equation",
657
+ "text": "\n$$\n\\sum_ {n = 1} ^ {N} \\frac {\\partial L _ {c e} \\left(x _ {n}\\right)}{\\partial w _ {i}} = - \\frac {1}{\\tau} \\sum_ {n = 1} ^ {N} f \\left(x _ {n}\\right) \\left[ 1 - P _ {i} ^ {\\tau} \\left(x _ {n}\\right) \\right], \\tag {7}\n$$\n",
658
+ "text_format": "latex",
659
+ "bbox": [
660
+ 563,
661
+ 647,
662
+ 921,
663
+ 688
664
+ ],
665
+ "page_idx": 2
666
+ },
667
+ {
668
+ "type": "equation",
669
+ "text": "\n$$\n\\sum_ {n = 1} ^ {N} \\frac {\\partial L _ {c e} (x _ {n})}{\\partial w _ {k}} = \\frac {1}{\\tau} \\sum_ {n = 1} ^ {N} f (x _ {n}) P _ {k} ^ {\\tau} (x _ {n}), \\tag {8}\n$$\n",
670
+ "text_format": "latex",
671
+ "bbox": [
672
+ 589,
673
+ 705,
674
+ 921,
675
+ 748
676
+ ],
677
+ "page_idx": 2
678
+ },
679
+ {
680
+ "type": "equation",
681
+ "text": "\n$$\n\\sum_ {n = 1} ^ {N} \\frac {\\partial L _ {c e} (x _ {n})}{\\partial f} = \\frac {1}{\\tau} \\sum_ {n = 1} ^ {N} \\left[ \\sum_ {k \\neq i} w _ {k} P _ {k} ^ {\\tau} (x _ {n}) - w _ {i} \\left[ 1 - P _ {i} ^ {\\tau} (x _ {n}) \\right] \\right]. \\tag {9}\n$$\n",
682
+ "text_format": "latex",
683
+ "bbox": [
684
+ 516,
685
+ 763,
686
+ 921,
687
+ 820
688
+ ],
689
+ "page_idx": 2
690
+ },
691
+ {
692
+ "type": "text",
693
+ "text": "Similar to the single sample case, when optimizing in a whole batch, with small temperatures, the model focuses on learning misclassified samples (i.e. hard samples), whereas higher temperatures help de-bias the update direction and distribute similar weight to all samples.",
694
+ "bbox": [
695
+ 501,
696
+ 830,
697
+ 921,
698
+ 907
699
+ ],
700
+ "page_idx": 2
701
+ },
702
+ {
703
+ "type": "image",
704
+ "img_path": "images/2c1b2ef366d4294a39a5908abdb4c29f155a5fa9d72facd09b5761419f754eb1.jpg",
705
+ "image_caption": [
706
+ "(a) $\\tau = 0.5$"
707
+ ],
708
+ "image_footnote": [],
709
+ "bbox": [
710
+ 94,
711
+ 66,
712
+ 330,
713
+ 204
714
+ ],
715
+ "page_idx": 3
716
+ },
717
+ {
718
+ "type": "image",
719
+ "img_path": "images/8a3d6991ffe7379175b6ebf6a056dfeea9f558a597468efffbd803547b720e27.jpg",
720
+ "image_caption": [
721
+ "(b) $\\tau = 1$"
722
+ ],
723
+ "image_footnote": [],
724
+ "bbox": [
725
+ 372,
726
+ 66,
727
+ 614,
728
+ 203
729
+ ],
730
+ "page_idx": 3
731
+ },
732
+ {
733
+ "type": "image",
734
+ "img_path": "images/d06fbc36630b94487828e972f1d66e5045b9a0bce58e5e1774361e1477d55416.jpg",
735
+ "image_caption": [
736
+ "(c) $\\tau = 50$"
737
+ ],
738
+ "image_footnote": [],
739
+ "bbox": [
740
+ 663,
741
+ 66,
742
+ 897,
743
+ 203
744
+ ],
745
+ "page_idx": 3
746
+ },
747
+ {
748
+ "type": "image",
749
+ "img_path": "images/772354c17bba1d7cb82d9e882da17e750313e4590ac174a80d02f4e4dd1b62cc.jpg",
750
+ "image_caption": [
751
+ "Fig. 2: T-SNE [17] visualization of the CIFAR10 sample distribution after the ResNet50 encoder with different temperatures.",
752
+ "(a) $\\tau = 0.5$",
753
+ "Fig. 3: T-SNE [17] visualization of the CIFAR10 sample distribution after the VIT encoder with different temperatures."
754
+ ],
755
+ "image_footnote": [],
756
+ "bbox": [
757
+ 89,
758
+ 277,
759
+ 330,
760
+ 414
761
+ ],
762
+ "page_idx": 3
763
+ },
764
+ {
765
+ "type": "image",
766
+ "img_path": "images/8265c7ab87ea11fafca111b98811837ba957cebfd3f14cdd3bbdeb1ee1708d30.jpg",
767
+ "image_caption": [
768
+ "(b) $\\tau = 1$"
769
+ ],
770
+ "image_footnote": [],
771
+ "bbox": [
772
+ 375,
773
+ 277,
774
+ 614,
775
+ 412
776
+ ],
777
+ "page_idx": 3
778
+ },
779
+ {
780
+ "type": "image",
781
+ "img_path": "images/ecfeff4655621fa1b6ee326bd894f2859fb3629164507bcaeff0e5ebb5588879.jpg",
782
+ "image_caption": [
783
+ "(c) $\\tau = 50$"
784
+ ],
785
+ "image_footnote": [],
786
+ "bbox": [
787
+ 663,
788
+ 277,
789
+ 898,
790
+ 412
791
+ ],
792
+ "page_idx": 3
793
+ },
794
+ {
795
+ "type": "text",
796
+ "text": "V. EMPIRICAL ANALYSIS AND DISCUSSION",
797
+ "text_level": 1,
798
+ "bbox": [
799
+ 127,
800
+ 497,
801
+ 437,
802
+ 511
803
+ ],
804
+ "page_idx": 3
805
+ },
806
+ {
807
+ "type": "text",
808
+ "text": "As discussed in Section 4, applying a small temperature encourages a model to learn more about hard (misclassified) samples and hard (error-prone class) classes. A low temperature, however, leads to more equitable learning across different classes and data points. Theoretically, both approaches to optimize feature distribution sound reasonable, with low temperatures focusing on weaker classes and high temperatures decreasing inequality across all negative classes. We argue that which optimization strategy is better for classification tasks remains an empirical problem.",
809
+ "bbox": [
810
+ 73,
811
+ 517,
812
+ 491,
813
+ 667
814
+ ],
815
+ "page_idx": 3
816
+ },
817
+ {
818
+ "type": "text",
819
+ "text": "A. Experiment Setting",
820
+ "text_level": 1,
821
+ "bbox": [
822
+ 73,
823
+ 676,
824
+ 228,
825
+ 691
826
+ ],
827
+ "page_idx": 3
828
+ },
829
+ {
830
+ "type": "text",
831
+ "text": "We conduct image classification on multiple benchmarks (i.e. CIFAR10, CIFAR100, and Tiny-ImageNet) and their extended Common Corruptions and Perturbations sets (i.e. CIFAR10-C, CIFAR100-C, and Tiny-ImageNet-C with corruption strength being 3) to investigate the impact of temperature scaling. In addition, we also evaluate the model's robustness against adversarial attacks such as PDG20 [18] and C&W [2]. Both attacks are bounded by the $l_{\\infty}$ box with the same maximum perturbation $\\epsilon = 8 / 255$ .",
832
+ "bbox": [
833
+ 73,
834
+ 696,
835
+ 491,
836
+ 830
837
+ ],
838
+ "page_idx": 3
839
+ },
840
+ {
841
+ "type": "text",
842
+ "text": "To get a comprehensive evaluation, we set $\\tau \\in \\{0.1, 0.5, 1, 10, 30, 50, 70, 100\\}$ . Unless stated otherwise, we takes ResNet50 and VIT-small-patch16-224 as the CNN and transformer backbones, respectively. The ResNet50 is trained from scratch, with SGD optimizer and learning rate setting to",
843
+ "bbox": [
844
+ 73,
845
+ 832,
846
+ 491,
847
+ 907
848
+ ],
849
+ "page_idx": 3
850
+ },
851
+ {
852
+ "type": "text",
853
+ "text": "0.1. We also utilize the Cosine Annealing scheduler to better train the model. The transformer is pretrained on ImageNet-21K and finetuned on the target dataset using Adam optimizer. All experiments run on one RTX3090.",
854
+ "bbox": [
855
+ 503,
856
+ 497,
857
+ 921,
858
+ 556
859
+ ],
860
+ "page_idx": 3
861
+ },
862
+ {
863
+ "type": "text",
864
+ "text": "To clarify, the temperature scaling only involves in model training in this study, but not model evaluation and attacks. All empirical evaluation and adversarial sample generation by PGD and C&W are based on the standard cross entropy, i.e. $\\tau = 1$ . Thus, attack gradients are not attenuated, reflecting model's true sensitivity to data perturbation.",
865
+ "bbox": [
866
+ 503,
867
+ 558,
868
+ 921,
869
+ 648
870
+ ],
871
+ "page_idx": 3
872
+ },
873
+ {
874
+ "type": "text",
875
+ "text": "B. Experiment Results",
876
+ "text_level": 1,
877
+ "bbox": [
878
+ 504,
879
+ 660,
880
+ 660,
881
+ 674
882
+ ],
883
+ "page_idx": 3
884
+ },
885
+ {
886
+ "type": "text",
887
+ "text": "The quantitative results on CNN and Transformer are summarized in Table I and Table II, respectively. For the CNN model, ResNet50, training from scratch, the standard accuracy increases with the temperature increase. Furthermore, CNN models trained at elevated temperatures show more robustness against naturally corrected images. We believe that such improvements are majorly attributed to better model optimization with leveraged temperature. For the transformer finetuned on the target set, the standard accuracy and robustness against natural corruptions and perturbations is quite stable. We hypothesize that such stable performance is due to the fact that ViT has already been pre-trained on ImageNet and has reached a relatively high-quality state. Additionally, we observed that the model's adversarial robustness gradually improves with increasing temperature.",
888
+ "bbox": [
889
+ 501,
890
+ 680,
891
+ 921,
892
+ 907
893
+ ],
894
+ "page_idx": 3
895
+ },
896
+ {
897
+ "type": "table",
898
+ "img_path": "images/147e9d52979e7dd26f0ce36b8a8fee8b5c891617be9ca54eb07705c378fa59b6.jpg",
899
+ "table_caption": [
900
+ "TABLE I: Model performance and Robustness against Common Corruptions and Adversarial attacks (%) under different temperatures with ResNet50 trained from scratch. -C in the table represents the corresponding Common Corruptions and Perturbations set."
901
+ ],
902
+ "table_footnote": [],
903
+ "table_body": "<table><tr><td rowspan=\"2\">Temp.</td><td colspan=\"4\">CIFAR10</td><td colspan=\"4\">CIFAR100</td><td colspan=\"4\">Tiny-Imagenet</td></tr><tr><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td></tr><tr><td>τ = 0.1</td><td>90.05</td><td>73.31</td><td>0</td><td>27.79</td><td>70.39</td><td>44.52</td><td>0</td><td>14.32</td><td>54.53</td><td>12.63</td><td>0</td><td>23.17</td></tr><tr><td>τ = 0.5</td><td>94.17</td><td>72.51</td><td>0</td><td>16.03</td><td>74.79</td><td>45.41</td><td>0</td><td>8.44</td><td>61.07</td><td>18.55</td><td>0</td><td>19.44</td></tr><tr><td>τ = 1</td><td>94.26</td><td>72.53</td><td>0</td><td>19.19</td><td>74.58</td><td>46.47</td><td>0</td><td>11.26</td><td>62.93</td><td>18.66</td><td>0</td><td>19.09</td></tr><tr><td>τ = 10</td><td>95.41</td><td>73.94</td><td>0.56</td><td>39.79</td><td>78.21</td><td>50.67</td><td>0.29</td><td>15.33</td><td>64.70</td><td>21.66</td><td>2.59</td><td>23.88</td></tr><tr><td>τ = 30</td><td>95.26</td><td>74.93</td><td>91.09</td><td>43.35</td><td>78.27</td><td>50.17</td><td>68.47</td><td>18.81</td><td>63.60</td><td>21.30</td><td>49.45</td><td>26.50</td></tr><tr><td>τ = 50</td><td>94.92</td><td>74.44</td><td>93.04</td><td>36.13</td><td>77.97</td><td>49.87</td><td>72.92</td><td>20.50</td><td>62.85</td><td>20.40</td><td>54.95</td><td>28.68</td></tr><tr><td>τ = 70</td><td>95.05</td><td>74.26</td><td>93.85</td><td>35.43</td><td>77.20</td><td>49.61</td><td>73.49</td><td>21.66</td><td>62.14</td><td>20.57</td><td>55.54</td><td>30.14</td></tr><tr><td>τ = 100</td><td>95.05</td><td>73.08</td><td>94.29</td><td>37.32</td><td>77.14</td><td>49.31</td><td>73.65</td><td>22.83</td><td>61.46</td><td>18.82</td><td>54.60</td><td>32.71</td></tr></table>",
904
+ "bbox": [
905
+ 192,
906
+ 109,
907
+ 802,
908
+ 315
909
+ ],
910
+ "page_idx": 4
911
+ },
912
+ {
913
+ "type": "table",
914
+ "img_path": "images/cbc43d397618fb9e6121236a7612f3e643913f41eca4823d25336df8a177182e.jpg",
915
+ "table_caption": [
916
+ "TABLE II: Model performance and Robustness against Common Corruptions and Adversarial attacks (\\%) under different temperatures with Transformer Vit-small-patch16-224. -C in the table represents the corresponding Common Corruptions and Perturbations set."
917
+ ],
918
+ "table_footnote": [],
919
+ "table_body": "<table><tr><td rowspan=\"2\">Temp.</td><td colspan=\"4\">CIFAR10</td><td colspan=\"4\">CIFAR100</td></tr><tr><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td></tr><tr><td>τ = 0.1</td><td>98.45</td><td>92.83</td><td>0</td><td>26.13</td><td>89.79</td><td>74.7</td><td>0</td><td>23.71</td></tr><tr><td>τ = 0.5</td><td>98.33</td><td>91.60</td><td>0</td><td>26.26</td><td>90.53</td><td>74.9</td><td>0</td><td>29.25</td></tr><tr><td>τ = 1</td><td>98.29</td><td>92.21</td><td>0</td><td>31.69</td><td>90.78</td><td>75.5</td><td>0</td><td>31.97</td></tr><tr><td>τ = 10</td><td>98.06</td><td>92.19</td><td>89.07</td><td>31.89</td><td>89.94</td><td>75.5</td><td>58.71</td><td>34.96</td></tr><tr><td>τ = 30</td><td>98.23</td><td>91.72</td><td>97.10</td><td>38.21</td><td>89.52</td><td>74.6</td><td>86.25</td><td>36.07</td></tr><tr><td>τ = 50</td><td>98.22</td><td>91.43</td><td>97.75</td><td>39.52</td><td>89.28</td><td>73.8</td><td>87.29</td><td>33.64</td></tr><tr><td>τ = 70</td><td>98.03</td><td>91.20</td><td>97.72</td><td>39.02</td><td>89.48</td><td>74.2</td><td>87.96</td><td>33.81</td></tr><tr><td>τ = 100</td><td>98.07</td><td>91.56</td><td>97.87</td><td>38.26</td><td>89.13</td><td>73.47</td><td>86.99</td><td>31.84</td></tr></table>",
920
+ "bbox": [
921
+ 84,
922
+ 417,
923
+ 483,
924
+ 625
925
+ ],
926
+ "page_idx": 4
927
+ },
928
+ {
929
+ "type": "text",
930
+ "text": "Clustering is a crucial metric when measuring how an encoder performs. In classification, a good encoder should be able to gather samples from the same class while separating clusters of different classes. Fig. 2 and Fig. 3 present 2D TSNE visualization of the CIFAR10 sample distribution by ResNet50 and transformer. We observe a similar trend: low temperatures lead to more mixed clusters, while models trained with elevated temperatures have better cluster effects. These empirical observations also explain the improved classification performance on clean and non-adversarial perturbations, as well as stronger adversarial robustness, with high temperature in Table I and Table II.",
931
+ "bbox": [
932
+ 73,
933
+ 650,
934
+ 491,
935
+ 830
936
+ ],
937
+ "page_idx": 4
938
+ },
939
+ {
940
+ "type": "text",
941
+ "text": "C. Training Convergence",
942
+ "text_level": 1,
943
+ "bbox": [
944
+ 75,
945
+ 842,
946
+ 250,
947
+ 857
948
+ ],
949
+ "page_idx": 4
950
+ },
951
+ {
952
+ "type": "text",
953
+ "text": "We then conduct experiments observing the training process when applying different temperatures to the model. We validate the model on the test set every epoch and record the error",
954
+ "bbox": [
955
+ 73,
956
+ 862,
957
+ 491,
958
+ 907
959
+ ],
960
+ "page_idx": 4
961
+ },
962
+ {
963
+ "type": "image",
964
+ "img_path": "images/03d90842b10c7e507daf509f3fcf7ee986b82be6e427fbf34aa980baad0a33f1.jpg",
965
+ "image_caption": [
966
+ "(a) Learning Rate $= 0.1$"
967
+ ],
968
+ "image_footnote": [],
969
+ "bbox": [
970
+ 509,
971
+ 343,
972
+ 709,
973
+ 449
974
+ ],
975
+ "page_idx": 4
976
+ },
977
+ {
978
+ "type": "image",
979
+ "img_path": "images/249c61489b25f3bce8a170558fd106a28d6926db159a6b48625db05002100fab.jpg",
980
+ "image_caption": [
981
+ "(b) Learning Rate $= 0.01$",
982
+ "Fig. 4: Test error number during training. The red line represents $\\tau = 0.5$ , the green line represents $\\tau = 1$ , and the orange line represents $\\tau = 50$ . The model used is Resnet50 and is tested on CIFAR10. SGD optimizer is used during training with the learning rate set to 0.1 (a) and 0.01 (b). The shade areas consist of 6 total runs with different random seeds. The solid lines indicate the mean value across all runs."
983
+ ],
984
+ "image_footnote": [],
985
+ "bbox": [
986
+ 715,
987
+ 343,
988
+ 915,
989
+ 449
990
+ ],
991
+ "page_idx": 4
992
+ },
993
+ {
994
+ "type": "text",
995
+ "text": "probability. As our results shown in Fig. 4(a), we can clearly observe that not only does the training convergence speed increase as the temperature goes up, but models trained with higher temperatures also tend to converge to lower points, leading to better final performance. In fact, when we further decrease the temperature to around 0.1, the model would have a substantial risk of not converging at all. While this might appear contrary to the common understanding that focusing on hard classes will generally benefit the model, a more nuanced explanation is provided by delving further into the gradient analysis provided in Section 4.",
996
+ "bbox": [
997
+ 501,
998
+ 604,
999
+ 923,
1000
+ 768
1001
+ ],
1002
+ "page_idx": 4
1003
+ },
1004
+ {
1005
+ "type": "text",
1006
+ "text": "From Eqn. 4, 5, 6, we observe that if the logit of the target class is not the largest, its gradient will increase dramatically with low temperatures. This is potentially bad for models being known to converge inefficiently under large learning rates. One straightforward solution would be lowering the learning rate as shown in Fig. 4. While the training converging speeds are closer, the run with a higher temperature can still reach a better performance. Furthermore, regardless of the increase in overall training converging speed for $\\tau = 0.5$ and $\\tau = 1$ runs",
1007
+ "bbox": [
1008
+ 501,
1009
+ 771,
1010
+ 921,
1011
+ 907
1012
+ ],
1013
+ "page_idx": 4
1014
+ },
1015
+ {
1016
+ "type": "image",
1017
+ "img_path": "images/a5cfef4ece1f81efc298067a1679ce52746aefbcfa6b59487306c80683c6c658.jpg",
1018
+ "image_caption": [
1019
+ "(a) $\\tau = 0.5$"
1020
+ ],
1021
+ "image_footnote": [],
1022
+ "bbox": [
1023
+ 83,
1024
+ 70,
1025
+ 272,
1026
+ 167
1027
+ ],
1028
+ "page_idx": 5
1029
+ },
1030
+ {
1031
+ "type": "image",
1032
+ "img_path": "images/84a9e91addc67644dafe6ad2c8c05a2dcc733e10ec9df01d405a59390d95b147.jpg",
1033
+ "image_caption": [
1034
+ "(b) $\\tau = 1$",
1035
+ "Fig. 5: The logit changes before and after PGD20 attack. The blue lines stand for the logits of the samples before PGD attack, and the orange lines stand for the logits of the samples after PGD attack."
1036
+ ],
1037
+ "image_footnote": [],
1038
+ "bbox": [
1039
+ 295,
1040
+ 70,
1041
+ 483,
1042
+ 167
1043
+ ],
1044
+ "page_idx": 5
1045
+ },
1046
+ {
1047
+ "type": "image",
1048
+ "img_path": "images/61e440f8f1366c34f6650411084a2209893244218f8b6765a79b9acb9987d8df.jpg",
1049
+ "image_caption": [
1050
+ "(c) $\\tau = 50$"
1051
+ ],
1052
+ "image_footnote": [],
1053
+ "bbox": [
1054
+ 511,
1055
+ 70,
1056
+ 700,
1057
+ 167
1058
+ ],
1059
+ "page_idx": 5
1060
+ },
1061
+ {
1062
+ "type": "image",
1063
+ "img_path": "images/b5249e5c6609f055aea0cbd553167c61776ac3b14bdb9e64f95793d89b70a5c7.jpg",
1064
+ "image_caption": [
1065
+ "(d) $\\tau = 100$"
1066
+ ],
1067
+ "image_footnote": [],
1068
+ "bbox": [
1069
+ 728,
1070
+ 71,
1071
+ 913,
1072
+ 167
1073
+ ],
1074
+ "page_idx": 5
1075
+ },
1076
+ {
1077
+ "type": "text",
1078
+ "text": "when lowering the learning rate, the final performances for all three runs actually get worse than runs with 0.1 learning rate. Therefore, this phenomenon cannot be attributed solely to a high learning rate. However, if we shift our perspective to the overall direction for optimization as done in Eqn. 9, it becomes clear that during the early stage of training, the encoder $f$ has not converged to an ideal point, leading to sub-optimal values produced for certain update directions. If this happens to be the direction of the target class and the error-prone class which models with small temperatures tend to focus on, the model training can be impacted harmfully. In the meantime, high temperatures equalize the weight given to all the classes and ensure the update is not terribly wrong even if a few $\\partial L_{ce}(x) / \\partial w_j$ are in the wrong direction. Upon reaching this conclusion, we are surprised to find that this reasoning and our empirical observations align perfectly with the curriculum learning philosophy, that starting from hard samples may harm model optimization and learning outcomes.",
1079
+ "bbox": [
1080
+ 73,
1081
+ 257,
1082
+ 491,
1083
+ 530
1084
+ ],
1085
+ "page_idx": 5
1086
+ },
1087
+ {
1088
+ "type": "text",
1089
+ "text": "D. Adversarial Robustness",
1090
+ "text_level": 1,
1091
+ "bbox": [
1092
+ 73,
1093
+ 541,
1094
+ 261,
1095
+ 555
1096
+ ],
1097
+ "page_idx": 5
1098
+ },
1099
+ {
1100
+ "type": "text",
1101
+ "text": "Table I and Table II show that models trained with elevated temperatures have strong adversarial robustness. TSNE plots in Fig. 2 and Fig. 3 also support this observation. This prompts questions regarding the mechanism behind the gained robustness. In this section, our focus is on investigating the model's behavior under adversarial attacks and understanding why the model demonstrates such robustness.",
1102
+ "bbox": [
1103
+ 73,
1104
+ 561,
1105
+ 490,
1106
+ 666
1107
+ ],
1108
+ "page_idx": 5
1109
+ },
1110
+ {
1111
+ "type": "text",
1112
+ "text": "Gradient analysis for adversarial generation. In order to discern the source of model robustness, we follow the work in [9] and study the gradient of the classification loss with respect to the input to analyze the direction of the PGD attack, which can be written as",
1113
+ "bbox": [
1114
+ 73,
1115
+ 667,
1116
+ 491,
1117
+ 742
1118
+ ],
1119
+ "page_idx": 5
1120
+ },
1121
+ {
1122
+ "type": "equation",
1123
+ "text": "\n$$\n\\begin{array}{l} \\frac {\\partial L _ {c e}}{\\partial x} = \\left[ \\left(\\mathbb {S} \\left(w _ {i} ^ {T} \\cdot f (x)\\right) - 1\\right) \\cdot w _ {i} ^ {T} + \\right. \\\\ \\left. \\sum_ {j \\neq i} w _ {j} ^ {T} \\cdot \\mathbb {S} \\left(w _ {j} ^ {T} \\cdot f (x)\\right) \\right] \\cdot \\frac {\\partial f (x)}{\\partial x} \\tag {10} \\\\ \\end{array}\n$$\n",
1124
+ "text_format": "latex",
1125
+ "bbox": [
1126
+ 143,
1127
+ 748,
1128
+ 490,
1129
+ 809
1130
+ ],
1131
+ "page_idx": 5
1132
+ },
1133
+ {
1134
+ "type": "text",
1135
+ "text": "As illustrated above, given a well-trained model, for most inputs where $\\mathbb{S}(w_i^T\\cdot f(x))\\approx 1$ , the gradient does not have a noticeable portion in target class $w_{i}$ on the early stage of the attack. This implies that rather than directly 'stepping away' from the target class, the attack will initially focus on approaching other class prototypes. Moreover, the second term,",
1136
+ "bbox": [
1137
+ 73,
1138
+ 816,
1139
+ 491,
1140
+ 907
1141
+ ],
1142
+ "page_idx": 5
1143
+ },
1144
+ {
1145
+ "type": "text",
1146
+ "text": "$\\sum_{j \\neq i} w_j^T \\cdot \\mathbb{S}(w_j^T \\cdot f(x))$ , indicates that all the other directions are weighted by their according probabilities. Therefore, untargeted attacks are actually targeted toward the error-prone class, which most commonly is the largest probability class other than the target class. However, if a model lacks an error-prone class given an input, all $w_k$ will be weighted equally. Consequently, the gradient would point toward all negative class prototypes, making it exceptionally challenging to determine the optimal direction. We noticed that such a scenario occurs when a model is trained with a small $\\tau$ . Then let's focus on the gradient update strength. For a data sample $x$ is classified correctly, $\\mathbb{S}(w_j^T \\cdot f(x))$ would be small when the model training temperature $\\tau$ increases. That is, when a model is trained with high temperatures, not only the gradient direction to generate adversarial samples is not clear, but the gradient strength is also small. Both factors contribute to the robustness of the model when optimized with elevated temperatures.",
1147
+ "bbox": [
1148
+ 501,
1149
+ 256,
1150
+ 921,
1151
+ 513
1152
+ ],
1153
+ "page_idx": 5
1154
+ },
1155
+ {
1156
+ "type": "text",
1157
+ "text": "Raw Logit Analysis. With the insight from the gradient analysis on adversarial attack, we then turn to observe the logit output around the adversarial attack, as shown in Fig. 5. Each bar represents the logit value for each class, blue bars stand for the logit outputs of clean samples and orange bars are the logit outputs from adversarial samples. Models share similar characteristics in low temperatures, Fig. 5(a,b), with the logit of the target class going down while the logit of the error-prone class going up. However, for models trained with large temperatures, Fig. 5(c,d), two logits are nearly identical with a minimal amount of changes. This contrasts the robustness gains during adversarial training, where the model learns the pattern of the adversarial noise.",
1158
+ "bbox": [
1159
+ 501,
1160
+ 515,
1161
+ 921,
1162
+ 710
1163
+ ],
1164
+ "page_idx": 5
1165
+ },
1166
+ {
1167
+ "type": "text",
1168
+ "text": "Class Prototypes Analysis. To further analyze the model behavior, we investigate the relation between the encoded feature, $f(x)$ , and each class prototype, $w_{j}$ . Here, we observe the Euclidean distance and cosine similarity. Fig. 6 shows Euclidean distance and cosine similarity between one sample and all class prototypes. It is evident that as the training temperature goes up, the feature $f(x)$ tends to have an identical distance to all negative class prototypes. This indicates the model trained with high temperature is less likely to have an error-prone class, which is essential for untargeted attacks as we discuss above.",
1169
+ "bbox": [
1170
+ 501,
1171
+ 710,
1172
+ 919,
1173
+ 876
1174
+ ],
1175
+ "page_idx": 5
1176
+ },
1177
+ {
1178
+ "type": "text",
1179
+ "text": "Furthermore, to illustrate that the phenomenon shown in Fig. 6 is not limited to one or a few samples, we calculate",
1180
+ "bbox": [
1181
+ 503,
1182
+ 877,
1183
+ 921,
1184
+ 907
1185
+ ],
1186
+ "page_idx": 5
1187
+ },
1188
+ {
1189
+ "type": "image",
1190
+ "img_path": "images/aa2c1d75ea475f7ed7fe57d09e98d5cf7a44778d68ed8cf71cd6f963ded1c788.jpg",
1191
+ "image_caption": [
1192
+ "(a) $\\tau = 0.5$"
1193
+ ],
1194
+ "image_footnote": [],
1195
+ "bbox": [
1196
+ 81,
1197
+ 66,
1198
+ 285,
1199
+ 164
1200
+ ],
1201
+ "page_idx": 6
1202
+ },
1203
+ {
1204
+ "type": "image",
1205
+ "img_path": "images/3629fd12844946645ea809756339577fdd62db320db2569a63a27d59b1b2ccb3.jpg",
1206
+ "image_caption": [
1207
+ "(b) $\\tau = 1$"
1208
+ ],
1209
+ "image_footnote": [],
1210
+ "bbox": [
1211
+ 295,
1212
+ 66,
1213
+ 496,
1214
+ 164
1215
+ ],
1216
+ "page_idx": 6
1217
+ },
1218
+ {
1219
+ "type": "image",
1220
+ "img_path": "images/985add571187b528631bc450f92f52c3d27d5621f5a2e3c77dcc49a940185bb9.jpg",
1221
+ "image_caption": [
1222
+ "(c) $\\tau = 50$"
1223
+ ],
1224
+ "image_footnote": [],
1225
+ "bbox": [
1226
+ 506,
1227
+ 68,
1228
+ 702,
1229
+ 162
1230
+ ],
1231
+ "page_idx": 6
1232
+ },
1233
+ {
1234
+ "type": "image",
1235
+ "img_path": "images/9409bab48ed08788093e934a71f7064a04d3817c6578a689876c69f64f73df20.jpg",
1236
+ "image_caption": [
1237
+ "(d) $\\tau = 100$"
1238
+ ],
1239
+ "image_footnote": [],
1240
+ "bbox": [
1241
+ 718,
1242
+ 68,
1243
+ 915,
1244
+ 162
1245
+ ],
1246
+ "page_idx": 6
1247
+ },
1248
+ {
1249
+ "type": "image",
1250
+ "img_path": "images/1dd3973f76d197569c2a1c67ac82cec2c40bf762712e9dd5f72e6845341c60a5.jpg",
1251
+ "image_caption": [
1252
+ "Fig. 6: A demonstration of the Euclidean distance and cosine similarity between the encoded sample $f(x)$ and all class prototypes for one sample, with different temperature configurations. The red lines indicate the Euclidean distance while the blue lines stand for cosine similarity.",
1253
+ "(a) Euclidean Distance",
1254
+ "Fig. 7: Box plot of the variance of the Euclidean distance and cosine similarity calculated from each sample. The variances are calculated across all negative class prototypes, therefore, lower variance indicates a more uniform distribution of all negative class distances. Each box is a model trained with a different temperature, the green line shows the median value across all variances and the orange line is the mean value of all variances."
1255
+ ],
1256
+ "image_footnote": [],
1257
+ "bbox": [
1258
+ 81,
1259
+ 271,
1260
+ 272,
1261
+ 381
1262
+ ],
1263
+ "page_idx": 6
1264
+ },
1265
+ {
1266
+ "type": "image",
1267
+ "img_path": "images/3d7d168852e7eb4697cf52bb8231a1d61f6283afd5ad7a413c88cc34d324149d.jpg",
1268
+ "image_caption": [
1269
+ "(b) Cosine Similarity"
1270
+ ],
1271
+ "image_footnote": [],
1272
+ "bbox": [
1273
+ 302,
1274
+ 271,
1275
+ 493,
1276
+ 381
1277
+ ],
1278
+ "page_idx": 6
1279
+ },
1280
+ {
1281
+ "type": "text",
1282
+ "text": "the variance of Euclidean distance and cosine similarity of all negative class prototypes across all samples in CIFAR10 test set. Note that as illustrated in Fig. 6, different models have very different ranges for Euclidean distance between encoded feature and class prototypes. Therefore, we map the value of different models into the same range to make a more direct comparison. Box plots are drawn in Fig. 7 showing the overall variance results with each box being a model trained with a different temperature. We can observe a clear trend that when the temperature rises, the variance for both Euclidean distance and cosine similarity drops indicating the encoded sample, $f(x)$ , has a more similar distance to all negative class prototypes. One might notice an increase in variance when the temperature reaches some threshold. We label them as extreme temperatures, which are so large that they can adversely affect the model's convergence.",
1283
+ "bbox": [
1284
+ 73,
1285
+ 547,
1286
+ 490,
1287
+ 790
1288
+ ],
1289
+ "page_idx": 6
1290
+ },
1291
+ {
1292
+ "type": "text",
1293
+ "text": "E. Further Discussion on Adversarial Robustness",
1294
+ "text_level": 1,
1295
+ "bbox": [
1296
+ 73,
1297
+ 797,
1298
+ 413,
1299
+ 811
1300
+ ],
1301
+ "page_idx": 6
1302
+ },
1303
+ {
1304
+ "type": "text",
1305
+ "text": "Despite the model trained with high temperatures showing superb robustness against untargeted PGD attack due to its nature attribute that discovers the weakness of PGD attack, it does not hold robustness against targeted attacks. The reason behind this is straightforward. In targeted attacks, Eqn. 10 no longer holds, and the gradient is not obligated to move",
1306
+ "bbox": [
1307
+ 73,
1308
+ 816,
1309
+ 491,
1310
+ 907
1311
+ ],
1312
+ "page_idx": 6
1313
+ },
1314
+ {
1315
+ "type": "table",
1316
+ "img_path": "images/43acbbd7b7f5f5b2302a4e3bd9084b4105653fba846fd9c6634bcc9e5f9fc423.jpg",
1317
+ "table_caption": [
1318
+ "TABLE III: Preliminary experiments of adversarial training on CIFAR-10 with temperature control. The training scheme uses [18] and the model is ResNet50."
1319
+ ],
1320
+ "table_footnote": [],
1321
+ "table_body": "<table><tr><td>Temp.</td><td>τ = 0.5</td><td>τ = 1</td><td>τ = 10</td><td>τ = 30</td><td>τ = 50</td><td>τ = 70</td><td>τ = 100</td></tr><tr><td>Clean</td><td>88.98</td><td>85.67</td><td>81.71</td><td>82.62</td><td>83.75</td><td>84.28</td><td>84.27</td></tr><tr><td>PGD20</td><td>35.93</td><td>42.63</td><td>40.95</td><td>44.96</td><td>48.61</td><td>49.16</td><td>48.53</td></tr></table>",
1322
+ "bbox": [
1323
+ 506,
1324
+ 325,
1325
+ 916,
1326
+ 405
1327
+ ],
1328
+ "page_idx": 6
1329
+ },
1330
+ {
1331
+ "type": "text",
1332
+ "text": "towards all negative class prototypes with a weighted step size. Therefore, with the only source of the model robustness gained eliminated, it is naturally vulnerable to targeted attacks.",
1333
+ "bbox": [
1334
+ 501,
1335
+ 431,
1336
+ 919,
1337
+ 474
1338
+ ],
1339
+ "page_idx": 6
1340
+ },
1341
+ {
1342
+ "type": "text",
1343
+ "text": "Remark: Even though many attacks claim themselves to be untargeted attacks, they actually optimize toward one self-selected target, which we do not consider untargeted attacks under this setting. One popular example is the Difference of Logits Ratio(DLR) attack proposed by [4]. Regardless of its ability to rescale the logit,",
1344
+ "bbox": [
1345
+ 501,
1346
+ 476,
1347
+ 921,
1348
+ 566
1349
+ ],
1350
+ "page_idx": 6
1351
+ },
1352
+ {
1353
+ "type": "equation",
1354
+ "text": "\n$$\n\\mathrm {D L R} (x, y) = - \\frac {z _ {\\mathrm {y}} - \\max z _ {i}}{z _ {\\pi 1} - z _ {\\pi 3}} \\tag {11}\n$$\n",
1355
+ "text_format": "latex",
1356
+ "bbox": [
1357
+ 619,
1358
+ 571,
1359
+ 921,
1360
+ 611
1361
+ ],
1362
+ "page_idx": 6
1363
+ },
1364
+ {
1365
+ "type": "text",
1366
+ "text": "shows that the DLR loss automatically selects the class holding the largest logit other than the target class as the attack target. Therefore, during optimization, it does not need to optimize toward all negative class prototypes. A similar example also includes FAB attack [3].",
1367
+ "bbox": [
1368
+ 501,
1369
+ 614,
1370
+ 921,
1371
+ 691
1372
+ ],
1373
+ "page_idx": 6
1374
+ },
1375
+ {
1376
+ "type": "text",
1377
+ "text": "F. Extended Experiment on Adversarial Training",
1378
+ "text_level": 1,
1379
+ "bbox": [
1380
+ 504,
1381
+ 702,
1382
+ 836,
1383
+ 717
1384
+ ],
1385
+ "page_idx": 6
1386
+ },
1387
+ {
1388
+ "type": "text",
1389
+ "text": "Given that our temperature control method is used inside the Cross-Entropy Loss, it is possible to apply this method in adversarial training. Here, we do preliminary experiments on the adversarial training baseline proposed by [18] for the simplicity of its loss function. We add temperature control inside vanilla loss term forming",
1390
+ "bbox": [
1391
+ 501,
1392
+ 720,
1393
+ 919,
1394
+ 811
1395
+ ],
1396
+ "page_idx": 6
1397
+ },
1398
+ {
1399
+ "type": "equation",
1400
+ "text": "\n$$\nL _ {A T} (x, x _ {a d v}, y, F) = L _ {c e} (F (x) / \\tau , y) + L _ {c e} (F (x _ {a d v}), y), \\tag {12}\n$$\n",
1401
+ "text_format": "latex",
1402
+ "bbox": [
1403
+ 511,
1404
+ 821,
1405
+ 921,
1406
+ 838
1407
+ ],
1408
+ "page_idx": 6
1409
+ },
1410
+ {
1411
+ "type": "text",
1412
+ "text": "where $F$ is a combination of encoder and class prototypes.",
1413
+ "bbox": [
1414
+ 503,
1415
+ 847,
1416
+ 903,
1417
+ 861
1418
+ ],
1419
+ "page_idx": 6
1420
+ },
1421
+ {
1422
+ "type": "text",
1423
+ "text": "Our preliminary results are listed in Table III. We can clearly observe that model robustness increases as the temperature increases with a slight trade-off with clean accuracy, which",
1424
+ "bbox": [
1425
+ 501,
1426
+ 862,
1427
+ 921,
1428
+ 907
1429
+ ],
1430
+ "page_idx": 6
1431
+ },
1432
+ {
1433
+ "type": "text",
1434
+ "text": "confirms the possibility of combining the temperature control method with adversarial training. While further extension to other adversarial training methods is possible, it remains a complex problem for most adversarial training involves complex loss functions that may introduce terms other than the Cross-Entropy function. Also, balancing the vanilla loss term and adversarial loss term largely relies on empirical experiments. Therefore, further exploration of fitting this into other adversarial training methods falls beyond the scope of this paper.",
1435
+ "bbox": [
1436
+ 73,
1437
+ 61,
1438
+ 491,
1439
+ 213
1440
+ ],
1441
+ "page_idx": 7
1442
+ },
1443
+ {
1444
+ "type": "text",
1445
+ "text": "VI. CONCLUSION & LIMITATION",
1446
+ "text_level": 1,
1447
+ "bbox": [
1448
+ 163,
1449
+ 229,
1450
+ 401,
1451
+ 243
1452
+ ],
1453
+ "page_idx": 7
1454
+ },
1455
+ {
1456
+ "type": "text",
1457
+ "text": "In this paper, we investigate the under-explored property of temperature scaling with the softmax function on image classification tasks. By performing gradient analysis with the Cross-Entropy classification loss and executing different empirical experiments, we show that temperature scaling can be a significant factor in model performance. Further experiments reveal applying high temperatures during training introduces enormous robustness against gradient-based untargeted adversarial attacks. We hope our work raises the interest of other researchers to utilize the simple temperature scaling in the common Cross-Entropy loss.",
1458
+ "bbox": [
1459
+ 73,
1460
+ 252,
1461
+ 491,
1462
+ 417
1463
+ ],
1464
+ "page_idx": 7
1465
+ },
1466
+ {
1467
+ "type": "text",
1468
+ "text": "One limitation of this study was that we didn't report an explicit algorithm to set the best temperature values. We will work on this in our future work. One takehome note, as a hyperparameter, the tuning cost of the temperature is low as a wide range of temperatures (30 to 70) can provide improvements to the model.",
1469
+ "bbox": [
1470
+ 73,
1471
+ 419,
1472
+ 491,
1473
+ 508
1474
+ ],
1475
+ "page_idx": 7
1476
+ },
1477
+ {
1478
+ "type": "text",
1479
+ "text": "REFERENCES",
1480
+ "text_level": 1,
1481
+ "bbox": [
1482
+ 235,
1483
+ 526,
1484
+ 331,
1485
+ 539
1486
+ ],
1487
+ "page_idx": 7
1488
+ },
1489
+ {
1490
+ "type": "list",
1491
+ "sub_type": "ref_text",
1492
+ "list_items": [
1493
+ "[1] Agarwala, A., Pennington, J., Dauphin, Y.N., Schoenholz, S.S.: Temperature check: theory and practice for training models with softmax-cross-entropy losses. CoRR abs/2010.07344 (2020)",
1494
+ "[2] Carlini, N., Wagner, D.: Towards evaluating the robustness of neural networks. In: 2017 IEEE symposium on security and privacy (sp). pp. 39-57. IEEE (2017)",
1495
+ "[3] Croce, F., Hein, M.: Minimally distorted adversarial examples with a fast adaptive boundary attack. In: III, H.D., Singh, A. (eds.) Proceedings of the 37th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 119, pp. 2196-2205. PMLR (13-18 Jul 2020)",
1496
+ "[4] Croce, F., Hein, M.: Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In: III, H.D., Singh, A. (eds.) Proceedings of the 37th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 119, pp. 2206-2216. PMLR (13-18 Jul 2020)",
1497
+ "[5] Devlin, J., Chang, M., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. CoRR abs/1810.04805 (2018)",
1498
+ "[6] Engstrom, L., Ilyas, A., Athalye, A.: Evaluating and understanding the robustness of adversarial logit pairing. arXiv preprint arXiv:1807.10272 (2018)",
1499
+ "[7] Guo, C., Pleiss, G., Sun, Y., Weinberger, K.Q.: On calibration of modern neural networks. In: Precup, D., Teh, Y.W. (eds.) Proceedings of the 34th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 70, pp. 1321-1330. PMLR (06-11 Aug 2017)"
1500
+ ],
1501
+ "bbox": [
1502
+ 84,
1503
+ 551,
1504
+ 491,
1505
+ 906
1506
+ ],
1507
+ "page_idx": 7
1508
+ },
1509
+ {
1510
+ "type": "list",
1511
+ "sub_type": "ref_text",
1512
+ "list_items": [
1513
+ "[8] Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network (2015)",
1514
+ "[9] Hou, P., Han, J., Li, X.: Improving adversarial robustness with self-paced hard-class pair reweighting. In: Proceedings of the Thirty-Seventh AAAI Conference on Artificial Intelligence (2023)",
1515
+ "[10] Kanai, S., Yamada, M., Yamaguchi, S., Takahashi, H., Ida, Y.: Constraining logits by bounded function for adversarial robustness. In: 2021 International Joint Conference on Neural Networks (IJCNN). pp. 1-8. IEEE (2021)",
1516
+ "[11] Krizhevsky, A., Hinton, G., et al.: Learning multiple layers of features from tiny images (2009)",
1517
+ "[12] Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. In: Pereira, F., Burges, C., Bottou, L., Weinberger, K. (eds.) Advances in Neural Information Processing Systems. vol. 25. Curran Associates, Inc. (2012)",
1518
+ "[13] Kull, M., Perello Nieto, M., Kangsepp, M., Silva Filho, T., Song, H., Flach, P.: Beyond temperature scaling: Obtaining well-calibrated multiclass probabilities with dirichlet calibration. In: Wallach, H., Larochelle, H., Beygelzimer, A., d'Alché-Buc, F., Fox, E., Garnett, R. (eds.) Advances in Neural Information Processing Systems. vol. 32. Curran Associates, Inc. (2019)",
1519
+ "[14] Kumar, A., Sarawagi, S., Jain, U.: Trainable calibration measures for neural networks from kernel mean embeddings. In: Dy, J., Krause, A. (eds.) Proceedings of the 35th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 80, pp. 2805-2814. PMLR (10-15 Jul 2018)",
1520
+ "[15] Lakshminarayanan, B., Pritzel, A., Blundell, C.: Simple and scalable predictive uncertainty estimation using deep ensembles. In: Guyon, I., Luxburg, U.V., Bengio, S., Wallach, H., Fergus, R., Vishwanathan, S., Garnett, R. (eds.) Advances in Neural Information Processing Systems. vol. 30. Curran Associates, Inc. (2017)",
1521
+ "[16] Le, Y., Yang, X.: Tiny imagenet visual recognition challenge. CS 231N 7(7), 3 (2015)",
1522
+ "[17] Van der Maaten, L., Hinton, G.: Visualizing data using t-sne. Journal of machine learning research 9(11) (2008)",
1523
+ "[18] Madry, A., Makelov, A., Schmidt, L., Tsipras, D., Vladu, A.: Towards deep learning models resistant to adversarial attacks. In: International Conference on Learning Representations (2018)",
1524
+ "[19] Minderer, M., Djolonga, J., Romijnders, R., Hubis, F., Zhai, X., Houlsby, N., Tran, D., Lucic, M.: Revisiting the calibration of modern neural networks. In: Ranzato, M., Beygelzimer, A., Dauphin, Y., Liang, P., Vaughan, J.W. (eds.) Advances in Neural Information Processing Systems. vol. 34, pp. 15682-15694. Curran Associates, Inc. (2021)",
1525
+ "[20] van den Oord, A., Li, Y., Vinyals, O.: Representation learning with contrastive predictive coding. CoRR abs/1807.03748 (2018)",
1526
+ "[21] Pereyra, G., Tucker, G., Chorowski, J., Kaiser, L., Hinton, G.: Regularizing neural networks by penalizing confident output distributions (2017), https://openreview.net/forum?id=HkCjNI5ex",
1527
+ "[22] Prach, B., Lampert, C.H.: Almost-orthogonal layers for efficient general-purpose lipschitz networks. In: European Conference on Computer Vision. pp. 350–365. Springer (2022)",
1528
+ "[23] Shafahi, A., Ghiasi, A., Huang, F., Goldstein, T.: Label smoothing and logit squeezing: A replacement for adversarial training? (2019)",
1529
+ "[24] Wang, F., Liu, H.: Understanding the behaviour of contrastive loss. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 2495-2504 (June 2021)",
1530
+ "[25] Wang, T., Isola, P.: Understanding contrastive representation learning through alignment and uniformity on the hypersphere. In: III, H.D., Singh, A. (eds.) Proceedings of the 37th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 119, pp. 9929-9939. PMLR (13-18 Jul 2020)"
1531
+ ],
1532
+ "bbox": [
1533
+ 506,
1534
+ 64,
1535
+ 921,
1536
+ 906
1537
+ ],
1538
+ "page_idx": 7
1539
+ },
1540
+ {
1541
+ "type": "list",
1542
+ "sub_type": "ref_text",
1543
+ "list_items": [
1544
+ "[26] Wu, Z., Xiong, Y., Yu, S.X., Lin, D.: Unsupervised feature learning via non-parametric instance discrimination. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (June 2018)",
1545
+ "[27] Zhao, H., Qi, X., Shen, X., Shi, J., Jia, J.: Icnet for real-time semantic segmentation on high-resolution images. In: Proceedings of the European Conference on Computer Vision (ECCV) (September 2018)"
1546
+ ],
1547
+ "bbox": [
1548
+ 76,
1549
+ 63,
1550
+ 491,
1551
+ 159
1552
+ ],
1553
+ "page_idx": 8
1554
+ }
1555
+ ]
data/2025/2502_20xxx/2502.20604/be121c54-f9e2-4c21-8d04-9ea04ee6c28a_model.json ADDED
@@ -0,0 +1,2055 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ [
3
+ {
4
+ "type": "title",
5
+ "bbox": [
6
+ 0.111,
7
+ 0.064,
8
+ 0.888,
9
+ 0.165
10
+ ],
11
+ "angle": 0,
12
+ "content": "Exploring the Impact of Temperature Scaling in Softmax for Classification and Adversarial Robustness"
13
+ },
14
+ {
15
+ "type": "text",
16
+ "bbox": [
17
+ 0.174,
18
+ 0.187,
19
+ 0.277,
20
+ 0.202
21
+ ],
22
+ "angle": 0,
23
+ "content": "\\(1^{\\mathrm{st}}\\) Hao Xuan"
24
+ },
25
+ {
26
+ "type": "text",
27
+ "bbox": [
28
+ 0.095,
29
+ 0.204,
30
+ 0.351,
31
+ 0.22
32
+ ],
33
+ "angle": 0,
34
+ "content": "Electrical and Computer Engineering"
35
+ },
36
+ {
37
+ "type": "text",
38
+ "bbox": [
39
+ 0.155,
40
+ 0.221,
41
+ 0.297,
42
+ 0.235
43
+ ],
44
+ "angle": 0,
45
+ "content": "University of Alberta"
46
+ },
47
+ {
48
+ "type": "text",
49
+ "bbox": [
50
+ 0.157,
51
+ 0.236,
52
+ 0.287,
53
+ 0.249
54
+ ],
55
+ "angle": 0,
56
+ "content": "Edmonton, Canada"
57
+ },
58
+ {
59
+ "type": "text",
60
+ "bbox": [
61
+ 0.16,
62
+ 0.252,
63
+ 0.29,
64
+ 0.264
65
+ ],
66
+ "angle": 0,
67
+ "content": "hxuan@ualberta.ca"
68
+ },
69
+ {
70
+ "type": "text",
71
+ "bbox": [
72
+ 0.44,
73
+ 0.187,
74
+ 0.558,
75
+ 0.204
76
+ ],
77
+ "angle": 0,
78
+ "content": "\\(2^{\\mathrm{nd}}\\) Bokai Yang"
79
+ },
80
+ {
81
+ "type": "text",
82
+ "bbox": [
83
+ 0.369,
84
+ 0.205,
85
+ 0.623,
86
+ 0.219
87
+ ],
88
+ "angle": 0,
89
+ "content": "Electrical and Computer Engineering"
90
+ },
91
+ {
92
+ "type": "text",
93
+ "bbox": [
94
+ 0.428,
95
+ 0.221,
96
+ 0.571,
97
+ 0.235
98
+ ],
99
+ "angle": 0,
100
+ "content": "University of Alberta"
101
+ },
102
+ {
103
+ "type": "text",
104
+ "bbox": [
105
+ 0.43,
106
+ 0.236,
107
+ 0.562,
108
+ 0.249
109
+ ],
110
+ "angle": 0,
111
+ "content": "Edmonton, Canada"
112
+ },
113
+ {
114
+ "type": "text",
115
+ "bbox": [
116
+ 0.43,
117
+ 0.252,
118
+ 0.563,
119
+ 0.264
120
+ ],
121
+ "angle": 0,
122
+ "content": "bokai5@ualberta.ca"
123
+ },
124
+ {
125
+ "type": "text",
126
+ "bbox": [
127
+ 0.72,
128
+ 0.187,
129
+ 0.827,
130
+ 0.204
131
+ ],
132
+ "angle": 0,
133
+ "content": "\\(3^{\\mathrm{rd}}\\) Xingyu Li"
134
+ },
135
+ {
136
+ "type": "text",
137
+ "bbox": [
138
+ 0.644,
139
+ 0.205,
140
+ 0.898,
141
+ 0.219
142
+ ],
143
+ "angle": 0,
144
+ "content": "Electrical and Computer Engineering"
145
+ },
146
+ {
147
+ "type": "text",
148
+ "bbox": [
149
+ 0.703,
150
+ 0.221,
151
+ 0.845,
152
+ 0.234
153
+ ],
154
+ "angle": 0,
155
+ "content": "University of Alberta"
156
+ },
157
+ {
158
+ "type": "text",
159
+ "bbox": [
160
+ 0.705,
161
+ 0.236,
162
+ 0.835,
163
+ 0.249
164
+ ],
165
+ "angle": 0,
166
+ "content": "Edmonton, Canada"
167
+ },
168
+ {
169
+ "type": "text",
170
+ "bbox": [
171
+ 0.704,
172
+ 0.252,
173
+ 0.837,
174
+ 0.265
175
+ ],
176
+ "angle": 0,
177
+ "content": "xingyu@ualberta.ca"
178
+ },
179
+ {
180
+ "type": "text",
181
+ "bbox": [
182
+ 0.075,
183
+ 0.32,
184
+ 0.493,
185
+ 0.587
186
+ ],
187
+ "angle": 0,
188
+ "content": "Abstract—The softmax function is a fundamental component in deep learning. This study delves into the often-overlooked parameter within the softmax function, known as \"temperature,\" providing novel insights into the practical and theoretical aspects of temperature scaling for image classification. Our empirical studies, adopting convolutional neural networks and transformers on multiple benchmark datasets, reveal that moderate temperatures generally introduce better overall performance. Through extensive experiments and rigorous theoretical analysis, we explore the role of temperature scaling in model training and unveil that temperature not only influences learning step size but also shapes the model's optimization direction. Moreover, for the first time, we discover a surprising benefit of elevated temperatures: enhanced model robustness against common corruption, natural perturbation, and non-targeted adversarial attacks like Projected Gradient Descent. We extend our discoveries to adversarial training, demonstrating that, compared to the standard softmax function with the default temperature value, higher temperatures have the potential to enhance adversarial training. The insights of this work open new avenues for improving model performance and security in deep learning applications."
189
+ },
190
+ {
191
+ "type": "title",
192
+ "bbox": [
193
+ 0.218,
194
+ 0.599,
195
+ 0.35,
196
+ 0.613
197
+ ],
198
+ "angle": 0,
199
+ "content": "I. INTRODUCTION"
200
+ },
201
+ {
202
+ "type": "text",
203
+ "bbox": [
204
+ 0.074,
205
+ 0.621,
206
+ 0.493,
207
+ 0.909
208
+ ],
209
+ "angle": 0,
210
+ "content": "Deep learning has achieved dramatic breakthroughs in recent years, excelling in tasks such as image classification [12], nature language processing (NLP) [5], and semantic segmentation [27]. A critical component of most deep learning methods is the softmax function, which normalizes a set of real values into probabilities. The generalized softmax function incorporates a parameter known as \"temperature,\" which controls the softness of the output distribution. Despite its importance in theory, the impact of temperature scaling on classification tasks has been relatively underexplored, particularly in contrast to its use in other areas such as knowledge distillation [8], contrastive learning [24], confidence calibration [21], and natural language processing. Specifically, though the temperature scaling has occasionally been applied in prior experimentation [6], [10], [23], these studies often integrate additional complex techniques such as Gaussian noise injection in [23], adversarial training in [6], [22], and innovative quadratic activation functions in [10], making it challenging to isolate and understand the specific contribution of temperature scaling to the overall system"
211
+ },
212
+ {
213
+ "type": "text",
214
+ "bbox": [
215
+ 0.503,
216
+ 0.319,
217
+ 0.922,
218
+ 0.379
219
+ ],
220
+ "angle": 0,
221
+ "content": "performance. Consequently, the specific role of temperature in classification tasks remains ambiguous. Previous study by [1] has hinted at the potential benefits of temperature scaling, but a comprehensive investigation is still lacking."
222
+ },
223
+ {
224
+ "type": "text",
225
+ "bbox": [
226
+ 0.503,
227
+ 0.38,
228
+ 0.923,
229
+ 0.546
230
+ ],
231
+ "angle": 0,
232
+ "content": "This study aims to fill this gap by conducting extensive experiments to explore the practical and theoretical aspects of temperature scaling in the softmax function for image classification. We employ convolutional neural networks (CNNs) and transformers on multiple benchmark datasets, including CIFAR-10 [11], CIFAR-100 [11], and Tiny-ImageNet [16], to systematically analyze the effects of different temperature values. Our empirical results consistently show that moderate temperatures generally improve overall performance, challenging the conventional knowledge derived from contrastive learning that low temperature facilitates representation learning."
233
+ },
234
+ {
235
+ "type": "text",
236
+ "bbox": [
237
+ 0.504,
238
+ 0.546,
239
+ 0.923,
240
+ 0.666
241
+ ],
242
+ "angle": 0,
243
+ "content": "We also delve into the theoretical underpinnings of temperature scaling in model training. Our analysis reveals that temperature not only influences the learning step size but also shapes the model's optimization direction. Specifically, lower temperatures focus the model's learning on error-prone classes, while higher temperatures promote a more balanced learning across all classes. This insight is crucial for understanding the nuanced effects of temperature scaling on model optimization."
244
+ },
245
+ {
246
+ "type": "text",
247
+ "bbox": [
248
+ 0.504,
249
+ 0.666,
250
+ 0.922,
251
+ 0.801
252
+ ],
253
+ "angle": 0,
254
+ "content": "Furthermore, we uncover a surprising benefit of elevated temperatures: enhanced model robustness against common corruptions, natural perturbations, and non-targeted adversarial attacks, such as Projected Gradient Descent (PGD). We extend our investigation to adversarial training introduced by [18], demonstrating that higher temperatures can potentially enhance the robustness of models trained with adversarial methods compared to those using the standard softmax function with the default temperature."
255
+ },
256
+ {
257
+ "type": "text",
258
+ "bbox": [
259
+ 0.504,
260
+ 0.801,
261
+ 0.922,
262
+ 0.86
263
+ ],
264
+ "angle": 0,
265
+ "content": "In summary, this work provides new perspectives on the practical applications and theoretical implications of temperature scaling in the softmax function. Our contributions can be summarized as follows:"
266
+ },
267
+ {
268
+ "type": "text",
269
+ "bbox": [
270
+ 0.521,
271
+ 0.863,
272
+ 0.922,
273
+ 0.908
274
+ ],
275
+ "angle": 0,
276
+ "content": "- We conduct extensive experiments demonstrating that applying a reasonably large temperature during model training improves overall performance."
277
+ },
278
+ {
279
+ "type": "aside_text",
280
+ "bbox": [
281
+ 0.023,
282
+ 0.266,
283
+ 0.058,
284
+ 0.708
285
+ ],
286
+ "angle": 270,
287
+ "content": "arXiv:2502.20604v1 [cs.LG] 28 Feb 2025"
288
+ }
289
+ ],
290
+ [
291
+ {
292
+ "type": "text",
293
+ "bbox": [
294
+ 0.092,
295
+ 0.063,
296
+ 0.493,
297
+ 0.107
298
+ ],
299
+ "angle": 0,
300
+ "content": "- We discover that models trained with elevated temperatures exhibit enhanced robustness against gradient-based untargeted adversarial attacks."
301
+ },
302
+ {
303
+ "type": "text",
304
+ "bbox": [
305
+ 0.092,
306
+ 0.108,
307
+ 0.493,
308
+ 0.154
309
+ ],
310
+ "angle": 0,
311
+ "content": "- Additionally, we show the potential of integrating temperature control into adversarial training to boost model performance and security in deep learning applications."
312
+ },
313
+ {
314
+ "type": "list",
315
+ "bbox": [
316
+ 0.092,
317
+ 0.063,
318
+ 0.493,
319
+ 0.154
320
+ ],
321
+ "angle": 0,
322
+ "content": null
323
+ },
324
+ {
325
+ "type": "title",
326
+ "bbox": [
327
+ 0.206,
328
+ 0.163,
329
+ 0.362,
330
+ 0.177
331
+ ],
332
+ "angle": 0,
333
+ "content": "II. RELATED WORKS"
334
+ },
335
+ {
336
+ "type": "text",
337
+ "bbox": [
338
+ 0.074,
339
+ 0.183,
340
+ 0.492,
341
+ 0.288
342
+ ],
343
+ "angle": 0,
344
+ "content": "The softmax function has been a longstanding component of neural networks, usually used to normalize a vector of real values into probabilities. Modulating the temperature scaling factor within the softmax function allows for reshaping the probability distribution. This section provides a concise overview of the application of temperature scaling in various computational tasks."
345
+ },
346
+ {
347
+ "type": "text",
348
+ "bbox": [
349
+ 0.074,
350
+ 0.289,
351
+ 0.493,
352
+ 0.515
353
+ ],
354
+ "angle": 0,
355
+ "content": "Knowledge Distillation proposed by [8] is one innovative way to transfer knowledge from a teacher model to a student model. Temperature is utilized during training to control both the student and teacher model's output. The author argues that lower temperatures make the distillation assign less weight to logits that are much smaller than the average. Conversely, employing larger temperatures softens the probability distribution and pays more attention to the unimportant part of the logit. Larger temperatures are proven to be beneficial in the distillation process since the hard-target term already ensures the dominant part of the logit (target class) is correct. By focusing on the remaining logit, the student model can capture more fine-grained information from the teacher model. Note that despite various temperatures used during training, it is set to 1 when the model is deployed."
356
+ },
357
+ {
358
+ "type": "text",
359
+ "bbox": [
360
+ 0.074,
361
+ 0.516,
362
+ 0.493,
363
+ 0.727
364
+ ],
365
+ "angle": 0,
366
+ "content": "Model Confidence Calibration usually utilizes temperature scaling to address the over-confident issue in deep learning [7], [15], [19]. It centers on estimating predictive uncertainty to match its expected accuracy [13], [14]. Despite multiple generic calibration methods being proposed, temperature scaling proposed by [7] remains a baseline method for being simple, effective and able to apply to various cases without major expense. The motivation behind temperature scaling is simple, since the goal is to control the network's confidence to match its accuracy, applying temperature to the softmax function that can directly modify the probability distribution seems a perfect fit for the problem. During training, a validation set is needed to find the ideal temperature parameter for the network, and the same temperature is used when deployed."
367
+ },
368
+ {
369
+ "type": "text",
370
+ "bbox": [
371
+ 0.074,
372
+ 0.727,
373
+ 0.493,
374
+ 0.909
375
+ ],
376
+ "angle": 0,
377
+ "content": "Contrastive Learning is one paradigm for unsupervised learning [20], [26]. To achieve a powerful feature encoder, it utilizes contrastive loss to pull similar samples close and push negative pairs away in the latent space. Although the temperature has long existed as a hyper-parameter in contrastive loss, its actual mechanism is just understudied recently. [24] analyze the contrastive loss closely and find that as the temperature decreases, the distribution of the contrastive loss becomes sharper, which applies larger penalties to samples similar to the anchor data. Also, uniformity of feature distribution increases, indicating the embedding feature distribution aligns with a uniform distribution better [25]."
378
+ },
379
+ {
380
+ "type": "text",
381
+ "bbox": [
382
+ 0.503,
383
+ 0.063,
384
+ 0.925,
385
+ 0.472
386
+ ],
387
+ "angle": 0,
388
+ "content": "Temperature Scaling in Image Classification has occasionally been utilized in the experimental sections of prior studies, yet focused investigations on this subject remain limited. For example, previous studies aiming to improve adversarial robustness have utilized temperature scaling to adjust logits within their experimentation [6], [10], [23]. However, these studies often integrate additional complex techniques such as Gaussian noise injection [23], adversarial training [6], [22], and innovative quadratic activation functions [10], making it challenging to isolate and understand the specific contribution of temperature scaling to the overall system performance. In contrast, our study narrows its focus to investigating the direct impact of temperature scaling applied through the softmax function on model optimization processes. Among the few related works, \"The Temperature Check\" by [1] is notably relevant to our discussion. It mainly explores the dynamics of model training by considering factors such as temperature, learning rate, and time, and presents an empirical finding that a model's generalization performance is significantly influenced by temperature settings. While our observations align with these findings, our research approaches the issue from a different perspective of gradient analysis. Specifically, we delve into how temperature scaling impacts model optimization process. Furthermore, our study broadens the scope of inquiry by assessing the effect of temperature scaling on a model's resilience to common corruptions and adversarial attacks, thereby adding a new dimension to the existing research."
389
+ },
390
+ {
391
+ "type": "title",
392
+ "bbox": [
393
+ 0.646,
394
+ 0.479,
395
+ 0.783,
396
+ 0.492
397
+ ],
398
+ "angle": 0,
399
+ "content": "III. PRELIMINARY"
400
+ },
401
+ {
402
+ "type": "title",
403
+ "bbox": [
404
+ 0.504,
405
+ 0.498,
406
+ 0.651,
407
+ 0.512
408
+ ],
409
+ "angle": 0,
410
+ "content": "A. Softmax Function"
411
+ },
412
+ {
413
+ "type": "text",
414
+ "bbox": [
415
+ 0.504,
416
+ 0.517,
417
+ 0.922,
418
+ 0.56
419
+ ],
420
+ "angle": 0,
421
+ "content": "Given a set of real numbers, \\( X = \\{x_{1},\\dots,x_{N}\\} \\), the generalized softmax function can be used to normalize \\( X \\) into a probability distribution."
422
+ },
423
+ {
424
+ "type": "equation",
425
+ "bbox": [
426
+ 0.637,
427
+ 0.557,
428
+ 0.922,
429
+ 0.589
430
+ ],
431
+ "angle": 0,
432
+ "content": "\\[\n\\mathbb {S} (X) = \\frac {\\exp (X / \\tau)}{\\sum_ {i} \\exp \\left(x _ {i} / \\tau\\right)}, \\tag {1}\n\\]"
433
+ },
434
+ {
435
+ "type": "text",
436
+ "bbox": [
437
+ 0.503,
438
+ 0.594,
439
+ 0.922,
440
+ 0.714
441
+ ],
442
+ "angle": 0,
443
+ "content": "where \\(\\mathbb{S}\\) represents the softmax function and \\(\\tau\\) is the temperature scaling factor. The temperature \\(\\tau\\) controls the smoothness (softness) of the probability it produces. Specifically, when \\(\\tau \\rightarrow \\infty\\), the output tends toward a uniform distribution; while when \\(\\tau = 0\\), the softmax function assigns a probability of 1 to the element with the highest value and a probability of 0 to the rest. The standard (unit) softmax function, with \\(\\tau = 1\\), is widely used in conventional classification tasks."
444
+ },
445
+ {
446
+ "type": "title",
447
+ "bbox": [
448
+ 0.505,
449
+ 0.723,
450
+ 0.753,
451
+ 0.737
452
+ ],
453
+ "angle": 0,
454
+ "content": "B. Problem Definition and Notation"
455
+ },
456
+ {
457
+ "type": "text",
458
+ "bbox": [
459
+ 0.503,
460
+ 0.742,
461
+ 0.922,
462
+ 0.908
463
+ ],
464
+ "angle": 0,
465
+ "content": "We consider multi-category classification in this study, where paired training data \\(\\{\\mathcal{X},\\mathcal{Y}\\} = \\{(x,y)|x\\in \\mathbb{R}^{H\\times L\\times N},y\\in\\) \\(\\mathbb{R}^{1\\times M}\\}\\) are drawn from a data distribution \\(\\mathcal{D}\\). Here, \\(H,L,N\\) are the dimension of a sample \\(x,M\\) is the number of categories, and \\(y\\) is a one-hot vector indicating the class of the input \\(x\\). A classifier, \\(\\mathcal{C}:\\mathcal{X}\\to \\mathcal{Y}\\), is a function predicting the label \\(y\\) for a given data \\(x\\). That is \\(C(x) = y\\). In the canonical classification setting, a neural network classifier, \\(\\mathcal{C} = (f,W)\\), is usually composed of a feature extractor \\(f\\) parameterized by \\(\\theta\\) and a weight matrix \\(W\\). \\(f\\) is a function mapping the input \\(x\\) to a real-valued vector \\(f(x)\\) in the model's penultimate layer and"
466
+ }
467
+ ],
468
+ [
469
+ {
470
+ "type": "text",
471
+ "bbox": [
472
+ 0.075,
473
+ 0.063,
474
+ 0.493,
475
+ 0.108
476
+ ],
477
+ "angle": 0,
478
+ "content": "\\(W = (w_{1},\\dots,w_{M})\\) represents the coefficients of the last linear layer before the softmax layer. So the likelihood probability of data \\(x\\) corresponding to the \\(M\\) categories can be formulated as"
479
+ },
480
+ {
481
+ "type": "equation",
482
+ "bbox": [
483
+ 0.2,
484
+ 0.115,
485
+ 0.493,
486
+ 0.133
487
+ ],
488
+ "angle": 0,
489
+ "content": "\\[\n\\hat {y} = C (x) = \\mathbb {S} \\left(W ^ {T} f (x)\\right). \\tag {2}\n\\]"
490
+ },
491
+ {
492
+ "type": "text",
493
+ "bbox": [
494
+ 0.075,
495
+ 0.141,
496
+ 0.492,
497
+ 0.201
498
+ ],
499
+ "angle": 0,
500
+ "content": "Note that each vector \\( w_{i} \\) in matrix \\( W \\) can be considered as the prototype of class \\( i \\) and the production \\( W^{T}f(x) \\) in Eqn. 2 quantifies the similarity between the feature \\( f(x) \\) and different class-prototypes."
501
+ },
502
+ {
503
+ "type": "text",
504
+ "bbox": [
505
+ 0.075,
506
+ 0.201,
507
+ 0.492,
508
+ 0.232
509
+ ],
510
+ "angle": 0,
511
+ "content": "During training, the model \\( C = (f, W) \\) is optimized to minimize a specific loss, usually a Cross-Entropy (CE) loss."
512
+ },
513
+ {
514
+ "type": "equation",
515
+ "bbox": [
516
+ 0.1,
517
+ 0.237,
518
+ 0.493,
519
+ 0.279
520
+ ],
521
+ "angle": 0,
522
+ "content": "\\[\nL _ {c e} (x) = - y \\log \\hat {y} = - \\log \\left[ \\frac {\\exp \\left(w _ {i} ^ {T} \\cdot f (x) / \\tau\\right)}{\\sum_ {j = 1} ^ {M} \\exp \\left(w _ {j} ^ {T} \\cdot f (x) / \\tau\\right)} \\right] \\tag {3}\n\\]"
523
+ },
524
+ {
525
+ "type": "text",
526
+ "bbox": [
527
+ 0.075,
528
+ 0.284,
529
+ 0.492,
530
+ 0.315
531
+ ],
532
+ "angle": 0,
533
+ "content": "Though \\(\\tau = 1\\) is the default setting in classification tasks, we preserve \\(\\tau\\) in the Eqn.s to facilitate theoretical analysis."
534
+ },
535
+ {
536
+ "type": "title",
537
+ "bbox": [
538
+ 0.19,
539
+ 0.324,
540
+ 0.376,
541
+ 0.337
542
+ ],
543
+ "angle": 0,
544
+ "content": "IV. GRADIENT ANALYSIS"
545
+ },
546
+ {
547
+ "type": "text",
548
+ "bbox": [
549
+ 0.075,
550
+ 0.343,
551
+ 0.493,
552
+ 0.462
553
+ ],
554
+ "angle": 0,
555
+ "content": "To investigate the impact of temperature scaling factors for model optimization in classification tasks, we calculate the loss gradients with respect to the training parameters in the model. Specifically, given a data sample \\( x \\) from the \\( i^{th} \\) category, we refer to \\( w_{i} \\) as the positive class prototype and the rest, \\( w_{j} \\) for \\( j \\neq i \\), as the negative class prototypes. Then the gradients with respect to the positive class prototype, negative class prototypes, and the encoder are:"
556
+ },
557
+ {
558
+ "type": "equation",
559
+ "bbox": [
560
+ 0.077,
561
+ 0.466,
562
+ 0.493,
563
+ 0.512
564
+ ],
565
+ "angle": 0,
566
+ "content": "\\[\n\\frac {\\partial L _ {c e} (x)}{\\partial w _ {i}} = \\frac {1}{\\tau} \\left[ \\mathbb {S} \\left(w _ {i} ^ {T} \\cdot f (x) / \\tau\\right) - 1 \\right] f (x) = \\frac {1}{\\tau} \\left[ P _ {i} ^ {\\tau} (x) - 1 \\right] f (x), \\tag {4}\n\\]"
567
+ },
568
+ {
569
+ "type": "equation",
570
+ "bbox": [
571
+ 0.116,
572
+ 0.518,
573
+ 0.493,
574
+ 0.552
575
+ ],
576
+ "angle": 0,
577
+ "content": "\\[\n\\frac {\\partial L _ {c e} (x)}{\\partial w _ {j}} = \\frac {1}{\\tau} \\mathbb {S} \\left(w _ {j} ^ {T} \\cdot f (x) / \\tau\\right) f (x) = \\frac {1}{\\tau} P _ {j} ^ {\\tau} (x) f (x), \\tag {5}\n\\]"
578
+ },
579
+ {
580
+ "type": "equation",
581
+ "bbox": [
582
+ 0.125,
583
+ 0.558,
584
+ 0.493,
585
+ 0.6
586
+ ],
587
+ "angle": 0,
588
+ "content": "\\[\n\\frac {\\partial L _ {c e} (x)}{\\partial f} = \\frac {1}{\\tau} \\left[ \\sum_ {j \\neq i} w _ {j} P _ {j} ^ {\\tau} (x) - w _ {i} \\left[ 1 - P _ {i} ^ {\\tau} (x) \\right] \\right]. \\tag {6}\n\\]"
589
+ },
590
+ {
591
+ "type": "text",
592
+ "bbox": [
593
+ 0.075,
594
+ 0.606,
595
+ 0.493,
596
+ 0.726
597
+ ],
598
+ "angle": 0,
599
+ "content": "Learning rate: In Eqn. 4, 5, 6, since \\(0 < P_{j}^{\\tau}(x) < 1\\), the actual learning rate is inversely proportional to the temperature \\(\\tau\\). That is, larger temperatures lead to a reduced gradient step in model update, while smaller temperatures increase the gradient step. Furthermore, when the sample \\(x\\) is misclassified, smaller temperatures give a further boost on updating \\(w_{i}\\) and \\(w_{j}\\) for \\(j = \\arg \\max (P_{j}^{\\tau}(x)f(x))\\), because smaller temperatures in the softmax function lead to sharper distributions."
600
+ },
601
+ {
602
+ "type": "text",
603
+ "bbox": [
604
+ 0.074,
605
+ 0.727,
606
+ 0.493,
607
+ 0.909
608
+ ],
609
+ "angle": 0,
610
+ "content": "Optimization direction: From Eqn. 4, the positive class prototype \\( w_{i} \\) is updated toward \\( f(x) \\) in the latent space. In contrast, the negative prototypes \\( w_{j} \\) move away from the direction of \\( f(x) \\) according to Eqn. 5. The optimization direction of \\( f(x) \\) is a weighted sum of all class prototypes, as shown in Eqn. 6. The fundamental optimization policy is to update the trainable parameters of the encoder in such a way that \\( f(x) \\) moves closer to the positive class prototype and farther away from the negative class prototypes in the latent space. However, when we take the temperature parameter into account, we find that temperature has an impact on the update direction of \\( f(x) \\). Specifically, when the temperature is low,"
611
+ },
612
+ {
613
+ "type": "image",
614
+ "bbox": [
615
+ 0.518,
616
+ 0.066,
617
+ 0.707,
618
+ 0.187
619
+ ],
620
+ "angle": 0,
621
+ "content": null
622
+ },
623
+ {
624
+ "type": "image_caption",
625
+ "bbox": [
626
+ 0.573,
627
+ 0.189,
628
+ 0.649,
629
+ 0.202
630
+ ],
631
+ "angle": 0,
632
+ "content": "(a) Small \\(\\tau\\)"
633
+ },
634
+ {
635
+ "type": "image",
636
+ "bbox": [
637
+ 0.721,
638
+ 0.066,
639
+ 0.914,
640
+ 0.188
641
+ ],
642
+ "angle": 0,
643
+ "content": null
644
+ },
645
+ {
646
+ "type": "image_caption",
647
+ "bbox": [
648
+ 0.779,
649
+ 0.189,
650
+ 0.856,
651
+ 0.202
652
+ ],
653
+ "angle": 0,
654
+ "content": "(b) Large \\(\\tau\\)"
655
+ },
656
+ {
657
+ "type": "image_caption",
658
+ "bbox": [
659
+ 0.503,
660
+ 0.211,
661
+ 0.922,
662
+ 0.348
663
+ ],
664
+ "angle": 0,
665
+ "content": "Fig. 1: Demonstration of the model optimization direction with different temperatures. \\( f(x) \\) is the latent code of a data sample from category 3. Since \\( f(x) \\) is close to the negative class prototype \\( w_{1} \\), the CE loss with respect to the encoder \\( f \\) yields a large gradient toward the groundtruth \\( w_{3} \\). However, with different temperature factors, the gradients associated with the negative classes are different: low temperature makes the update more biased by the hard class (a), while an elevated temperature leads to more equalized gradients (b)."
666
+ },
667
+ {
668
+ "type": "text",
669
+ "bbox": [
670
+ 0.503,
671
+ 0.378,
672
+ 0.922,
673
+ 0.604
674
+ ],
675
+ "angle": 0,
676
+ "content": "the probability distribution produced by the softmax function is sharper, leading to significant differences in probability values among different prototypes. Consequently, the update direction of the encoder \\( f \\) is predominantly influenced by the class prototype with the highest probability and the positive class prototype (if they are different). Fig. 1(a) visualizes the bias toward the hard class in model optimization, where \\( f(x) \\) is the latent code of a data sample from category 3. In contrast, when the temperature is high, the differences in probability values among different prototypes are relatively smaller, and the encoder \\( f \\) updates with a mixture of all class prototype directions, as demonstrated in Fig. 1(b). In other words, a low temperature makes the model focus on learning hard-class pairs, while a high temperature de-biases the influence among different classes for a balanced learning."
677
+ },
678
+ {
679
+ "type": "text",
680
+ "bbox": [
681
+ 0.504,
682
+ 0.606,
683
+ 0.922,
684
+ 0.636
685
+ ],
686
+ "angle": 0,
687
+ "content": "Moreover, when considering all the samples in one batch, the compound gradient of all \\(N\\) samples is"
688
+ },
689
+ {
690
+ "type": "equation",
691
+ "bbox": [
692
+ 0.565,
693
+ 0.648,
694
+ 0.922,
695
+ 0.689
696
+ ],
697
+ "angle": 0,
698
+ "content": "\\[\n\\sum_ {n = 1} ^ {N} \\frac {\\partial L _ {c e} \\left(x _ {n}\\right)}{\\partial w _ {i}} = - \\frac {1}{\\tau} \\sum_ {n = 1} ^ {N} f \\left(x _ {n}\\right) \\left[ 1 - P _ {i} ^ {\\tau} \\left(x _ {n}\\right) \\right], \\tag {7}\n\\]"
699
+ },
700
+ {
701
+ "type": "equation",
702
+ "bbox": [
703
+ 0.591,
704
+ 0.707,
705
+ 0.922,
706
+ 0.749
707
+ ],
708
+ "angle": 0,
709
+ "content": "\\[\n\\sum_ {n = 1} ^ {N} \\frac {\\partial L _ {c e} (x _ {n})}{\\partial w _ {k}} = \\frac {1}{\\tau} \\sum_ {n = 1} ^ {N} f (x _ {n}) P _ {k} ^ {\\tau} (x _ {n}), \\tag {8}\n\\]"
710
+ },
711
+ {
712
+ "type": "equation",
713
+ "bbox": [
714
+ 0.517,
715
+ 0.765,
716
+ 0.922,
717
+ 0.821
718
+ ],
719
+ "angle": 0,
720
+ "content": "\\[\n\\sum_ {n = 1} ^ {N} \\frac {\\partial L _ {c e} (x _ {n})}{\\partial f} = \\frac {1}{\\tau} \\sum_ {n = 1} ^ {N} \\left[ \\sum_ {k \\neq i} w _ {k} P _ {k} ^ {\\tau} (x _ {n}) - w _ {i} \\left[ 1 - P _ {i} ^ {\\tau} (x _ {n}) \\right] \\right]. \\tag {9}\n\\]"
721
+ },
722
+ {
723
+ "type": "text",
724
+ "bbox": [
725
+ 0.503,
726
+ 0.832,
727
+ 0.922,
728
+ 0.908
729
+ ],
730
+ "angle": 0,
731
+ "content": "Similar to the single sample case, when optimizing in a whole batch, with small temperatures, the model focuses on learning misclassified samples (i.e. hard samples), whereas higher temperatures help de-bias the update direction and distribute similar weight to all samples."
732
+ }
733
+ ],
734
+ [
735
+ {
736
+ "type": "image",
737
+ "bbox": [
738
+ 0.096,
739
+ 0.068,
740
+ 0.331,
741
+ 0.205
742
+ ],
743
+ "angle": 0,
744
+ "content": null
745
+ },
746
+ {
747
+ "type": "image_caption",
748
+ "bbox": [
749
+ 0.165,
750
+ 0.217,
751
+ 0.261,
752
+ 0.234
753
+ ],
754
+ "angle": 0,
755
+ "content": "(a) \\(\\tau = 0.5\\)"
756
+ },
757
+ {
758
+ "type": "image",
759
+ "bbox": [
760
+ 0.373,
761
+ 0.068,
762
+ 0.615,
763
+ 0.204
764
+ ],
765
+ "angle": 0,
766
+ "content": null
767
+ },
768
+ {
769
+ "type": "image_caption",
770
+ "bbox": [
771
+ 0.458,
772
+ 0.217,
773
+ 0.538,
774
+ 0.234
775
+ ],
776
+ "angle": 0,
777
+ "content": "(b) \\(\\tau = 1\\)"
778
+ },
779
+ {
780
+ "type": "image",
781
+ "bbox": [
782
+ 0.664,
783
+ 0.068,
784
+ 0.898,
785
+ 0.204
786
+ ],
787
+ "angle": 0,
788
+ "content": null
789
+ },
790
+ {
791
+ "type": "image_caption",
792
+ "bbox": [
793
+ 0.739,
794
+ 0.217,
795
+ 0.828,
796
+ 0.234
797
+ ],
798
+ "angle": 0,
799
+ "content": "(c) \\(\\tau = 50\\)"
800
+ },
801
+ {
802
+ "type": "image_caption",
803
+ "bbox": [
804
+ 0.074,
805
+ 0.243,
806
+ 0.921,
807
+ 0.259
808
+ ],
809
+ "angle": 0,
810
+ "content": "Fig. 2: T-SNE [17] visualization of the CIFAR10 sample distribution after the ResNet50 encoder with different temperatures."
811
+ },
812
+ {
813
+ "type": "image",
814
+ "bbox": [
815
+ 0.091,
816
+ 0.279,
817
+ 0.331,
818
+ 0.415
819
+ ],
820
+ "angle": 0,
821
+ "content": null
822
+ },
823
+ {
824
+ "type": "image_caption",
825
+ "bbox": [
826
+ 0.166,
827
+ 0.428,
828
+ 0.262,
829
+ 0.445
830
+ ],
831
+ "angle": 0,
832
+ "content": "(a) \\(\\tau = 0.5\\)"
833
+ },
834
+ {
835
+ "type": "image",
836
+ "bbox": [
837
+ 0.377,
838
+ 0.279,
839
+ 0.615,
840
+ 0.414
841
+ ],
842
+ "angle": 0,
843
+ "content": null
844
+ },
845
+ {
846
+ "type": "image_caption",
847
+ "bbox": [
848
+ 0.458,
849
+ 0.428,
850
+ 0.538,
851
+ 0.445
852
+ ],
853
+ "angle": 0,
854
+ "content": "(b) \\(\\tau = 1\\)"
855
+ },
856
+ {
857
+ "type": "image",
858
+ "bbox": [
859
+ 0.665,
860
+ 0.279,
861
+ 0.9,
862
+ 0.414
863
+ ],
864
+ "angle": 0,
865
+ "content": null
866
+ },
867
+ {
868
+ "type": "image_caption",
869
+ "bbox": [
870
+ 0.739,
871
+ 0.428,
872
+ 0.828,
873
+ 0.444
874
+ ],
875
+ "angle": 0,
876
+ "content": "(c) \\(\\tau = 50\\)"
877
+ },
878
+ {
879
+ "type": "image_caption",
880
+ "bbox": [
881
+ 0.092,
882
+ 0.455,
883
+ 0.901,
884
+ 0.471
885
+ ],
886
+ "angle": 0,
887
+ "content": "Fig. 3: T-SNE [17] visualization of the CIFAR10 sample distribution after the VIT encoder with different temperatures."
888
+ },
889
+ {
890
+ "type": "title",
891
+ "bbox": [
892
+ 0.128,
893
+ 0.498,
894
+ 0.438,
895
+ 0.512
896
+ ],
897
+ "angle": 0,
898
+ "content": "V. EMPIRICAL ANALYSIS AND DISCUSSION"
899
+ },
900
+ {
901
+ "type": "text",
902
+ "bbox": [
903
+ 0.074,
904
+ 0.518,
905
+ 0.492,
906
+ 0.669
907
+ ],
908
+ "angle": 0,
909
+ "content": "As discussed in Section 4, applying a small temperature encourages a model to learn more about hard (misclassified) samples and hard (error-prone) classes. A high temperature, however, leads to more equitable learning across different classes and data points. Theoretically, both approaches to optimize feature distribution sound reasonable, with low temperatures focusing on weaker classes and high temperatures decreasing inequality across all negative classes. We argue that which optimization strategy is better for classification tasks remains an empirical problem."
910
+ },
911
+ {
912
+ "type": "title",
913
+ "bbox": [
914
+ 0.075,
915
+ 0.678,
916
+ 0.229,
917
+ 0.693
918
+ ],
919
+ "angle": 0,
920
+ "content": "A. Experiment Setting"
921
+ },
922
+ {
923
+ "type": "text",
924
+ "bbox": [
925
+ 0.074,
926
+ 0.697,
927
+ 0.492,
928
+ 0.832
929
+ ],
930
+ "angle": 0,
931
+ "content": "We conduct image classification on multiple benchmarks (i.e. CIFAR10, CIFAR100, and Tiny-ImageNet) and their extended Common Corruptions and Perturbations sets (i.e. CIFAR10-C, CIFAR100-C, and Tiny-ImageNet-C with corruption strength being 3) to investigate the impact of temperature scaling. In addition, we also evaluate the model's robustness against adversarial attacks such as PGD20 [18] and C&W [2]. Both attacks are bounded by the \\( l_{\\infty} \\) box with the same maximum perturbation \\( \\epsilon = 8 / 255 \\)."
932
+ },
933
+ {
934
+ "type": "text",
935
+ "bbox": [
936
+ 0.075,
937
+ 0.833,
938
+ 0.492,
939
+ 0.909
940
+ ],
941
+ "angle": 0,
942
+ "content": "To get a comprehensive evaluation, we set \\(\\tau \\in \\{0.1, 0.5, 1, 10, 30, 50, 70, 100\\}\\). Unless stated otherwise, we take ResNet50 and VIT-small-patch16-224 as the CNN and transformer backbones, respectively. The ResNet50 is trained from scratch, with SGD optimizer and learning rate set to"
943
+ },
944
+ {
945
+ "type": "text",
946
+ "bbox": [
947
+ 0.504,
948
+ 0.498,
949
+ 0.922,
950
+ 0.558
951
+ ],
952
+ "angle": 0,
953
+ "content": "0.1. We also utilize the Cosine Annealing scheduler to better train the model. The transformer is pretrained on ImageNet-21K and finetuned on the target dataset using Adam optimizer. All experiments run on one RTX3090."
954
+ },
955
+ {
956
+ "type": "text",
957
+ "bbox": [
958
+ 0.504,
959
+ 0.559,
960
+ 0.922,
961
+ 0.65
962
+ ],
963
+ "angle": 0,
964
+ "content": "To clarify, temperature scaling is only involved in model training in this study, not in model evaluation and attacks. All empirical evaluation and adversarial sample generation by PGD and C&W are based on the standard cross entropy, i.e. \\(\\tau = 1\\). Thus, attack gradients are not attenuated, reflecting the model's true sensitivity to data perturbation."
965
+ },
966
+ {
967
+ "type": "title",
968
+ "bbox": [
969
+ 0.505,
970
+ 0.661,
971
+ 0.661,
972
+ 0.675
973
+ ],
974
+ "angle": 0,
975
+ "content": "B. Experiment Results"
976
+ },
977
+ {
978
+ "type": "text",
979
+ "bbox": [
980
+ 0.503,
981
+ 0.681,
982
+ 0.922,
983
+ 0.908
984
+ ],
985
+ "angle": 0,
986
+ "content": "The quantitative results on CNN and Transformer are summarized in Table I and Table II, respectively. For the CNN model, ResNet50, training from scratch, the standard accuracy increases with the temperature increase. Furthermore, CNN models trained at elevated temperatures show more robustness against naturally corrupted images. We believe that such improvements are majorly attributed to better model optimization with elevated temperature. For the transformer finetuned on the target set, the standard accuracy and robustness against natural corruptions and perturbations is quite stable. We hypothesize that such stable performance is due to the fact that ViT has already been pre-trained on ImageNet and has reached a relatively high-quality state. Additionally, we observed that the model's adversarial robustness gradually improves with increasing temperature."
987
+ }
988
+ ],
989
+ [
990
+ {
991
+ "type": "table_caption",
992
+ "bbox": [
993
+ 0.074,
994
+ 0.058,
995
+ 0.924,
996
+ 0.102
997
+ ],
998
+ "angle": 0,
999
+ "content": "TABLE I: Model performance and Robustness against Common Corruptions and Adversarial attacks (%) under different temperatures with ResNet50 trained from scratch. -C in the table represents the corresponding Common Corruptions and Perturbations set."
1000
+ },
1001
+ {
1002
+ "type": "table",
1003
+ "bbox": [
1004
+ 0.194,
1005
+ 0.11,
1006
+ 0.803,
1007
+ 0.316
1008
+ ],
1009
+ "angle": 0,
1010
+ "content": "<table><tr><td rowspan=\"2\">Temp.</td><td colspan=\"4\">CIFAR10</td><td colspan=\"4\">CIFAR100</td><td colspan=\"4\">Tiny-Imagenet</td></tr><tr><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td></tr><tr><td>τ = 0.1</td><td>90.05</td><td>73.31</td><td>0</td><td>27.79</td><td>70.39</td><td>44.52</td><td>0</td><td>14.32</td><td>54.53</td><td>12.63</td><td>0</td><td>23.17</td></tr><tr><td>τ = 0.5</td><td>94.17</td><td>72.51</td><td>0</td><td>16.03</td><td>74.79</td><td>45.41</td><td>0</td><td>8.44</td><td>61.07</td><td>18.55</td><td>0</td><td>19.44</td></tr><tr><td>τ = 1</td><td>94.26</td><td>72.53</td><td>0</td><td>19.19</td><td>74.58</td><td>46.47</td><td>0</td><td>11.26</td><td>62.93</td><td>18.66</td><td>0</td><td>19.09</td></tr><tr><td>τ = 10</td><td>95.41</td><td>73.94</td><td>0.56</td><td>39.79</td><td>78.21</td><td>50.67</td><td>0.29</td><td>15.33</td><td>64.70</td><td>21.66</td><td>2.59</td><td>23.88</td></tr><tr><td>τ = 30</td><td>95.26</td><td>74.93</td><td>91.09</td><td>43.35</td><td>78.27</td><td>50.17</td><td>68.47</td><td>18.81</td><td>63.60</td><td>21.30</td><td>49.45</td><td>26.50</td></tr><tr><td>τ = 50</td><td>94.92</td><td>74.44</td><td>93.04</td><td>36.13</td><td>77.97</td><td>49.87</td><td>72.92</td><td>20.50</td><td>62.85</td><td>20.40</td><td>54.95</td><td>28.68</td></tr><tr><td>τ = 70</td><td>95.05</td><td>74.26</td><td>93.85</td><td>35.43</td><td>77.20</td><td>49.61</td><td>73.49</td><td>21.66</td><td>62.14</td><td>20.57</td><td>55.54</td><td>30.14</td></tr><tr><td>τ = 100</td><td>95.05</td><td>73.08</td><td>94.29</td><td>37.32</td><td>77.14</td><td>49.31</td><td>73.65</td><td>22.83</td><td>61.46</td><td>18.82</td><td>54.60</td><td>32.71</td></tr></table>"
1011
+ },
1012
+ {
1013
+ "type": "table_caption",
1014
+ "bbox": [
1015
+ 0.075,
1016
+ 0.336,
1017
+ 0.495,
1018
+ 0.412
1019
+ ],
1020
+ "angle": 0,
1021
+ "content": "TABLE II: Model performance and Robustness against Common Corruptions and Adversarial attacks (\\%) under different temperatures with Transformer Vit-small-patch16-224. -C in the table represents the corresponding Common Corruptions and Perturbations set."
1022
+ },
1023
+ {
1024
+ "type": "table",
1025
+ "bbox": [
1026
+ 0.086,
1027
+ 0.419,
1028
+ 0.485,
1029
+ 0.626
1030
+ ],
1031
+ "angle": 0,
1032
+ "content": "<table><tr><td rowspan=\"2\">Temp.</td><td colspan=\"4\">CIFAR10</td><td colspan=\"4\">CIFAR100</td></tr><tr><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td></tr><tr><td>τ = 0.1</td><td>98.45</td><td>92.83</td><td>0</td><td>26.13</td><td>89.79</td><td>74.7</td><td>0</td><td>23.71</td></tr><tr><td>τ = 0.5</td><td>98.33</td><td>91.60</td><td>0</td><td>26.26</td><td>90.53</td><td>74.9</td><td>0</td><td>29.25</td></tr><tr><td>τ = 1</td><td>98.29</td><td>92.21</td><td>0</td><td>31.69</td><td>90.78</td><td>75.5</td><td>0</td><td>31.97</td></tr><tr><td>τ = 10</td><td>98.06</td><td>92.19</td><td>89.07</td><td>31.89</td><td>89.94</td><td>75.5</td><td>58.71</td><td>34.96</td></tr><tr><td>τ = 30</td><td>98.23</td><td>91.72</td><td>97.10</td><td>38.21</td><td>89.52</td><td>74.6</td><td>86.25</td><td>36.07</td></tr><tr><td>τ = 50</td><td>98.22</td><td>91.43</td><td>97.75</td><td>39.52</td><td>89.28</td><td>73.8</td><td>87.29</td><td>33.64</td></tr><tr><td>τ = 70</td><td>98.03</td><td>91.20</td><td>97.72</td><td>39.02</td><td>89.48</td><td>74.2</td><td>87.96</td><td>33.81</td></tr><tr><td>τ = 100</td><td>98.07</td><td>91.56</td><td>97.87</td><td>38.26</td><td>89.13</td><td>73.47</td><td>86.99</td><td>31.84</td></tr></table>"
1033
+ },
1034
+ {
1035
+ "type": "text",
1036
+ "bbox": [
1037
+ 0.074,
1038
+ 0.651,
1039
+ 0.493,
1040
+ 0.831
1041
+ ],
1042
+ "angle": 0,
1043
+ "content": "Clustering is a crucial metric when measuring how an encoder performs. In classification, a good encoder should be able to gather samples from the same class while separating clusters of different classes. Fig. 2 and Fig. 3 present 2D TSNE visualization of the CIFAR10 sample distribution by ResNet50 and transformer. We observe a similar trend: low temperatures lead to more mixed clusters, while models trained with elevated temperatures have better cluster effects. These empirical observations also explain the improved classification performance on clean and non-adversarial perturbations, as well as stronger adversarial robustness, with high temperature in Table I and Table II."
1044
+ },
1045
+ {
1046
+ "type": "title",
1047
+ "bbox": [
1048
+ 0.076,
1049
+ 0.843,
1050
+ 0.251,
1051
+ 0.858
1052
+ ],
1053
+ "angle": 0,
1054
+ "content": "C. Training Convergence"
1055
+ },
1056
+ {
1057
+ "type": "text",
1058
+ "bbox": [
1059
+ 0.074,
1060
+ 0.863,
1061
+ 0.492,
1062
+ 0.909
1063
+ ],
1064
+ "angle": 0,
1065
+ "content": "We then conduct experiments observing the training process when applying different temperatures to the model. We validate the model on the test set every epoch and record the error"
1066
+ },
1067
+ {
1068
+ "type": "image",
1069
+ "bbox": [
1070
+ 0.511,
1071
+ 0.344,
1072
+ 0.71,
1073
+ 0.45
1074
+ ],
1075
+ "angle": 0,
1076
+ "content": null
1077
+ },
1078
+ {
1079
+ "type": "image_caption",
1080
+ "bbox": [
1081
+ 0.558,
1082
+ 0.451,
1083
+ 0.674,
1084
+ 0.461
1085
+ ],
1086
+ "angle": 0,
1087
+ "content": "(a) Learning Rate \\(= 0.1\\)"
1088
+ },
1089
+ {
1090
+ "type": "image",
1091
+ "bbox": [
1092
+ 0.717,
1093
+ 0.344,
1094
+ 0.916,
1095
+ 0.45
1096
+ ],
1097
+ "angle": 0,
1098
+ "content": null
1099
+ },
1100
+ {
1101
+ "type": "image_caption",
1102
+ "bbox": [
1103
+ 0.761,
1104
+ 0.451,
1105
+ 0.884,
1106
+ 0.461
1107
+ ],
1108
+ "angle": 0,
1109
+ "content": "(b) Learning Rate \\(= 0.01\\)"
1110
+ },
1111
+ {
1112
+ "type": "image_caption",
1113
+ "bbox": [
1114
+ 0.503,
1115
+ 0.469,
1116
+ 0.922,
1117
+ 0.576
1118
+ ],
1119
+ "angle": 0,
1120
+ "content": "Fig. 4: Test error number during training. The red line represents \\(\\tau = 0.5\\), the green line represents \\(\\tau = 1\\), and the orange line represents \\(\\tau = 50\\). The model used is Resnet50 and is tested on CIFAR10. SGD optimizer is used during training with the learning rate set to 0.1 (a) and 0.01 (b). The shade areas consist of 6 total runs with different random seeds. The solid lines indicate the mean value across all runs."
1121
+ },
1122
+ {
1123
+ "type": "text",
1124
+ "bbox": [
1125
+ 0.503,
1126
+ 0.605,
1127
+ 0.924,
1128
+ 0.77
1129
+ ],
1130
+ "angle": 0,
1131
+ "content": "probability. As our results shown in Fig. 4(a), we can clearly observe that not only does the training convergence speed increase as the temperature goes up, but models trained with higher temperatures also tend to converge to lower points, leading to better final performance. In fact, when we further decrease the temperature to around 0.1, the model would have a substantial risk of not converging at all. While this might appear contrary to the common understanding that focusing on hard classes will generally benefit the model, a more nuanced explanation is provided by delving further into the gradient analysis provided in Section 4."
1132
+ },
1133
+ {
1134
+ "type": "text",
1135
+ "bbox": [
1136
+ 0.503,
1137
+ 0.772,
1138
+ 0.923,
1139
+ 0.909
1140
+ ],
1141
+ "angle": 0,
1142
+ "content": "From Eqn. 4, 5, 6, we observe that if the logit of the target class is not the largest, its gradient will increase dramatically with low temperatures. This is potentially bad for models being known to converge inefficiently under large learning rates. One straightforward solution would be lowering the learning rate as shown in Fig. 4. While the training converging speeds are closer, the run with a higher temperature can still reach a better performance. Furthermore, regardless of the increase in overall training converging speed for \\(\\tau = 0.5\\) and \\(\\tau = 1\\) runs"
1143
+ }
1144
+ ],
1145
+ [
1146
+ {
1147
+ "type": "image",
1148
+ "bbox": [
1149
+ 0.084,
1150
+ 0.071,
1151
+ 0.274,
1152
+ 0.168
1153
+ ],
1154
+ "angle": 0,
1155
+ "content": null
1156
+ },
1157
+ {
1158
+ "type": "image_caption",
1159
+ "bbox": [
1160
+ 0.133,
1161
+ 0.174,
1162
+ 0.23,
1163
+ 0.19
1164
+ ],
1165
+ "angle": 0,
1166
+ "content": "(a) \\(\\tau = 0.5\\)"
1167
+ },
1168
+ {
1169
+ "type": "image",
1170
+ "bbox": [
1171
+ 0.297,
1172
+ 0.071,
1173
+ 0.485,
1174
+ 0.168
1175
+ ],
1176
+ "angle": 0,
1177
+ "content": null
1178
+ },
1179
+ {
1180
+ "type": "image_caption",
1181
+ "bbox": [
1182
+ 0.357,
1183
+ 0.174,
1184
+ 0.436,
1185
+ 0.19
1186
+ ],
1187
+ "angle": 0,
1188
+ "content": "(b) \\(\\tau = 1\\)"
1189
+ },
1190
+ {
1191
+ "type": "image",
1192
+ "bbox": [
1193
+ 0.513,
1194
+ 0.071,
1195
+ 0.701,
1196
+ 0.168
1197
+ ],
1198
+ "angle": 0,
1199
+ "content": null
1200
+ },
1201
+ {
1202
+ "type": "image_caption",
1203
+ "bbox": [
1204
+ 0.569,
1205
+ 0.174,
1206
+ 0.657,
1207
+ 0.19
1208
+ ],
1209
+ "angle": 0,
1210
+ "content": "(c) \\(\\tau = 50\\)"
1211
+ },
1212
+ {
1213
+ "type": "image",
1214
+ "bbox": [
1215
+ 0.729,
1216
+ 0.072,
1217
+ 0.915,
1218
+ 0.168
1219
+ ],
1220
+ "angle": 0,
1221
+ "content": null
1222
+ },
1223
+ {
1224
+ "type": "image_caption",
1225
+ "bbox": [
1226
+ 0.779,
1227
+ 0.174,
1228
+ 0.878,
1229
+ 0.19
1230
+ ],
1231
+ "angle": 0,
1232
+ "content": "(d) \\(\\tau = 100\\)"
1233
+ },
1234
+ {
1235
+ "type": "image_caption",
1236
+ "bbox": [
1237
+ 0.074,
1238
+ 0.2,
1239
+ 0.924,
1240
+ 0.231
1241
+ ],
1242
+ "angle": 0,
1243
+ "content": "Fig. 5: The logit changes before and after PGD20 attack. The blue lines stand for the logits of the samples before PGD attack, and the orange lines stand for the logits of the samples after PGD attack."
1244
+ },
1245
+ {
1246
+ "type": "text",
1247
+ "bbox": [
1248
+ 0.074,
1249
+ 0.258,
1250
+ 0.493,
1251
+ 0.531
1252
+ ],
1253
+ "angle": 0,
1254
+ "content": "when lowering the learning rate, the final performances for all three runs actually get worse than runs with 0.1 learning rate. Therefore, this phenomenon cannot be attributed solely to a high learning rate. However, if we shift our perspective to the overall direction for optimization as done in Eqn. 9, it becomes clear that during the early stage of training, the encoder \\( f \\) has not converged to an ideal point, leading to sub-optimal values produced for certain update directions. If this happens to be the direction of the target class and the error-prone class which models with small temperatures tend to focus on, the model training can be impacted harmfully. In the meantime, high temperatures equalize the weight given to all the classes and ensure the update is not terribly wrong even if a few \\( \\partial L_{ce}(x) / \\partial w_j \\) are in the wrong direction. Upon reaching this conclusion, we are surprised to find that this reasoning and our empirical observations align perfectly with the curriculum learning philosophy, that starting from hard samples may harm model optimization and learning outcomes."
1255
+ },
1256
+ {
1257
+ "type": "title",
1258
+ "bbox": [
1259
+ 0.075,
1260
+ 0.542,
1261
+ 0.262,
1262
+ 0.556
1263
+ ],
1264
+ "angle": 0,
1265
+ "content": "D. Adversarial Robustness"
1266
+ },
1267
+ {
1268
+ "type": "text",
1269
+ "bbox": [
1270
+ 0.074,
1271
+ 0.562,
1272
+ 0.491,
1273
+ 0.667
1274
+ ],
1275
+ "angle": 0,
1276
+ "content": "Table I and Table II show that models trained with elevated temperatures have strong adversarial robustness. TSNE plots in Fig. 2 and Fig. 3 also support this observation. This prompts questions regarding the mechanism behind the gained robustness. In this section, our focus is on investigating the model's behavior under adversarial attacks and understanding why the model demonstrates such robustness."
1277
+ },
1278
+ {
1279
+ "type": "text",
1280
+ "bbox": [
1281
+ 0.074,
1282
+ 0.668,
1283
+ 0.493,
1284
+ 0.743
1285
+ ],
1286
+ "angle": 0,
1287
+ "content": "Gradient analysis for adversarial generation. In order to discern the source of model robustness, we follow the work in [9] and study the gradient of the classification loss with respect to the input to analyze the direction of the PGD attack, which can be written as"
1288
+ },
1289
+ {
1290
+ "type": "equation",
1291
+ "bbox": [
1292
+ 0.144,
1293
+ 0.749,
1294
+ 0.491,
1295
+ 0.81
1296
+ ],
1297
+ "angle": 0,
1298
+ "content": "\\[\n\\begin{array}{l} \\frac {\\partial L _ {c e}}{\\partial x} = \\left[ \\left(\\mathbb {S} \\left(w _ {i} ^ {T} \\cdot f (x)\\right) - 1\\right) \\cdot w _ {i} ^ {T} + \\right. \\\\ \\left. \\sum_ {j \\neq i} w _ {j} ^ {T} \\cdot \\mathbb {S} \\left(w _ {j} ^ {T} \\cdot f (x)\\right) \\right] \\cdot \\frac {\\partial f (x)}{\\partial x} \\tag {10} \\\\ \\end{array}\n\\]"
1299
+ },
1300
+ {
1301
+ "type": "text",
1302
+ "bbox": [
1303
+ 0.074,
1304
+ 0.817,
1305
+ 0.493,
1306
+ 0.909
1307
+ ],
1308
+ "angle": 0,
1309
+ "content": "As illustrated above, given a well-trained model, for most inputs where \\(\\mathbb{S}(w_i^T\\cdot f(x))\\approx 1\\), the gradient does not have a noticeable portion in target class \\(w_{i}\\) on the early stage of the attack. This implies that rather than directly 'stepping away' from the target class, the attack will initially focus on approaching other class prototypes. Moreover, the second term,"
1310
+ },
1311
+ {
1312
+ "type": "text",
1313
+ "bbox": [
1314
+ 0.503,
1315
+ 0.257,
1316
+ 0.922,
1317
+ 0.515
1318
+ ],
1319
+ "angle": 0,
1320
+ "content": "\\(\\sum_{j \\neq i} w_j^T \\cdot \\mathbb{S}(w_j^T \\cdot f(x))\\), indicates that all the other directions are weighted by their according probabilities. Therefore, untargeted attacks are actually targeted toward the error-prone class, which most commonly is the largest probability class other than the target class. However, if a model lacks an error-prone class given an input, all \\(w_k\\) will be weighted equally. Consequently, the gradient would point toward all negative class prototypes, making it exceptionally challenging to determine the optimal direction. We noticed that such a scenario occurs when a model is trained with a small \\(\\tau\\). Then let's focus on the gradient update strength. For a data sample \\(x\\) is classified correctly, \\(\\mathbb{S}(w_j^T \\cdot f(x))\\) would be small when the model training temperature \\(\\tau\\) increases. That is, when a model is trained with high temperatures, not only the gradient direction to generate adversarial samples is not clear, but the gradient strength is also small. Both factors contribute to the robustness of the model when optimized with elevated temperatures."
1321
+ },
1322
+ {
1323
+ "type": "text",
1324
+ "bbox": [
1325
+ 0.503,
1326
+ 0.516,
1327
+ 0.922,
1328
+ 0.711
1329
+ ],
1330
+ "angle": 0,
1331
+ "content": "Raw Logit Analysis. With the insight from the gradient analysis on adversarial attack, we then turn to observe the logit output around the adversarial attack, as shown in Fig. 5. Each bar represents the logit value for each class, blue bars stand for the logit outputs of clean samples and orange bars are the logit outputs from adversarial samples. Models share similar characteristics in low temperatures, Fig. 5(a,b), with the logit of the target class going down while the logit of the error-prone class going up. However, for models trained with large temperatures, Fig. 5(c,d), two logits are nearly identical with a minimal amount of changes. This contrasts the robustness gains during adversarial training, where the model learns the pattern of the adversarial noise."
1332
+ },
1333
+ {
1334
+ "type": "text",
1335
+ "bbox": [
1336
+ 0.503,
1337
+ 0.712,
1338
+ 0.92,
1339
+ 0.877
1340
+ ],
1341
+ "angle": 0,
1342
+ "content": "Class Prototypes Analysis. To further analyze the model behavior, we investigate the relation between the encoded feature, \\( f(x) \\), and each class prototype, \\( w_{j} \\). Here, we observe the Euclidean distance and cosine similarity. Fig. 6 shows Euclidean distance and cosine similarity between one sample and all class prototypes. It is evident that as the training temperature goes up, the feature \\( f(x) \\) tends to have an identical distance to all negative class prototypes. This indicates the model trained with high temperature is less likely to have an error-prone class, which is essential for untargeted attacks as we discuss above."
1343
+ },
1344
+ {
1345
+ "type": "text",
1346
+ "bbox": [
1347
+ 0.504,
1348
+ 0.878,
1349
+ 0.922,
1350
+ 0.908
1351
+ ],
1352
+ "angle": 0,
1353
+ "content": "Furthermore, to illustrate that the phenomenon shown in Fig. 6 is not limited to one or a few samples, we calculate"
1354
+ }
1355
+ ],
1356
+ [
1357
+ {
1358
+ "type": "image",
1359
+ "bbox": [
1360
+ 0.082,
1361
+ 0.068,
1362
+ 0.287,
1363
+ 0.165
1364
+ ],
1365
+ "angle": 0,
1366
+ "content": null
1367
+ },
1368
+ {
1369
+ "type": "image_caption",
1370
+ "bbox": [
1371
+ 0.134,
1372
+ 0.169,
1373
+ 0.228,
1374
+ 0.186
1375
+ ],
1376
+ "angle": 0,
1377
+ "content": "(a) \\(\\tau = 0.5\\)"
1378
+ },
1379
+ {
1380
+ "type": "image",
1381
+ "bbox": [
1382
+ 0.297,
1383
+ 0.068,
1384
+ 0.498,
1385
+ 0.165
1386
+ ],
1387
+ "angle": 0,
1388
+ "content": null
1389
+ },
1390
+ {
1391
+ "type": "image_caption",
1392
+ "bbox": [
1393
+ 0.352,
1394
+ 0.169,
1395
+ 0.431,
1396
+ 0.186
1397
+ ],
1398
+ "angle": 0,
1399
+ "content": "(b) \\(\\tau = 1\\)"
1400
+ },
1401
+ {
1402
+ "type": "image",
1403
+ "bbox": [
1404
+ 0.508,
1405
+ 0.069,
1406
+ 0.704,
1407
+ 0.164
1408
+ ],
1409
+ "angle": 0,
1410
+ "content": null
1411
+ },
1412
+ {
1413
+ "type": "image_caption",
1414
+ "bbox": [
1415
+ 0.559,
1416
+ 0.169,
1417
+ 0.646,
1418
+ 0.186
1419
+ ],
1420
+ "angle": 0,
1421
+ "content": "(c) \\(\\tau = 50\\)"
1422
+ },
1423
+ {
1424
+ "type": "image",
1425
+ "bbox": [
1426
+ 0.719,
1427
+ 0.069,
1428
+ 0.916,
1429
+ 0.164
1430
+ ],
1431
+ "angle": 0,
1432
+ "content": null
1433
+ },
1434
+ {
1435
+ "type": "image_caption",
1436
+ "bbox": [
1437
+ 0.765,
1438
+ 0.169,
1439
+ 0.864,
1440
+ 0.186
1441
+ ],
1442
+ "angle": 0,
1443
+ "content": "(d) \\(\\tau = 100\\)"
1444
+ },
1445
+ {
1446
+ "type": "image_caption",
1447
+ "bbox": [
1448
+ 0.074,
1449
+ 0.197,
1450
+ 0.921,
1451
+ 0.245
1452
+ ],
1453
+ "angle": 0,
1454
+ "content": "Fig. 6: A demonstration of the Euclidean distance and cosine similarity between the encoded sample \\( f(x) \\) and all class prototypes for one sample, with different temperature configurations. The red lines indicate the Euclidean distance while the blue lines stand for cosine similarity."
1455
+ },
1456
+ {
1457
+ "type": "image",
1458
+ "bbox": [
1459
+ 0.082,
1460
+ 0.272,
1461
+ 0.273,
1462
+ 0.382
1463
+ ],
1464
+ "angle": 0,
1465
+ "content": null
1466
+ },
1467
+ {
1468
+ "type": "image_caption",
1469
+ "bbox": [
1470
+ 0.139,
1471
+ 0.384,
1472
+ 0.248,
1473
+ 0.394
1474
+ ],
1475
+ "angle": 0,
1476
+ "content": "(a) Euclidean Distance"
1477
+ },
1478
+ {
1479
+ "type": "image",
1480
+ "bbox": [
1481
+ 0.303,
1482
+ 0.272,
1483
+ 0.495,
1484
+ 0.382
1485
+ ],
1486
+ "angle": 0,
1487
+ "content": null
1488
+ },
1489
+ {
1490
+ "type": "image_caption",
1491
+ "bbox": [
1492
+ 0.365,
1493
+ 0.384,
1494
+ 0.465,
1495
+ 0.394
1496
+ ],
1497
+ "angle": 0,
1498
+ "content": "(b) Cosine Similarity"
1499
+ },
1500
+ {
1501
+ "type": "image_caption",
1502
+ "bbox": [
1503
+ 0.074,
1504
+ 0.402,
1505
+ 0.493,
1506
+ 0.522
1507
+ ],
1508
+ "angle": 0,
1509
+ "content": "Fig. 7: Box plot of the variance of the Euclidean distance and cosine similarity calculated from each sample. The variances are calculated across all negative class prototypes, therefore, lower variance indicates a more uniform distribution of all negative class distances. Each box is a model trained with a different temperature, the green line shows the median value across all variances and the orange line is the mean value of all variances."
1510
+ },
1511
+ {
1512
+ "type": "text",
1513
+ "bbox": [
1514
+ 0.074,
1515
+ 0.549,
1516
+ 0.491,
1517
+ 0.791
1518
+ ],
1519
+ "angle": 0,
1520
+ "content": "the variance of Euclidean distance and cosine similarity of all negative class prototypes across all samples in CIFAR10 test set. Note that as illustrated in Fig. 6, different models have very different ranges for Euclidean distance between encoded feature and class prototypes. Therefore, we map the value of different models into the same range to make a more direct comparison. Box plots are drawn in Fig. 7 showing the overall variance results with each box being a model trained with a different temperature. We can observe a clear trend that when the temperature rises, the variance for both Euclidean distance and cosine similarity drops indicating the encoded sample, \\( f(x) \\), has a more similar distance to all negative class prototypes. One might notice an increase in variance when the temperature reaches some threshold. We label them as extreme temperatures, which are so large that they can adversely affect the model's convergence."
1521
+ },
1522
+ {
1523
+ "type": "title",
1524
+ "bbox": [
1525
+ 0.075,
1526
+ 0.799,
1527
+ 0.415,
1528
+ 0.813
1529
+ ],
1530
+ "angle": 0,
1531
+ "content": "E. Further Discussion on Adversarial Robustness"
1532
+ },
1533
+ {
1534
+ "type": "text",
1535
+ "bbox": [
1536
+ 0.074,
1537
+ 0.817,
1538
+ 0.492,
1539
+ 0.909
1540
+ ],
1541
+ "angle": 0,
1542
+ "content": "Despite the model trained with high temperatures showing superb robustness against untargeted PGD attack due to its nature attribute that discovers the weakness of PGD attack, it does not hold robustness against targeted attacks. The reason behind this is straightforward. In targeted attacks, Eqn. 10 no longer holds, and the gradient is not obligated to move"
1543
+ },
1544
+ {
1545
+ "type": "table_caption",
1546
+ "bbox": [
1547
+ 0.504,
1548
+ 0.268,
1549
+ 0.921,
1550
+ 0.32
1551
+ ],
1552
+ "angle": 0,
1553
+ "content": "TABLE III: Preliminary experiments of adversarial training on CIFAR-10 with temperature control. The training scheme uses [18] and the model is ResNet50."
1554
+ },
1555
+ {
1556
+ "type": "table",
1557
+ "bbox": [
1558
+ 0.508,
1559
+ 0.327,
1560
+ 0.917,
1561
+ 0.406
1562
+ ],
1563
+ "angle": 0,
1564
+ "content": "<table><tr><td>Temp.</td><td>τ = 0.5</td><td>τ = 1</td><td>τ = 10</td><td>τ = 30</td><td>τ = 50</td><td>τ = 70</td><td>τ = 100</td></tr><tr><td>Clean</td><td>88.98</td><td>85.67</td><td>81.71</td><td>82.62</td><td>83.75</td><td>84.28</td><td>84.27</td></tr><tr><td>PGD20</td><td>35.93</td><td>42.63</td><td>40.95</td><td>44.96</td><td>48.61</td><td>49.16</td><td>48.53</td></tr></table>"
1565
+ },
1566
+ {
1567
+ "type": "text",
1568
+ "bbox": [
1569
+ 0.503,
1570
+ 0.432,
1571
+ 0.921,
1572
+ 0.476
1573
+ ],
1574
+ "angle": 0,
1575
+ "content": "towards all negative class prototypes with a weighted step size. Therefore, with the only source of the model robustness gained eliminated, it is naturally vulnerable to targeted attacks."
1576
+ },
1577
+ {
1578
+ "type": "text",
1579
+ "bbox": [
1580
+ 0.503,
1581
+ 0.477,
1582
+ 0.922,
1583
+ 0.568
1584
+ ],
1585
+ "angle": 0,
1586
+ "content": "Remark: Even though many attacks claim themselves to be untargeted attacks, they actually optimize toward one self-selected target, which we do not consider untargeted attacks under this setting. One popular example is the Difference of Logits Ratio(DLR) attack proposed by [4]. Regardless of its ability to rescale the logit,"
1587
+ },
1588
+ {
1589
+ "type": "equation",
1590
+ "bbox": [
1591
+ 0.62,
1592
+ 0.573,
1593
+ 0.922,
1594
+ 0.612
1595
+ ],
1596
+ "angle": 0,
1597
+ "content": "\\[\n\\mathrm {D L R} (x, y) = - \\frac {z _ {\\mathrm {y}} - \\max z _ {i}}{z _ {\\pi 1} - z _ {\\pi 3}} \\tag {11}\n\\]"
1598
+ },
1599
+ {
1600
+ "type": "text",
1601
+ "bbox": [
1602
+ 0.503,
1603
+ 0.616,
1604
+ 0.922,
1605
+ 0.692
1606
+ ],
1607
+ "angle": 0,
1608
+ "content": "shows that the DLR loss automatically selects the class holding the largest logit other than the target class as the attack target. Therefore, during optimization, it does not need to optimize toward all negative class prototypes. A similar example also includes FAB attack [3]."
1609
+ },
1610
+ {
1611
+ "type": "title",
1612
+ "bbox": [
1613
+ 0.505,
1614
+ 0.703,
1615
+ 0.838,
1616
+ 0.718
1617
+ ],
1618
+ "angle": 0,
1619
+ "content": "F. Extended Experiment on Adversarial Training"
1620
+ },
1621
+ {
1622
+ "type": "text",
1623
+ "bbox": [
1624
+ 0.503,
1625
+ 0.722,
1626
+ 0.921,
1627
+ 0.813
1628
+ ],
1629
+ "angle": 0,
1630
+ "content": "Given that our temperature control method is used inside the Cross-Entropy Loss, it is possible to apply this method in adversarial training. Here, we do preliminary experiments on the adversarial training baseline proposed by [18] for the simplicity of its loss function. We add temperature control inside vanilla loss term forming"
1631
+ },
1632
+ {
1633
+ "type": "equation",
1634
+ "bbox": [
1635
+ 0.512,
1636
+ 0.823,
1637
+ 0.922,
1638
+ 0.839
1639
+ ],
1640
+ "angle": 0,
1641
+ "content": "\\[\nL _ {A T} (x, x _ {a d v}, y, F) = L _ {c e} (F (x) / \\tau , y) + L _ {c e} (F (x _ {a d v}), y), \\tag {12}\n\\]"
1642
+ },
1643
+ {
1644
+ "type": "text",
1645
+ "bbox": [
1646
+ 0.504,
1647
+ 0.848,
1648
+ 0.905,
1649
+ 0.862
1650
+ ],
1651
+ "angle": 0,
1652
+ "content": "where \\(F\\) is a combination of encoder and class prototypes."
1653
+ },
1654
+ {
1655
+ "type": "text",
1656
+ "bbox": [
1657
+ 0.503,
1658
+ 0.863,
1659
+ 0.922,
1660
+ 0.908
1661
+ ],
1662
+ "angle": 0,
1663
+ "content": "Our preliminary results are listed in Table III. We can clearly observe that model robustness increases as the temperature increases with a slight trade-off with clean accuracy, which"
1664
+ }
1665
+ ],
1666
+ [
1667
+ {
1668
+ "type": "text",
1669
+ "bbox": [
1670
+ 0.074,
1671
+ 0.063,
1672
+ 0.493,
1673
+ 0.214
1674
+ ],
1675
+ "angle": 0,
1676
+ "content": "confirms the possibility of combining the temperature control method with adversarial training. While further extension to other adversarial training methods is possible, it remains a complex problem for most adversarial training involves complex loss functions that may introduce terms other than the Cross-Entropy function. Also, balancing the vanilla loss term and adversarial loss term largely relies on empirical experiments. Therefore, further exploration of fitting this into other adversarial training methods falls beyond the scope of this paper."
1677
+ },
1678
+ {
1679
+ "type": "title",
1680
+ "bbox": [
1681
+ 0.165,
1682
+ 0.231,
1683
+ 0.402,
1684
+ 0.244
1685
+ ],
1686
+ "angle": 0,
1687
+ "content": "VI. CONCLUSION & LIMITATION"
1688
+ },
1689
+ {
1690
+ "type": "text",
1691
+ "bbox": [
1692
+ 0.074,
1693
+ 0.253,
1694
+ 0.493,
1695
+ 0.419
1696
+ ],
1697
+ "angle": 0,
1698
+ "content": "In this paper, we investigate the under-explored property of temperature scaling with the softmax function on image classification tasks. By performing gradient analysis with the Cross-Entropy classification loss and executing different empirical experiments, we show that temperature scaling can be a significant factor in model performance. Further experiments reveal applying high temperatures during training introduces enormous robustness against gradient-based untargeted adversarial attacks. We hope our work raises the interest of other researchers to utilize the simple temperature scaling in the common Cross-Entropy loss."
1699
+ },
1700
+ {
1701
+ "type": "text",
1702
+ "bbox": [
1703
+ 0.074,
1704
+ 0.42,
1705
+ 0.493,
1706
+ 0.51
1707
+ ],
1708
+ "angle": 0,
1709
+ "content": "One limitation of this study was that we didn't report an explicit algorithm to set the best temperature values. We will work on this in our future work. One takehome note, as a hyperparameter, the tuning cost of the temperature is low as a wide range of temperatures (30 to 70) can provide improvements to the model."
1710
+ },
1711
+ {
1712
+ "type": "title",
1713
+ "bbox": [
1714
+ 0.236,
1715
+ 0.527,
1716
+ 0.332,
1717
+ 0.54
1718
+ ],
1719
+ "angle": 0,
1720
+ "content": "REFERENCES"
1721
+ },
1722
+ {
1723
+ "type": "ref_text",
1724
+ "bbox": [
1725
+ 0.085,
1726
+ 0.553,
1727
+ 0.493,
1728
+ 0.592
1729
+ ],
1730
+ "angle": 0,
1731
+ "content": "[1] Agarwala, A., Pennington, J., Dauphin, Y.N., Schoenholz, S.S.: Temperature check: theory and practice for training models with softmax-cross-entropy losses. CoRR abs/2010.07344 (2020)"
1732
+ },
1733
+ {
1734
+ "type": "ref_text",
1735
+ "bbox": [
1736
+ 0.085,
1737
+ 0.595,
1738
+ 0.493,
1739
+ 0.634
1740
+ ],
1741
+ "angle": 0,
1742
+ "content": "[2] Carlini, N., Wagner, D.: Towards evaluating the robustness of neural networks. In: 2017 IEEE symposium on security and privacy (sp). pp. 39-57. IEEE (2017)"
1743
+ },
1744
+ {
1745
+ "type": "ref_text",
1746
+ "bbox": [
1747
+ 0.086,
1748
+ 0.636,
1749
+ 0.493,
1750
+ 0.702
1751
+ ],
1752
+ "angle": 0,
1753
+ "content": "[3] Croce, F., Hein, M.: Minimally distorted adversarial examples with a fast adaptive boundary attack. In: III, H.D., Singh, A. (eds.) Proceedings of the 37th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 119, pp. 2196-2205. PMLR (13-18 Jul 2020)"
1754
+ },
1755
+ {
1756
+ "type": "ref_text",
1757
+ "bbox": [
1758
+ 0.085,
1759
+ 0.704,
1760
+ 0.493,
1761
+ 0.77
1762
+ ],
1763
+ "angle": 0,
1764
+ "content": "[4] Croce, F., Hein, M.: Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In: III, H.D., Singh, A. (eds.) Proceedings of the 37th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 119, pp. 2206-2216. PMLR (13-18 Jul 2020)"
1765
+ },
1766
+ {
1767
+ "type": "ref_text",
1768
+ "bbox": [
1769
+ 0.085,
1770
+ 0.772,
1771
+ 0.493,
1772
+ 0.811
1773
+ ],
1774
+ "angle": 0,
1775
+ "content": "[5] Devlin, J., Chang, M., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. CoRR abs/1810.04805 (2018)"
1776
+ },
1777
+ {
1778
+ "type": "ref_text",
1779
+ "bbox": [
1780
+ 0.085,
1781
+ 0.813,
1782
+ 0.493,
1783
+ 0.853
1784
+ ],
1785
+ "angle": 0,
1786
+ "content": "[6] Engstrom, L., Ilyas, A., Athalye, A.: Evaluating and understanding the robustness of adversarial logit pairing. arXiv preprint arXiv:1807.10272 (2018)"
1787
+ },
1788
+ {
1789
+ "type": "ref_text",
1790
+ "bbox": [
1791
+ 0.086,
1792
+ 0.855,
1793
+ 0.493,
1794
+ 0.907
1795
+ ],
1796
+ "angle": 0,
1797
+ "content": "[7] Guo, C., Pleiss, G., Sun, Y., Weinberger, K.Q.: On calibration of modern neural networks. In: Precup, D., Teh, Y.W. (eds.) Proceedings of the 34th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 70, pp. 1321-1330. PMLR (06-11 Aug 2017)"
1798
+ },
1799
+ {
1800
+ "type": "list",
1801
+ "bbox": [
1802
+ 0.085,
1803
+ 0.553,
1804
+ 0.493,
1805
+ 0.907
1806
+ ],
1807
+ "angle": 0,
1808
+ "content": null
1809
+ },
1810
+ {
1811
+ "type": "ref_text",
1812
+ "bbox": [
1813
+ 0.515,
1814
+ 0.065,
1815
+ 0.922,
1816
+ 0.09
1817
+ ],
1818
+ "angle": 0,
1819
+ "content": "[8] Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network (2015)"
1820
+ },
1821
+ {
1822
+ "type": "ref_text",
1823
+ "bbox": [
1824
+ 0.514,
1825
+ 0.093,
1826
+ 0.922,
1827
+ 0.132
1828
+ ],
1829
+ "angle": 0,
1830
+ "content": "[9] Hou, P., Han, J., Li, X.: Improving adversarial robustness with self-paced hard-class pair reweighting. In: Proceedings of the Thirty-Seventh AAAI Conference on Artificial Intelligence (2023)"
1831
+ },
1832
+ {
1833
+ "type": "ref_text",
1834
+ "bbox": [
1835
+ 0.508,
1836
+ 0.134,
1837
+ 0.922,
1838
+ 0.187
1839
+ ],
1840
+ "angle": 0,
1841
+ "content": "[10] Kanai, S., Yamada, M., Yamaguchi, S., Takahashi, H., Ida, Y.: Constraining logits by bounded function for adversarial robustness. In: 2021 International Joint Conference on Neural Networks (IJCNN). pp. 1-8. IEEE (2021)"
1842
+ },
1843
+ {
1844
+ "type": "ref_text",
1845
+ "bbox": [
1846
+ 0.508,
1847
+ 0.189,
1848
+ 0.921,
1849
+ 0.216
1850
+ ],
1851
+ "angle": 0,
1852
+ "content": "[11] Krizhevsky, A., Hinton, G., et al.: Learning multiple layers of features from tiny images (2009)"
1853
+ },
1854
+ {
1855
+ "type": "ref_text",
1856
+ "bbox": [
1857
+ 0.508,
1858
+ 0.218,
1859
+ 0.921,
1860
+ 0.271
1861
+ ],
1862
+ "angle": 0,
1863
+ "content": "[12] Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. In: Pereira, F., Burges, C., Bottou, L., Weinberger, K. (eds.) Advances in Neural Information Processing Systems. vol. 25. Curran Associates, Inc. (2012)"
1864
+ },
1865
+ {
1866
+ "type": "ref_text",
1867
+ "bbox": [
1868
+ 0.508,
1869
+ 0.273,
1870
+ 0.922,
1871
+ 0.352
1872
+ ],
1873
+ "angle": 0,
1874
+ "content": "[13] Kull, M., Perello Nieto, M., Kangsepp, M., Silva Filho, T., Song, H., Flach, P.: Beyond temperature scaling: Obtaining well-calibrated multiclass probabilities with dirichlet calibration. In: Wallach, H., Larochelle, H., Beygelzimer, A., d'Alché-Buc, F., Fox, E., Garnett, R. (eds.) Advances in Neural Information Processing Systems. vol. 32. Curran Associates, Inc. (2019)"
1875
+ },
1876
+ {
1877
+ "type": "ref_text",
1878
+ "bbox": [
1879
+ 0.508,
1880
+ 0.356,
1881
+ 0.922,
1882
+ 0.422
1883
+ ],
1884
+ "angle": 0,
1885
+ "content": "[14] Kumar, A., Sarawagi, S., Jain, U.: Trainable calibration measures for neural networks from kernel mean embeddings. In: Dy, J., Krause, A. (eds.) Proceedings of the 35th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 80, pp. 2805-2814. PMLR (10-15 Jul 2018)"
1886
+ },
1887
+ {
1888
+ "type": "ref_text",
1889
+ "bbox": [
1890
+ 0.508,
1891
+ 0.424,
1892
+ 0.922,
1893
+ 0.49
1894
+ ],
1895
+ "angle": 0,
1896
+ "content": "[15] Lakshminarayanan, B., Pritzel, A., Blundell, C.: Simple and scalable predictive uncertainty estimation using deep ensembles. In: Guyon, I., Luxburg, U.V., Bengio, S., Wallach, H., Fergus, R., Vishwanathan, S., Garnett, R. (eds.) Advances in Neural Information Processing Systems. vol. 30. Curran Associates, Inc. (2017)"
1897
+ },
1898
+ {
1899
+ "type": "ref_text",
1900
+ "bbox": [
1901
+ 0.508,
1902
+ 0.493,
1903
+ 0.921,
1904
+ 0.519
1905
+ ],
1906
+ "angle": 0,
1907
+ "content": "[16] Le, Y., Yang, X.: Tiny imagenet visual recognition challenge. CS 231N 7(7), 3 (2015)"
1908
+ },
1909
+ {
1910
+ "type": "ref_text",
1911
+ "bbox": [
1912
+ 0.508,
1913
+ 0.521,
1914
+ 0.921,
1915
+ 0.546
1916
+ ],
1917
+ "angle": 0,
1918
+ "content": "[17] Van der Maaten, L., Hinton, G.: Visualizing data using t-sne. Journal of machine learning research 9(11) (2008)"
1919
+ },
1920
+ {
1921
+ "type": "ref_text",
1922
+ "bbox": [
1923
+ 0.508,
1924
+ 0.549,
1925
+ 0.921,
1926
+ 0.589
1927
+ ],
1928
+ "angle": 0,
1929
+ "content": "[18] Madry, A., Makelov, A., Schmidt, L., Tsipras, D., Vladu, A.: Towards deep learning models resistant to adversarial attacks. In: International Conference on Learning Representations (2018)"
1930
+ },
1931
+ {
1932
+ "type": "ref_text",
1933
+ "bbox": [
1934
+ 0.508,
1935
+ 0.591,
1936
+ 0.922,
1937
+ 0.657
1938
+ ],
1939
+ "angle": 0,
1940
+ "content": "[19] Minderer, M., Djolonga, J., Romijnders, R., Hubis, F., Zhai, X., Houlsby, N., Tran, D., Lucic, M.: Revisiting the calibration of modern neural networks. In: Ranzato, M., Beygelzimer, A., Dauphin, Y., Liang, P., Vaughan, J.W. (eds.) Advances in Neural Information Processing Systems. vol. 34, pp. 15682-15694. Curran Associates, Inc. (2021)"
1941
+ },
1942
+ {
1943
+ "type": "ref_text",
1944
+ "bbox": [
1945
+ 0.508,
1946
+ 0.66,
1947
+ 0.921,
1948
+ 0.685
1949
+ ],
1950
+ "angle": 0,
1951
+ "content": "[20] van den Oord, A., Li, Y., Vinyals, O.: Representation learning with contrastive predictive coding. CoRR abs/1807.03748 (2018)"
1952
+ },
1953
+ {
1954
+ "type": "ref_text",
1955
+ "bbox": [
1956
+ 0.508,
1957
+ 0.688,
1958
+ 0.922,
1959
+ 0.728
1960
+ ],
1961
+ "angle": 0,
1962
+ "content": "[21] Pereyra, G., Tucker, G., Chorowski, J., Kaiser, L., Hinton, G.: Regularizing neural networks by penalizing confident output distributions (2017), https://openreview.net/forum?id=HkCjNI5ex"
1963
+ },
1964
+ {
1965
+ "type": "ref_text",
1966
+ "bbox": [
1967
+ 0.508,
1968
+ 0.73,
1969
+ 0.922,
1970
+ 0.769
1971
+ ],
1972
+ "angle": 0,
1973
+ "content": "[22] Prach, B., Lampert, C.H.: Almost-orthogonal layers for efficient general-purpose lipschitz networks. In: European Conference on Computer Vision. pp. 350–365. Springer (2022)"
1974
+ },
1975
+ {
1976
+ "type": "ref_text",
1977
+ "bbox": [
1978
+ 0.508,
1979
+ 0.771,
1980
+ 0.921,
1981
+ 0.797
1982
+ ],
1983
+ "angle": 0,
1984
+ "content": "[23] Shafahi, A., Ghiasi, A., Huang, F., Goldstein, T.: Label smoothing and logit squeezing: A replacement for adversarial training? (2019)"
1985
+ },
1986
+ {
1987
+ "type": "ref_text",
1988
+ "bbox": [
1989
+ 0.508,
1990
+ 0.799,
1991
+ 0.922,
1992
+ 0.839
1993
+ ],
1994
+ "angle": 0,
1995
+ "content": "[24] Wang, F., Liu, H.: Understanding the behaviour of contrastive loss. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 2495-2504 (June 2021)"
1996
+ },
1997
+ {
1998
+ "type": "ref_text",
1999
+ "bbox": [
2000
+ 0.508,
2001
+ 0.841,
2002
+ 0.922,
2003
+ 0.907
2004
+ ],
2005
+ "angle": 0,
2006
+ "content": "[25] Wang, T., Isola, P.: Understanding contrastive representation learning through alignment and uniformity on the hypersphere. In: III, H.D., Singh, A. (eds.) Proceedings of the 37th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 119, pp. 9929-9939. PMLR (13-18 Jul 2020)"
2007
+ },
2008
+ {
2009
+ "type": "list",
2010
+ "bbox": [
2011
+ 0.508,
2012
+ 0.065,
2013
+ 0.922,
2014
+ 0.907
2015
+ ],
2016
+ "angle": 0,
2017
+ "content": null
2018
+ }
2019
+ ],
2020
+ [
2021
+ {
2022
+ "type": "ref_text",
2023
+ "bbox": [
2024
+ 0.078,
2025
+ 0.064,
2026
+ 0.492,
2027
+ 0.117
2028
+ ],
2029
+ "angle": 0,
2030
+ "content": "[26] Wu, Z., Xiong, Y., Yu, S.X., Lin, D.: Unsupervised feature learning via non-parametric instance discrimination. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (June 2018)"
2031
+ },
2032
+ {
2033
+ "type": "ref_text",
2034
+ "bbox": [
2035
+ 0.078,
2036
+ 0.119,
2037
+ 0.492,
2038
+ 0.16
2039
+ ],
2040
+ "angle": 0,
2041
+ "content": "[27] Zhao, H., Qi, X., Shen, X., Shi, J., Jia, J.: Icnet for real-time semantic segmentation on high-resolution images. In: Proceedings of the European Conference on Computer Vision (ECCV) (September 2018)"
2042
+ },
2043
+ {
2044
+ "type": "list",
2045
+ "bbox": [
2046
+ 0.078,
2047
+ 0.064,
2048
+ 0.492,
2049
+ 0.16
2050
+ ],
2051
+ "angle": 0,
2052
+ "content": null
2053
+ }
2054
+ ]
2055
+ ]
data/2025/2502_20xxx/2502.20604/be121c54-f9e2-4c21-8d04-9ea04ee6c28a_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0a4165b2850b4e2e5ea9ff492b76c6ae9b1e8cc443a06b6a23aa0341813d4c06
3
+ size 3344715
data/2025/2502_20xxx/2502.20604/full.md ADDED
@@ -0,0 +1,331 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Exploring the Impact of Temperature Scaling in Softmax for Classification and Adversarial Robustness
2
+
3
+ $1^{\mathrm{st}}$ Hao Xuan
4
+
5
+ Electrical and Computer Engineering
6
+
7
+ University of Alberta
8
+
9
+ Edmonton, Canada
10
+
11
+ hxuan@ualberta.ca
12
+
13
+ $2^{\mathrm{nd}}$ Bokai Yang
14
+
15
+ Electrical and Computer Engineering
16
+
17
+ University of Alberta
18
+
19
+ Edmonton, Canada
20
+
21
+ bokai5@ualberta.ca
22
+
23
+ $3^{\mathrm{rd}}$ Xingyu Li
24
+
25
+ Electrical and Computer Engineering
26
+
27
+ University of Alberta
28
+
29
+ Edmonton, Canada
30
+
31
+ xingyu@ualberta.ca
32
+
33
+ Abstract—The softmax function is a fundamental component in deep learning. This study delves into the often-overlooked parameter within the softmax function, known as "temperature," providing novel insights into the practical and theoretical aspects of temperature scaling for image classification. Our empirical studies, adopting convolutional neural networks and transformers on multiple benchmark datasets, reveal that moderate temperatures generally introduce better overall performance. Through extensive experiments and rigorous theoretical analysis, we explore the role of temperature scaling in model training and unveil that temperature not only influences learning step size but also shapes the model's optimization direction. Moreover, for the first time, we discover a surprising benefit of elevated temperatures: enhanced model robustness against common corruption, natural perturbation, and non-targeted adversarial attacks like Projected Gradient Descent. We extend our discoveries to adversarial training, demonstrating that, compared to the standard softmax function with the default temperature value, higher temperatures have the potential to enhance adversarial training. The insights of this work open new avenues for improving model performance and security in deep learning applications.
34
+
35
+ # I. INTRODUCTION
36
+
37
+ Deep learning has achieved dramatic breakthroughs in recent years, excelling in tasks such as image classification [12], natural language processing (NLP) [5], and semantic segmentation [27]. A critical component of most deep learning methods is the softmax function, which normalizes a set of real values into probabilities. The generalized softmax function incorporates a parameter known as "temperature," which controls the softness of the output distribution. Despite its importance in theory, the impact of temperature scaling on classification tasks has been relatively underexplored, particularly in contrast to its use in other areas such as knowledge distillation [8], contrastive learning [24], confidence calibration [21], and natural language processing. Specifically, though the temperature scaling has occasionally been applied in prior experimentation [6], [10], [23], these studies often integrate additional complex techniques such as Gaussian noise injection in [23], adversarial training in [6], [22], and innovative quadratic activation functions in [10], making it challenging to isolate and understand the specific contribution of temperature scaling to the overall system
38
+
39
+ performance. Consequently, the specific role of temperature in classification tasks remains ambiguous. Previous study by [1] has hinted at the potential benefits of temperature scaling, but a comprehensive investigation is still lacking.
40
+
41
+ This study aims to fill this gap by conducting extensive experiments to explore the practical and theoretical aspects of temperature scaling in the softmax function for image classification. We employ convolutional neural networks (CNNs) and transformers on multiple benchmark datasets, including CIFAR-10 [11], CIFAR-100 [11], and Tiny-ImageNet [16], to systematically analyze the effects of different temperature values. Our empirical results consistently show that moderate temperatures generally improve overall performance, challenging the conventional knowledge derived from contrastive learning that low temperature facilitates representation learning.
42
+
43
+ We also delve into the theoretical underpinnings of temperature scaling in model training. Our analysis reveals that temperature not only influences the learning step size but also shapes the model's optimization direction. Specifically, lower temperatures focus the model's learning on error-prone classes, while higher temperatures promote a more balanced learning across all classes. This insight is crucial for understanding the nuanced effects of temperature scaling on model optimization.
44
+
45
+ Furthermore, we uncover a surprising benefit of elevated temperatures: enhanced model robustness against common corruptions, natural perturbations, and non-targeted adversarial attacks, such as Projected Gradient Descent (PGD). We extend our investigation to adversarial training introduced by [18], demonstrating that higher temperatures can potentially enhance the robustness of models trained with adversarial methods compared to those using the standard softmax function with the default temperature.
46
+
47
+ In summary, this work provides new perspectives on the practical applications and theoretical implications of temperature scaling in the softmax function. Our contributions can be summarized as follows:
48
+
49
+ - We conduct extensive experiments demonstrating that applying a reasonably large temperature during model training improves overall performance.
50
+
51
+ - We discover that models trained with elevated temperatures exhibit enhanced robustness against gradient-based untargeted adversarial attacks.
52
+ - Additionally, we show the potential of integrating temperature control into adversarial training to boost model performance and security in deep learning applications.
53
+
54
+ # II. RELATED WORKS
55
+
56
+ The softmax function has been a longstanding component of neural networks, usually used to normalize a vector of real values into probabilities. Modulating the temperature scaling factor within the softmax function allows for reshaping the probability distribution. This section provides a concise overview of the application of temperature scaling in various computational tasks.
57
+
58
+ Knowledge Distillation proposed by [8] is one innovative way to transfer knowledge from a teacher model to a student model. Temperature is utilized during training to control both the student and teacher model's output. The author argues that lower temperatures make the distillation assign less weight to logits that are much smaller than the average. Conversely, employing larger temperatures softens the probability distribution and pays more attention to the unimportant part of the logit. Larger temperatures are proven to be beneficial in the distillation process since the hard-target term already ensures the dominant part of the logit (target class) is correct. By focusing on the remaining logit, the student model can capture more fine-grained information from the teacher model. Note that despite various temperatures used during training, it is set to 1 when the model is deployed.
59
+
60
+ Model Confidence Calibration usually utilizes temperature scaling to address the over-confident issue in deep learning [7], [15], [19]. It centers on estimating predictive uncertainty to match its expected accuracy [13], [14]. Despite multiple generic calibration methods being proposed, temperature scaling proposed by [7] remains a baseline method for being simple, effective and able to apply to various cases without major expense. The motivation behind temperature scaling is simple, since the goal is to control the network's confidence to match its accuracy, applying temperature to the softmax function that can directly modify the probability distribution seems a perfect fit for the problem. During training, a validation set is needed to find the ideal temperature parameter for the network, and the same temperature is used when deployed.
61
+
62
+ Contrastive Learning is one paradigm for unsupervised learning [20], [26]. To achieve a powerful feature encoder, it utilizes contrastive loss to pull similar samples close and push negative pairs away in the latent space. Although the temperature has long existed as a hyper-parameter in contrastive loss, its actual mechanism is just understudied recently. [24] analyze the contrastive loss closely and find that as the temperature decreases, the distribution of the contrastive loss becomes sharper, which applies larger penalties to samples similar to the anchor data. Also, uniformity of feature distribution increases, indicating the embedding feature distribution aligns with a uniform distribution better [25].
63
+
64
+ Temperature Scaling in Image Classification has occasionally been utilized in the experimental sections of prior studies, yet focused investigations on this subject remain limited. For example, previous studies aiming to improve adversarial robustness have utilized temperature scaling to adjust logits within their experimentation [6], [10], [23]. However, these studies often integrate additional complex techniques such as Gaussian noise injection [23], adversarial training [6], [22], and innovative quadratic activation functions [10], making it challenging to isolate and understand the specific contribution of temperature scaling to the overall system performance. In contrast, our study narrows its focus to investigating the direct impact of temperature scaling applied through the softmax function on model optimization processes. Among the few related works, "The Temperature Check" by [1] is notably relevant to our discussion. It mainly explores the dynamics of model training by considering factors such as temperature, learning rate, and time, and presents an empirical finding that a model's generalization performance is significantly influenced by temperature settings. While our observations align with these findings, our research approaches the issue from a different perspective of gradient analysis. Specifically, we delve into how temperature scaling impacts model optimization process. Furthermore, our study broadens the scope of inquiry by assessing the effect of temperature scaling on a model's resilience to common corruptions and adversarial attacks, thereby adding a new dimension to the existing research.
65
+
66
+ # III. PRELIMINARY
67
+
68
+ # A. Softmax Function
69
+
70
+ Given a set of real numbers, $X = \{x_{1},\dots,x_{N}\}$ , the generalized softmax function can be used to normalize $X$ into a probability distribution.
71
+
72
+ $$
73
+ \mathbb {S} (X) = \frac {\exp (X / \tau)}{\sum_ {i} \exp \left(x _ {i} / \tau\right)}, \tag {1}
74
+ $$
75
+
76
+ where $\mathbb{S}$ represents the softmax function and $\tau$ is the temperature scaling factor. The temperature $\tau$ controls the smoothness (softness) of the probability it produces. Specifically, when $\tau \rightarrow \infty$ , the output tends toward a uniform distribution; while when $\tau = 0$ , the softmax function assigns a probability of 1 to the element with the highest value and a probability of 0 to the rest. The standard (unit) softmax function, with $\tau = 1$ , is widely used in conventional classification tasks.
77
+
78
+ # B. Problem Definition and Notation
79
+
80
+ We consider multi-category classification in this study, where paired training data $\{\mathcal{X},\mathcal{Y}\} = \{(x,y)|x\in \mathbb{R}^{H\times L\times N},y\in$ $\mathbb{R}^{1\times M}\}$ are drawn from a data distribution $\mathcal{D}$ . Here, $H,L,N$ are the dimension of a sample $x,M$ is the number of categories, and $y$ is a one-hot vector indicating the class of the input $x$ . A classifier, $\mathcal{C}:\mathcal{X}\to \mathcal{Y}$ , is a function predicting the label $y$ for a given data $x$ . That is $C(x) = y$ . In the canonical classification setting, a neural network classifier, $\mathcal{C} = (f,W)$ , is usually composed of a feature extractor $f$ parameterized by $\theta$ and a weight matrix $W$ . $f$ is a function mapping the input $x$ to a real-valued vector $f(x)$ in the model's penultimate layer and
81
+
82
+ $W = (w_{1},\dots,w_{M})$ represents the coefficients of the last linear layer before the softmax layer. So the likelihood probability of data $x$ corresponding to the $M$ categories can be formulated as
83
+
84
+ $$
85
+ \hat {y} = C (x) = \mathbb {S} \left(W ^ {T} f (x)\right). \tag {2}
86
+ $$
87
+
88
+ Note that each vector $w_{i}$ in matrix $W$ can be considered as the prototype of class $i$ and the production $W^{T}f(x)$ in Eqn. 2 quantifies the similarity between the feature $f(x)$ and different class-prototypes.
89
+
90
+ During training, the model $C = (f, W)$ is optimized to minimize a specific loss, usually a Cross-Entropy (CE) loss.
91
+
92
+ $$
93
+ L _ {c e} (x) = - y \log \hat {y} = - \log \left[ \frac {\exp \left(w _ {i} ^ {T} \cdot f (x) / \tau\right)}{\sum_ {j = 1} ^ {N} \exp \left(w _ {j} ^ {T} \cdot f (x) / \tau\right)} \right] \tag {3}
94
+ $$
95
+
96
+ Though $\tau = 1$ is the default setting in classification tasks, we preserve $\tau$ in the Eqn.s to facilitate theoretical analysis.
97
+
98
+ # IV. GRADIENT ANALYSIS
99
+
100
+ To investigate the impact of temperature scaling factors for model optimization in classification tasks, we calculate the loss gradients with respect to the training parameters in the model. Specifically, given a data sample $x$ from the $i^{th}$ category, we refer to $w_{i}$ as the positive class prototype and the rest, $w_{j}$ for $j \neq i$ , as the negative class prototypes. Then the gradients with respect to the positive class prototype, negative class prototypes, and the encoder are:
101
+
102
+ $$
103
+ \frac {\partial L _ {c e} (x)}{\partial w _ {i}} = \frac {1}{\tau} \left[ \mathbb {S} \left(w _ {i} ^ {T} \cdot f (x) / \tau\right) - 1 \right] f (x) = \frac {1}{\tau} \left[ P _ {i} ^ {\tau} (x) - 1 \right] f (x), \tag {4}
104
+ $$
105
+
106
+ $$
107
+ \frac {\partial L _ {c e} (x)}{\partial w _ {j}} = \frac {1}{\tau} \mathbb {S} \left(w _ {j} ^ {T} \cdot f (x) / \tau\right) f (x) = \frac {1}{\tau} P _ {j} ^ {\tau} (x) f (x), \tag {5}
108
+ $$
109
+
110
+ $$
111
+ \frac {\partial L _ {c e} (x)}{\partial f} = \frac {1}{\tau} \left[ \sum_ {j \neq i} w _ {j} P _ {j} ^ {\tau} (x) - w _ {i} \left[ 1 - P _ {i} ^ {\tau} (x) \right] \right]. \tag {6}
112
+ $$
113
+
114
+ Learning rate: In Eqn. 4, 5, 6, since $0 < P_{j}^{\tau}(x) < 1$ , the actual learning rate is inversely proportional to the temperature $\tau$ . That is, larger temperatures lead to a reduced gradient step in model update, while smaller temperatures increase the gradient step. Furthermore, when the sample $x$ is misclassified, smaller temperatures give a further boost on updating $w_{i}$ and $w_{j}$ for $j = \arg \max (P_{j}^{\tau}(x)f(x))$ , because smaller temperatures in the softmax function lead to sharper distributions.
115
+
116
+ Optimization direction: From Eqn. 4, the positive class prototype $w_{i}$ is updated toward $f(x)$ in the latent space. In contrast, the negative prototypes $w_{j}$ move away from the direction of $f(x)$ according to Eqn. 5. The optimization direction of $f(x)$ is a weighted sum of all class prototypes, as shown in Eqn. 6. The fundamental optimization policy is to update the trainable parameters of the encoder in such a way that $f(x)$ moves closer to the positive class prototype and farther away from the negative class prototypes in the latent space. However, when we take the temperature parameter into account, we find that temperature has an impact on the update direction of $f(x)$ . Specifically, when the temperature is low,
117
+
118
+ ![](images/8edc9f837068a17d575223855a12a531d8be38ee8b3fb974859a5f7d2bdcdcfe.jpg)
119
+ (a) Small $\tau$
120
+
121
+ ![](images/e5ecc446ceb3f0f28df46d6d8da9060fa5b482b08f6b2ac31b4a50f1cf73b60a.jpg)
122
+ (b) Large $\tau$
123
+ Fig. 1: Demonstration of the model optimization direction with different temperatures. $f(x)$ is the latent code of a data sample from category 3. Since $f(x)$ is close to the negative class prototype $w_{1}$ , the CE loss with respect to the encoder $f$ yields a large gradient toward the groundtruth $w_{3}$ . However, with different temperature factors, the gradients associated with the negative classes are different: low temperature makes the update more biased by the hard class (a), while an elevated temperature leads to more equalized gradients (b).
124
+
125
+ the probability distribution produced by the softmax function is sharper, leading to significant differences in probability values among different prototypes. Consequently, the update direction of the encoder $f$ is predominantly influenced by the class prototype with the highest probability and the positive class prototype (if they are different). Fig. 1(a) visualizes the bias toward the hard class in model optimization, where $f(x)$ is the latent code of a data sample from category 3. In contrast, when the temperature is high, the differences in probability values among different prototypes are relatively smaller, and the encoder $f$ updates with a mixture of all class prototype directions, as demonstrated in Fig. 1(b). In other words, a low temperature makes the model focus on learning hard-class pairs, while a high temperature de-biases the influence among different classes for a balanced learning.
126
+
127
+ Moreover, when considering all the samples in one batch, the compound gradients of all $N$ samples are
128
+
129
+ $$
130
+ \sum_ {n = 1} ^ {N} \frac {\partial L _ {c e} \left(x _ {n}\right)}{\partial w _ {i}} = - \frac {1}{\tau} \sum_ {n = 1} ^ {N} f \left(x _ {n}\right) \left[ 1 - P _ {i} ^ {\tau} \left(x _ {n}\right) \right], \tag {7}
131
+ $$
132
+
133
+ $$
134
+ \sum_ {n = 1} ^ {N} \frac {\partial L _ {c e} (x _ {n})}{\partial w _ {k}} = \frac {1}{\tau} \sum_ {n = 1} ^ {N} f (x _ {n}) P _ {k} ^ {\tau} (x _ {n}), \tag {8}
135
+ $$
136
+
137
+ $$
138
+ \sum_ {n = 1} ^ {N} \frac {\partial L _ {c e} (x _ {n})}{\partial f} = \frac {1}{\tau} \sum_ {n = 1} ^ {N} \left[ \sum_ {k \neq i} w _ {k} P _ {k} ^ {\tau} (x _ {n}) - w _ {i} \left[ 1 - P _ {i} ^ {\tau} (x _ {n}) \right] \right]. \tag {9}
139
+ $$
140
+
141
+ Similar to the single sample case, when optimizing in a whole batch, with small temperatures, the model focuses on learning misclassified samples (i.e. hard samples), whereas higher temperatures help de-bias the update direction and distribute similar weight to all samples.
142
+
143
+ ![](images/2c1b2ef366d4294a39a5908abdb4c29f155a5fa9d72facd09b5761419f754eb1.jpg)
144
+ (a) $\tau = 0.5$
145
+
146
+ ![](images/8a3d6991ffe7379175b6ebf6a056dfeea9f558a597468efffbd803547b720e27.jpg)
147
+ (b) $\tau = 1$
148
+
149
+ ![](images/d06fbc36630b94487828e972f1d66e5045b9a0bce58e5e1774361e1477d55416.jpg)
150
+ (c) $\tau = 50$
151
+
152
+ ![](images/772354c17bba1d7cb82d9e882da17e750313e4590ac174a80d02f4e4dd1b62cc.jpg)
153
+ Fig. 2: T-SNE [17] visualization of the CIFAR10 sample distribution after the ResNet50 encoder with different temperatures.
154
+ (a) $\tau = 0.5$
155
+ Fig. 3: T-SNE [17] visualization of the CIFAR10 sample distribution after the VIT encoder with different temperatures.
156
+
157
+ ![](images/8265c7ab87ea11fafca111b98811837ba957cebfd3f14cdd3bbdeb1ee1708d30.jpg)
158
+ (b) $\tau = 1$
159
+
160
+ ![](images/ecfeff4655621fa1b6ee326bd894f2859fb3629164507bcaeff0e5ebb5588879.jpg)
161
+ (c) $\tau = 50$
162
+
163
+ # V. EMPIRICAL ANALYSIS AND DISCUSSION
164
+
165
+ As discussed in Section 4, applying a small temperature encourages a model to learn more about hard (misclassified) samples and hard (error-prone) classes. A high temperature, however, leads to more equitable learning across different classes and data points. Theoretically, both approaches to optimize feature distribution sound reasonable, with low temperatures focusing on weaker classes and high temperatures decreasing inequality across all negative classes. We argue that which optimization strategy is better for classification tasks remains an empirical problem.
166
+
167
+ # A. Experiment Setting
168
+
169
+ We conduct image classification on multiple benchmarks (i.e. CIFAR10, CIFAR100, and Tiny-ImageNet) and their extended Common Corruptions and Perturbations sets (i.e. CIFAR10-C, CIFAR100-C, and Tiny-ImageNet-C with corruption strength being 3) to investigate the impact of temperature scaling. In addition, we also evaluate the model's robustness against adversarial attacks such as PGD20 [18] and C&W [2]. Both attacks are bounded by the $l_{\infty}$ box with the same maximum perturbation $\epsilon = 8 / 255$ .
170
+
171
+ To get a comprehensive evaluation, we set $\tau \in \{0.1, 0.5, 1, 10, 30, 50, 70, 100\}$ . Unless stated otherwise, we take ResNet50 and VIT-small-patch16-224 as the CNN and transformer backbones, respectively. The ResNet50 is trained from scratch, with SGD optimizer and learning rate set to
172
+
173
+ 0.1. We also utilize the Cosine Annealing scheduler to better train the model. The transformer is pretrained on ImageNet-21K and finetuned on the target dataset using Adam optimizer. All experiments run on one RTX3090.
174
+
175
+ To clarify, temperature scaling is only applied during model training in this study, not during model evaluation or attacks. All empirical evaluation and adversarial sample generation by PGD and C&W are based on the standard cross entropy, i.e. $\tau = 1$ . Thus, attack gradients are not attenuated, reflecting the model's true sensitivity to data perturbation.
176
+
177
+ # B. Experiment Results
178
+
179
+ The quantitative results on CNN and Transformer are summarized in Table I and Table II, respectively. For the CNN model, ResNet50, trained from scratch, the standard accuracy increases as the temperature increases. Furthermore, CNN models trained at elevated temperatures show more robustness against naturally corrupted images. We believe that such improvements are majorly attributed to better model optimization with elevated temperature. For the transformer finetuned on the target set, the standard accuracy and robustness against natural corruptions and perturbations are quite stable. We hypothesize that such stable performance is due to the fact that ViT has already been pre-trained on ImageNet and has reached a relatively high-quality state. Additionally, we observed that the model's adversarial robustness gradually improves with increasing temperature.
180
+
181
+ TABLE I: Model performance and Robustness against Common Corruptions and Adversarial attacks (%) under different temperatures with ResNet50 trained from scratch. -C in the table represents the corresponding Common Corruptions and Perturbations set.
182
+
183
+ <table><tr><td rowspan="2">Temp.</td><td colspan="4">CIFAR10</td><td colspan="4">CIFAR100</td><td colspan="4">Tiny-Imagenet</td></tr><tr><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td></tr><tr><td>τ = 0.1</td><td>90.05</td><td>73.31</td><td>0</td><td>27.79</td><td>70.39</td><td>44.52</td><td>0</td><td>14.32</td><td>54.53</td><td>12.63</td><td>0</td><td>23.17</td></tr><tr><td>τ = 0.5</td><td>94.17</td><td>72.51</td><td>0</td><td>16.03</td><td>74.79</td><td>45.41</td><td>0</td><td>8.44</td><td>61.07</td><td>18.55</td><td>0</td><td>19.44</td></tr><tr><td>τ = 1</td><td>94.26</td><td>72.53</td><td>0</td><td>19.19</td><td>74.58</td><td>46.47</td><td>0</td><td>11.26</td><td>62.93</td><td>18.66</td><td>0</td><td>19.09</td></tr><tr><td>τ = 10</td><td>95.41</td><td>73.94</td><td>0.56</td><td>39.79</td><td>78.21</td><td>50.67</td><td>0.29</td><td>15.33</td><td>64.70</td><td>21.66</td><td>2.59</td><td>23.88</td></tr><tr><td>τ = 30</td><td>95.26</td><td>74.93</td><td>91.09</td><td>43.35</td><td>78.27</td><td>50.17</td><td>68.47</td><td>18.81</td><td>63.60</td><td>21.30</td><td>49.45</td><td>26.50</td></tr><tr><td>τ = 50</td><td>94.92</td><td>74.44</td><td>93.04</td><td>36.13</td><td>77.97</td><td>49.87</td><td>72.92</td><td>20.50</td><td>62.85</td><td>20.40</td><td>54.95</td><td>28.68</td></tr><tr><td>τ = 70</td><td>95.05</td><td>74.26</td><td>93.85</td><td>35.43</td><td>77.20</td><td>49.61</td><td>73.49</td><td>21.66</td><td>62.14</td><td>20.57</td><td>55.54</td><td>30.14</td></tr><tr><td>τ = 100</td><td>95.05</td><td>73.08</td><td>94.29</td><td>37.32</td><td>77.14</td><td>49.31</td><td>73.65</td><td>22.83</td><td>61.46</td><td>18.82</td><td>54.60</td><td>32.71</td></tr></table>
184
+
185
+ TABLE II: Model performance and Robustness against Common Corruptions and Adversarial attacks (\%) under different temperatures with Transformer Vit-small-patch16-224. -C in the table represents the corresponding Common Corruptions and Perturbations set.
186
+
187
+ <table><tr><td rowspan="2">Temp.</td><td colspan="4">CIFAR10</td><td colspan="4">CIFAR100</td></tr><tr><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td><td>Clean</td><td>-C</td><td>PGD20</td><td>C&amp;W</td></tr><tr><td>τ = 0.1</td><td>98.45</td><td>92.83</td><td>0</td><td>26.13</td><td>89.79</td><td>74.7</td><td>0</td><td>23.71</td></tr><tr><td>τ = 0.5</td><td>98.33</td><td>91.60</td><td>0</td><td>26.26</td><td>90.53</td><td>74.9</td><td>0</td><td>29.25</td></tr><tr><td>τ = 1</td><td>98.29</td><td>92.21</td><td>0</td><td>31.69</td><td>90.78</td><td>75.5</td><td>0</td><td>31.97</td></tr><tr><td>τ = 10</td><td>98.06</td><td>92.19</td><td>89.07</td><td>31.89</td><td>89.94</td><td>75.5</td><td>58.71</td><td>34.96</td></tr><tr><td>τ = 30</td><td>98.23</td><td>91.72</td><td>97.10</td><td>38.21</td><td>89.52</td><td>74.6</td><td>86.25</td><td>36.07</td></tr><tr><td>τ = 50</td><td>98.22</td><td>91.43</td><td>97.75</td><td>39.52</td><td>89.28</td><td>73.8</td><td>87.29</td><td>33.64</td></tr><tr><td>τ = 70</td><td>98.03</td><td>91.20</td><td>97.72</td><td>39.02</td><td>89.48</td><td>74.2</td><td>87.96</td><td>33.81</td></tr><tr><td>τ = 100</td><td>98.07</td><td>91.56</td><td>97.87</td><td>38.26</td><td>89.13</td><td>73.47</td><td>86.99</td><td>31.84</td></tr></table>
188
+
189
+ Clustering is a crucial metric when measuring how an encoder performs. In classification, a good encoder should be able to gather samples from the same class while separating clusters of different classes. Fig. 2 and Fig. 3 present 2D TSNE visualization of the CIFAR10 sample distribution by ResNet50 and transformer. We observe a similar trend: low temperatures lead to more mixed clusters, while models trained with elevated temperatures have better cluster effects. These empirical observations also explain the improved classification performance on clean and non-adversarial perturbations, as well as stronger adversarial robustness, with high temperature in Table I and Table II.
190
+
191
+ # C. Training Convergence
192
+
193
+ We then conduct experiments observing the training process when applying different temperatures to the model. We validate the model on the test set every epoch and record the error
194
+
195
+ ![](images/03d90842b10c7e507daf509f3fcf7ee986b82be6e427fbf34aa980baad0a33f1.jpg)
196
+ (a) Learning Rate $= 0.1$
197
+
198
+ ![](images/249c61489b25f3bce8a170558fd106a28d6926db159a6b48625db05002100fab.jpg)
199
+ (b) Learning Rate $= 0.01$
200
+ Fig. 4: Test error number during training. The red line represents $\tau = 0.5$ , the green line represents $\tau = 1$ , and the orange line represents $\tau = 50$ . The model used is Resnet50 and is tested on CIFAR10. SGD optimizer is used during training with the learning rate set to 0.1 (a) and 0.01 (b). The shade areas consist of 6 total runs with different random seeds. The solid lines indicate the mean value across all runs.
201
+
202
+ probability. As our results shown in Fig. 4(a), we can clearly observe that not only does the training convergence speed increase as the temperature goes up, but models trained with higher temperatures also tend to converge to lower points, leading to better final performance. In fact, when we further decrease the temperature to around 0.1, the model would have a substantial risk of not converging at all. While this might appear contrary to the common understanding that focusing on hard classes will generally benefit the model, a more nuanced explanation is provided by delving further into the gradient analysis provided in Section 4.
203
+
204
+ From Eqn. 4, 5, 6, we observe that if the logit of the target class is not the largest, its gradient will increase dramatically with low temperatures. This is potentially bad for models being known to converge inefficiently under large learning rates. One straightforward solution would be lowering the learning rate as shown in Fig. 4. While the training converging speeds are closer, the run with a higher temperature can still reach a better performance. Furthermore, regardless of the increase in overall training converging speed for $\tau = 0.5$ and $\tau = 1$ runs
205
+
206
+ ![](images/a5cfef4ece1f81efc298067a1679ce52746aefbcfa6b59487306c80683c6c658.jpg)
207
+ (a) $\tau = 0.5$
208
+
209
+ ![](images/84a9e91addc67644dafe6ad2c8c05a2dcc733e10ec9df01d405a59390d95b147.jpg)
210
+ (b) $\tau = 1$
211
+ Fig. 5: The logit changes before and after PGD20 attack. The blue lines stand for the logits of the samples before PGD attack, and the orange lines stand for the logits of the samples after PGD attack.
212
+
213
+ ![](images/61e440f8f1366c34f6650411084a2209893244218f8b6765a79b9acb9987d8df.jpg)
214
+ (c) $\tau = 50$
215
+
216
+ ![](images/b5249e5c6609f055aea0cbd553167c61776ac3b14bdb9e64f95793d89b70a5c7.jpg)
217
+ (d) $\tau = 100$
218
+
219
+ when lowering the learning rate, the final performances for all three runs actually get worse than runs with 0.1 learning rate. Therefore, this phenomenon cannot be attributed solely to a high learning rate. However, if we shift our perspective to the overall direction for optimization as done in Eqn. 9, it becomes clear that during the early stage of training, the encoder $f$ has not converged to an ideal point, leading to sub-optimal values produced for certain update directions. If this happens to be the direction of the target class and the error-prone class which models with small temperatures tend to focus on, the model training can be impacted harmfully. In the meantime, high temperatures equalize the weight given to all the classes and ensure the update is not terribly wrong even if a few $\partial L_{ce}(x) / \partial w_j$ are in the wrong direction. Upon reaching this conclusion, we are surprised to find that this reasoning and our empirical observations align perfectly with the curriculum learning philosophy, that starting from hard samples may harm model optimization and learning outcomes.
220
+
221
+ # D. Adversarial Robustness
222
+
223
+ Table I and Table II show that models trained with elevated temperatures have strong adversarial robustness. TSNE plots in Fig. 2 and Fig. 3 also support this observation. This prompts questions regarding the mechanism behind the gained robustness. In this section, our focus is on investigating the model's behavior under adversarial attacks and understanding why the model demonstrates such robustness.
224
+
225
+ Gradient analysis for adversarial generation. In order to discern the source of model robustness, we follow the work in [9] and study the gradient of the classification loss with respect to the input to analyze the direction of the PGD attack, which can be written as
226
+
227
+ $$
228
+ \frac{\partial L_{ce}}{\partial x} = \left[ \left( \mathbb{S}\left(w_i^T \cdot f(x)\right) - 1 \right) \cdot w_i^T + \sum_{j \neq i} w_j^T \cdot \mathbb{S}\left(w_j^T \cdot f(x)\right) \right] \cdot \frac{\partial f(x)}{\partial x} \tag{10}
229
+ $$
230
+
231
+ As illustrated above, given a well-trained model, for most inputs where $\mathbb{S}(w_i^T\cdot f(x))\approx 1$ , the gradient does not have a noticeable portion in target class $w_{i}$ on the early stage of the attack. This implies that rather than directly 'stepping away' from the target class, the attack will initially focus on approaching other class prototypes. Moreover, the second term,
232
+
233
+ $\sum_{j \neq i} w_j^T \cdot \mathbb{S}(w_j^T \cdot f(x))$ , indicates that all the other directions are weighted by their corresponding probabilities. Therefore, untargeted attacks are actually targeted toward the error-prone class, which most commonly is the largest probability class other than the target class. However, if a model lacks an error-prone class given an input, all $w_k$ will be weighted equally. Consequently, the gradient would point toward all negative class prototypes, making it exceptionally challenging to determine the optimal direction. We noticed that such a scenario occurs when a model is trained with a small $\tau$ . Then let's focus on the gradient update strength. For a data sample $x$ that is classified correctly, $\mathbb{S}(w_j^T \cdot f(x))$ would be small when the model training temperature $\tau$ increases. That is, when a model is trained with high temperatures, not only is the gradient direction to generate adversarial samples unclear, but the gradient strength is also small. Both factors contribute to the robustness of the model when optimized with elevated temperatures.
234
+
235
+ Raw Logit Analysis. With the insight from the gradient analysis on adversarial attack, we then turn to observe the logit output around the adversarial attack, as shown in Fig. 5. Each bar represents the logit value for each class: blue bars stand for the logit outputs of clean samples and orange bars are the logit outputs from adversarial samples. Models share similar characteristics in low temperatures, Fig. 5(a,b), with the logit of the target class going down while the logit of the error-prone class goes up. However, for models trained with large temperatures, Fig. 5(c,d), the two logits are nearly identical with a minimal amount of change. This contrasts with the robustness gains during adversarial training, where the model learns the pattern of the adversarial noise.
236
+
237
+ Class Prototypes Analysis. To further analyze the model behavior, we investigate the relation between the encoded feature, $f(x)$ , and each class prototype, $w_{j}$ . Here, we observe the Euclidean distance and cosine similarity. Fig. 6 shows Euclidean distance and cosine similarity between one sample and all class prototypes. It is evident that as the training temperature goes up, the feature $f(x)$ tends to have an identical distance to all negative class prototypes. This indicates the model trained with high temperature is less likely to have an error-prone class, which is essential for untargeted attacks as we discuss above.
238
+
239
+ Furthermore, to illustrate that the phenomenon shown in Fig. 6 is not limited to one or a few samples, we calculate
240
+
241
+ ![](images/aa2c1d75ea475f7ed7fe57d09e98d5cf7a44778d68ed8cf71cd6f963ded1c788.jpg)
242
+ (a) $\tau = 0.5$
243
+
244
+ ![](images/3629fd12844946645ea809756339577fdd62db320db2569a63a27d59b1b2ccb3.jpg)
245
+ (b) $\tau = 1$
246
+
247
+ ![](images/985add571187b528631bc450f92f52c3d27d5621f5a2e3c77dcc49a940185bb9.jpg)
248
+ (c) $\tau = 50$
249
+
250
+ ![](images/9409bab48ed08788093e934a71f7064a04d3817c6578a689876c69f64f73df20.jpg)
251
+ (d) $\tau = 100$
252
+
253
+ ![](images/1dd3973f76d197569c2a1c67ac82cec2c40bf762712e9dd5f72e6845341c60a5.jpg)
254
+ Fig. 6: A demonstration of the Euclidean distance and cosine similarity between the encoded sample $f(x)$ and all class prototypes for one sample, with different temperature configurations. The red lines indicate the Euclidean distance while the blue lines stand for cosine similarity.
255
+ (a) Euclidean Distance
256
+ Fig. 7: Box plot of the variance of the Euclidean distance and cosine similarity calculated from each sample. The variances are calculated across all negative class prototypes, therefore, lower variance indicates a more uniform distribution of all negative class distances. Each box is a model trained with a different temperature, the green line shows the median value across all variances and the orange line is the mean value of all variances.
257
+
258
+ ![](images/3d7d168852e7eb4697cf52bb8231a1d61f6283afd5ad7a413c88cc34d324149d.jpg)
259
+ (b) Cosine Similarity
260
+
261
+ the variance of Euclidean distance and cosine similarity of all negative class prototypes across all samples in CIFAR10 test set. Note that as illustrated in Fig. 6, different models have very different ranges for Euclidean distance between encoded feature and class prototypes. Therefore, we map the value of different models into the same range to make a more direct comparison. Box plots are drawn in Fig. 7 showing the overall variance results with each box being a model trained with a different temperature. We can observe a clear trend that when the temperature rises, the variance for both Euclidean distance and cosine similarity drops indicating the encoded sample, $f(x)$ , has a more similar distance to all negative class prototypes. One might notice an increase in variance when the temperature reaches some threshold. We label them as extreme temperatures, which are so large that they can adversely affect the model's convergence.
262
+
263
+ # E. Further Discussion on Adversarial Robustness
264
+
265
+ Despite the model trained with high temperatures showing superb robustness against untargeted PGD attacks due to its inherent attribute that exploits the weakness of the PGD attack, it does not hold robustness against targeted attacks. The reason behind this is straightforward. In targeted attacks, Eqn. 10 no longer holds, and the gradient is not obligated to move
266
+
267
+ TABLE III: Preliminary experiments of adversarial training on CIFAR-10 with temperature control. The training scheme uses [18] and the model is ResNet50.
268
+
269
+ <table><tr><td>Temp.</td><td>τ = 0.5</td><td>τ = 1</td><td>τ = 10</td><td>τ = 30</td><td>τ = 50</td><td>τ = 70</td><td>τ = 100</td></tr><tr><td>Clean</td><td>88.98</td><td>85.67</td><td>81.71</td><td>82.62</td><td>83.75</td><td>84.28</td><td>84.27</td></tr><tr><td>PGD20</td><td>35.93</td><td>42.63</td><td>40.95</td><td>44.96</td><td>48.61</td><td>49.16</td><td>48.53</td></tr></table>
270
+
271
+ towards all negative class prototypes with a weighted step size. Therefore, with the only source of the model's gained robustness eliminated, it is naturally vulnerable to targeted attacks.
272
+
273
+ Remark: Even though many attacks claim themselves to be untargeted attacks, they actually optimize toward one self-selected target, which we do not consider to be untargeted attacks under this setting. One popular example is the Difference of Logits Ratio (DLR) attack proposed by [4]. Regardless of its ability to rescale the logit,
274
+
275
+ $$
276
+ \mathrm{DLR}(x, y) = - \frac{z_y - \max_{i \neq y} z_i}{z_{\pi_1} - z_{\pi_3}} \tag{11}
277
+ $$
278
+
279
+ shows that the DLR loss automatically selects the class holding the largest logit other than the target class as the attack target. Therefore, during optimization, it does not need to optimize toward all negative class prototypes. A similar example also includes FAB attack [3].
280
+
281
+ # F. Extended Experiment on Adversarial Training
282
+
283
+ Given that our temperature control method is used inside the Cross-Entropy Loss, it is possible to apply this method in adversarial training. Here, we do preliminary experiments on the adversarial training baseline proposed by [18] for the simplicity of its loss function. We add temperature control inside vanilla loss term forming
284
+
285
+ $$
286
+ L_{AT}(x, x_{adv}, y, F) = L_{ce}(F(x) / \tau, y) + L_{ce}(F(x_{adv}), y), \tag{12}
287
+ $$
288
+
289
+ where $F$ is a combination of encoder and class prototypes.
290
+
291
+ Our preliminary results are listed in Table III. We can clearly observe that model robustness increases as the temperature increases with a slight trade-off with clean accuracy, which
292
+
293
+ confirms the possibility of combining the temperature control method with adversarial training. While further extension to other adversarial training methods is possible, it remains a complex problem, as most adversarial training methods involve complex loss functions that may introduce terms other than the Cross-Entropy function. Also, balancing the vanilla loss term and the adversarial loss term largely relies on empirical experiments. Therefore, further exploration of fitting this into other adversarial training methods falls beyond the scope of this paper.
294
+
295
+ # VI. CONCLUSION & LIMITATION
296
+
297
+ In this paper, we investigate the under-explored property of temperature scaling with the softmax function on image classification tasks. By performing gradient analysis with the Cross-Entropy classification loss and executing different empirical experiments, we show that temperature scaling can be a significant factor in model performance. Further experiments reveal applying high temperatures during training introduces enormous robustness against gradient-based untargeted adversarial attacks. We hope our work raises the interest of other researchers to utilize the simple temperature scaling in the common Cross-Entropy loss.
298
+
299
+ One limitation of this study was that we did not report an explicit algorithm to set the best temperature values. We will work on this in our future work. One take-home note: as a hyperparameter, the tuning cost of the temperature is low, as a wide range of temperatures (30 to 70) can provide improvements to the model.
300
+
301
+ # REFERENCES
302
+
303
+ [1] Agarwala, A., Pennington, J., Dauphin, Y.N., Schoenholz, S.S.: Temperature check: theory and practice for training models with softmax-cross-entropy losses. CoRR abs/2010.07344 (2020)
304
+ [2] Carlini, N., Wagner, D.: Towards evaluating the robustness of neural networks. In: 2017 IEEE symposium on security and privacy (sp). pp. 39-57. IEEE (2017)
305
+ [3] Croce, F., Hein, M.: Minimally distorted adversarial examples with a fast adaptive boundary attack. In: III, H.D., Singh, A. (eds.) Proceedings of the 37th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 119, pp. 2196-2205. PMLR (13-18 Jul 2020)
306
+ [4] Croce, F., Hein, M.: Reliable evaluation of adversarial robustness with an ensemble of diverse parameter-free attacks. In: III, H.D., Singh, A. (eds.) Proceedings of the 37th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 119, pp. 2206-2216. PMLR (13-18 Jul 2020)
307
+ [5] Devlin, J., Chang, M., Lee, K., Toutanova, K.: BERT: pre-training of deep bidirectional transformers for language understanding. CoRR abs/1810.04805 (2018)
308
+ [6] Engstrom, L., Ilyas, A., Athalye, A.: Evaluating and understanding the robustness of adversarial logit pairing. arXiv preprint arXiv:1807.10272 (2018)
309
+ [7] Guo, C., Pleiss, G., Sun, Y., Weinberger, K.Q.: On calibration of modern neural networks. In: Precup, D., Teh, Y.W. (eds.) Proceedings of the 34th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 70, pp. 1321-1330. PMLR (06-11 Aug 2017)
310
+
311
+ [8] Hinton, G., Vinyals, O., Dean, J.: Distilling the knowledge in a neural network (2015)
312
+ [9] Hou, P., Han, J., Li, X.: Improving adversarial robustness with self-paced hard-class pair reweighting. In: Proceedings of the Thirty-Seventh AAAI Conference on Artificial Intelligence (2023)
313
+ [10] Kanai, S., Yamada, M., Yamaguchi, S., Takahashi, H., Ida, Y.: Constraining logits by bounded function for adversarial robustness. In: 2021 International Joint Conference on Neural Networks (IJCNN). pp. 1-8. IEEE (2021)
314
+ [11] Krizhevsky, A., Hinton, G., et al.: Learning multiple layers of features from tiny images (2009)
315
+ [12] Krizhevsky, A., Sutskever, I., Hinton, G.E.: Imagenet classification with deep convolutional neural networks. In: Pereira, F., Burges, C., Bottou, L., Weinberger, K. (eds.) Advances in Neural Information Processing Systems. vol. 25. Curran Associates, Inc. (2012)
316
+ [13] Kull, M., Perello Nieto, M., Kangsepp, M., Silva Filho, T., Song, H., Flach, P.: Beyond temperature scaling: Obtaining well-calibrated multiclass probabilities with dirichlet calibration. In: Wallach, H., Larochelle, H., Beygelzimer, A., d'Alché-Buc, F., Fox, E., Garnett, R. (eds.) Advances in Neural Information Processing Systems. vol. 32. Curran Associates, Inc. (2019)
317
+ [14] Kumar, A., Sarawagi, S., Jain, U.: Trainable calibration measures for neural networks from kernel mean embeddings. In: Dy, J., Krause, A. (eds.) Proceedings of the 35th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 80, pp. 2805-2814. PMLR (10-15 Jul 2018)
318
+ [15] Lakshminarayanan, B., Pritzel, A., Blundell, C.: Simple and scalable predictive uncertainty estimation using deep ensembles. In: Guyon, I., Luxburg, U.V., Bengio, S., Wallach, H., Fergus, R., Vishwanathan, S., Garnett, R. (eds.) Advances in Neural Information Processing Systems. vol. 30. Curran Associates, Inc. (2017)
319
+ [16] Le, Y., Yang, X.: Tiny imagenet visual recognition challenge. CS 231N 7(7), 3 (2015)
320
+ [17] Van der Maaten, L., Hinton, G.: Visualizing data using t-sne. Journal of machine learning research 9(11) (2008)
321
+ [18] Madry, A., Makelov, A., Schmidt, L., Tsipras, D., Vladu, A.: Towards deep learning models resistant to adversarial attacks. In: International Conference on Learning Representations (2018)
322
+ [19] Minderer, M., Djolonga, J., Romijnders, R., Hubis, F., Zhai, X., Houlsby, N., Tran, D., Lucic, M.: Revisiting the calibration of modern neural networks. In: Ranzato, M., Beygelzimer, A., Dauphin, Y., Liang, P., Vaughan, J.W. (eds.) Advances in Neural Information Processing Systems. vol. 34, pp. 15682-15694. Curran Associates, Inc. (2021)
323
+ [20] van den Oord, A., Li, Y., Vinyals, O.: Representation learning with contrastive predictive coding. CoRR abs/1807.03748 (2018)
324
+ [21] Pereyra, G., Tucker, G., Chorowski, J., Kaiser, L., Hinton, G.: Regularizing neural networks by penalizing confident output distributions (2017), https://openreview.net/forum?id=HkCjNI5ex
325
+ [22] Prach, B., Lampert, C.H.: Almost-orthogonal layers for efficient general-purpose lipschitz networks. In: European Conference on Computer Vision. pp. 350–365. Springer (2022)
326
+ [23] Shafahi, A., Ghiasi, A., Huang, F., Goldstein, T.: Label smoothing and logit squeezing: A replacement for adversarial training? (2019)
327
+ [24] Wang, F., Liu, H.: Understanding the behaviour of contrastive loss. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 2495-2504 (June 2021)
328
+ [25] Wang, T., Isola, P.: Understanding contrastive representation learning through alignment and uniformity on the hypersphere. In: III, H.D., Singh, A. (eds.) Proceedings of the 37th International Conference on Machine Learning. Proceedings of Machine Learning Research, vol. 119, pp. 9929-9939. PMLR (13-18 Jul 2020)
329
+
330
+ [26] Wu, Z., Xiong, Y., Yu, S.X., Lin, D.: Unsupervised feature learning via non-parametric instance discrimination. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (June 2018)
331
+ [27] Zhao, H., Qi, X., Shen, X., Shi, J., Jia, J.: Icnet for real-time semantic segmentation on high-resolution images. In: Proceedings of the European Conference on Computer Vision (ECCV) (September 2018)
data/2025/2502_20xxx/2502.20604/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2a40f563f2240a33af52faa9553464f74600545a6f5ee11eb0d993b57e6be69d
3
+ size 451743
data/2025/2502_20xxx/2502.20604/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20639/6f65e6c2-2b42-47a6-80fd-0c91d9b82f41_content_list.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20639/6f65e6c2-2b42-47a6-80fd-0c91d9b82f41_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20639/6f65e6c2-2b42-47a6-80fd-0c91d9b82f41_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:639570cd63e380266acd60674e134b60e0b6d74b3a978fe9ff3afade3aedf1f1
3
+ size 2119387
data/2025/2502_20xxx/2502.20639/full.md ADDED
@@ -0,0 +1,569 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # FedConv: A Learning-on-Model Paradigm for Heterogeneous Federated Clients
2
+
3
+ Leming Shen $^{1}$ , Qiang Yang $^{1,2}$ , Kaiyan Cui $^{1,3}$ , Yuanqing Zheng $^{1}$
4
+
5
+ Xiao-Yong Wei $^{4,1}$ , Jianwei Liu $^{5}$ , Jinsong Han $^{5}$
6
+
7
+ <sup>1</sup>The Hong Kong Polytechnic University, <sup>2</sup>University of Cambridge
8
+
9
+ <sup>3</sup>Nanjing University of Posts and Telecommunications, <sup>4</sup>Sichuan University, <sup>5</sup>Zhejiang University
10
+
11
+ {leming.shen,qiang.yang,kaiyan.cui}@connect.polyu.hk,csyqzheng@comp.polyu.edu.hk,
12
+
13
+ cswei@scu.edu.cn, {jianweiliu,hanjinsong}@zju.edu.cn
14
+
15
+ # ABSTRACT
16
+
17
+ Federated Learning (FL) facilitates collaborative training of a shared global model without exposing clients' private data. In practical FL systems, clients (e.g., edge servers, smartphones, and wearables) typically have disparate system resources. Conventional FL, however, adopts a one-size-fits-all solution, where a homogeneous large global model is transmitted to and trained on each client, resulting in an overwhelming workload for less capable clients and starvation for other clients. To address this issue, we propose FedConv, a client-friendly FL framework, which minimizes the computation and memory burden on resource-constrained clients by providing heterogeneous customized sub-models. FedConv features a novel learning-on-model paradigm that learns the parameters of the heterogeneous sub-models via convolutional compression. Unlike traditional compression methods, the compressed models in FedConv can be directly trained on clients without decompression. To aggregate the heterogeneous sub-models, we propose transposed convolutional dilation to convert them back to large models with a unified size while retaining personalized information from clients. The compression and dilation processes, transparent to clients, are optimized on the server leveraging a small public dataset. Extensive experiments on six datasets demonstrate that FedConv outperforms state-of-the-art FL systems in terms of model accuracy (by more than $35\%$ on average), computation and communication overhead (with $33\%$ and $25\%$ reduction, respectively).
18
+
19
+ # CCS CONCEPTS
20
+
21
+ - Human-centered computing $\rightarrow$ Ubiquitous and mobile computing; - Computing methodologies $\rightarrow$ Learning paradigms.
22
+
23
+ # KEYWORDS
24
+
25
+ Federated learning, Model heterogeneity, Model compression
26
+
27
+ # ACM Reference Format:
28
+
29
+ Leming Shen $^{1}$ , Qiang Yang $^{1,2}$ , Kaiyan Cui $^{1,3}$ , Yuanqing Zheng $^{1}$ , Xiao-Yong Wei $^{4,1}$ , Jianwei Liu $^{5}$ , Jinsong Han $^{5}$ . 2024. FedConv: A Learning-on-Model
30
+
31
+ Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than ACM must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
32
+
33
+ MOBISYS '24, June 3-7, 2024, Minato-ku, Tokyo, Japan
34
+
35
+ © 2024 Association for Computing Machinery.
36
+
37
+ ACM ISBN 979-8-4007-0581-6/24/06...$15.00
38
+
39
+ https://doi.org/10.1145/3643832.3661880
40
+
41
+ Paradigm for Heterogeneous Federated Clients. In The 22nd Annual International Conference on Mobile Systems, Applications and Services (MOBISYS '24), June 3-7, 2024, Minato-ku, Tokyo, Japan. ACM, New York, NY, USA, 14 pages. https://doi.org/10.1145/3643832.3661880
42
+
43
+ # 1 INTRODUCTION
44
+
45
+ Federated Learning (FL) allows mobile devices to collaboratively train a shared global model without exposing their private data [8-10, 80, 81]. In each communication round, clients keep their private data locally and only upload their model parameters or gradients to a server after local training. The server then orchestrates model aggregation and updates the global model for the next round [53].
46
+
47
+ In real-world deployments, federated clients typically have diverse system resources, calling for heterogeneous models with different sizes [29, 38, 86]. As shown in Fig. 1, high-end PCs can support large models, while wearables cannot. Simply assigning the smallest affordable model to all clients results in resource underutilization and sub-optimal performance.
48
+
49
+ Previous solutions that generate heterogeneous models mainly include knowledge distillation (KD) [46, 48], parameter sharing [22], and parameter pruning [20, 42]. KD distills the knowledge from heterogeneous client models to a global model for aggregation. Nonetheless, it imposes additional compute overhead on clients [54] as they must first train on public data and then transfer knowledge via private data. Parameter-sharing strategies distribute different regions of a global model as sub-models to different clients. However, some sub-models can only be trained on a small portion of the dataset. Parameter pruning methods utilize channel or filter level pruning to generate sparse sub-models. However, they suffer from information loss due to the removal of entire channels or filters ( $\S$ 2.2). Moreover, to determine the pruning structure, clients need to receive the large global model from the server and then perform the pruning operation locally, increasing the overhead of clients.
50
+
51
+ Ideally, the heterogeneous sub-models should retain the information of the global model in a way that they can be efficiently sent to and trained on resource-constrained clients without any extra overhead. To this end, we propose FedConv, a client-friendly FL framework for heterogeneous models based on a new learning-on-model paradigm. The key insight is that convolution, a technique to extract effective features from data, can also compress large models via various receptive fields while preserving crucial information. In FedConv, the server performs convolutional compression on the global model to learn parameters of diverse sub-models according to clients' resource budgets. Clients directly train on the compressed sub-models as in traditional FL without model decompression. In model aggregation, the server first uses transposed convolution
52
+
53
+ ![](images/fe0a3bb4464febb5becfd6eb74fb953eb894d130741ba63f5c7959d0dee1176b.jpg)
54
+ Figure 1: Heterogeneous models in federated learning.
55
+
56
+ (TC) to transform heterogeneous client models into large models that have the same size as the global model. Then, the server assigns different learned weight vectors to these dilated models and aggregates them. FedConv optimizes the model compression, dilation, and aggregation processes by leveraging a small dataset on the server that can be obtained via crowdsourcing, or voluntarily shared by users without compromising their privacy. Therefore, our system does not incur extra communication or computation overhead for resource-constrained clients.
57
+
58
+ To deliver a practical system, we address three key technical challenges: 1) How to learn the parameters of heterogeneous submodels via convolution while retaining the global model's prediction capability? To tackle this problem, we formulate the compression process as a training task. By iteratively fine-tuning the convolution operations, heterogeneous sub-models can be learned effectively and achieve a performance comparable to that of the global model. 2) How to preserve clients' personalized information after converting their models to a unified size for aggregation? We apply separate TC operations on each client's model parameters and learn a set of dilated models, which inherit their personalized information. We add a residual connection to further enhance the transfer of personalized information from client models to dilated models. 3) How to aggregate these dilated models with imbalanced contributions of heterogeneous federated clients? As client models are trained on the non-independent and identically distributed (non-IID) personalized data, directly averaging [53] these large models would lead to performance degradation. To tackle this issue, we set different learnable weight vectors for the dilated models. Through a tuning process, the server can learn the relative importance of each model and orchestrate the final aggregation.
59
+
60
+ We implement FedConv<sup>1</sup> based on a user-friendly FL framework (Flower [5]) with two representative FL tasks (image classification and human activity recognition). We evaluate FedConv on six public datasets and compare its performance with eight baselines. The experiments show that FedConv outperforms the SOTA in terms of inference accuracy (by more than $35\%$ on average), memory, and communication cost (with $21\%$ and $25\%$ reduction, respectively). Besides, FedConv substantially reduces the computation overhead for federated clients and saves the total training time.
61
+
62
+ In summary, we make the following key contributions:
63
+
64
+ - To our knowledge, FedConv is the first model compression method based on convolution operations. This paradigm can not only compress the global model effectively, but also
65
+
66
+ preserve its crucial information, without imposing extra burden on resource-constrained mobile clients.
67
+
68
+ - FedConv handles heterogeneous models with new technologies. Specifically, we propose a convolutional compression module to compress the global model and generate heterogeneous sub-models via our learning-on-model paradigm. We design a transposed convolutional dilation method to obtain models with uniform sizes and use weighted average aggregation to balance clients' contributions for final aggregation.
69
+ - We evaluate FedConv based on Flower and conduct comprehensive evaluations with heterogeneous mobile devices. The results demonstrate the superior performance of FedConv in terms of both inference accuracy and resource efficiency.
70
+
71
+ # 2 MOTIVATION
72
+
73
+ In this section, we underscore the necessity of model heterogeneity-aware FL systems and analyze SOTA works (parameter sharing and model pruning) to motivate our work. Knowledge distillation-based methods incur heavy overhead on clients ($\S$ 6.3), which makes them unsuitable for resource-constrained mobile devices.
74
+
75
+ # 2.1 Necessity of Heterogeneous Models
76
+
77
+ In a conventional FL system, all clients typically share the same model architecture. In practice, however, different clients have diverse computation and communication resources. For example, high-end edge PCs usually have more resources, while low-cost embedded systems have much constrained resources. Therefore, the size of a global model is typically upper-bounded by the clients with the least system resources in conventional FL. Such a one-size-fits-all solution often leads to sub-optimal performance. Moreover, clients with more resources suffer from starvation [50, 63] when waiting for weaker clients in synchronized FL [45, 62, 67]. To make full use of more powerful clients while accommodating those with limited resources, it is necessary to develop an FL system that supports heterogeneous models with varied parameter sizes that best fit all clients with diverse resources.
78
+
79
+ # 2.2 Limitations of Existing Solutions
80
+
81
+ Imbalance problem in parameter sharing. HeteroFL [22] is a representative parameter sharing scheme where clients share different regions of the global model. As shown in Fig. 2(a), the shared portions (the overlapped part across different sizes of models) are fixed, and the parameters are aggregated only from clients that hold them, missing the information from other clients. To showcase its impact, we train a ResNet18 model [31] on the CIFAR10 dataset [40] with 100 global rounds. We find that smaller models outperform larger models (Fig. 2(b)) due to their exposure to a larger volume of data held by more clients. Besides, the global model exhibits instability and even performs worse than the large model due to unbalanced aggregation. Thus, this scheme will lead to imbalanced performance among clients and unexpected performance degradation [4, 82]. FedRolex [4] proposes a dynamic sharing scheme to tackle the imbalance issue. It enables sub-models to share different parts of the global model's parameters via multiple rolling windows, ensuring that the aggregated parameters are evenly trained on all client-side datasets. However, since different clients contribute distinct parts, the aggregated parameters comprise mixed windows
82
+
83
+ ![](images/40f7b7d96776f376587deef64e92dab93ba41e3354ad4d8fb66bb49540db11ec.jpg)
84
+ (a) The parameter sharing scheme
85
+
86
+ ![](images/2ffabb80bab737cdbe98a1b428e43f97caa63d33f7f54782f9c91031384ff628.jpg)
87
+ (b) Performance difference
88
+
89
+ ![](images/33301a21731b2b4324c841d5d9dc22c58f1fd9341c0cd34fe4481778148549ec.jpg)
90
+ (c) Two pruning schemes
91
+
92
+ ![](images/55f334e4c3ce856ae891efa2f832b8ff4edb03195cd11393cf8211b5a1416ec8.jpg)
93
+ (d) Information loss
94
+
95
+ ![](images/6e8a8fee015290e2e7fdcb41accccc97d486b621beb394692ae788295508bdc5.jpg)
96
+ Figure 2: The parameter sharing and pruning scheme with limitations (the pruned part is colored blue in (c)).
97
+ Figure 3: Convolutional compression process.
98
+
99
+ from the diverse sub-models. As a result, the distribution of the global model's parameters is distorted and thus cannot effectively extract useful features from the input data [70], leading to degraded performance and a longer convergence time.
100
+
101
+ Information loss and client workload in model pruning. As shown in Fig. 2(c), model pruning can be categorized into channel-level and filter-level pruning. Channel-level pruning removes some input channels from the model parameters, where the corresponding channels of the input data are also excluded from the training process. Filter-level pruning prunes out some output channels (filters), resulting in fewer output feature maps. Consequently, these two schemes suffer from information loss as they not only discard some input data channels or feature maps but also remove certain weights or connections in model parameters [7]. To study the information loss, we apply channel-level and filter-level pruning to the pre-trained ResNet18 model based on the parameter magnitude ranking. We measure the mutual information (MI) $\mathrm{I}(X,Z)$ , which quantifies the amount of information that can be inferred from $X$ after observing $Z$ [13]. We find that the MI between the parameters of the ResNet18 model and itself is 3.29, whereas the MI between the parameters of the pre-trained model and the channel-pruned model is reduced to 2.56, with an accuracy drop from $84.04\%$ to $73.36\%$ . Similarly, the accuracy of the filter-pruned model drops to $75.64\%$ while MI is 2.68. This indicates information loss due to pruning. Moreover, existing pruning-based methods typically require the server to transmit all the parameters of a global model to clients, and perform model pruning at resource-constrained clients, which incurs high communication and computation overhead for clients.
102
+
103
+ # 2.3 Model Compression via Convolution
104
+
105
+ Ideally, a compression method should minimize the information loss of model parameters to retain the performance after compression, without posing extra computation or communication burden on resource-constrained clients. To this end, we propose a novel
106
+
107
+ convolutional compression technique that applies convolution operations on the global model parameters to generate the parameters of heterogeneous sub-models while preserving crucial information of the global model (e.g., parameter distributions and patterns). Our preliminary studies find that by applying refined convolution operations via various receptive fields [84], the sub-model can inherit spatial and hierarchical parameter patterns from the global model. These receptive fields selectively determine which parameter information should be retained after convolution. Hence, the generated sub-models can also extract valuable features from the input data, similar to the features extracted by the large global model.
108
+
109
+ Fig. 3 shows the convolution-based compression process. To showcase that a compressed model generated by convolutional compression (compression layers) can effectively extract features from the input data, we compress the pre-trained model described in § 2.2 at a shrinkage ratio of 0.75. We then select the top-4 and top-3 feature maps with the highest importance outputted by a convolutional layer (measured by IG [68]) from the large model and the sub-model, respectively. As shown in Fig. 3, both the large model and the sub-model can learn and focus on the key features (e.g., the deer's body, head, and horn). Moreover, compared with the large model, the first two feature maps from the sub-model pay more attention (deeper color) to the deer's body and ears. The third feature map can be regarded as a fusion of the last two feature maps from the large model, as it focuses on both the body and head of the deer. This observation indicates that the feature extraction capability of the large model can be effectively preserved and transferred to the sub-model via convolutional compression. Besides, the accuracy of the sub-model only decreases by $0.19\%$ and the mutual information between the parameters of the large model and the sub-model is 3.09 (Fig. 2(d)), which is much higher than that of the pruned model. This indicates that our proposed convolutional compression method can effectively minimize information loss after model compression.
110
+
111
+ To optimize the compression process, same as existing knowledge distillation-based FL systems [46, 48] that need server-side data during FL training, we also maintain a small publicly available dataset on the server to fine-tune the compression process (§ 4.1). The server-side data can be collected from public datasets, or crowdsourced by volunteers who are willing to share their data. We note that same as conventional FL schemes, clients do not need to send their data to the server. By leveraging such server-side data with iterative refining of the compression process, the server can gradually gain a comprehensive global view [20] of the entire FL process and thus transfer more general information from the server to heterogeneous clients [3].
112
+
113
+ ![](images/5606a14230d43e699de3314f7cf5726a2245854346b5a59e9ad37fa0fd4c0c1f.jpg)
114
+ Figure 4: Framework architecture of FedConv.
115
+
116
+ # 3 FRAMEWORK OVERVIEW
117
+
118
+ Fig. 4 illustrates the architecture of FedConv, consisting of three main modules: convolutional compression (§ 4.1), transposed convolutional dilation (§ 4.2), and weighted average aggregation (§ 4.3).
119
+
120
+ The server first initializes a global model with an estimated memory requirement and records a set of shrinkage ratios (SR) reported by each client based on their resource profiles (①). In the first communication round, the server pre-trains the global model for several epochs with a server-side dataset to gain a better global view of the data distribution [20]. Then, based on the SRs, a set of fine-tuned convolution parameters are used to compress the global model with the convolutional compression module, and generate heterogeneous sub-models (②). Afterwards, the server sends the heterogeneous sub-models to federated clients (③). Clients then perform several epochs of local training with their private training dataset to fine-tune the received sub-models (④), and then upload the updated parameters to the server (⑤). After that, the server performs the transposed convolutional dilation, where different transposed convolution parameters are used to dilate the sub-models to a set of large models that have the same size as the global model (⑥). Finally, the server applies the weighted average aggregation to aggregate the dilated models with the learned weights(⑦).
121
+
122
+ In FedConv, the compression and dilation operations are transparent to clients and performed by the powerful server, which can be seamlessly integrated into conventional FL systems where clients only need to perform local training.
123
+
124
+ # 4 FRAMEWORK DESIGN
125
+
126
+ # 4.1 Convolutional Compression
127
+
128
+ The convolutional compression module leverages a set of convolutional layers (termed as compression layers) to compress the global model and generate heterogeneous sub-models. As shown in Fig. 3, after feeding the global model parameters to the compression layers, the compressed parameters of the sub-model become smaller and output fewer feature maps. We use the server-side data to iteratively optimize the convolution parameters (i.e., the parameters of compression layers) until the sub-models can achieve comparable performance to the global model, as they inherit the parameter information from the global model with a comprehensive perspective. Thus, they are able to extract general features and can be further updated by clients to fit their local data for personalization.
129
+
130
+ Convolution configurations. To determine the sizes of the sub-models, clients first specify their shrinkage ratios (SRs). Specifically,
131
+
132
+ the server first broadcasts the size of the global model to all the clients. Then, each client will determine an appropriate SR for the corresponding sub-model to meet its own computing resource budget $^2$ (e.g., GPU memory, network bandwidth). Subsequently, the SRs are transmitted back to the server. We note that same as conventional FL schemes, no client-side sensor data needs to be transferred to the server. Accordingly, the server determines the corresponding configurations (i.e., input channel in, output channel out, kernel size $(k_{1}, k_{2})$ , stride $s$ , and padding $p$ ) of the compression layers so that the sizes of generated sub-models match with the expected SRs. Let's take convolution layers as an example. As shown in Fig. 3, a convolutional layer in the global model has 16 input and 32 output channels with a kernel size of $(3, 3)$ . Regarding each element in the kernel as a single unit, we can reshape its parameter matrix from $(32, 16, 3, 3)$ to $9 \times (1, 32, 16)$ . Suppose the SR is 0.75, the shape of the parameter matrix becomes $9 \times (1, 24, 12)$ after compression. Therefore, we use nine separate 2D convolutional layers (i.e., compression layers) to compress the reshaped matrix. The configuration $^3$ of each compression layer is Conv $\langle in = 1, out = 1, k = (9, 5), s = 1, p = 0 \rangle$ . This convolution-based process can also be applied to compress other types of layers by properly adjusting the configurations. Note that the input channels of the first layer will not be compressed, ensuring that all channels of the raw data can be fed into the sub-model. Similarly, the output channels of the last layer are also uncompressed, ensuring that the sub-models and the global model have the same prediction task.
133
+
134
+ Convolution parameter fine-tuning. Next, we need to fine-tune the convolution parameters so that the generated sub-models inherit the parameter information from the global model and achieve comparable performance. We use the server-side data to iteratively adapt the convolution parameters by minimizing the loss between the ground truth and the prediction result of the compressed model:
135
+
136
+ $$
137
+ \begin{array}{l} \min _ {w _ {C o n v, l}} \sum_ {x} \mathcal {L} (f (x; W _ {G, l} \odot w _ {C o n v, l}), y), \tag {1} \\ s. t. \forall l \in \{1, 2, \dots , L \}, \forall (x, y) \in \mathcal {D}. \\ \end{array}
138
+ $$
139
+
140
+ where $\mathcal{L}$ is the Cross-entropy loss [75] and $f(\cdot)$ is the forward function of the compressed model. $(x,y)$ denotes a data sample and its corresponding label in the server-side dataset $\mathcal{D}$. $W_{G,l}$ denotes the parameters of the $l$-th layer in the global model, $\pmb{w}_{Conv,l}$ denotes the convolution parameters, and $\odot$ denotes the convolution operation. To fine-tune the convolution
141
+
142
+ ![](images/3aac45cdf0744cafbd5f12575ec4ee1f1e2a130c31787e86abb63cb470130946.jpg)
143
+ Figure 5: An example of the convolution/TC process (black arrow: forward, blue arrow: backward, blue box: larger parameters, grey box: smaller parameters, orange: Conv/TC).
144
+
145
+ ![](images/cfeeac0f6a80e932aa0725f4017cdcea8a2a364057580d1c0e1e42818a02bcae.jpg)
146
+
147
+ parameters, the compressed sub-model $(W_{G,l} \odot w_{Conv,l})$ is first evaluated on the server-side data. By back-propagating the calculated loss, the convolution parameters $w_{Conv,l}$ are updated while the others (i.e., the parameters of the global model and the sub-model) are frozen.
148
+
149
+ Remarks. In the model compression process, the server applies compression layers on the global model parameters and iteratively fine-tunes the convolution parameters to learn heterogeneous sub-models, aiming to preserve crucial parameter information and prediction capability from the global model. Such a learning-on-model method fundamentally differs from the traditional learning-on-data paradigm. Specifically, the learning-on-data method takes raw data as input and trains a model to extract features, while our learning-on-model paradigm takes model parameters as input and uses compression layers to generate sub-model parameters. The parameters of compression layers are fine-tuned by minimizing the loss between the sub-model outputs and the ground-truth labels.
150
+
151
+ Challenges. Nevertheless, several practical challenges emerge during this compression process. We use a pre-trained model on the MNIST [19] dataset (with an accuracy of $99.04\%$ ) as an example to show how we address these challenges and the progress we make.
152
+
153
+ (1) Information loss. After fine-tuning the convolution parameters as aforementioned, we find that the amount of the parameter information in the sub-model inferred from the global model is still low (the mutual information between the parameters of the global model and the sub-model is only 0.84). This can be attributed to the limited capability of simple compression layers to capture more fine-grained parameter information effectively, leading to a lower accuracy of the sub-model $(90.2\%)$ . To address this issue, we add two $1\times 1$ convolutional layers with biases before compression (Fig. 5(a)). Intuitively, the first Conv $1\times 1$ increases the number of output channels to 16, capturing more diverse and complex parameter information in the global model. The second Conv $1\times 1$ decreases the channel number back to one, fusing information from different channels and producing a comprehensive parameter representation. In addition, we add a residual connection between the global model parameters and the output of the second Conv $1\times 1$ , facilitating the transfer of parameter information from the global model to sub-models through convolution operations. With these designs, the accuracy of the sub-models increases to $93.15\%$ , indicating that the information loss is effectively mitigated.
154
+
155
+ (2) Imbalanced parameter distribution. As shown in Fig. 6(a), although the distributions of the parameters in sub-models and the global model are similar, the parameters in sub-model skew towards
156
+
157
+ ![](images/79f7a11341751fb1344f0fa0d7fc8d11b5d53ae51d35962a3200ad05edf29bda.jpg)
158
+ (a) Parameter distribution
159
+
160
+ ![](images/7d44660d8ad74d708c3987bececbfb04438971e68af6821207adf0f18383ab77.jpg)
161
+ (b) Modified activation function
162
+
163
+ ![](images/24aab317592813f9ab543d8db3f318e495789bef9449ba7bd16a560e838dfa12.jpg)
164
+ Figure 6: (a) After using the MLR, the distribution of the parameters in the sub-model becomes similar to the large global model (blue curve); (b) The MLR function.
165
+ (a) Compressed model's accuracy
166
+ Figure 7: (a) Accuracy of the compressed model with: 1) two Conv $1 \times 1$ , 2) the MLR function, 3) weight normalization, 4) learning rate scheduler; (b) The parameter distribution of the dilated models from different sizes of client models.
167
+
168
+ ![](images/d2540f1f57c954887a121df44dd6dc25c052e8ab649f2c9275d6993aaf4b20fa.jpg)
169
+ (b) Different parameter distribution
170
+
171
+ negative values, leading to numerical instability, slow convergence, and unexpected performance degradation [27]. Therefore, we adopt a modified Leaky ReLU (MLR) activation function (Fig. 6(b)) to rectify the negative value, where $s_n$ and $s_p$ are slopes for negative and positive values, respectively. With a small $s_n$ , the MLR can suppress the negative parameters but not entirely eliminate them, thereby preserving potential information embedded in negative parameters. After applying the MLR, the sub-model parameters exhibit a similar distribution pattern and value range to the global model (Fig. 6(a)). The accuracy of the sub-model further increases to $95.06\%$ .
172
+
173
+ (3) Performance fluctuation. During the fine-tuning process, we observe significant performance fluctuation of the sub-model. This is because in learning-on-data methods, model parameters are directly updated during training. However, in our learning-on-model method, only the convolution parameters are updated, which subsequently generate sub-model parameters via the convolution process. As a result, the performance of the sub-model exhibits much higher sensitivity to the changes in convolution parameters. To address this issue, we apply weight normalization [60] on the convolution parameters to decouple their magnitude and direction during updating, which stabilizes the convergence in a fine-grained way. Moreover, we apply a cosine annealing learning rate scheduler [35] that dynamically varies the learning rate to avoid local optima and enables faster convergence [41]. The learning rate undergoes a cosine function decay as the epoch progresses:
174
+
175
+ $$
176
+ l r = l r _ {\min } + 0. 5 \left(l r _ {\max } - l r _ {\min }\right) \left(1 + \cos \left(e / T _ {\max } \cdot \pi\right)\right) \tag {2}
177
+ $$
178
+
179
+ where $e$ is the current epoch index, $lr_{min}$ and $lr_{max}$ are the lower and upper bound of the learning rate, and $T_{max}$ is the maximum number of iterations before the $lr$ restarts to $lr_{min}$ . As shown in Fig. 7(a), after applying weight normalization and the learning rate scheduler, the accuracy of the sub-model improves to $96.8\%$ and
180
+
181
+ $98.59\%$ , respectively. Meanwhile, the performance becomes more stable, and the sub-model converges after around 20 epochs.
182
+
183
+ After fine-tuning the convolution parameters, the compressed models will have comparable performance to the global model. The server then sends the compressed parameters to the corresponding clients for local training. The fine-tuned convolution parameters will be kept on the server and updated in the next communication round. Note that this process is performed completely on the server without imposing any extra computation or communication burden on clients. The detailed process is shown in Algorithm 1.
184
+
185
+ # 4.2 Transposed Convolutional Dilation
186
+
187
+ Upon receiving the updated sub-models from clients, we need to rescale the heterogeneous client models to a unified size for further aggregation. Although knowledge distillation-based methods [46, 48] are promising, they impose significant computational and communication overhead on clients (§ 1). Instead, we use transposed convolution (TC) layers on the server side, a reverse operation to the convolutional compression. Unlike the compression stage, we apply different TC layers to each of the received client models, as they are trained on non-IID data with different sensing heterogeneity and thereby inherently carry personalized information. By fine-tuning the TC parameters (i.e., the parameters of the TC layers), the personalized information embedded in each client model's parameters will be preserved and transferred to the dilated models for subsequent aggregation.
188
+
189
+ TC configurations. To transform the heterogeneous models from different clients to a unified size, it is important to ensure that the configurations of each TC layer for dilation are identical to those of the corresponding compression layer. For instance, as illustrated in Fig. 5(b), a convolutional layer in a client model has 12 input and 24 output channels with a kernel size of (3, 3). With the SR of 0.75, the configuration of the TC layer should be TC $\langle \mathbf{in} = 1$ , $\mathbf{out} = 1$ , $\mathbf{k} = (9, 5)$ , $s = 1$ , $\mathbf{p} = 0 \rangle$ . Similarly, this process can also be employed to dilate other kinds of network layers. Note that the input channel number of the first layer and the output channel number of the last layer in all client models are also unchanged.
190
+
191
+ TC parameter fine-tuning. To fine-tune the TC parameters, we also set them as learnable variables and minimize the loss between the ground truth and the prediction result of the dilated large model:
192
+
193
+ $$
194
+ \min _ {\boldsymbol {w} _ {T C, l}} \sum_ {\boldsymbol {x}} \mathcal {L} (F (\boldsymbol {x}; \boldsymbol {W} _ {C, l} \circledast \boldsymbol {w} _ {T C, l}), \boldsymbol {y}), \tag {3}
195
+ $$
196
+
197
+ $$
198
+ s. t. \forall l \in \{1, 2, \dots , L \}, \forall (x, y) \in \mathcal {D}.
199
+ $$
200
+
201
+ where $F(\cdot)$ is the forward function of the dilated large model, $W_{C,l}$ denotes the parameters of the client model, $w_{TC,l}$ denotes the TC parameters, and $\circledast$ represents the TC operation. To further enhance the integration of personalized information into the dilated models, we also add two TC $1 \times 1$ layers with a residual connection before the dilation process (detailed in Fig. 5(b)).
202
+
203
+ # 4.3 Weighted Average Aggregation
204
+
205
+ After generating a set of dilated large models, the server aggregates them to obtain the global model. However, we find that directly averaging [53] the parameters of all the dilated models leads to severe performance degradation (the accuracy of the aggregated model is only $47.6\%$ on the MNIST dataset). The reasons are twofold: 1) the magnitude of the dilated models' parameters varies with
206
+
207
+ Algorithm 1: Convolutional compression
208
+ Input: Global round $r$, pre-training epochs $e_p$, convolution parameter updating epochs $e_c$, device type number $n$, SR list $\{SR_1,SR_2,\dots ,SR_n\}$, $T_{max}$, $lr_{max}$, $lr_{min}$. Output: Compressed parameters $\{P_1,P_2,\dots ,P_n\}$
209
+ 1 /\*Configuration initialization \*/;
210
+ 2 if $r = 1$ then
211
+ 3 Initialize the global model as $\pmb {w}(\pmb {r})$ .
212
+ 4 Pre-train $\pmb {w}(\pmb {r})$ for $e_p$ epochs on $\mathcal{D}$ .
213
+ 5 for device_type $i\in \{1,2,\dots ,n\}$ parallel do
86
+ 6 current_shrinkage_ratio $\leftarrow SR_{i}$;
+ 7 Conv$_i(r)\gets$ Initialize_Conv($w(r)$, $SR_i$);
215
+ 8 end
216
+ 9 else
217
+ 10 Convi $(r)\gets Convi(r - 1)$
218
+ 11 end
219
+ 12 /\*Convolution parameters fine-tuning \*/;
220
+ 13 for device_type $i\in \{1,2,\dots ,n\}$ parallel do
+ 14 for $e\in \{1,2,\dots ,e_c\}$ do
+ 15 $lr = lr_{min} + 0.5(lr_{max} - lr_{min})(1 + \cos (e / T_{max}\cdot \pi))$;
+ 16 for each $(x,y)$ in $\mathcal{D}$ do
+ 17 $P_{i}\gets \mathsf{MLR}(\boldsymbol{w}(\boldsymbol{r})\odot \mathsf{Conv}_i(\boldsymbol{r}))$;
+ 18 output $\leftarrow f(P_i;x)$;
+ 19 loss $\leftarrow$ Loss_fn(output, $y$);
+ 20 Back-propagate gradient $g_{i}$ to Conv$_i(r)$;
+ 21 Conv$_i(r)\gets$ Conv$_i(r) - lr\cdot g_i$;
+ end
221
+ 22 end
222
+ 23 end
223
+ 24 end
224
+ 25 Send $P_{i}$ to the corresponding client;
225
+
226
+ their sizes [20]; 2) the parameters of the dilated models through TC operations also carry personalized information from different clients, thus exhibiting distinct patterns and varying skewness toward client-side data distribution (Fig. 7(b)). Simply aggregating these dilated models overlooks the diverse contributions that heterogeneous clients can make in the aggregation process.
227
+
228
+ Contribution coordination. To fuse and balance the diverse personalized information from the dilated models, we first normalize the parameters of all the dilated models to [0, 1] and then assign different learnable weight vectors to every network layer in each dilated model for the weighted aggregation. The parameters of the $l$ -th aggregated network layer are then expressed as:
229
+
230
+ $$
231
+ \boldsymbol {W} _ {l} = \left(\sum_ {j = 1} ^ {n} \boldsymbol {v} _ {j, l} \cdot s _ {j} \cdot \boldsymbol {w} _ {j, l}\right) / \sum_ {j = 1} ^ {n} s _ {j} \tag {4}
232
+ $$
233
+
234
+ where $W_{l}$ is the parameters of the $l$ -th layer in the aggregated model, $n$ is the number of large models. $w_{j,l}$ and $v_{j,l}$ are the parameters and the corresponding weight vector of the $l$ -th layer in the $j$ -th large model, $s_{j}$ is the number of data samples that are used for training the $j$ -th client model. We iteratively optimize $v_{j,l}$ via gradient descent to gradually balance the distinct contributions.
235
+
236
+ Aggregation enhancement. To further quantify the different contributions of heterogeneous clients and enhance the aggregation process, we use Kullback-Leibler Divergence (KLD) [47] as a practical criterion to measure the similarity between the parameters of the global model and the dilated models. The higher
237
+
238
+ Table 1: The hardware configuration of heterogeneous devices in a real-world experiment.
239
+
240
+ <table><tr><td>Type</td><td>Device Name</td><td>Number</td><td>CPU</td><td>RAM</td><td>GPU</td><td>GDDR</td><td>Network</td><td>SR</td></tr><tr><td>Server</td><td>ASUS W790-ACE Server</td><td>1</td><td>Intel Xeon Gold 6248R, 3.0GHz</td><td>640GB</td><td>NVIDIA A100</td><td>40GB</td><td>Ethernet</td><td>-</td></tr><tr><td>Router</td><td>Mi Router AX3000</td><td>1</td><td>Qualcomm IPQ5000 A53, 1.0GHz</td><td>256MB</td><td>-</td><td>-</td><td>Ethernet</td><td>-</td></tr><tr><td rowspan="3">PC</td><td>Supermicro X11SCA-F</td><td>2</td><td>Intel Xeon E-2236, 3.4GHz</td><td>32GB</td><td>NVIDIA RTX A4000</td><td>16GB</td><td>Ethernet</td><td>1.0</td></tr><tr><td>Supermicro SYS-5038A-I</td><td>2</td><td>Intel Xeon E5-2620 v4, 2.10GHz</td><td>64GB</td><td>NVIDIA GeForce GTX 1080 Ti</td><td>12GB * 2</td><td>Wi-Fi</td><td>1.0</td></tr><tr><td>ThinkPad P52s Laptop</td><td>4</td><td>Intel i5-8350U, 1.70GHz</td><td>32GB</td><td>NVIDIA Quadro P500</td><td>2GB</td><td>Wi-Fi</td><td>0.75</td></tr><tr><td rowspan="3">Board</td><td>NVIDIA Jetson TX2</td><td>4</td><td>Dual-Core NVIDIA Denver 2, 2GHz</td><td>8GB</td><td>256-core NVIDIA Pascal GPU</td><td>4GB</td><td>Wi-Fi</td><td>0.75</td></tr><tr><td>NVIDIA Jetson Nano</td><td>4</td><td>ARM Cortex-A57 MPCore, 1.5 GHz</td><td>4GB</td><td>NVIDIA Maxwell architecture GPU</td><td>2GB</td><td>Wi-Fi</td><td>0.5</td></tr><tr><td>Raspberry Pi 4</td><td>4</td><td>Quad core Cortex-A72, 1.8GHz</td><td>8GB</td><td>-</td><td>-</td><td>Wi-Fi</td><td>0.25</td></tr></table>
241
+
242
+ the similarity, the more contribution the large model will make to the aggregation. Thus, the aggregated global model can attain higher generalizability and a more comprehensive global perspective. The KLD for the $j$ -th dilated model is formulated as $KLD_{j} = \sum_{l=1}^{L} \sum_{x} W_{G,l}(x) \log \frac{W_{G,l}(x)}{w_{j,l}(x)}$ , where $W_{G}$ is the global model parameters from the previous communication round. The optimization of the weight vectors is expressed as:
243
+
244
+ $$
245
+ \mathcal {L} (\boldsymbol {v}) = \mathcal {L} _ {\mathcal {D}} (\boldsymbol {W}) + \lambda \sum_ {j = 1} ^ {n} K L D _ {j} \tag {5}
246
+ $$
247
+
248
+ where $\mathcal{L}$ is the Cross-Entropy loss for the model output, and $\lambda$ is a coefficient for balance. After fine-tuning the weight vectors, the aggregated model will be used for the next round.
249
+
250
+ # 5 EXPERIMENT SETUP
251
+
252
+ # 5.1 Implementation
253
+
254
+ We implement FedConv with PyTorch [57] and Flower [5]. The load_state_dict() function in PyTorch is overridden to enable the gradient to back-propagate to the convolution/TC parameters. We evaluate FedConv with a cloud server, a router, and 20 heterogeneous mobile devices with different hardware and network conditions. Detailed configurations of the heterogeneous devices are described in Table 1. We deploy these edge devices in our offices and laboratories under real-world network conditions.
255
+
256
+ # 5.2 Datasets and Models
257
+
258
+ We select two representative mobile applications and use different model architectures and sizes on various datasets.
259
+
260
+ Application#1: Image Classification. Image classification is a popular computer vision application for FL. We choose three datasets: 1) MNIST [19] consists of 60,000 $28 \times 28$ gray-scale images of ten handwritten digits. We use a convolutional neural network (CNN) with two convolutional layers and one fully connected layer as the classification model; 2) CIFAR10 [40] consists of 60,000 $32 \times 32$ color images in ten classes. We use ResNet18 [31] to perform the evaluation; 3) CINIC10 [17] contains 180,000 $32 \times 32$ color images in ten classes. We use GoogLeNet [69] for evaluation.
261
+
262
+ Application#2: Human Activity Recognition (HAR). HAR [14, 15, 74, 77, 79] is realized by analyzing different types of sensor data (e.g., depth camera, IMU [49, 78], and the channel state information of Wi-Fi signals [32, 36, 37]). We select three datasets: 1) WiAR [28] contains 480 $90 \times 250$ Wi-Fi CSI samples of 16 activities. We augment the dataset to 64,000 samples following OneFi [76]; 2) Depth camera dataset (DCD) [56] contains 5,000 $36 \times 36$ gray-scale depth images of five common gestures; 3) HARBox [56] captures 9-axis IMU data of five daily activities. A sliding window of 2 seconds is applied to generate 900-dimensional features for each of the 30,000 data samples. The IMU data was collected from 121 users with 77 different smartphones, demonstrating a degree
263
+
264
+ ![](images/075c7264ba5111227acba2c5a0480ed32a591f9cebaaeadf1a7447650abb0754.jpg)
265
+ Figure 8: Visualization of Non-IID data. The size of scattered points indicates the number of data samples.
266
+
267
+ ![](images/4a393e2424cd8084a8d3baa0ad1b06cd88589e6364998f973edef485e21d60a8.jpg)
268
+
269
+ of sensing heterogeneity. As there is no standard model for these datasets, we use a CNN model with three Conv layers and one FC layer that can achieve high accuracy.
270
+
271
+ We divide these datasets into four parts: 1) the IID server-side global data for convolution/TC parameters and weight vectors tuning, 2) IID test data for evaluating the aggregated global model, 3) client-side training and 4) testing data (IID or non-IID). Each part counts for $5\%$ , $20\%$ , $70\%$ , and $5\%$ of the total dataset, respectively. The first and second parts of the dataset are kept on the server, whereas the third and fourth parts are distributed among heterogeneous clients. Besides, to emulate real-world heterogeneity, we employ different datasets on the server and clients (§ 6.7).
272
+
273
+ # 5.3Baselines
274
+
275
+ We compare FedConv with the following baselines: 1) Serveralone trains one model with only the server-side global data. We evaluate the model using the server-side IID test data and non-IID client-side test data. 2) Standalone allows each client to train an affordable model locally using their private data without parameter exchange. 3) FedAvg [53] is a classic FL paradigm where clients collaboratively train a shared global model and upload the updated model parameters to a central server for averaging aggregation. Due to the constrained resources of some devices, we assign the smallest affordable models to all clients. 4) FedMD [46] utilizes knowledge distillation to reach a consensus among heterogeneous client models through training on a public dataset. 5) LotteryFL [43] generates sub-models by exploiting the Lottery Ticket hypothesis on heterogeneous clients for personalization. 6) Hermes [42] finds a sparse sub-model for each client by using a channel-wise pruning scheme to reduce the communication overhead. 7) TailorFL [20] produces sub-models by filter-level pruning based on the learned importance value of each filter. 8) HeteroFL [22] is a parameter sharing method that allows each client to select a subset of the parameters from the global model. 9) FedRolex [4] adopts dynamic rolling windows when extracting sub-models for heterogeneous clients.
276
+
277
+ # 5.4 Heterogeneity Consideration
278
+
279
+ For model heterogeneity, we consider four SRs: 0.25, 0.5, 0.75, and 1.0, according to the resource profiles of the heterogeneous
280
+
281
+ ![](images/d3e7413a0d7b0662b003fa28687c9310503651f4d6c5437a11cdd910775267e0.jpg)
282
+ (a) Global model accuracy comparison
283
+
284
+ ![](images/be6d7f7104482f8fcc6c16f869ba3da1f0d32a3f13fea1f7012464e281122e87.jpg)
285
+ (b) Client model accuracy comparison
286
+
287
+ Figure 9: Accuracy comparison under model heterogeneity $(\alpha = 0.1)$ .
288
+ ![](images/45b4da1e1c3775b4d5b7f379a7313e7d15cbff01f7bbaffb656219213a3bd325.jpg)
289
+ ★FedConv ▲ FedAvg ● LotteryFL ◆ Hermes ● TailorFL X HeteroFL ♦ FedRolex ♦ FedMD Standalone
290
+
291
+ ![](images/aa72b8ad8396c55f3fed7aad0cf52728f608cd929cd399474602ec81772b6e39.jpg)
292
+
293
+ ![](images/7d284e7a4e8b1644b05b33a2eb2a257a4d877f74e0de4dd01d571c8080f09c99.jpg)
294
+
295
+ ![](images/6516b0b2634473dbf4bc5303ba99dd8c698519e8223ee6ab14fa6e23f61efb46.jpg)
296
+
297
+ ![](images/d976d10bb8e8e052d8583dab359bc4335e0db2611aa1ae9d536028cf3a53f725.jpg)
298
+
299
+ ![](images/78edf0ff48e27b17eef50ac630b76f131b6d56fec7f17a4d02ff63fca2948e2c.jpg)
300
+
301
+ ![](images/1ade9e2d18ef2ed5e8b4445e8f105e71e0dc0b8b62241832f48319b5cb72031a.jpg)
302
+ Figure 10: The inference accuracy of aggregated global models and client models on different datasets.
303
+
304
+ ![](images/6ce28b47c799e5ed28193046dc6199bdcb8584c04d3b4e62a86957f30729646f.jpg)
305
+
306
+ ![](images/ce6c4f78af189325808bc8904c0d1b77c0561c380c9e1e05ab8fc5c4f0bf7dff.jpg)
307
+ (b) Client model accuracy comparison
308
+
309
+ ![](images/0951cf6f459b01bd8851d12f452f3b01cf92bbf112fcd07196af649ffcb068f5.jpg)
310
+
311
+ ![](images/a139f63034a52d5d13e5c5536eed53c37523d82a42600d80119c4451c9e17f1e.jpg)
312
+
313
+ ![](images/005873bb0f7e25941e291f6a19d67243062ed1564897ae1921838cabbaea695a.jpg)
314
+
315
+ clients. The SR for each client is detailed in Table 1. For powerful clients, we assign larger SRs (e.g., 0.75 for laptops), and for resource-constrained clients, we assign smaller SRs (e.g., 0.25 for Raspberry Pis). For data heterogeneity, we sample the disjoint non-IID client-side data using the Dirichlet distribution $\mathbf{Dir}(\alpha)$ . A larger $\alpha$ (e.g., 10000) indicates a more homogeneous distribution and a smaller $\alpha$ (e.g., 0.05) generates a more heterogeneous distribution [33]. The sample distribution among different classes is illustrated in Fig. 8.
316
+
317
+ # 5.5 Hyper-parameter Settings
318
+
319
+ For baselines and FedConv, we set the number of communication rounds to 100. Each client performs 5 local training epochs with a learning rate of 0.001. In the model compression and dilation process, the stride and padding of all the convolution parameters are 1 and 0. The server-side pre-training epoch number is 5. The epoch numbers for updating convolution/TC parameters are both 20, and the $T_{max}$ , $lr_{min}$ , $lr_{max}$ in the cosine annealing scheduler are 4, 0.00001, and 0.001, respectively. $s_p$ and $s_n$ (Fig. 6(b)) in the activation function are 0.85 and 0.001, respectively. In model aggregation, the number of epochs, the learning rate for updating weight vectors, and $\lambda$ in Eq. (4) are 10, 0.001, and 0.2, respectively.
320
+
321
+ # 6 EVALUATION
322
+
323
+ # 6.1 Metrics
324
+
325
+ Training performance: 1) Inference accuracy: we measure the global model accuracy with the server-side test dataset to evaluate the generalizability of the global model. We also report the average client model accuracy with client-side private test datasets to evaluate the effectiveness of personalization. 2) Communication cost: we use the Pympler library to monitor the network traffic of all the clients over 100 communication rounds.
326
+
327
+ Runtime performance: 1) Memory footprint: the real-time GPU memory usage is monitored using the PyTorch CUDA Toolkit. We track each client's process ID over 100 communication rounds to monitor their CPU usage and report the average value. 2) Wall-clock time: we measure the execution time of each client from receiving model parameters to finishing the training task, and report the average wall-clock time in each round.
328
+
329
+ # 6.2 Overall Performance
330
+
331
+ We evaluate the overall performance of FedConv with heterogeneous models and data distribution.
332
+
333
+ 6.2.1 Global model performance. We first evaluate the accuracy of the aggregated global model to demonstrate its generalizability. Standalone and FedMD are excluded because they do not create global models. Fig. 9(a) shows the global model accuracy under the same degree of heterogeneous data ( $\alpha = 0.1$ ). Serveralone achieves a higher global model accuracy than the baselines in most cases, as the server-side data for training and testing are both IID. FedConv achieves average improvements of $20.5\%$ , $13.8\%$ , and $10.5\%$ compared with pruning-based methods (Hermes and TailorFL), parameter sharing-based method (HeteroFL and FedRolex) and other baselines (FedAvg and LotteryFL), respectively. Since we assign the smallest affordable model to all clients in FedAvg, the client models have an insufficient number of parameters for training. Therefore, FedConv can outperform FedAvg even with IID data. This shows the superior generalization performance of FedConv.
334
+
335
+ Moreover, Fig. 10(a) shows the global model accuracy of FedConv and all baselines across different data heterogeneity on all datasets. We can see that the performance enhancement of FedConv becomes more significant as $\alpha$ decreases, meaning that FedConv can better cope with the increased data heterogeneity. Although FedConv does not obviously outperform FedAvg with homogeneous
336
+
337
+ Table 2: System resource overhead.
338
+
339
+ <table><tr><td rowspan="2">Metric</td><td rowspan="2">System</td><td colspan="6">Heterogeneous Data (α = 0.05)</td><td colspan="6">Homogeneous Data (α = 10000)</td></tr><tr><td>MNIST</td><td>CIFAR10</td><td>CINIC10</td><td>WiAR</td><td>DCD</td><td>HARBox</td><td>MNIST</td><td>CIFAR10</td><td>CINIC10</td><td>WiAR</td><td>DCD</td><td>HARBox</td></tr><tr><td rowspan="9">Memory Footprint CPU + GPU (GB)</td><td>Standalone</td><td>2.14</td><td>3.51</td><td>4.07</td><td>3.95</td><td>2.24</td><td>2.19</td><td>2.13</td><td>3.47</td><td>4.47</td><td>4.03</td><td>2.21</td><td>2.17</td></tr><tr><td>FedAvg</td><td>1.90</td><td>2.40</td><td>3.31</td><td>2.39</td><td>1.98</td><td>2.01</td><td>1.90</td><td>2.51</td><td>2.79</td><td>2.36</td><td>1.88</td><td>2.08</td></tr><tr><td>FedMD</td><td>2.71</td><td>3.65</td><td>7.51</td><td>4.71</td><td>2.99</td><td>2.79</td><td>2.71</td><td>3.65</td><td>7.93</td><td>4.58</td><td>2.99</td><td>2.81</td></tr><tr><td>LotteryFL</td><td>2.62</td><td>3.51</td><td>4.30</td><td>3.23</td><td>2.69</td><td>2.67</td><td>2.63</td><td>3.49</td><td>4.36</td><td>3.27</td><td>2.70</td><td>2.66</td></tr><tr><td>Hermes</td><td>2.64</td><td>3.45</td><td>6.07</td><td>3.28</td><td>2.73</td><td>2.69</td><td>2.64</td><td>3.35</td><td>6.13</td><td>3.32</td><td>2.72</td><td>2.68</td></tr><tr><td>TailorFL</td><td>2.75</td><td>3.61</td><td>5.09</td><td>3.41</td><td>2.79</td><td>2.71</td><td>2.75</td><td>3.47</td><td>7.52</td><td>3.16</td><td>2.77</td><td>2.70</td></tr><tr><td>HeteroFL</td><td>2.63</td><td>3.31</td><td>4.15</td><td>3.25</td><td>2.73</td><td>2.67</td><td>2.63</td><td>3.45</td><td>4.10</td><td>3.08</td><td>2.73</td><td>2.67</td></tr><tr><td>FedRolex</td><td>2.63</td><td>3.21</td><td>4.15</td><td>3.25</td><td>2.72</td><td>2.67</td><td>2.60</td><td>3.54</td><td>4.16</td><td>3.16</td><td>2.68</td><td>2.69</td></tr><tr><td>FedConv</td><td>2.52</td><td>3.21</td><td>4.15</td><td>3.02</td><td>2.60</td><td>2.67</td><td>2.52</td><td>3.35</td><td>4.10</td><td>3.14</td><td>2.62</td><td>2.67</td></tr><tr><td rowspan="9">Wall-clock Time (s)</td><td>Standalone</td><td>3.87</td><td>24.65</td><td>279.62</td><td>8.05</td><td>5.91</td><td>3.54</td><td>9.38</td><td>52.38</td><td>273.52</td><td>7.60</td><td>6.14</td><td>3.56</td></tr><tr><td>FedAvg</td><td>7.05</td><td>39.19</td><td>285.30</td><td>10.62</td><td>10.19</td><td>10.09</td><td>13.75</td><td>97.95</td><td>1711.34</td><td>20.79</td><td>43.67</td><td>26.98</td></tr><tr><td>FedMD</td><td>44.34</td><td>437.14</td><td>5370.83</td><td>55.03</td><td>75.25</td><td>32.92</td><td>45.17</td><td>475.42</td><td>6700.17</td><td>64.43</td><td>79.10</td><td>34.53</td></tr><tr><td>LotteryFL</td><td>9.18</td><td>147.98</td><td>699.35</td><td>8.89</td><td>8.61</td><td>5.69</td><td>17.59</td><td>235.89</td><td>1829.33</td><td>19.77</td><td>22.06</td><td>10.92</td></tr><tr><td>Hermes</td><td>43.22</td><td>714.00</td><td>5580.71</td><td>103.90</td><td>169.97</td><td>104.53</td><td>43.84</td><td>937.82</td><td>7621.38</td><td>117.85</td><td>217.97</td><td>115.31</td></tr><tr><td>TailorFL</td><td>6.98</td><td>62.89</td><td>393.46</td><td>14.44</td><td>12.72</td><td>10.11</td><td>13.61</td><td>99.60</td><td>813.94</td><td>25.53</td><td>13.96</td><td>13.27</td></tr><tr><td>HeteroFL</td><td>6.96</td><td>42.56</td><td>641.21</td><td>10.78</td><td>10.03</td><td>5.10</td><td>13.56</td><td>82.07</td><td>1310.81</td><td>22.26</td><td>23.90</td><td>10.98</td></tr><tr><td>FedRolex</td><td>6.92</td><td>45.98</td><td>602.48</td><td>11.57</td><td>12.34</td><td>4.87</td><td>12.46</td><td>84.25</td><td>1389.41</td><td>23.64</td><td>20.14</td><td>11.26</td></tr><tr><td>FedConv</td><td>5.96</td><td>40.68</td><td>264.30</td><td>12.96</td><td>10.15</td><td>4.40</td><td>10.33</td><td>71.26</td><td>1406.87</td><td>21.79</td><td>17.22</td><td>9.89</td></tr></table>
340
+
341
+ Table 3: Communication overhead comparison (GB).
342
+
343
+ <table><tr><td>System</td><td>MNIST</td><td>CIFAR10</td><td>CINIC10</td><td>WiAR</td><td>DCD</td><td>HARBox</td></tr><tr><td>FedAvg</td><td>14.80</td><td>4815.84</td><td>2697.85</td><td>28.24</td><td>13.45</td><td>8.87</td></tr><tr><td>FedMD</td><td>19.99</td><td>5126.46</td><td>2859.79</td><td>40.91</td><td>19.94</td><td>16.24</td></tr><tr><td>LotteryFL</td><td>11.11</td><td>4713.91</td><td>2623.93</td><td>23.01</td><td>10.05</td><td>8.55</td></tr><tr><td>Hermes</td><td>16.34</td><td>7099.66</td><td>2848.83</td><td>36.63</td><td>15.02</td><td>12.95</td></tr><tr><td>TailorFL</td><td>11.40</td><td>4787.18</td><td>2686.15</td><td>24.30</td><td>10.32</td><td>8.82</td></tr><tr><td>HeteroFL</td><td>11.11</td><td>4713.91</td><td>2623.93</td><td>23.01</td><td>10.05</td><td>8.55</td></tr><tr><td>FedRolex</td><td>11.11</td><td>4713.91</td><td>2623.93</td><td>23.01</td><td>10.05</td><td>8.55</td></tr><tr><td>FedConv</td><td>11.11</td><td>4713.91</td><td>2623.93</td><td>23.01</td><td>10.05</td><td>8.55</td></tr></table>
344
+
345
+ data, it exhibits better generalizability and robustness in the global model under heterogeneous data. FedConv also provides better personalization performance for clients (§ 6.2.2). The performance improvements of the global model stem from our convolutional compression and TC dilation methods. They facilitate the information embedded in the global model being preserved and transferred from the server to clients through our learning-on-model approach. 6.2.2 Client model performance. To evaluate the personalization performance, we measure the accuracy of each client model with client-side test datasets and report the average value. Fig. 9(b) shows that with the same heterogeneous data settings, FedConv outperforms baselines (FedAvg, LotteryFL, Hermes, TailorFL, HeteroFL, and FedRolex) with accuracy improvements ranging from $8.4\%$ to $50.6\%$ . In Serveralone, when evaluating the global model using the client-side non-IID data, the accuracy of the client model drops below that of most baseline systems. This is because, in FedConv, the server-side data occupies a small portion $(5\%)$ of the entire dataset. Therefore, Serveralone's global model hasn't seen sufficient data, leading to degraded performance on the client-side non-IID data.
346
+
347
+ Additionally, Fig. 10(b) shows the client model accuracy of FedConv and all baselines with different data heterogeneity. We can see that the performance disparities become more substantial as $\alpha$ decreases, implying that FedConv is more robust and can achieve consistently high accuracy across diverse data distribution. This performance gain stems from the TC dilation process, where distinct TC parameters are assigned to each uploaded client model on the server. The rescaled large models will thereby preserve the personalization information from clients, which is then aggregated into the global model. Besides, Fig. 9(b) shows that, with sensing heterogeneity in the HARBox dataset, FedConv achieves a better and more stable performance. However, when $\alpha$ is small (e.g., $\alpha \in \{0.05, 0.1\}$ on CIFAR10), the client model accuracy of FedMD is higher than
348
+
349
+ FedConv. The better performance stems from the distilled knowledge shared by all clients. Nonetheless, the downside is that it imposes excessive communication and computational overhead on clients (Table 2 & Table 3). By contrast, FedConv can achieve comparable personalization performance without an extra burden on clients. In practice, we can further improve the personalization performance by adding task-specific layers [39] (detailed in § 6.6).
350
+
351
+ Remarks. FedConv exhibits significant performance gains in both global and client models across various settings. The parameter information of the global model can be preserved via our convolutional compression module. We suspect that the performance instability of some baselines might be attributed to the information loss in model pruning and the imbalance issue in parameter sharing.
352
+
353
+ # 6.3 Overhead Assessment
354
+
355
+ We evaluate the memory footprint, wall-clock time, and communication overhead of each client in FedConv and baselines with both homogeneous $(\alpha = 10000)$ and heterogeneous $(\alpha = 0.05)$ data across clients. Table 2 provides an overview of the average memory usage and the average wall-clock time of each client. With the same set of SRs, FedConv achieves an average saving of $40.6\%$ in memory cost and $54.6\%$ in computation overhead compared with the baselines, respectively. Furthermore, when the client model is complex (ResNet18 and GoogLeNet), FedConv only needs approximately half of the memory and training time compared to the pruning-based methods. For example, in the homogeneous data condition, FedConv needs 2GB less memory and saves around 90 minutes of wall-clock time than Hermes in one single round. This is because the computation-intensive pruning operations are executed on the resource-constrained clients. In contrast, clients in FedConv only need to perform local training in each round, resulting in significant savings in terms of memory, computation, and communication resources. Note that FedAvg consumes less memory and wall-clock time because we assign the smallest affordable models to all clients.
356
+
357
+ Table 3 lists the total size of data packets transmitted through the network by all clients. We observe that the communication cost of FedConv, LotteryFL, and HeteroFL are comparable, as they exclusively transmit sub-model parameters without extra contents. In contrast, Hermes and TailorFL have to transmit the pruning structure, and FedMD needs to transmit logits. Thus, FedConv, LotteryFL, and HeteroFL are more friendly to resource-constrained
358
+
359
+ ![](images/8002624ad28286d8b6a035dababd0eb442c9b2180697375fc1768c452e8fa6be.jpg)
360
+ (a) FedConv on different datasets
361
+
362
+ ![](images/c54620d46f1a0772ed17be4e2c6e3ee34b4b3ed4ee40ae527c1a6ffc559d9d78.jpg)
363
+ (b) FedConv vs. baselines (CIFAR10)
364
+
365
+ ![](images/5eb3a8f9e2342b827215b773ba85a415fe19a3f2fb14aa84581e97fe555851c6.jpg)
366
+ Figure 11: Varying number of clients.
367
+ (a) Varying server data size
368
+ Figure 13: Sensitivity analysis of varying hyper-parameters in FedConv.
369
+
370
+ ![](images/59846ba22c254e0b5361e531d7a207e4eee7629a292919c39ff64300afb4726e.jpg)
371
+ (b) Varying convolution/TC epochs
372
+
373
+ ![](images/d282c91a36ec18128992ea4cb4a42772ae1286c8a1f2ac9def2d99b76e7af00e.jpg)
374
+ (a) FedConv on different datasets
375
+
376
+ ![](images/27f2d3d579f3d6b978909a1e532c35f1bc8c6d23a8a88dc43e98e1288f6db0de.jpg)
377
+ (b) FedConv vs. baselines (CIFAR10)
378
+
379
+ ![](images/8d796d7eff83f8e0d911c4839b31719d051819d70dc6b3f9ba13fc2483590d01.jpg)
380
+ Figure 12: Varying shrinkage ratios.
381
+ (c) Varying aggregation epochs
382
+
383
+ ![](images/e1861a85dbcdc249ac6cf7edf3466f0f82f09333bab0a08598c2118225b9e51d.jpg)
384
+ (d) Client model accuracy (CIFAR10)
385
+
386
+ clients. Moreover, it holds significant potential that existing quantization techniques [2, 21] and masking method [44] can be extended to FedConv, to further diminish the communication overhead.
387
+
388
+ Remarks. In summary, benefiting from the lighter communication and computation burden imposed on resource-constrained clients, FedConv saves more system resources and performs inference tasks faster than the baselines.
389
+
390
+ # 6.4 Sensitivity Analysis
391
+
392
+ 6.4.1 Varying client number. We simulate 100 clients and vary the number of selected participating clients from 10 to 50 ( $\alpha = 10000$ ) to compare the client model performance with the baselines. As shown in Fig. 11(a), the client model accuracy in FedConv exhibits an upward trend as the number of clients increases. For example, the client model accuracy on HARBox increases by $17.54\%$ when the number of clients increases from 10 to 50. We then select CIFAR10 to compare the client model performance of FedConv with pruning-based and parameter sharing-based methods. From Fig. 11(b), we see that FedConv attains an average client model accuracy that is at least $32.5\%$ higher than that of the baselines. The results demonstrate the scalability and superiority of FedConv with varying client numbers.
393
+
394
+ 6.4.2 Varying shrinkage ratios. To investigate the trade-off between the SR and model performance, we set the SR for 10 clients as 1.0 and set the SR for the remaining 10 clients as $r$ . We then vary $r$ from 1.0 to 0.05 and record the average client model accuracy $(\alpha = 10000)$ . From Fig. 12(a), we can see that as the SR decreases below a certain threshold, there is a notable accuracy drop in client models, as expected. For MNIST, WiAR, and DCD, the SR threshold is about 0.25 (the red shadow), and for CIFAR10, CINIC10, and HARBox, the threshold is about 0.4 (the blue shadow). Fortunately, we find that even a lightweight device (e.g., Raspberry Pis) can afford the GoogLeNet model on CINIC10 when the SR is 0.4. Consequently, as long as the SR remains above the corresponding threshold, it can be reduced to conserve system resources effectively.
395
+
396
+ We then use CIFAR10 to compare FedConv with the baselines, and the client model accuracy is shown in Fig. 12(b). We can see that though the accuracy of FedConv also decreases with limited
397
+
398
+ resources, it can retain much higher accuracy than the baselines. The reason is that with a lower SR (higher pruning rate), the baselines discard a larger amount of parameter information. In contrast, with convolutional compression, FedConv can effectively preserve the parameter information of the global model as much as possible to the sub-models bounded by their sizes and resource budgets.
399
+
400
+ 6.4.3 Varying server-side data sizes. To investigate the impact of server-side data, we vary the sample number ratio of the server-side data from $1\%$ to $25\%$ , with a step of $0.5\%$ . As shown in Fig. 13(a), we obtain two key observations: 1) When the ratio of the server-side data varies from $1\%$ to $5\%$ , the client models will have better performance, due to the richer information obtained from the server-side data; 2) After the turning point $(>5\%)$ the global model tends to overfit to the server-side data, leading to less personalization and degraded client model performance. In our evaluation, we set the default sample ratio of the server-side data to $5\%$ . Note that the actual turning point may differ in practice. In addition, continuous learning [52] or incremental learning [30] techniques can be further applied as more server-side data become available.
401
+
402
+ 6.4.4 Varying hyper-parameters. We vary the number of epochs for fine-tuning the model compression, dilation, and aggregation to evaluate their impact on the personalization performance of client models. We select two datasets (CIFAR10 and HARBox) for demonstration. We first vary the number of epochs for updating the convolution/TC parameters in each global round. Fig. 13(b) shows that when the number of convolution/TC parameters updating epochs is around 20, the client models achieve better and more stable performance. After the 20-th and the 40-th epoch, the client model accuracy gradually drops due to the convolution/TC parameters being over-fitted to the server-side data. Similarly, from Fig. 13(c), we can observe that when the number of tuning epochs for updating weight vectors exceeds 40 and 80, the accuracy of the aggregated global model also decreases. Therefore, we set the number of epochs for model compression, dilation, and aggregation to 20.
403
+
404
+ We also vary the kernel size and stride length of the compression layers and report the mean client model accuracy to explore the impact on client performance. We select a convolutional layer from
405
+
406
+ ![](images/fdac02177dbe97fd8ff944b086be8deee699ca4d7debfa3011d9dbe23ff7efd5.jpg)
407
+ (a) Server-side pre-training
408
+ Figure 14: Ablation study of FedConv.
409
+
410
+ ![](images/9260769c69eebd25d36337f6e04b022b36c96bacbd9df678b3e89bbb02771347.jpg)
411
+ (b) Weight vectors
412
+
413
+ the large model as an example, whose parameter matrix has a shape of $9 \times (1,64,64)$ . With the SR being 0.75, the compressed parameter matrix will have a shape of $9 \times (1,48,48)$ . Since the kernel size $k$ and the stride $s$ should satisfy $(64 + 2p - k + 1) / s = 48$ , the padding $p$ can then be determined accordingly. In general, a larger kernel can capture more comprehensive parameter information, and a smaller stride can capture more fine-grained information. As shown in Fig. 13(d), client models tend to have better performance as the kernel size increases and the stride decreases. However, a larger kernel incurs high computational complexity and imposes a heavy workload on the server. Therefore, in our default settings, the kernel size and stride are set as 23 and 1, respectively.
414
+
415
+ # 6.5 Ablation Study
416
+
417
+ Next, we conduct ablation studies to investigate the importance of the server-side pre-training process and the weighted average aggregation module, respectively.
418
+
419
+ 6.5.1 Server-side pre-training. Fig. 14(a) shows the impact on global model accuracy with and without server-side pre-training with $\alpha = 0.05$ . It can be observed that with the integration of pre-training, the global model achieves higher average accuracy (about $15.69\%$ ) and reaches faster convergence (about 40 communication rounds earlier), which helps the FL server and clients save communication, computation, and energy costs involved in the training process.
420
+ 6.5.2 Weighted average aggregation. To demonstrate the impact of our learned weight vectors for model aggregation, we assign weights with respect to sample number as in FedAvg to all clients and measure the global model accuracy. Fig. 14(b) shows the effect on global model accuracy when performing model aggregation with learned weights and equal weights separately. Significant performance degradation can be observed when employing the averaging aggregation method. This is because the parameters from heterogeneous client models usually exhibit varying skewness toward their local data distribution. Merely averaging all the model parameters overlooks the different contributions made by clients in the aggregation process. On the contrary, with the learned weight vectors, clients can contribute different parameter information to the aggregated global model and improve its generalization performance.
421
+
422
+ # 6.6 Personalization Enhancement
423
+
424
+ To evaluate the potential in personalization, we extend FedConv by adding task-specific layers [39] on each client, and evaluate the client model accuracy. Specifically, after receiving the parameters from the server, each client appends its own personal layers to the sub-model. By doing so, the personalization performance of each client can be enhanced during local training. We record the average accuracy of client models after 100 communication rounds. Fig. 15 shows the performance improvement on five datasets after
425
+
426
+ ![](images/edecbfda6ab5f0d393384b89bbd1c8a9e5a52dc4afe191c2d38ffe706e7164e4.jpg)
427
+ (a) CIFAR10
428
+
429
+ ![](images/9c79d62168736e0ba448a3f4592b94eb8cb27d85991d8ddbd65e3c7e45c3b5db.jpg)
430
+ (b) HARBox
431
+
432
+ ![](images/4b05f5e6dbbf61c87ed6a7fb51828dec959236b21e8672e571a5f8604d96bc79.jpg)
433
+ Figure 15: FedConv with personalized FL.
434
+ (a) Global model accuracy
435
+ Figure 16: Case study with real-world heterogeneity.
436
+
437
+ ![](images/0a371bc4ba35939ec0fcaa40858558e272bfb0d9b887e538e19b3d730b2c3fda.jpg)
438
+ $\alpha$ (b) Client model accuracy
439
+
440
+ applying personalization enhancement. Compared with FedMD, which achieves the highest client model accuracy (§ 6.2.2), we can see that FedConv with personalization enhancement is able to surpass FedMD in most cases. This result indicates that FedConv can be enhanced with existing personalized federated learning methods to achieve better performance.
441
+
442
+ # 6.7 Case Study with Real-World Heterogeneity
443
+
444
+ In our default configuration, both the server-side and client-side datasets originate from the same domain. To test with real-world heterogeneity and assess its impact, we conduct a case study where the Chars74K dataset [18] is kept on the server, while the MNIST dataset is used for heterogeneous clients. The Chars74K dataset contains images of digits from computer fonts with variations (italic, bold, and normal). In this case, the global model can learn and extract general features (e.g., different shapes of the digits from the Chars74K dataset), while heterogeneous clients can further fine-tune the compressed model to extract personalized features (e.g., various writing styles of the digits from the MNIST dataset). The convolutional compression process and the TC dilation process can be regarded as a transformation from one data domain to another. The generated sub-models via convolutional compression contain parameter information from the large global model and can thereby extract general features. Similarly, the server applies TC to the locally trained heterogeneous client models to rescale them. This facilitates the aggregation process to form a new global model, retaining the personalization information of the client-side data. As shown in Fig. 16(a) and Fig. 16(b), due to the domain gap between the server-side and the client-side data, there is a decrease in both the global model and the client model accuracy. FedMD still achieves comparable performance to FedConv with only the MNIST dataset, benefiting from the knowledge distillation method. However, when we further enhance FedConv with transfer learning strategies [12, 59] on each client to narrow down the domain gap between the server-side and the client-side data, the client models will achieve higher accuracies and even outperform FedMD. This observation indicates that FedConv can be combined with existing federated transfer learning approaches to achieve better performance.
445
+
446
+ # 7 DISCUSSION
447
+
448
+ Privacy Concerns. In addition to transferring model parameters between the server and heterogeneous clients, FedConv requires all the clients to report their SRs before the FL training starts. To determine appropriate SRs, clients will perform resource profiling locally and report to the server. We note that same as conventional FL schemes, no client-side sensor data needs to be transferred to the server during this process. Thus, we believe the privacy protection of conventional FL schemes can be effectively retained.
449
+
450
+ Practicality of FedConv. In FedConv, we use the Flower [5] framework to orchestrate the entire FL process. While Flower offers a stable and robust simulated environment for FL, deploying it in a mixed Linux-Android environment encounters significant obstacles. These include technical challenges in training neural network models on Android devices and issues related to the compatibility of the Flower framework with Android systems. Fortunately, recent advancements [6] in Flower support federated learning setup with Android clients using TensorFlow Lite [1].
451
+
452
+ Convolutional Compression. As shown in our evaluation, the convolutional compression is effective in compressing large global models and achieves better performance compared with model pruning, parameter sharing, and knowledge distillation-based methods. Additionally, the compression and dilation process is performed on the server side, without imposing any extra burden on the clients. From the client's perspective, they do not need to participate in the pre-training and fine-tuning process and can join throughout the FL processes, which is the same as the conventional FL systems.
453
+
454
+ # 8 RELATED WORK
455
+
456
+ Data heterogeneity. Recent works [56, 58, 65, 72] optimize FL performance under non-IID data. Clustering-based methods [11, 34, 56] group clients according to the distribution of their data or model parameters. For example, ClusterFL [56] captures the intrinsic clustering patterns among clients by measuring the similarity of client models. Shu et al. [64] propose a clustered multi-task federated learning on non-IID data. Personalized FL adopts local fine-tuning [25] or add task-specific layers on client side [39, 83]. For example, pFedMe [23] uses Moreau envelopes as a regularized loss function to decouple the task of optimizing a personalized model from the global model learning. Yosinski et al. [83] enable the upper layers of the global model to learn task-specific features, while the lower layers capture more general features which are further shared across clients. Our work is orthogonal to these works and requires minimal modification to clients for integration into existing FL systems.
457
+
458
+ Model heterogeneity and model compression. To accommodate heterogeneous clients, recent works mainly compress the global model to reduce communication and computation costs. They can be divided into three categories: 1) Knowledge distillation-based methods [85, 88] generally regard heterogeneous client models as teacher models and learn an aggregated global student model via knowledge distillation (KD). Lin et al. [48] leverage KD and ensemble learning to combine the knowledge from heterogeneous client models. FedMD [46] computes an average consensus to substitute the aggregation process. However, the tuning of KD is performed on clients with a shared dataset, incurring extra overhead for clients; 2) Parameter sharing strategies [61] allow sub-models to share a part of the global model parameters to reduce computation overhead.
459
+
460
+ HeteroFL [22] enables heterogeneous clients to select fixed subsets of global parameters with minimal modification to the existing FL framework. Yet, the sharing strategy suffers from the imbalance issue [82]; 3) Pruning-based methods [73] have gained popularity in heterogeneous FL. Hermes [42] applies a channel-level pruning method to selectively prune out less important channels. TailorFL [20] proposes an importance value-based filter-level pruning scheme to enable a dual-personalized FL system. Removing entire channels or filters results in information loss and performance degradation [51]. Unlike these works, we compress the global model with convolutional compression to generate sub-models. Orthogonal to our work, traditional compression techniques (e.g., quantization [71]) can be applied to compress model parameters and reduce network traffic. However, as the compressed parameters should be decompressed back to their original size before training, these works cannot reduce the system overhead of clients.
461
+
462
+ Convolution and transposed convolution. Convolution can effectively extract useful features from input data by capturing local patterns and spatial relationships [41, 66]. In FedConv, we exploit a novel convolutional compression technique to generate sub-models for heterogeneous clients, which can capture key information embedded in the global model. TC is renowned for its capability of reconstructing super-resolution images from fuzzy ones [24, 26], which is widely adopted in image dilation [16] and semantic segmentation [55]. In our aggregation process, we leverage TC to resize the heterogeneous client models to a unified size for aggregation.
463
+
464
+ # 9 CONCLUSION
465
+
466
+ We propose FedConv, a client-friendly federated learning framework for heterogeneous clients, aiming to minimize the system overhead on resource-constrained mobile devices. FedConv contributes three key technical modules: 1) a novel model compression scheme that generates heterogeneous sub-models with convolutional compression on the global model; 2) a transposed convolutional dilation module that converts heterogeneous client models back to large models with a unified size; and 3) a weighted average aggregation scheme that fully leverages personalization information of client models to update the global model. Extensive experiments demonstrate that FedConv outperforms SOTA baselines in terms of inference accuracy with much lower computation and communication overhead for FL clients. We believe the proposed learning-on-model paradigm is worthy of further exploration and can potentially benefit other FL tasks where heterogeneous sub-models can be generated to retain the information of a global model.
467
+
468
+ # ACKNOWLEDGMENTS
469
+
470
+ We sincerely thank our shepherd - Veljko Pejovic, and anonymous reviewers for their constructive comments and invaluable suggestions that helped improve this paper. This work is supported by Hong Kong GRF Grant No. 15206123. This work is also supported by NSFC (Grant No. 62372314, U21A20462, and 62372400), "Pioneer" and "Leading Goose" R&D Program of Zhejiang under grant No. 2024C03287. Yuanqing Zheng is the corresponding author.
471
+
472
+ # APPENDIX
473
+
474
+ The research artifacts accompanying this paper are available at https://doi.org/10.5281/zenodo.11089994.
475
+
476
+ # REFERENCES
477
+
478
+ [1] Martin Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig Citro, Greg S Corrado, Andy Davis, Jeffrey Dean, Matthieu Devin, et al. 2015. TensorFlow: Large-scale machine learning on heterogeneous systems.
479
+ [2] Ahmed M Abdelmoniem and Marco Canini. 2021. Towards mitigating device heterogeneity in federated learning via adaptive model quantization. In Proceedings of the 1st Workshop on Machine Learning and Systems. 96-103.
480
+ [3] Andrei Afonin and Sai Praneeth Karimireddy. 2021. Towards model agnostic federated learning using knowledge distillation. arXiv preprint arXiv:2110.15210 (2021).
481
+ [4] Samiul Alam, Luyang Liu, Ming Yan, and Mi Zhang. 2022. Fedrolex: Model-heterogeneous federated learning with rolling sub-model extraction. Advances in neural information processing systems 35 (2022), 29677-29690.
482
+ [5] Daniel J. Beutel, Taner Topal, Akhil Mathur, Xinchi Qiu, Titouan Parcollet, and Nicholas D. Lane. 2020. Flower: A Friendly Federated Learning Research Framework. CoRR (2020).
483
+ [6] Daniel J. Beutel, Taner Topal, Akhil Mathur, Xinchi Qiu, Titouan Parcollet, and Nicholas D. Lane. 2023. Flower Android Example (TensorFlowLite). https://github.com/adap/flower/tree/main/examples/android.
484
+ [7] Cody Blakeney, Yan Yan, and Ziliang Zong. 2020. Is pruning compression?: Investigating pruning via network layer similarity. In IEEE CVPR. 914-922.
485
+ [8] Dongqi Cai, Shangguang Wang, Yaozong Wu, Felix Xiaozhu Lin, and Mengwei Xu. 2023. Federated few-shot learning for mobile NLP. In Proceedings of the 29th Annual International Conference on Mobile Computing and Networking. 1-17.
486
+ [9] Dongqi Cai, Yaozong Wu, Shangguang Wang, Felix Xiaozhu Lin, and Mengwei Xu. 2022. FedAdapter: Efficient Federated Learning for Modern NLP. arXiv preprint arXiv:2205.10162 (2022).
487
+ [10] Dongqi Cai, Yaozong Wu, Shangguang Wang, Felix Xiaozhu Lin, and Mengwei Xu. 2023. Efficient federated learning for modern nlp. In Proceedings of the 29th Annual International Conference on Mobile Computing and Networking. 1-16.
488
+ [11] Ming Chen, Jinze Wu, Yu Yin, Zhenya Huang, Qi Liu, and Enhong Chen. 2022. Dynamic Clustering Federated Learning for Non-IID Data. In Artificial Intelligence - Second CAAI International Conference, CICAI.
489
+ [12] Yiqiang Chen, Xin Qin, Jindong Wang, Chaohui Yu, and Wen Gao. 2020. Fedhealth: A federated transfer learning framework for wearable healthcare. IEEE Intelligent Systems 35, 4 (2020), 83-93.
490
+ [13] Thomas M. Cover and Joy A. Thomas. 2006. Elements of Information Theory (Wiley Series in Telecommunications and Signal Processing).
491
+ [14] Kaiyan Cui, Yanwen Wang, Yuanqing Zheng, and Jinsong Han. 2021. ShakeReader: 'Read' UHF RFID using Smartphone. In IEEE INFOCOM.
492
+ [15] Kaiyan Cui, Qiang Yang, Yuanqing Zheng, and Jinsong Han. 2023. mmRipple: Communicating with mmWave Radars through Smartphone Vibration. In Proceedings of the 22nd International Conference on Information Processing in Sensor Networks. 149-162.
493
+ [16] Ryan Dahl, Mohammad Norouzi, and Jonathon Shlens. 2017. Pixel Recursive Super Resolution. In IEEE ICCV. 5449-5458.
494
+ [17] Luke N Darlow, Elliot J Crowley, Antreas Antoniou, and Amos J Storkey. 2018. Cinic-10 is not imagenet or cifar-10. arXiv preprint arXiv:1810.03505 (2018).
495
+ [18] Teófilo E de Campos, Bodla Rakesh Babu, and Manik Varma. 2009. Character recognition in natural images. In International conference on computer vision theory and applications, Vol. 1. SCITEPRESS, 273-280.
496
+ [19] Li Deng. 2012. The MNIST Database of Handwritten Digit Images for Machine Learning Research. IEEE Signal Process. Mag. (2012).
497
+ [20] Yongheng Deng, Weining Chen, Ju Ren, Feng Lyu, Yang Liu, Yunxin Liu, and Yaoxue Zhang. 2022. TailorFL: Dual-Personalized Federated Learning under System and Data Heterogeneity. In ACM SenSys.
498
+ [21] Tim Dettmers. 2015. 8-bit approximations for parallelism in deep learning. arXiv preprint arXiv:1511.04561 (2015).
499
+ [22] Enmao Diao, Jie Ding, and Vahid Tarokh. 2021. HeteroFL: Computation and Communication Efficient Federated Learning for Heterogeneous Clients. In ICLR. OpenReview.net.
500
+ [23] Canh T. Dinh, Nguyen Hoang Tran, and Tuan Dung Nguyen. 2020. Personalized Federated Learning with Moreau Envelopes. In NeurIPS.
501
+ [24] Vincent Dumoulin and Francesco Visin. 2016. A guide to convolution arithmetic for deep learning. CoRR abs/1603.07285 (2016).
502
+ [25] Alireza Fallah, Aryan Mokhtari, and Asuman E. Ozdaglar. 2020. Personalized Federated Learning with Theoretical Guarantees: A Model-Agnostic Meta-Learning Approach. In NeurIPS.
503
+ [26] Hongyang Gao, Hao Yuan, Zhengyang Wang, and Shuiwang Ji. 2020. Pixel Transposed Convolutional Networks. IEEE Trans. Pattern Anal. Mach. Intell. (2020).
504
+ [27] Xavier Glorot and Yoshua Bengio. 2010. Understanding the difficulty of training deep feedforward neural networks. In Proceedings of the thirteenth international conference on artificial intelligence and statistics.
505
+ [28] Linlin Guo, Silu Guo, Lei Wang, Chuang Lin, Jialin Liu, Bingxian Lu, Jian Fang, Zhonghao Liu, Zeyang Shan, and Jingwen Yang. 2019. Wiar: A Public Dataset for Wifi-Based Activity Recognition. IEEE Access 7 (2019), 154935-154945.
506
+
507
+ [29] Lixiang Han, Zhen Xiao, and Zhenjiang Li. 2024. DTMM: Deploying TinyML Models on Extremely Weak IoT Devices with Pruning. arXiv preprint arXiv:2401.09068 (2024).
508
+ [30] Haibo He, Sheng Chen, Kang Li, and Xin Xu. 2011. Incremental learning from stream data. IEEE Transactions on Neural Networks 22, 12 (2011), 1901-1914.
509
+ [31] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep Residual Learning for Image Recognition. In IEEE CVPR. 770-778.
510
+ [32] Yinghui He, Jianwei Liu, Mo Li, Guanding Yu, Jinsong Han, and Kui Ren. 2023. Sencom: Integrated sensing and communication with practical wifi. In ACM MobiCom. 1-16.
511
+ [33] Tzu-Ming Harry Hsu, Hang Qi, and Matthew Brown. 2019. Measuring the Effects of Non-Identical Data Distribution for Federated Visual Classification. CoRR abs/1909.06335 (2019).
512
+ [34] Gang Hu, Yinglei Teng, Nan Wang, and F. Richard Yu. 2023. Clustered Data Sharing for Non-IID Federated Learning over Wireless Networks. CoRR (2023).
513
+ [35] Gao Huang, Yixuan Li, Geoff Pleiss, Zhuang Liu, John E. Hopcroft, and Kilian Q. Weinberger. 2017. Snapshot Ensembles: Train 1, Get M for Free. In ICLR. OpenReview.net.
514
+ [36] Sijie Ji, Yaxiong Xie, and Mo Li. 2022. Sifall: Practical online fall detection with rf sensing. In ACM SenSys. 563-577.
515
+ [37] Sijie Ji, Xuanye Zhang, Yuanqing Zheng, and Mo Li. 2023. Construct 3D Hand Skeleton with Commercial WiFi. arXiv preprint arXiv:2312.15507 (2023).
516
+ [38] Xinqi Jin, Lingkun Li, Fan Dang, Xinlei Chen, and Yunhao Liu. 2022. A survey on edge computing for wearable technology. Digital Signal Processing 125 (2022), 103146.
517
+ [39] Cihat Keçeci, Mohammad Shaqfeh, Hayat Mbayed, and Erchin Serpedin. 2022. Multi-Task and Transfer Learning for Federated Learning Applications. CoRR (2022).
518
+ [40] Alex Krizhevsky, Geoffrey Hinton, et al. 2009. Learning multiple layers of features from tiny images. (2009).
519
+ [41] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E. Hinton. 2012. ImageNet Classification with Deep Convolutional Neural Networks. In NeurIPS.
520
+ [42] Ang Li, Jingwei Sun, Pengcheng Li, Yu Pu, Hai Li, and Yiran Chen. 2021. Hermes: an efficient federated learning framework for heterogeneous mobile clients. In ACM MobiCom. 420-437.
521
+ [43] Ang Li, Jingwei Sun, Binghui Wang, Lin Duan, Sicheng Li, Yiran Chen, and Hai Li. 2021. LotteryFL: Empower Edge Intelligence with Personalized and Communication-Efficient Federated Learning. In 6th IEEE/ACM Symposium on Edge Computing, SEC. 68-79.
522
+ [44] Ang Li, Jingwei Sun, Xiao Zeng, Mi Zhang, Hai Li, and Yiran Chen. 2021. FedMask: Joint Computation and Communication-Efficient Personalized Federated Learning via Heterogeneous Masking. In ACM SenSys.
523
+ [45] Chenning Li, Xiao Zeng, Mi Zhang, and Zhichao Cao. 2022. PyramidFL: a fine-grained client selection framework for efficient federated learning. In ACM MobiCom. 158-171.
524
+ [46] Daliang Li and Junpu Wang. 2019. FedMD: Heterogenous Federated Learning via Model Distillation. CoRR (2019).
525
+ [47] Jianhua Lin. 1991. Divergence measures based on the Shannon entropy. IEEE Transactions on Information theory 37, 1 (1991), 145-151.
526
+ [48] Tao Lin, Lingjing Kong, Sebastian U Stich, and Martin Jaggi. 2020. Ensemble distillation for robust model fusion in federated learning. Advances in Neural Information Processing Systems, NeurIPS (2020).
527
+ [49] Jianwei Liu, Wenfan Song, Leming Shen, Jinsong Han, Xian Xu, and Kui Ren. 2021. MandiPass: Secure and usable user authentication via earphone imu. In IEEE ICDCS. IEEE, 674-684.
528
+ [50] Juncai Liu, Jessie Hui Wang, Chenghao Rong, Yuedong Xu, Tao Yu, and Jilong Wang. 2021. Fedpa: An adaptively partial model aggregation strategy in federated learning. Computer Networks 199 (2021), 108468.
529
+ [51] Zhuang Liu, Mingjie Sun, Tinghui Zhou, Gao Huang, and Trevor Darrell. 2019. Rethinking the Value of Network Pruning. In ICLR.
530
+ [52] Xinyue Ma, Suyeon Jeong, Minjia Zhang, Di Wang, Jonghyun Choi, and Myeongjae Jeon. 2023. Cost-effective On-device Continual Learning over Memory Hierarchy with Miro. In Proceedings of the 29th Annual International Conference on Mobile Computing and Networking. 1-15.
531
+ [53] Brendan McMahan, Eider Moore, Daniel Ramage, Seth Hampson, and Blaise Agüera y Arcas. 2017. Communication-Efficient Learning of Deep Networks from Decentralized Data. In Proceedings of the 20th International Conference on Artificial Intelligence and Statistics, AISTATS.
532
+ [54] Alessio Mora, Irene Tenison, Paolo Bellavista, and Irina Rish. 2022. Knowledge distillation for federated learning: a practical guide. arXiv preprint arXiv:2211.04742 (2022).
533
+ [55] Hyeonwoo Noh, Seunghoon Hong, and Bohyung Han. 2015. Learning Deconvolution Network for Semantic Segmentation. In IEEE ICCV. IEEE Computer Society, 1520-1528.
534
+ [56] Xiaomin Ouyang, Zhiyuan Xie, Jiayu Zhou, Jianwei Huang, and Guoliang Xing. 2021. ClusterFL: a similarity-aware federated learning system for human activity recognition. In ACM MobiSys.
535
+ [57] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga,
536
+
537
+ Alban Desmaison, Andreas Kopf, Edward Yang, Zachary DeVito, Martin Raison, Alykhan Tejani, Sasank Chilamkurthy, Benoit Steiner, Lu Fang, Junjie Bai, and Soumith Chintala. 2019. PyTorch: An Imperative Style, High-Performance Deep Learning Library. In Advances in Neural Information Processing Systems, NeurIPS, 8024-8035.
538
+ [58] Boris Radovic and Veljko Pejovic. 2023. REPA: Client Clustering without Training and Data Labels for Improved Federated Learning in Non-IID Settings. arXiv preprint arXiv:2309.14088 (2023).
539
+ [59] Sudipan Saha and Tahir Ahmad. 2021. Federated transfer learning: concept and applications. Intelligenza Artificiale 15, 1 (2021), 35-44.
540
+ [60] Tim Salimans and Durk P Kingma. 2016. Weight normalization: A simple reparameterization to accelerate training of deep neural networks. Advances in neural information processing systems, NeurIPS 29 (2016).
541
+ [61] Leming Shen and Yuanqing Zheng. 2023. FedDM: Data and Model Heterogeneity-Aware Federated Learning via Dynamic Weight Sharing. In 2023 IEEE ICDCS. IEEE, 975-976.
542
+ [62] Guomei Shi, Li Li, Jun Wang, Wenyan Chen, Kejiang Ye, and ChengZhong Xu. 2020. HySync: Hybrid federated learning with effective synchronization. In 2020 IEEE 22nd International Conference on High Performance Computing and Communications; IEEE 18th International Conference on Smart City. IEEE, 628-633.
543
+ [63] Jaemin Shin, Yuanchun Li, Yunxin Liu, and Sung-Ju Lee. 2022. FedBalancer: data and pace control for efficient federated learning on heterogeneous clients. In ACM MobiSys. 436-449.
544
+ [64] Jiangang Shu, Tingting Yang, Xinying Liao, Farong Chen, Yao Xiao, Kan Yang, and Xiaohua Jia. 2023. Clustered Federated Multitask Learning on Non-IID Data With Enhanced Privacy. IEEE Internet Things J. (2023).
545
+ [65] Xian Shuai, Yulin Shen, Siyang Jiang, Zhihe Zhao, Zhenyu Yan, and Guoliang Xing. 2022. BalanceFL: Addressing class imbalance in long-tail federated learning. In ACM/IEEE IPSN. 271-284.
546
+ [66] Karen Simonyan and Andrew Zisserman. 2015. Very Deep Convolutional Networks for Large-Scale Image Recognition. In 3rd International Conference on Learning Representations, ICLR.
547
+ [67] Jingwei Sun, Ang Li, Lin Duan, Samiul Alam, Xuliang Deng, Xin Guo, Haiming Wang, Maria Gorlatova, Mi Zhang, Hai Li, et al. 2022. FedSEA: A SemiAsynchronous Federated Learning Framework for Extremely Heterogeneous Devices. In ACM SenSys. 106-119.
548
+ [68] Mukund Sundararajan, Ankur Taly, and Qiqi Yan. 2017. Axiomatic Attribution for Deep Networks. In ICML, Vol. 70. 3319-3328.
549
+ [69] Christian Szegedy, Wei Liu, Yangqing Jia, Pierre Sermanet, Scott Reed, Dragomir Anguelov, Dumitru Erhan, Vincent Vanhoucke, and Andrew Rabinovich. 2015. Going deeper with convolutions. In Proceedings of the IEEE conference on computer vision and pattern recognition. 1-9.
550
+ [70] Elina Thibeau-Sutre, Sasha Collin, Ninon Burgos, and Olivier Colliot. 2023. Interpretability of Machine Learning Methods Applied to Neuroimaging. Machine Learning for Brain Disorders (2023), 655-704.
551
+ [71] Nicola Tonellotto, Alberto Gotta, Franco Maria Nardini, Daniele Gadler, and Fabrizio Silvestri. 2021. Neural network quantization in federated learning at the edge. Information Sciences 575 (2021), 417-436.
552
+
553
+ [72] Linlin Tu, Xiaomin Ouyang, Jiayu Zhou, Yuze He, and Guoliang Xing. 2021. FedDL: Federated Learning via Dynamic Layer Sharing for Human Activity Recognition. In ACM SenSys. 15-28.
554
+ [73] Saeed Vahidian, Mahdi Morafah, and Bill Lin. 2021. Personalized federated learning by structured and unstructured pruning under data heterogeneity. In 2021 IEEE 41st international conference on distributed computing systems workshops (ICDCSW). IEEE, 27-34.
555
+ [74] Yanwen Wang, Jiaxing Shen, and Yuanqing Zheng. 2020. Push the Limit of Acoustic Gesture Recognition. In IEEE INFOCOM.
556
+ [75] Wikipedia contributors. 2023. Cross-entropy - Wikipedia, The Free Encyclopedia. https://en.wikipedia.org/w/index.php?title=Cross-entropy&oldid=1170369413
557
+ [76] Rui Xiao, Jianwei Liu, Jinsong Han, and Kui Ren. 2021. Onefi: One-shot recognition for unseen gesture via cots wifi. In ACM SenSys.
558
+ [77] Huatao Xu, Pengfei Zhou, Rui Tan, and Mo Li. 2023. Practically Adopting Human Activity Recognition. In Proceedings of the 29th Annual International Conference on Mobile Computing and Networking. 1-15.
559
+ [78] Huatao Xu, Pengfei Zhou, Rui Tan, Mo Li, and Guobin Shen. 2021. Limu-bert: Unleashing the potential of unlabeled data for imu sensing applications. In Proceedings of the 19th ACM Conference on Embedded Networked Sensor Systems. 220-233.
560
+ [79] Qiang Yang and Yuanqing Zheng. 2023. AquaHelper: Underwater SOS Transmission and Detection in Swimming Pools. In ACM SenSys.
561
+ [80] Zhengjie Yang, Wei Bao, Dong Yuan, Nguyen H Tran, and Albert Y Zomaya. 2022. Federated learning with nesterov accelerated gradient. IEEE Transactions on Parallel and Distributed Systems 33, 12 (2022), 4863-4873.
562
+ [81] Zhengjie Yang, Sen Fu, Wei Bao, Dong Yuan, and Bing Zhou. 2023. Hierarchical Federated Learning with Adaptive Momentum in Multi-Tier Networks. In IEEE ICDCS. IEEE, 499-510.
563
+ [82] Dezhong Yao, Wanning Pan, Yao Wan, Hai Jin, and Lichao Sun. 2021. FedHM: Efficient Federated Learning for Heterogeneous Models via Low-rank Factorization. CoRR abs/2111.14655 (2021).
564
+ [83] Jason Yosinski, Jeff Clune, Yoshua Bengio, and Hod Lipson. 2014. How transferable are features in deep neural networks?. In Advances in Neural Information Processing Systems 27 2014, NeurIPS.
565
+ [84] Matthew D. Zeiler and Rob Fergus. 2014. Visualizing and Understanding Convolutional Networks. In ECCV.
566
+ [85] Jie Zhang, Song Guo, Xiaosong Ma, Haozhao Wang, Wenchao Xu, and Feijie Wu. 2021. Parameterized Knowledge Transfer for Personalized Federated Learning. In NeurIPS. 10092-10104.
567
+ [86] Jie Zhang, Zhihao Qu, Chenxi Chen, Haozhao Wang, Yufeng Zhan, Baoliu Ye, and Song Guo. 2021. Edge learning: The enabling technology for distributed big data analytics in the edge. ACM Computing Surveys (CSUR) (2021).
568
+ [87] Li Lyna Zhang, Shihao Han, Jianyu Wei, Ningxin Zheng, Ting Cao, Yuqing Yang, and Yunxin Liu. 2021. nn-Meter: towards accurate latency prediction of deep-learning model inference on diverse edge devices. In ACM MobiSys. ACM, 81-93.
569
+ [88] Zhuangdi Zhu, Junyuan Hong, and Jiayu Zhou. 2021. Data-Free Knowledge Distillation for Heterogeneous Federated Learning. In ICML. PMLR.
data/2025/2502_20xxx/2502.20639/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f87f51e5bbbb039958e497770ff20b43658eaa61e6ad2e9b8c536e45e4bea9a
3
+ size 1013099
data/2025/2502_20xxx/2502.20639/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20653/882fa9ce-7503-46e8-8936-f6b18b3bf5e6_content_list.json ADDED
@@ -0,0 +1,1721 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [
2
+ {
3
+ "type": "text",
4
+ "text": "Dataset Distillation with Neural Characteristic Function: A Minmax Perspective",
5
+ "text_level": 1,
6
+ "bbox": [
7
+ 94,
8
+ 130,
9
+ 903,
10
+ 152
11
+ ],
12
+ "page_idx": 0
13
+ },
14
+ {
15
+ "type": "text",
16
+ "text": "Shaobo Wang $^{1,2}$ Yicun Yang $^{2}$ Zhiyuan Liu $^{2}$ Chenghao Sun $^{2}$ Xuming Hu $^{3}$ Conghui He $^{4}$ Linfeng Zhang $^{1,2*}$",
17
+ "bbox": [
18
+ 232,
19
+ 179,
20
+ 764,
21
+ 215
22
+ ],
23
+ "page_idx": 0
24
+ },
25
+ {
26
+ "type": "text",
27
+ "text": "$^{1}$ School of Artificial Intelligence, Shanghai Jiao Tong University",
28
+ "bbox": [
29
+ 243,
30
+ 215,
31
+ 751,
32
+ 234
33
+ ],
34
+ "page_idx": 0
35
+ },
36
+ {
37
+ "type": "text",
38
+ "text": "$^{2}$ EPIC Lab, Shanghai Jiao Tong University",
39
+ "bbox": [
40
+ 330,
41
+ 234,
42
+ 666,
43
+ 251
44
+ ],
45
+ "page_idx": 0
46
+ },
47
+ {
48
+ "type": "text",
49
+ "text": "$^{3}$ Hong Kong University of Science and Technology, Guangzhou",
50
+ "bbox": [
51
+ 245,
52
+ 251,
53
+ 750,
54
+ 268
55
+ ],
56
+ "page_idx": 0
57
+ },
58
+ {
59
+ "type": "text",
60
+ "text": "$^{4}$ Shanghai Artificial Intelligence Laboratory",
61
+ "bbox": [
62
+ 325,
63
+ 268,
64
+ 671,
65
+ 287
66
+ ],
67
+ "page_idx": 0
68
+ },
69
+ {
70
+ "type": "text",
71
+ "text": "{shaobowang1009,zhanglinfeng}@sjtu.edu.cn",
72
+ "bbox": [
73
+ 256,
74
+ 287,
75
+ 735,
76
+ 304
77
+ ],
78
+ "page_idx": 0
79
+ },
80
+ {
81
+ "type": "text",
82
+ "text": "Abstract",
83
+ "text_level": 1,
84
+ "bbox": [
85
+ 248,
86
+ 339,
87
+ 325,
88
+ 353
89
+ ],
90
+ "page_idx": 0
91
+ },
92
+ {
93
+ "type": "text",
94
+ "text": "Dataset distillation has emerged as a powerful approach for reducing data requirements in deep learning. Among various methods, distribution matching-based approaches stand out for their balance of computational efficiency and strong performance. However, existing distance metrics used in distribution matching often fail to accurately capture distributional differences, leading to unreliable measures of discrepancy. In this paper, we reformulate dataset distillation as a minmax optimization problem and introduce Neural Characteristic Function Discrepancy (NCFD), a comprehensive and theoretically grounded metric for measuring distributional differences. NCFD leverages the Characteristic Function (CF) to encapsulate full distributional information, employing a neural network to optimize the sampling strategy for the CF's frequency arguments, thereby maximizing the discrepancy to enhance distance estimation. Simultaneously, we minimize the difference between real and synthetic data under this optimized NCFD measure. Our approach, termed Neural Characteristic Function Matching (NCFM), inherently aligns the phase and amplitude of neural features in the complex plane for both real and synthetic data, achieving a balance between realism and diversity in synthetic samples. Experiments demonstrate that our method achieves significant performance gains over state-of-the-art methods on both low- and high-resolution datasets. Notably, we achieve a $20.5\\%$ accuracy boost on ImageSquawk. Our method also reduces GPU memory usage by over $300\\times$ and achieves $20\\times$ faster processing speeds compared to state-of-the-art methods. To the best of our knowledge, this is the first work to achieve lossless compression of CIFAR-100 on a single NVIDIA 2080 Ti GPU using only 2.3 GB of memory.",
95
+ "bbox": [
96
+ 86,
97
+ 372,
98
+ 483,
99
+ 857
100
+ ],
101
+ "page_idx": 0
102
+ },
103
+ {
104
+ "type": "image",
105
+ "img_path": "images/2579577802e13a513b788c8274f4d12d521c98fe5701a701241cc9be77902fce.jpg",
106
+ "image_caption": [
107
+ "Synthetic Data Real Data Z: Latent Space $\\psi$ : Parameterized Network",
108
+ "(a) Previous paradigm: optimize $\\widetilde{D}$ to minimize the distance within $Z$ ."
109
+ ],
110
+ "image_footnote": [],
111
+ "bbox": [
112
+ 519,
113
+ 351,
114
+ 700,
115
+ 426
116
+ ],
117
+ "page_idx": 0
118
+ },
119
+ {
120
+ "type": "image",
121
+ "img_path": "images/3da4180494fcb84609eb0f265753940bed09de93f05dd5873b0136a20d71db44.jpg",
122
+ "image_caption": [],
123
+ "image_footnote": [],
124
+ "bbox": [
125
+ 707,
126
+ 351,
127
+ 895,
128
+ 426
129
+ ],
130
+ "page_idx": 0
131
+ },
132
+ {
133
+ "type": "text",
134
+ "text": "size the distance within $\\mathcal{Z}$ .",
135
+ "bbox": [
136
+ 723,
137
+ 426,
138
+ 836,
139
+ 436
140
+ ],
141
+ "page_idx": 0
142
+ },
143
+ {
144
+ "type": "image",
145
+ "img_path": "images/1f8388cdfb72e8638cd8761bce24bb64684f8099768f418d5faa17d10baea050.jpg",
146
+ "image_caption": [
147
+ "(b) Our minmax paradigm: first optimize $\\psi$ to maximize the distance in parameterized space $\\mathcal{Z}_{\\psi}$ , then optimize $\\widetilde{D}$ to minimize the distance within $\\mathcal{Z}_{\\psi}$ .",
148
+ "Figure 1. Comparison of different paradigms for dataset distillation. (a) The MSE approach compares point-wise features within Euclidean space, denoted as $\\mathcal{Z}_{\\mathbb{R}}$ , while MMD evaluates moment differences in Hilbert space, $\\mathcal{Z}_{\\mathcal{H}}$ . (b) Our method redefines distribution matching as a minmax optimization problem, where the distributional discrepancy is parameterized by a neural network $\\psi$ . We begin by optimizing $\\psi$ to maximize the discrepancy, thereby establishing the latent space $\\mathcal{Z}_{\\psi}$ , and subsequently optimize the synthesized data $\\hat{\\mathcal{D}}$ to minimize this discrepancy within $\\mathcal{Z}_{\\psi}$ ."
149
+ ],
150
+ "image_footnote": [],
151
+ "bbox": [
152
+ 519,
153
+ 436,
154
+ 700,
155
+ 510
156
+ ],
157
+ "page_idx": 0
158
+ },
159
+ {
160
+ "type": "image",
161
+ "img_path": "images/3767bbf5f8eb407e359840c9dfbba66cdf9c147644c0adca6db6d7cfa9930d30.jpg",
162
+ "image_caption": [],
163
+ "image_footnote": [],
164
+ "bbox": [
165
+ 707,
166
+ 438,
167
+ 895,
168
+ 510
169
+ ],
170
+ "page_idx": 0
171
+ },
172
+ {
173
+ "type": "text",
174
+ "text": "1. Introduction",
175
+ "text_level": 1,
176
+ "bbox": [
177
+ 514,
178
+ 678,
179
+ 643,
180
+ 693
181
+ ],
182
+ "page_idx": 0
183
+ },
184
+ {
185
+ "type": "text",
186
+ "text": "Deep neural networks (DNNs) have achieved remarkable progress across a range of tasks, largely due to the availability of vast amounts of training data. However, training effectively with limited data remains challenging and crucial, particularly when large-scale datasets become too voluminous for storage. To address this, dataset distillation has been proposed to condense a large, real dataset into a smaller, synthetic one [6, 49, 52, 55, 56]. Dataset distillation has been applied in various areas, including neural architecture search [33, 44], continual learning [15, 51], medical image computing [29], and privacy protection [7, 8, 11].",
187
+ "bbox": [
188
+ 509,
189
+ 703,
190
+ 906,
191
+ 869
192
+ ],
193
+ "page_idx": 0
194
+ },
195
+ {
196
+ "type": "text",
197
+ "text": "Among dataset distillation methods, feature or distribution matching (DM) approaches [47, 55] have gained popu",
198
+ "bbox": [
199
+ 511,
200
+ 869,
201
+ 905,
202
+ 901
203
+ ],
204
+ "page_idx": 0
205
+ },
206
+ {
207
+ "type": "aside_text",
208
+ "text": "arXiv:2502.20653v1 [cs.CV] 28 Feb 2025",
209
+ "bbox": [
210
+ 22,
211
+ 263,
212
+ 57,
213
+ 705
214
+ ],
215
+ "page_idx": 0
216
+ },
217
+ {
218
+ "type": "page_footnote",
219
+ "text": "*Corresponding Author.",
220
+ "bbox": [
221
+ 109,
222
+ 886,
223
+ 238,
224
+ 898
225
+ ],
226
+ "page_idx": 0
227
+ },
228
+ {
229
+ "type": "page_number",
230
+ "text": "1",
231
+ "bbox": [
232
+ 493,
233
+ 924,
234
+ 503,
235
+ 935
236
+ ],
237
+ "page_idx": 0
238
+ },
239
+ {
240
+ "type": "image",
241
+ "img_path": "images/fd10e80466cecd63f97eee094d2dd46454f7f0b88c64d16007ac793bfec19882.jpg",
242
+ "image_caption": [
243
+ "(a) From Real to Complex",
244
+ "(b) Distribution Matching (DM)",
245
+ "Figure 2. Comparison of different distribution matching methods. (a) Illustration of embedded features from the real domain to complexplane features using Euler's formula [13]. The latent neural feature $\\Phi_{\\pmb{x}}(\\pmb {t})$ captures the amplitude and phase information. (b) MMD-based methods align feature moments in the embedded domain but may not effectively align the overall distributions. (c) CF-based methods directly compare distributions by balancing the amplitude and phase in the complex plane, enhancing distributional similarity."
246
+ ],
247
+ "image_footnote": [],
248
+ "bbox": [
249
+ 96,
250
+ 95,
251
+ 243,
252
+ 202
253
+ ],
254
+ "page_idx": 1
255
+ },
256
+ {
257
+ "type": "image",
258
+ "img_path": "images/724b3dc4b0df2fc7ead266ec622c4cecf20c8b026265e245bcbd020f48289e74.jpg",
259
+ "image_caption": [
260
+ "minimizing MMD doesn't effectively make the distributions similar",
261
+ "Iteration 0"
262
+ ],
263
+ "image_footnote": [],
264
+ "bbox": [
265
+ 248,
266
+ 109,
267
+ 361,
268
+ 183
269
+ ],
270
+ "page_idx": 1
271
+ },
272
+ {
273
+ "type": "image",
274
+ "img_path": "images/686f0775e9c062d309f31fbd097802a9f2fd472ceba8e105f82de6a33ca6967a.jpg",
275
+ "image_caption": [
276
+ "Iteration 5000"
277
+ ],
278
+ "image_footnote": [],
279
+ "bbox": [
280
+ 364,
281
+ 111,
282
+ 465,
283
+ 183
284
+ ],
285
+ "page_idx": 1
286
+ },
287
+ {
288
+ "type": "image",
289
+ "img_path": "images/9cd497ed63e24283fab03e297125f5a603353786dcd0030d2f9c009ec3ed5260.jpg",
290
+ "image_caption": [
291
+ "Iteration 10000"
292
+ ],
293
+ "image_footnote": [],
294
+ "bbox": [
295
+ 467,
296
+ 111,
297
+ 568,
298
+ 183
299
+ ],
300
+ "page_idx": 1
301
+ },
302
+ {
303
+ "type": "image",
304
+ "img_path": "images/f52d0d68d48b30d5fe2f3cd175cbcafb5c1dc72da5ab2bf4de6885b2c9687e8a.jpg",
305
+ "image_caption": [
306
+ "(c) Characteristic Function Matching (Ours) Real Synthetic",
307
+ "minimizing CF makes the distributions similar",
308
+ "Iteration 0"
309
+ ],
310
+ "image_footnote": [],
311
+ "bbox": [
312
+ 568,
313
+ 111,
314
+ 671,
315
+ 183
316
+ ],
317
+ "page_idx": 1
318
+ },
319
+ {
320
+ "type": "image",
321
+ "img_path": "images/58484942b91e753dc671c1d292b53c01fcb1316f175abaf501ce83ed8d4c8ad9.jpg",
322
+ "image_caption": [
323
+ "Iteration 5000"
324
+ ],
325
+ "image_footnote": [],
326
+ "bbox": [
327
+ 676,
328
+ 111,
329
+ 779,
330
+ 183
331
+ ],
332
+ "page_idx": 1
333
+ },
334
+ {
335
+ "type": "image",
336
+ "img_path": "images/c920a295d46efcf67c9d4a831fbfc151c089dec53dedb97296b540c0087827c3.jpg",
337
+ "image_caption": [
338
+ "Iteration 10000"
339
+ ],
340
+ "image_footnote": [],
341
+ "bbox": [
342
+ 782,
343
+ 111,
344
+ 883,
345
+ 183
346
+ ],
347
+ "page_idx": 1
348
+ },
349
+ {
350
+ "type": "image",
351
+ "img_path": "images/05c57ba73b98ec41b7f7bf797e1d7107f3b84e3a9930bb33d39e31e2f6437969.jpg",
352
+ "image_caption": [
353
+ "Figure 3. Comparison of performance, peak GPU memory usage, and distillation speed between the state-of-the-art (SOTA) distillation method and our NCFM on CIFAR-100 across various IPC values, evaluated on 8 NVIDIA H100 GPUs. Notably, NCFM reduces GPU memory usage by over $300\\times$ , achieves $20\\times$ faster distillation, and delivers better performance. We also successfully demonstrated lossless distillation using only 2.3GB GPU memory."
354
+ ],
355
+ "image_footnote": [],
356
+ "bbox": [
357
+ 91,
358
+ 273,
359
+ 478,
360
+ 414
361
+ ],
362
+ "page_idx": 1
363
+ },
364
+ {
365
+ "type": "text",
366
+ "text": "larity for their effective balance between high performance and computational efficiency. Unlike bi-level optimization-based distillation approaches [6, 20, 24, 54, 56], DM-based methods bypass the need for nested optimization. For instance, when learning with 50 images per class (IPC) on CIFAR-10 dataset, DM methods achieve higher test accuracy than gradient matching methods [24, 54, 56], while requiring only a tenth of the computation time.",
367
+ "bbox": [
368
+ 89,
369
+ 521,
370
+ 483,
371
+ 642
372
+ ],
373
+ "page_idx": 1
374
+ },
375
+ {
376
+ "type": "text",
377
+ "text": "A key challenge in DM lies in defining an effective metric to measure distributional discrepancies between real and synthetic datasets. Early methods primarily employed Mean Squared Error (MSE) to compare point-wise features [10, 38, 47], which operates in Euclidean space, $\\mathcal{Z}_{\\mathbb{R}}$ as illustrated on the left of Figure 1(a). However, MSE directly matches pixel-level or patch-level information without capturing the semantic structures embedded in high-dimensional manifolds, which falls short for distribution comparison. Later methods [53, 55, 57] employ Maximum Mean Discrepancy (MMD) as a metric. Nevertheless, research in generative modeling [4, 25] has shown that MMD aligns moments of distributions in a latent Hilbert space, $\\mathcal{Z}_{\\mathcal{H}}$ , as shown on the right of Figure 1(a). While distributional equivalence implies moment equivalence, the converse is not necessarily true: aligning moments alone does not guarantee full distributional matching. As illustrated in",
378
+ "bbox": [
379
+ 89,
380
+ 643,
381
+ 483,
382
+ 901
383
+ ],
384
+ "page_idx": 1
385
+ },
386
+ {
387
+ "type": "text",
388
+ "text": "Figure 2(b), MMD-based methods may fail to capture overall distributional alignment between real and synthetic data, resulting in suboptimal synthesized image quality.",
389
+ "bbox": [
390
+ 511,
391
+ 281,
392
+ 903,
393
+ 327
394
+ ],
395
+ "page_idx": 1
396
+ },
397
+ {
398
+ "type": "text",
399
+ "text": "To overcome these limitations, we propose a novel approach that reformulates distribution matching as an adversarial minmax optimization problem, as depicted in Figure 1(b). By leveraging the minmax paradigm, we adaptively learn the discrepancy metric, enabling it to maximize the separability between real and synthetic data distributions. This dynamic adjustment addresses the rigidity of fixed metrics like MSE and MMD. Meanwhile, the synthetic data is iteratively optimized to minimize the dynamically refined discrepancy measure. Building upon this foundation, we introduce Neural Characteristic Discrepancy (NCFD), a parameterized metric based on the Characteristic Function (CF), which provides a precise and comprehensive representation of the underlying probability distribution. Defined as the Fourier transform of the probability density function, the CF encapsulates all relevant information about a distribution [3, 5, 14, 21, 31, 41]. The CF offers a one-to-one correspondence with the cumulative density function, ensuring the robustness and reliability.",
400
+ "bbox": [
401
+ 511,
402
+ 327,
403
+ 906,
404
+ 613
405
+ ],
406
+ "page_idx": 1
407
+ },
408
+ {
409
+ "type": "text",
410
+ "text": "In our framework, an auxiliary network embeds features while a lightweight sampling network is optimized to dynamically adjust its CF sampling strategy using a scale mixture of normals. During the distillation process, we iteratively minimize the NCFD to bring synthetic data closer to real data, while training the sampling network to maximize NCFD, thereby improving the metric's robustness and accuracy. Unlike MMD which has quadratic computational complexity, NCFD achieves linear time computational complexity. Our method, Neural Characteristic Function Matching (NCFM), aligns both the phase and amplitude of neural features in the complex plane, achieving a balanced synthesis of realism and diversity in the generated images. As shown in Figure 2(c), NCFM effectively captures overall distributional information, leading to well-aligned synthetic and real data distributions after optimization. Our contributions are as follows:",
411
+ "bbox": [
412
+ 511,
413
+ 614,
414
+ 906,
415
+ 869
416
+ ],
417
+ "page_idx": 1
418
+ },
419
+ {
420
+ "type": "text",
421
+ "text": "1. We reformulate the distribution matching problem as a minmax optimization problem, where the sampling net-",
422
+ "bbox": [
423
+ 511,
424
+ 869,
425
+ 908,
426
+ 901
427
+ ],
428
+ "page_idx": 1
429
+ },
430
+ {
431
+ "type": "page_number",
432
+ "text": "2",
433
+ "bbox": [
434
+ 493,
435
+ 924,
436
+ 504,
437
+ 935
438
+ ],
439
+ "page_idx": 1
440
+ },
441
+ {
442
+ "type": "text",
443
+ "text": "work maximizes the distributional discrepancy to learn a proper discrepancy metric, while the synthesized images are optimized to minimize such discrepancy.",
444
+ "bbox": [
445
+ 104,
446
+ 90,
447
+ 482,
448
+ 136
449
+ ],
450
+ "page_idx": 2
451
+ },
452
+ {
453
+ "type": "list",
454
+ "sub_type": "text",
455
+ "list_items": [
456
+ "2. We introduce Neural Characteristic Function Matching (NCFM), which aligns the phase and amplitude information of neural features in the complex plane for both real and synthetic data, achieving a balance between realism and diversity in synthetic data.",
457
+ "3. Extensive experiments across multiple benchmark datasets demonstrate the superior performance and efficiency of NCFM. Particularly, on high-resolution datasets, NCFM achieves significant accuracy gains of up to $20.5\\%$ on ImageSquawk and $17.8\\%$ on ImageMeow at 10 IPC compared to SOTA methods.",
458
+ "4. NCFM achieves unprecedented efficiency in computational resources. As shown in Figure 3, our method dramatically reduces resource requirements with better performance, achieving more than $300 \\times$ reduction in GPU memory usage compared with DATM [16]. Most remarkably, NCFM demonstrates lossless dataset distillation on both CIFAR-10 and CIFAR-100 using about merely 2GB GPU memory, enabling all experiments to be conducted on a single NVIDIA 2080 Ti GPU."
459
+ ],
460
+ "bbox": [
461
+ 86,
462
+ 138,
463
+ 482,
464
+ 445
465
+ ],
466
+ "page_idx": 2
467
+ },
468
+ {
469
+ "type": "text",
470
+ "text": "2. Related Work",
471
+ "text_level": 1,
472
+ "bbox": [
473
+ 89,
474
+ 452,
475
+ 233,
476
+ 468
477
+ ],
478
+ "page_idx": 2
479
+ },
480
+ {
481
+ "type": "text",
482
+ "text": "Dataset Distillation Methods Based on Distribution and Feature Matching. Dataset distillation was proposed by [49]. Compared with various bi-level DD methods, DM [55] is regarded as a efficient method that balances the performance and computational efficiency, without involving the nested model optimization. These methods can be classified into two directions, i.e., point-wise and moment-wise matching. For moment-wise matching, DM-based methods [53, 55, 57] propose to minimize the maximum mean discrepancy (MMD) between synthetic and real datasets. For point-wise feature matching, they typically design better strategies to match features extracted across layers in convolutional neural networks, and apply further adjustments to improve the performance [10, 38, 47]. However, moment-based and point-based matching methods may not capture the overall distributional discrepancy between synthetic and real data, as they are not sufficient conditions for distributional equivalence.",
483
+ "bbox": [
484
+ 89,
485
+ 477,
486
+ 482,
487
+ 750
488
+ ],
489
+ "page_idx": 2
490
+ },
491
+ {
492
+ "type": "text",
493
+ "text": "Characteristic Function as a Distributional Metric. The characteristic function is a unique and universal metric for measuring distributional discrepancy, defined as the Fourier transform of the probability density function [3]. The CF of any real-valued random variable completely defines its probability distribution, providing an alternative analytical approach compared to working directly with probability density or cumulative distribution functions. Unlike the moment-generating function, the CF always exists when treated as a function of a real-valued argument [5]. Re-",
494
+ "bbox": [
495
+ 89,
496
+ 750,
497
+ 482,
498
+ 900
499
+ ],
500
+ "page_idx": 2
501
+ },
502
+ {
503
+ "type": "text",
504
+ "text": "cently, the CFD has been adopted in deep learning for various tasks, e.g., several works have been proposed to use the CFD for generative modeling [1, 27]. However, none of prior works have considered the CFD for distillation.",
505
+ "bbox": [
506
+ 511,
507
+ 90,
508
+ 905,
509
+ 151
510
+ ],
511
+ "page_idx": 2
512
+ },
513
+ {
514
+ "type": "text",
515
+ "text": "3. Preliminaries: Distribution Matching",
516
+ "text_level": 1,
517
+ "bbox": [
518
+ 511,
519
+ 162,
520
+ 852,
521
+ 180
522
+ ],
523
+ "page_idx": 2
524
+ },
525
+ {
526
+ "type": "text",
527
+ "text": "Distribution Matching (DM) was first introduced by [55] as an alternative to traditional bi-level optimization techniques, such as gradient matching methods [20, 24, 54, 56] and trajectory matching methods [6, 9, 12, 16]. Classical DM approaches focus on minimizing the discrepancy between the distributions of real and synthetic data, typically categorized into two main types: feature point matching and moment matching. Feature point matching methods [10, 38, 47] directly compare point-wise features using Mean Square Error (MSE), as defined by:",
528
+ "bbox": [
529
+ 511,
530
+ 186,
531
+ 906,
532
+ 339
533
+ ],
534
+ "page_idx": 2
535
+ },
536
+ {
537
+ "type": "equation",
538
+ "text": "\n$$\n\\mathcal {L} _ {\\mathrm {M S E}} = \\mathbb {E} _ {\\boldsymbol {x} \\sim \\mathcal {D}, \\tilde {\\boldsymbol {x}} \\sim \\tilde {\\mathcal {D}}} \\left[ \\| f (\\boldsymbol {x}) - f (\\tilde {\\boldsymbol {x}}) \\| ^ {2} \\right], \\tag {1}\n$$\n",
539
+ "text_format": "latex",
540
+ "bbox": [
541
+ 573,
542
+ 344,
543
+ 906,
544
+ 369
545
+ ],
546
+ "page_idx": 2
547
+ },
548
+ {
549
+ "type": "text",
550
+ "text": "where $f$ denotes the feature extractor network, $\\mathcal{D}$ and $\\tilde{\\mathcal{D}}$ represent the real and synthetic data distributions, respectively, $\\pmb{x}$ and $\\tilde{\\pmb{x}}$ are samples drawn from $\\mathcal{D}$ and $\\tilde{\\mathcal{D}}$ . However, MSE may not be ideal for comparing distributions, as it only considers direct feature comparisons in Euclidean space, neglecting important semantic information.",
551
+ "bbox": [
552
+ 511,
553
+ 369,
554
+ 905,
555
+ 460
556
+ ],
557
+ "page_idx": 2
558
+ },
559
+ {
560
+ "type": "text",
561
+ "text": "In another line, notable works employed Maximum Mean Discrepancy (MMD) to align high-order moments in the latent feature space [53, 55, 57]. Rigorously, MMD is defined to match moments within the Reproducing Kernel Hilbert Space (RKHS) induced by a selected kernel function. The MMD loss can be formulated as:",
562
+ "bbox": [
563
+ 511,
564
+ 460,
565
+ 905,
566
+ 551
567
+ ],
568
+ "page_idx": 2
569
+ },
570
+ {
571
+ "type": "equation",
572
+ "text": "\n$$\n\\begin{array}{l} \\sup _ {f \\in \\mathcal {F}} \\| \\mathbb {E} _ {\\boldsymbol {x} \\sim \\mathcal {D}} [ f (\\boldsymbol {x}) ] - \\mathbb {E} _ {\\tilde {\\boldsymbol {x}} \\sim \\hat {\\mathcal {D}}} [ f (\\tilde {\\boldsymbol {x}}) ] \\| ^ {2}, \\\\ = \\sup _ {f \\in \\mathcal {F}} \\left(\\mathcal {K} _ {\\mathcal {D}, \\mathcal {D}} + \\mathcal {K} _ {\\tilde {\\mathcal {D}}, \\tilde {\\mathcal {D}}} - 2 \\mathcal {K} _ {\\mathcal {D}, \\tilde {\\mathcal {D}}}\\right), \\tag {2} \\\\ \\end{array}\n$$\n",
573
+ "text_format": "latex",
574
+ "bbox": [
575
+ 575,
576
+ 555,
577
+ 903,
578
+ 614
579
+ ],
580
+ "page_idx": 2
581
+ },
582
+ {
583
+ "type": "text",
584
+ "text": "where $\\mathcal{K}_{\\mathcal{D},\\tilde{\\mathcal{D}}} = \\mathbb{E}_{\\boldsymbol{x}\\sim \\mathcal{D},\\tilde{\\boldsymbol{x}}\\sim \\tilde{\\mathcal{D}}}[\\mathcal{K}_{f(\\boldsymbol{x}),f(\\tilde{\\boldsymbol{x}})}]$ denotes the kernel function associated with feature extractor $f$ in function class $\\mathcal{F}$ . The choice of kernel function $\\mathcal{K}$ is not unique and requires careful selection for MMD to be effective. However, instead of selecting certain kernel function, most DM-based methods [10, 55, 57] align moments directly in the feature space, commonly approximated as:",
585
+ "bbox": [
586
+ 511,
587
+ 619,
588
+ 905,
589
+ 727
590
+ ],
591
+ "page_idx": 2
592
+ },
593
+ {
594
+ "type": "equation",
595
+ "text": "\n$$\n\\mathcal {L} _ {\\mathrm {M M D}} = \\left\\| \\mathbb {E} _ {\\boldsymbol {x} \\sim \\mathcal {D}} [ f (\\boldsymbol {x}) ] - \\mathbb {E} _ {\\tilde {\\boldsymbol {x}} \\sim \\tilde {\\mathcal {D}}} [ f (\\tilde {\\boldsymbol {x}}) ] \\right\\| ^ {2}. \\tag {3}\n$$\n",
596
+ "text_format": "latex",
597
+ "bbox": [
598
+ 563,
599
+ 731,
600
+ 903,
601
+ 750
602
+ ],
603
+ "page_idx": 2
604
+ },
605
+ {
606
+ "type": "text",
607
+ "text": "We argue that such empirical MMD estimates lack rigor, as they do not provide a maximal upper bound on the discrepancy, falling short of MMD's theoretical requirements.",
608
+ "bbox": [
609
+ 511,
610
+ 752,
611
+ 905,
612
+ 797
613
+ ],
614
+ "page_idx": 2
615
+ },
616
+ {
617
+ "type": "text",
618
+ "text": "4. Adversarial Distribution Matching",
619
+ "text_level": 1,
620
+ "bbox": [
621
+ 511,
622
+ 808,
623
+ 828,
624
+ 825
625
+ ],
626
+ "page_idx": 2
627
+ },
628
+ {
629
+ "type": "text",
630
+ "text": "4.1. Minmax Framework",
631
+ "text_level": 1,
632
+ "bbox": [
633
+ 511,
634
+ 833,
635
+ 710,
636
+ 848
637
+ ],
638
+ "page_idx": 2
639
+ },
640
+ {
641
+ "type": "text",
642
+ "text": "To address existing challenges with discrepancy measure selection, we propose a new approach that reformulates distribution matching as a minmax optimization problem. In",
643
+ "bbox": [
644
+ 511,
645
+ 854,
646
+ 905,
647
+ 900
648
+ ],
649
+ "page_idx": 2
650
+ },
651
+ {
652
+ "type": "page_number",
653
+ "text": "3",
654
+ "bbox": [
655
+ 493,
656
+ 924,
657
+ 504,
658
+ 936
659
+ ],
660
+ "page_idx": 2
661
+ },
662
+ {
663
+ "type": "image",
664
+ "img_path": "images/0008b4982c396724408b45226589e92e7d316ca31a12b2d76f90dd1f10b89460.jpg",
665
+ "image_caption": [
666
+ "Figure 4. Dataset Distillation with Neural Characteristic Function Matching (NCFM). Real and synthetic data points are sampled and embedded through a feature extractor network. The synthetic data is optimized by minimizing the distributional discrepancy between real and synthetic data, measured via the Neural Characteristic Function Discrepancy (NCFD) in the complex plane. Additionally, an auxiliary network learns an optimal sampling distribution for the frequency arguments of the characteristic function. Best viewed in color."
667
+ ],
668
+ "image_footnote": [],
669
+ "bbox": [
670
+ 140,
671
+ 64,
672
+ 866,
673
+ 218
674
+ ],
675
+ "page_idx": 3
676
+ },
677
+ {
678
+ "type": "text",
679
+ "text": "this framework, we maximize the discrepancy measure to define a robust discrepancy metric, parameterized by a neural network $\\psi$ . Concurrently, we minimize the discrepancy between the synthetic dataset $\\tilde{\\mathcal{D}}$ and the real dataset $\\mathcal{D}$ by optimizing the synthetic data distribution $\\tilde{\\mathcal{D}}$ . Formally, this minmax optimization problem is expressed as:",
680
+ "bbox": [
681
+ 88,
682
+ 300,
683
+ 483,
684
+ 391
685
+ ],
686
+ "page_idx": 3
687
+ },
688
+ {
689
+ "type": "equation",
690
+ "text": "\n$$\n\\min _ {\\tilde {\\mathcal {D}}} \\max _ {\\psi} \\mathcal {L} (\\tilde {\\mathcal {D}}, \\mathcal {D}, f, \\psi), \\tag {4}\n$$\n",
691
+ "text_format": "latex",
692
+ "bbox": [
693
+ 202,
694
+ 401,
695
+ 482,
696
+ 426
697
+ ],
698
+ "page_idx": 3
699
+ },
700
+ {
701
+ "type": "text",
702
+ "text": "where $\\mathcal{L}$ denotes the discrepancy measure, $f$ is the feature extractor network, and $\\psi$ is the network learning the discrepancy metric. This minmax framework seeks the optimal synthetic data distribution $\\tilde{\\mathcal{D}}$ that minimizes $\\mathcal{L}$ while network $\\psi$ maximizes $\\mathcal{L}$ to establish a robust metric.",
703
+ "bbox": [
704
+ 89,
705
+ 431,
706
+ 483,
707
+ 506
708
+ ],
709
+ "page_idx": 3
710
+ },
711
+ {
712
+ "type": "text",
713
+ "text": "4.2. Neural Characteristic Function Matching",
714
+ "text_level": 1,
715
+ "bbox": [
716
+ 89,
717
+ 516,
718
+ 444,
719
+ 532
720
+ ],
721
+ "page_idx": 3
722
+ },
723
+ {
724
+ "type": "text",
725
+ "text": "4.2.1. Neural Characteristic Function Discrepancy",
726
+ "text_level": 1,
727
+ "bbox": [
728
+ 89,
729
+ 539,
730
+ 444,
731
+ 554
732
+ ],
733
+ "page_idx": 3
734
+ },
735
+ {
736
+ "type": "text",
737
+ "text": "To define a suitable discrepancy metric within this minmax framework, we propose a novel discrepancy measure based on the characteristic function (CF), which enables direct and robust assessment of distributional discrepancies. Characteristic functions are a mainstay in probability theory, often used as an alternative to probability density functions due to their unique properties. Specifically, the CF of a random variable $x$ is the expectation of its complex exponential transform, defined as:",
738
+ "bbox": [
739
+ 89,
740
+ 556,
741
+ 483,
742
+ 691
743
+ ],
744
+ "page_idx": 3
745
+ },
746
+ {
747
+ "type": "equation",
748
+ "text": "\n$$\n\\Phi_ {\\boldsymbol {x}} (\\boldsymbol {t}) = \\mathbb {E} _ {\\boldsymbol {x}} \\left[ e ^ {j \\langle \\boldsymbol {t}, \\boldsymbol {x} \\rangle} \\right] = \\int_ {\\boldsymbol {x}} e ^ {j \\langle \\boldsymbol {t}, \\boldsymbol {x} \\rangle} d F (\\boldsymbol {x}), \\tag {5}\n$$\n",
749
+ "text_format": "latex",
750
+ "bbox": [
751
+ 143,
752
+ 703,
753
+ 482,
754
+ 734
755
+ ],
756
+ "page_idx": 3
757
+ },
758
+ {
759
+ "type": "text",
760
+ "text": "where $F(\\pmb{x})$ denotes the cumulative distribution function (cdf) of $\\pmb{x}$ , $j = \\sqrt{-1}$ , and $\\pmb{t}$ is the frequency argument. Since the cdf is not directly accessible in practice, we approximate the CF empirically as $\\Phi_{\\pmb{x}}(\\pmb{t}) = \\frac{1}{N}\\sum_{i=1}^{N}e^{j\\langle\\pmb{t},\\pmb{x}_i\\rangle}$ , where $N$ is the sample size in the dataset. The CF provides a theoretically principled description of a distribution, summarized in the following theorems.",
761
+ "bbox": [
762
+ 89,
763
+ 739,
764
+ 483,
765
+ 845
766
+ ],
767
+ "page_idx": 3
768
+ },
769
+ {
770
+ "type": "text",
771
+ "text": "Theorem 1 (Lévy's Convergence Theorem [31]) Let $\\{X_{n}\\}_{n = 1}^{\\infty}$ be a sequence of random variables with characteristic functions $\\Phi_n(t) = \\mathbb{E}_{X_n}[e^{j\\langle t,X_n\\rangle}]$ . Suppose",
772
+ "bbox": [
773
+ 89,
774
+ 854,
775
+ 483,
776
+ 902
777
+ ],
778
+ "page_idx": 3
779
+ },
780
+ {
781
+ "type": "text",
782
+ "text": "$\\Phi_n(\\pmb{t}) \\to \\Phi(\\pmb{t})$ pointwise for each $\\pmb{t} \\in \\mathbb{R}^d$ as $n \\to \\infty$ . If $\\Phi(\\pmb{t})$ is continuous at $\\pmb{t} = 0$ , then there exists a random variable $X$ with characteristic function $\\Phi(\\pmb{t})$ , and $X_n$ converges in distribution to $X$ .",
783
+ "bbox": [
784
+ 511,
785
+ 300,
786
+ 906,
787
+ 359
788
+ ],
789
+ "page_idx": 3
790
+ },
791
+ {
792
+ "type": "text",
793
+ "text": "Theorem 2 (Uniqueness for Characteristic Functions [14]) If two random variables $X$ and $Y$ have the same characteristic function, $\\Phi_X(t) = \\Phi_Y(t)$ for all $t$ , then $X$ and $Y$ are identically distributed. In other words, a characteristic function uniquely determines the distribution.",
794
+ "bbox": [
795
+ 511,
796
+ 392,
797
+ 919,
798
+ 468
799
+ ],
800
+ "page_idx": 3
801
+ },
802
+ {
803
+ "type": "text",
804
+ "text": "By Theorems 1 and 2, the empirical CF weakly converges to the population CF, ensuring that the CF serves as a reliable proxy for the underlying distribution. Based on the CF, we define our characteristic function discrepancy (CFD) as:",
805
+ "bbox": [
806
+ 511,
807
+ 503,
808
+ 906,
809
+ 564
810
+ ],
811
+ "page_idx": 3
812
+ },
813
+ {
814
+ "type": "equation",
815
+ "text": "\n$$\n\\mathcal {C} _ {\\mathcal {T}} (\\boldsymbol {x}, \\tilde {\\boldsymbol {x}}) = \\int_ {t} \\sqrt {\\underbrace {(\\Phi_ {\\boldsymbol {x}} (\\boldsymbol {t}) - \\Phi_ {\\tilde {\\boldsymbol {x}}} (\\boldsymbol {t})) (\\bar {\\Phi} _ {\\boldsymbol {x}} (\\boldsymbol {t}) - \\bar {\\Phi} _ {\\tilde {\\boldsymbol {x}}} (\\boldsymbol {t}))} _ {\\operatorname {C h f} (t)} d F _ {\\mathcal {T}} (\\boldsymbol {t}), \\tag {6}\n$$\n",
816
+ "text_format": "latex",
817
+ "bbox": [
818
+ 514,
819
+ 585,
820
+ 903,
821
+ 643
822
+ ],
823
+ "page_idx": 3
824
+ },
825
+ {
826
+ "type": "text",
827
+ "text": "where $\\bar{\\Phi}$ is the complex conjugate of $\\Phi$ , and $F_{\\mathcal{T}}(t)$ is the CDF of a sampling distribution on $t$ . To simplify, we let $\\mathrm{Chf}(t) = (\\Phi_x(t) - \\Phi_{\\tilde{x}}(t))(\\bar{\\Phi}_x(t) - \\bar{\\Phi}_{\\tilde{x}}(t))$ for further analysis. We show that $\\mathcal{C}_{\\mathcal{T}}(\\boldsymbol{x},\\tilde{\\boldsymbol{x}})$ defines a valid distance metric in the following theorem.",
828
+ "bbox": [
829
+ 511,
830
+ 643,
831
+ 906,
832
+ 719
833
+ ],
834
+ "page_idx": 3
835
+ },
836
+ {
837
+ "type": "text",
838
+ "text": "Theorem 3 (CFD as a Distance Metric.) The CF discrepancy $\\mathcal{C}_{\\mathcal{T}}(\\boldsymbol{x}, \\tilde{\\boldsymbol{x}})$ , as defined in Eq. (6), serves as a distance metric between $\\boldsymbol{x}$ and $\\tilde{\\boldsymbol{x}}$ when the support of $\\mathcal{T}$ resides in Euclidean space. It satisfies the properties of nonnegativity, symmetry, and the triangle inequality.",
839
+ "bbox": [
840
+ 511,
841
+ 752,
842
+ 906,
843
+ 829
844
+ ],
845
+ "page_idx": 3
846
+ },
847
+ {
848
+ "type": "text",
849
+ "text": "Theorem 3 establishes that CFD is a valid distance metric. Furthermore, we demonstrate that this formulation decomposes CFD into phase, $\\pmb{a}_{\\pmb{x}}(\\pmb{t})$ , and amplitude, $|\\Phi_{\\pmb{x}}(t)|$ , com",
850
+ "bbox": [
851
+ 511,
852
+ 854,
853
+ 906,
854
+ 902
855
+ ],
856
+ "page_idx": 3
857
+ },
858
+ {
859
+ "type": "page_number",
860
+ "text": "4",
861
+ "bbox": [
862
+ 493,
863
+ 924,
864
+ 503,
865
+ 935
866
+ ],
867
+ "page_idx": 3
868
+ },
869
+ {
870
+ "type": "text",
871
+ "text": "ponents through Euler's formula:",
872
+ "bbox": [
873
+ 89,
874
+ 90,
875
+ 312,
876
+ 106
877
+ ],
878
+ "page_idx": 4
879
+ },
880
+ {
881
+ "type": "equation",
882
+ "text": "\n$$\n\\begin{array}{l} \\operatorname {C h f} (\\boldsymbol {t}) = \\left| \\Phi_ {\\boldsymbol {x}} (\\boldsymbol {t}) \\right| ^ {2} + \\left| \\Phi_ {\\hat {\\boldsymbol {x}}} (\\boldsymbol {t}) \\right| ^ {2} \\\\ - \\left| \\Phi_ {\\boldsymbol {x}} (\\boldsymbol {t}) \\right| \\left| \\Phi_ {\\tilde {\\boldsymbol {x}}} (\\boldsymbol {t}) \\right| (2 \\cos (\\boldsymbol {a} _ {\\boldsymbol {x}} (\\boldsymbol {t}) - \\boldsymbol {a} _ {\\tilde {\\boldsymbol {x}}} (\\boldsymbol {t}))) \\\\ = \\underbrace {\\left(\\left| \\Phi_ {\\boldsymbol {x}} (\\boldsymbol {t}) - \\Phi_ {\\bar {\\boldsymbol {x}}} (\\boldsymbol {t}) \\right|\\right) ^ {2}} _ {\\text {a m p l i t u d e d i f f e r e n c e}} \\tag {7} \\\\ + 2 \\left| \\Phi_ {\\boldsymbol {x}} (\\boldsymbol {t}) \\right| \\left| \\Phi_ {\\tilde {\\boldsymbol {x}}} (\\boldsymbol {t}) \\right| \\underbrace {(1 - \\cos (\\boldsymbol {a} _ {\\boldsymbol {x}} (\\boldsymbol {t}) - \\boldsymbol {a} _ {\\tilde {\\boldsymbol {x}}} (\\boldsymbol {t})))} _ {\\text {p h a s e d i f f e r e n c e}}, \\\\ \\end{array}\n$$\n",
883
+ "text_format": "latex",
884
+ "bbox": [
885
+ 120,
886
+ 112,
887
+ 480,
888
+ 227
889
+ ],
890
+ "page_idx": 4
891
+ },
892
+ {
893
+ "type": "list",
894
+ "sub_type": "text",
895
+ "list_items": [
896
+ "- Phase Information: the term $1 - \\cos (a_{\\pmb{x}}(\\pmb{t}) - a_{\\tilde{\\pmb{x}}}(\\pmb{t}))$ represents the phase, encoding data centres crucial for realism.",
897
+ "- Amplitude Information: the term $|\\Phi_{\\pmb{x}}(\\pmb{t}) - \\Phi_{\\tilde{\\pmb{x}}}(\\pmb{t})|^{2}$ captures the distribution scale, contributing to the diversity."
898
+ ],
899
+ "bbox": [
900
+ 89,
901
+ 229,
902
+ 482,
903
+ 289
904
+ ],
905
+ "page_idx": 4
906
+ },
907
+ {
908
+ "type": "text",
909
+ "text": "This phase-amplitude decomposition, supported in signal processing [32, 35] and generative models [27], provides insight into CFD's descriptive power. In practice, to reduce computational cost, we further introduce an additional feature extractor $f$ to map input variables into a latent space, which is similar to previous works in distribution matching [10, 26, 55, 57]. We also use a parameterized sampling network $\\psi$ to obtain the distribution of frequency argument $t$ , thereby extending the CFD to a more general parameterized form, i.e., Neural Characteristic Function Discrepancy (NCFD) as $\\mathcal{C}_{\\mathcal{T}}(\\pmb{x},\\tilde{\\pmb{x}};f,\\psi) = \\int_t\\sqrt{\\mathrm{Chf}(\\pmb{t};f)dF_{\\mathcal{T}}(\\pmb{t};\\psi)}$ , where $\\mathrm{Chf}(t,f)$ is defined as $\\left|\\Phi_{f(\\pmb{x})}(t) - \\Phi_{f(\\tilde{\\pmb{x}})}(t)\\right|^2 + 2\\left|\\Phi_{f(\\pmb{x})}(t)\\right|\\left|\\Phi_{f(\\tilde{\\pmb{x}})}(t)\\right|(1 - \\cos (\\pmb{a}_{f(\\pmb{x})}(t) - \\pmb{a}_{f(\\tilde{\\pmb{x}})}(t)))$ .",
910
+ "bbox": [
911
+ 89,
912
+ 290,
913
+ 483,
914
+ 507
915
+ ],
916
+ "page_idx": 4
917
+ },
918
+ {
919
+ "type": "text",
920
+ "text": "4.2.2. Determining the sampling strategy for NCFD",
921
+ "text_level": 1,
922
+ "bbox": [
923
+ 89,
924
+ 510,
925
+ 452,
926
+ 525
927
+ ],
928
+ "page_idx": 4
929
+ },
930
+ {
931
+ "type": "text",
932
+ "text": "The core aspect in optimizing $\\mathcal{C}_{\\mathcal{T}}(\\boldsymbol{x},\\tilde{\\boldsymbol{x}};f,\\psi)$ lies in determining the form of $F_{\\mathcal{T}}(t;\\psi)$ , i.e., how to correctly and efficiently sample $\\pmb{t}$ from a carefully picked distribution. Similar with works in generative adversarial network [1, 28], we define $F_{\\mathcal{T}}(t)$ as the cumulative distribution function (cdf) of a scale mixture of normals, as $p_{\\mathcal{T}}(t) = \\int_{\\Sigma}\\mathcal{N}(t|\\mathbf{0},\\Sigma)p_{\\Sigma}(\\mathbf{\\Sigma})d\\mathbf{\\Sigma}$ , where $p_{\\mathcal{T}}(t)$ is the probability density function (pdf) of $F_{\\mathcal{T}}(t),\\mathcal{N}(t|\\mathbf{0},\\Sigma)$ denotes a zero-mean Gaussian distribution with covariance $\\Sigma$ , and $p_{\\Sigma}(\\Sigma)$ represents the distribution of $\\Sigma$ . We observe that as the number of sampled frequency arguments increases, the approximation of the empirical CF improves, as guaranteed by Lévy's Convergence Theorem [31], ultimately leading to higher quality synthetic data.",
933
+ "bbox": [
934
+ 89,
935
+ 527,
936
+ 483,
937
+ 741
938
+ ],
939
+ "page_idx": 4
940
+ },
941
+ {
942
+ "type": "text",
943
+ "text": "4.2.3. Distribution Matching with NCFD",
944
+ "text_level": 1,
945
+ "bbox": [
946
+ 89,
947
+ 746,
948
+ 377,
949
+ 760
950
+ ],
951
+ "page_idx": 4
952
+ },
953
+ {
954
+ "type": "text",
955
+ "text": "Given the NCFD measure $\\mathcal{C}_{\\mathcal{T}}(\\boldsymbol{x},\\tilde{\\boldsymbol{x}};f,\\psi)$ , we now propose a method to utilize NCFD for distribution matching, termed as Neural Characteristic Function Matching (NCFM). A visual illustration of the NCFM pipeline is provided in Figure 4. On one hand, we maximize the NCFD to learn an effective discrepancy metric by optimizing the network $\\psi$ . On the other hand, we minimize this learned NCFD to obtain an optimal synthetic dataset, $\\hat{\\mathcal{D}}$ . In practice, we introduce a hyper-parameter $\\alpha$ to balance the amplitude and",
956
+ "bbox": [
957
+ 89,
958
+ 763,
959
+ 483,
960
+ 900
961
+ ],
962
+ "page_idx": 4
963
+ },
964
+ {
965
+ "type": "text",
966
+ "text": "phase information in the NCFD, then the minmax optimization problem can be formulated as:",
967
+ "bbox": [
968
+ 511,
969
+ 90,
970
+ 903,
971
+ 121
972
+ ],
973
+ "page_idx": 4
974
+ },
975
+ {
976
+ "type": "equation",
977
+ "text": "\n$$\n\\begin{array}{l} \\min _ {\\tilde {\\mathcal {D}}} \\max _ {\\psi} \\mathcal {L} (\\tilde {\\mathcal {D}}, \\mathcal {D}, f, \\psi) = \\min _ {\\tilde {\\mathcal {D}}} \\max _ {\\psi} \\mathbb {E} _ {\\boldsymbol {x} \\sim \\mathcal {D}, \\tilde {\\boldsymbol {x}} \\sim \\bar {\\mathcal {D}}} \\mathcal {C} _ {\\mathcal {T}} (\\boldsymbol {x}, \\tilde {\\boldsymbol {x}}; f, \\psi) \\\\ = \\min _ {\\tilde {\\mathcal {D}}} \\max _ {\\psi} \\mathbb {E} _ {\\boldsymbol {x} \\sim \\mathcal {D}, \\tilde {\\boldsymbol {x}} \\sim \\tilde {\\mathcal {D}}} \\int_ {\\boldsymbol {t}} \\sqrt {\\operatorname {C h f} (\\boldsymbol {t} ; f)} d F _ {\\mathcal {T}} (\\boldsymbol {t}; \\psi) \\\\ \\end{array}\n$$\n",
978
+ "text_format": "latex",
979
+ "bbox": [
980
+ 519,
981
+ 136,
982
+ 900,
983
+ 189
984
+ ],
985
+ "page_idx": 4
986
+ },
987
+ {
988
+ "type": "text",
989
+ "text": "where $\\operatorname{Chf}(\\pmb{t}; f) = \\alpha \\left(\\left|\\Phi_{f(\\pmb{x})}(\\pmb{t}) - \\Phi_{f(\\tilde{\\pmb{x}})}(\\pmb{t})\\right|\\right)^2 + (1 - \\alpha)$ .",
990
+ "bbox": [
991
+ 517,
992
+ 191,
993
+ 901,
994
+ 213
995
+ ],
996
+ "page_idx": 4
997
+ },
998
+ {
999
+ "type": "equation",
1000
+ "text": "\n$$\n\\left. \\left(2 \\left| \\Phi_ {f (\\boldsymbol {x})} (\\boldsymbol {t}) \\right| \\mid \\Phi_ {f (\\tilde {\\boldsymbol {x}})} (\\boldsymbol {t}) \\right|\\right) \\cdot \\left(1 - \\cos \\left(\\boldsymbol {a} _ {f (\\boldsymbol {x})} (\\boldsymbol {t}) - \\boldsymbol {a} _ {f (\\tilde {\\boldsymbol {x}})} (\\boldsymbol {t})\\right)\\right). \\tag {8}\n$$\n",
1001
+ "text_format": "latex",
1002
+ "bbox": [
1003
+ 517,
1004
+ 215,
1005
+ 903,
1006
+ 244
1007
+ ],
1008
+ "page_idx": 4
1009
+ },
1010
+ {
1011
+ "type": "text",
1012
+ "text": "For the design of $f$ , we used a hybrid approach that combines a pre-trained model with a randomly initialized model, both selected from a subset of trained models. This ensures that the feature extractor remains moderately diverse yet discriminative. The hybrid feature extractor is constructed by $\\beta$ -blending the checkpoints of the initial and final models, where each model is chosen from a specific subset of available models. At each distillation step, the blending coefficient $\\beta \\in (0,1)$ is sampled from a uniform distribution $\\mathcal{U}(0,1)$ , providing a balanced combination of initial and final checkpoints. Our NCFM can be seamlessly integrated with additional data curation steps, such as generating soft labels with a pre-trained neural network and performing dataset finetuning. Unlike prior methods that focus on learning soft labels [16, 19, 36], NCFM simply leverages a pre-trained network to efficiently generate soft labels for the distilled dataset, improving both efficiency and effectiveness. However, these additional curation steps are not essential for NCFM, as it can achieve SOTA performance within the pure minmax framework.",
1013
+ "bbox": [
1014
+ 511,
1015
+ 244,
1016
+ 906,
1017
+ 547
1018
+ ],
1019
+ "page_idx": 4
1020
+ },
1021
+ {
1022
+ "type": "text",
1023
+ "text": "5. Experiments",
1024
+ "text_level": 1,
1025
+ "bbox": [
1026
+ 513,
1027
+ 564,
1028
+ 643,
1029
+ 580
1030
+ ],
1031
+ "page_idx": 4
1032
+ },
1033
+ {
1034
+ "type": "text",
1035
+ "text": "5.1. Setup",
1036
+ "text_level": 1,
1037
+ "bbox": [
1038
+ 513,
1039
+ 589,
1040
+ 594,
1041
+ 606
1042
+ ],
1043
+ "page_idx": 4
1044
+ },
1045
+ {
1046
+ "type": "text",
1047
+ "text": "Baseline methods. We compared NCFM with several representative approaches in dataset distillation and coreset selection. These include gradient-matching methods such as DC [56], DCC [24], DSA and DSAC [54]. Kernel-based methods like KIP [34] and FrePo [58] were also included. Distribution-matching methods like CAFE [47], DM [55], IDM [57], M3D [53], IID [10], and DSDM [26] were part of the evaluation. We also included trajectory-matching methods such as MTT [6], FTD [12], ATT [30], and TESLA [9]. State-of-the-art methods like DATM [16], G-VBSM [40], and RDED [45] were also considered in our comparisons. Additionally, we benchmarked our method against classical coreset selection techniques, including random selection, Herding [50], and Forgetting [46].",
1048
+ "bbox": [
1049
+ 511,
1050
+ 612,
1051
+ 906,
1052
+ 824
1053
+ ],
1054
+ "page_idx": 4
1055
+ },
1056
+ {
1057
+ "type": "text",
1058
+ "text": "Datasets and Networks. Our evaluations were conducted on widely-used datasets, including CIFAR-10 and CIFAR-100 [22] with resolution of $32 \\times 32$ , Tiny ImageNet [23] with resolution of $64 \\times 64$ , and ImageNet subsets with resolution of $128 \\times 128$ , i.e., ImageNette, ImageWoof, ImageFruit, Im",
1059
+ "bbox": [
1060
+ 511,
1061
+ 825,
1062
+ 905,
1063
+ 900
1064
+ ],
1065
+ "page_idx": 4
1066
+ },
1067
+ {
1068
+ "type": "page_number",
1069
+ "text": "5",
1070
+ "bbox": [
1071
+ 493,
1072
+ 924,
1073
+ 503,
1074
+ 935
1075
+ ],
1076
+ "page_idx": 4
1077
+ },
1078
+ {
1079
+ "type": "table",
1080
+ "img_path": "images/31f660b06fd4eb91d7246e8c1b56c4799a731fc1e50550bdfdb17b888d502826.jpg",
1081
+ "table_caption": [
1082
+ "Table 1. Results of NCFM on CIFAR-10/100, and Tiny ImageNet (resolution of $64\\times 64$ ) datasets."
1083
+ ],
1084
+ "table_footnote": [],
1085
+ "table_body": "<table><tr><td>Dataset</td><td colspan=\"3\">CIFAR-10</td><td colspan=\"3\">CIFAR-100</td><td colspan=\"3\">Tiny ImageNet</td></tr><tr><td>IPC</td><td>1</td><td>10</td><td>50</td><td>1</td><td>10</td><td>50</td><td>1</td><td>10</td><td>50</td></tr><tr><td>Ratio (%)</td><td>0.02</td><td>0.2</td><td>1</td><td>0.2</td><td>2</td><td>10</td><td>0.2</td><td>2</td><td>10</td></tr><tr><td>Random</td><td>14.4±2.0</td><td>26.0±1.2</td><td>43.4±1.0</td><td>4.2±0.3</td><td>14.6±0.5</td><td>30.0±0.4</td><td>1.4±0.1</td><td>5.0±0.2</td><td>15.0±0.4</td></tr><tr><td>Herding</td><td>21.5±1.2</td><td>31.6±0.7</td><td>40.4±0.6</td><td>8.4±0.3</td><td>17.3±0.3</td><td>33.7±0.5</td><td>2.8±0.2</td><td>6.3±0.2</td><td>16.7±0.3</td></tr><tr><td>Forgetting</td><td>13.5±1.2</td><td>23.3±1.0</td><td>23.3±1.1</td><td>4.5±0.2</td><td>15.1±0.3</td><td>30.5±0.3</td><td>1.6±0.1</td><td>5.1±0.2</td><td>15.0±0.3</td></tr><tr><td>DC</td><td>28.3±0.5</td><td>44.9±0.5</td><td>53.9±0.5</td><td>12.8±0.3</td><td>25.2±0.3</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>DSA</td><td>28.8±0.7</td><td>52.1±0.5</td><td>60.6±0.5</td><td>13.9±0.3</td><td>32.3±0.3</td><td>42.8±0.4</td><td>-</td><td>-</td><td>-</td></tr><tr><td>DCC</td><td>32.9±0.8</td><td>49.4±0.5</td><td>61.6±0.4</td><td>13.3±0.3</td><td>30.6±0.4</td><td>40.0±0.3</td><td>-</td><td>-</td><td>-</td></tr><tr><td>DSAC</td><td>34.0±0.7</td><td>54.5±0.5</td><td>64.2±0.4</td><td>14.6±0.3</td><td>14.6±0.3</td><td>39.3±0.4</td><td>-</td><td>-</td><td>-</td></tr><tr><td>FrePo</td><td>46.8±0.7</td><td>65.5±0.4</td><td>71.7±0.2</td><td>28.7±0.1</td><td>42.5±0.2</td><td>44.3±0.2</td><td>15.4±0.3</td><td>25.4±0.2</td><td>-</td></tr><tr><td>MTT</td><td>46.3±0.8</td><td>65.3±0.7</td><td>71.6±0.2</td><td>24.3±0.3</td><td>40.1±0.4</td><td>47.7±0.2</td><td>8.8±0.3</td><td>23.2±0.2</td><td>28.0±0.3</td></tr><tr><td>ATT</td><td>48.3±1.0</td><td>67.7±0.6</td><td>74.5±0.4</td><td>26.1±0.3</td><td>44.2±0.5</td><td>51.2±0.3</td><td>11.0±0.5</t
d><td>25.8±0.4</td><td>-</td></tr><tr><td>FTD</td><td>46.8±0.3</td><td>66.6±0.3</td><td>73.8±0.2</td><td>25.2±0.2</td><td>43.4±0.3</td><td>48.5±0.3</td><td>10.4±0.3</td><td>24.5±0.2</td><td>-</td></tr><tr><td>TESLA</td><td>48.5±0.8</td><td>66.4±0.8</td><td>72.6±0.7</td><td>24.8±0.4</td><td>41.7±0.3</td><td>47.9±0.3</td><td>-</td><td>-</td><td>-</td></tr><tr><td>CAFE</td><td>30.3±1.1</td><td>46.3±0.6</td><td>55.5±0.6</td><td>12.9±0.3</td><td>27.8±0.3</td><td>37.9±0.3</td><td>-</td><td>-</td><td>-</td></tr><tr><td>DM</td><td>26.0±0.8</td><td>48.9±0.6</td><td>63.0±0.4</td><td>11.4±0.3</td><td>29.7±0.3</td><td>43.6±0.4</td><td>3.9±0.2</td><td>12.9±0.4</td><td>24.1±0.3</td></tr><tr><td>IDM</td><td>45.6±0.7</td><td>58.6±0.1</td><td>67.5±0.1</td><td>20.1±0.3</td><td>45.1±0.1</td><td>50.0±0.2</td><td>10.1±0.2</td><td>21.9±0.2</td><td>27.7±0.3</td></tr><tr><td>M3D</td><td>45.3±0.3</td><td>63.5±0.2</td><td>69.9±0.5</td><td>26.2±0.3</td><td>42.4±0.2</td><td>50.9±0.7</td><td>-</td><td>-</td><td>-</td></tr><tr><td>IID</td><td>47.1±0.1</td><td>59.9±0.1</td><td>69.0±0.3</td><td>24.6±0.1</td><td>45.7±0.4</td><td>51.3±0.4</td><td>-</td><td>-</td><td>-</td></tr><tr><td>DSDM</td><td>45.0±0.4</td><td>66.5±0.3</td><td>75.8±0.3</td><td>19.5±0.2</td><td>46.2±0.3</td><td>54.0±0.2</td><td>-</td><td>-</td><td>-</td></tr><tr><td>G-VBSM</td><td>-</td><td>46.5±0.7</td><td>54.3±0.3</td><td>16.4±0.7</td><td>38.7±0.2</td><td>45.7±0.4</td><td>-</td><td>-</td><td>-</td></tr><tr><td>NCFM (Ours)</td><td>49.5±0.3</td><td>71.8±0.3</td><td>77.4±0.3</td><td>34.4±0.5</td><td>48.7±0.3</td><td>54.7±0.2</td><td>18.2±0.5</td><td>26.8±0.6</td><td>29.6±0.5</td></tr><tr><td>Whole Dataset</td><td></td><td>84.8±0.1</td><td></td><td></td><td>56.2±0.3</td><td></td><td></td><td>37.6±0.4</td><td></td></tr></table>",
1086
+ "bbox": [
1087
+ 109,
1088
+ 99,
1089
+ 883,
1090
+ 409
1091
+ ],
1092
+ "page_idx": 5
1093
+ },
1094
+ {
1095
+ "type": "text",
1096
+ "text": "ageMeow, ImageSquawk, and ImageYellow [18]. Following prior studies [16, 48], we used networks with instance normalization as the default setting. Specifically, dataset distillation is performed with a 3-layer ConvNet for CIFAR-10/100, a 4-layer ConvNet for Tiny ImageNet, and a 5-layer ConvNet for ImageNet subsets. All experiments were conducted with 10 evaluations for fairness, primarily using a single NVIDIA 4090 GPU.",
1097
+ "bbox": [
1098
+ 88,
1099
+ 426,
1100
+ 480,
1101
+ 546
1102
+ ],
1103
+ "page_idx": 5
1104
+ },
1105
+ {
1106
+ "type": "text",
1107
+ "text": "Other Settings. Following prior works, we implemented differential augmentation [47, 54] and applied multi-formation parameterization with a scale factor of $\\rho = 2$ for images, as in [20, 57]. We employed AdamW as our optimizer. In our setup, we set the number of sampled frequency arguments to 1024. The number of mixture Gaussian components in the sampling network is set to the number of frequency arguments divided by 16, balancing the sampling network diversity and computational efficiency. Further details are provided in the supplementary material.",
1108
+ "bbox": [
1109
+ 88,
1110
+ 547,
1111
+ 482,
1112
+ 700
1113
+ ],
1114
+ "page_idx": 5
1115
+ },
1116
+ {
1117
+ "type": "text",
1118
+ "text": "5.2. Main Results",
1119
+ "text_level": 1,
1120
+ "bbox": [
1121
+ 89,
1122
+ 708,
1123
+ 228,
1124
+ 722
1125
+ ],
1126
+ "page_idx": 5
1127
+ },
1128
+ {
1129
+ "type": "text",
1130
+ "text": "We verified the effectiveness of NCFM on various benchmark datasets of different image-per-class (IPC) settings<sup>1</sup>.",
1131
+ "bbox": [
1132
+ 89,
1133
+ 729,
1134
+ 482,
1135
+ 760
1136
+ ],
1137
+ "page_idx": 5
1138
+ },
1139
+ {
1140
+ "type": "text",
1141
+ "text": "CIFAR-10/100 and Tiny ImageNet. As shown in Table 1, NCFM outperforms all state-of-the-art (SOTA) baselines. Specifically, it surpasses distribution matching methods using traditional metrics like MSE and MMD, achieving improvements of $23.5\\%$ and $23.0\\%$ on CIFAR-10 and CIFAR-100 with 1 IPC compared to DM [55]. Additionally, NCFM maintains SOTA performance even against computationally",
1142
+ "bbox": [
1143
+ 89,
1144
+ 760,
1145
+ 482,
1146
+ 866
1147
+ ],
1148
+ "page_idx": 5
1149
+ },
1150
+ {
1151
+ "type": "text",
1152
+ "text": "intensive methods like MTT [6]. Results for larger IPC settings and comparisons with other SOTA methods like DATM [16] are in the supplementary material.",
1153
+ "bbox": [
1154
+ 511,
1155
+ 426,
1156
+ 906,
1157
+ 472
1158
+ ],
1159
+ "page_idx": 5
1160
+ },
1161
+ {
1162
+ "type": "text",
1163
+ "text": "Higher-resolution Datasets. We also evaluated NCFM on larger datasets, specifically the ImageNet subsets. As shown in Table 2, NCFM demonstrates strong performance across these challenging benchmarks. In 10 IPC setting, our method achieves substantial improvements of $20.5\\%$ on ImageSquawk, compared to the baseline MTT [6]. Remarkably, NCFM exhibits robust performance under relatively small IPC. For instance, compared to RDED [45], NCFM yields a significant improvement of $19.6\\%$ on ImageNette.",
1164
+ "bbox": [
1165
+ 511,
1166
+ 474,
1167
+ 908,
1168
+ 611
1169
+ ],
1170
+ "page_idx": 5
1171
+ },
1172
+ {
1173
+ "type": "text",
1174
+ "text": "Computational Efficiency Evaluation. We tested the training speed and GPU memory of our NCFM compared with strong baseline methods on different datasets. As conventional recognition, trajectory matching based methods usually achieve better results than distribution matching in practice [6, 9, 12]. However, both superior training efficiency and GPU memory efficiency are observed in NCFM across all benchmark datasets, while achieving better results. Specifically, we measured the average training time over 1000 distillation iterations for each method, as summarized in Table 3. For CIFAR-100 at IPC 50, NCFM achieves nearly $30 \\times$ faster speeds compared to TESLA [9] without the sampling network, and maintains over $20 \\times$ faster speeds with the sampling network included. Moreover, we conducted a comprehensive analysis of computational efficiency, where GPU memory is expressed as the peak memory usage during 1000 iterations of training, as shown in Table 3. While most existing methods encounter out of memory (OOM) issues at IPC = 50, our method requires only",
1175
+ "bbox": [
1176
+ 511,
1177
+ 613,
1178
+ 908,
1179
+ 902
1180
+ ],
1181
+ "page_idx": 5
1182
+ },
1183
+ {
1184
+ "type": "page_footnote",
1185
+ "text": "<sup>1</sup>We provide further results on continual learning, neural architecture search, and larger IPC datasets in the supplementary material.",
1186
+ "bbox": [
1187
+ 89,
1188
+ 875,
1189
+ 482,
1190
+ 900
1191
+ ],
1192
+ "page_idx": 5
1193
+ },
1194
+ {
1195
+ "type": "page_number",
1196
+ "text": "6",
1197
+ "bbox": [
1198
+ 493,
1199
+ 925,
1200
+ 504,
1201
+ 936
1202
+ ],
1203
+ "page_idx": 5
1204
+ },
1205
+ {
1206
+ "type": "table",
1207
+ "img_path": "images/288029e4f3b130e2b143a20d201bbf4b9515a180759df5c41fbc081cb764ff05.jpg",
1208
+ "table_caption": [
1209
+ "Table 2. Results on ImageNet subsets (resolution of ${128} \\times {128}$ ) when employing NCFM across different IPCs."
1210
+ ],
1211
+ "table_footnote": [],
1212
+ "table_body": "<table><tr><td>Dataset</td><td colspan=\"2\">ImageNette</td><td colspan=\"2\">ImageWoof</td><td colspan=\"2\">ImageFruit</td><td colspan=\"2\">ImageMeow</td><td colspan=\"2\">ImageSquawk</td><td colspan=\"2\">ImageYellow</td></tr><tr><td>IPC</td><td>1</td><td>10</td><td>1</td><td>10</td><td>1</td><td>10</td><td>1</td><td>10</td><td>1</td><td>10</td><td>1</td><td>10</td></tr><tr><td>Ratio (%)</td><td>0.105</td><td>1.050</td><td>0.110</td><td>1.100</td><td>0.077</td><td>0.77</td><td>0.077</td><td>0.77</td><td>0.077</td><td>0.77</td><td>0.077</td><td>0.77</td></tr><tr><td>Random</td><td>23.5±4.8</td><td>47.7±2.4</td><td>14.2±0.9</td><td>27.0±1.9</td><td>13.2±0.8</td><td>21.4±1.2</td><td>13.8±0.6</td><td>29.0±1.1</td><td>21.8±0.5</td><td>40.2±0.4</td><td>20.4±0.6</td><td>37.4±0.5</td></tr><tr><td>MTT</td><td>47.7±0.9</td><td>63.0±1.3</td><td>28.6±0.8</td><td>35.8±1.8</td><td>26.6±0.8</td><td>40.3±1.3</td><td>30.7±1.6</td><td>40.4±2.2</td><td>39.4±1.5</td><td>52.3±1.0</td><td>45.2±0.8</td><td>60.0±1.5</td></tr><tr><td>DM</td><td>32.8±0.5</td><td>58.1±0.3</td><td>21.1±1.2</td><td>31.4±0.5</td><td>-</td><td>-</td><td>-</td><td>-</td><td>31.2±0.7</td><td>50.4±1.2</td><td>-</td><td>-</td></tr><tr><td>RDED</td><td>33.8±0.8</td><td>63.2±0.7</td><td>18.5±0.9</td><td>40.6±2.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>NCFM (Ours)</td><td>53.4±1.6</td><td>77.6±1.0</td><td>27.2±1.1</td><td>48.4±1.3</td><td>29.2±0.7</td><td>44.8±1.5</td><td>34.6±0.7</td><td>58.2±1.2</td><td>41.6±1.2</td><td>72.8±0.9</td><td>46.6±1.5</td><td>74.2±1.4</td></tr><tr><td>Whole Dataset</td><td colspan=\"2\">87.4±1.0</td><td colspan=\"2\">67.0±1.3</td><td colspan=\"2\">63.9±2.0</td><td colspan=\"2\">66.7±1.1</td><td colspan=\"2\">87.5±0.3</td><td colspan=\"2\">84.4±0.6</td></tr></table>",
1213
+ "bbox": [
1214
+ 109,
1215
+ 104,
1216
+ 879,
1217
+ 220
1218
+ ],
1219
+ "page_idx": 6
1220
+ },
1221
+ {
1222
+ "type": "table",
1223
+ "img_path": "images/e62ce227e3b03c9b0673fca36cf50e024c1eaa6fd4b664f21b3f5d256d404370.jpg",
1224
+ "table_caption": [
1225
+ "Table 3. Training speed (s/iter) and peak GPU memory (GB) comparison on a single NVIDIA A100 80G. OOM marks out-of-memory cases. 'Reduction' shows NCFM's speed and memory improvements over the best-performing baseline in the table."
1226
+ ],
1227
+ "table_footnote": [],
1228
+ "table_body": "<table><tr><td>Resource</td><td colspan=\"4\">Speed (s/iter)</td><td colspan=\"4\">GPU Memory (GB)</td></tr><tr><td>Dataset</td><td colspan=\"2\">CIFAR-100</td><td colspan=\"2\">Tiny ImageNet</td><td colspan=\"2\">CIFAR-100</td><td colspan=\"2\">Tiny ImageNet</td></tr><tr><td>IPC</td><td>10</td><td>50</td><td>10</td><td>50</td><td>10</td><td>50</td><td>10</td><td>50</td></tr><tr><td>MTT</td><td>1.92</td><td>OOM</td><td>OOM</td><td>OOM</td><td>61.6</td><td>OOM</td><td>OOM</td><td>OOM</td></tr><tr><td>FTD</td><td>1.68</td><td>OOM</td><td>OOM</td><td>OOM</td><td>61.4</td><td>OOM</td><td>OOM</td><td>OOM</td></tr><tr><td>TESLA</td><td>5.71</td><td>28.24</td><td>42.01</td><td>OOM</td><td>10.3</td><td>44.2</td><td>69.6</td><td>OOM</td></tr><tr><td>DATM</td><td>OOM</td><td>OOM</td><td>OOM</td><td>OOM</td><td>OOM</td><td>OOM</td><td>OOM</td><td>OOM</td></tr><tr><td>NCFM w/o ψ</td><td>0.73</td><td>0.96</td><td>2.40</td><td>5.67</td><td>1.4</td><td>1.9</td><td>6.4</td><td>8.4</td></tr><tr><td>Reduction</td><td>2.3×</td><td>29.4×</td><td>17.5×</td><td>-</td><td>7.4×</td><td>23.3×</td><td>10.9×</td><td>-</td></tr><tr><td>NCFM</td><td>1.33</td><td>1.36</td><td>3.27</td><td>7.22</td><td>1.6</td><td>2.0</td><td>6.5</td><td>8.7</td></tr><tr><td>Reduction</td><td>1.3×</td><td>20.8×</td><td>12.8×</td><td>-</td><td>6.4×</td><td>22.1×</td><td>10.7×</td><td>-</td></tr></table>",
1229
+ "bbox": [
1230
+ 89,
1231
+ 287,
1232
+ 491,
1233
+ 450
1234
+ ],
1235
+ "page_idx": 6
1236
+ },
1237
+ {
1238
+ "type": "text",
1239
+ "text": "about 1.9GB GPU memory on CIFAR-100. This further demonstrates the exceptional scalability of our approach under high IPC conditions. Further results on CIFAR-10 are provided in the supplementary material.",
1240
+ "bbox": [
1241
+ 88,
1242
+ 470,
1243
+ 482,
1244
+ 530
1245
+ ],
1246
+ "page_idx": 6
1247
+ },
1248
+ {
1249
+ "type": "text",
1250
+ "text": "Cross-Architecture Generalization. We evaluated the cross-architecture generalization capability of our method by testing its performance on various network architectures, including AlexNet [22], VGG-11 [42], and ResNet-18 [17]. In this evaluation, synthetic data were condensed using a 3-layer ConvNet, and each method was subsequently tested across different architectures to assess robustness and adaptability. Tables 4 summarize the results on CIFAR-10 with 10 and 50 IPC settings, respectively. In both cases, NCFM consistently outperformed other methods across all architectures, demonstrating its strong ability to generalize effectively even when trained on a different architecture. Results on other backbone networks beyond ConvNet are provided in the supplementary material.",
1251
+ "bbox": [
1252
+ 88,
1253
+ 531,
1254
+ 482,
1255
+ 743
1256
+ ],
1257
+ "page_idx": 6
1258
+ },
1259
+ {
1260
+ "type": "text",
1261
+ "text": "5.3. Ablation Study",
1262
+ "text_level": 1,
1263
+ "bbox": [
1264
+ 89,
1265
+ 753,
1266
+ 243,
1267
+ 768
1268
+ ],
1269
+ "page_idx": 6
1270
+ },
1271
+ {
1272
+ "type": "text",
1273
+ "text": "5.3.1. Effect of the Sampling Network",
1274
+ "text_level": 1,
1275
+ "bbox": [
1276
+ 89,
1277
+ 775,
1278
+ 356,
1279
+ 789
1280
+ ],
1281
+ "page_idx": 6
1282
+ },
1283
+ {
1284
+ "type": "text",
1285
+ "text": "To rigorously evaluate the impact of the sampling network, $\\psi$ , within the minmax paradigm of NCFM, we conducted performance comparisons with and without this component. To ensure a controlled and fair assessment, no additional data curation techniques were applied (such as fine-tuning or soft label integration). As shown in Table 5, employing the sampling network $\\psi$ yields substantial improvements in",
1286
+ "bbox": [
1287
+ 88,
1288
+ 794,
1289
+ 482,
1290
+ 900
1291
+ ],
1292
+ "page_idx": 6
1293
+ },
1294
+ {
1295
+ "type": "table",
1296
+ "img_path": "images/258729d3bfa7affd517efcde788ba5cf064ca059f2b82b017a42d3cf7e9c972b.jpg",
1297
+ "table_caption": [
1298
+ "Table 4. Cross-architecture generalization performance $(\\%)$ on CIFAR-10. The synthetic data is condensed using ConvNet, and each method is evaluated on different architectures."
1299
+ ],
1300
+ "table_footnote": [],
1301
+ "table_body": "<table><tr><td>IPC</td><td>Method</td><td>ConvNet</td><td>AlexNet</td><td>VGG</td><td>ResNet</td></tr><tr><td rowspan=\"4\">10</td><td>DSA</td><td>52.1±0.4</td><td>35.9±1.3</td><td>43.2±0.5</td><td>35.9±1.3</td></tr><tr><td>MTT</td><td>64.3±0.7</td><td>34.2±2.6</td><td>50.3±0.8</td><td>34.2±2.6</td></tr><tr><td>KIP</td><td>47.6±0.9</td><td>24.4±3.9</td><td>42.1±0.4</td><td>24.4±3.9</td></tr><tr><td>NCFM</td><td>71.8±0.3</td><td>67.9±0.5</td><td>68.0±0.3</td><td>67.7±0.5</td></tr><tr><td rowspan=\"3\">50</td><td>DSA</td><td>59.9±0.8</td><td>53.3±0.7</td><td>51.0±1.1</td><td>47.3±1.0</td></tr><tr><td>DM</td><td>65.2±0.4</td><td>61.3±0.6</td><td>59.9±0.8</td><td>57.0±0.9</td></tr><tr><td>NCFM</td><td>77.4±0.3</td><td>75.5±0.3</td><td>75.5±0.3</td><td>73.8±0.2</td></tr></table>",
1302
+ "bbox": [
1303
+ 517,
1304
+ 272,
1305
+ 915,
1306
+ 406
1307
+ ],
1308
+ "page_idx": 6
1309
+ },
1310
+ {
1311
+ "type": "text",
1312
+ "text": "synthetic data quality across various datasets. For example, integrating $\\psi$ into our method provides a $3.2\\%$ performance increase on CIFAR-10 at 50 IPC. Our method yields a $2.6\\%$ performance increase on Tiny ImageNet at 1 IPC and $10.1\\%$ at 10 IPC. Similar trends are observed across ImageNet subsets, including gains of $2.8\\%$ on ImageMeow and $2.0\\%$ on ImageSquawk. The strong performance benefits from sampling network $\\psi$ emphasize the effectiveness of the minmax paradigm compared to straightforward CFD minimization.",
1313
+ "bbox": [
1314
+ 511,
1315
+ 431,
1316
+ 906,
1317
+ 568
1318
+ ],
1319
+ "page_idx": 6
1320
+ },
1321
+ {
1322
+ "type": "text",
1323
+ "text": "5.3.2. Impact of Amplitude and Phase Components",
1324
+ "text_level": 1,
1325
+ "bbox": [
1326
+ 511,
1327
+ 601,
1328
+ 872,
1329
+ 617
1330
+ ],
1331
+ "page_idx": 6
1332
+ },
1333
+ {
1334
+ "type": "text",
1335
+ "text": "We examine individual contributions of amplitude and phase alignment within the NCFD measure. By selectively adjusting amplitude or phase alignment, controlled by the hyperparameter $\\alpha$ that represents the ratio of amplitude to phase weight in the loss function, we find that both components are essential. To further evaluate the effect of $\\alpha$ on performance, we conducted ablation studies on the CIFAR-10 and CIFAR-100 datasets. As noted in prior works [32, 35], the amplitude term primarily enhances the diversity of generated data, while the phase term contributes to realism by accurately capturing data centers. For example, as shown in Figure 5, on CIFAR-10 with 10 IPC, when the amplitude information dominates the loss (e.g., $\\alpha = 0.999$ ), the test accuracy decreases about $3\\%$ compared to our best results. Conversely, when the phase information dominates (e.g., $\\alpha = 0.001$ ), the test accuracy decreases by about $1\\%$ . Results demonstrate that a balanced integration of both components yields the highest accuracy.",
1336
+ "bbox": [
1337
+ 509,
1338
+ 628,
1339
+ 906,
1340
+ 900
1341
+ ],
1342
+ "page_idx": 6
1343
+ },
1344
+ {
1345
+ "type": "page_number",
1346
+ "text": "7",
1347
+ "bbox": [
1348
+ 493,
1349
+ 924,
1350
+ 504,
1351
+ 935
1352
+ ],
1353
+ "page_idx": 6
1354
+ },
1355
+ {
1356
+ "type": "table",
1357
+ "img_path": "images/c0df3dee8557adfb51d2899dfbe19abdbe2047a67eb0863a8030f47730ef98cb.jpg",
1358
+ "table_caption": [
1359
+ "Table 5. Test Performance (%) on CIFAR-10, CIFAR-100, Tiny ImageNet and ImageNet subsets with and without the sampling network $\\psi$ . We find that sampling network $\\psi$ significantly improves performance, even without additional data curation steps."
1360
+ ],
1361
+ "table_footnote": [],
1362
+ "table_body": "<table><tr><td>Dataset</td><td colspan=\"2\">CIFAR-10</td><td colspan=\"2\">CIFAR-100</td><td colspan=\"3\">Tiny ImageNet</td><td>ImageFruit</td><td>ImageMeow</td><td>ImageSquawk</td><td>ImageYellow</td></tr><tr><td>IPC</td><td>10</td><td>50</td><td>10</td><td>50</td><td>1</td><td>10</td><td>50</td><td>10</td><td>10</td><td>10</td><td>10</td></tr><tr><td>NCFM w/o ψ</td><td>65.6</td><td>74.2</td><td>45.9</td><td>53.7</td><td>9.4</td><td>14.2</td><td>22.0</td><td>39.6</td><td>51.6</td><td>68.8</td><td>67.6</td></tr><tr><td>NCFM</td><td>68.9</td><td>77.4</td><td>48.7</td><td>54.4</td><td>12.0</td><td>24.3</td><td>26.5</td><td>41.4</td><td>54.4</td><td>70.8</td><td>69.2</td></tr></table>",
1363
+ "bbox": [
1364
+ 109,
1365
+ 108,
1366
+ 879,
1367
+ 171
1368
+ ],
1369
+ "page_idx": 7
1370
+ },
1371
+ {
1372
+ "type": "image",
1373
+ "img_path": "images/5179072a2d0bae3429db742d4c7dc1afa0e10403a7aee3f4cfd9808b1903bc5c.jpg",
1374
+ "image_caption": [
1375
+ "Figure 5. Impact of amplitude and phase components in the NCFD measure across various datasets and IPC settings. The figure illustrates the relationship between the amplitude-to-phase ratio $\\alpha$ in Eq. (8). Results indicate that balancing amplitude (for diversity) and phase (for realism) information leads to improved performance. Baseline results were obtained using DM [55]."
1376
+ ],
1377
+ "image_footnote": [],
1378
+ "bbox": [
1379
+ 96,
1380
+ 183,
1381
+ 478,
1382
+ 324
1383
+ ],
1384
+ "page_idx": 7
1385
+ },
1386
+ {
1387
+ "type": "text",
1388
+ "text": "5.3.3. Effect of the Number of Sampled Frequency Arguments in NCFD",
1389
+ "text_level": 1,
1390
+ "bbox": [
1391
+ 89,
1392
+ 422,
1393
+ 444,
1394
+ 454
1395
+ ],
1396
+ "page_idx": 7
1397
+ },
1398
+ {
1399
+ "type": "text",
1400
+ "text": "To assess the impact of the number of sampled frequency arguments, $t$ , generated by the sampling network $\\psi$ , we varied the sample count and measured the corresponding performance. As illustrated in Figure 6, increasing the number of sampled arguments initially enhances the quality of synthetic data by facilitating finer distributional alignment. For example, accuracy on CIFAR-10 at 10 IPC improves from $62\\%$ with 16 sampled frequency arguments to approximately $67\\%$ with 1024, indicating a positive correlation between the sampled number and accuracy. However, beyond 1024 arguments, performance gains plateau, with accuracy stabilizing around $67 - 68\\%$ even as the sampling number increases to 4096. This trend suggests that a moderate number achieves an optimal balance between computational efficiency and accuracy. We observed that additional cost remains minimal as the number of sampled arguments increases, underscoring NCFM's ability to produce high-quality synthetic data with low computational cost.",
1401
+ "bbox": [
1402
+ 88,
1403
+ 460,
1404
+ 482,
1405
+ 733
1406
+ ],
1407
+ "page_idx": 7
1408
+ },
1409
+ {
1410
+ "type": "image",
1411
+ "img_path": "images/b06a52c4e0156d036e365301165ae4f543fe9c2bd6a7902212815545e1f86563.jpg",
1412
+ "image_caption": [
1413
+ "Figure 6. Impact of sampled frequency count in NCFD on accuracy across datasets and IPC. Increasing frequencies improves accuracy up to a threshold, beyond which gains diminish."
1414
+ ],
1415
+ "image_footnote": [],
1416
+ "bbox": [
1417
+ 142,
1418
+ 739,
1419
+ 429,
1420
+ 851
1421
+ ],
1422
+ "page_idx": 7
1423
+ },
1424
+ {
1425
+ "type": "text",
1426
+ "text": "6. Discussion",
1427
+ "text_level": 1,
1428
+ "bbox": [
1429
+ 513,
1430
+ 186,
1431
+ 627,
1432
+ 203
1433
+ ],
1434
+ "page_idx": 7
1435
+ },
1436
+ {
1437
+ "type": "text",
1438
+ "text": "6.1. Training stability of NCFD",
1439
+ "text_level": 1,
1440
+ "bbox": [
1441
+ 511,
1442
+ 212,
1443
+ 756,
1444
+ 228
1445
+ ],
1446
+ "page_idx": 7
1447
+ },
1448
+ {
1449
+ "type": "text",
1450
+ "text": "The training stability of our minmax paradigm is crucial to its effectiveness. Unlike traditional discrepancy measures, NCFM operates within the complex plane to conduct minmax optimization. While instability is a common issue in minmax adversarial optimization, as seen in generative adversarial networks [2, 37, 39], NCFM consistently maintains stable optimization throughout training, as illustrated in Figure 7. This stability is further supported by theoretical guarantees of weak convergence in Theorem 1, demonstrating the robustness of the CF-based discrepancy under diverse conditions and contributing to NCFM's reliable convergence across datasets.",
1451
+ "bbox": [
1452
+ 511,
1453
+ 234,
1454
+ 906,
1455
+ 416
1456
+ ],
1457
+ "page_idx": 7
1458
+ },
1459
+ {
1460
+ "type": "image",
1461
+ "img_path": "images/31d6ecc48d88667965cb2632861025d2a6397601fdba5f9e9c483e36bc71c2e4.jpg",
1462
+ "image_caption": [
1463
+ "Figure 7. Training dynamics of the minmax optimization process across different datasets and various IPC settings."
1464
+ ],
1465
+ "image_footnote": [],
1466
+ "bbox": [
1467
+ 568,
1468
+ 430,
1469
+ 854,
1470
+ 536
1471
+ ],
1472
+ "page_idx": 7
1473
+ },
1474
+ {
1475
+ "type": "text",
1476
+ "text": "6.2. Correlation between CFD and MMD",
1477
+ "text_level": 1,
1478
+ "bbox": [
1479
+ 511,
1480
+ 590,
1481
+ 833,
1482
+ 606
1483
+ ],
1484
+ "page_idx": 7
1485
+ },
1486
+ {
1487
+ "type": "text",
1488
+ "text": "To better understand NCFM, we examine the relationship between the Characteristic Function Discrepancy (CFD) and Maximum Mean Discrepancy (MMD).",
1489
+ "bbox": [
1490
+ 511,
1491
+ 613,
1492
+ 905,
1493
+ 657
1494
+ ],
1495
+ "page_idx": 7
1496
+ },
1497
+ {
1498
+ "type": "text",
1499
+ "text": "CF as Well- behaved Kernels in the MMD Metric. The CF discrepancy term $\\int_t\\sqrt{\\mathrm{Chf}(t;f)} dF_{\\mathcal{T}}(t)$ in our loss can be viewed as a well-behaved kernel in MMD, specifically as a Characteristic Kernel [43]. Unlike MMD, which relies on fixed kernels, NCFM adaptively learns $F_{\\mathcal{T}}(t)$ , enabling flexible kernel selection for optimal distribution alignment. Furthermore, mixtures of Gaussian distributions within the CF framework produce well-defined characteristic kernels. When MMD employs a characteristic kernel of the form $\\int_t e^{-j\\langle t,x - \\tilde{x}\\rangle}dF_{\\mathcal{T}}(t)$ , it aligns with the structure of CFD, demonstrating that MMD is a special case of CFD when only specific moments are matched. This insight also explains the minimal memory overhead observed as IPC grows, highlighting the efficiency of our approach.",
1500
+ "bbox": [
1501
+ 511,
1502
+ 659,
1503
+ 906,
1504
+ 869
1505
+ ],
1506
+ "page_idx": 7
1507
+ },
1508
+ {
1509
+ "type": "text",
1510
+ "text": "Computational Advantage of CFD over MMD. In contrast to MMD, which requires quadratic time in the number",
1511
+ "bbox": [
1512
+ 511,
1513
+ 869,
1514
+ 905,
1515
+ 900
1516
+ ],
1517
+ "page_idx": 7
1518
+ },
1519
+ {
1520
+ "type": "page_number",
1521
+ "text": "8",
1522
+ "bbox": [
1523
+ 493,
1524
+ 924,
1525
+ 503,
1526
+ 935
1527
+ ],
1528
+ "page_idx": 7
1529
+ },
1530
+ {
1531
+ "type": "text",
1532
+ "text": "of samples for approximate computation, CFD operates in linear time relative to the sampling number of frequency arguments, which aligns results in [1]. This efficiency makes CFD substantially faster and more scalable than MMD, offering a particular advantage for large-scale datasets.",
1533
+ "bbox": [
1534
+ 94,
1535
+ 92,
1536
+ 480,
1537
+ 165
1538
+ ],
1539
+ "page_idx": 8
1540
+ },
1541
+ {
1542
+ "type": "text",
1543
+ "text": "7. Conclusion",
1544
+ "text_level": 1,
1545
+ "bbox": [
1546
+ 94,
1547
+ 181,
1548
+ 207,
1549
+ 195
1550
+ ],
1551
+ "page_idx": 8
1552
+ },
1553
+ {
1554
+ "type": "text",
1555
+ "text": "In this work, we redefined distribution matching for dataset distillation as a minmax optimization problem and introduced Neural Characteristic Function Discrepancy (NCFD), a novel and theoretically grounded metric designed to maximize the separability between real and synthetic data. Leveraging the Characteristic Function (CF), our method dynamically adjusts NCFD to align both phase and amplitude information in the complex plane, achieving a balance between realism and diversity. Extensive experiments demonstrated the computational efficiency of our approach, which achieves state-of-the-art performance with minimal computational overhead, showcasing its scalability and practicality for large-scale applications.",
1556
+ "bbox": [
1557
+ 94,
1558
+ 207,
1559
+ 480,
1560
+ 401
1561
+ ],
1562
+ "page_idx": 8
1563
+ },
1564
+ {
1565
+ "type": "page_number",
1566
+ "text": "9",
1567
+ "bbox": [
1568
+ 493,
1569
+ 925,
1570
+ 503,
1571
+ 935
1572
+ ],
1573
+ "page_idx": 8
1574
+ },
1575
+ {
1576
+ "type": "text",
1577
+ "text": "References",
1578
+ "text_level": 1,
1579
+ "bbox": [
1580
+ 91,
1581
+ 89,
1582
+ 187,
1583
+ 104
1584
+ ],
1585
+ "page_idx": 9
1586
+ },
1587
+ {
1588
+ "type": "list",
1589
+ "sub_type": "ref_text",
1590
+ "list_items": [
1591
+ "[1] Abdul Fatir Ansari, Jonathan Scarlett, and Harold Soh. A characteristic function approach to deep implicit generative modeling. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7478-7487, 2020. 3, 5, 9",
1592
+ "[2] Martin Arjovsky and Léon Bottou. Towards principled methods for training generative adversarial networks. arXiv preprint arXiv:1701.04862, 2017. 8",
1593
+ "[3] Patrick Billingsley. Probability and measure. John Wiley & Sons, 2017. 2, 3",
1594
+ "[4] Mikołaj Binkowski, Danica J Sutherland, Michael Arbel, and Arthur Gretton. Demystifying mmd gans. arXiv preprint arXiv:1801.01401, 2018. 2",
1595
+ "[5] Torben Maack Bisgaard and Zoltán Sasvari. Characteristic functions and moment sequences: positive definiteness in probability. Nova Publishers, 2000. 2, 3",
1596
+ "[6] George Cazenavette, Tongzhou Wang, Antonio Torralba, Alexei A Efros, and Jun-Yan Zhu. Dataset distillation by matching training trajectories. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4750-4759, 2022. 1, 2, 3, 5, 6",
1597
+ "[7] Dingfan Chen, Raouf Kerkouche, and Mario Fritz. Private set generation with discriminative information. Advances in Neural Information Processing Systems, 35:14678-14690, 2022. 1",
1598
+ "[8] Ming-Yu Chung, Sheng-Yen Chou, Chia-Mu Yu, Pin-Yu Chen, Sy-Yen Kuo, and Tsung-Yi Ho. Rethinking backdoor attacks on dataset distillation: A kernel method perspective. arXiv preprint arXiv:2311.16646, 2023. 1",
1599
+ "[9] Justin Cui, Ruochen Wang, Si Si, and Cho-Jui Hsieh. Scaling up dataset distillation to imagenet-1k with constant memory. In International Conference on Machine Learning, pages 6565–6590. PMLR, 2023. 3, 5, 6",
1600
+ "[10] Wenxiao Deng, Wenbin Li, Tianyu Ding, Lei Wang, Hongguang Zhang, Kuihua Huang, Jing Huo, and Yang Gao. Exploiting inter-sample and inter-feature relations in dataset distillation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17057-17066, 2024. 2, 3, 5",
1601
+ "[11] Tian Dong, Bo Zhao, and Lingjuan Lyu. Privacy for free: How does dataset condensation help privacy? In International Conference on Machine Learning, pages 5378-5396. PMLR, 2022. 1",
1602
+ "[12] Jiawei Du, Yidi Jiang, Vincent YF Tan, Joel Tianyi Zhou, and Haizhou Li. Minimizing the accumulated trajectory error to improve dataset distillation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3749-3758, 2023. 3, 5, 6",
1603
+ "[13] Leonard Euler. On transcending quantities arising from the circle. Chapter, 8, 1748. 2",
1604
+ "[14] Andrey Feuerverger and Roman A Mureika. The empirical characteristic function and its applications. The annals of Statistics, pages 88-97, 1977. 2, 4",
1605
+ "[15] Jianyang Gu, Kai Wang, Wei Jiang, and Yang You. Summarizing stream data for memory-constrained online continual"
1606
+ ],
1607
+ "bbox": [
1608
+ 93,
1609
+ 114,
1610
+ 483,
1611
+ 900
1612
+ ],
1613
+ "page_idx": 9
1614
+ },
1615
+ {
1616
+ "type": "list",
1617
+ "sub_type": "ref_text",
1618
+ "list_items": [
1619
+ "learning. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 12217-12225, 2024. 1",
1620
+ "[16] Ziyao Guo, Kai Wang, George Cazenavette, Hui Li, Kaipeng Zhang, and Yang You. Towards lossless dataset distillation via difficulty-aligned trajectory matching. arXiv preprint arXiv:2310.05773, 2023. 3, 5, 6",
1621
+ "[17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 7",
1622
+ "[18] Jeremy Howard and Sylvain Gugger. Fastai: a layered api for deep learning. Information, 11(2):108, 2020. 6",
1623
+ "[19] Seoungyoon Kang, Youngsun Lim, and Hyunjung Shim. Label-augmented dataset distillation. arXiv preprint arXiv:2409.16239, 2024. 5",
1624
+ "[20] Jang-Hyun Kim, Jinuk Kim, Seong Joon Oh, Sangdoo Yun, Hwanjun Song, Joonhyun Jeong, Jung-Woo Ha, and Hyun Oh Song. Dataset condensation via efficient synthetic-data parameterization. In International Conference on Machine Learning, pages 11102-11118. PMLR, 2022. 2, 3, 6",
1625
+ "[21] Stephen M Kogon and Douglas B Williams. Characteristic function based estimation of stable distribution parameters. A practical guide to heavy tails: statistical techniques and applications, pages 311-338, 1998. 2",
1626
+ "[22] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5, 7",
1627
+ "[23] Ya Le and Xuan Yang. Tiny imagenet visual recognition challenge. CS 231N, 7(7):3, 2015. 5",
1628
+ "[24] Saehyung Lee, Sanghyuk Chun, Sangwon Jung, Sangdoo Yun, and Sungroh Yoon. Dataset condensation with contrastive signals. In International Conference on Machine Learning, pages 12352-12364. PMLR, 2022. 2, 3, 5",
1629
+ "[25] Chun-Liang Li, Wei-Cheng Chang, Yu Cheng, Yiming Yang, and Barnabás Póczos. Mmd gan: Towards deeper understanding of moment matching network. Advances in neural information processing systems, 30, 2017. 2",
1630
+ "[26] Hongcheng Li, Yucan Zhou, Xiaoyan Gu, Bo Li, and Weiping Wang. Diversified semantic distribution matching for dataset distillation. In ACM Multimedia 2024, 2024. 5",
1631
+ "[27] Shengxi Li, Zeyang Yu, Min Xiang, and Danilo Mandic. Reciprocal adversarial learning via characteristic functions. Advances in Neural Information Processing Systems, 33:217-228, 2020. 3, 5",
1632
+ "[28] Shengxi Li, Jialu Zhang, Yifei Li, Mai Xu, Xin Deng, and Li Li. Neural characteristic function learning for conditional image generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7204-7214, 2023. 5",
1633
+ "[29] Zhe Li and Bernhard Kainz. Image distillation for safe data sharing in histopathology. In International Conference on Medical Image Computing and Computer-Assisted Intervention, pages 459-469. Springer, 2024. 1",
1634
+ "[30] Dai Liu, Jindong Gu, Hu Cao, Carsten Trinitis, and Martin Schulz. Dataset distillation by automatic training trajectories. arXiv preprint arXiv:2407.14245, 2024. 5",
1635
+ "[31] Paul Lévy. Théorie de l'addition des variables aléatoires. Gauthier-Villars, Paris, 1937. 2, 4, 5"
1636
+ ],
1637
+ "bbox": [
1638
+ 516,
1639
+ 92,
1640
+ 903,
1641
+ 900
1642
+ ],
1643
+ "page_idx": 9
1644
+ },
1645
+ {
1646
+ "type": "page_number",
1647
+ "text": "10",
1648
+ "bbox": [
1649
+ 490,
1650
+ 924,
1651
+ 508,
1652
+ 936
1653
+ ],
1654
+ "page_idx": 9
1655
+ },
1656
+ {
1657
+ "type": "list",
1658
+ "sub_type": "ref_text",
1659
+ "list_items": [
1660
+ "[32] Danilo P Mandic and Anthony G Constantinides. Complex valued nonlinear adaptive filters: state of the art. Signal Processing, 89(9):1704-1725, 2009. 5, 7",
1661
+ "[33] Dmitry Medvedev and Alexander D'yakonov. Learning to generate synthetic training data using gradient matching and implicit differentiation. In International Conference on Analysis of Images, Social Networks and Texts, pages 138-150. Springer, 2021. 1",
1662
+ "[34] Timothy Nguyen, Zhourong Chen, and Jaehoon Lee. Dataset meta-learning from kernel ridge-regression. arXiv preprint arXiv:2011.00050, 2020. 5",
1663
+ "[35] Alan V Oppenheim and Jae S Lim. The importance of phase in signals. Proceedings of the IEEE, 69(5):529-541, 1981. 5, 7",
1664
+ "[36] Tian Qin, Zhiwei Deng, and David Alvarez-Melis. A label is worth a thousand images in dataset distillation. arXiv preprint arXiv:2406.10485, 2024. 5",
1665
+ "[37] Alec Radford. Unsupervised representation learning with deep convolutional generative adversarial networks. arXiv preprint arXiv:1511.06434, 2015. 8",
1666
+ "[38] Ahmad Sajedi, Samir Khaki, Ehsan Amjadian, Lucy Z Liu, Yuri A Lawryshyn, and Konstantinos N Plataniotis. Datadam: Efficient dataset distillation with attention matching. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17097-17107, 2023. 2, 3",
1667
+ "[39] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 8",
1668
+ "[40] Shitong Shao, Zeyuan Yin, Muxin Zhou, Xindong Zhang, and Zhiqiang Shen. Generalized large-scale data condensation via various backbone and statistical matching. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16709-16718, 2024. 5",
1669
+ "[41] Neil G Shephard. From characteristic function to distribution function: a simple framework for the theory. Econometric theory, 7(4):519-529, 1991. 2",
1670
+ "[42] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 7",
1671
+ "[43] Bharath K Sriperumbudur, Arthur Gretton, Kenji Fukumizu, Bernhard Schölkopf, and Gert RG Lanckriet. Hilbert space embeddings and metrics on probability measures. The Journal of Machine Learning Research, 11:1517-1561, 2010. 8",
1672
+ "[44] Felipe Petroski Such, Aditya Rawal, Joel Lehman, Kenneth Stanley, and Jeffrey Clune. Generative teaching networks: Accelerating neural architecture search by learning to generate synthetic training data. In International Conference on Machine Learning, pages 9206-9216. PMLR, 2020. 1",
1673
+ "[45] Peng Sun, Bei Shi, Daiwei Yu, and Tao Lin. On the diversity and realism of distilled dataset: An efficient dataset distillation paradigm. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9390-9399, 2024. 5, 6",
1674
+ "[46] Mariya Toneva, Alessandro Sordoni, Remi Tachet des Combes, Adam Trischler, Yoshua Bengio, and Geoffrey J Gordon. An empirical study of example forget-"
1675
+ ],
1676
+ "bbox": [
1677
+ 91,
1678
+ 90,
1679
+ 482,
1680
+ 900
1681
+ ],
1682
+ "page_idx": 10
1683
+ },
1684
+ {
1685
+ "type": "list",
1686
+ "sub_type": "ref_text",
1687
+ "list_items": [
1688
+ "ting during deep neural network learning. arXiv preprint arXiv:1812.05159, 2018. 5",
1689
+ "[47] Kai Wang, Bo Zhao, Xiangyu Peng, Zheng Zhu, Shuo Yang, Shuo Wang, Guan Huang, Hakan Bilen, Xinchao Wang, and Yang You. Cafe: Learning to condense dataset by aligning features. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12196-12205, 2022. 1, 2, 3, 5, 6",
1690
+ "[48] Shaobo Wang, Yantai Yang, Shuaiyu Zhang, Chenghao Sun, Weiya Li, Xuming Hu, and Linfeng Zhang. Drupi: Dataset reduction using privileged information, 2024. 6",
1691
+ "[49] Tongzhou Wang, Jun-Yan Zhu, Antonio Torralba, and Alexei A Efros. Dataset distillation. arXiv preprint arXiv:1811.10959, 2018. 1, 3",
1692
+ "[50] Max Welling. Herding dynamical weights to learn. In Proceedings of the 26th annual international conference on machine learning, pages 1121-1128, 2009. 5",
1693
+ "[51] Enneng Yang, Li Shen, Zhenyi Wang, Tongliang Liu, and Guibing Guo. An efficient dataset condensation plugin and its application to continual learning. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 1",
1694
+ "[52] Zeyuan Yin, Eric Xing, and Zhiqiang Shen. Squeeze, recover and relabel: Dataset condensation at imagenet scale from a new perspective. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 1",
1695
+ "[53] Hansong Zhang, Shikun Li, Pengju Wang, Dan Zeng, and Shiming Ge. M3d: Dataset condensation by minimizing maximum mean discrepancy. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 9314-9322, 2024. 2, 3, 5",
1696
+ "[54] Bo Zhao and Hakan Bilen. Dataset condensation with differentiable siamese augmentation. In International Conference on Machine Learning, pages 12674-12685. PMLR, 2021. 2, 3, 5, 6",
1697
+ "[55] Bo Zhao and Hakan Bilen. Dataset condensation with distribution matching, 2022. 1, 2, 3, 5, 6, 8",
1698
+ "[56] Bo Zhao, Konda Reddy Mopuri, and Hakan Bilen. Dataset condensation with gradient matching. arXiv preprint arXiv:2006.05929, 2020. 1, 2, 3, 5",
1699
+ "[57] Ganlong Zhao, Guanbin Li, Yipeng Qin, and Yizhou Yu. Improved distribution matching for dataset condensation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7856-7865, 2023. 2, 3, 5, 6",
1700
+ "[58] Yongchao Zhou, Ehsan Nezhadarya, and Jimmy Ba. Dataset distillation using neural feature regression. Advances in Neural Information Processing Systems, 35:9813-9827, 2022. 5"
1701
+ ],
1702
+ "bbox": [
1703
+ 516,
1704
+ 92,
1705
+ 903,
1706
+ 757
1707
+ ],
1708
+ "page_idx": 10
1709
+ },
1710
+ {
1711
+ "type": "page_number",
1712
+ "text": "11",
1713
+ "bbox": [
1714
+ 490,
1715
+ 924,
1716
+ 506,
1717
+ 936
1718
+ ],
1719
+ "page_idx": 10
1720
+ }
1721
+ ]
data/2025/2502_20xxx/2502.20653/882fa9ce-7503-46e8-8936-f6b18b3bf5e6_model.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20653/882fa9ce-7503-46e8-8936-f6b18b3bf5e6_origin.pdf ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d958ab91565ea1999c21865b402239040c7636581475b3a0c709d65119ae7ce8
3
+ size 980759
data/2025/2502_20xxx/2502.20653/full.md ADDED
@@ -0,0 +1,355 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Dataset Distillation with Neural Characteristic Function: A Minmax Perspective
2
+
3
+ Shaobo Wang $^{1,2}$ Yicun Yang $^{2}$ Zhiyuan Liu $^{2}$ Chenghao Sun $^{2}$ Xuming Hu $^{3}$ Conghui He $^{4}$ Linfeng Zhang $^{1,2*}$
4
+
5
+ $^{1}$ School of Artificial Intelligence, Shanghai Jiao Tong University
6
+
7
+ $^{2}$ EPIC Lab, Shanghai Jiao Tong University
8
+
9
+ $^{3}$ Hong Kong University of Science and Technology, Guangzhou
10
+
11
+ $^{4}$ Shanghai Artificial Intelligence Laboratory
12
+
13
+ {shaobowang1009,zhanglinfeng}@sjtu.edu.cn
14
+
15
+ # Abstract
16
+
17
+ Dataset distillation has emerged as a powerful approach for reducing data requirements in deep learning. Among various methods, distribution matching-based approaches stand out for their balance of computational efficiency and strong performance. However, existing distance metrics used in distribution matching often fail to accurately capture distributional differences, leading to unreliable measures of discrepancy. In this paper, we reformulate dataset distillation as a minmax optimization problem and introduce Neural Characteristic Function Discrepancy (NCFD), a comprehensive and theoretically grounded metric for measuring distributional differences. NCFD leverages the Characteristic Function (CF) to encapsulate full distributional information, employing a neural network to optimize the sampling strategy for the CF's frequency arguments, thereby maximizing the discrepancy to enhance distance estimation. Simultaneously, we minimize the difference between real and synthetic data under this optimized NCFD measure. Our approach, termed Neural Characteristic Function Matching (NCFM), inherently aligns the phase and amplitude of neural features in the complex plane for both real and synthetic data, achieving a balance between realism and diversity in synthetic samples. Experiments demonstrate that our method achieves significant performance gains over state-of-the-art methods on both low- and high-resolution datasets. Notably, we achieve a $20.5\%$ accuracy boost on ImageSquawk. Our method also reduces GPU memory usage by over $300\times$ and achieves $20\times$ faster processing speeds compared to state-of-the-art methods. To the best of our knowledge, this is the first work to achieve lossless compression of CIFAR-100 on a single NVIDIA 2080 Ti GPU using only 2.3 GB of memory.
18
+
19
+ ![](images/2579577802e13a513b788c8274f4d12d521c98fe5701a701241cc9be77902fce.jpg)
20
+ Synthetic Data Real Data Z: Latent Space $\psi$ : Parameterized Network
21
+ (a) Previous paradigm: optimize $\widetilde{D}$ to minimize the distance within $Z$ .
22
+
23
+ ![](images/3da4180494fcb84609eb0f265753940bed09de93f05dd5873b0136a20d71db44.jpg)
24
+
25
+ maximize the distance within $\mathcal{Z}$ .
26
+
27
+ ![](images/1f8388cdfb72e8638cd8761bce24bb64684f8099768f418d5faa17d10baea050.jpg)
28
+ (b) Our minmax paradigm: first optimize $\psi$ to maximize the distance in parameterized space $\mathcal{Z}_{\psi}$ , then optimize $\widetilde{D}$ to minimize the distance within $\mathcal{Z}_{\psi}$ .
29
+ Figure 1. Comparison of different paradigms for dataset distillation. (a) The MSE approach compares point-wise features within Euclidean space, denoted as $\mathcal{Z}_{\mathbb{R}}$ , while MMD evaluates moment differences in Hilbert space, $\mathcal{Z}_{\mathcal{H}}$ . (b) Our method redefines distribution matching as a minmax optimization problem, where the distributional discrepancy is parameterized by a neural network $\psi$ . We begin by optimizing $\psi$ to maximize the discrepancy, thereby establishing the latent space $\mathcal{Z}_{\psi}$ , and subsequently optimize the synthesized data $\hat{\mathcal{D}}$ to minimize this discrepancy within $\mathcal{Z}_{\psi}$ .
30
+
31
+ ![](images/3767bbf5f8eb407e359840c9dfbba66cdf9c147644c0adca6db6d7cfa9930d30.jpg)
32
+
33
+ # 1. Introduction
34
+
35
+ Deep neural networks (DNNs) have achieved remarkable progress across a range of tasks, largely due to the availability of vast amounts of training data. However, training effectively with limited data remains challenging and crucial, particularly when large-scale datasets become too voluminous for storage. To address this, dataset distillation has been proposed to condense a large, real dataset into a smaller, synthetic one [6, 49, 52, 55, 56]. Dataset distillation has been applied in various areas, including neural architecture search [33, 44], continual learning [15, 51], medical image computing [29], and privacy protection [7, 8, 11].
36
+
37
+ Among dataset distillation methods, feature or distribution matching (DM) approaches [47, 55] have gained popu
38
+
39
+ ![](images/fd10e80466cecd63f97eee094d2dd46454f7f0b88c64d16007ac793bfec19882.jpg)
40
+ (a) From Real to Complex
41
+ (b) Distribution Matching (DM)
42
+ Figure 2. Comparison of different distribution matching methods. (a) Illustration of embedded features from the real domain to complex-plane features using Euler's formula [13]. The latent neural feature $\Phi_{\pmb{x}}(\pmb {t})$ captures the amplitude and phase information. (b) MMD-based methods align feature moments in the embedded domain but may not effectively align the overall distributions. (c) CF-based methods directly compare distributions by balancing the amplitude and phase in the complex plane, enhancing distributional similarity.
43
+
44
+ ![](images/724b3dc4b0df2fc7ead266ec622c4cecf20c8b026265e245bcbd020f48289e74.jpg)
45
+ minimizing MMD doesn't effectively make the distributions similar
46
+ Iteration 0
47
+
48
+ ![](images/686f0775e9c062d309f31fbd097802a9f2fd472ceba8e105f82de6a33ca6967a.jpg)
49
+ Iteration 5000
50
+
51
+ ![](images/9cd497ed63e24283fab03e297125f5a603353786dcd0030d2f9c009ec3ed5260.jpg)
52
+ Iteration 10000
53
+
54
+ ![](images/f52d0d68d48b30d5fe2f3cd175cbcafb5c1dc72da5ab2bf4de6885b2c9687e8a.jpg)
55
+ (c) Characteristic Function Matching (Ours) Real Synthetic
56
+ minimizing CF makes the distributions similar
57
+ Iteration 0
58
+
59
+ ![](images/58484942b91e753dc671c1d292b53c01fcb1316f175abaf501ce83ed8d4c8ad9.jpg)
60
+ Iteration 5000
61
+
62
+ ![](images/c920a295d46efcf67c9d4a831fbfc151c089dec53dedb97296b540c0087827c3.jpg)
63
+ Iteration 10000
64
+
65
+ ![](images/05c57ba73b98ec41b7f7bf797e1d7107f3b84e3a9930bb33d39e31e2f6437969.jpg)
66
+ Figure 3. Comparison of performance, peak GPU memory usage, and distillation speed between the state-of-the-art (SOTA) distillation method and our NCFM on CIFAR-100 across various IPC values, evaluated on 8 NVIDIA H100 GPUs. Notably, NCFM reduces GPU memory usage by over $300\times$ , achieves $20\times$ faster distillation, and delivers better performance. We also successfully demonstrated lossless distillation using only 2.3GB GPU memory.
67
+
68
+ larity for their effective balance between high performance and computational efficiency. Unlike bi-level optimization-based distillation approaches [6, 20, 24, 54, 56], DM-based methods bypass the need for nested optimization. For instance, when learning with 50 images per class (IPC) on CIFAR-10 dataset, DM methods achieve higher test accuracy than gradient matching methods [24, 54, 56], while requiring only a tenth of the computation time.
69
+
70
+ A key challenge in DM lies in defining an effective metric to measure distributional discrepancies between real and synthetic datasets. Early methods primarily employed Mean Squared Error (MSE) to compare point-wise features [10, 38, 47], which operates in Euclidean space, $\mathcal{Z}_{\mathbb{R}}$ as illustrated on the left of Figure 1(a). However, MSE directly matches pixel-level or patch-level information without capturing the semantic structures embedded in high-dimensional manifolds, which falls short for distribution comparison. Later methods [53, 55, 57] employ Maximum Mean Discrepancy (MMD) as a metric. Nevertheless, research in generative modeling [4, 25] has shown that MMD aligns moments of distributions in a latent Hilbert space, $\mathcal{Z}_{\mathcal{H}}$ , as shown on the right of Figure 1(a). While distributional equivalence implies moment equivalence, the converse is not necessarily true: aligning moments alone does not guarantee full distributional matching. As illustrated in
71
+
72
+ Figure 2(b), MMD-based methods may fail to capture overall distributional alignment between real and synthetic data, resulting in suboptimal synthesized image quality.
73
+
74
+ To overcome these limitations, we propose a novel approach that reformulates distribution matching as an adversarial minmax optimization problem, as depicted in Figure 1(b). By leveraging the minmax paradigm, we adaptively learn the discrepancy metric, enabling it to maximize the separability between real and synthetic data distributions. This dynamic adjustment addresses the rigidity of fixed metrics like MSE and MMD. Meanwhile, the synthetic data is iteratively optimized to minimize the dynamically refined discrepancy measure. Building upon this foundation, we introduce Neural Characteristic Discrepancy (NCFD), a parameterized metric based on the Characteristic Function (CF), which provides a precise and comprehensive representation of the underlying probability distribution. Defined as the Fourier transform of the probability density function, the CF encapsulates all relevant information about a distribution [3, 5, 14, 21, 31, 41]. The CF offers a one-to-one correspondence with the cumulative density function, ensuring the robustness and reliability.
75
+
76
+ In our framework, an auxiliary network embeds features while a lightweight sampling network is optimized to dynamically adjust its CF sampling strategy using a scale mixture of normals. During the distillation process, we iteratively minimize the NCFD to bring synthetic data closer to real data, while training the sampling network to maximize NCFD, thereby improving the metric's robustness and accuracy. Unlike MMD which has quadratic computational complexity, NCFD achieves linear time computational complexity. Our method, Neural Characteristic Function Matching (NCFM), aligns both the phase and amplitude of neural features in the complex plane, achieving a balanced synthesis of realism and diversity in the generated images. As shown in Figure 2(c), NCFM effectively captures overall distributional information, leading to well-aligned synthetic and real data distributions after optimization. Our contributions are as follows:
77
+
78
+ 1. We reformulate the distribution matching problem as a minmax optimization problem, where the sampling net-
79
+
80
+ work maximizes the distributional discrepancy to learn a proper discrepancy metric, while the synthesized images are optimized to minimize such discrepancy.
81
+
82
+ 2. We introduce Neural Characteristic Function Matching (NCFM), which aligns the phase and amplitude information of neural features in the complex plane for both real and synthetic data, achieving a balance between realism and diversity in synthetic data.
83
+ 3. Extensive experiments across multiple benchmark datasets demonstrate the superior performance and efficiency of NCFM. Particularly, on high-resolution datasets, NCFM achieves significant accuracy gains of up to $20.5\%$ on ImageSquawk and $17.8\%$ on ImageMeow at 10 IPC compared to SOTA methods.
84
+ 4. NCFM achieves unprecedented efficiency in computational resources. As shown in Figure 3, our method dramatically reduces resource requirements with better performance, achieving more than $300 \times$ reduction in GPU memory usage compared with DATM [16]. Most remarkably, NCFM demonstrates lossless dataset distillation on both CIFAR-10 and CIFAR-100 using merely about 2 GB of GPU memory, enabling all experiments to be conducted on a single NVIDIA 2080 Ti GPU.
85
+
86
+ # 2. Related Work
87
+
88
+ Dataset Distillation Methods Based on Distribution and Feature Matching. Dataset distillation was proposed by [49]. Compared with various bi-level DD methods, DM [55] is regarded as an efficient method that balances the performance and computational efficiency, without involving the nested model optimization. These methods can be classified into two directions, i.e., point-wise and moment-wise matching. For moment-wise matching, DM-based methods [53, 55, 57] propose to minimize the maximum mean discrepancy (MMD) between synthetic and real datasets. For point-wise feature matching, they typically design better strategies to match features extracted across layers in convolutional neural networks, and apply further adjustments to improve the performance [10, 38, 47]. However, moment-based and point-based matching methods may not capture the overall distributional discrepancy between synthetic and real data, as they are not sufficient conditions for distributional equivalence.
89
+
90
+ Characteristic Function as a Distributional Metric. The characteristic function is a unique and universal metric for measuring distributional discrepancy, defined as the Fourier transform of the probability density function [3]. The CF of any real-valued random variable completely defines its probability distribution, providing an alternative analytical approach compared to working directly with probability density or cumulative distribution functions. Unlike the moment-generating function, the CF always exists when treated as a function of a real-valued argument [5]. Re-
91
+
92
+ cently, the CFD has been adopted in deep learning for various tasks, e.g., several works have been proposed to use the CFD for generative modeling [1, 27]. However, none of prior works have considered the CFD for distillation.
93
+
94
+ # 3. Preliminaries: Distribution Matching
95
+
96
+ Distribution Matching (DM) was first introduced by [55] as an alternative to traditional bi-level optimization techniques, such as gradient matching methods [20, 24, 54, 56] and trajectory matching methods [6, 9, 12, 16]. Classical DM approaches focus on minimizing the discrepancy between the distributions of real and synthetic data, typically categorized into two main types: feature point matching and moment matching. Feature point matching methods [10, 38, 47] directly compare point-wise features using Mean Square Error (MSE), as defined by:
97
+
98
+ $$
99
+ \mathcal {L} _ {\mathrm {M S E}} = \mathbb {E} _ {\boldsymbol {x} \sim \mathcal {D}, \tilde {\boldsymbol {x}} \sim \tilde {\mathcal {D}}} \left[ \| f (\boldsymbol {x}) - f (\tilde {\boldsymbol {x}}) \| ^ {2} \right], \tag {1}
100
+ $$
101
+
102
+ where $f$ denotes the feature extractor network, $\mathcal{D}$ and $\tilde{\mathcal{D}}$ represent the real and synthetic data distributions, respectively, $\pmb{x}$ and $\tilde{\pmb{x}}$ are samples drawn from $\mathcal{D}$ and $\tilde{\mathcal{D}}$ . However, MSE may not be ideal for comparing distributions, as it only considers direct feature comparisons in Euclidean space, neglecting important semantic information.
103
+
104
+ In another line, notable works employed Maximum Mean Discrepancy (MMD) to align high-order moments in the latent feature space [53, 55, 57]. Rigorously, MMD is defined to match moments within the Reproducing Kernel Hilbert Space (RKHS) induced by a selected kernel function. The MMD loss can be formulated as:
105
+
106
+ $$
107
+ \begin{array}{l} \sup _ {f \in \mathcal {F}} \| \mathbb {E} _ {\boldsymbol {x} \sim \mathcal {D}} [ f (\boldsymbol {x}) ] - \mathbb {E} _ {\tilde {\boldsymbol {x}} \sim \hat {\mathcal {D}}} [ f (\tilde {\boldsymbol {x}}) ] \| ^ {2}, \\ = \sup _ {f \in \mathcal {F}} \left(\mathcal {K} _ {\mathcal {D}, \mathcal {D}} + \mathcal {K} _ {\tilde {\mathcal {D}}, \tilde {\mathcal {D}}} - 2 \mathcal {K} _ {\mathcal {D}, \tilde {\mathcal {D}}}\right), \tag {2} \\ \end{array}
108
+ $$
109
+
110
+ where $\mathcal{K}_{\mathcal{D},\tilde{\mathcal{D}}} = \mathbb{E}_{\boldsymbol{x}\sim \mathcal{D},\tilde{\boldsymbol{x}}\sim \tilde{\mathcal{D}}}[\mathcal{K}_{f(\boldsymbol{x}),f(\tilde{\boldsymbol{x}})}]$ denotes the kernel function associated with feature extractor $f$ in function class $\mathcal{F}$ . The choice of kernel function $\mathcal{K}$ is not unique and requires careful selection for MMD to be effective. However, instead of selecting certain kernel function, most DM-based methods [10, 55, 57] align moments directly in the feature space, commonly approximated as:
111
+
112
+ $$
113
+ \mathcal {L} _ {\mathrm {M M D}} = \left\| \mathbb {E} _ {\boldsymbol {x} \sim \mathcal {D}} [ f (\boldsymbol {x}) ] - \mathbb {E} _ {\tilde {\boldsymbol {x}} \sim \tilde {\mathcal {D}}} [ f (\tilde {\boldsymbol {x}}) ] \right\| ^ {2}. \tag {3}
114
+ $$
115
+
116
+ We argue that such empirical MMD estimates lack rigor, as they do not provide a maximal upper bound on the discrepancy, falling short of MMD's theoretical requirements.
117
+
118
+ # 4. Adversarial Distribution Matching
119
+
120
+ # 4.1. Minmax Framework
121
+
122
+ To address existing challenges with discrepancy measure selection, we propose a new approach that reformulates distribution matching as a minmax optimization problem. In
123
+
124
+ ![](images/0008b4982c396724408b45226589e92e7d316ca31a12b2d76f90dd1f10b89460.jpg)
125
+ Figure 4. Dataset Distillation with Neural Characteristic Function Matching (NCFM). Real and synthetic data points are sampled and embedded through a feature extractor network. The synthetic data is optimized by minimizing the distributional discrepancy between real and synthetic data, measured via the Neural Characteristic Function Discrepancy (NCFD) in the complex plane. Additionally, an auxiliary network learns an optimal sampling distribution for the frequency arguments of the characteristic function. Best viewed in color.
126
+
127
+ this framework, we maximize the discrepancy measure to define a robust discrepancy metric, parameterized by a neural network $\psi$ . Concurrently, we minimize the discrepancy between the synthetic dataset $\tilde{\mathcal{D}}$ and the real dataset $\mathcal{D}$ by optimizing the synthetic data distribution $\tilde{\mathcal{D}}$ . Formally, this minmax optimization problem is expressed as:
128
+
129
+ $$
130
+ \min _ {\tilde {\mathcal {D}}} \max _ {\psi} \mathcal {L} (\tilde {\mathcal {D}}, \mathcal {D}, f, \psi), \tag {4}
131
+ $$
132
+
133
+ where $\mathcal{L}$ denotes the discrepancy measure, $f$ is the feature extractor network, and $\psi$ is the network learning the discrepancy metric. This minmax framework seeks the optimal synthetic data distribution $\tilde{\mathcal{D}}$ that minimizes $\mathcal{L}$ while network $\psi$ maximizes $\mathcal{L}$ to establish a robust metric.
134
+
135
+ # 4.2. Neural Characteristic Function Matching
136
+
137
+ # 4.2.1. Neural Characteristic Function Discrepancy
138
+
139
+ To define a suitable discrepancy metric within this minmax framework, we propose a novel discrepancy measure based on the characteristic function (CF), which enables direct and robust assessment of distributional discrepancies. Characteristic functions are a mainstay in probability theory, often used as an alternative to probability density functions due to their unique properties. Specifically, the CF of a random variable $x$ is the expectation of its complex exponential transform, defined as:
140
+
141
+ $$
142
+ \Phi_ {\boldsymbol {x}} (\boldsymbol {t}) = \mathbb {E} _ {\boldsymbol {x}} \left[ e ^ {j \langle \boldsymbol {t}, \boldsymbol {x} \rangle} \right] = \int_ {\boldsymbol {x}} e ^ {j \langle \boldsymbol {t}, \boldsymbol {x} \rangle} d F (\boldsymbol {x}), \tag {5}
143
+ $$
144
+
145
+ where $F(\pmb{x})$ denotes the cumulative distribution function (cdf) of $\pmb{x}$ , $j = \sqrt{-1}$ , and $\pmb{t}$ is the frequency argument. Since the cdf is not directly accessible in practice, we approximate the CF empirically as $\Phi_{\pmb{x}}(\pmb{t}) = \frac{1}{N}\sum_{i=1}^{N}e^{j\langle\pmb{t},\pmb{x}_i\rangle}$ , where $N$ is the sample size in the dataset. The CF provides a theoretically principled description of a distribution, summarized in the following theorems.
146
+
147
+ Theorem 1 (Lévy's Convergence Theorem [31]) Let $\{X_{n}\}_{n = 1}^{\infty}$ be a sequence of random variables with characteristic functions $\Phi_n(t) = \mathbb{E}_{X_n}[e^{j\langle t,X_n\rangle}]$ . Suppose
148
+
149
+ $\Phi_n(\pmb{t}) \to \Phi(\pmb{t})$ pointwise for each $\pmb{t} \in \mathbb{R}^d$ as $n \to \infty$ . If $\Phi(\pmb{t})$ is continuous at $\pmb{t} = 0$ , then there exists a random variable $X$ with characteristic function $\Phi(\pmb{t})$ , and $X_n$ converges in distribution to $X$ .
150
+
151
+ Theorem 2 (Uniqueness for Characteristic Functions [14]) If two random variables $X$ and $Y$ have the same characteristic function, $\Phi_X(t) = \Phi_Y(t)$ for all $t$ , then $X$ and $Y$ are identically distributed. In other words, a characteristic function uniquely determines the distribution.
152
+
153
+ By Theorems 1 and 2, the empirical CF weakly converges to the population CF, ensuring that the CF serves as a reliable proxy for the underlying distribution. Based on the CF, we define our characteristic function discrepancy (CFD) as:
154
+
155
+ $$
156
+ \mathcal {C} _ {\mathcal {T}} (\boldsymbol {x}, \tilde {\boldsymbol {x}}) = \int_ {\boldsymbol {t}} \sqrt {\underbrace {(\Phi_ {\boldsymbol {x}} (\boldsymbol {t}) - \Phi_ {\tilde {\boldsymbol {x}}} (\boldsymbol {t})) (\bar {\Phi} _ {\boldsymbol {x}} (\boldsymbol {t}) - \bar {\Phi} _ {\tilde {\boldsymbol {x}}} (\boldsymbol {t}))} _ {\operatorname {Chf} (\boldsymbol {t})}} \, d F _ {\mathcal {T}} (\boldsymbol {t}), \tag {6}
157
+ $$
158
+
159
+ where $\bar{\Phi}$ is the complex conjugate of $\Phi$ , and $F_{\mathcal{T}}(t)$ is the CDF of a sampling distribution on $t$ . To simplify, we let $\mathrm{Chf}(t) = (\Phi_x(t) - \Phi_{\tilde{x}}(t))(\bar{\Phi}_x(t) - \bar{\Phi}_{\tilde{x}}(t))$ for further analysis. We show that $\mathcal{C}_{\mathcal{T}}(\boldsymbol{x},\tilde{\boldsymbol{x}})$ defines a valid distance metric in the following theorem.
160
+
161
+ Theorem 3 (CFD as a Distance Metric.) The CF discrepancy $\mathcal{C}_{\mathcal{T}}(\boldsymbol{x}, \tilde{\boldsymbol{x}})$ , as defined in Eq. (6), serves as a distance metric between $\boldsymbol{x}$ and $\tilde{\boldsymbol{x}}$ when the support of $\mathcal{T}$ resides in Euclidean space. It satisfies the properties of nonnegativity, symmetry, and the triangle inequality.
162
+
163
+ Theorem 3 establishes that CFD is a valid distance metric. Furthermore, we demonstrate that this formulation decomposes CFD into phase, $\pmb{a}_{\pmb{x}}(\pmb{t})$ , and amplitude, $|\Phi_{\pmb{x}}(t)|$ , com
164
+
165
+ ponents through Euler's formula:
166
+
167
+ $$
168
+ \begin{array}{l} \operatorname {Chf} (\boldsymbol {t}) = \left| \Phi_ {\boldsymbol {x}} (\boldsymbol {t}) \right| ^ {2} + \left| \Phi_ {\tilde {\boldsymbol {x}}} (\boldsymbol {t}) \right| ^ {2} \\ - \left| \Phi_ {\boldsymbol {x}} (\boldsymbol {t}) \right| \left| \Phi_ {\tilde {\boldsymbol {x}}} (\boldsymbol {t}) \right| \left(2 \cos (\boldsymbol {a} _ {\boldsymbol {x}} (\boldsymbol {t}) - \boldsymbol {a} _ {\tilde {\boldsymbol {x}}} (\boldsymbol {t}))\right) \\ = \underbrace {\left(\left| \Phi_ {\boldsymbol {x}} (\boldsymbol {t}) \right| - \left| \Phi_ {\tilde {\boldsymbol {x}}} (\boldsymbol {t}) \right|\right) ^ {2}} _ {\text {amplitude difference}} \tag {7} \\ + 2 \left| \Phi_ {\boldsymbol {x}} (\boldsymbol {t}) \right| \left| \Phi_ {\tilde {\boldsymbol {x}}} (\boldsymbol {t}) \right| \underbrace {\left(1 - \cos (\boldsymbol {a} _ {\boldsymbol {x}} (\boldsymbol {t}) - \boldsymbol {a} _ {\tilde {\boldsymbol {x}}} (\boldsymbol {t}))\right)} _ {\text {phase difference}}, \\ \end{array}
169
+ $$
170
+
171
+ - Phase Information: the term $1 - \cos (a_{\pmb{x}}(\pmb{t}) - a_{\tilde{\pmb{x}}}(\pmb{t}))$ represents the phase difference, encoding the data centers that are crucial for realism.
172
+ - Amplitude Information: the term $(|\Phi_{\pmb{x}}(\pmb{t})| - |\Phi_{\tilde{\pmb{x}}}(\pmb{t})|)^{2}$ captures the distribution scale, contributing to the diversity.
173
+
174
+ This phase-amplitude decomposition, supported in signal processing [32, 35] and generative models [27], provides insight into CFD's descriptive power. In practice, to reduce computational cost, we further introduce an additional feature extractor $f$ to map input variables into a latent space, which is similar to previous works on distribution matching [10, 26, 55, 57]. We also use a parameterized sampling network $\psi$ to obtain the distribution of the frequency argument $t$, thereby extending the CFD to a more general parameterized form, i.e., the Neural Characteristic Function Discrepancy (NCFD), defined as $\mathcal{C}_{\mathcal{T}}(\pmb{x},\tilde{\pmb{x}};f,\psi) = \int_t\sqrt{\mathrm{Chf}(\pmb{t};f)}\,dF_{\mathcal{T}}(\pmb{t};\psi)$, where $\mathrm{Chf}(\pmb{t};f)$ is defined as $\left(\left|\Phi_{f(\pmb{x})}(\pmb{t})\right| - \left|\Phi_{f(\tilde{\pmb{x}})}(\pmb{t})\right|\right)^2 + 2\left|\Phi_{f(\pmb{x})}(\pmb{t})\right|\left|\Phi_{f(\tilde{\pmb{x}})}(\pmb{t})\right|(1 - \cos (\pmb{a}_{f(\pmb{x})}(\pmb{t}) - \pmb{a}_{f(\tilde{\pmb{x}})}(\pmb{t})))$.
175
+
176
+ # 4.2.2. Determining the sampling strategy for NCFD
177
+
178
+ The core aspect in optimizing $\mathcal{C}_{\mathcal{T}}(\boldsymbol{x},\tilde{\boldsymbol{x}};f,\psi)$ lies in determining the form of $F_{\mathcal{T}}(t;\psi)$, i.e., how to correctly and efficiently sample $\pmb{t}$ from a carefully picked distribution. Similar to works on generative adversarial networks [1, 28], we define $F_{\mathcal{T}}(t)$ as the cumulative distribution function (cdf) of a scale mixture of normals, i.e., $p_{\mathcal{T}}(t) = \int_{\Sigma}\mathcal{N}(t|\mathbf{0},\Sigma)p_{\Sigma}(\Sigma)d\Sigma$, where $p_{\mathcal{T}}(t)$ is the probability density function (pdf) of $F_{\mathcal{T}}(t)$, $\mathcal{N}(t|\mathbf{0},\Sigma)$ denotes a zero-mean Gaussian distribution with covariance $\Sigma$, and $p_{\Sigma}(\Sigma)$ represents the distribution of $\Sigma$. We observe that as the number of sampled frequency arguments increases, the approximation of the empirical CF improves, as guaranteed by Lévy's Convergence Theorem [31], ultimately leading to higher-quality synthetic data.
179
+
180
+ # 4.2.3. Distribution Matching with NCFD
181
+
182
+ Given the NCFD measure $\mathcal{C}_{\mathcal{T}}(\boldsymbol{x},\tilde{\boldsymbol{x}};f,\psi)$, we now propose a method that utilizes NCFD for distribution matching, termed Neural Characteristic Function Matching (NCFM). A visual illustration of the NCFM pipeline is provided in Figure 4. On one hand, we maximize the NCFD to learn an effective discrepancy metric by optimizing the network $\psi$. On the other hand, we minimize this learned NCFD to obtain an optimal synthetic dataset, $\tilde{\mathcal{D}}$. In practice, we introduce a hyper-parameter $\alpha$ to balance the amplitude and
183
+
184
+ phase information in the NCFD, then the minmax optimization problem can be formulated as:
185
+
186
+ $$
187
+ \begin{array}{l} \min _ {\tilde {\mathcal {D}}} \max _ {\psi} \mathcal {L} (\tilde {\mathcal {D}}, \mathcal {D}, f, \psi) = \min _ {\tilde {\mathcal {D}}} \max _ {\psi} \mathbb {E} _ {\boldsymbol {x} \sim \mathcal {D}, \tilde {\boldsymbol {x}} \sim \tilde {\mathcal {D}}} \mathcal {C} _ {\mathcal {T}} (\boldsymbol {x}, \tilde {\boldsymbol {x}}; f, \psi) \\ = \min _ {\tilde {\mathcal {D}}} \max _ {\psi} \mathbb {E} _ {\boldsymbol {x} \sim \mathcal {D}, \tilde {\boldsymbol {x}} \sim \tilde {\mathcal {D}}} \int_ {\boldsymbol {t}} \sqrt {\operatorname {Chf} (\boldsymbol {t} ; f)} \, d F _ {\mathcal {T}} (\boldsymbol {t}; \psi) \\ \end{array}
188
+ $$
189
+
190
+ where $\operatorname{Chf}(\pmb{t}; f) = \alpha \left(\left|\Phi_{f(\pmb{x})}(\pmb{t})\right| - \left|\Phi_{f(\tilde{\pmb{x}})}(\pmb{t})\right|\right)^2 + (1 - \alpha)\,\cdot$
191
+
192
+ $$
193
+ \left(2 \left| \Phi_ {f (\boldsymbol {x})} (\boldsymbol {t}) \right| \left| \Phi_ {f (\tilde {\boldsymbol {x}})} (\boldsymbol {t}) \right|\right) \cdot \left(1 - \cos \left(\boldsymbol {a} _ {f (\boldsymbol {x})} (\boldsymbol {t}) - \boldsymbol {a} _ {f (\tilde {\boldsymbol {x}})} (\boldsymbol {t})\right)\right). \tag {8}
194
+ $$
195
+
196
+ For the design of $f$ , we used a hybrid approach that combines a pre-trained model with a randomly initialized model, both selected from a subset of trained models. This ensures that the feature extractor remains moderately diverse yet discriminative. The hybrid feature extractor is constructed by $\beta$ -blending the checkpoints of the initial and final models, where each model is chosen from a specific subset of available models. At each distillation step, the blending coefficient $\beta \in (0,1)$ is sampled from a uniform distribution $\mathcal{U}(0,1)$ , providing a balanced combination of initial and final checkpoints. Our NCFM can be seamlessly integrated with additional data curation steps, such as generating soft labels with a pre-trained neural network and performing dataset finetuning. Unlike prior methods that focus on learning soft labels [16, 19, 36], NCFM simply leverages a pre-trained network to efficiently generate soft labels for the distilled dataset, improving both efficiency and effectiveness. However, these additional curation steps are not essential for NCFM, as it can achieve SOTA performance within the pure minmax framework.
197
+
198
+ # 5. Experiments
199
+
200
+ # 5.1. Setup
201
+
202
+ Baseline methods. We compared NCFM with several representative approaches in dataset distillation and coreset selection. These include gradient-matching methods such as DC [56], DCC [24], DSA and DSAC [54]. Kernel-based methods like KIP [34] and FrePo [58] were also included. Distribution-matching methods like CAFE [47], DM [55], IDM [57], M3D [53], IID [10], and DSDM [26] were part of the evaluation. We also included trajectory-matching methods such as MTT [6], FTD [12], ATT [30], and TESLA [9]. State-of-the-art methods like DATM [16], G-VBSM [40], and RDED [45] were also considered in our comparisons. Additionally, we benchmarked our method against classical coreset selection techniques, including random selection, Herding [50], and Forgetting [46].
203
+
204
+ Datasets and Networks. Our evaluations were conducted on widely-used datasets, including CIFAR-10 and CIFAR-100 [22] with resolution of $32 \times 32$ , Tiny ImageNet [23] with resolution of $64 \times 64$ , and ImageNet subsets with resolution of $128 \times 128$ , i.e., ImageNette, ImageWoof, ImageFruit, Im
205
+
206
+ Table 1. Results of NCFM on CIFAR-10/100, and Tiny ImageNet (resolution of $64\times 64$ ) datasets.
207
+
208
+ <table><tr><td>Dataset</td><td colspan="3">CIFAR-10</td><td colspan="3">CIFAR-100</td><td colspan="3">Tiny ImageNet</td></tr><tr><td>IPC</td><td>1</td><td>10</td><td>50</td><td>1</td><td>10</td><td>50</td><td>1</td><td>10</td><td>50</td></tr><tr><td>Ratio (%)</td><td>0.02</td><td>0.2</td><td>1</td><td>0.2</td><td>2</td><td>10</td><td>0.2</td><td>2</td><td>10</td></tr><tr><td>Random</td><td>14.4±2.0</td><td>26.0±1.2</td><td>43.4±1.0</td><td>4.2±0.3</td><td>14.6±0.5</td><td>30.0±0.4</td><td>1.4±0.1</td><td>5.0±0.2</td><td>15.0±0.4</td></tr><tr><td>Herding</td><td>21.5±1.2</td><td>31.6±0.7</td><td>40.4±0.6</td><td>8.4±0.3</td><td>17.3±0.3</td><td>33.7±0.5</td><td>2.8±0.2</td><td>6.3±0.2</td><td>16.7±0.3</td></tr><tr><td>Forgetting</td><td>13.5±1.2</td><td>23.3±1.0</td><td>23.3±1.1</td><td>4.5±0.2</td><td>15.1±0.3</td><td>30.5±0.3</td><td>1.6±0.1</td><td>5.1±0.2</td><td>15.0±0.3</td></tr><tr><td>DC</td><td>28.3±0.5</td><td>44.9±0.5</td><td>53.9±0.5</td><td>12.8±0.3</td><td>25.2±0.3</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>DSA</td><td>28.8±0.7</td><td>52.1±0.5</td><td>60.6±0.5</td><td>13.9±0.3</td><td>32.3±0.3</td><td>42.8±0.4</td><td>-</td><td>-</td><td>-</td></tr><tr><td>DCC</td><td>32.9±0.8</td><td>49.4±0.5</td><td>61.6±0.4</td><td>13.3±0.3</td><td>30.6±0.4</td><td>40.0±0.3</td><td>-</td><td>-</td><td>-</td></tr><tr><td>DSAC</td><td>34.0±0.7</td><td>54.5±0.5</td><td>64.2±0.4</td><td>14.6±0.3</td><td>14.6±0.3</td><td>39.3±0.4</td><td>-</td><td>-</td><td>-</td></tr><tr><td>FrePo</td><td>46.8±0.7</td><td>65.5±0.4</td><td>71.7±0.2</td><td>28.7±0.1</td><td>42.5±0.2</td><td>44.3±0.2</td><td>15.4±0.3</td><td>25.4±0.2</td><td>-</td></tr><tr><td>MTT</td><td>46.3±0.8</td><td>65.3±0.7</td><td>71.6±0.2</td><td>24.3±0.3</td><td>40.1±0.4</td><td>47.7±0.2</td><td>8.8±0.3</td><td>23.2±0.2</td><td>28.0±0.3</td></tr><tr><td>ATT</td><td>48.3±1.0</td><td>67.7±0.6</td><td>74.5±0.4</td><td>26.1±0.3</td><td>44.2±0.5</td><td>51.2±0.3</td><td>11.0±0.5</td><td>25.8±0.4</td><td>-</td></tr><tr><td>FTD</td><td>46.8±0.3</td><td>66.6±0.3</td><td>73.8±0.2</td><td>25.2±0.2</td><td>43.4±0.3</td><td>48.5±0.3</td><td>10.4±0.3</td><td>24.5±0.2</td><td>-</td></tr><tr><td>TESLA</td><td>48.5±0.8</td><td>66.4±0.8</td><td>72.6±0.7</td><td>24.8±0.4</td><td>41.7±0.3</td><td>47.9±0.3</td><td>-</td><td>-</td><td>-</td></tr><tr><td>CAFE</td><td>30.3±1.1</td><td>46.3±0.6</td><td>55.5±0.6</td><td>12.9±0.3</td><td>27.8±0.3</td><td>37.9±0.3</td><td>-</td><td>-</td><td>-</td></tr><tr><td>DM</td><td>26.0±0.8</td><td>48.9±0.6</td><td>63.0±0.4</td><td>11.4±0.3</td><td>29.7±0.3</td><td>43.6±0.4</td><td>3.9±0.2</td><td>12.9±0.4</td><td>24.1±0.3</td></tr><tr><td>IDM</td><td>45.6±0.7</td><td>58.6±0.1</td><td>67.5±0.1</td><td>20.1±0.3</td><td>45.1±0.1</td><td>50.0±0.2</td><td>10.1±0.2</td><td>21.9±0.2</td><td>27.7±0.3</td></tr><tr><td>M3D</td><td>45.3±0.3</td><td>63.5±0.2</td><td>69.9±0.5</td><td>26.2±0.3</td><td>42.4±0.2</td><td>50.9±0.7</td><td>-</td><td>-</td><td>-</td></tr><tr><td>IID</td><td>47.1±0.1</td><td>59.9±0.1</td><td>69.0±0.3</td><td>24.6±0.1</td><td>45.7±0.4</td><td>51.3±0.4</td><td>-</td><td>-</td><td>-</td></tr><tr><td>DSDM</td><td>45.0±0.4</td><td>66.5±0.3</td><td>75.8±0.3</td><td>19.5±0.2</td><td>46.2±0.3</td><td>54.0±0.2</td><td>-</td><td>-</td><td>-</td></tr><tr><td>G-VBSM</td><td>-</td><td>46.5±0.7</td><td>54.3±0.3</td><td>16.4±0.7</td><td>38.7±0.2</td><td>45.7±0.4</td><td>-</td><td>-</td><td>-</td></tr><tr><td>NCFM (Ours)</td><td>49.5±0.3</td><td>71.8±0.3</td><td>77.4±0.3</td><td>34.4±0.5</td><td>48.7±0.3</td><td>54.7±0.2</td><td>18.2±0.5</td><td>26.8±0.6</td><td>29.6±0.5</td></tr><tr><td>Whole Dataset</td><td></td><td>84.8±0.1</td><td></td><td></td><td>56.2±0.3</td><td></td><td></td><td>37.6±0.4</td><td></td></tr></table>
209
+
210
+ ageMeow, ImageSquawk, and ImageYellow [18]. Following prior studies [16, 48], we used networks with instance normalization as the default setting. Specifically, dataset distillation is performed with a 3-layer ConvNet for CIFAR-10/100, a 4-layer ConvNet for Tiny ImageNet, and a 5-layer ConvNet for ImageNet subsets. All experiments were conducted with 10 evaluations for fairness, primarily using a single NVIDIA 4090 GPU.
211
+
212
+ Other Settings. Following prior works, we implemented differential augmentation [47, 54] and applied multi-formation parameterization with a scale factor of $\rho = 2$ for images, as in [20, 57]. We employed AdamW as our optimizer. In our setup, we set the number of sampled frequency arguments to 1024. The number of mixture Gaussian components in the sampling network is set to the number of frequency arguments divided by 16, balancing the sampling network diversity and computational efficiency. Further details are provided in the supplementary material.
213
+
214
+ # 5.2. Main Results
215
+
216
+ We verified the effectiveness of NCFM on various benchmark datasets of different image-per-class (IPC) settings<sup>1</sup>.
217
+
218
+ CIFAR-10/100 and Tiny ImageNet. As shown in Table 1, NCFM outperforms all state-of-the-art (SOTA) baselines. Specifically, it surpasses distribution matching methods using traditional metrics like MSE and MMD, achieving improvements of $23.5\%$ and $23.0\%$ on CIFAR-10 and CIFAR-100 with 1 IPC compared to DM [55]. Additionally, NCFM maintains SOTA performance even against computationally
219
+
220
+ intensive methods like MTT [6]. Results for larger IPC settings and comparisons with other SOTA methods like DATM [16] are in the supplementary material.
221
+
222
+ Higher-resolution Datasets. We also evaluated NCFM on larger datasets, specifically the ImageNet subsets. As shown in Table 2, NCFM demonstrates strong performance across these challenging benchmarks. In 10 IPC setting, our method achieves substantial improvements of $20.5\%$ on ImageSquawk, compared to the baseline MTT [6]. Remarkably, NCFM exhibits robust performance under relatively small IPC. For instance, compared to RDED [45], NCFM yields a significant improvement of $19.6\%$ on ImageNette.
223
+
224
+ Computational Efficiency Evaluation. We tested the training speed and GPU memory of our NCFM compared with strong baseline methods on different datasets. As conventional recognition, trajectory matching based methods usually achieve better results than distribution matching in practice [6, 9, 12]. However, both superior training efficiency and GPU memory efficiency are observed in NCFM across all benchmark datasets, while achieving better results. Specifically, we measured the average training time over 1000 distillation iterations for each method, as summarized in Table 3. For CIFAR-100 at IPC 50, NCFM achieves nearly $30 \times$ faster speeds compared to TESLA [9] without the sampling network, and maintains over $20 \times$ faster speeds with the sampling network included. Moreover, we conducted a comprehensive analysis of computational efficiency, where GPU memory is expressed as the peak memory usage during 1000 iterations of training, as shown in Table 3. While most existing methods encounter out of memory (OOM) issues at IPC = 50, our method requires only
225
+
226
+ Table 2. Results on ImageNet subsets (resolution of ${128} \times {128}$ ) when employing NCFM across different IPCs.
227
+
228
+ <table><tr><td>Dataset</td><td colspan="2">ImageNette</td><td colspan="2">ImageWoof</td><td colspan="2">ImageFruit</td><td colspan="2">ImageMeow</td><td colspan="2">ImageSquawk</td><td colspan="2">ImageYellow</td></tr><tr><td>IPC</td><td>1</td><td>10</td><td>1</td><td>10</td><td>1</td><td>10</td><td>1</td><td>10</td><td>1</td><td>10</td><td>1</td><td>10</td></tr><tr><td>Ratio (%)</td><td>0.105</td><td>1.050</td><td>0.110</td><td>1.100</td><td>0.077</td><td>0.77</td><td>0.077</td><td>0.77</td><td>0.077</td><td>0.77</td><td>0.077</td><td>0.77</td></tr><tr><td>Random</td><td>23.5±4.8</td><td>47.7±2.4</td><td>14.2±0.9</td><td>27.0±1.9</td><td>13.2±0.8</td><td>21.4±1.2</td><td>13.8±0.6</td><td>29.0±1.1</td><td>21.8±0.5</td><td>40.2±0.4</td><td>20.4±0.6</td><td>37.4±0.5</td></tr><tr><td>MTT</td><td>47.7±0.9</td><td>63.0±1.3</td><td>28.6±0.8</td><td>35.8±1.8</td><td>26.6±0.8</td><td>40.3±1.3</td><td>30.7±1.6</td><td>40.4±2.2</td><td>39.4±1.5</td><td>52.3±1.0</td><td>45.2±0.8</td><td>60.0±1.5</td></tr><tr><td>DM</td><td>32.8±0.5</td><td>58.1±0.3</td><td>21.1±1.2</td><td>31.4±0.5</td><td>-</td><td>-</td><td>-</td><td>-</td><td>31.2±0.7</td><td>50.4±1.2</td><td>-</td><td>-</td></tr><tr><td>RDED</td><td>33.8±0.8</td><td>63.2±0.7</td><td>18.5±0.9</td><td>40.6±2.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>NCFM (Ours)</td><td>53.4±1.6</td><td>77.6±1.0</td><td>27.2±1.1</td><td>48.4±1.3</td><td>29.2±0.7</td><td>44.8±1.5</td><td>34.6±0.7</td><td>58.2±1.2</td><td>41.6±1.2</td><td>72.8±0.9</td><td>46.6±1.5</td><td>74.2±1.4</td></tr><tr><td>Whole Dataset</td><td colspan="2">87.4±1.0</td><td colspan="2">67.0±1.3</td><td colspan="2">63.9±2.0</td><td colspan="2">66.7±1.1</td><td colspan="2">87.5±0.3</td><td colspan="2">84.4±0.6</td></tr></table>
229
+
230
+ Table 3. Training speed (s/iter) and peak GPU memory (GB) comparison on a single NVIDIA A100 80G. OOM marks out-of-memory cases. 'Reduction' shows NCFM's speed and memory improvements over the best-performing baseline in the table.
231
+
232
+ <table><tr><td>Resource</td><td colspan="4">Speed (s/iter)</td><td colspan="4">GPU Memory (GB)</td></tr><tr><td>Dataset</td><td colspan="2">CIFAR-100</td><td colspan="2">Tiny ImageNet</td><td colspan="2">CIFAR-100</td><td colspan="2">Tiny ImageNet</td></tr><tr><td>IPC</td><td>10</td><td>50</td><td>10</td><td>50</td><td>10</td><td>50</td><td>10</td><td>50</td></tr><tr><td>MTT</td><td>1.92</td><td>OOM</td><td>OOM</td><td>OOM</td><td>61.6</td><td>OOM</td><td>OOM</td><td>OOM</td></tr><tr><td>FTD</td><td>1.68</td><td>OOM</td><td>OOM</td><td>OOM</td><td>61.4</td><td>OOM</td><td>OOM</td><td>OOM</td></tr><tr><td>TESLA</td><td>5.71</td><td>28.24</td><td>42.01</td><td>OOM</td><td>10.3</td><td>44.2</td><td>69.6</td><td>OOM</td></tr><tr><td>DATM</td><td>OOM</td><td>OOM</td><td>OOM</td><td>OOM</td><td>OOM</td><td>OOM</td><td>OOM</td><td>OOM</td></tr><tr><td>NCFM w/o ψ</td><td>0.73</td><td>0.96</td><td>2.40</td><td>5.67</td><td>1.4</td><td>1.9</td><td>6.4</td><td>8.4</td></tr><tr><td>Reduction</td><td>2.3×</td><td>29.4×</td><td>17.5×</td><td>-</td><td>7.4×</td><td>23.3×</td><td>10.9×</td><td>-</td></tr><tr><td>NCFM</td><td>1.33</td><td>1.36</td><td>3.27</td><td>7.22</td><td>1.6</td><td>2.0</td><td>6.5</td><td>8.7</td></tr><tr><td>Reduction</td><td>1.3×</td><td>20.8×</td><td>12.8×</td><td>-</td><td>6.4×</td><td>22.1×</td><td>10.7×</td><td>-</td></tr></table>
233
+
234
+ about 1.9GB GPU memory on CIFAR-100. This further demonstrates the exceptional scalability of our approach under high IPC conditions. Further results on CIFAR-10 are provided in the supplementary material.
235
+
236
+ Cross-Architecture Generalization. We evaluated the cross-architecture generalization capability of our method by testing its performance on various network architectures, including AlexNet [22], VGG-11 [42], and ResNet-18 [17]. In this evaluation, synthetic data were condensed using a 3-layer ConvNet, and each method was subsequently tested across different architectures to assess robustness and adaptability. Table 4 summarizes the results on CIFAR-10 under the 10 and 50 IPC settings. In both cases, NCFM consistently outperformed other methods across all architectures, demonstrating its strong ability to generalize effectively even when trained on a different architecture. Results on other backbone networks beyond ConvNet are provided in the supplementary material.
237
+
238
+ # 5.3. Ablation Study
239
+
240
+ # 5.3.1. Effect of the Sampling Network
241
+
242
+ To rigorously evaluate the impact of the sampling network, $\psi$ , within the minmax paradigm of NCFM, we conducted performance comparisons with and without this component. To ensure a controlled and fair assessment, no additional data curation techniques were applied (such as fine-tuning or soft label integration). As shown in Table 5, employing the sampling network $\psi$ yields substantial improvements in
243
+
244
+ Table 4. Cross-architecture generalization performance $(\%)$ on CIFAR-10. The synthetic data is condensed using ConvNet, and each method is evaluated on different architectures.
245
+
246
+ <table><tr><td>IPC</td><td>Method</td><td>ConvNet</td><td>AlexNet</td><td>VGG</td><td>ResNet</td></tr><tr><td rowspan="4">10</td><td>DSA</td><td>52.1±0.4</td><td>35.9±1.3</td><td>43.2±0.5</td><td>35.9±1.3</td></tr><tr><td>MTT</td><td>64.3±0.7</td><td>34.2±2.6</td><td>50.3±0.8</td><td>34.2±2.6</td></tr><tr><td>KIP</td><td>47.6±0.9</td><td>24.4±3.9</td><td>42.1±0.4</td><td>24.4±3.9</td></tr><tr><td>NCFM</td><td>71.8±0.3</td><td>67.9±0.5</td><td>68.0±0.3</td><td>67.7±0.5</td></tr><tr><td rowspan="3">50</td><td>DSA</td><td>59.9±0.8</td><td>53.3±0.7</td><td>51.0±1.1</td><td>47.3±1.0</td></tr><tr><td>DM</td><td>65.2±0.4</td><td>61.3±0.6</td><td>59.9±0.8</td><td>57.0±0.9</td></tr><tr><td>NCFM</td><td>77.4±0.3</td><td>75.5±0.3</td><td>75.5±0.3</td><td>73.8±0.2</td></tr></table>
247
+
248
+ synthetic data quality across various datasets. For example, integrating $\psi$ into our method provides a $3.2\%$ performance increase on CIFAR-10 at 50 IPC. Our method yields a $2.6\%$ performance increase on Tiny ImageNet at 1 IPC and $10.1\%$ at 10 IPC. Similar trends are observed across ImageNet subsets, including gains of $2.8\%$ on ImageMeow and $2.0\%$ on ImageSquawk. The strong performance benefits from sampling network $\psi$ emphasize the effectiveness of the minmax paradigm compared to straightforward CFD minimization.
249
+
250
+ # 5.3.2. Impact of Amplitude and Phase Components
251
+
252
+ We examine individual contributions of amplitude and phase alignment within the NCFD measure. By selectively adjusting amplitude or phase alignment, controlled by the hyperparameter $\alpha$ that represents the ratio of amplitude to phase weight in the loss function, we find that both components are essential. To further evaluate the effect of $\alpha$ on performance, we conducted ablation studies on the CIFAR-10 and CIFAR-100 datasets. As noted in prior works [32, 35], the amplitude term primarily enhances the diversity of generated data, while the phase term contributes to realism by accurately capturing data centers. For example, as shown in Figure 5, on CIFAR-10 with 10 IPC, when the amplitude information dominates the loss (e.g., $\alpha = 0.999$ ), the test accuracy decreases about $3\%$ compared to our best results. Conversely, when the phase information dominates (e.g., $\alpha = 0.001$ ), the test accuracy decreases by about $1\%$ . Results demonstrate that a balanced integration of both components yields the highest accuracy.
253
+
254
+ Table 5. Test Performance (%) on CIFAR-10, CIFAR-100, Tiny ImageNet and ImageNet subsets with and without the sampling network $\psi$ . We find that sampling network $\psi$ significantly improves performance, even without additional data curation steps.
255
+
256
+ <table><tr><td>Dataset</td><td colspan="2">CIFAR-10</td><td colspan="2">CIFAR-100</td><td colspan="3">Tiny ImageNet</td><td>ImageFruit</td><td>ImageMeow</td><td>ImageSquawk</td><td>ImageYellow</td></tr><tr><td>IPC</td><td>10</td><td>50</td><td>10</td><td>50</td><td>1</td><td>10</td><td>50</td><td>10</td><td>10</td><td>10</td><td>10</td></tr><tr><td>NCFM w/o ψ</td><td>65.6</td><td>74.2</td><td>45.9</td><td>53.7</td><td>9.4</td><td>14.2</td><td>22.0</td><td>39.6</td><td>51.6</td><td>68.8</td><td>67.6</td></tr><tr><td>NCFM</td><td>68.9</td><td>77.4</td><td>48.7</td><td>54.4</td><td>12.0</td><td>24.3</td><td>26.5</td><td>41.4</td><td>54.4</td><td>70.8</td><td>69.2</td></tr></table>
257
+
258
+ ![](images/5179072a2d0bae3429db742d4c7dc1afa0e10403a7aee3f4cfd9808b1903bc5c.jpg)
259
+ Figure 5. Impact of amplitude and phase components in the NCFD measure across various datasets and IPC settings. The figure illustrates the relationship between the amplitude-to-phase ratio $\alpha$ in Eq. (8). Results indicate that balancing amplitude (for diversity) and phase (for realism) information leads to improved performance. Baseline results were obtained using DM [55].
260
+
261
+ # 5.3.3. Effect of the Number of Sampled Frequency Arguments in NCFD
262
+
263
+ To assess the impact of the number of sampled frequency arguments, $t$ , generated by the sampling network $\psi$ , we varied the sample count and measured the corresponding performance. As illustrated in Figure 6, increasing the number of sampled arguments initially enhances the quality of synthetic data by facilitating finer distributional alignment. For example, accuracy on CIFAR-10 at 10 IPC improves from $62\%$ with 16 sampled frequency arguments to approximately $67\%$ with 1024, indicating a positive correlation between the sampled number and accuracy. However, beyond 1024 arguments, performance gains plateau, with accuracy stabilizing around $67 - 68\%$ even as the sampling number increases to 4096. This trend suggests that a moderate number achieves an optimal balance between computational efficiency and accuracy. We observed that additional cost remains minimal as the number of sampled arguments increases, underscoring NCFM's ability to produce high-quality synthetic data with low computational cost.
264
+
265
+ ![](images/b06a52c4e0156d036e365301165ae4f543fe9c2bd6a7902212815545e1f86563.jpg)
266
+ Figure 6. Impact of sampled frequency count in NCFD on accuracy across datasets and IPC. Increasing frequencies improves accuracy up to a threshold, beyond which gains diminish.
267
+
268
+ # 6. Discussion
269
+
270
+ # 6.1. Training stability of NCFD
271
+
272
+ The training stability of our minmax paradigm is crucial to its effectiveness. Unlike traditional discrepancy measures, NCFM operates within the complex plane to conduct minmax optimization. While instability is a common issue in minmax adversarial optimization, as seen in generative adversarial networks [2, 37, 39], NCFM consistently maintains stable optimization throughout training, as illustrated in Figure 7. This stability is further supported by theoretical guarantees of weak convergence in Theorem 1, demonstrating the robustness of the CF-based discrepancy under diverse conditions and contributing to NCFM's reliable convergence across datasets.
273
+
274
+ ![](images/31d6ecc48d88667965cb2632861025d2a6397601fdba5f9e9c483e36bc71c2e4.jpg)
275
+ Figure 7. Training dynamics of the minmax optimization process across different datasets and various IPC settings.
276
+
277
+ # 6.2. Correlation between CFD and MMD
278
+
279
+ To better understand NCFM, we examine the relationship between the Characteristic Function Discrepancy (CFD) and Maximum Mean Discrepancy (MMD).
280
+
281
+ CF as Well-behaved Kernels in the MMD Metric. The CF discrepancy term $\int_t\sqrt{\mathrm{Chf}(t;f)}\,dF_{\mathcal{T}}(t)$ in our loss can be viewed as a well-behaved kernel in MMD, specifically as a Characteristic Kernel [43]. Unlike MMD, which relies on fixed kernels, NCFM adaptively learns $F_{\mathcal{T}}(t)$, enabling flexible kernel selection for optimal distribution alignment. Furthermore, mixtures of Gaussian distributions within the CF framework produce well-defined characteristic kernels. When MMD employs a characteristic kernel of the form $\int_t e^{-j\langle t,x - \tilde{x}\rangle}dF_{\mathcal{T}}(t)$, it aligns with the structure of CFD, demonstrating that MMD is a special case of CFD when only specific moments are matched. This insight also explains the minimal memory overhead observed as IPC grows, highlighting the efficiency of our approach.
282
+
283
+ Computational Advantage of CFD over MMD. In contrast to MMD, which requires quadratic time in the number
284
+
285
+ of samples for approximate computation, CFD operates in linear time relative to the number of sampled frequency arguments, which aligns with the results in [1]. This efficiency makes CFD substantially faster and more scalable than MMD, offering a particular advantage for large-scale datasets.
286
+
287
+ # 7. Conclusion
288
+
289
+ In this work, we redefined distribution matching for dataset distillation as a minmax optimization problem and introduced Neural Characteristic Function Discrepancy (NCFD), a novel and theoretically grounded metric designed to maximize the separability between real and synthetic data. Leveraging the Characteristic Function (CF), our method dynamically adjusts NCFD to align both phase and amplitude information in the complex plane, achieving a balance between realism and diversity. Extensive experiments demonstrated the computational efficiency of our approach, which achieves state-of-the-art performance with minimal computational overhead, showcasing its scalability and practicality for large-scale applications.
290
+
291
+ # References
292
+
293
+ [1] Abdul Fatir Ansari, Jonathan Scarlett, and Harold Soh. A characteristic function approach to deep implicit generative modeling. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 7478-7487, 2020. 3, 5, 9
294
+ [2] Martin Arjovsky and Léon Bottou. Towards principled methods for training generative adversarial networks. arXiv preprint arXiv:1701.04862, 2017. 8
295
+ [3] Patrick Billingsley. Probability and measure. John Wiley & Sons, 2017. 2, 3
296
+ [4] Mikołaj Binkowski, Danica J Sutherland, Michael Arbel, and Arthur Gretton. Demystifying mmd gans. arXiv preprint arXiv:1801.01401, 2018. 2
297
+ [5] Torben Maack Bisgaard and Zoltán Sasvari. Characteristic functions and moment sequences: positive definiteness in probability. Nova Publishers, 2000. 2, 3
298
+ [6] George Cazenavette, Tongzhou Wang, Antonio Torralba, Alexei A Efros, and Jun-Yan Zhu. Dataset distillation by matching training trajectories. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4750-4759, 2022. 1, 2, 3, 5, 6
299
+ [7] Dingfan Chen, Raouf Kerkouche, and Mario Fritz. Private set generation with discriminative information. Advances in Neural Information Processing Systems, 35:14678-14690, 2022. 1
300
+ [8] Ming-Yu Chung, Sheng-Yen Chou, Chia-Mu Yu, Pin-Yu Chen, Sy-Yen Kuo, and Tsung-Yi Ho. Rethinking backdoor attacks on dataset distillation: A kernel method perspective. arXiv preprint arXiv:2311.16646, 2023. 1
301
+ [9] Justin Cui, Ruochen Wang, Si Si, and Cho-Jui Hsieh. Scaling up dataset distillation to imagenet-1k with constant memory. In International Conference on Machine Learning, pages 6565–6590. PMLR, 2023. 3, 5, 6
302
+ [10] Wenxiao Deng, Wenbin Li, Tianyu Ding, Lei Wang, Hongguang Zhang, Kuihua Huang, Jing Huo, and Yang Gao. Exploiting inter-sample and inter-feature relations in dataset distillation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 17057-17066, 2024. 2, 3, 5
303
+ [11] Tian Dong, Bo Zhao, and Lingjuan Lyu. Privacy for free: How does dataset condensation help privacy? In International Conference on Machine Learning, pages 5378-5396. PMLR, 2022. 1
304
+ [12] Jiawei Du, Yidi Jiang, Vincent YF Tan, Joey Tianyi Zhou, and Haizhou Li. Minimizing the accumulated trajectory error to improve dataset distillation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3749-3758, 2023. 3, 5, 6
305
+ [13] Leonard Euler. On transcending quantities arising from the circle. Chapter, 8, 1748. 2
306
+ [14] Andrey Feuerverger and Roman A Mureika. The empirical characteristic function and its applications. The annals of Statistics, pages 88-97, 1977. 2, 4
307
+ [15] Jianyang Gu, Kai Wang, Wei Jiang, and Yang You. Summarizing stream data for memory-constrained online continual
308
+
309
+ learning. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 12217-12225, 2024. 1
310
+ [16] Ziyao Guo, Kai Wang, George Cazenavette, Hui Li, Kaipeng Zhang, and Yang You. Towards lossless dataset distillation via difficulty-aligned trajectory matching. arXiv preprint arXiv:2310.05773, 2023. 3, 5, 6
311
+ [17] Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 770-778, 2016. 7
312
+ [18] Jeremy Howard and Sylvain Gugger. Fastai: a layered api for deep learning. Information, 11(2):108, 2020. 6
313
+ [19] Seoungyoon Kang, Youngsun Lim, and Hyunjung Shim. Label-augmented dataset distillation. arXiv preprint arXiv:2409.16239, 2024. 5
314
+ [20] Jang-Hyun Kim, Jinuk Kim, Seong Joon Oh, Sangdoo Yun, Hwanjun Song, Joonhyun Jeong, Jung-Woo Ha, and Hyun Oh Song. Dataset condensation via efficient synthetic-data parameterization. In International Conference on Machine Learning, pages 11102-11118. PMLR, 2022. 2, 3, 6
315
+ [21] Stephen M Kogon and Douglas B Williams. Characteristic function based estimation of stable distribution parameters. A practical guide to heavy tails: statistical techniques and applications, pages 311-338, 1998. 2
316
+ [22] Alex Krizhevsky, Geoffrey Hinton, et al. Learning multiple layers of features from tiny images. 2009. 5, 7
317
+ [23] Ya Le and Xuan Yang. Tiny imagenet visual recognition challenge. CS 231N, 7(7):3, 2015. 5
318
+ [24] Saehyung Lee, Sanghyuk Chun, Sangwon Jung, Sangdoo Yun, and Sungroh Yoon. Dataset condensation with contrastive signals. In International Conference on Machine Learning, pages 12352-12364. PMLR, 2022. 2, 3, 5
319
+ [25] Chun-Liang Li, Wei-Cheng Chang, Yu Cheng, Yiming Yang, and Barnabás Póczos. Mmd gan: Towards deeper understanding of moment matching network. Advances in neural information processing systems, 30, 2017. 2
320
+ [26] Hongcheng Li, Yucan Zhou, Xiaoyan Gu, Bo Li, and Weiping Wang. Diversified semantic distribution matching for dataset distillation. In ACM Multimedia 2024, 2024. 5
321
+ [27] Shengxi Li, Zeyang Yu, Min Xiang, and Danilo Mandic. Reciprocal adversarial learning via characteristic functions. Advances in Neural Information Processing Systems, 33:217-228, 2020. 3, 5
322
+ [28] Shengxi Li, Jialu Zhang, Yifei Li, Mai Xu, Xin Deng, and Li Li. Neural characteristic function learning for conditional image generation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7204-7214, 2023. 5
323
+ [29] Zhe Li and Bernhard Kainz. Image distillation for safe data sharing in histopathology. In International Conference on Medical Image Computing and Computer-Assisted Intervention, pages 459-469. Springer, 2024. 1
324
+ [30] Dai Liu, Jindong Gu, Hu Cao, Carsten Trinitis, and Martin Schulz. Dataset distillation by automatic training trajectories. arXiv preprint arXiv:2407.14245, 2024. 5
325
+ [31] Paul Lévy. Théorie de l'addition des variables aléatoires. Gauthier-Villars, Paris, 1937. 2, 4, 5
326
+
327
+ [32] Danilo P Mandic and Anthony G Constantinides. Complex valued nonlinear adaptive filters: state of the art. Signal Processing, 89(9):1704-1725, 2009. 5, 7
328
+ [33] Dmitry Medvedev and Alexander D'yakonov. Learning to generate synthetic training data using gradient matching and implicit differentiation. In International Conference on Analysis of Images, Social Networks and Texts, pages 138-150. Springer, 2021. 1
329
+ [34] Timothy Nguyen, Zhourong Chen, and Jaehoon Lee. Dataset meta-learning from kernel ridge-regression. arXiv preprint arXiv:2011.00050, 2020. 5
330
+ [35] Alan V Oppenheim and Jae S Lim. The importance of phase in signals. Proceedings of the IEEE, 69(5):529-541, 1981. 5, 7
331
+ [36] Tian Qin, Zhiwei Deng, and David Alvarez-Melis. A label is worth a thousand images in dataset distillation. arXiv preprint arXiv:2406.10485, 2024. 5
332
+ [37] Alec Radford. Unsupervised representation learning with deep convolutional generative adversarial networks. arXiv preprint arXiv:1511.06434, 2015. 8
333
+ [38] Ahmad Sajedi, Samir Khaki, Ehsan Amjadian, Lucy Z Liu, Yuri A Lawryshyn, and Konstantinos N Plataniotis. Datadam: Efficient dataset distillation with attention matching. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17097-17107, 2023. 2, 3
334
+ [39] Tim Salimans, Ian Goodfellow, Wojciech Zaremba, Vicki Cheung, Alec Radford, and Xi Chen. Improved techniques for training gans. Advances in neural information processing systems, 29, 2016. 8
335
+ [40] Shitong Shao, Zeyuan Yin, Muxin Zhou, Xindong Zhang, and Zhiqiang Shen. Generalized large-scale data condensation via various backbone and statistical matching. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16709-16718, 2024. 5
336
+ [41] Neil G Shephard. From characteristic function to distribution function: a simple framework for the theory. Econometric theory, 7(4):519-529, 1991. 2
337
+ [42] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 7
338
+ [43] Bharath K Sriperumbudur, Arthur Gretton, Kenji Fukumizu, Bernhard Schölkopf, and Gert RG Lanckriet. Hilbert space embeddings and metrics on probability measures. The Journal of Machine Learning Research, 11:1517-1561, 2010. 8
339
+ [44] Felipe Petroski Such, Aditya Rawal, Joel Lehman, Kenneth Stanley, and Jeffrey Clune. Generative teaching networks: Accelerating neural architecture search by learning to generate synthetic training data. In International Conference on Machine Learning, pages 9206-9216. PMLR, 2020. 1
340
+ [45] Peng Sun, Bei Shi, Daiwei Yu, and Tao Lin. On the diversity and realism of distilled dataset: An efficient dataset distillation paradigm. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9390-9399, 2024. 5, 6
341
+ [46] Mariya Toneva, Alessandro Sordoni, Remi Tachet des Combes, Adam Trischler, Yoshua Bengio, and Geoffrey J Gordon. An empirical study of example forget-
342
+
343
+ ting during deep neural network learning. arXiv preprint arXiv:1812.05159, 2018. 5
344
+ [47] Kai Wang, Bo Zhao, Xiangyu Peng, Zheng Zhu, Shuo Yang, Shuo Wang, Guan Huang, Hakan Bilen, Xinchao Wang, and Yang You. Cafe: Learning to condense dataset by aligning features. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12196-12205, 2022. 1, 2, 3, 5, 6
345
+ [48] Shaobo Wang, Yantai Yang, Shuaiyu Zhang, Chenghao Sun, Weiya Li, Xuming Hu, and Linfeng Zhang. Drupi: Dataset reduction using privileged information, 2024. 6
346
+ [49] Tongzhou Wang, Jun-Yan Zhu, Antonio Torralba, and Alexei A Efros. Dataset distillation. arXiv preprint arXiv:1811.10959, 2018. 1, 3
347
+ [50] Max Welling. Herding dynamical weights to learn. In Proceedings of the 26th annual international conference on machine learning, pages 1121-1128, 2009. 5
348
+ [51] Enneng Yang, Li Shen, Zhenyi Wang, Tongliang Liu, and Guibing Guo. An efficient dataset condensation plugin and its application to continual learning. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 1
349
+ [52] Zeyuan Yin, Eric Xing, and Zhiqiang Shen. Squeeze, recover and relabel: Dataset condensation at imagenet scale from a new perspective. In Thirty-seventh Conference on Neural Information Processing Systems, 2023. 1
350
+ [53] Hansong Zhang, Shikun Li, Pengju Wang, Dan Zeng, and Shiming Ge. M3d: Dataset condensation by minimizing maximum mean discrepancy. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 9314-9322, 2024. 2, 3, 5
351
+ [54] Bo Zhao and Hakan Bilen. Dataset condensation with differentiable siamese augmentation. In International Conference on Machine Learning, pages 12674-12685. PMLR, 2021. 2, 3, 5, 6
352
+ [55] Bo Zhao and Hakan Bilen. Dataset condensation with distribution matching, 2022. 1, 2, 3, 5, 6, 8
353
+ [56] Bo Zhao, Konda Reddy Mopuri, and Hakan Bilen. Dataset condensation with gradient matching. arXiv preprint arXiv:2006.05929, 2020. 1, 2, 3, 5
354
+ [57] Ganlong Zhao, Guanbin Li, Yipeng Qin, and Yizhou Yu. Improved distribution matching for dataset condensation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7856-7865, 2023. 2, 3, 5, 6
355
+ [58] Yongchao Zhou, Ehsan Nezhadarya, and Jimmy Ba. Dataset distillation using neural feature regression. Advances in Neural Information Processing Systems, 35:9813-9827, 2022. 5
data/2025/2502_20xxx/2502.20653/images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:13605521272412773c87860b945bb551e747d479b10874e58ad7448558dca144
3
+ size 637961
data/2025/2502_20xxx/2502.20653/layout.json ADDED
The diff for this file is too large to render. See raw diff
 
data/2025/2502_20xxx/2502.20694/c26d785e-046d-463b-a119-b1927240da07_content_list.json ADDED
The diff for this file is too large to render. See raw diff