MinerU Batch 073eeaf6-c1b9-4559-bcfe-006e52e195f5 (Part 8/8)
- .gitattributes +7 -0
- data/2025/2503_15xxx/2503.15082/9abe67bb-bb04-4404-ba77-d9bdbc419145_content_list.json +1413 -0
- data/2025/2503_15xxx/2503.15082/9abe67bb-bb04-4404-ba77-d9bdbc419145_model.json +2066 -0
- data/2025/2503_15xxx/2503.15082/9abe67bb-bb04-4404-ba77-d9bdbc419145_origin.pdf +3 -0
- data/2025/2503_15xxx/2503.15082/full.md +313 -0
- data/2025/2503_15xxx/2503.15082/images/034446eb7696b2e53c824ed40d612d5fd9f889891278e7d15e33240460801807.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/1bcdd6f1c5caab9505459eea437df23ddfe58f0cd03e7c863a2eff6915ba2be4.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/1e898ae805a8a226adc31b09bee1e5ca4e2a29f36e3598c9b0229868b39764ee.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/25f482130f2c8371bda6cb44d6e4e2a848320aa4731234c20e36ecf1bef0305f.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/4686b8efb67004974db08c666935a7676e2f443e5fc1473c5a2d89a678c4e588.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/4cc5eb1cce99ae67f5f9a804b4c2739c3f4c4025dd55e5e65924dd9eeaad7538.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/4ed8f8007c780019399529e52b6e64219119e91eadccfb42285262a25208d118.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/72638f03e41d10c1d413c9874ca8552ffafaa066ff39616757386667a4881f99.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/7d7026594acb0742b1ffa973d55c714cfa3dec4f728af15e9a20e8b4dd966dfe.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/85accef67f72eee92b8fba3e755403b019656541d1780e6b00386a41b2c1da7f.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/8bbd19b31033f6015bc0594ca6a2c9b6e5d63779241e9e6e37ab39029cc089a5.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/a0c9846395ade7aa4b1d61bb3f7ceefc9d52030df7f34db0473afa4ebf216844.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/b905dabcb35401d01b56c89866cc447aaa62874bb982626732c20633b9ed297b.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/c1470d9edbf54fb74775c976df1b6ea687deb49fcd2c2616260ce88d4345518e.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/c61b7da2cee45634b4ff5dd3d7fd6886b7d93c4b4aec7f8bffc6c4632ae6a1bb.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/d194bc021bfa502c7dd9f4c140a9ffac506cc7be455a393e5676ed244cc02bff.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/d8fea24b57b2dc152b0aa7dc359c61b2923700b72c93c010c21a5c19b7e4c718.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/db8b242c0da8d5188232d469894c5fe9217429a79f81fb65c12b2ed292a464fb.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/images/e6b29beb53fb9dc5b615d4cdd95736227312ddb0e4724131a2b7c936ff3c501e.jpg +3 -0
- data/2025/2503_15xxx/2503.15082/layout.json +0 -0
- data/2025/2503_15xxx/2503.15092/b6252e1c-d150-4802-b7b5-057b9326a285_content_list.json +1531 -0
- data/2025/2503_15xxx/2503.15092/b6252e1c-d150-4802-b7b5-057b9326a285_model.json +2483 -0
- data/2025/2503_15xxx/2503.15092/b6252e1c-d150-4802-b7b5-057b9326a285_origin.pdf +3 -0
- data/2025/2503_15xxx/2503.15092/full.md +310 -0
- data/2025/2503_15xxx/2503.15092/images/173a0d78d14e3aa2faf5434a8830a9886cef9244b14e55c74dc450f4f1636272.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/198b58be1862bcfb3495e44f7bb579c2d2af776f3ad9305c8508d0395bdb788b.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/1c907e28c4b515d3fccf907639ba08a265e4dfe34cddd60a42a2a9a5106ea664.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/4cbff7188997f009a05cf78e1045fbeb940630c3a5c7ff1ea534811af17d776f.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/55718dc235b9bd54c5c9e8a17fe739ef79434a16dafecbdb2b0136bbe97abd0e.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/55fb783ad2adb41d045a59e457336fd067be2ed921e7ca12ee92923660b048e9.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/6eeceff7b55665b9a7cb6ea5eb9f80d5f53585c8e73efe5539f6463a829b3033.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/837ed3e60e92e435fa22125a0e857a27ca8d2ef7334d32935e15ad44d80afe12.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/8c9427b38fe3eff759fd376ff8eb5ce23dbd2d1be9563f0bfa486e6f81ccecff.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/933cc611cdab1edb70abc13d50aab004973186a8e50868f4c6c8e3d0fa4f5abc.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/a2ecbfa253705fc233420bf98d4dc6351aae78177711b77a89a38e742092e986.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/a78d63f9e5b7d49a1007549d1d5d173834b98e64c8e07b60ae8437b2f916d5d9.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/ac507e5edeaf220188c651a28b476844d5481b2e39e5238162c9a7b03a0b0748.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/add00a1381f4e6122096d019a46e1343259d3592eb2c418cdda9920f049b6fd0.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/c1af023a4b2ff28b865e6453fe7dc2082a5247f5548613599388e120b3f5db11.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/ca379e4a1dd72dd16df16517c4b97455853953fe7dd802ddbe7a1f99a90efa9e.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/d040f93e4de3859776d94935d440ee590d69054397f1a469d78e119bb18ca77b.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/d72847520f3e3ccf3b6d7aa79e8685b193af07feab1c019da7545874439a6159.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/images/fd5c8c0b62ddcf673c09ec84587e595e367ab76774b9a793b6a923ff52abd051.jpg +3 -0
- data/2025/2503_15xxx/2503.15092/layout.json +0 -0
- data/2025/2503_15xxx/2503.15112/625c34d4-2bff-40eb-b17b-655c01ac6ef3_content_list.json +1744 -0
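For orientation, each paper directory in this batch pairs an `_origin.pdf` with MinerU outputs: a `*_content_list.json` (an ordered JSON array of text/image/table/equation/list blocks, shown in full below), a `*_model.json`, a `layout.json`, a rendered `full.md`, and extracted `images/`. The following is a minimal Python sketch of one way to read a content list; the path constant and helper name are illustrative only, and it assumes nothing beyond the fields visible in the JSON added in this commit (`type`, `text`, `text_level`, `bbox`, `page_idx`).

    import json
    from pathlib import Path

    # Illustrative path; substitute any *_content_list.json from this batch.
    CONTENT_LIST = Path(
        "data/2025/2503_15xxx/2503.15082/"
        "9abe67bb-bb04-4404-ba77-d9bdbc419145_content_list.json"
    )

    def load_blocks(path: Path) -> list:
        """A content list is a single JSON array of block dicts."""
        with path.open(encoding="utf-8") as f:
            return json.load(f)

    if __name__ == "__main__":
        for block in load_blocks(CONTENT_LIST):
            if block.get("type") != "text":
                continue  # skip image/table/equation/list/aside_text blocks
            # In the blocks below, "text_level": 1 marks section headings.
            marker = "#" if block.get("text_level") == 1 else "-"
            print(f"p{block['page_idx']} {marker} {block['text'][:70]}")

Run against the file above, this prints the paper's headings and paragraphs in reading order, one truncated line per block.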
.gitattributes
CHANGED
@@ -1601,3 +1601,10 @@ data/2025/2503_15xxx/2503.15451/c93caa14-654f-4530-8f47-e3e29bf7e12d_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2503_15xxx/2503.15463/b29f82d2-542a-42d6-a09d-1f2523a8d872_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2503_15xxx/2503.15478/b679118a-b2b3-4410-b092-174c01501501_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2503_17xxx/2503.17400/64a7a26c-e0b0-4b9f-9967-ccb2355bbef7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_15xxx/2503.15082/9abe67bb-bb04-4404-ba77-d9bdbc419145_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_15xxx/2503.15092/b6252e1c-d150-4802-b7b5-057b9326a285_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_15xxx/2503.15112/625c34d4-2bff-40eb-b17b-655c01ac6ef3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_15xxx/2503.15129/02b2079d-486d-4e14-a020-9f768a6f1b0d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_15xxx/2503.15208/aa0707bd-9293-47cf-a7d6-46417b45c2f3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_15xxx/2503.15265/fd00c27e-5d96-45b3-aa94-37d51c89e263_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2504_02xxx/2504.02843/cb3ac9b1-afb1-4f2e-b1b1-2b2561a8cfa5_origin.pdf filter=lfs diff=lfs merge=lfs -text
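Each added line is the standard Git LFS attribute pattern for a single file. As a hedged illustration (the batch-upload tooling behind this commit is not shown here), an equivalent entry could be produced manually with:

    git lfs track "data/2025/2503_15xxx/2503.15082/9abe67bb-bb04-4404-ba77-d9bdbc419145_origin.pdf"
    git add .gitattributes

which appends the `filter=lfs diff=lfs merge=lfs -text` line seen above, so the PDF is stored as an LFS pointer rather than a regular blob.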
data/2025/2503_15xxx/2503.15082/9abe67bb-bb04-4404-ba77-d9bdbc419145_content_list.json
ADDED
@@ -0,0 +1,1413 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "StyleLoco: Generative Adversarial Distillation for Natural Humanoid Robot Locomotion",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
218,
|
| 8 |
+
93,
|
| 9 |
+
779,
|
| 10 |
+
138
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Le Ma $^{1*}$ , Ziyu Meng $^{1,2*}$ , Tengyu Liu $^{1}$ , Yuhan Li $^{1,3}$ , Ran Song $^{2}$ , Wei Zhang $^{2}$ , Siyuan Huang $^{1, \\boxtimes}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
145,
|
| 19 |
+
176,
|
| 20 |
+
867,
|
| 21 |
+
195
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "$^{1}$ National Key Laboratory of General Artificial Intelligence, BIGAI $^{2}$ School of Control Science and Engineering, Shandong University",
|
| 28 |
+
"bbox": [
|
| 29 |
+
84,
|
| 30 |
+
198,
|
| 31 |
+
928,
|
| 32 |
+
213
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "<sup>3</sup> Huazhong University of Science and Technology *Equal contributors huangsiyuan@bigai.ai",
|
| 39 |
+
"bbox": [
|
| 40 |
+
174,
|
| 41 |
+
213,
|
| 42 |
+
834,
|
| 43 |
+
227
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "https://styleloco.github.io/",
|
| 50 |
+
"bbox": [
|
| 51 |
+
424,
|
| 52 |
+
234,
|
| 53 |
+
583,
|
| 54 |
+
247
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "Abstract—Humanoid robots are anticipated to acquire a wide range of locomotion capabilities while ensuring natural movement across varying speeds and terrains. Existing methods encounter a fundamental dilemma in learning humanoid locomotion: reinforcement learning with handcrafted rewards can achieve agile locomotion but produces unnatural gaits, while Generative Adversarial Imitation Learning (GAIL) with motion capture data yields natural movements but suffers from unstable training processes and restricted agility. Integrating these approaches proves challenging due to the inherent heterogeneity between expert policies and human motion datasets. To address this, we introduce StyleLoco, a novel two-stage framework that bridges this gap through a Generative Adversarial Distillation (GAD) process. Our framework begins by training a teacher policy using reinforcement learning to achieve agile and dynamic locomotion. It then employs a multi-discriminator architecture, where distinct discriminators concurrently extract skills from both the teacher policy and motion capture data. This approach effectively combines the agility of reinforcement learning with the natural fluidity of human-like movements while mitigating the instability issues commonly associated with adversarial training. Through extensive simulation and real-world experiments, we demonstrate that StyleLoco enables humanoid robots to perform diverse locomotion tasks with the precision of expertly trained policies and the natural aesthetics of human motion, successfully transferring styles across different movement types while maintaining stable locomotion across a broad spectrum of command inputs.",
|
| 61 |
+
"bbox": [
|
| 62 |
+
84,
|
| 63 |
+
268,
|
| 64 |
+
488,
|
| 65 |
+
621
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "I. INTRODUCTION",
|
| 72 |
+
"text_level": 1,
|
| 73 |
+
"bbox": [
|
| 74 |
+
209,
|
| 75 |
+
636,
|
| 76 |
+
362,
|
| 77 |
+
648
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "text",
|
| 83 |
+
"text": "Natural and agile locomotion in humanoid robots represents a fundamental challenge in robotics, with far-reaching implications for human-robot interaction, disaster response, and industrial applications. While humanoid robots offer unprecedented potential for operating in human-centric environments, achieving human-like movement patterns remains difficult due to their high degrees of freedom and inherently unstable dynamics[1]. This challenge is further complicated by the fundamental trade-off between achieving precise control and maintaining natural motion qualities.",
|
| 84 |
+
"bbox": [
|
| 85 |
+
81,
|
| 86 |
+
657,
|
| 87 |
+
488,
|
| 88 |
+
809
|
| 89 |
+
],
|
| 90 |
+
"page_idx": 0
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"type": "text",
|
| 94 |
+
"text": "Reinforcement learning (RL) has emerged as a powerful approach for developing locomotion controllers, enabling robots to master complex movements through carefully designed reward functions. These methods often employ a two-stage learning process: first training a teacher policy that relies on privileged information (such as global positions and ground truth environmental parameters) unavailable in real-world settings, then distilling this knowledge into a student",
|
| 95 |
+
"bbox": [
|
| 96 |
+
81,
|
| 97 |
+
810,
|
| 98 |
+
488,
|
| 99 |
+
931
|
| 100 |
+
],
|
| 101 |
+
"page_idx": 0
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"type": "image",
|
| 105 |
+
"img_path": "images/72638f03e41d10c1d413c9874ca8552ffafaa066ff39616757386667a4881f99.jpg",
|
| 106 |
+
"image_caption": [
|
| 107 |
+
"Fig. 1. Gait pattern transitions during forward velocity $(v_{x})$ acceleration from $0.7\\mathrm{m / s}$ to $1.8\\mathrm{m / s}$"
|
| 108 |
+
],
|
| 109 |
+
"image_footnote": [],
|
| 110 |
+
"bbox": [
|
| 111 |
+
509,
|
| 112 |
+
262,
|
| 113 |
+
911,
|
| 114 |
+
441
|
| 115 |
+
],
|
| 116 |
+
"page_idx": 0
|
| 117 |
+
},
|
| 118 |
+
{
|
| 119 |
+
"type": "text",
|
| 120 |
+
"text": "policy that operates solely on realistic sensor observations. While this approach has demonstrated impressive results in terms of agility and precision, it faces two key limitations. First, the reliance on handcrafted rewards requires extensive tuning to accommodate different gaits, stride lengths, and motion parameters across varying speeds. Second, these methods often result in rigid, mechanical movements that lack the fluidity and naturalness characteristic of human motion, limiting their effectiveness in human-centric environments.",
|
| 121 |
+
"bbox": [
|
| 122 |
+
504,
|
| 123 |
+
506,
|
| 124 |
+
911,
|
| 125 |
+
655
|
| 126 |
+
],
|
| 127 |
+
"page_idx": 0
|
| 128 |
+
},
|
| 129 |
+
{
|
| 130 |
+
"type": "text",
|
| 131 |
+
"text": "Recent advances in generative adversarial imitation learning, particularly approaches like Adversarial Motion Prior (AMP) [2], have opened new possibilities for achieving more natural robot movements by leveraging large-scale motion capture datasets such as LaFAN1 [3] and AMASS [4]. These methods employ adversarial training to ensure that robot movements closely match the statistical patterns present in human demonstrations [5]. However, their performance is fundamentally limited by the content and quality of the reference motion data. For instance, learning running behaviors becomes impossible with a dataset containing only walking motions, and acquiring diverse specialized skills often requires expensive motion capture sessions. Furthermore, these methods struggle when motion datasets lack diversity or when retargeting processes introduce artifacts, resulting in brittle behaviors that fail to generalize beyond demonstrated movements.",
|
| 132 |
+
"bbox": [
|
| 133 |
+
504,
|
| 134 |
+
657,
|
| 135 |
+
913,
|
| 136 |
+
912
|
| 137 |
+
],
|
| 138 |
+
"page_idx": 0
|
| 139 |
+
},
|
| 140 |
+
{
|
| 141 |
+
"type": "text",
|
| 142 |
+
"text": "The limitations of both approaches highlight a critical gap",
|
| 143 |
+
"bbox": [
|
| 144 |
+
522,
|
| 145 |
+
916,
|
| 146 |
+
911,
|
| 147 |
+
931
|
| 148 |
+
],
|
| 149 |
+
"page_idx": 0
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"type": "aside_text",
|
| 153 |
+
"text": "arXiv:2503.15082v1 [cs.RO] 19 Mar 2025",
|
| 154 |
+
"bbox": [
|
| 155 |
+
22,
|
| 156 |
+
260,
|
| 157 |
+
57,
|
| 158 |
+
705
|
| 159 |
+
],
|
| 160 |
+
"page_idx": 0
|
| 161 |
+
},
|
| 162 |
+
{
|
| 163 |
+
"type": "text",
|
| 164 |
+
"text": "in humanoid locomotion: the need to combine the precision and adaptability of RL-based controllers with the natural movement qualities captured in human demonstrations. While RL methods can learn complex skills beyond available motion capture data, they struggle with natural movement generation. Conversely, demonstration-based methods excel at producing natural movements but are constrained by the available motion capture data. This complementary nature suggests the potential for combining both approaches, yet traditional methods struggle to bridge this gap due to the fundamental heterogeneity between expert policies trained with handcrafted rewards and the statistical patterns present in human motion datasets.",
|
| 165 |
+
"bbox": [
|
| 166 |
+
86,
|
| 167 |
+
71,
|
| 168 |
+
486,
|
| 169 |
+
265
|
| 170 |
+
],
|
| 171 |
+
"page_idx": 1
|
| 172 |
+
},
|
| 173 |
+
{
|
| 174 |
+
"type": "text",
|
| 175 |
+
"text": "We address these challenges with StyleLoco, introducing a novel Generative Adversarial Distillation (GAD) framework that effectively combines knowledge from heterogeneous sources. Our approach employs a multi-discriminator architecture where separate discriminators simultaneously distill skills from both an RL-trained expert policy and motion capture demonstrations. This design allows the model to preserve the agility and precision of RL while incorporating the natural style of human movements, enabling natural skill execution even for behaviors not present in the motion capture data. Through extensive evaluations in both simulated and real-world environments, we demonstrate that StyleLoco enables humanoid robots to achieve superior locomotion performance compared to traditional approaches while maintaining natural, human-like movement qualities.",
|
| 176 |
+
"bbox": [
|
| 177 |
+
86,
|
| 178 |
+
267,
|
| 179 |
+
486,
|
| 180 |
+
493
|
| 181 |
+
],
|
| 182 |
+
"page_idx": 1
|
| 183 |
+
},
|
| 184 |
+
{
|
| 185 |
+
"type": "text",
|
| 186 |
+
"text": "The key contribution of our work is three-fold.",
|
| 187 |
+
"bbox": [
|
| 188 |
+
102,
|
| 189 |
+
494,
|
| 190 |
+
419,
|
| 191 |
+
508
|
| 192 |
+
],
|
| 193 |
+
"page_idx": 1
|
| 194 |
+
},
|
| 195 |
+
{
|
| 196 |
+
"type": "list",
|
| 197 |
+
"sub_type": "text",
|
| 198 |
+
"list_items": [
|
| 199 |
+
"- A novel GAD framework that enables stable policy distillation from heterogeneous sources, effectively bridging the gap between RL and demonstration-based approaches.",
|
| 200 |
+
"- A multi-discriminator architecture that successfully combines task-oriented control objectives with natural motion patterns, achieving both high performance and human-like movement qualities.",
|
| 201 |
+
"- Comprehensive validation through real-world deployment on the Unitree H1 humanoid robot, demonstrating robust and natural motion across diverse locomotion tasks and speeds."
|
| 202 |
+
],
|
| 203 |
+
"bbox": [
|
| 204 |
+
102,
|
| 205 |
+
513,
|
| 206 |
+
486,
|
| 207 |
+
693
|
| 208 |
+
],
|
| 209 |
+
"page_idx": 1
|
| 210 |
+
},
|
| 211 |
+
{
|
| 212 |
+
"type": "text",
|
| 213 |
+
"text": "II. RELATED WORKS",
|
| 214 |
+
"text_level": 1,
|
| 215 |
+
"bbox": [
|
| 216 |
+
200,
|
| 217 |
+
708,
|
| 218 |
+
370,
|
| 219 |
+
720
|
| 220 |
+
],
|
| 221 |
+
"page_idx": 1
|
| 222 |
+
},
|
| 223 |
+
{
|
| 224 |
+
"type": "text",
|
| 225 |
+
"text": "A. Humanoid Robot Locomotion",
|
| 226 |
+
"text_level": 1,
|
| 227 |
+
"bbox": [
|
| 228 |
+
86,
|
| 229 |
+
729,
|
| 230 |
+
305,
|
| 231 |
+
743
|
| 232 |
+
],
|
| 233 |
+
"page_idx": 1
|
| 234 |
+
},
|
| 235 |
+
{
|
| 236 |
+
"type": "text",
|
| 237 |
+
"text": "Locomotion is a critical aspect in the motion control in humanoid robots. Traditional methods typically achieve stable movement by formulating the robot's dynamics model as constrained trajectory optimization problems [6]. Model Predictive Control (MPC) [7], [8], [9] is then employed in real-time to adjust and execute this trajectory, enabling adaption to dynamic environmental changes. However, these model-based methods usually rely heavily on precise modeling of robot dynamic properties [10], [11], [12], [13], [14] and environmental conditions [15], [16], [17], [18], [12], [19], [20], [21], [22], which leads to vulnerabilities in real-world performance, especially when there is a substantial",
|
| 238 |
+
"bbox": [
|
| 239 |
+
86,
|
| 240 |
+
751,
|
| 241 |
+
486,
|
| 242 |
+
929
|
| 243 |
+
],
|
| 244 |
+
"page_idx": 1
|
| 245 |
+
},
|
| 246 |
+
{
|
| 247 |
+
"type": "text",
|
| 248 |
+
"text": "discrepancy between the applied environments and the predefined conditions [23]. Thus, the optimization problem for humanoid robots is slow to resolve due to the complexity of high-dimensional state and action spaces, rendering it challenging to satisfy the demands for real-time performance and stability.",
|
| 249 |
+
"bbox": [
|
| 250 |
+
509,
|
| 251 |
+
71,
|
| 252 |
+
911,
|
| 253 |
+
161
|
| 254 |
+
],
|
| 255 |
+
"page_idx": 1
|
| 256 |
+
},
|
| 257 |
+
{
|
| 258 |
+
"type": "text",
|
| 259 |
+
"text": "Recently, reinforcement learning (RL) has emerged as a promising paradigm for humanoid locomotion tasks. These methods design tailored reward functions to guide \"try and error\" feedback-based learning process. For instance, reward functions are often crafted to encourage stable walking, minimize energy consumption, or optimize trajectory tracking [24]. However, designing effective reward functions is non-trivial and often requires extensive domain expertise especially for particular locomotion gaits. Natural locomotion motions require different gaits for varying movement speeds, making the design of the reward function even more challenging. Moreover, the numerous rewards terms must strike a delicate balance between competing objectives. To alleviate these drawbacks, we incorporate diverse reference locomotion motions as style guidance to simplify the reward components and encourage the policy learn versatile gaits.",
|
| 260 |
+
"bbox": [
|
| 261 |
+
509,
|
| 262 |
+
162,
|
| 263 |
+
911,
|
| 264 |
+
402
|
| 265 |
+
],
|
| 266 |
+
"page_idx": 1
|
| 267 |
+
},
|
| 268 |
+
{
|
| 269 |
+
"type": "text",
|
| 270 |
+
"text": "B. Imitation Learning for Humanoid locomotion",
|
| 271 |
+
"text_level": 1,
|
| 272 |
+
"bbox": [
|
| 273 |
+
509,
|
| 274 |
+
425,
|
| 275 |
+
834,
|
| 276 |
+
436
|
| 277 |
+
],
|
| 278 |
+
"page_idx": 1
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "text",
|
| 282 |
+
"text": "The fundamental challenges in learning high-dimensional, underactuated robotic systems include precise task specification and effective exploration. Imitation learning (IL) is a method that learns from expert demonstrations, effectively addressing challenges related to quantifying rewards. Unlike pure reinforcement learning, IL can directly leverage offline expert data to guide policy learning, significantly reducing the exploration space and obtaining dense rewards. This approach is particularly effective in real-world robotics and complex task scenarios. Typically, it involves directly following reference trajectories through motion tracking. Generative Adversarial Imitation Learning (GAIL) [25] has been applied to locomotion tasks. The traditional imitation learning method, as mentioned above, is limited in flexibility—it can only replicate reference trajectories and cannot adapt to downstream tasks. To address this limitation, AMP [2] introduces the concept of learning the style from reference motion as a constraint, guiding the policy learning process.",
|
| 283 |
+
"bbox": [
|
| 284 |
+
509,
|
| 285 |
+
446,
|
| 286 |
+
911,
|
| 287 |
+
718
|
| 288 |
+
],
|
| 289 |
+
"page_idx": 1
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "text",
|
| 293 |
+
"text": "However, this paradigm heavily relies on expert demonstrations, and its performance can significantly degrade when the quality of demonstrations is poor or when the task changes. Since IL strategies are directly derived from the demonstrations, they are prone to overfitting to the demonstration data. As a result, when faced with novel situations, IL may lack sufficient generalization ability. Furthermore, due to the morphological differences between humanoid robots and humans, obtaining high-quality reference data proves challenging, resulting in datasets that can only encompass a limited range of instructions. This scarcity of data can compromise the stability of Generative Adversarial Imitation Learning (GAIL), leading to mode collapse. To mitigate these challenges, we supplement the expert policy as a",
|
| 294 |
+
"bbox": [
|
| 295 |
+
509,
|
| 296 |
+
720,
|
| 297 |
+
911,
|
| 298 |
+
929
|
| 299 |
+
],
|
| 300 |
+
"page_idx": 1
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "text",
|
| 304 |
+
"text": "reference motion, providing additional motion references to achieve a stable omnidirectional movement strategy.",
|
| 305 |
+
"bbox": [
|
| 306 |
+
81,
|
| 307 |
+
70,
|
| 308 |
+
488,
|
| 309 |
+
102
|
| 310 |
+
],
|
| 311 |
+
"page_idx": 2
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"text": "C. Deployable Policy Distillation",
|
| 316 |
+
"text_level": 1,
|
| 317 |
+
"bbox": [
|
| 318 |
+
83,
|
| 319 |
+
114,
|
| 320 |
+
313,
|
| 321 |
+
131
|
| 322 |
+
],
|
| 323 |
+
"page_idx": 2
|
| 324 |
+
},
|
| 325 |
+
{
|
| 326 |
+
"type": "text",
|
| 327 |
+
"text": "In robotic locomotion control, distillation is a method that transfers knowledge from teacher policies with privileged information (e.g., full-state dynamics, simulated ground-truth forces, or ideal state estimators) to student policies for real-world deployment. This knowledge transfer enables the student to leverage the teacher's expertise while operating under real-world constraints, such as partial observation or limited sensory inputs. There are two main approaches to distillation:",
|
| 328 |
+
"bbox": [
|
| 329 |
+
81,
|
| 330 |
+
136,
|
| 331 |
+
488,
|
| 332 |
+
271
|
| 333 |
+
],
|
| 334 |
+
"page_idx": 2
|
| 335 |
+
},
|
| 336 |
+
{
|
| 337 |
+
"type": "text",
|
| 338 |
+
"text": "BC methods[26], [27] learn by mimicking the teacher's actions using supervised learning on state-action pairs. BC achieves effective performance when the student operates within the teacher's training distribution, as it directly replicates the teacher's behavior under familiar conditions. However, its performance degrades sharply with \"compounding error\" [28] in out-of-distribution (OOD) scenarios (e.g., environmental perturbations, actuator noise, or unseen terrains), as BC inherently lacks the capacity to self-correct deviations from the teacher's demonstration space. This limitation arises because BC relies solely on static datasets of teacher demonstrations, without mechanisms to adapt to novel or unexpected situations.",
|
| 339 |
+
"bbox": [
|
| 340 |
+
81,
|
| 341 |
+
273,
|
| 342 |
+
488,
|
| 343 |
+
469
|
| 344 |
+
],
|
| 345 |
+
"page_idx": 2
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"type": "text",
|
| 349 |
+
"text": "Another popular approach is online distillation via Dataset Aggregation (DAgger) [29], which addresses BC's limitations by iteratively aggregating student-generated trajectories with teacher-corrected actions. Recently, DAgger and its derivative strategies have stood out as a promising distillation approach for humanoid robot [30], [31], [32], [33] to acquire deployable policies. During training, the student policy interacts with the environment, while the teacher provides corrective feedback on the student's actions, enabling the student to refine its policy over multiple iterations. This interactive process mitigates distributional shift and improves robustness to OOD scenarios. However, DAgger still faces a fundamental challenge: the student lacks access to the teacher's privileged information (e.g., simulated contact forces, ideal state estimators, or full-state dynamics). As a result, under partial observation or incomplete environmental feedback, the student struggles to fully replicate the teacher's actions. [24]",
|
| 350 |
+
"bbox": [
|
| 351 |
+
81,
|
| 352 |
+
470,
|
| 353 |
+
488,
|
| 354 |
+
742
|
| 355 |
+
],
|
| 356 |
+
"page_idx": 2
|
| 357 |
+
},
|
| 358 |
+
{
|
| 359 |
+
"type": "text",
|
| 360 |
+
"text": "III. METHOD",
|
| 361 |
+
"text_level": 1,
|
| 362 |
+
"bbox": [
|
| 363 |
+
230,
|
| 364 |
+
757,
|
| 365 |
+
341,
|
| 366 |
+
771
|
| 367 |
+
],
|
| 368 |
+
"page_idx": 2
|
| 369 |
+
},
|
| 370 |
+
{
|
| 371 |
+
"type": "text",
|
| 372 |
+
"text": "StyleLoco is a novel approach for learning deployable natural locomotion skills that effectively combines the precision of RL-based controllers with the naturalness of human demonstrations. At its core, StyleLoco employs our proposed Generative Adversarial Distillation (GAD) framework, which uses a unique double-discriminator architecture to distill knowledge from both an RL-trained teacher policy and human motion demonstrations into a deployable student policy. Through adversarial learning, our approach generates naturalistic motions beyond the constraints of available",
|
| 373 |
+
"bbox": [
|
| 374 |
+
81,
|
| 375 |
+
780,
|
| 376 |
+
490,
|
| 377 |
+
933
|
| 378 |
+
],
|
| 379 |
+
"page_idx": 2
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"type": "text",
|
| 383 |
+
"text": "motion capture data while avoiding the artificial behaviors typically resulting from hand-crafted rewards.",
|
| 384 |
+
"bbox": [
|
| 385 |
+
504,
|
| 386 |
+
70,
|
| 387 |
+
911,
|
| 388 |
+
99
|
| 389 |
+
],
|
| 390 |
+
"page_idx": 2
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"type": "text",
|
| 394 |
+
"text": "StyleLoco consists of three key components: (1) a teacher policy trained with privileged information to achieve robust omnidirectional locomotion, (2) a motion dataset containing natural human movements, and (3) our novel GAD framework that combines these sources to train a deployable student policy. The framework's innovation lies in its ability to generate natural behaviors beyond what either source can achieve alone - overcoming both the limited coverage of motion datasets and the unnatural movements that emerge from pure RL training.",
|
| 395 |
+
"bbox": [
|
| 396 |
+
504,
|
| 397 |
+
101,
|
| 398 |
+
913,
|
| 399 |
+
251
|
| 400 |
+
],
|
| 401 |
+
"page_idx": 2
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"type": "text",
|
| 405 |
+
"text": "To achieve this, StyleLoco employs two discriminators that work in concert to adversarially shape the student policy's behavior. One discriminator ensures the policy can replicate the robust performance of the teacher, while the other maintains consistency with natural human motion patterns. This dual-discriminator approach simultaneously serves two purposes: expanding the range of natural behaviors beyond the demonstration data, and distilling the teacher's capabilities into a deployable policy. The resulting system produces controllers that are both highly capable and naturally moving, without being constrained to demonstrated behaviors or exhibiting artifacts from hand-crafted rewards.",
|
| 406 |
+
"bbox": [
|
| 407 |
+
504,
|
| 408 |
+
252,
|
| 409 |
+
911,
|
| 410 |
+
434
|
| 411 |
+
],
|
| 412 |
+
"page_idx": 2
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"type": "text",
|
| 416 |
+
"text": "A. Preliminaries",
|
| 417 |
+
"text_level": 1,
|
| 418 |
+
"bbox": [
|
| 419 |
+
504,
|
| 420 |
+
441,
|
| 421 |
+
624,
|
| 422 |
+
455
|
| 423 |
+
],
|
| 424 |
+
"page_idx": 2
|
| 425 |
+
},
|
| 426 |
+
{
|
| 427 |
+
"type": "text",
|
| 428 |
+
"text": "1) Reinforcement Learning: We formulate humanoid locomotion control as a Partially Observable Markov Decision Process (POMDP) defined by tuple $\\langle S, \\mathcal{A}, T, \\mathcal{O}, R, \\gamma \\rangle$ , where $\\mathcal{S}$ represents the full state space, $\\mathcal{O}$ denotes partial observations available to the robot, $\\mathcal{A}$ is the action space, $T(s'|s, a)$ describes state transitions, $R(s, a)$ defines the reward function, and $\\gamma \\in (0, 1]$ is the discount factor. The goal is to learn a policy $\\pi(a|o)$ that maximizes expected discounted returns while operating only on partial observations $o \\in \\mathcal{O}$ .",
|
| 429 |
+
"bbox": [
|
| 430 |
+
504,
|
| 431 |
+
460,
|
| 432 |
+
913,
|
| 433 |
+
595
|
| 434 |
+
],
|
| 435 |
+
"page_idx": 2
|
| 436 |
+
},
|
| 437 |
+
{
|
| 438 |
+
"type": "text",
|
| 439 |
+
"text": "The locomotion task requires tracking commanded velocities $v^{*} = (v_{x}^{*}, v_{y}^{*}, \\omega_{z}^{*})$ , where $(v_{x}^{*}, v_{y}^{*})$ specify desired linear velocities in local coordinate frame and $\\omega_{z}^{*}$ defines the desired yaw rate. Following [34], we use the reward function:",
|
| 440 |
+
"bbox": [
|
| 441 |
+
504,
|
| 442 |
+
597,
|
| 443 |
+
913,
|
| 444 |
+
657
|
| 445 |
+
],
|
| 446 |
+
"page_idx": 2
|
| 447 |
+
},
|
| 448 |
+
{
|
| 449 |
+
"type": "equation",
|
| 450 |
+
"text": "\n$$\nr _ {\\text {t a s k}} (e, \\lambda) := \\exp (- \\lambda \\cdot \\| e \\| ^ {2})\n$$\n",
|
| 451 |
+
"text_format": "latex",
|
| 452 |
+
"bbox": [
|
| 453 |
+
609,
|
| 454 |
+
662,
|
| 455 |
+
808,
|
| 456 |
+
681
|
| 457 |
+
],
|
| 458 |
+
"page_idx": 2
|
| 459 |
+
},
|
| 460 |
+
{
|
| 461 |
+
"type": "text",
|
| 462 |
+
"text": "where $e$ represents tracking errors and $\\lambda$ controls their relative importance.",
|
| 463 |
+
"bbox": [
|
| 464 |
+
504,
|
| 465 |
+
688,
|
| 466 |
+
911,
|
| 467 |
+
718
|
| 468 |
+
],
|
| 469 |
+
"page_idx": 2
|
| 470 |
+
},
|
| 471 |
+
{
|
| 472 |
+
"type": "text",
|
| 473 |
+
"text": "2) Generative Adversarial Imitation Learning: Generative Adversarial Imitation Learning (GAIL) learns to mimic expert behavior through adversarial training. Given a dataset of expert demonstrations $\\mathcal{M} = (s_i, a_i)$ consisting of state-action pairs, GAIL trains a policy $\\pi(a|s)$ that generates actions $a'$ for given states $s'$ . A discriminator network $\\mathcal{D}$ is employed to distinguish between state-action pairs $(s, a)$ from the expert demonstrations and those produced by the policy $\\pi$ . The reward function used to train the policy is then given by:",
|
| 474 |
+
"bbox": [
|
| 475 |
+
504,
|
| 476 |
+
719,
|
| 477 |
+
913,
|
| 478 |
+
869
|
| 479 |
+
],
|
| 480 |
+
"page_idx": 2
|
| 481 |
+
},
|
| 482 |
+
{
|
| 483 |
+
"type": "equation",
|
| 484 |
+
"text": "\n$$\nr _ {\\mathrm {G A I L}} (s, a) = - \\log \\left(1 - \\mathcal {D} (s, a)\\right)\n$$\n",
|
| 485 |
+
"text_format": "latex",
|
| 486 |
+
"bbox": [
|
| 487 |
+
596,
|
| 488 |
+
877,
|
| 489 |
+
823,
|
| 490 |
+
893
|
| 491 |
+
],
|
| 492 |
+
"page_idx": 2
|
| 493 |
+
},
|
| 494 |
+
{
|
| 495 |
+
"type": "text",
|
| 496 |
+
"text": "Adversarial Motion Prior (AMP) [2] extends this framework to handle settings where only state information is",
|
| 497 |
+
"bbox": [
|
| 498 |
+
504,
|
| 499 |
+
901,
|
| 500 |
+
913,
|
| 501 |
+
931
|
| 502 |
+
],
|
| 503 |
+
"page_idx": 2
|
| 504 |
+
},
|
| 505 |
+
{
|
| 506 |
+
"type": "image",
|
| 507 |
+
"img_path": "images/8bbd19b31033f6015bc0594ca6a2c9b6e5d63779241e9e6e37ab39029cc089a5.jpg",
|
| 508 |
+
"image_caption": [
|
| 509 |
+
"Fig. 2. Overview of the proposed Generative Adversarial Distillation (GAD) framework. Two discriminators separately evaluate the similarity of generated motions against a teacher policy and reference motion dataset, enabling the synthesis of natural and adaptive behaviors."
|
| 510 |
+
],
|
| 511 |
+
"image_footnote": [],
|
| 512 |
+
"bbox": [
|
| 513 |
+
91,
|
| 514 |
+
73,
|
| 515 |
+
480,
|
| 516 |
+
276
|
| 517 |
+
],
|
| 518 |
+
"page_idx": 3
|
| 519 |
+
},
|
| 520 |
+
{
|
| 521 |
+
"type": "text",
|
| 522 |
+
"text": "available in the demonstrations. Instead of operating on state-action pairs, AMP's discriminator evaluates state transitions $(s,s^{\\prime})$ , enabling imitation learning from state-only demonstrations. Additionally, AMP employs a least-squares discriminator [35], replacing the traditional binary cross-entropy loss, which has been empirically shown to provide more stable adversarial training dynamics.",
|
| 523 |
+
"bbox": [
|
| 524 |
+
81,
|
| 525 |
+
367,
|
| 526 |
+
488,
|
| 527 |
+
473
|
| 528 |
+
],
|
| 529 |
+
"page_idx": 3
|
| 530 |
+
},
|
| 531 |
+
{
|
| 532 |
+
"type": "text",
|
| 533 |
+
"text": "B. Generative Adversarial Distillation",
|
| 534 |
+
"text_level": 1,
|
| 535 |
+
"bbox": [
|
| 536 |
+
83,
|
| 537 |
+
481,
|
| 538 |
+
346,
|
| 539 |
+
494
|
| 540 |
+
],
|
| 541 |
+
"page_idx": 3
|
| 542 |
+
},
|
| 543 |
+
{
|
| 544 |
+
"type": "text",
|
| 545 |
+
"text": "The core innovation of StyleLoco is our GAD framework, which synthesizes natural and adaptive behaviors from two complementary sources: a well-trained teacher policy and a reference motion dataset. As illustrated in Fig. 2, GAD trains a student policy $\\pi_{\\mathrm{student}}$ alongside two AMP-style discriminators, $\\mathcal{D}_{\\mathrm{teacher}}$ and $\\mathcal{D}_{\\mathrm{dataset}}$ . Each discriminator evaluates the student's generated state transitions against one source of reference motions: either the teacher policy or the motion dataset.",
|
| 546 |
+
"bbox": [
|
| 547 |
+
81,
|
| 548 |
+
500,
|
| 549 |
+
488,
|
| 550 |
+
635
|
| 551 |
+
],
|
| 552 |
+
"page_idx": 3
|
| 553 |
+
},
|
| 554 |
+
{
|
| 555 |
+
"type": "text",
|
| 556 |
+
"text": "Training proceeds in an interleaving manner, alternating between updating the student policy and the discriminators. In each iteration, we first update the student policy using the combined feedback from both discriminators and then train both discriminators to better distinguish between the student's outputs and their respective reference motions.",
|
| 557 |
+
"bbox": [
|
| 558 |
+
81,
|
| 559 |
+
636,
|
| 560 |
+
488,
|
| 561 |
+
726
|
| 562 |
+
],
|
| 563 |
+
"page_idx": 3
|
| 564 |
+
},
|
| 565 |
+
{
|
| 566 |
+
"type": "text",
|
| 567 |
+
"text": "The teacher discriminator $\\mathcal{D}_{\\mathrm{teacher}}$ optimizes:",
|
| 568 |
+
"bbox": [
|
| 569 |
+
99,
|
| 570 |
+
727,
|
| 571 |
+
401,
|
| 572 |
+
742
|
| 573 |
+
],
|
| 574 |
+
"page_idx": 3
|
| 575 |
+
},
|
| 576 |
+
{
|
| 577 |
+
"type": "equation",
|
| 578 |
+
"text": "\n$$\n\\begin{array}{l} \\arg \\min _ {\\mathcal {D} _ {\\text {t e a c h e r}}} \\mathbb {E} _ {(s, s ^ {\\prime}) \\sim \\pi_ {\\text {t e a c h e r}}} \\left[ \\left(\\mathcal {D} _ {\\text {t e a c h e r}} (s, s ^ {\\prime}) - 1\\right) ^ {2} \\right] \\\\ + \\mathbb {E} _ {(s, s ^ {\\prime}) \\sim \\pi_ {\\text {s t u d e n t}}} \\left[ \\left(\\mathcal {D} _ {\\text {t e a c h e r}} (s, s ^ {\\prime}) + 1\\right) ^ {2} \\right] \\\\ + \\lambda \\mathbb {E} _ {(s, s ^ {\\prime}) \\sim \\pi_ {\\text {t e a c h e r}}} \\left[ \\| \\nabla_ {(s, s ^ {\\prime})} \\mathcal {D} _ {\\text {t e a c h e r}} (s, s ^ {\\prime}) \\| ^ {2} \\right], \\\\ \\end{array}\n$$\n",
|
| 579 |
+
"text_format": "latex",
|
| 580 |
+
"bbox": [
|
| 581 |
+
107,
|
| 582 |
+
746,
|
| 583 |
+
464,
|
| 584 |
+
821
|
| 585 |
+
],
|
| 586 |
+
"page_idx": 3
|
| 587 |
+
},
|
| 588 |
+
{
|
| 589 |
+
"type": "text",
|
| 590 |
+
"text": "while the reference discriminator $\\mathcal{D}_{\\mathrm{dataset}}$ ensures natural motion qualities by optimizing:",
|
| 591 |
+
"bbox": [
|
| 592 |
+
81,
|
| 593 |
+
825,
|
| 594 |
+
488,
|
| 595 |
+
854
|
| 596 |
+
],
|
| 597 |
+
"page_idx": 3
|
| 598 |
+
},
|
| 599 |
+
{
|
| 600 |
+
"type": "equation",
|
| 601 |
+
"text": "\n$$\n\\begin{array}{l} \\arg \\min _ {\\mathcal {D} _ {\\text {d a t a s e t}}} \\mathbb {E} _ {(s, s ^ {\\prime}) \\sim \\mathcal {M}} \\left[ \\left(\\mathcal {D} _ {\\text {d a t a s e t}} (s, s ^ {\\prime}) - 1\\right) ^ {2} \\right] \\\\ + \\mathbb {E} _ {(s, s ^ {\\prime}) \\sim \\pi_ {\\text {s t u d e n t}}} \\left[ \\left(\\mathcal {D} _ {\\text {d a t a s e t}} (s, s ^ {\\prime}) + 1\\right) ^ {2} \\right] \\\\ + \\lambda \\mathbb {E} _ {(s, s ^ {\\prime}) \\sim \\mathcal {M}} \\left[ \\| \\nabla_ {(s, s ^ {\\prime})} \\mathcal {D} _ {\\text {d a t a s e t}} (s, s ^ {\\prime}) \\| ^ {2} \\right], \\\\ \\end{array}\n$$\n",
|
| 602 |
+
"text_format": "latex",
|
| 603 |
+
"bbox": [
|
| 604 |
+
114,
|
| 605 |
+
859,
|
| 606 |
+
452,
|
| 607 |
+
934
|
| 608 |
+
],
|
| 609 |
+
"page_idx": 3
|
| 610 |
+
},
|
| 611 |
+
{
|
| 612 |
+
"type": "text",
|
| 613 |
+
"text": "where $\\lambda$ controls the gradient penalty term that ensures stable training.",
|
| 614 |
+
"bbox": [
|
| 615 |
+
504,
|
| 616 |
+
71,
|
| 617 |
+
911,
|
| 618 |
+
99
|
| 619 |
+
],
|
| 620 |
+
"page_idx": 3
|
| 621 |
+
},
|
| 622 |
+
{
|
| 623 |
+
"type": "text",
|
| 624 |
+
"text": "The student policy learns from a combined reward function:",
|
| 625 |
+
"bbox": [
|
| 626 |
+
504,
|
| 627 |
+
101,
|
| 628 |
+
911,
|
| 629 |
+
128
|
| 630 |
+
],
|
| 631 |
+
"page_idx": 3
|
| 632 |
+
},
|
| 633 |
+
{
|
| 634 |
+
"type": "equation",
|
| 635 |
+
"text": "\n$$\nr = r _ {\\text {t a s k}} + w _ {\\text {t e a c h e r}} \\cdot r _ {\\text {t e a c h e r}} + w _ {\\text {d a t a s e t}} \\cdot r _ {\\text {d a t a s e t}},\n$$\n",
|
| 636 |
+
"text_format": "latex",
|
| 637 |
+
"bbox": [
|
| 638 |
+
558,
|
| 639 |
+
142,
|
| 640 |
+
859,
|
| 641 |
+
156
|
| 642 |
+
],
|
| 643 |
+
"page_idx": 3
|
| 644 |
+
},
|
| 645 |
+
{
|
| 646 |
+
"type": "text",
|
| 647 |
+
"text": "where the discriminator rewards are computed as:",
|
| 648 |
+
"bbox": [
|
| 649 |
+
506,
|
| 650 |
+
164,
|
| 651 |
+
846,
|
| 652 |
+
179
|
| 653 |
+
],
|
| 654 |
+
"page_idx": 3
|
| 655 |
+
},
|
| 656 |
+
{
|
| 657 |
+
"type": "equation",
|
| 658 |
+
"text": "\n$$\nr _ {\\text {t e a c h e r}} = \\max \\left[ 0, \\quad 1 - 0. 2 5 \\left(\\mathcal {D} _ {\\text {t e a c h e r}} (s, s ^ {\\prime}) - 1\\right) ^ {2} \\right]\n$$\n",
|
| 659 |
+
"text_format": "latex",
|
| 660 |
+
"bbox": [
|
| 661 |
+
540,
|
| 662 |
+
186,
|
| 663 |
+
875,
|
| 664 |
+
205
|
| 665 |
+
],
|
| 666 |
+
"page_idx": 3
|
| 667 |
+
},
|
| 668 |
+
{
|
| 669 |
+
"type": "equation",
|
| 670 |
+
"text": "\n$$\nr _ {\\text {d a t a s e t}} = \\max \\left[ 0, \\quad 1 - 0. 2 5 \\left(\\mathcal {D} _ {\\text {d a t a s e t}} (s, s ^ {\\prime}) - 1\\right) ^ {2} \\right]\n$$\n",
|
| 671 |
+
"text_format": "latex",
|
| 672 |
+
"bbox": [
|
| 673 |
+
542,
|
| 674 |
+
212,
|
| 675 |
+
872,
|
| 676 |
+
229
|
| 677 |
+
],
|
| 678 |
+
"page_idx": 3
|
| 679 |
+
},
|
| 680 |
+
{
|
| 681 |
+
"type": "text",
|
| 682 |
+
"text": "Both discriminators process state transitions using a consistent feature set comprising joint positions and velocities, root linear and angular velocities in the robot's local frame, base link orientation (roll and pitch), and root height. This common representation enables effective comparison across different motion sources while capturing the essential characteristics of locomotion behavior.",
|
| 683 |
+
"bbox": [
|
| 684 |
+
504,
|
| 685 |
+
237,
|
| 686 |
+
911,
|
| 687 |
+
340
|
| 688 |
+
],
|
| 689 |
+
"page_idx": 3
|
| 690 |
+
},
|
| 691 |
+
{
|
| 692 |
+
"type": "text",
|
| 693 |
+
"text": "Deployable Policy Distillation A key aspect of our framework is enabling the student policy $\\pi_{\\mathrm{student}}$ to generate actions when privileged observations are unavailable in real-world deployment. While the teacher policy benefits from privileged information during training to better understand task objectives and achieve efficient convergence, the student policy must learn to generate appropriate actions using only deployable sensor observations. This asymmetric approach allows us to leverage rich state information during training while ensuring the final policy remains deployable. The specific observations available to the student policy are detailed in Table I.",
|
| 694 |
+
"bbox": [
|
| 695 |
+
504,
|
| 696 |
+
343,
|
| 697 |
+
913,
|
| 698 |
+
523
|
| 699 |
+
],
|
| 700 |
+
"page_idx": 3
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"type": "text",
|
| 704 |
+
"text": "C. Training Process",
|
| 705 |
+
"text_level": 1,
|
| 706 |
+
"bbox": [
|
| 707 |
+
506,
|
| 708 |
+
534,
|
| 709 |
+
648,
|
| 710 |
+
547
|
| 711 |
+
],
|
| 712 |
+
"page_idx": 3
|
| 713 |
+
},
|
| 714 |
+
{
|
| 715 |
+
"type": "text",
|
| 716 |
+
"text": "Curriculum Learning Teacher policy $\\pi_{\\text{teacher}}$ training adopts a curriculum learning approach comprised of two distinct phases. The initial stability phase prioritizes maintaining balance and preventing falls, establishing fundamental stability behaviors. This is followed by the mobility phase, where the policy develops comprehensive omnidirectional locomotion capabilities. The specific reward components for each phase are detailed in Table II.",
|
| 717 |
+
"bbox": [
|
| 718 |
+
504,
|
| 719 |
+
553,
|
| 720 |
+
911,
|
| 721 |
+
672
|
| 722 |
+
],
|
| 723 |
+
"page_idx": 3
|
| 724 |
+
},
|
| 725 |
+
{
|
| 726 |
+
"type": "text",
|
| 727 |
+
"text": "Demonstration Data Preparation The locomotion motion data in this work is sourced from the LaFAN1 dataset and meticulously retargeted to conform to the kinematic specifications of Unitree H1 robots. While this dataset offers diverse motion styles and velocity ranges, utilizing all demonstrations simultaneously introduces ambiguity in the learning process. To facilitate distinct gait style demonstrations across different velocity commands, we strategically selected motion clips with minimal or non-overlapping velocity ranges, ensuring a relatively clear behavioral boundaries between different locomotion patterns.",
|
| 728 |
+
"bbox": [
|
| 729 |
+
504,
|
| 730 |
+
674,
|
| 731 |
+
913,
|
| 732 |
+
840
|
| 733 |
+
],
|
| 734 |
+
"page_idx": 3
|
| 735 |
+
},
|
| 736 |
+
{
|
| 737 |
+
"type": "text",
|
| 738 |
+
"text": "Asymmetric Actor-critic Architecture Student policy training utilizes an asymmetric actor-critic architecture to effectively handle partial observability in real-world conditions. The student's observation processing begins with temporal partial observations $o_{t}^{N} = [o_{t - n}, o_{t - n + 1} \\dots o_{t}]^{T}$ . These observations are first processed through a partial states",
|
| 739 |
+
"bbox": [
|
| 740 |
+
504,
|
| 741 |
+
840,
|
| 742 |
+
913,
|
| 743 |
+
931
|
| 744 |
+
],
|
| 745 |
+
"page_idx": 3
|
| 746 |
+
},
|
| 747 |
+
{
|
| 748 |
+
"type": "table",
|
| 749 |
+
"img_path": "images/c61b7da2cee45634b4ff5dd3d7fd6886b7d93c4b4aec7f8bffc6c4632ae6a1bb.jpg",
|
| 750 |
+
"table_caption": [
|
| 751 |
+
"TABLEI AVAILABLE OBSERVATIONS IN TRAINING"
|
| 752 |
+
],
|
| 753 |
+
"table_footnote": [
|
| 754 |
+
"Notes:",
|
| 755 |
+
"- Phase: Indicates the phase of motion, serving as a temporal marker.",
|
| 756 |
+
"- Diff: Difference between current joint angular position and reference joint angular position, calculated based on Phase.",
|
| 757 |
+
"- ContactStatus: Information regarding the stance mask and feet contact forces."
|
| 758 |
+
],
|
| 759 |
+
"table_body": "<table><tr><td>Sources</td><td>Phase</td><td>CmdVel</td><td>DoFPos</td><td>DoFVel</td><td>LastAction</td><td>Diff</td><td>BaseLinVel</td><td>BaseAngVel</td><td>RPY</td><td>Root Height</td><td>Push</td><td>Fraction</td><td>BodyMass</td><td>ContactStatus</td></tr><tr><td>Teacher</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Dataset</td><td></td><td></td><td>✓</td><td>✓</td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td></td></tr><tr><td>Student</td><td></td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td>✓</td><td>✓</td><td></td><td></td><td></td><td></td><td></td></tr></table>",
|
| 760 |
+
"bbox": [
|
| 761 |
+
84,
|
| 762 |
+
102,
|
| 763 |
+
911,
|
| 764 |
+
152
|
| 765 |
+
],
|
| 766 |
+
"page_idx": 4
|
| 767 |
+
},
|
| 768 |
+
{
|
| 769 |
+
"type": "text",
|
| 770 |
+
"text": "encoder $\\mathcal{E}$ to generate context latent representations, which are then combined with the current partial state observations and the velocity command. The resulting combined representation passes through MLP layers to produce the final control actions.",
|
| 771 |
+
"bbox": [
|
| 772 |
+
81,
|
| 773 |
+
218,
|
| 774 |
+
488,
|
| 775 |
+
294
|
| 776 |
+
],
|
| 777 |
+
"page_idx": 4
|
| 778 |
+
},
|
| 779 |
+
{
|
| 780 |
+
"type": "table",
|
| 781 |
+
"img_path": "images/1e898ae805a8a226adc31b09bee1e5ca4e2a29f36e3598c9b0229868b39764ee.jpg",
|
| 782 |
+
"table_caption": [
|
| 783 |
+
"TABLE II REWARD DEFINITIONS USED IN TEACHER POLICY TRAINING."
|
| 784 |
+
],
|
| 785 |
+
"table_footnote": [
|
| 786 |
+
"Notes:",
|
| 787 |
+
"- $\\mathbb{I}_A = 1$ if $A = true$ and $\\mathbb{I}_A = 0$ otherwise.",
|
| 788 |
+
"The maximum allowable feet contact force $F_{\\mathrm{max}}$ is set to 550N"
|
| 789 |
+
],
|
| 790 |
+
"table_body": "<table><tr><td>Term</td><td>Definition</td><td>Weight</td></tr><tr><td colspan=\"3\">First Stage</td></tr><tr><td>Termination</td><td>termination = Ireset - Ittimeout</td><td>-1000</td></tr><tr><td>Linear Velocity Tracking</td><td>exp(-||xxy|2/0.1)</td><td>10</td></tr><tr><td>Angular Velocity Tracking</td><td>exp(-||u|2/0.1)</td><td>10</td></tr><tr><td>Linear Velocity z</td><td>||vz||2</td><td>-1.0</td></tr><tr><td>R-P Angular Velocity</td><td>||ωxy||2</td><td>-0.5</td></tr><tr><td>Orientation</td><td>Σi∈{x,y} (projected gravityi)2</td><td>-1.0</td></tr><tr><td>Base Height</td><td>exp(-100|hbase-htarget|) where hbase=zroot-(feet-0.08)</td><td>0.5</td></tr><tr><td>Action Rate</td><td>||at-at-1||2</td><td>-0.01</td></tr><tr><td>Energy Square</td><td>Σi=10(τiq̂i)21+||cxy||2</td><td>-5e-6</td></tr><tr><td>Stand Still</td><td>(Σ|q-qdefault|·Istand</td><td>-1</td></tr><tr><td>Feet Clearance</td><td>Σi||hfeet,i-htarget|<0.01|·(1-gait phasei)</td><td>2.5</td></tr><tr><td>Feet Contact Number</td><td>mean(Πcontact=stance mask)-Π(contact≠stance mask)</td><td>1</td></tr><tr><td>Default Joint Position</td><td>||q[1:2]-qdefault||2+||q[6:7]-qdefault||2</td><td>0.5</td></tr><tr><td>Action Smoothness</td><td>||at-2-2at-1+at||2</td><td>-0.001</td></tr><tr><td>Feet Slip</td><td>1-Σi exp(-||vxy|i||2)</td><td>-0.05</td></tr><tr><td>Reference Joint Position</td><td>exp(-2||q-qref||2)-0.5min(||q-qref||2,0.5)</td><td>10</td></tr><tr><td>Pelvis-Angle y Distance</td><td>(||ypelvis,pitch-yankle,L)||+||ypelvis,pitch-yankle,R)||·Π{|vy|<0.1}</td><td>-5</td></tr><tr><td>Upper Joint Constraints</td><td>Σ||q[12:14]-qdefault||2+||q[16:18]-qdefault||2+||q10-q10||2</td><td>-5</td></tr><tr><td colspan=\"3\">Second Stage</td></tr><tr><td>Joint Torque</td><td>||τ||2</td><td>-2e-5</td></tr><tr><td>Joint Acceleration</td><td>||q||2</td><td>-1e-6</td></tr><tr><td>Feet Contact Forces</td><td>Σi max(||contact forcei||2-Fmax,0)</td><td>-0.01</td></tr><tr><td>Torque When Stand-Still</td><td>Σ[(τt-τt-1)2+(τt+τt-2-2τt-1)2]·Istand</td><td>-1e-3</td></tr><tr><td>Body Pitch</td><td>||pitch-0.01||</td><td>-5</td></tr><tr><td>Body Roll</td><td>||roll||</td><td>-10</td></tr><tr><td>Track Velocity Hard</td><td>e-10||vxy-target-vxy||+e-10|ωz|2</td><td>50</td></tr><tr><td>Ankle Air Time</td><td>∑(tair,i-0.2)·Ifirst,contact,i·Istand.still</td><td>100</td></tr><tr><td>Ankle Limits</td><td>-∑i i∈{4,9} clip(qi-qmin,i,0) + clip(qmax,i-qi,0)</td><td>-200</td></tr></table>",
|
| 791 |
+
"bbox": [
|
| 792 |
+
84,
|
| 793 |
+
330,
|
| 794 |
+
488,
|
| 795 |
+
734
|
| 796 |
+
],
|
| 797 |
+
"page_idx": 4
|
| 798 |
+
},
|
| 799 |
+
{
|
| 800 |
+
"type": "text",
|
| 801 |
+
"text": "D. Implementation and Deployment Details",
|
| 802 |
+
"text_level": 1,
|
| 803 |
+
"bbox": [
|
| 804 |
+
83,
|
| 805 |
+
777,
|
| 806 |
+
382,
|
| 807 |
+
792
|
| 808 |
+
],
|
| 809 |
+
"page_idx": 4
|
| 810 |
+
},
|
| 811 |
+
{
|
| 812 |
+
"type": "text",
|
| 813 |
+
"text": "Both policies are implemented using the Proximal Policy Optimization (PPO) algorithm [36], with comprehensive domain randomization ensuring robust real-world transfer.",
|
| 814 |
+
"bbox": [
|
| 815 |
+
81,
|
| 816 |
+
795,
|
| 817 |
+
488,
|
| 818 |
+
840
|
| 819 |
+
],
|
| 820 |
+
"page_idx": 4
|
| 821 |
+
},
|
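For reference, the clipped surrogate objective that PPO [36] optimizes can be sketched as follows. This is the generic formulation from the PPO paper, not the authors' training code.

```python
import torch

def ppo_clip_loss(log_prob, old_log_prob, advantage, clip_eps=0.2):
    """Clipped surrogate objective of PPO (generic sketch)."""
    ratio = torch.exp(log_prob - old_log_prob)          # pi_new / pi_old
    unclipped = ratio * advantage
    clipped = torch.clamp(ratio, 1 - clip_eps, 1 + clip_eps) * advantage
    return -torch.min(unclipped, clipped).mean()        # maximize -> minimize
```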
| 822 |
+
{
|
| 823 |
+
"type": "text",
|
| 824 |
+
"text": "Domain Randomization Following existing researches on humanoid whole-body control, our domain randomization encompasses three aspects: physical parameter variations, systematic observation noise injection, and randomized external force perturbations. The physical parameters include variations in mass distribution, joint properties, and surface",
|
| 825 |
+
"bbox": [
|
| 826 |
+
81,
|
| 827 |
+
840,
|
| 828 |
+
488,
|
| 829 |
+
931
|
| 830 |
+
],
|
| 831 |
+
"page_idx": 4
|
| 832 |
+
},
|
| 833 |
+
{
|
| 834 |
+
"type": "text",
|
| 835 |
+
"text": "interactions. Observation noise is carefully calibrated to match real-world sensor characteristics, while external forces simulate unexpected disturbances the robot might encounter during deployment.",
|
| 836 |
+
"bbox": [
|
| 837 |
+
504,
|
| 838 |
+
218,
|
| 839 |
+
913,
|
| 840 |
+
279
|
| 841 |
+
],
|
| 842 |
+
"page_idx": 4
|
| 843 |
+
},
|
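A minimal sketch of per-episode domain randomization along the three aspects just described. The parameter names and ranges below are purely illustrative assumptions; the paper does not list its exact values here.

```python
import random

# Illustrative ranges only (assumptions, not the paper's values).
DR_RANGES = {
    "base_mass_offset_kg": (-2.0, 2.0),   # mass distribution variation
    "joint_friction":      (0.0, 0.1),    # joint property variation
    "ground_friction":     (0.4, 1.2),    # surface interaction variation
    "obs_noise_std":       (0.0, 0.02),   # sensor-calibrated observation noise
    "push_force_n":        (0.0, 100.0),  # random external perturbation
}

def sample_randomization():
    """Draw one environment's physics/noise parameters per episode."""
    return {k: random.uniform(lo, hi) for k, (lo, hi) in DR_RANGES.items()}
```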
| 844 |
+
{
|
| 845 |
+
"type": "text",
|
| 846 |
+
"text": "Safe Deployment Safe deployment is achieved through torque limiting. This controller continuously monitors and adjusts torque outputs to remain within safe operational limits. The deployment architecture operates with the policy executing at $50\\mathrm{Hz}$ , while the low-level control loop maintains precise actuation at $1000\\mathrm{Hz}$ , ensuring responsive and stable behavior.",
|
| 847 |
+
"bbox": [
|
| 848 |
+
504,
|
| 849 |
+
279,
|
| 850 |
+
911,
|
| 851 |
+
383
|
| 852 |
+
],
|
| 853 |
+
"page_idx": 4
|
| 854 |
+
},
|
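The torque-limited, dual-rate deployment loop can be sketched as below: a low-level PD controller runs at 1000 Hz and tracks joint targets that the policy refreshes at 50 Hz (one policy step per 20 control steps), with every torque command clamped into a safe envelope. The gains and the limit value are hypothetical.

```python
import numpy as np

TAU_MAX = 80.0  # N*m; hypothetical limit, real values are actuator-specific

def pd_torque(q_des, q, dq, kp=40.0, kd=1.0):
    """One 1000 Hz low-level step: PD tracking of the latest policy target
    (q_des, updated at 50 Hz), followed by torque limiting."""
    tau = kp * (q_des - q) - kd * dq
    return np.clip(tau, -TAU_MAX, TAU_MAX)
```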
| 855 |
+
{
|
| 856 |
+
"type": "text",
|
| 857 |
+
"text": "Real-world execution incorporates additional safety measures through continuous monitoring of joint positions, velocities, and torques. When approaching operational limits, the system smoothly modulates commands to maintain safe operation while preserving task performance. This approach enables robust deployment across varying conditions while protecting the hardware from potential damage.",
|
| 858 |
+
"bbox": [
|
| 859 |
+
504,
|
| 860 |
+
385,
|
| 861 |
+
911,
|
| 862 |
+
491
|
| 863 |
+
],
|
| 864 |
+
"page_idx": 4
|
| 865 |
+
},
|
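One plausible reading of the smooth command modulation near operational limits is a headroom-based scaling, sketched below. The exact mechanism is not specified in the text, so treat this as an assumption.

```python
def soft_limit_scale(value, limit, margin=0.1):
    """Scale a command smoothly toward zero as |value| approaches `limit`
    (limit > 0), instead of clipping abruptly. `margin` is the fraction of
    the limit over which the scaling ramps down (illustrative)."""
    headroom = max(limit - abs(value), 0.0)
    return value * min(headroom / (margin * limit), 1.0)
```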
| 866 |
+
{
|
| 867 |
+
"type": "text",
|
| 868 |
+
"text": "IV. EXPERIMENTS",
|
| 869 |
+
"text_level": 1,
|
| 870 |
+
"bbox": [
|
| 871 |
+
632,
|
| 872 |
+
502,
|
| 873 |
+
787,
|
| 874 |
+
515
|
| 875 |
+
],
|
| 876 |
+
"page_idx": 4
|
| 877 |
+
},
|
| 878 |
+
{
|
| 879 |
+
"type": "text",
|
| 880 |
+
"text": "We conduct comprehensive experiments in both simulation and real-world environments to evaluate StyleLoco's effectiveness in generating natural and adaptive locomotion behaviors. Our evaluation framework addresses four key aspects: (1) the effectiveness of GAD's distillation capabilities, (2) the accuracy of velocity tracking during locomotion tasks, (3) the quality of motion style reproduction, and (4) real-world deployment performance.",
|
| 881 |
+
"bbox": [
|
| 882 |
+
504,
|
| 883 |
+
522,
|
| 884 |
+
913,
|
| 885 |
+
643
|
| 886 |
+
],
|
| 887 |
+
"page_idx": 4
|
| 888 |
+
},
|
| 889 |
+
{
|
| 890 |
+
"type": "text",
|
| 891 |
+
"text": "All experiments are conducted using the Unitree H1 humanoid robot in both simulated and physical environments. For reference motions, we utilize the LaFAN1 dataset, carefully retargeted to match the H1's kinematics. The motion data comprises global root position and orientation (quaternion), along with joint angular positions. Simulated experiments are performed in the NVIDIA Isaac Gym environment, which enables efficient parallel training and evaluation.",
|
| 892 |
+
"bbox": [
|
| 893 |
+
504,
|
| 894 |
+
643,
|
| 895 |
+
913,
|
| 896 |
+
763
|
| 897 |
+
],
|
| 898 |
+
"page_idx": 4
|
| 899 |
+
},
|
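The retargeted reference data described above can be represented per frame roughly as follows. Field names are assumptions, and the joint dimension is assumed to be the H1's 19 actuated degrees of freedom.

```python
from dataclasses import dataclass
import numpy as np

@dataclass
class MotionFrame:
    root_pos: np.ndarray    # (3,)  global root position
    root_quat: np.ndarray   # (4,)  global root orientation (quaternion)
    joint_pos: np.ndarray   # (19,) joint angular positions (assumed H1 DoF count)
```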
| 900 |
+
{
|
| 901 |
+
"type": "text",
|
| 902 |
+
"text": "A. Distillation Performance",
|
| 903 |
+
"text_level": 1,
|
| 904 |
+
"bbox": [
|
| 905 |
+
504,
|
| 906 |
+
775,
|
| 907 |
+
697,
|
| 908 |
+
790
|
| 909 |
+
],
|
| 910 |
+
"page_idx": 4
|
| 911 |
+
},
|
| 912 |
+
{
|
| 913 |
+
"type": "text",
|
| 914 |
+
"text": "Our first set of experiments evaluates GAD's ability to effectively distill privileged information from the teacher policy while maintaining task performance. We compare GAD against several baseline distillation approaches, measuring both task achievement and motion naturalness.",
|
| 915 |
+
"bbox": [
|
| 916 |
+
504,
|
| 917 |
+
795,
|
| 918 |
+
911,
|
| 919 |
+
869
|
| 920 |
+
],
|
| 921 |
+
"page_idx": 4
|
| 922 |
+
},
|
| 923 |
+
{
|
| 924 |
+
"type": "text",
|
| 925 |
+
"text": "One of the main contributions of this work is the development of a Generative Adversarial Distillation method. In this context, we emphasize the ability of our single teacher discriminator (GAD-SD) to effectively distill knowledge from",
|
| 926 |
+
"bbox": [
|
| 927 |
+
504,
|
| 928 |
+
869,
|
| 929 |
+
911,
|
| 930 |
+
931
|
| 931 |
+
],
|
| 932 |
+
"page_idx": 4
|
| 933 |
+
},
|
| 934 |
+
{
|
| 935 |
+
"type": "text",
|
| 936 |
+
"text": "the teacher policy. To evaluate this capability, we compare our method against DAgger, one of the most widely used distillation methods in robot control.",
|
| 937 |
+
"bbox": [
|
| 938 |
+
81,
|
| 939 |
+
70,
|
| 940 |
+
490,
|
| 941 |
+
114
|
| 942 |
+
],
|
| 943 |
+
"page_idx": 5
|
| 944 |
+
},
|
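For context, the DAgger baseline can be sketched as the loop below: the student drives the rollout, the teacher relabels every visited state, and the student regresses onto the aggregated teacher actions. The interfaces (`env`, `dataset`, `.act`) are hypothetical stand-ins, not the authors' code.

```python
HORIZON = 1000  # assumed rollout length per DAgger iteration

def dagger_iteration(student, teacher, env, dataset, optimizer):
    """One DAgger round (sketch, hypothetical interfaces)."""
    obs = env.reset()
    for _ in range(HORIZON):
        dataset.add(obs, teacher.act(obs))        # expert relabels visited state
        obs, done = env.step(student.act(obs))    # student's own state distribution
        if done:
            obs = env.reset()
    for obs_batch, act_batch in dataset.minibatches():
        loss = ((student.act(obs_batch) - act_batch) ** 2).mean()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
```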
| 945 |
+
{
|
| 946 |
+
"type": "text",
|
| 947 |
+
"text": "First, we train an omnidirectional locomotion policy as the teacher. The command ranges used for both teacher training and the subsequent distillation experiment are listed in Table. III. We then leverage the well-trained teacher policy to guide the learning of the student policy.",
|
| 948 |
+
"bbox": [
|
| 949 |
+
81,
|
| 950 |
+
116,
|
| 951 |
+
488,
|
| 952 |
+
191
|
| 953 |
+
],
|
| 954 |
+
"page_idx": 5
|
| 955 |
+
},
|
| 956 |
+
{
|
| 957 |
+
"type": "table",
|
| 958 |
+
"img_path": "images/4ed8f8007c780019399529e52b6e64219119e91eadccfb42285262a25208d118.jpg",
|
| 959 |
+
"table_caption": [
|
| 960 |
+
"TABLE III RANGES OF LOCOMOTION TASK COMMAND"
|
| 961 |
+
],
|
| 962 |
+
"table_footnote": [],
|
| 963 |
+
"table_body": "<table><tr><td>Parameter</td><td>Teacher (Unit)</td><td>Distillation student (Unit)</td><td>StyleLoco student (Unit)</td></tr><tr><td>Forward (vx)</td><td>[-1.0, 3.5] m/s</td><td>[-1.0, 3.5] m/s</td><td>[-1.0, 4.5] m/s</td></tr><tr><td>Lateral (vy)</td><td>[-0.8, 0.8] m/s</td><td>[-0.8, 0.8] m/s</td><td>[-1.0, 1.0] m/s</td></tr><tr><td>Angular (ωz)</td><td>[-1.0, 1.0] rad/s</td><td>[-1.0, 1.0] rad/s</td><td>[-1.5, 1.5] rad/s</td></tr></table>",
|
| 964 |
+
"bbox": [
|
| 965 |
+
84,
|
| 966 |
+
234,
|
| 967 |
+
486,
|
| 968 |
+
297
|
| 969 |
+
],
|
| 970 |
+
"page_idx": 5
|
| 971 |
+
},
|
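Sampling commands uniformly from the Table III ranges (teacher / distillation-student setting) is straightforward; `sample_command` below is a hypothetical helper name, but the ranges are taken directly from the table.

```python
import random

# Ranges from Table III (teacher and distillation-student columns).
CMD_RANGES = {"vx": (-1.0, 3.5), "vy": (-0.8, 0.8), "wz": (-1.0, 1.0)}

def sample_command():
    """Uniformly sample one (vx, vy, wz) velocity command."""
    return {k: random.uniform(lo, hi) for k, (lo, hi) in CMD_RANGES.items()}
```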
| 972 |
+
{
|
| 973 |
+
"type": "text",
|
| 974 |
+
"text": "The evaluation metrics include linear velocity tracking reward, angular velocity tracking reward, and average survival time. As shown in Table IV, while both methods successfully learn from the teacher policy, GAD-SD demonstrates superior performance, particularly in linear velocity tracking and survival time.",
|
| 975 |
+
"bbox": [
|
| 976 |
+
81,
|
| 977 |
+
306,
|
| 978 |
+
488,
|
| 979 |
+
397
|
| 980 |
+
],
|
| 981 |
+
"page_idx": 5
|
| 982 |
+
},
|
| 983 |
+
{
|
| 984 |
+
"type": "table",
|
| 985 |
+
"img_path": "images/b905dabcb35401d01b56c89866cc447aaa62874bb982626732c20633b9ed297b.jpg",
|
| 986 |
+
"table_caption": [
|
| 987 |
+
"TABLE IV QUANTITATIVE COMPARISON OF DISTILLATION METHODS"
|
| 988 |
+
],
|
| 989 |
+
"table_footnote": [
|
| 990 |
+
"Notes:",
|
| 991 |
+
"- Teacher: teacher policy trained with privileged information",
|
| 992 |
+
"GAD-SD: GAD with only teacher distillation discriminator"
|
| 993 |
+
],
|
| 994 |
+
"table_body": "<table><tr><td>Method</td><td>Linear Velocity Tracking Reward(±0.1) ↑</td><td>Angular Velocity Tracking Reward(±0.1) ↑</td><td>Average Survival Time(±15 steps) ↑</td></tr><tr><td>Teacher</td><td>7.403</td><td>2.824</td><td>925.9</td></tr><tr><td>DAgger</td><td>3.744</td><td>2.516</td><td>506.6</td></tr><tr><td>GAD-SD</td><td>5.679</td><td>2.653</td><td>860.3</td></tr></table>",
|
| 995 |
+
"bbox": [
|
| 996 |
+
84,
|
| 997 |
+
446,
|
| 998 |
+
488,
|
| 999 |
+
539
|
| 1000 |
+
],
|
| 1001 |
+
"page_idx": 5
|
| 1002 |
+
},
|
| 1003 |
+
{
|
| 1004 |
+
"type": "text",
|
| 1005 |
+
"text": "B. Locomotion Capabilities",
|
| 1006 |
+
"text_level": 1,
|
| 1007 |
+
"bbox": [
|
| 1008 |
+
83,
|
| 1009 |
+
604,
|
| 1010 |
+
274,
|
| 1011 |
+
619
|
| 1012 |
+
],
|
| 1013 |
+
"page_idx": 5
|
| 1014 |
+
},
|
| 1015 |
+
{
|
| 1016 |
+
"type": "text",
|
| 1017 |
+
"text": "The second set of experiments assesses the student policy's locomotion capabilities, particularly its ability to track commanded velocities while maintaining natural motion patterns. We compare StyleLoco against state-of-the-art approaches in terms of tracking accuracy, stability, and style preservation. Table VI shows comparative results across various performance metrics.",
|
| 1018 |
+
"bbox": [
|
| 1019 |
+
81,
|
| 1020 |
+
625,
|
| 1021 |
+
488,
|
| 1022 |
+
729
|
| 1023 |
+
],
|
| 1024 |
+
"page_idx": 5
|
| 1025 |
+
},
|
| 1026 |
+
{
|
| 1027 |
+
"type": "text",
|
| 1028 |
+
"text": "The locomotion task evaluates the ability of student policy to track local velocity commands comprising three components: forward/backward velocity $v_{x}$ , lateral velocity $v_{y}$ , and rotational velocity $w_{z}$ . Command values are uniformly sampled within pre-defined ranges specified in Table. III. For style imitation, we select four representative motion clips as reference targets for the style discriminator, with their corresponding velocity profiles detailed in Table. V.",
|
| 1029 |
+
"bbox": [
|
| 1030 |
+
81,
|
| 1031 |
+
731,
|
| 1032 |
+
488,
|
| 1033 |
+
851
|
| 1034 |
+
],
|
| 1035 |
+
"page_idx": 5
|
| 1036 |
+
},
|
| 1037 |
+
{
|
| 1038 |
+
"type": "text",
|
| 1039 |
+
"text": "To comprehensively evaluate our double-discriminator framework, we compare our method against three baseline approaches:",
|
| 1040 |
+
"bbox": [
|
| 1041 |
+
81,
|
| 1042 |
+
852,
|
| 1043 |
+
488,
|
| 1044 |
+
897
|
| 1045 |
+
],
|
| 1046 |
+
"page_idx": 5
|
| 1047 |
+
},
|
| 1048 |
+
{
|
| 1049 |
+
"type": "text",
|
| 1050 |
+
"text": "- SD-Motion: Single-discriminator approach using only motion clips as reference.",
|
| 1051 |
+
"bbox": [
|
| 1052 |
+
99,
|
| 1053 |
+
901,
|
| 1054 |
+
488,
|
| 1055 |
+
931
|
| 1056 |
+
],
|
| 1057 |
+
"page_idx": 5
|
| 1058 |
+
},
|
| 1059 |
+
{
|
| 1060 |
+
"type": "table",
|
| 1061 |
+
"img_path": "images/85accef67f72eee92b8fba3e755403b019656541d1780e6b00386a41b2c1da7f.jpg",
|
| 1062 |
+
"table_caption": [
|
| 1063 |
+
"TABLEV VELOCITY PROFILES FOR MOTION CLIPS"
|
| 1064 |
+
],
|
| 1065 |
+
"table_footnote": [],
|
| 1066 |
+
"table_body": "<table><tr><td>Vel Profiles</td><td>Forward (m/s)</td><td>Lateral (m/s)</td><td>Angular (rad/s)</td></tr><tr><td>Slow Forward</td><td>[0.089, 1.205]</td><td>[-0.396, 0.188]</td><td>[-1.734, 0.906]</td></tr><tr><td>Medium Forward</td><td>[0.884, 2.067]</td><td>[-0.563, 0.306]</td><td>[-2.044, 1.963]</td></tr><tr><td>Fast Forward</td><td>[2.438, 4.378]</td><td>[-1.166, 0.943]</td><td>[-1.555, 3.476]</td></tr><tr><td>Move Backward</td><td>[-1.088, -0.350]</td><td>[-0.425, 0.365]</td><td>[-1.580, 1.981]</td></tr></table>",
|
| 1067 |
+
"bbox": [
|
| 1068 |
+
506,
|
| 1069 |
+
102,
|
| 1070 |
+
913,
|
| 1071 |
+
178
|
| 1072 |
+
],
|
| 1073 |
+
"page_idx": 5
|
| 1074 |
+
},
|
| 1075 |
+
{
|
| 1076 |
+
"type": "list",
|
| 1077 |
+
"sub_type": "text",
|
| 1078 |
+
"list_items": [
|
| 1079 |
+
"- SD-Full: Single-discriminator approach using a combination of teacher policy online roll-out data and motion clips.",
|
| 1080 |
+
"- DAgger+Style: DAgger-based teacher policy distillation combined with a separate discriminator for style learning."
|
| 1081 |
+
],
|
| 1082 |
+
"bbox": [
|
| 1083 |
+
522,
|
| 1084 |
+
215,
|
| 1085 |
+
911,
|
| 1086 |
+
306
|
| 1087 |
+
],
|
| 1088 |
+
"page_idx": 5
|
| 1089 |
+
},
|
| 1090 |
+
{
|
| 1091 |
+
"type": "text",
|
| 1092 |
+
"text": "The evaluation metrics are similar to those used in the distillation task experiment, with the addition of energy consumption.",
|
| 1093 |
+
"bbox": [
|
| 1094 |
+
504,
|
| 1095 |
+
316,
|
| 1096 |
+
911,
|
| 1097 |
+
362
|
| 1098 |
+
],
|
| 1099 |
+
"page_idx": 5
|
| 1100 |
+
},
|
| 1101 |
+
{
|
| 1102 |
+
"type": "text",
|
| 1103 |
+
"text": "As demonstrated in Table. VI, our proposed double-discriminator framework achieves superior performance in velocity tracking and survival time compared to all baseline methods. Notably, the SD-Motion approach exhibits the best energy consumption performance, suggesting that human motions are inherently energy efficient and properly incorporating motion demonstrations during training contributes to reduced energy consumption.",
|
| 1104 |
+
"bbox": [
|
| 1105 |
+
504,
|
| 1106 |
+
366,
|
| 1107 |
+
913,
|
| 1108 |
+
487
|
| 1109 |
+
],
|
| 1110 |
+
"page_idx": 5
|
| 1111 |
+
},
|
| 1112 |
+
{
|
| 1113 |
+
"type": "image",
|
| 1114 |
+
"img_path": "images/c1470d9edbf54fb74775c976df1b6ea687deb49fcd2c2616260ce88d4345518e.jpg",
|
| 1115 |
+
"image_caption": [],
|
| 1116 |
+
"image_footnote": [],
|
| 1117 |
+
"bbox": [
|
| 1118 |
+
527,
|
| 1119 |
+
510,
|
| 1120 |
+
890,
|
| 1121 |
+
631
|
| 1122 |
+
],
|
| 1123 |
+
"page_idx": 5
|
| 1124 |
+
},
|
| 1125 |
+
{
|
| 1126 |
+
"type": "image",
|
| 1127 |
+
"img_path": "images/4cc5eb1cce99ae67f5f9a804b4c2739c3f4c4025dd55e5e65924dd9eeaad7538.jpg",
|
| 1128 |
+
"image_caption": [],
|
| 1129 |
+
"image_footnote": [],
|
| 1130 |
+
"bbox": [
|
| 1131 |
+
529,
|
| 1132 |
+
632,
|
| 1133 |
+
890,
|
| 1134 |
+
733
|
| 1135 |
+
],
|
| 1136 |
+
"page_idx": 5
|
| 1137 |
+
},
|
| 1138 |
+
{
|
| 1139 |
+
"type": "image",
|
| 1140 |
+
"img_path": "images/7d7026594acb0742b1ffa973d55c714cfa3dec4f728af15e9a20e8b4dd966dfe.jpg",
|
| 1141 |
+
"image_caption": [
|
| 1142 |
+
"Fig. 3. From top to bottom, a stylized locomotion demonstration from LaFAN1 (Top), motions generated by student policy in simulation (Middle), motions generated by student policy deployed on real H1 robot(Bottom)."
|
| 1143 |
+
],
|
| 1144 |
+
"image_footnote": [],
|
| 1145 |
+
"bbox": [
|
| 1146 |
+
529,
|
| 1147 |
+
732,
|
| 1148 |
+
890,
|
| 1149 |
+
859
|
| 1150 |
+
],
|
| 1151 |
+
"page_idx": 5
|
| 1152 |
+
},
|
| 1153 |
+
{
|
| 1154 |
+
"type": "table",
|
| 1155 |
+
"img_path": "images/1bcdd6f1c5caab9505459eea437df23ddfe58f0cd03e7c863a2eff6915ba2be4.jpg",
|
| 1156 |
+
"table_caption": [
|
| 1157 |
+
"TABLE VI QUANTITATIVE COMPARISON OF DIFFERENT METHODS ACROSS VARIOUS METRICS"
|
| 1158 |
+
],
|
| 1159 |
+
"table_footnote": [
|
| 1160 |
+
"Notes:",
|
| 1161 |
+
"- SD-Motion: Single discriminator with only motion demonstrations",
|
| 1162 |
+
"- SD-Full: Single discriminator with both teacher roll-outs and motion demonstrations",
|
| 1163 |
+
"- DAgger+Style: DAgger distillation with additional style discriminator"
|
| 1164 |
+
],
|
| 1165 |
+
"table_body": "<table><tr><td>Method</td><td>Linear Velocity Tracking Reward(±0.1) ↑</td><td>Angular Velocity Tracking Reward(±0.1) ↑</td><td>Average Survival Time(±15 steps) ↑</td><td>Energy Consumption(±0.001) ↓</td></tr><tr><td>SD-Motion</td><td>4.229</td><td>2.249</td><td>813.2</td><td>0.065</td></tr><tr><td>SD-Full</td><td>4.665</td><td>2.413</td><td>824.1</td><td>0.093</td></tr><tr><td>DAgger+Style</td><td>5.059</td><td>2.384</td><td>826.9</td><td>0.079</td></tr><tr><td>GAD (Ours)</td><td>5.485</td><td>2.644</td><td>846.5</td><td>0.081</td></tr></table>",
|
| 1166 |
+
"bbox": [
|
| 1167 |
+
153,
|
| 1168 |
+
104,
|
| 1169 |
+
843,
|
| 1170 |
+
188
|
| 1171 |
+
],
|
| 1172 |
+
"page_idx": 6
|
| 1173 |
+
},
|
| 1174 |
+
{
|
| 1175 |
+
"type": "text",
|
| 1176 |
+
"text": "C. Evaluations on Style Imitation",
|
| 1177 |
+
"text_level": 1,
|
| 1178 |
+
"bbox": [
|
| 1179 |
+
83,
|
| 1180 |
+
263,
|
| 1181 |
+
313,
|
| 1182 |
+
277
|
| 1183 |
+
],
|
| 1184 |
+
"page_idx": 6
|
| 1185 |
+
},
|
| 1186 |
+
{
|
| 1187 |
+
"type": "text",
|
| 1188 |
+
"text": "To demonstrate our method's ability to combine robust locomotion skills with distinct motion styles, we evaluate a particularly challenging case: synthesizing a limping gait by combining a regular walking teacher policy with reference motions exhibiting a distinct limping pattern. Fig. 3 shows the comparison between the original limping motion from LaFAN1 (visualized in Rerun [37]), the synthesized motion in Isaac Gym [38], and the deployed behavior on the physical Unitree H1 robot. The results demonstrate that our method successfully maintains the characteristic limping style while preserving the fundamental locomotion capabilities of the teacher policy.",
|
| 1189 |
+
"bbox": [
|
| 1190 |
+
81,
|
| 1191 |
+
282,
|
| 1192 |
+
488,
|
| 1193 |
+
463
|
| 1194 |
+
],
|
| 1195 |
+
"page_idx": 6
|
| 1196 |
+
},
|
| 1197 |
+
{
|
| 1198 |
+
"type": "text",
|
| 1199 |
+
"text": "This fusion of different motion sources creates an inherent trade-off between style fidelity and command tracking accuracy, as the stylized motions often deviate significantly from the teacher's optimal movement patterns. Our framework addresses this challenge through adjustable discriminator weights, allowing fine-tuned balance between style preservation and task performance.",
|
| 1200 |
+
"bbox": [
|
| 1201 |
+
81,
|
| 1202 |
+
464,
|
| 1203 |
+
488,
|
| 1204 |
+
570
|
| 1205 |
+
],
|
| 1206 |
+
"page_idx": 6
|
| 1207 |
+
},
|
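The adjustable balance described above can be sketched as a weighted sum of the two discriminator rewards. The reward shaping below follows the least-squares-GAN mapping popularized by AMP [2]; whether StyleLoco uses exactly this mapping is an assumption, and the weights shown are illustrative.

```python
def adversarial_reward(score):
    """LSGAN-style reward shaping as in AMP [2]; bounded in [0, 1].
    Assumed mapping, not confirmed by the text."""
    return max(0.0, 1.0 - 0.25 * (score - 1.0) ** 2)

def gad_reward(d_teacher_score, d_style_score, w_teacher=1.0, w_style=0.5):
    # Illustrative weights: raising w_style trades command-tracking
    # accuracy for style fidelity, and vice versa.
    return (w_teacher * adversarial_reward(d_teacher_score)
            + w_style * adversarial_reward(d_style_score))
```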
| 1208 |
+
{
|
| 1209 |
+
"type": "text",
|
| 1210 |
+
"text": "D. Real Robot Deployment",
|
| 1211 |
+
"text_level": 1,
|
| 1212 |
+
"bbox": [
|
| 1213 |
+
83,
|
| 1214 |
+
579,
|
| 1215 |
+
272,
|
| 1216 |
+
594
|
| 1217 |
+
],
|
| 1218 |
+
"page_idx": 6
|
| 1219 |
+
},
|
| 1220 |
+
{
|
| 1221 |
+
"type": "text",
|
| 1222 |
+
"text": "The real-world deployment of our student policy on the Unitree H1 robot validates the practical effectiveness of our approach across various scenarios. As shown in Fig. 1, the robot demonstrates smooth transitions in both gait patterns and arm postures when responding to velocity command changes from low to medium speeds. The policy's robustness is further evidenced in Fig. 4, where the robot maintains stable locomotion at high speeds up to $3\\mathrm{m / s}$ . Most notably, Fig. 3 showcases our method's unique capability to synthesize stylized gaits that combine the stability of the teacher policy with distinctive motion patterns from the reference datasets, resulting in natural and controllable locomotion behaviors.",
|
| 1223 |
+
"bbox": [
|
| 1224 |
+
81,
|
| 1225 |
+
599,
|
| 1226 |
+
488,
|
| 1227 |
+
794
|
| 1228 |
+
],
|
| 1229 |
+
"page_idx": 6
|
| 1230 |
+
},
|
| 1231 |
+
{
|
| 1232 |
+
"type": "text",
|
| 1233 |
+
"text": "V. CONCLUSION AND LIMITATIONS",
|
| 1234 |
+
"text_level": 1,
|
| 1235 |
+
"bbox": [
|
| 1236 |
+
137,
|
| 1237 |
+
805,
|
| 1238 |
+
434,
|
| 1239 |
+
819
|
| 1240 |
+
],
|
| 1241 |
+
"page_idx": 6
|
| 1242 |
+
},
|
| 1243 |
+
{
|
| 1244 |
+
"type": "text",
|
| 1245 |
+
"text": "This paper presents StyleLoco, a novel framework for humanoid locomotion that bridges the gap between robust task execution and natural motion synthesis. Through our proposed Generative Adversarial Distillation approach, we demonstrate the effective combination of privileged information from expert policies with stylistic elements from human demonstrations. Our extensive experimental results,",
|
| 1246 |
+
"bbox": [
|
| 1247 |
+
81,
|
| 1248 |
+
825,
|
| 1249 |
+
490,
|
| 1250 |
+
931
|
| 1251 |
+
],
|
| 1252 |
+
"page_idx": 6
|
| 1253 |
+
},
|
| 1254 |
+
{
|
| 1255 |
+
"type": "image",
|
| 1256 |
+
"img_path": "images/25f482130f2c8371bda6cb44d6e4e2a848320aa4731234c20e36ecf1bef0305f.jpg",
|
| 1257 |
+
"image_caption": [
|
| 1258 |
+
"Fig. 4. H1 operating outdoors at forward velocity $(v_{x})$ of $3\\mathrm{m / s}$"
|
| 1259 |
+
],
|
| 1260 |
+
"image_footnote": [],
|
| 1261 |
+
"bbox": [
|
| 1262 |
+
547,
|
| 1263 |
+
257,
|
| 1264 |
+
870,
|
| 1265 |
+
501
|
| 1266 |
+
],
|
| 1267 |
+
"page_idx": 6
|
| 1268 |
+
},
|
| 1269 |
+
{
|
| 1270 |
+
"type": "text",
|
| 1271 |
+
"text": "including successful deployment on the Unitree H1 robot, validate the framework's capability to generate stable and natural locomotion behaviors across diverse scenarios, from high-speed running at $3\\mathrm{m / s}$ to stylized gaits such as limping.",
|
| 1272 |
+
"bbox": [
|
| 1273 |
+
504,
|
| 1274 |
+
553,
|
| 1275 |
+
911,
|
| 1276 |
+
614
|
| 1277 |
+
],
|
| 1278 |
+
"page_idx": 6
|
| 1279 |
+
},
|
| 1280 |
+
{
|
| 1281 |
+
"type": "text",
|
| 1282 |
+
"text": "The key innovation of our double-discriminator architecture enables simultaneous learning from heterogeneous sources while maintaining deployability through careful handling of privileged information. Quantitative evaluations show that StyleLoco outperforms existing approaches in both task performance and style preservation, demonstrating superior velocity tracking rewards and survival times while maintaining natural motion patterns.",
|
| 1283 |
+
"bbox": [
|
| 1284 |
+
504,
|
| 1285 |
+
614,
|
| 1286 |
+
913,
|
| 1287 |
+
733
|
| 1288 |
+
],
|
| 1289 |
+
"page_idx": 6
|
| 1290 |
+
},
|
| 1291 |
+
{
|
| 1292 |
+
"type": "text",
|
| 1293 |
+
"text": "Despite these achievements, several important limitations warrant future investigation. A primary challenge lies in style disambiguation when motion demonstrations share overlapping velocity ranges, potentially creating ambiguity in style selection and degrading imitation fidelity. Future research could explore automatic style clustering or context-aware selection mechanisms to address this limitation. Additionally, the current implementation relies on manual tuning of discriminator weights to balance task completion and style imitation objectives. Developing adaptive weighting schemes or automated tuning methods could enhance the framework's practical applicability. While our method shows impressive results in locomotion tasks, its generalization to broader",
|
| 1294 |
+
"bbox": [
|
| 1295 |
+
504,
|
| 1296 |
+
734,
|
| 1297 |
+
913,
|
| 1298 |
+
931
|
| 1299 |
+
],
|
| 1300 |
+
"page_idx": 6
|
| 1301 |
+
},
|
| 1302 |
+
{
|
| 1303 |
+
"type": "text",
|
| 1304 |
+
"text": "manipulation tasks or more complex behaviors remains to be explored, opening avenues for future research.",
|
| 1305 |
+
"bbox": [
|
| 1306 |
+
81,
|
| 1307 |
+
71,
|
| 1308 |
+
488,
|
| 1309 |
+
99
|
| 1310 |
+
],
|
| 1311 |
+
"page_idx": 7
|
| 1312 |
+
},
|
| 1313 |
+
{
|
| 1314 |
+
"type": "text",
|
| 1315 |
+
"text": "Despite these limitations, StyleLoco represents a step toward natural and capable humanoid robotics, offering a promising foundation for future research in combining task-oriented control with human-like motion generation.",
|
| 1316 |
+
"bbox": [
|
| 1317 |
+
81,
|
| 1318 |
+
102,
|
| 1319 |
+
488,
|
| 1320 |
+
162
|
| 1321 |
+
],
|
| 1322 |
+
"page_idx": 7
|
| 1323 |
+
},
|
| 1324 |
+
{
|
| 1325 |
+
"type": "text",
|
| 1326 |
+
"text": "REFERENCES",
|
| 1327 |
+
"text_level": 1,
|
| 1328 |
+
"bbox": [
|
| 1329 |
+
238,
|
| 1330 |
+
185,
|
| 1331 |
+
334,
|
| 1332 |
+
199
|
| 1333 |
+
],
|
| 1334 |
+
"page_idx": 7
|
| 1335 |
+
},
|
| 1336 |
+
{
|
| 1337 |
+
"type": "list",
|
| 1338 |
+
"sub_type": "ref_text",
|
| 1339 |
+
"list_items": [
|
| 1340 |
+
"[1] K. Darvish, L. Penco, J. Ramos, R. Cisneros, J. Pratt, E. Yoshida, S. Ivaldi, and D. Pucci, \"Teleoperation of humanoid robots: A survey,\" IEEE Transactions on Robotics, vol. 39, no. 3, pp. 1706-1727, 2023.",
|
| 1341 |
+
"[2] X. B. Peng, Z. Ma, P. Abbeel, S. Levine, and A. Kanazawa, \"Amp: Adversarial motion priors for stylized physics-based character control,\" ACM Transactions on Graphics (ToG), vol. 40, no. 4, pp. 1-20, 2021.",
|
| 1342 |
+
"[3] F. G. Harvey, M. Yurick, D. Nowrouzezahrai, and C. Pal, \"Robust motion in-between,\" vol. 39, no. 4, 2020.",
|
| 1343 |
+
"[4] N. Mahmood, N. Ghorbani, N. F. Troje, G. Pons-Moll, and M. J. Black, “AMASS: Archive of motion capture as surface shapes,” in International Conference on Computer Vision, Oct. 2019, pp. 5442–5451.",
|
| 1344 |
+
"[5] X. Cheng, Y. Ji, J. Chen, R. Yang, G. Yang, and X. Wang, \"Expressive whole-body control for humanoid robots,\" arXiv preprint arXiv:2402.16796, 2024.",
|
| 1345 |
+
"[6] T. Marcucci, M. Gabiccini, and A. Artoni, \"A two-stage trajectory optimization strategy for articulated bodies with unscheduled contact sequences,\" IEEE Robotics and Automation Letters, vol. 2, no. 1, pp. 104-111, 2017.",
|
| 1346 |
+
"[7] G. Romualdi, S. Dafarra, G. L'Erario, I. Sorrentino, S. Traversaro, and D. Pucci, \"Online non-linear centroidal mpc for humanoid robot locomotion with step adjustment,\" in 2022 International Conference on Robotics and Automation (ICRA). IEEE, 2022, pp. 10412-10419.",
|
| 1347 |
+
"[8] J. Englsberger, A. Dietrich, G.-A. Mesesan, G. Garofalo, C. Ott, and A. O. Albu-Schäffer, \"Mptc-modular passive tracking controller for stack of tasks based control frameworks,\" 16th Robotics: Science and Systems, RSS 2020, 2020.",
|
| 1348 |
+
"[9] M. Elobaid, G. Romualdi, G. Nava, L. Rapetti, H. A. O. Mohamed, and D. Pucci, \"Online non-linear centroidal mpc for humanoid robots payload carrying with contact-stable force parametrization,\" in 2023 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2023, pp. 12233-12239.",
|
| 1349 |
+
"[10] Y. Ishiguro, K. Kojima, F. Sugai, S. Nozawa, Y. Kakiuchi, K. Okada, and M. Inaba, \"High speed whole body dynamic motion experiment with real time master-slave humanoid robot system,\" in 2018 IEEE International Conference on Robotics and Automation (ICRA), 2018, pp. 5835-5841.",
|
| 1350 |
+
"[11] Y. Ishiguro, T. Makabe, Y. Nagamatsu, Y. Kojio, K. Kojima, F. Sugai, Y. Kakiuchi, K. Okada, and M. Inaba, \"Bilateral humanoid teleoperation system using whole-body exoskeleton cockpit tablis,\" IEEE Robotics and Automation Letters, vol. 5, no. 4, pp. 6419-6426, 2020.",
|
| 1351 |
+
"[12] J. Ramos and S. Kim, \"Dynamic locomotion synchronization of bipedal robot and human operator via bilateral feedback teleoperation,\" Science Robotics, vol. 4, no. 35, p. eaav4282, 2019.",
|
| 1352 |
+
"[13] K. Ayusawa and E. Yoshida, \"Motion retargeting for humanoid robots based on simultaneous morphing parameter identification and motion optimization,\" IEEE Transactions on Robotics, vol. 33, no. 6, pp. 1343-1357, 2017.",
|
| 1353 |
+
"[14] K. Hu, C. Ott, and D. Lee, \"Online human walking imitation in task and joint space based on quadratic programming,\" in 2014 IEEE International Conference on Robotics and Automation (ICRA), 2014, pp. 3458-3464.",
|
| 1354 |
+
"[15] F.-J. Montecillo-Puente, M. N. Sreenivasa, and J.-P. Laumond, \"On real-time whole-body human to humanoid motion transfer,\" in International Conference on Informatics in Control, Automation and Robotics, 2010. [Online]. Available: https://api(semanticscholar.org/CorpusID:20676844",
|
| 1355 |
+
"[16] K. Yamane, S. O. Anderson, and J. K. Hodgins, “Controlling humanoid robots with human motion data: Experimental validation,” in 2010 10th IEEE-RAS International Conference on Humanoid Robots, 2010, pp. 504–510."
|
| 1356 |
+
],
|
| 1357 |
+
"bbox": [
|
| 1358 |
+
84,
|
| 1359 |
+
214,
|
| 1360 |
+
488,
|
| 1361 |
+
929
|
| 1362 |
+
],
|
| 1363 |
+
"page_idx": 7
|
| 1364 |
+
},
|
| 1365 |
+
{
|
| 1366 |
+
"type": "list",
|
| 1367 |
+
"sub_type": "ref_text",
|
| 1368 |
+
"list_items": [
|
| 1369 |
+
"[17] A. Di Fava, K. Bouyarmane, K. Chappellet, E. Ruffaldi, and A. Kheddar, “Multi-contact motion retargeting from human to humanoid robot,” in 2016 IEEE-RAS 16th International Conference on Humanoid Robots (Humanoids), 2016, pp. 1081–1086.",
|
| 1370 |
+
"[18] K. Otani and K. Bouyarmane, \"Adaptive whole-body manipulation in human-to-humanoid multi-contact motion retargeting,\" in 2017 IEEE-RAS 17th International Conference on Humanoid Robotics (Humanoids), 2017, pp. 446-453.",
|
| 1371 |
+
"[19] L. Penco, B. Clement, V. Modugno, E. Mingo Hoffman, G. Nava, D. Pucci, N. G. Tsagarakis, J. B. Mouret, and S. Ivaldi, \"Robust real-time whole-body motion retargeting from human to humanoid,\" in 2018 IEEE-RAS 18th International Conference on Humanoid Robots (Humanoids), 2018, pp. 425-432.",
|
| 1372 |
+
"[20] J. Koenemann, F. Burget, and M. Bennewitz, “Real-time imitation of human whole-body motions by humanoids,” in 2014 IEEE International Conference on Robotics and Automation (ICRA), 2014, pp. 2806–2812.",
|
| 1373 |
+
"[21] O. E. Ramos, N. Mansard, O. Stasse, C. Benazeth, S. Hak, and L. Saab, \"Dancing humanoid robots: Systematic use of osid to compute dynamically consistent movements following a motion capture pattern,\" IEEE Robotics and Automation Magazine, vol. 22, no. 4, pp. 16-26, 2015.",
|
| 1374 |
+
"[22] L. Penco, K. Momose, S. McCrory, D. Anderson, N. Kitchel, D. Calvert, and R. J. Griffin, \"Mixed reality teleoperation assistance for direct control of humanoids,\" IEEE Robotics and Automation Letters, vol. 9, no. 2, pp. 1937-1944, 2024.",
|
| 1375 |
+
"[23] Z. Li, X. B. Peng, P. Abbeel, S. Levine, G. Berseth, and K. Sreenath, \"Reinforcement learning for versatile, dynamic, and robust bipedal locomotion control,\" The International Journal of Robotics Research, p. 02783649241285161, 2024.",
|
| 1376 |
+
"[24] Z. Fu, A. Kumar, J. Malik, and D. Pathak, \"Minimizing energy consumption leads to the emergence of gaits in legged robots,\" in 5th Annual Conference on Robot Learning.",
|
| 1377 |
+
"[25] J. Ho and S. Ermon, \"Generative adversarial imitation learning,\" Advances in neural information processing systems, vol. 29, 2016.",
|
| 1378 |
+
"[26] X. Huang, Y. Chi, R. Wang, Z. Li, X. B. Peng, S. Shao, B. Nikolic, and K. Sreenath, \"Diffuseloco: Real-time legged locomotion control with diffusion from offline datasets,\" 2024. [Online]. Available: https://arxiv.org/abs/2404.19264",
|
| 1379 |
+
"[27] B. Jia and D. Manocha, \"Sim-to-real robotic sketching using behavior cloning and reinforcement learning,\" in 2024 IEEE International Conference on Robotics and Automation (ICRA), 2024, pp. 18272-18278.",
|
| 1380 |
+
"[28] S. Ross and D. Bagnell, \"Efficient reductions for imitation learning,\" in Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics, ser. Proceedings of Machine Learning Research, Y. W. Teh and M. Titterington, Eds., vol. 9. Chia Laguna Resort, Sardinia, Italy: PMLR, 13-15 May 2010, pp. 661-668. [Online]. Available: https://proceedings.mlr.press/v9/ross10a.html",
|
| 1381 |
+
"[29] S. Ross, G. Gordon, and D. Bagnell, “A reduction of imitation learning and structured prediction to no-regret online learning,” in Proceedings of the fourteenth international conference on artificial intelligence and statistics. JMLR Workshop and Conference Proceedings, 2011, pp. 627–635.",
|
| 1382 |
+
"[30] M. Ji, X. Peng, F. Liu, J. Li, G. Yang, X. Cheng, and X. Wang, \"Exbody2: Advanced expressive humanoid whole-body control,\" arXiv preprint arXiv:2412.13196, 2024.",
|
| 1383 |
+
"[31] T. He, Z. Luo, W. Xiao, C. Zhang, K. Kitani, C. Liu, and G. Shi, \"Learning human-to-humanoid real-time whole-body teleoperation,\" arXiv preprint arXiv:2403.04436, 2024.",
|
| 1384 |
+
"[32] T. He, W. Xiao, T. Lin, Z. Luo, Z. Xu, Z. Jiang, C. Liu, G. Shi, X. Wang, L. Fan, and Y. Zhu, \"Hover: Versatile neural whole-body controller for humanoid robots,\" arXiv preprint arXiv:2410.21229, 2024.",
|
| 1385 |
+
"[33] T. He, Z. Luo, X. He, W. Xiao, C. Zhang, W. Zhang, K. Kitani, C. Liu, and G. Shi, “Omnih2o: Universal and dexterous human-to-humanoid whole-body teleoperation and learning,” arXiv preprint arXiv:2406.08858, 2024.",
|
| 1386 |
+
"[34] X. Gu, Y.-J. Wang, X. Zhu, C. Shi, Y. Guo, Y. Liu, and J. Chen, “Advancing humanoid locomotion: Mastering challenging terrains with denoising world model learning,” arXiv preprint arXiv:2408.14472, 2024.",
|
| 1387 |
+
"[35] X. Mao, Q. Li, H. Xie, R. Y. Lau, Z. Wang, and S. Paul Smolley, \"Least squares generative adversarial networks,\" in Proceedings of the IEEE international conference on computer vision, 2017, pp. 2794-2802."
|
| 1388 |
+
],
|
| 1389 |
+
"bbox": [
|
| 1390 |
+
509,
|
| 1391 |
+
71,
|
| 1392 |
+
911,
|
| 1393 |
+
931
|
| 1394 |
+
],
|
| 1395 |
+
"page_idx": 7
|
| 1396 |
+
},
|
| 1397 |
+
{
|
| 1398 |
+
"type": "list",
|
| 1399 |
+
"sub_type": "ref_text",
|
| 1400 |
+
"list_items": [
|
| 1401 |
+
"[36] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov, \"Proximal policy optimization algorithms,\" arXiv preprint arXiv:1707.06347, 2017.",
|
| 1402 |
+
"[37] Rerun Development Team, \"Rerun: A visualizationsdk for multimodal data,\" Online, 2024, available from https://www. rerun.io/ and https://github.com/rerun-io/rerun. [Online]. Available: https://www. rerun.io",
|
| 1403 |
+
"[38] V. Makoviychuk, L. Wawrzyniak, Y. Guo, M. Lu, K. Storey, M. Macklin, D. Hoeller, N. Rudin, A. Allshire, A. Handa, and G. State, \"Isaac gym: High performancegpu-based physics simulation for robot learning,\" 2021. [Online]. Available: https://arxiv.org/abs/2108.10470"
|
| 1404 |
+
],
|
| 1405 |
+
"bbox": [
|
| 1406 |
+
84,
|
| 1407 |
+
71,
|
| 1408 |
+
488,
|
| 1409 |
+
198
|
| 1410 |
+
],
|
| 1411 |
+
"page_idx": 8
|
| 1412 |
+
}
|
| 1413 |
+
]
|
data/2025/2503_15xxx/2503.15082/9abe67bb-bb04-4404-ba77-d9bdbc419145_model.json
ADDED
|
@@ -0,0 +1,2066 @@
|
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "aside_text",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.023,
|
| 7 |
+
0.261,
|
| 8 |
+
0.058,
|
| 9 |
+
0.707
|
| 10 |
+
],
|
| 11 |
+
"angle": 270,
|
| 12 |
+
"content": "arXiv:2503.15082v1 [cs.RO] 19 Mar 2025"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "title",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.22,
|
| 18 |
+
0.094,
|
| 19 |
+
0.78,
|
| 20 |
+
0.14
|
| 21 |
+
],
|
| 22 |
+
"angle": 0,
|
| 23 |
+
"content": "StyleLoco: Generative Adversarial Distillation for Natural Humanoid Robot Locomotion"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.147,
|
| 29 |
+
0.178,
|
| 30 |
+
0.868,
|
| 31 |
+
0.196
|
| 32 |
+
],
|
| 33 |
+
"angle": 0,
|
| 34 |
+
"content": "Le Ma\\(^{1*}\\), Ziyu Meng\\(^{1,2*}\\), Tengyu Liu\\(^{1}\\), Yuhan Li\\(^{1,3}\\), Ran Song\\(^{2}\\), Wei Zhang\\(^{2}\\), Siyuan Huang\\(^{1, \\boxtimes}\\)"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.086,
|
| 40 |
+
0.199,
|
| 41 |
+
0.929,
|
| 42 |
+
0.214
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "\\(^{1}\\) National Key Laboratory of General Artificial Intelligence, BIGAI \\(^{2}\\) School of Control Science and Engineering, Shandong University"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.175,
|
| 51 |
+
0.214,
|
| 52 |
+
0.835,
|
| 53 |
+
0.228
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": "<sup>3</sup> Huazhong University of Science and Technology *Equal contributors huangsiyuan@bigai.ai"
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.426,
|
| 62 |
+
0.235,
|
| 63 |
+
0.584,
|
| 64 |
+
0.248
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "https://styleloco.github.io/"
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.085,
|
| 73 |
+
0.269,
|
| 74 |
+
0.49,
|
| 75 |
+
0.622
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": "Abstract—Humanoid robots are anticipated to acquire a wide range of locomotion capabilities while ensuring natural movement across varying speeds and terrains. Existing methods encounter a fundamental dilemma in learning humanoid locomotion: reinforcement learning with handcrafted rewards can achieve agile locomotion but produces unnatural gaits, while Generative Adversarial Imitation Learning (GAIL) with motion capture data yields natural movements but suffers from unstable training processes and restricted agility. Integrating these approaches proves challenging due to the inherent heterogeneity between expert policies and human motion datasets. To address this, we introduce StyleLoco, a novel two-stage framework that bridges this gap through a Generative Adversarial Distillation (GAD) process. Our framework begins by training a teacher policy using reinforcement learning to achieve agile and dynamic locomotion. It then employs a multi-discriminator architecture, where distinct discriminators concurrently extract skills from both the teacher policy and motion capture data. This approach effectively combines the agility of reinforcement learning with the natural fluidity of human-like movements while mitigating the instability issues commonly associated with adversarial training. Through extensive simulation and real-world experiments, we demonstrate that StyleLoco enables humanoid robots to perform diverse locomotion tasks with the precision of expertly trained policies and the natural aesthetics of human motion, successfully transferring styles across different movement types while maintaining stable locomotion across a broad spectrum of command inputs."
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "title",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.21,
|
| 84 |
+
0.637,
|
| 85 |
+
0.364,
|
| 86 |
+
0.65
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "I. INTRODUCTION"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.082,
|
| 95 |
+
0.659,
|
| 96 |
+
0.49,
|
| 97 |
+
0.81
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": "Natural and agile locomotion in humanoid robots represents a fundamental challenge in robotics, with far-reaching implications for human-robot interaction, disaster response, and industrial applications. While humanoid robots offer unprecedented potential for operating in human-centric environments, achieving human-like movement patterns remains difficult due to their high degrees of freedom and inherently unstable dynamics[1]. This challenge is further complicated by the fundamental trade-off between achieving precise control and maintaining natural motion qualities."
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.082,
|
| 106 |
+
0.811,
|
| 107 |
+
0.49,
|
| 108 |
+
0.932
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "Reinforcement learning (RL) has emerged as a powerful approach for developing locomotion controllers, enabling robots to master complex movements through carefully designed reward functions. These methods often employ a two-stage learning process: first training a teacher policy that relies on privileged information (such as global positions and ground truth environmental parameters) unavailable in real-world settings, then distilling this knowledge into a student"
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "image",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.51,
|
| 117 |
+
0.263,
|
| 118 |
+
0.913,
|
| 119 |
+
0.442
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": null
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "image_caption",
|
| 126 |
+
"bbox": [
|
| 127 |
+
0.506,
|
| 128 |
+
0.454,
|
| 129 |
+
0.913,
|
| 130 |
+
0.478
|
| 131 |
+
],
|
| 132 |
+
"angle": 0,
|
| 133 |
+
"content": "Fig. 1. Gait pattern transitions during forward velocity \\((v_{x})\\) acceleration from \\(0.7\\mathrm{m / s}\\) to \\(1.8\\mathrm{m / s}\\)"
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"bbox": [
|
| 138 |
+
0.505,
|
| 139 |
+
0.507,
|
| 140 |
+
0.913,
|
| 141 |
+
0.656
|
| 142 |
+
],
|
| 143 |
+
"angle": 0,
|
| 144 |
+
"content": "policy that operates solely on realistic sensor observations. While this approach has demonstrated impressive results in terms of agility and precision, it faces two key limitations. First, the reliance on handcrafted rewards requires extensive tuning to accommodate different gaits, stride lengths, and motion parameters across varying speeds. Second, these methods often result in rigid, mechanical movements that lack the fluidity and naturalness characteristic of human motion, limiting their effectiveness in human-centric environments."
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "text",
|
| 148 |
+
"bbox": [
|
| 149 |
+
0.505,
|
| 150 |
+
0.659,
|
| 151 |
+
0.914,
|
| 152 |
+
0.914
|
| 153 |
+
],
|
| 154 |
+
"angle": 0,
|
| 155 |
+
"content": "Recent advances in generative adversarial imitation learning, particularly approaches like Adversarial Motion Prior (AMP) [2], have opened new possibilities for achieving more natural robot movements by leveraging large-scale motion capture datasets such as LaFAN1 [3] and AMASS [4]. These methods employ adversarial training to ensure that robot movements closely match the statistical patterns present in human demonstrations [5]. However, their performance is fundamentally limited by the content and quality of the reference motion data. For instance, learning running behaviors becomes impossible with a dataset containing only walking motions, and acquiring diverse specialized skills often requires expensive motion capture sessions. Furthermore, these methods struggle when motion datasets lack diversity or when retargeting processes introduce artifacts, resulting in brittle behaviors that fail to generalize beyond demonstrated movements."
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"type": "text",
|
| 159 |
+
"bbox": [
|
| 160 |
+
0.523,
|
| 161 |
+
0.917,
|
| 162 |
+
0.913,
|
| 163 |
+
0.932
|
| 164 |
+
],
|
| 165 |
+
"angle": 0,
|
| 166 |
+
"content": "The limitations of both approaches highlight a critical gap"
|
| 167 |
+
}
|
| 168 |
+
],
|
| 169 |
+
[
|
| 170 |
+
{
|
| 171 |
+
"type": "text",
|
| 172 |
+
"bbox": [
|
| 173 |
+
0.087,
|
| 174 |
+
0.072,
|
| 175 |
+
0.487,
|
| 176 |
+
0.266
|
| 177 |
+
],
|
| 178 |
+
"angle": 0,
|
| 179 |
+
"content": "in humanoid locomotion: the need to combine the precision and adaptability of RL-based controllers with the natural movement qualities captured in human demonstrations. While RL methods can learn complex skills beyond available motion capture data, they struggle with natural movement generation. Conversely, demonstration-based methods excel at producing natural movements but are constrained by the available motion capture data. This complementary nature suggests the potential for combining both approaches, yet traditional methods struggle to bridge this gap due to the fundamental heterogeneity between expert policies trained with handcrafted rewards and the statistical patterns present in human motion datasets."
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "text",
|
| 183 |
+
"bbox": [
|
| 184 |
+
0.087,
|
| 185 |
+
0.268,
|
| 186 |
+
0.487,
|
| 187 |
+
0.494
|
| 188 |
+
],
|
| 189 |
+
"angle": 0,
|
| 190 |
+
"content": "We address these challenges with StyleLoco, introducing a novel Generative Adversarial Distillation (GAD) framework that effectively combines knowledge from heterogeneous sources. Our approach employs a multi-discriminator architecture where separate discriminators simultaneously distill skills from both an RL-trained expert policy and motion capture demonstrations. This design allows the model to preserve the agility and precision of RL while incorporating the natural style of human movements, enabling natural skill execution even for behaviors not present in the motion capture data. Through extensive evaluations in both simulated and real-world environments, we demonstrate that StyleLoco enables humanoid robots to achieve superior locomotion performance compared to traditional approaches while maintaining natural, human-like movement qualities."
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"type": "text",
|
| 194 |
+
"bbox": [
|
| 195 |
+
0.103,
|
| 196 |
+
0.496,
|
| 197 |
+
0.42,
|
| 198 |
+
0.509
|
| 199 |
+
],
|
| 200 |
+
"angle": 0,
|
| 201 |
+
"content": "The key contribution of our work is three-fold."
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"type": "text",
|
| 205 |
+
"bbox": [
|
| 206 |
+
0.104,
|
| 207 |
+
0.514,
|
| 208 |
+
0.487,
|
| 209 |
+
0.573
|
| 210 |
+
],
|
| 211 |
+
"angle": 0,
|
| 212 |
+
"content": "- A novel GAD framework that enables stable policy distillation from heterogeneous sources, effectively bridging the gap between RL and demonstration-based approaches."
|
| 213 |
+
},
|
| 214 |
+
{
|
| 215 |
+
"type": "text",
|
| 216 |
+
"bbox": [
|
| 217 |
+
0.104,
|
| 218 |
+
0.575,
|
| 219 |
+
0.487,
|
| 220 |
+
0.634
|
| 221 |
+
],
|
| 222 |
+
"angle": 0,
|
| 223 |
+
"content": "- A multi-discriminator architecture that successfully combines task-oriented control objectives with natural motion patterns, achieving both high performance and human-like movement qualities."
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"type": "text",
|
| 227 |
+
"bbox": [
|
| 228 |
+
0.104,
|
| 229 |
+
0.635,
|
| 230 |
+
0.487,
|
| 231 |
+
0.694
|
| 232 |
+
],
|
| 233 |
+
"angle": 0,
|
| 234 |
+
"content": "- Comprehensive validation through real-world deployment on the Unitree H1 humanoid robot, demonstrating robust and natural motion across diverse locomotion tasks and speeds."
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "list",
|
| 238 |
+
"bbox": [
|
| 239 |
+
0.104,
|
| 240 |
+
0.514,
|
| 241 |
+
0.487,
|
| 242 |
+
0.694
|
| 243 |
+
],
|
| 244 |
+
"angle": 0,
|
| 245 |
+
"content": null
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "title",
|
| 249 |
+
"bbox": [
|
| 250 |
+
0.202,
|
| 251 |
+
0.709,
|
| 252 |
+
0.372,
|
| 253 |
+
0.721
|
| 254 |
+
],
|
| 255 |
+
"angle": 0,
|
| 256 |
+
"content": "II. RELATED WORKS"
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "title",
|
| 260 |
+
"bbox": [
|
| 261 |
+
0.087,
|
| 262 |
+
0.731,
|
| 263 |
+
0.307,
|
| 264 |
+
0.744
|
| 265 |
+
],
|
| 266 |
+
"angle": 0,
|
| 267 |
+
"content": "A. Humanoid Robot Locomotion"
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "text",
|
| 271 |
+
"bbox": [
|
| 272 |
+
0.087,
|
| 273 |
+
0.752,
|
| 274 |
+
0.487,
|
| 275 |
+
0.93
|
| 276 |
+
],
|
| 277 |
+
"angle": 0,
|
| 278 |
+
"content": "Locomotion is a critical aspect in the motion control in humanoid robots. Traditional methods typically achieve stable movement by formulating the robot's dynamics model as constrained trajectory optimization problems [6]. Model Predictive Control (MPC) [7], [8], [9] is then employed in real-time to adjust and execute this trajectory, enabling adaption to dynamic environmental changes. However, these model-based methods usually rely heavily on precise modeling of robot dynamic properties [10], [11], [12], [13], [14] and environmental conditions [15], [16], [17], [18], [12], [19], [20], [21], [22], which leads to vulnerabilities in real-world performance, especially when there is a substantial"
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "text",
|
| 282 |
+
"bbox": [
|
| 283 |
+
0.511,
|
| 284 |
+
0.072,
|
| 285 |
+
0.912,
|
| 286 |
+
0.162
|
| 287 |
+
],
|
| 288 |
+
"angle": 0,
|
| 289 |
+
"content": "discrepancy between the applied environments and the predefined conditions [23]. Thus, the optimization problem for humanoid robots is slow to resolve due to the complexity of high-dimensional state and action spaces, rendering it challenging to satisfy the demands for real-time performance and stability."
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "text",
|
| 293 |
+
"bbox": [
|
| 294 |
+
0.511,
|
| 295 |
+
0.164,
|
| 296 |
+
0.912,
|
| 297 |
+
0.404
|
| 298 |
+
],
|
| 299 |
+
"angle": 0,
|
| 300 |
+
"content": "Recently, reinforcement learning (RL) has emerged as a promising paradigm for humanoid locomotion tasks. These methods design tailored reward functions to guide \"try and error\" feedback-based learning process. For instance, reward functions are often crafted to encourage stable walking, minimize energy consumption, or optimize trajectory tracking [24]. However, designing effective reward functions is non-trivial and often requires extensive domain expertise especially for particular locomotion gaits. Natural locomotion motions require different gaits for varying movement speeds, making the design of the reward function even more challenging. Moreover, the numerous rewards terms must strike a delicate balance between competing objectives. To alleviate these drawbacks, we incorporate diverse reference locomotion motions as style guidance to simplify the reward components and encourage the policy learn versatile gaits."
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "title",
|
| 304 |
+
"bbox": [
|
| 305 |
+
0.511,
|
| 306 |
+
0.426,
|
| 307 |
+
0.836,
|
| 308 |
+
0.438
|
| 309 |
+
],
|
| 310 |
+
"angle": 0,
|
| 311 |
+
"content": "B. Imitation Learning for Humanoid locomotion"
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"bbox": [
|
| 316 |
+
0.511,
|
| 317 |
+
0.448,
|
| 318 |
+
0.912,
|
| 319 |
+
0.719
|
| 320 |
+
],
|
| 321 |
+
"angle": 0,
|
| 322 |
+
"content": "The fundamental challenges in learning high-dimensional, underactuated robotic systems include precise task specification and effective exploration. Imitation learning (IL) is a method that learns from expert demonstrations, effectively addressing challenges related to quantifying rewards. Unlike pure reinforcement learning, IL can directly leverage offline expert data to guide policy learning, significantly reducing the exploration space and obtaining dense rewards. This approach is particularly effective in real-world robotics and complex task scenarios. Typically, it involves directly following reference trajectories through motion tracking. Generative Adversarial Imitation Learning (GAIL) [25] has been applied to locomotion tasks. The traditional imitation learning method, as mentioned above, is limited in flexibility—it can only replicate reference trajectories and cannot adapt to downstream tasks. To address this limitation, AMP [2] introduces the concept of learning the style from reference motion as a constraint, guiding the policy learning process."
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "text",
|
| 326 |
+
"bbox": [
|
| 327 |
+
0.511,
|
| 328 |
+
0.721,
|
| 329 |
+
0.912,
|
| 330 |
+
0.93
|
| 331 |
+
],
|
| 332 |
+
"angle": 0,
|
| 333 |
+
"content": "However, this paradigm heavily relies on expert demonstrations, and its performance can significantly degrade when the quality of demonstrations is poor or when the task changes. Since IL strategies are directly derived from the demonstrations, they are prone to overfitting to the demonstration data. As a result, when faced with novel situations, IL may lack sufficient generalization ability. Furthermore, due to the morphological differences between humanoid robots and humans, obtaining high-quality reference data proves challenging, resulting in datasets that can only encompass a limited range of instructions. This scarcity of data can compromise the stability of Generative Adversarial Imitation Learning (GAIL), leading to mode collapse. To mitigate these challenges, we supplement the expert policy as a"
|
| 334 |
+
}
|
| 335 |
+
],
|
| 336 |
+
[
|
| 337 |
+
{
|
| 338 |
+
"type": "text",
|
| 339 |
+
"bbox": [
|
| 340 |
+
0.083,
|
| 341 |
+
0.071,
|
| 342 |
+
0.49,
|
| 343 |
+
0.103
|
| 344 |
+
],
|
| 345 |
+
"angle": 0,
|
| 346 |
+
"content": "reference motion, providing additional motion references to achieve a stable omnidirectional movement strategy."
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"type": "title",
|
| 350 |
+
"bbox": [
|
| 351 |
+
0.084,
|
| 352 |
+
0.116,
|
| 353 |
+
0.314,
|
| 354 |
+
0.132
|
| 355 |
+
],
|
| 356 |
+
"angle": 0,
|
| 357 |
+
"content": "C. Deployable Policy Distillation"
|
| 358 |
+
},
|
| 359 |
+
{
|
| 360 |
+
"type": "text",
|
| 361 |
+
"bbox": [
|
| 362 |
+
0.082,
|
| 363 |
+
0.137,
|
| 364 |
+
0.49,
|
| 365 |
+
0.272
|
| 366 |
+
],
|
| 367 |
+
"angle": 0,
|
| 368 |
+
"content": "In robotic locomotion control, distillation is a method that transfers knowledge from teacher policies with privileged information (e.g., full-state dynamics, simulated ground-truth forces, or ideal state estimators) to student policies for real-world deployment. This knowledge transfer enables the student to leverage the teacher's expertise while operating under real-world constraints, such as partial observation or limited sensory inputs. There are two main approaches to distillation:"
|
| 369 |
+
},
|
| 370 |
+
{
|
| 371 |
+
"type": "text",
|
| 372 |
+
"bbox": [
|
| 373 |
+
0.082,
|
| 374 |
+
0.274,
|
| 375 |
+
0.49,
|
| 376 |
+
0.47
|
| 377 |
+
],
|
| 378 |
+
"angle": 0,
|
| 379 |
+
"content": "BC methods[26], [27] learn by mimicking the teacher's actions using supervised learning on state-action pairs. BC achieves effective performance when the student operates within the teacher's training distribution, as it directly replicates the teacher's behavior under familiar conditions. However, its performance degrades sharply with \"compounding error\" [28] in out-of-distribution (OOD) scenarios (e.g., environmental perturbations, actuator noise, or unseen terrains), as BC inherently lacks the capacity to self-correct deviations from the teacher's demonstration space. This limitation arises because BC relies solely on static datasets of teacher demonstrations, without mechanisms to adapt to novel or unexpected situations."
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"type": "text",
|
| 383 |
+
"bbox": [
|
| 384 |
+
0.082,
|
| 385 |
+
0.471,
|
| 386 |
+
0.49,
|
| 387 |
+
0.743
|
| 388 |
+
],
|
| 389 |
+
"angle": 0,
|
| 390 |
+
"content": "Another popular approach is online distillation via Dataset Aggregation (DAgger) [29], which addresses BC's limitations by iteratively aggregating student-generated trajectories with teacher-corrected actions. Recently, DAgger and its derivative strategies have stood out as a promising distillation approach for humanoid robot [30], [31], [32], [33] to acquire deployable policies. During training, the student policy interacts with the environment, while the teacher provides corrective feedback on the student's actions, enabling the student to refine its policy over multiple iterations. This interactive process mitigates distributional shift and improves robustness to OOD scenarios. However, DAgger still faces a fundamental challenge: the student lacks access to the teacher's privileged information (e.g., simulated contact forces, ideal state estimators, or full-state dynamics). As a result, under partial observation or incomplete environmental feedback, the student struggles to fully replicate the teacher's actions. [24]"
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"type": "title",
|
| 394 |
+
"bbox": [
|
| 395 |
+
0.231,
|
| 396 |
+
0.758,
|
| 397 |
+
0.343,
|
| 398 |
+
0.772
|
| 399 |
+
],
|
| 400 |
+
"angle": 0,
|
| 401 |
+
"content": "III. METHOD"
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"type": "text",
|
| 405 |
+
"bbox": [
|
| 406 |
+
0.082,
|
| 407 |
+
0.781,
|
| 408 |
+
0.491,
|
| 409 |
+
0.934
|
| 410 |
+
],
|
| 411 |
+
"angle": 0,
|
| 412 |
+
"content": "StyleLoco is a novel approach for learning deployable natural locomotion skills that effectively combines the precision of RL-based controllers with the naturalness of human demonstrations. At its core, StyleLoco employs our proposed Generative Adversarial Distillation (GAD) framework, which uses a unique double-discriminator architecture to distill knowledge from both an RL-trained teacher policy and human motion demonstrations into a deployable student policy. Through adversarial learning, our approach generates naturalistic motions beyond the constraints of available"
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"type": "text",
|
| 416 |
+
"bbox": [
|
| 417 |
+
0.505,
|
| 418 |
+
0.071,
|
| 419 |
+
0.913,
|
| 420 |
+
0.101
|
| 421 |
+
],
|
| 422 |
+
"angle": 0,
|
| 423 |
+
"content": "motion capture data while avoiding the artificial behaviors typically resulting from hand-crafted rewards."
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"type": "text",
|
| 427 |
+
"bbox": [
|
| 428 |
+
0.505,
|
| 429 |
+
0.102,
|
| 430 |
+
0.914,
|
| 431 |
+
0.252
|
| 432 |
+
],
|
| 433 |
+
"angle": 0,
|
| 434 |
+
"content": "StyleLoco consists of three key components: (1) a teacher policy trained with privileged information to achieve robust omnidirectional locomotion, (2) a motion dataset containing natural human movements, and (3) our novel GAD framework that combines these sources to train a deployable student policy. The framework's innovation lies in its ability to generate natural behaviors beyond what either source can achieve alone - overcoming both the limited coverage of motion datasets and the unnatural movements that emerge from pure RL training."
|
| 435 |
+
},
|
| 436 |
+
{
|
| 437 |
+
"type": "text",
|
| 438 |
+
"bbox": [
|
| 439 |
+
0.505,
|
| 440 |
+
0.253,
|
| 441 |
+
0.913,
|
| 442 |
+
0.435
|
| 443 |
+
],
|
| 444 |
+
"angle": 0,
|
| 445 |
+
"content": "To achieve this, StyleLoco employs two discriminators that work in concert to adversarially shape the student policy's behavior. One discriminator ensures the policy can replicate the robust performance of the teacher, while the other maintains consistency with natural human motion patterns. This dual-discriminator approach simultaneously serves two purposes: expanding the range of natural behaviors beyond the demonstration data, and distilling the teacher's capabilities into a deployable policy. The resulting system produces controllers that are both highly capable and naturally moving, without being constrained to demonstrated behaviors or exhibiting artifacts from hand-crafted rewards."
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"type": "title",
|
| 449 |
+
"bbox": [
|
| 450 |
+
0.506,
|
| 451 |
+
0.442,
|
| 452 |
+
0.625,
|
| 453 |
+
0.456
|
| 454 |
+
],
|
| 455 |
+
"angle": 0,
|
| 456 |
+
"content": "A. Preliminaries"
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"type": "text",
|
| 460 |
+
"bbox": [
|
| 461 |
+
0.505,
|
| 462 |
+
0.461,
|
| 463 |
+
0.914,
|
| 464 |
+
0.597
|
| 465 |
+
],
|
| 466 |
+
"angle": 0,
|
| 467 |
+
"content": "1) Reinforcement Learning: We formulate humanoid locomotion control as a Partially Observable Markov Decision Process (POMDP) defined by tuple \\(\\langle S, \\mathcal{A}, T, \\mathcal{O}, R, \\gamma \\rangle\\), where \\(\\mathcal{S}\\) represents the full state space, \\(\\mathcal{O}\\) denotes partial observations available to the robot, \\(\\mathcal{A}\\) is the action space, \\(T(s'|s, a)\\) describes state transitions, \\(R(s, a)\\) defines the reward function, and \\(\\gamma \\in (0, 1]\\) is the discount factor. The goal is to learn a policy \\(\\pi(a|o)\\) that maximizes expected discounted returns while operating only on partial observations \\(o \\in \\mathcal{O}\\)."
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"type": "text",
|
| 471 |
+
"bbox": [
|
| 472 |
+
0.505,
|
| 473 |
+
0.598,
|
| 474 |
+
0.914,
|
| 475 |
+
0.658
|
| 476 |
+
],
|
| 477 |
+
"angle": 0,
|
| 478 |
+
"content": "The locomotion task requires tracking commanded velocities \\( v^{*} = (v_{x}^{*}, v_{y}^{*}, \\omega_{z}^{*}) \\), where \\( (v_{x}^{*}, v_{y}^{*}) \\) specify desired linear velocities in local coordinate frame and \\( \\omega_{z}^{*} \\) defines the desired yaw rate. Following [34], we use the reward function:"
|
| 479 |
+
},
|
| 480 |
+
{
|
| 481 |
+
"type": "equation",
|
| 482 |
+
"bbox": [
|
| 483 |
+
0.61,
|
| 484 |
+
0.664,
|
| 485 |
+
0.81,
|
| 486 |
+
0.682
|
| 487 |
+
],
|
| 488 |
+
"angle": 0,
|
| 489 |
+
"content": "\\[\nr _ {\\text {t a s k}} (e, \\lambda) := \\exp (- \\lambda \\cdot \\| e \\| ^ {2})\n\\]"
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"type": "text",
|
| 493 |
+
"bbox": [
|
| 494 |
+
0.505,
|
| 495 |
+
0.689,
|
| 496 |
+
0.913,
|
| 497 |
+
0.719
|
| 498 |
+
],
|
| 499 |
+
"angle": 0,
|
| 500 |
+
"content": "where \\( e \\) represents tracking errors and \\( \\lambda \\) controls their relative importance."
|
| 501 |
+
},
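As a concrete illustration, here is a minimal NumPy sketch of this tracking reward; the \(\lambda\) value and the example errors below are illustrative, not values from the paper:

```python
import numpy as np

def r_task(error: np.ndarray, lam: float) -> float:
    """Tracking reward r_task(e, lambda) = exp(-lambda * ||e||^2)."""
    return float(np.exp(-lam * np.dot(error, error)))

# Example: linear-velocity tracking (lambda chosen for illustration only).
v_cmd = np.array([1.0, 0.0])    # commanded (vx*, vy*) in m/s
v_meas = np.array([0.9, 0.1])   # measured base velocity
print(r_task(v_cmd - v_meas, lam=10.0))  # approaches 1 as tracking improves
```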
|
| 502 |
+
{
|
| 503 |
+
"type": "text",
|
| 504 |
+
"bbox": [
|
| 505 |
+
0.505,
|
| 506 |
+
0.72,
|
| 507 |
+
0.914,
|
| 508 |
+
0.87
|
| 509 |
+
],
|
| 510 |
+
"angle": 0,
|
| 511 |
+
"content": "2) Generative Adversarial Imitation Learning: Generative Adversarial Imitation Learning (GAIL) learns to mimic expert behavior through adversarial training. Given a dataset of expert demonstrations \\(\\mathcal{M} = (s_i, a_i)\\) consisting of state-action pairs, GAIL trains a policy \\(\\pi(a|s)\\) that generates actions \\(a'\\) for given states \\(s'\\). A discriminator network \\(\\mathcal{D}\\) is employed to distinguish between state-action pairs \\((s, a)\\) from the expert demonstrations and those produced by the policy \\(\\pi\\). The reward function used to train the policy is then given by:"
|
| 512 |
+
},
|
| 513 |
+
{
|
| 514 |
+
"type": "equation",
|
| 515 |
+
"bbox": [
|
| 516 |
+
0.597,
|
| 517 |
+
0.878,
|
| 518 |
+
0.824,
|
| 519 |
+
0.895
|
| 520 |
+
],
|
| 521 |
+
"angle": 0,
|
| 522 |
+
"content": "\\[\nr _ {\\mathrm {G A I L}} (s, a) = - \\log \\left(1 - \\mathcal {D} (s, a)\\right)\n\\]"
|
| 523 |
+
},
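A minimal sketch of this reward, assuming a discriminator network whose final layer is a sigmoid so that \(\mathcal{D}(s, a) \in (0, 1)\); the epsilon clamp is an added numerical safeguard, not part of the formula:

```python
import torch

def r_gail(discriminator: torch.nn.Module,
           s: torch.Tensor, a: torch.Tensor,
           eps: float = 1e-8) -> torch.Tensor:
    """GAIL policy reward r = -log(1 - D(s, a)) for a batch of pairs."""
    d = discriminator(torch.cat([s, a], dim=-1))  # assumed sigmoid output in (0, 1)
    return -torch.log(torch.clamp(1.0 - d, min=eps))
```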
|
| 524 |
+
{
|
| 525 |
+
"type": "text",
|
| 526 |
+
"bbox": [
|
| 527 |
+
0.506,
|
| 528 |
+
0.902,
|
| 529 |
+
0.914,
|
| 530 |
+
0.933
|
| 531 |
+
],
|
| 532 |
+
"angle": 0,
|
| 533 |
+
"content": "Adversarial Motion Prior (AMP) [2] extends this framework to handle settings where only state information is"
|
| 534 |
+
}
|
| 535 |
+
],
|
| 536 |
+
[
|
| 537 |
+
{
|
| 538 |
+
"type": "image",
|
| 539 |
+
"bbox": [
|
| 540 |
+
0.092,
|
| 541 |
+
0.074,
|
| 542 |
+
0.482,
|
| 543 |
+
0.277
|
| 544 |
+
],
|
| 545 |
+
"angle": 0,
|
| 546 |
+
"content": null
|
| 547 |
+
},
|
| 548 |
+
{
|
| 549 |
+
"type": "image_caption",
|
| 550 |
+
"bbox": [
|
| 551 |
+
0.082,
|
| 552 |
+
0.294,
|
| 553 |
+
0.49,
|
| 554 |
+
0.342
|
| 555 |
+
],
|
| 556 |
+
"angle": 0,
|
| 557 |
+
"content": "Fig. 2. Overview of the proposed Generative Adversarial Distillation (GAD) framework. Two discriminators separately evaluate the similarity of generated motions against a teacher policy and reference motion dataset, enabling the synthesis of natural and adaptive behaviors."
|
| 558 |
+
},
|
| 559 |
+
{
|
| 560 |
+
"type": "text",
|
| 561 |
+
"bbox": [
|
| 562 |
+
0.082,
|
| 563 |
+
0.368,
|
| 564 |
+
0.489,
|
| 565 |
+
0.474
|
| 566 |
+
],
|
| 567 |
+
"angle": 0,
|
| 568 |
+
"content": "available in the demonstrations. Instead of operating on state-action pairs, AMP's discriminator evaluates state transitions \\((s,s^{\\prime})\\), enabling imitation learning from state-only demonstrations. Additionally, AMP employs a least-squares discriminator [35], replacing the traditional binary cross-entropy loss, which has been empirically shown to provide more stable adversarial training dynamics."
|
| 569 |
+
},
|
| 570 |
+
{
|
| 571 |
+
"type": "title",
|
| 572 |
+
"bbox": [
|
| 573 |
+
0.084,
|
| 574 |
+
0.482,
|
| 575 |
+
0.347,
|
| 576 |
+
0.495
|
| 577 |
+
],
|
| 578 |
+
"angle": 0,
|
| 579 |
+
"content": "B. Generative Adversarial Distillation"
|
| 580 |
+
},
|
| 581 |
+
{
|
| 582 |
+
"type": "text",
|
| 583 |
+
"bbox": [
|
| 584 |
+
0.082,
|
| 585 |
+
0.501,
|
| 586 |
+
0.489,
|
| 587 |
+
0.636
|
| 588 |
+
],
|
| 589 |
+
"angle": 0,
|
| 590 |
+
"content": "The core innovation of StyleLoco is our GAD framework, which synthesizes natural and adaptive behaviors from two complementary sources: a well-trained teacher policy and a reference motion dataset. As illustrated in Fig. 2, GAD trains a student policy \\(\\pi_{\\mathrm{student}}\\) alongside two AMP-style discriminators, \\(\\mathcal{D}_{\\mathrm{teacher}}\\) and \\(\\mathcal{D}_{\\mathrm{dataset}}\\). Each discriminator evaluates the student's generated state transitions against one source of reference motions: either the teacher policy or the motion dataset."
|
| 591 |
+
},
|
| 592 |
+
{
|
| 593 |
+
"type": "text",
|
| 594 |
+
"bbox": [
|
| 595 |
+
0.082,
|
| 596 |
+
0.637,
|
| 597 |
+
0.489,
|
| 598 |
+
0.727
|
| 599 |
+
],
|
| 600 |
+
"angle": 0,
|
| 601 |
+
"content": "Training proceeds in an interleaving manner, alternating between updating the student policy and the discriminators. In each iteration, we first update the student policy using the combined feedback from both discriminators and then train both discriminators to better distinguish between the student's outputs and their respective reference motions."
|
| 602 |
+
},
|
| 603 |
+
{
|
| 604 |
+
"type": "text",
|
| 605 |
+
"bbox": [
|
| 606 |
+
0.1,
|
| 607 |
+
0.728,
|
| 608 |
+
0.402,
|
| 609 |
+
0.743
|
| 610 |
+
],
|
| 611 |
+
"angle": 0,
|
| 612 |
+
"content": "The teacher discriminator \\(\\mathcal{D}_{\\mathrm{teacher}}\\) optimizes:"
|
| 613 |
+
},
|
| 614 |
+
{
|
| 615 |
+
"type": "equation",
|
| 616 |
+
"bbox": [
|
| 617 |
+
0.108,
|
| 618 |
+
0.747,
|
| 619 |
+
0.465,
|
| 620 |
+
0.822
|
| 621 |
+
],
|
| 622 |
+
"angle": 0,
|
| 623 |
+
"content": "\\[\n\\begin{array}{l} \\arg \\min _ {\\mathcal {D} _ {\\text {t e a c h e r}}} \\mathbb {E} _ {(s, s ^ {\\prime}) \\sim \\pi_ {\\text {t e a c h e r}}} \\left[ \\left(\\mathcal {D} _ {\\text {t e a c h e r}} (s, s ^ {\\prime}) - 1\\right) ^ {2} \\right] \\\\ + \\mathbb {E} _ {(s, s ^ {\\prime}) \\sim \\pi_ {\\text {s t u d e n t}}} \\left[ \\left(\\mathcal {D} _ {\\text {t e a c h e r}} (s, s ^ {\\prime}) + 1\\right) ^ {2} \\right] \\\\ + \\lambda \\mathbb {E} _ {(s, s ^ {\\prime}) \\sim \\pi_ {\\text {t e a c h e r}}} \\left[ \\| \\nabla_ {(s, s ^ {\\prime})} \\mathcal {D} _ {\\text {t e a c h e r}} (s, s ^ {\\prime}) \\| ^ {2} \\right], \\\\ \\end{array}\n\\]"
|
| 624 |
+
},
|
| 625 |
+
{
|
| 626 |
+
"type": "text",
|
| 627 |
+
"bbox": [
|
| 628 |
+
0.082,
|
| 629 |
+
0.826,
|
| 630 |
+
0.489,
|
| 631 |
+
0.856
|
| 632 |
+
],
|
| 633 |
+
"angle": 0,
|
| 634 |
+
"content": "while the reference discriminator \\(\\mathcal{D}_{\\mathrm{dataset}}\\) ensures natural motion qualities by optimizing:"
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"type": "equation",
|
| 638 |
+
"bbox": [
|
| 639 |
+
0.116,
|
| 640 |
+
0.86,
|
| 641 |
+
0.454,
|
| 642 |
+
0.935
|
| 643 |
+
],
|
| 644 |
+
"angle": 0,
|
| 645 |
+
"content": "\\[\n\\begin{array}{l} \\arg \\min _ {\\mathcal {D} _ {\\text {d a t a s e t}}} \\mathbb {E} _ {(s, s ^ {\\prime}) \\sim \\mathcal {M}} \\left[ \\left(\\mathcal {D} _ {\\text {d a t a s e t}} (s, s ^ {\\prime}) - 1\\right) ^ {2} \\right] \\\\ + \\mathbb {E} _ {(s, s ^ {\\prime}) \\sim \\pi_ {\\text {s t u d e n t}}} \\left[ \\left(\\mathcal {D} _ {\\text {d a t a s e t}} (s, s ^ {\\prime}) + 1\\right) ^ {2} \\right] \\\\ + \\lambda \\mathbb {E} _ {(s, s ^ {\\prime}) \\sim \\mathcal {M}} \\left[ \\| \\nabla_ {(s, s ^ {\\prime})} \\mathcal {D} _ {\\text {d a t a s e t}} (s, s ^ {\\prime}) \\| ^ {2} \\right], \\\\ \\end{array}\n\\]"
|
| 646 |
+
},
|
| 647 |
+
{
|
| 648 |
+
"type": "text",
|
| 649 |
+
"bbox": [
|
| 650 |
+
0.506,
|
| 651 |
+
0.072,
|
| 652 |
+
0.912,
|
| 653 |
+
0.101
|
| 654 |
+
],
|
| 655 |
+
"angle": 0,
|
| 656 |
+
"content": "where \\(\\lambda\\) controls the gradient penalty term that ensures stable training."
|
| 657 |
+
},
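The two objectives share the same least-squares form and differ only in where their positive samples come from (teacher rollouts vs. the motion dataset), so a single hedged PyTorch sketch covers either discriminator; the network architecture, batch layout, and default penalty coefficient are assumptions:

```python
import torch

def lsgan_disc_loss(disc: torch.nn.Module,
                    ref_trans: torch.Tensor,      # (s, s') pairs from teacher or dataset
                    student_trans: torch.Tensor,  # (s, s') pairs from the student policy
                    grad_pen_coef: float = 10.0) -> torch.Tensor:
    """Least-squares discriminator loss: reference transitions are pushed
    toward +1, student transitions toward -1, plus the gradient penalty
    lambda * E[||grad D||^2] on reference samples for training stability."""
    ref_trans = ref_trans.requires_grad_(True)
    d_ref = disc(ref_trans)
    d_student = disc(student_trans)

    loss = ((d_ref - 1.0) ** 2).mean() + ((d_student + 1.0) ** 2).mean()

    grad = torch.autograd.grad(d_ref.sum(), ref_trans, create_graph=True)[0]
    return loss + grad_pen_coef * grad.pow(2).sum(dim=-1).mean()
```

The same function would be called once with teacher transitions for \(\mathcal{D}_{\mathrm{teacher}}\) and once with dataset transitions for \(\mathcal{D}_{\mathrm{dataset}}\).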
|
| 658 |
+
{
|
| 659 |
+
"type": "text",
|
| 660 |
+
"bbox": [
|
| 661 |
+
0.506,
|
| 662 |
+
0.102,
|
| 663 |
+
0.912,
|
| 664 |
+
0.13
|
| 665 |
+
],
|
| 666 |
+
"angle": 0,
|
| 667 |
+
"content": "The student policy learns from a combined reward function:"
|
| 668 |
+
},
|
| 669 |
+
{
|
| 670 |
+
"type": "equation",
|
| 671 |
+
"bbox": [
|
| 672 |
+
0.559,
|
| 673 |
+
0.143,
|
| 674 |
+
0.86,
|
| 675 |
+
0.157
|
| 676 |
+
],
|
| 677 |
+
"angle": 0,
|
| 678 |
+
"content": "\\[\nr = r _ {\\text {t a s k}} + w _ {\\text {t e a c h e r}} \\cdot r _ {\\text {t e a c h e r}} + w _ {\\text {d a t a s e t}} \\cdot r _ {\\text {d a t a s e t}},\n\\]"
|
| 679 |
+
},
|
| 680 |
+
{
|
| 681 |
+
"type": "text",
|
| 682 |
+
"bbox": [
|
| 683 |
+
0.507,
|
| 684 |
+
0.165,
|
| 685 |
+
0.847,
|
| 686 |
+
0.18
|
| 687 |
+
],
|
| 688 |
+
"angle": 0,
|
| 689 |
+
"content": "where the discriminator rewards are computed as:"
|
| 690 |
+
},
|
| 691 |
+
{
|
| 692 |
+
"type": "equation",
|
| 693 |
+
"bbox": [
|
| 694 |
+
0.541,
|
| 695 |
+
0.187,
|
| 696 |
+
0.876,
|
| 697 |
+
0.206
|
| 698 |
+
],
|
| 699 |
+
"angle": 0,
|
| 700 |
+
"content": "\\[\nr _ {\\text {t e a c h e r}} = \\max \\left[ 0, \\quad 1 - 0. 2 5 \\left(\\mathcal {D} _ {\\text {t e a c h e r}} (s, s ^ {\\prime}) - 1\\right) ^ {2} \\right]\n\\]"
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"type": "equation",
|
| 704 |
+
"bbox": [
|
| 705 |
+
0.543,
|
| 706 |
+
0.213,
|
| 707 |
+
0.874,
|
| 708 |
+
0.231
|
| 709 |
+
],
|
| 710 |
+
"angle": 0,
|
| 711 |
+
"content": "\\[\nr _ {\\text {d a t a s e t}} = \\max \\left[ 0, \\quad 1 - 0. 2 5 \\left(\\mathcal {D} _ {\\text {d a t a s e t}} (s, s ^ {\\prime}) - 1\\right) ^ {2} \\right]\n\\]"
|
| 712 |
+
},
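A minimal sketch of how these terms combine per timestep, assuming scalar discriminator outputs; the weights w_teacher and w_dataset are tunable hyperparameters whose values are not given at this point in the text:

```python
def style_reward(d_out: float) -> float:
    """Clipped least-squares style reward: max(0, 1 - 0.25 * (D - 1)^2)."""
    return max(0.0, 1.0 - 0.25 * (d_out - 1.0) ** 2)

def combined_reward(r_task: float, d_teacher: float, d_dataset: float,
                    w_teacher: float, w_dataset: float) -> float:
    """r = r_task + w_teacher * r_teacher + w_dataset * r_dataset."""
    return (r_task
            + w_teacher * style_reward(d_teacher)
            + w_dataset * style_reward(d_dataset))
```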
|
| 713 |
+
{
|
| 714 |
+
"type": "text",
|
| 715 |
+
"bbox": [
|
| 716 |
+
0.505,
|
| 717 |
+
0.238,
|
| 718 |
+
0.913,
|
| 719 |
+
0.342
|
| 720 |
+
],
|
| 721 |
+
"angle": 0,
|
| 722 |
+
"content": "Both discriminators process state transitions using a consistent feature set comprising joint positions and velocities, root linear and angular velocities in the robot's local frame, base link orientation (roll and pitch), and root height. This common representation enables effective comparison across different motion sources while capturing the essential characteristics of locomotion behavior."
|
| 723 |
+
},
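A small sketch of assembling this shared discriminator input from one simulator state; the argument names and ordering are illustrative:

```python
import numpy as np

def disc_features(joint_pos, joint_vel, root_lin_vel, root_ang_vel,
                  roll, pitch, root_height):
    """Concatenate the common feature set: joint positions/velocities, root
    linear/angular velocity in the local frame, base roll/pitch, and root
    height. Each discriminator consumes an (s, s') pair of such vectors."""
    return np.concatenate([
        joint_pos, joint_vel,
        root_lin_vel, root_ang_vel,
        [roll, pitch, root_height],
    ]).astype(np.float32)
```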
|
| 724 |
+
{
|
| 725 |
+
"type": "text",
|
| 726 |
+
"bbox": [
|
| 727 |
+
0.505,
|
| 728 |
+
0.344,
|
| 729 |
+
0.914,
|
| 730 |
+
0.525
|
| 731 |
+
],
|
| 732 |
+
"angle": 0,
|
| 733 |
+
"content": "Deployable Policy Distillation A key aspect of our framework is enabling the student policy \\(\\pi_{\\mathrm{student}}\\) to generate actions when privileged observations are unavailable in real-world deployment. While the teacher policy benefits from privileged information during training to better understand task objectives and achieve efficient convergence, the student policy must learn to generate appropriate actions using only deployable sensor observations. This asymmetric approach allows us to leverage rich state information during training while ensuring the final policy remains deployable. The specific observations available to the student policy are detailed in Table I."
|
| 734 |
+
},
|
| 735 |
+
{
|
| 736 |
+
"type": "title",
|
| 737 |
+
"bbox": [
|
| 738 |
+
0.508,
|
| 739 |
+
0.535,
|
| 740 |
+
0.649,
|
| 741 |
+
0.549
|
| 742 |
+
],
|
| 743 |
+
"angle": 0,
|
| 744 |
+
"content": "C. Training Process"
|
| 745 |
+
},
|
| 746 |
+
{
|
| 747 |
+
"type": "text",
|
| 748 |
+
"bbox": [
|
| 749 |
+
0.505,
|
| 750 |
+
0.554,
|
| 751 |
+
0.913,
|
| 752 |
+
0.674
|
| 753 |
+
],
|
| 754 |
+
"angle": 0,
|
| 755 |
+
"content": "Curriculum Learning Teacher policy \\(\\pi_{\\text{teacher}}\\) training adopts a curriculum learning approach comprised of two distinct phases. The initial stability phase prioritizes maintaining balance and preventing falls, establishing fundamental stability behaviors. This is followed by the mobility phase, where the policy develops comprehensive omnidirectional locomotion capabilities. The specific reward components for each phase are detailed in Table II."
|
| 756 |
+
},
|
| 757 |
+
{
|
| 758 |
+
"type": "text",
|
| 759 |
+
"bbox": [
|
| 760 |
+
0.505,
|
| 761 |
+
0.675,
|
| 762 |
+
0.914,
|
| 763 |
+
0.841
|
| 764 |
+
],
|
| 765 |
+
"angle": 0,
|
| 766 |
+
"content": "Demonstration Data Preparation The locomotion motion data in this work is sourced from the LaFAN1 dataset and meticulously retargeted to conform to the kinematic specifications of Unitree H1 robots. While this dataset offers diverse motion styles and velocity ranges, utilizing all demonstrations simultaneously introduces ambiguity in the learning process. To facilitate distinct gait style demonstrations across different velocity commands, we strategically selected motion clips with minimal or non-overlapping velocity ranges, ensuring a relatively clear behavioral boundaries between different locomotion patterns."
|
| 767 |
+
},
|
| 768 |
+
{
|
| 769 |
+
"type": "text",
|
| 770 |
+
"bbox": [
|
| 771 |
+
0.505,
|
| 772 |
+
0.842,
|
| 773 |
+
0.914,
|
| 774 |
+
0.933
|
| 775 |
+
],
|
| 776 |
+
"angle": 0,
|
| 777 |
+
"content": "Asymmetric Actor-critic Architecture Student policy training utilizes an asymmetric actor-critic architecture to effectively handle partial observability in real-world conditions. The student's observation processing begins with temporal partial observations \\( o_{t}^{N} = [o_{t - n}, o_{t - n + 1} \\dots o_{t}]^{T} \\). These observations are first processed through a partial states"
|
| 778 |
+
}
|
| 779 |
+
],
|
| 780 |
+
[
|
| 781 |
+
{
|
| 782 |
+
"type": "table_caption",
|
| 783 |
+
"bbox": [
|
| 784 |
+
0.385,
|
| 785 |
+
0.065,
|
| 786 |
+
0.613,
|
| 787 |
+
0.092
|
| 788 |
+
],
|
| 789 |
+
"angle": 0,
|
| 790 |
+
"content": "TABLEI AVAILABLE OBSERVATIONS IN TRAINING"
|
| 791 |
+
},
|
| 792 |
+
{
|
| 793 |
+
"type": "table",
|
| 794 |
+
"bbox": [
|
| 795 |
+
0.085,
|
| 796 |
+
0.103,
|
| 797 |
+
0.912,
|
| 798 |
+
0.153
|
| 799 |
+
],
|
| 800 |
+
"angle": 0,
|
| 801 |
+
"content": "<table><tr><td>Sources</td><td>Phase</td><td>CmdVel</td><td>DoFPos</td><td>DoFVel</td><td>LastAction</td><td>Diff</td><td>BaseLinVel</td><td>BaseAngVel</td><td>RPY</td><td>Root Height</td><td>Push</td><td>Fraction</td><td>BodyMass</td><td>ContactStatus</td></tr><tr><td>Teacher</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Dataset</td><td></td><td></td><td>✓</td><td>✓</td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td></td></tr><tr><td>Student</td><td></td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td>✓</td><td>✓</td><td></td><td></td><td></td><td></td><td></td></tr></table>"
|
| 802 |
+
},
|
| 803 |
+
{
|
| 804 |
+
"type": "table_footnote",
|
| 805 |
+
"bbox": [
|
| 806 |
+
0.094,
|
| 807 |
+
0.153,
|
| 808 |
+
0.125,
|
| 809 |
+
0.161
|
| 810 |
+
],
|
| 811 |
+
"angle": 0,
|
| 812 |
+
"content": "Notes:"
|
| 813 |
+
},
|
| 814 |
+
{
|
| 815 |
+
"type": "table_footnote",
|
| 816 |
+
"bbox": [
|
| 817 |
+
0.094,
|
| 818 |
+
0.162,
|
| 819 |
+
0.4,
|
| 820 |
+
0.171
|
| 821 |
+
],
|
| 822 |
+
"angle": 0,
|
| 823 |
+
"content": "- Phase: Indicates the phase of motion, serving as a temporal marker."
|
| 824 |
+
},
|
| 825 |
+
{
|
| 826 |
+
"type": "table_footnote",
|
| 827 |
+
"bbox": [
|
| 828 |
+
0.094,
|
| 829 |
+
0.171,
|
| 830 |
+
0.621,
|
| 831 |
+
0.18
|
| 832 |
+
],
|
| 833 |
+
"angle": 0,
|
| 834 |
+
"content": "- Diff: Difference between current joint angular position and reference joint angular position, calculated based on Phase."
|
| 835 |
+
},
|
| 836 |
+
{
|
| 837 |
+
"type": "table_footnote",
|
| 838 |
+
"bbox": [
|
| 839 |
+
0.094,
|
| 840 |
+
0.18,
|
| 841 |
+
0.442,
|
| 842 |
+
0.189
|
| 843 |
+
],
|
| 844 |
+
"angle": 0,
|
| 845 |
+
"content": "- ContactStatus: Information regarding the stance mask and feet contact forces."
|
| 846 |
+
},
|
| 847 |
+
{
|
| 848 |
+
"type": "list",
|
| 849 |
+
"bbox": [
|
| 850 |
+
0.094,
|
| 851 |
+
0.153,
|
| 852 |
+
0.621,
|
| 853 |
+
0.189
|
| 854 |
+
],
|
| 855 |
+
"angle": 0,
|
| 856 |
+
"content": null
|
| 857 |
+
},
|
| 858 |
+
{
|
| 859 |
+
"type": "text",
|
| 860 |
+
"bbox": [
|
| 861 |
+
0.082,
|
| 862 |
+
0.219,
|
| 863 |
+
0.49,
|
| 864 |
+
0.295
|
| 865 |
+
],
|
| 866 |
+
"angle": 0,
|
| 867 |
+
"content": "encoder \\(\\mathcal{E}\\) to generate context latent representations, which are then combined with the current partial state observations and the velocity command. The resulting combined representation passes through MLP layers to produce the final control actions."
|
| 868 |
+
},
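A hedged PyTorch sketch of this actor path; all layer widths, dimensions, and the history length are illustrative assumptions rather than the paper's settings:

```python
import torch
import torch.nn as nn

class StudentActor(nn.Module):
    """History of partial observations -> encoder latent, concatenated with
    the current partial observation and the velocity command, then an MLP
    head that outputs joint actions."""
    def __init__(self, obs_dim=45, hist_len=15, cmd_dim=3,
                 latent_dim=32, act_dim=19):
        super().__init__()
        self.encoder = nn.Sequential(            # partial-states encoder E
            nn.Linear(obs_dim * hist_len, 256), nn.ELU(),
            nn.Linear(256, latent_dim),
        )
        self.head = nn.Sequential(               # MLP producing actions
            nn.Linear(latent_dim + obs_dim + cmd_dim, 256), nn.ELU(),
            nn.Linear(256, act_dim),
        )

    def forward(self, obs_hist, obs_now, cmd):
        # obs_hist: (batch, hist_len, obs_dim); obs_now: (batch, obs_dim)
        z = self.encoder(obs_hist.flatten(start_dim=1))
        return self.head(torch.cat([z, obs_now, cmd], dim=-1))
```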
|
| 869 |
+
{
|
| 870 |
+
"type": "table_caption",
|
| 871 |
+
"bbox": [
|
| 872 |
+
0.086,
|
| 873 |
+
0.303,
|
| 874 |
+
0.486,
|
| 875 |
+
0.33
|
| 876 |
+
],
|
| 877 |
+
"angle": 0,
|
| 878 |
+
"content": "TABLE II REWARD DEFINITIONS USED IN TEACHER POLICY TRAINING."
|
| 879 |
+
},
|
| 880 |
+
{
|
| 881 |
+
"type": "table",
|
| 882 |
+
"bbox": [
|
| 883 |
+
0.085,
|
| 884 |
+
0.332,
|
| 885 |
+
0.49,
|
| 886 |
+
0.736
|
| 887 |
+
],
|
| 888 |
+
"angle": 0,
|
| 889 |
+
"content": "<table><tr><td>Term</td><td>Definition</td><td>Weight</td></tr><tr><td colspan=\"3\">First Stage</td></tr><tr><td>Termination</td><td>termination = Ireset - Ittimeout</td><td>-1000</td></tr><tr><td>Linear Velocity Tracking</td><td>exp(-||xxy|2/0.1)</td><td>10</td></tr><tr><td>Angular Velocity Tracking</td><td>exp(-||u|2/0.1)</td><td>10</td></tr><tr><td>Linear Velocity z</td><td>||vz||2</td><td>-1.0</td></tr><tr><td>R-P Angular Velocity</td><td>||ωxy||2</td><td>-0.5</td></tr><tr><td>Orientation</td><td>Σi∈{x,y} (projected gravityi)2</td><td>-1.0</td></tr><tr><td>Base Height</td><td>exp(-100|hbase-htarget|) where hbase=zroot-(feet-0.08)</td><td>0.5</td></tr><tr><td>Action Rate</td><td>||at-at-1||2</td><td>-0.01</td></tr><tr><td>Energy Square</td><td>Σi=10(τiq̂i)21+||cxy||2</td><td>-5e-6</td></tr><tr><td>Stand Still</td><td>(Σ|q-qdefault|·Istand</td><td>-1</td></tr><tr><td>Feet Clearance</td><td>Σi||hfeet,i-htarget|<0.01|·(1-gait phasei)</td><td>2.5</td></tr><tr><td>Feet Contact Number</td><td>mean(Πcontact=stance mask)-Π(contact≠stance mask)</td><td>1</td></tr><tr><td>Default Joint Position</td><td>||q[1:2]-qdefault||2+||q[6:7]-qdefault||2</td><td>0.5</td></tr><tr><td>Action Smoothness</td><td>||at-2-2at-1+at||2</td><td>-0.001</td></tr><tr><td>Feet Slip</td><td>1-Σi exp(-||vxy|i||2)</td><td>-0.05</td></tr><tr><td>Reference Joint Position</td><td>exp(-2||q-qref||2)-0.5min(||q-qref||2,0.5)</td><td>10</td></tr><tr><td>Pelvis-Angle y Distance</td><td>(||ypelvis,pitch-yankle,L)||+||ypelvis,pitch-yankle,R)||·Π{|vy|<0.1}</td><td>-5</td></tr><tr><td>Upper Joint Constraints</td><td>Σ||q[12:14]-qdefault||2+||q[16:18]-qdefault||2+||q10-q10||2</td><td>-5</td></tr><tr><td colspan=\"3\">Second Stage</td></tr><tr><td>Joint Torque</td><td>||τ||2</td><td>-2e-5</td></tr><tr><td>Joint Acceleration</td><td>||q||2</td><td>-1e-6</td></tr><tr><td>Feet Contact Forces</td><td>Σi max(||contact forcei||2-Fmax,0)</td><td>-0.01</td></tr><tr><td>Torque When Stand-Still</td><td>Σ[(τt-τt-1)2+(τt+τt-2-2τt-1)2]·Istand</td><td>-1e-3</td></tr><tr><td>Body Pitch</td><td>||pitch-0.01||</td><td>-5</td></tr><tr><td>Body Roll</td><td>||roll||</td><td>-10</td></tr><tr><td>Track Velocity Hard</td><td>e-10||vxy-target-vxy||+e-10|ωz|2</td><td>50</td></tr><tr><td>Ankle Air Time</td><td>∑(tair,i-0.2)·Ifirst,contact,i·Istand.still</td><td>100</td></tr><tr><td>Ankle Limits</td><td>-∑i i∈{4,9} clip(qi-qmin,i,0) + clip(qmax,i-qi,0)</td><td>-200</td></tr></table>"
|
| 890 |
+
},
|
| 891 |
+
{
|
| 892 |
+
"type": "table_footnote",
|
| 893 |
+
"bbox": [
|
| 894 |
+
0.094,
|
| 895 |
+
0.735,
|
| 896 |
+
0.119,
|
| 897 |
+
0.741
|
| 898 |
+
],
|
| 899 |
+
"angle": 0,
|
| 900 |
+
"content": "Notes:"
|
| 901 |
+
},
|
| 902 |
+
{
|
| 903 |
+
"type": "table_footnote",
|
| 904 |
+
"bbox": [
|
| 905 |
+
0.094,
|
| 906 |
+
0.741,
|
| 907 |
+
0.256,
|
| 908 |
+
0.749
|
| 909 |
+
],
|
| 910 |
+
"angle": 0,
|
| 911 |
+
"content": "- \\(\\mathbb{I}_A = 1\\) if \\(A = true\\) and \\(\\mathbb{I}_A = 0\\) otherwise."
|
| 912 |
+
},
|
| 913 |
+
{
|
| 914 |
+
"type": "table_footnote",
|
| 915 |
+
"bbox": [
|
| 916 |
+
0.094,
|
| 917 |
+
0.749,
|
| 918 |
+
0.319,
|
| 919 |
+
0.756
|
| 920 |
+
],
|
| 921 |
+
"angle": 0,
|
| 922 |
+
"content": "The maximum allowable feet contact force \\(F_{\\mathrm{max}}\\) is set to 550N"
|
| 923 |
+
},
|
| 924 |
+
{
|
| 925 |
+
"type": "list",
|
| 926 |
+
"bbox": [
|
| 927 |
+
0.094,
|
| 928 |
+
0.735,
|
| 929 |
+
0.319,
|
| 930 |
+
0.756
|
| 931 |
+
],
|
| 932 |
+
"angle": 0,
|
| 933 |
+
"content": null
|
| 934 |
+
},
|
| 935 |
+
{
|
| 936 |
+
"type": "title",
|
| 937 |
+
"bbox": [
|
| 938 |
+
0.084,
|
| 939 |
+
0.778,
|
| 940 |
+
0.383,
|
| 941 |
+
0.793
|
| 942 |
+
],
|
| 943 |
+
"angle": 0,
|
| 944 |
+
"content": "D. Implementation and Deployment Details"
|
| 945 |
+
},
|
| 946 |
+
{
|
| 947 |
+
"type": "text",
|
| 948 |
+
"bbox": [
|
| 949 |
+
0.082,
|
| 950 |
+
0.796,
|
| 951 |
+
0.489,
|
| 952 |
+
0.841
|
| 953 |
+
],
|
| 954 |
+
"angle": 0,
|
| 955 |
+
"content": "Both policies are implemented using the Proximal Policy Optimization (PPO) algorithm [36], with comprehensive domain randomization ensuring robust real-world transfer."
|
| 956 |
+
},
|
| 957 |
+
{
|
| 958 |
+
"type": "text",
|
| 959 |
+
"bbox": [
|
| 960 |
+
0.082,
|
| 961 |
+
0.842,
|
| 962 |
+
0.49,
|
| 963 |
+
0.933
|
| 964 |
+
],
|
| 965 |
+
"angle": 0,
|
| 966 |
+
"content": "Domain Randomization Following existing researches on humanoid whole-body control, our domain randomization encompasses three aspects: physical parameter variations, systematic observation noise injection, and randomized external force perturbations. The physical parameters include variations in mass distribution, joint properties, and surface"
|
| 967 |
+
},
|
| 968 |
+
{
|
| 969 |
+
"type": "text",
|
| 970 |
+
"bbox": [
|
| 971 |
+
0.505,
|
| 972 |
+
0.219,
|
| 973 |
+
0.914,
|
| 974 |
+
0.28
|
| 975 |
+
],
|
| 976 |
+
"angle": 0,
|
| 977 |
+
"content": "interactions. Observation noise is carefully calibrated to match real-world sensor characteristics, while external forces simulate unexpected disturbances the robot might encounter during deployment."
|
| 978 |
+
},
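An illustrative configuration sketch covering the three randomization aspects; every numeric range below is an assumption for exposition, as the paper does not list values here:

```python
# Hypothetical domain-randomization configuration (all values are assumptions).
DOMAIN_RAND = {
    # 1) physical parameter variations
    "base_mass_offset_kg": (-2.0, 2.0),
    "joint_friction_scale": (0.8, 1.2),
    "ground_friction": (0.5, 1.25),
    # 2) observation noise injection, matched to sensor characteristics
    "obs_noise_std": {"dof_pos": 0.01, "dof_vel": 0.5, "base_ang_vel": 0.2},
    # 3) random external force perturbations
    "push_interval_s": 8.0,
    "max_push_vel_xy_mps": 1.0,
}
```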
|
| 979 |
+
{
|
| 980 |
+
"type": "text",
|
| 981 |
+
"bbox": [
|
| 982 |
+
0.505,
|
| 983 |
+
0.28,
|
| 984 |
+
0.912,
|
| 985 |
+
0.384
|
| 986 |
+
],
|
| 987 |
+
"angle": 0,
|
| 988 |
+
"content": "Safe Deployment Safe deployment is achieved through torque limiting. This controller continuously monitors and adjusts torque outputs to remain within safe operational limits. The deployment architecture operates with the policy executing at \\(50\\mathrm{Hz}\\), while the low-level control loop maintains precise actuation at \\(1000\\mathrm{Hz}\\), ensuring responsive and stable behavior."
|
| 989 |
+
},
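A minimal sketch of this two-rate scheme: the policy runs at 50 Hz while a clamped PD loop runs at 1000 Hz, so each action is held for 20 low-level steps; the gains and torque limit below are illustrative, not the H1's actual limits:

```python
import numpy as np

POLICY_HZ, CONTROL_HZ = 50, 1000
STEPS_PER_ACTION = CONTROL_HZ // POLICY_HZ  # 20 low-level steps per action
TAU_LIMIT = 80.0  # illustrative per-joint torque limit in N*m

def pd_torque(q_des, q, dq, kp=60.0, kd=2.0):
    """PD low-level control with hard torque clamping for safe deployment."""
    tau = kp * (q_des - q) - kd * dq
    return np.clip(tau, -TAU_LIMIT, TAU_LIMIT)

# One policy action (desired joint positions) held across the inner loop:
print(pd_torque(q_des=np.zeros(3), q=np.array([0.1, -0.2, 0.0]), dq=np.zeros(3)))
```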
|
| 990 |
+
{
|
| 991 |
+
"type": "text",
|
| 992 |
+
"bbox": [
|
| 993 |
+
0.505,
|
| 994 |
+
0.386,
|
| 995 |
+
0.913,
|
| 996 |
+
0.492
|
| 997 |
+
],
|
| 998 |
+
"angle": 0,
|
| 999 |
+
"content": "Real-world execution incorporates additional safety measures through continuous monitoring of joint positions, velocities, and torques. When approaching operational limits, the system smoothly modulates commands to maintain safe operation while preserving task performance. This approach enables robust deployment across varying conditions while protecting the hardware from potential damage."
|
| 1000 |
+
},
|
| 1001 |
+
{
|
| 1002 |
+
"type": "title",
|
| 1003 |
+
"bbox": [
|
| 1004 |
+
0.633,
|
| 1005 |
+
0.503,
|
| 1006 |
+
0.788,
|
| 1007 |
+
0.516
|
| 1008 |
+
],
|
| 1009 |
+
"angle": 0,
|
| 1010 |
+
"content": "IV. EXPERIMENTS"
|
| 1011 |
+
},
|
| 1012 |
+
{
|
| 1013 |
+
"type": "text",
|
| 1014 |
+
"bbox": [
|
| 1015 |
+
0.505,
|
| 1016 |
+
0.523,
|
| 1017 |
+
0.914,
|
| 1018 |
+
0.644
|
| 1019 |
+
],
|
| 1020 |
+
"angle": 0,
|
| 1021 |
+
"content": "We conduct comprehensive experiments in both simulation and real-world environments to evaluate StyleLoco's effectiveness in generating natural and adaptive locomotion behaviors. Our evaluation framework addresses four key aspects: (1) the effectiveness of GAD's distillation capabilities, (2) the accuracy of velocity tracking during locomotion tasks, (3) the quality of motion style reproduction, and (4) real-world deployment performance."
|
| 1022 |
+
},
|
| 1023 |
+
{
|
| 1024 |
+
"type": "text",
|
| 1025 |
+
"bbox": [
|
| 1026 |
+
0.505,
|
| 1027 |
+
0.644,
|
| 1028 |
+
0.914,
|
| 1029 |
+
0.765
|
| 1030 |
+
],
|
| 1031 |
+
"angle": 0,
|
| 1032 |
+
"content": "All experiments are conducted using the Unitree H1 humanoid robot in both simulated and physical environments. For reference motions, we utilize the LaFAN1 dataset, carefully retargeted to match the H1's kinematics. The motion data comprises global root position and orientation (quaternion), along with joint angular positions. Simulated experiments are performed in the NVIDIA Isaac Gym environment, which enables efficient parallel training and evaluation."
|
| 1033 |
+
},
|
| 1034 |
+
{
|
| 1035 |
+
"type": "title",
|
| 1036 |
+
"bbox": [
|
| 1037 |
+
0.506,
|
| 1038 |
+
0.776,
|
| 1039 |
+
0.698,
|
| 1040 |
+
0.791
|
| 1041 |
+
],
|
| 1042 |
+
"angle": 0,
|
| 1043 |
+
"content": "A. Distillation Performance"
|
| 1044 |
+
},
|
| 1045 |
+
{
|
| 1046 |
+
"type": "text",
|
| 1047 |
+
"bbox": [
|
| 1048 |
+
0.505,
|
| 1049 |
+
0.796,
|
| 1050 |
+
0.913,
|
| 1051 |
+
0.87
|
| 1052 |
+
],
|
| 1053 |
+
"angle": 0,
|
| 1054 |
+
"content": "Our first set of experiments evaluates GAD's ability to effectively distill privileged information from the teacher policy while maintaining task performance. We compare GAD against several baseline distillation approaches, measuring both task achievement and motion naturalness."
|
| 1055 |
+
},
|
| 1056 |
+
{
|
| 1057 |
+
"type": "text",
|
| 1058 |
+
"bbox": [
|
| 1059 |
+
0.505,
|
| 1060 |
+
0.871,
|
| 1061 |
+
0.913,
|
| 1062 |
+
0.933
|
| 1063 |
+
],
|
| 1064 |
+
"angle": 0,
|
| 1065 |
+
"content": "One of the main contributions of this work is the development of a Generative Adversarial Distillation method. In this context, we emphasize the ability of our single teacher discriminator (GAD-SD) to effectively distill knowledge from"
|
| 1066 |
+
}
|
| 1067 |
+
],
|
| 1068 |
+
[
|
| 1069 |
+
{
|
| 1070 |
+
"type": "text",
|
| 1071 |
+
"bbox": [
|
| 1072 |
+
0.082,
|
| 1073 |
+
0.071,
|
| 1074 |
+
0.491,
|
| 1075 |
+
0.116
|
| 1076 |
+
],
|
| 1077 |
+
"angle": 0,
|
| 1078 |
+
"content": "the teacher policy. To evaluate this capability, we compare our method against DAgger, one of the most widely used distillation methods in robot control."
|
| 1079 |
+
},
|
| 1080 |
+
{
|
| 1081 |
+
"type": "text",
|
| 1082 |
+
"bbox": [
|
| 1083 |
+
0.082,
|
| 1084 |
+
0.117,
|
| 1085 |
+
0.49,
|
| 1086 |
+
0.193
|
| 1087 |
+
],
|
| 1088 |
+
"angle": 0,
|
| 1089 |
+
"content": "First, we train an omnidirectional locomotion policy as the teacher. The command ranges used for both teacher training and the subsequent distillation experiment are listed in Table. III. We then leverage the well-trained teacher policy to guide the learning of the student policy."
|
| 1090 |
+
},
|
| 1091 |
+
{
|
| 1092 |
+
"type": "table_caption",
|
| 1093 |
+
"bbox": [
|
| 1094 |
+
0.163,
|
| 1095 |
+
0.205,
|
| 1096 |
+
0.411,
|
| 1097 |
+
0.233
|
| 1098 |
+
],
|
| 1099 |
+
"angle": 0,
|
| 1100 |
+
"content": "TABLE III RANGES OF LOCOMOTION TASK COMMAND"
|
| 1101 |
+
},
|
| 1102 |
+
{
|
| 1103 |
+
"type": "table",
|
| 1104 |
+
"bbox": [
|
| 1105 |
+
0.086,
|
| 1106 |
+
0.235,
|
| 1107 |
+
0.487,
|
| 1108 |
+
0.299
|
| 1109 |
+
],
|
| 1110 |
+
"angle": 0,
|
| 1111 |
+
"content": "<table><tr><td>Parameter</td><td>Teacher (Unit)</td><td>Distillation student (Unit)</td><td>StyleLoco student (Unit)</td></tr><tr><td>Forward (vx)</td><td>[-1.0, 3.5] m/s</td><td>[-1.0, 3.5] m/s</td><td>[-1.0, 4.5] m/s</td></tr><tr><td>Lateral (vy)</td><td>[-0.8, 0.8] m/s</td><td>[-0.8, 0.8] m/s</td><td>[-1.0, 1.0] m/s</td></tr><tr><td>Angular (ωz)</td><td>[-1.0, 1.0] rad/s</td><td>[-1.0, 1.0] rad/s</td><td>[-1.5, 1.5] rad/s</td></tr></table>"
|
| 1112 |
+
},
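A small sketch of drawing training commands uniformly from the StyleLoco student ranges in Table III:

```python
import numpy as np

# Command ranges for the StyleLoco student (Table III).
CMD_RANGES = {
    "vx": (-1.0, 4.5),   # forward, m/s
    "vy": (-1.0, 1.0),   # lateral, m/s
    "wz": (-1.5, 1.5),   # yaw rate, rad/s
}

def sample_command(rng: np.random.Generator) -> np.ndarray:
    """Uniformly sample a (vx, vy, wz) locomotion command."""
    return np.array([rng.uniform(*CMD_RANGES[k]) for k in ("vx", "vy", "wz")])

print(sample_command(np.random.default_rng(0)))
```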
|
| 1113 |
+
{
|
| 1114 |
+
"type": "text",
|
| 1115 |
+
"bbox": [
|
| 1116 |
+
0.082,
|
| 1117 |
+
0.308,
|
| 1118 |
+
0.49,
|
| 1119 |
+
0.398
|
| 1120 |
+
],
|
| 1121 |
+
"angle": 0,
|
| 1122 |
+
"content": "The evaluation metrics include linear velocity tracking reward, angular velocity tracking reward, and average survival time. As shown in Table IV, while both methods successfully learn from the teacher policy, GAD-SD demonstrates superior performance, particularly in linear velocity tracking and survival time."
|
| 1123 |
+
},
|
| 1124 |
+
{
|
| 1125 |
+
"type": "table_caption",
|
| 1126 |
+
"bbox": [
|
| 1127 |
+
0.125,
|
| 1128 |
+
0.408,
|
| 1129 |
+
0.449,
|
| 1130 |
+
0.435
|
| 1131 |
+
],
|
| 1132 |
+
"angle": 0,
|
| 1133 |
+
"content": "TABLE IV QUANTITATIVE COMPARISON OF DISTILLATION METHODS"
|
| 1134 |
+
},
|
| 1135 |
+
{
|
| 1136 |
+
"type": "table",
|
| 1137 |
+
"bbox": [
|
| 1138 |
+
0.086,
|
| 1139 |
+
0.448,
|
| 1140 |
+
0.49,
|
| 1141 |
+
0.54
|
| 1142 |
+
],
|
| 1143 |
+
"angle": 0,
|
| 1144 |
+
"content": "<table><tr><td>Method</td><td>Linear Velocity Tracking Reward(±0.1) ↑</td><td>Angular Velocity Tracking Reward(±0.1) ↑</td><td>Average Survival Time(±15 steps) ↑</td></tr><tr><td>Teacher</td><td>7.403</td><td>2.824</td><td>925.9</td></tr><tr><td>DAgger</td><td>3.744</td><td>2.516</td><td>506.6</td></tr><tr><td>GAD-SD</td><td>5.679</td><td>2.653</td><td>860.3</td></tr></table>"
|
| 1145 |
+
},
|
| 1146 |
+
{
|
| 1147 |
+
"type": "table_footnote",
|
| 1148 |
+
"bbox": [
|
| 1149 |
+
0.094,
|
| 1150 |
+
0.54,
|
| 1151 |
+
0.133,
|
| 1152 |
+
0.549
|
| 1153 |
+
],
|
| 1154 |
+
"angle": 0,
|
| 1155 |
+
"content": "Notes:"
|
| 1156 |
+
},
|
| 1157 |
+
{
|
| 1158 |
+
"type": "table_footnote",
|
| 1159 |
+
"bbox": [
|
| 1160 |
+
0.095,
|
| 1161 |
+
0.55,
|
| 1162 |
+
0.426,
|
| 1163 |
+
0.561
|
| 1164 |
+
],
|
| 1165 |
+
"angle": 0,
|
| 1166 |
+
"content": "- Teacher: teacher policy trained with privileged information"
|
| 1167 |
+
},
|
| 1168 |
+
{
|
| 1169 |
+
"type": "table_footnote",
|
| 1170 |
+
"bbox": [
|
| 1171 |
+
0.095,
|
| 1172 |
+
0.561,
|
| 1173 |
+
0.427,
|
| 1174 |
+
0.572
|
| 1175 |
+
],
|
| 1176 |
+
"angle": 0,
|
| 1177 |
+
"content": "GAD-SD: GAD with only teacher distillation discriminator"
|
| 1178 |
+
},
|
| 1179 |
+
{
|
| 1180 |
+
"type": "list",
|
| 1181 |
+
"bbox": [
|
| 1182 |
+
0.094,
|
| 1183 |
+
0.54,
|
| 1184 |
+
0.427,
|
| 1185 |
+
0.572
|
| 1186 |
+
],
|
| 1187 |
+
"angle": 0,
|
| 1188 |
+
"content": null
|
| 1189 |
+
},
|
| 1190 |
+
{
|
| 1191 |
+
"type": "title",
|
| 1192 |
+
"bbox": [
|
| 1193 |
+
0.084,
|
| 1194 |
+
0.606,
|
| 1195 |
+
0.275,
|
| 1196 |
+
0.621
|
| 1197 |
+
],
|
| 1198 |
+
"angle": 0,
|
| 1199 |
+
"content": "B. Locomotion Capabilities"
|
| 1200 |
+
},
|
| 1201 |
+
{
|
| 1202 |
+
"type": "text",
|
| 1203 |
+
"bbox": [
|
| 1204 |
+
0.082,
|
| 1205 |
+
0.626,
|
| 1206 |
+
0.49,
|
| 1207 |
+
0.731
|
| 1208 |
+
],
|
| 1209 |
+
"angle": 0,
|
| 1210 |
+
"content": "The second set of experiments assesses the student policy's locomotion capabilities, particularly its ability to track commanded velocities while maintaining natural motion patterns. We compare StyleLoco against state-of-the-art approaches in terms of tracking accuracy, stability, and style preservation. Table VI shows comparative results across various performance metrics."
|
| 1211 |
+
},
|
| 1212 |
+
{
|
| 1213 |
+
"type": "text",
|
| 1214 |
+
"bbox": [
|
| 1215 |
+
0.082,
|
| 1216 |
+
0.732,
|
| 1217 |
+
0.49,
|
| 1218 |
+
0.852
|
| 1219 |
+
],
|
| 1220 |
+
"angle": 0,
|
| 1221 |
+
"content": "The locomotion task evaluates the ability of student policy to track local velocity commands comprising three components: forward/backward velocity \\( v_{x} \\), lateral velocity \\( v_{y} \\), and rotational velocity \\( w_{z} \\). Command values are uniformly sampled within pre-defined ranges specified in Table. III. For style imitation, we select four representative motion clips as reference targets for the style discriminator, with their corresponding velocity profiles detailed in Table. V."
|
| 1222 |
+
},
|
| 1223 |
+
{
|
| 1224 |
+
"type": "text",
|
| 1225 |
+
"bbox": [
|
| 1226 |
+
0.082,
|
| 1227 |
+
0.853,
|
| 1228 |
+
0.49,
|
| 1229 |
+
0.898
|
| 1230 |
+
],
|
| 1231 |
+
"angle": 0,
|
| 1232 |
+
"content": "To comprehensively evaluate our double-discriminator framework, we compare our method against three baseline approaches:"
|
| 1233 |
+
},
|
| 1234 |
+
{
|
| 1235 |
+
"type": "text",
|
| 1236 |
+
"bbox": [
|
| 1237 |
+
0.1,
|
| 1238 |
+
0.902,
|
| 1239 |
+
0.49,
|
| 1240 |
+
0.932
|
| 1241 |
+
],
|
| 1242 |
+
"angle": 0,
|
| 1243 |
+
"content": "- SD-Motion: Single-discriminator approach using only motion clips as reference."
|
| 1244 |
+
},
|
| 1245 |
+
{
|
| 1246 |
+
"type": "table_caption",
|
| 1247 |
+
"bbox": [
|
| 1248 |
+
0.594,
|
| 1249 |
+
0.065,
|
| 1250 |
+
0.826,
|
| 1251 |
+
0.092
|
| 1252 |
+
],
|
| 1253 |
+
"angle": 0,
|
| 1254 |
+
"content": "TABLEV VELOCITY PROFILES FOR MOTION CLIPS"
|
| 1255 |
+
},
|
| 1256 |
+
{
|
| 1257 |
+
"type": "table",
|
| 1258 |
+
"bbox": [
|
| 1259 |
+
0.508,
|
| 1260 |
+
0.103,
|
| 1261 |
+
0.915,
|
| 1262 |
+
0.179
|
| 1263 |
+
],
|
| 1264 |
+
"angle": 0,
|
| 1265 |
+
"content": "<table><tr><td>Vel Profiles</td><td>Forward (m/s)</td><td>Lateral (m/s)</td><td>Angular (rad/s)</td></tr><tr><td>Slow Forward</td><td>[0.089, 1.205]</td><td>[-0.396, 0.188]</td><td>[-1.734, 0.906]</td></tr><tr><td>Medium Forward</td><td>[0.884, 2.067]</td><td>[-0.563, 0.306]</td><td>[-2.044, 1.963]</td></tr><tr><td>Fast Forward</td><td>[2.438, 4.378]</td><td>[-1.166, 0.943]</td><td>[-1.555, 3.476]</td></tr><tr><td>Move Backward</td><td>[-1.088, -0.350]</td><td>[-0.425, 0.365]</td><td>[-1.580, 1.981]</td></tr></table>"
|
| 1266 |
+
},
|
| 1267 |
+
{
|
| 1268 |
+
"type": "text",
|
| 1269 |
+
"bbox": [
|
| 1270 |
+
0.524,
|
| 1271 |
+
0.217,
|
| 1272 |
+
0.912,
|
| 1273 |
+
0.262
|
| 1274 |
+
],
|
| 1275 |
+
"angle": 0,
|
| 1276 |
+
"content": "- SD-Full: Single-discriminator approach using a combination of teacher policy online roll-out data and motion clips."
|
| 1277 |
+
},
|
| 1278 |
+
{
|
| 1279 |
+
"type": "text",
|
| 1280 |
+
"bbox": [
|
| 1281 |
+
0.524,
|
| 1282 |
+
0.262,
|
| 1283 |
+
0.912,
|
| 1284 |
+
0.308
|
| 1285 |
+
],
|
| 1286 |
+
"angle": 0,
|
| 1287 |
+
"content": "- DAgger+Style: DAgger-based teacher policy distillation combined with a separate discriminator for style learning."
|
| 1288 |
+
},
|
| 1289 |
+
{
|
| 1290 |
+
"type": "list",
|
| 1291 |
+
"bbox": [
|
| 1292 |
+
0.524,
|
| 1293 |
+
0.217,
|
| 1294 |
+
0.912,
|
| 1295 |
+
0.308
|
| 1296 |
+
],
|
| 1297 |
+
"angle": 0,
|
| 1298 |
+
"content": null
|
| 1299 |
+
},
|
| 1300 |
+
{
|
| 1301 |
+
"type": "text",
|
| 1302 |
+
"bbox": [
|
| 1303 |
+
0.506,
|
| 1304 |
+
0.318,
|
| 1305 |
+
0.913,
|
| 1306 |
+
0.363
|
| 1307 |
+
],
|
| 1308 |
+
"angle": 0,
|
| 1309 |
+
"content": "The evaluation metrics are similar to those used in the distillation task experiment, with the addition of energy consumption."
|
| 1310 |
+
},
|
| 1311 |
+
{
|
| 1312 |
+
"type": "text",
|
| 1313 |
+
"bbox": [
|
| 1314 |
+
0.505,
|
| 1315 |
+
0.367,
|
| 1316 |
+
0.914,
|
| 1317 |
+
0.488
|
| 1318 |
+
],
|
| 1319 |
+
"angle": 0,
|
| 1320 |
+
"content": "As demonstrated in Table. VI, our proposed double-discriminator framework achieves superior performance in velocity tracking and survival time compared to all baseline methods. Notably, the SD-Motion approach exhibits the best energy consumption performance, suggesting that human motions are inherently energy efficient and properly incorporating motion demonstrations during training contributes to reduced energy consumption."
|
| 1321 |
+
},
|
| 1322 |
+
{
|
| 1323 |
+
"type": "image",
|
| 1324 |
+
"bbox": [
|
| 1325 |
+
0.529,
|
| 1326 |
+
0.511,
|
| 1327 |
+
0.892,
|
| 1328 |
+
0.632
|
| 1329 |
+
],
|
| 1330 |
+
"angle": 0,
|
| 1331 |
+
"content": null
|
| 1332 |
+
},
|
| 1333 |
+
{
|
| 1334 |
+
"type": "image",
|
| 1335 |
+
"bbox": [
|
| 1336 |
+
0.53,
|
| 1337 |
+
0.633,
|
| 1338 |
+
0.892,
|
| 1339 |
+
0.734
|
| 1340 |
+
],
|
| 1341 |
+
"angle": 0,
|
| 1342 |
+
"content": null
|
| 1343 |
+
},
|
| 1344 |
+
{
|
| 1345 |
+
"type": "image",
|
| 1346 |
+
"bbox": [
|
| 1347 |
+
0.53,
|
| 1348 |
+
0.733,
|
| 1349 |
+
0.892,
|
| 1350 |
+
0.86
|
| 1351 |
+
],
|
| 1352 |
+
"angle": 0,
|
| 1353 |
+
"content": null
|
| 1354 |
+
},
|
| 1355 |
+
{
|
| 1356 |
+
"type": "image_caption",
|
| 1357 |
+
"bbox": [
|
| 1358 |
+
0.506,
|
| 1359 |
+
0.873,
|
| 1360 |
+
0.915,
|
| 1361 |
+
0.908
|
| 1362 |
+
],
|
| 1363 |
+
"angle": 0,
|
| 1364 |
+
"content": "Fig. 3. From top to bottom, a stylized locomotion demonstration from LaFAN1 (Top), motions generated by student policy in simulation (Middle), motions generated by student policy deployed on real H1 robot(Bottom)."
|
| 1365 |
+
}
|
| 1366 |
+
],
|
| 1367 |
+
[
|
| 1368 |
+
{
|
| 1369 |
+
"type": "table_caption",
|
| 1370 |
+
"bbox": [
|
| 1371 |
+
0.269,
|
| 1372 |
+
0.065,
|
| 1373 |
+
0.728,
|
| 1374 |
+
0.093
|
| 1375 |
+
],
|
| 1376 |
+
"angle": 0,
|
| 1377 |
+
"content": "TABLE VI QUANTITATIVE COMPARISON OF DIFFERENT METHODS ACROSS VARIOUS METRICS"
|
| 1378 |
+
},
|
| 1379 |
+
{
|
| 1380 |
+
"type": "table",
|
| 1381 |
+
"bbox": [
|
| 1382 |
+
0.154,
|
| 1383 |
+
0.105,
|
| 1384 |
+
0.844,
|
| 1385 |
+
0.189
|
| 1386 |
+
],
|
| 1387 |
+
"angle": 0,
|
| 1388 |
+
"content": "<table><tr><td>Method</td><td>Linear Velocity Tracking Reward(±0.1) ↑</td><td>Angular Velocity Tracking Reward(±0.1) ↑</td><td>Average Survival Time(±15 steps) ↑</td><td>Energy Consumption(±0.001) ↓</td></tr><tr><td>SD-Motion</td><td>4.229</td><td>2.249</td><td>813.2</td><td>0.065</td></tr><tr><td>SD-Full</td><td>4.665</td><td>2.413</td><td>824.1</td><td>0.093</td></tr><tr><td>DAgger+Style</td><td>5.059</td><td>2.384</td><td>826.9</td><td>0.079</td></tr><tr><td>GAD (Ours)</td><td>5.485</td><td>2.644</td><td>846.5</td><td>0.081</td></tr></table>"
|
| 1389 |
+
},
|
| 1390 |
+
{
|
| 1391 |
+
"type": "table_footnote",
|
| 1392 |
+
"bbox": [
|
| 1393 |
+
0.162,
|
| 1394 |
+
0.19,
|
| 1395 |
+
0.2,
|
| 1396 |
+
0.198
|
| 1397 |
+
],
|
| 1398 |
+
"angle": 0,
|
| 1399 |
+
"content": "Notes:"
|
| 1400 |
+
},
|
| 1401 |
+
{
|
| 1402 |
+
"type": "table_footnote",
|
| 1403 |
+
"bbox": [
|
| 1404 |
+
0.164,
|
| 1405 |
+
0.2,
|
| 1406 |
+
0.534,
|
| 1407 |
+
0.211
|
| 1408 |
+
],
|
| 1409 |
+
"angle": 0,
|
| 1410 |
+
"content": "- SD-Motion: Single discriminator with only motion demonstrations"
|
| 1411 |
+
},
|
| 1412 |
+
{
|
| 1413 |
+
"type": "table_footnote",
|
| 1414 |
+
"bbox": [
|
| 1415 |
+
0.164,
|
| 1416 |
+
0.212,
|
| 1417 |
+
0.631,
|
| 1418 |
+
0.222
|
| 1419 |
+
],
|
| 1420 |
+
"angle": 0,
|
| 1421 |
+
"content": "- SD-Full: Single discriminator with both teacher roll-outs and motion demonstrations"
|
| 1422 |
+
},
|
| 1423 |
+
{
|
| 1424 |
+
"type": "table_footnote",
|
| 1425 |
+
"bbox": [
|
| 1426 |
+
0.163,
|
| 1427 |
+
0.223,
|
| 1428 |
+
0.551,
|
| 1429 |
+
0.234
|
| 1430 |
+
],
|
| 1431 |
+
"angle": 0,
|
| 1432 |
+
"content": "- DAgger+Style: DAgger distillation with additional style discriminator"
|
| 1433 |
+
},
|
| 1434 |
+
{
|
| 1435 |
+
"type": "list",
|
| 1436 |
+
"bbox": [
|
| 1437 |
+
0.162,
|
| 1438 |
+
0.19,
|
| 1439 |
+
0.631,
|
| 1440 |
+
0.234
|
| 1441 |
+
],
|
| 1442 |
+
"angle": 0,
|
| 1443 |
+
"content": null
|
| 1444 |
+
},
|
| 1445 |
+
{
|
| 1446 |
+
"type": "title",
|
| 1447 |
+
"bbox": [
|
| 1448 |
+
0.084,
|
| 1449 |
+
0.264,
|
| 1450 |
+
0.315,
|
| 1451 |
+
0.279
|
| 1452 |
+
],
|
| 1453 |
+
"angle": 0,
|
| 1454 |
+
"content": "C. Evaluations on Style Imitation"
|
| 1455 |
+
},
|
| 1456 |
+
{
|
| 1457 |
+
"type": "text",
|
| 1458 |
+
"bbox": [
|
| 1459 |
+
0.082,
|
| 1460 |
+
0.283,
|
| 1461 |
+
0.49,
|
| 1462 |
+
0.464
|
| 1463 |
+
],
|
| 1464 |
+
"angle": 0,
|
| 1465 |
+
"content": "To demonstrate our method's ability to combine robust locomotion skills with distinct motion styles, we evaluate a particularly challenging case: synthesizing a limping gait by combining a regular walking teacher policy with reference motions exhibiting a distinct limping pattern. Fig. 3 shows the comparison between the original limping motion from LaFAN1 (visualized in Rerun [37]), the synthesized motion in Isaac Gym [38], and the deployed behavior on the physical Unitree H1 robot. The results demonstrate that our method successfully maintains the characteristic limping style while preserving the fundamental locomotion capabilities of the teacher policy."
|
| 1466 |
+
},
|
| 1467 |
+
{
|
| 1468 |
+
"type": "text",
|
| 1469 |
+
"bbox": [
|
| 1470 |
+
0.082,
|
| 1471 |
+
0.465,
|
| 1472 |
+
0.49,
|
| 1473 |
+
0.571
|
| 1474 |
+
],
|
| 1475 |
+
"angle": 0,
|
| 1476 |
+
"content": "This fusion of different motion sources creates an inherent trade-off between style fidelity and command tracking accuracy, as the stylized motions often deviate significantly from the teacher's optimal movement patterns. Our framework addresses this challenge through adjustable discriminator weights, allowing fine-tuned balance between style preservation and task performance."
|
| 1477 |
+
},
|
| 1478 |
+
{
|
| 1479 |
+
"type": "title",
|
| 1480 |
+
"bbox": [
|
| 1481 |
+
0.084,
|
| 1482 |
+
0.58,
|
| 1483 |
+
0.273,
|
| 1484 |
+
0.595
|
| 1485 |
+
],
|
| 1486 |
+
"angle": 0,
|
| 1487 |
+
"content": "D. Real Robot Deployment"
|
| 1488 |
+
},
|
| 1489 |
+
{
|
| 1490 |
+
"type": "text",
|
| 1491 |
+
"bbox": [
|
| 1492 |
+
0.082,
|
| 1493 |
+
0.6,
|
| 1494 |
+
0.49,
|
| 1495 |
+
0.795
|
| 1496 |
+
],
|
| 1497 |
+
"angle": 0,
|
| 1498 |
+
"content": "The real-world deployment of our student policy on the Unitree H1 robot validates the practical effectiveness of our approach across various scenarios. As shown in Fig. 1, the robot demonstrates smooth transitions in both gait patterns and arm postures when responding to velocity command changes from low to medium speeds. The policy's robustness is further evidenced in Fig. 4, where the robot maintains stable locomotion at high speeds up to \\(3\\mathrm{m / s}\\). Most notably, Fig. 3 showcases our method's unique capability to synthesize stylized gaits that combine the stability of the teacher policy with distinctive motion patterns from the reference datasets, resulting in natural and controllable locomotion behaviors."
|
| 1499 |
+
},
|
| 1500 |
+
{
|
| 1501 |
+
"type": "title",
|
| 1502 |
+
"bbox": [
|
| 1503 |
+
0.138,
|
| 1504 |
+
0.806,
|
| 1505 |
+
0.436,
|
| 1506 |
+
0.82
|
| 1507 |
+
],
|
| 1508 |
+
"angle": 0,
|
| 1509 |
+
"content": "V. CONCLUSION AND LIMITATIONS"
|
| 1510 |
+
},
|
| 1511 |
+
{
|
| 1512 |
+
"type": "text",
|
| 1513 |
+
"bbox": [
|
| 1514 |
+
0.082,
|
| 1515 |
+
0.827,
|
| 1516 |
+
0.491,
|
| 1517 |
+
0.933
|
| 1518 |
+
],
|
| 1519 |
+
"angle": 0,
|
| 1520 |
+
"content": "This paper presents StyleLoco, a novel framework for humanoid locomotion that bridges the gap between robust task execution and natural motion synthesis. Through our proposed Generative Adversarial Distillation approach, we demonstrate the effective combination of privileged information from expert policies with stylistic elements from human demonstrations. Our extensive experimental results,"
|
| 1521 |
+
},
|
| 1522 |
+
{
|
| 1523 |
+
"type": "image",
|
| 1524 |
+
"bbox": [
|
| 1525 |
+
0.549,
|
| 1526 |
+
0.258,
|
| 1527 |
+
0.872,
|
| 1528 |
+
0.502
|
| 1529 |
+
],
|
| 1530 |
+
"angle": 0,
|
| 1531 |
+
"content": null
|
| 1532 |
+
},
|
| 1533 |
+
{
|
| 1534 |
+
"type": "image_caption",
|
| 1535 |
+
"bbox": [
|
| 1536 |
+
0.531,
|
| 1537 |
+
0.513,
|
| 1538 |
+
0.89,
|
| 1539 |
+
0.527
|
| 1540 |
+
],
|
| 1541 |
+
"angle": 0,
|
| 1542 |
+
"content": "Fig. 4. H1 operating outdoors at forward velocity \\((v_{x})\\) of \\(3\\mathrm{m / s}\\)"
|
| 1543 |
+
},
|
| 1544 |
+
{
|
| 1545 |
+
"type": "text",
|
| 1546 |
+
"bbox": [
|
| 1547 |
+
0.505,
|
| 1548 |
+
0.554,
|
| 1549 |
+
0.913,
|
| 1550 |
+
0.615
|
| 1551 |
+
],
|
| 1552 |
+
"angle": 0,
|
| 1553 |
+
"content": "including successful deployment on the Unitree H1 robot, validate the framework's capability to generate stable and natural locomotion behaviors across diverse scenarios, from high-speed running at \\(3\\mathrm{m / s}\\) to stylized gaits such as limping."
|
| 1554 |
+
},
|
| 1555 |
+
{
|
| 1556 |
+
"type": "text",
|
| 1557 |
+
"bbox": [
|
| 1558 |
+
0.505,
|
| 1559 |
+
0.616,
|
| 1560 |
+
0.914,
|
| 1561 |
+
0.734
|
| 1562 |
+
],
|
| 1563 |
+
"angle": 0,
|
| 1564 |
+
"content": "The key innovation of our double-discriminator architecture enables simultaneous learning from heterogeneous sources while maintaining deployability through careful handling of privileged information. Quantitative evaluations show that StyleLoco outperforms existing approaches in both task performance and style preservation, demonstrating superior velocity tracking rewards and survival times while maintaining natural motion patterns."
|
| 1565 |
+
},
|
| 1566 |
+
{
|
| 1567 |
+
"type": "text",
|
| 1568 |
+
"bbox": [
|
| 1569 |
+
0.505,
|
| 1570 |
+
0.736,
|
| 1571 |
+
0.915,
|
| 1572 |
+
0.932
|
| 1573 |
+
],
|
| 1574 |
+
"angle": 0,
|
| 1575 |
+
"content": "Despite these achievements, several important limitations warrant future investigation. A primary challenge lies in style disambiguation when motion demonstrations share overlapping velocity ranges, potentially creating ambiguity in style selection and degrading imitation fidelity. Future research could explore automatic style clustering or context-aware selection mechanisms to address this limitation. Additionally, the current implementation relies on manual tuning of discriminator weights to balance task completion and style imitation objectives. Developing adaptive weighting schemes or automated tuning methods could enhance the framework's practical applicability. While our method shows impressive results in locomotion tasks, its generalization to broader"
|
| 1576 |
+
}
|
| 1577 |
+
],
|
| 1578 |
+
[
|
| 1579 |
+
{
|
| 1580 |
+
"type": "text",
|
| 1581 |
+
"bbox": [
|
| 1582 |
+
0.083,
|
| 1583 |
+
0.072,
|
| 1584 |
+
0.49,
|
| 1585 |
+
0.101
|
| 1586 |
+
],
|
| 1587 |
+
"angle": 0,
|
| 1588 |
+
"content": "manipulation tasks or more complex behaviors remains to be explored, opening avenues for future research."
|
| 1589 |
+
},
|
| 1590 |
+
{
|
| 1591 |
+
"type": "text",
|
| 1592 |
+
"bbox": [
|
| 1593 |
+
0.083,
|
| 1594 |
+
0.103,
|
| 1595 |
+
0.49,
|
| 1596 |
+
0.164
|
| 1597 |
+
],
|
| 1598 |
+
"angle": 0,
|
| 1599 |
+
"content": "Despite these limitations, StyleLoco represents a step toward natural and capable humanoid robotics, offering a promising foundation for future research in combining task-oriented control with human-like motion generation."
|
| 1600 |
+
},
|
| 1601 |
+
{
|
| 1602 |
+
"type": "title",
|
| 1603 |
+
"bbox": [
|
| 1604 |
+
0.239,
|
| 1605 |
+
0.186,
|
| 1606 |
+
0.335,
|
| 1607 |
+
0.2
|
| 1608 |
+
],
|
| 1609 |
+
"angle": 0,
|
| 1610 |
+
"content": "REFERENCES"
|
| 1611 |
+
},
|
| 1612 |
+
{
|
| 1613 |
+
"type": "ref_text",
|
| 1614 |
+
"bbox": [
|
| 1615 |
+
0.093,
|
| 1616 |
+
0.215,
|
| 1617 |
+
0.49,
|
| 1618 |
+
0.251
|
| 1619 |
+
],
|
| 1620 |
+
"angle": 0,
|
| 1621 |
+
"content": "[1] K. Darvish, L. Penco, J. Ramos, R. Cisneros, J. Pratt, E. Yoshida, S. Ivaldi, and D. Pucci, \"Teleoperation of humanoid robots: A survey,\" IEEE Transactions on Robotics, vol. 39, no. 3, pp. 1706-1727, 2023."
|
| 1622 |
+
},
|
| 1623 |
+
{
|
| 1624 |
+
"type": "ref_text",
|
| 1625 |
+
"bbox": [
|
| 1626 |
+
0.093,
|
| 1627 |
+
0.251,
|
| 1628 |
+
0.489,
|
| 1629 |
+
0.295
|
| 1630 |
+
],
|
| 1631 |
+
"angle": 0,
|
| 1632 |
+
"content": "[2] X. B. Peng, Z. Ma, P. Abbeel, S. Levine, and A. Kanazawa, \"Amp: Adversarial motion priors for stylized physics-based character control,\" ACM Transactions on Graphics (ToG), vol. 40, no. 4, pp. 1-20, 2021."
|
| 1633 |
+
},
|
| 1634 |
+
{
|
| 1635 |
+
"type": "ref_text",
|
| 1636 |
+
"bbox": [
|
| 1637 |
+
0.093,
|
| 1638 |
+
0.296,
|
| 1639 |
+
0.489,
|
| 1640 |
+
0.32
|
| 1641 |
+
],
|
| 1642 |
+
"angle": 0,
|
| 1643 |
+
"content": "[3] F. G. Harvey, M. Yurick, D. Nowrouzezahrai, and C. Pal, \"Robust motion in-between,\" vol. 39, no. 4, 2020."
|
| 1644 |
+
},
|
| 1645 |
+
{
|
| 1646 |
+
"type": "ref_text",
|
| 1647 |
+
"bbox": [
|
| 1648 |
+
0.093,
|
| 1649 |
+
0.32,
|
| 1650 |
+
0.489,
|
| 1651 |
+
0.365
|
| 1652 |
+
],
|
| 1653 |
+
"angle": 0,
|
| 1654 |
+
"content": "[4] N. Mahmood, N. Ghorbani, N. F. Troje, G. Pons-Moll, and M. J. Black, “AMASS: Archive of motion capture as surface shapes,” in International Conference on Computer Vision, Oct. 2019, pp. 5442–5451."
|
| 1655 |
+
},
|
| 1656 |
+
{
|
| 1657 |
+
"type": "ref_text",
|
| 1658 |
+
"bbox": [
|
| 1659 |
+
0.093,
|
| 1660 |
+
0.366,
|
| 1661 |
+
0.489,
|
| 1662 |
+
0.4
|
| 1663 |
+
],
|
| 1664 |
+
"angle": 0,
|
| 1665 |
+
"content": "[5] X. Cheng, Y. Ji, J. Chen, R. Yang, G. Yang, and X. Wang, \"Expressive whole-body control for humanoid robots,\" arXiv preprint arXiv:2402.16796, 2024."
|
| 1666 |
+
},
|
| 1667 |
+
{
|
| 1668 |
+
"type": "ref_text",
|
| 1669 |
+
"bbox": [
|
| 1670 |
+
0.093,
|
| 1671 |
+
0.401,
|
| 1672 |
+
0.489,
|
| 1673 |
+
0.446
|
| 1674 |
+
],
|
| 1675 |
+
"angle": 0,
|
| 1676 |
+
"content": "[6] T. Marcucci, M. Gabiccini, and A. Artoni, \"A two-stage trajectory optimization strategy for articulated bodies with unscheduled contact sequences,\" IEEE Robotics and Automation Letters, vol. 2, no. 1, pp. 104-111, 2017."
|
| 1677 |
+
},
|
| 1678 |
+
{
|
| 1679 |
+
"type": "ref_text",
|
| 1680 |
+
"bbox": [
|
| 1681 |
+
0.093,
|
| 1682 |
+
0.448,
|
| 1683 |
+
0.489,
|
| 1684 |
+
0.493
|
| 1685 |
+
],
|
| 1686 |
+
"angle": 0,
|
| 1687 |
+
"content": "[7] G. Romualdi, S. Dafarra, G. L'Erario, I. Sorrentino, S. Traversaro, and D. Pucci, \"Online non-linear centroidal mpc for humanoid robot locomotion with step adjustment,\" in 2022 International Conference on Robotics and Automation (ICRA). IEEE, 2022, pp. 10412-10419."
|
| 1688 |
+
},
|
| 1689 |
+
{
|
| 1690 |
+
"type": "ref_text",
|
| 1691 |
+
"bbox": [
|
| 1692 |
+
0.093,
|
| 1693 |
+
0.493,
|
| 1694 |
+
0.489,
|
| 1695 |
+
0.539
|
| 1696 |
+
],
|
| 1697 |
+
"angle": 0,
|
| 1698 |
+
"content": "[8] J. Englsberger, A. Dietrich, G.-A. Mesesan, G. Garofalo, C. Ott, and A. O. Albu-Schäffer, \"Mptc-modular passive tracking controller for stack of tasks based control frameworks,\" 16th Robotics: Science and Systems, RSS 2020, 2020."
|
| 1699 |
+
},
|
| 1700 |
+
{
|
| 1701 |
+
"type": "ref_text",
|
| 1702 |
+
"bbox": [
|
| 1703 |
+
0.093,
|
| 1704 |
+
0.54,
|
| 1705 |
+
0.489,
|
| 1706 |
+
0.597
|
| 1707 |
+
],
|
| 1708 |
+
"angle": 0,
|
| 1709 |
+
"content": "[9] M. Elobaid, G. Romualdi, G. Nava, L. Rapetti, H. A. O. Mohamed, and D. Pucci, \"Online non-linear centroidal mpc for humanoid robots payload carrying with contact-stable force parametrization,\" in 2023 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2023, pp. 12233-12239."
|
| 1710 |
+
},
|
| 1711 |
+
{
|
| 1712 |
+
"type": "ref_text",
|
| 1713 |
+
"bbox": [
|
| 1714 |
+
0.086,
|
| 1715 |
+
0.597,
|
| 1716 |
+
0.489,
|
| 1717 |
+
0.655
|
| 1718 |
+
],
|
| 1719 |
+
"angle": 0,
|
| 1720 |
+
"content": "[10] Y. Ishiguro, K. Kojima, F. Sugai, S. Nozawa, Y. Kakiuchi, K. Okada, and M. Inaba, \"High speed whole body dynamic motion experiment with real time master-slave humanoid robot system,\" in 2018 IEEE International Conference on Robotics and Automation (ICRA), 2018, pp. 5835-5841."
|
| 1721 |
+
},
|
| 1722 |
+
{
|
| 1723 |
+
"type": "ref_text",
|
| 1724 |
+
"bbox": [
|
| 1725 |
+
0.086,
|
| 1726 |
+
0.655,
|
| 1727 |
+
0.489,
|
| 1728 |
+
0.701
|
| 1729 |
+
],
|
| 1730 |
+
"angle": 0,
|
| 1731 |
+
"content": "[11] Y. Ishiguro, T. Makabe, Y. Nagamatsu, Y. Kojio, K. Kojima, F. Sugai, Y. Kakiuchi, K. Okada, and M. Inaba, \"Bilateral humanoid teleoperation system using whole-body exoskeleton cockpit tablis,\" IEEE Robotics and Automation Letters, vol. 5, no. 4, pp. 6419-6426, 2020."
|
| 1732 |
+
},
|
| 1733 |
+
{
|
| 1734 |
+
"type": "ref_text",
|
| 1735 |
+
"bbox": [
|
| 1736 |
+
0.086,
|
| 1737 |
+
0.701,
|
| 1738 |
+
0.489,
|
| 1739 |
+
0.735
|
| 1740 |
+
],
|
| 1741 |
+
"angle": 0,
|
| 1742 |
+
"content": "[12] J. Ramos and S. Kim, \"Dynamic locomotion synchronization of bipedal robot and human operator via bilateral feedback teleoperation,\" Science Robotics, vol. 4, no. 35, p. eaav4282, 2019."
|
| 1743 |
+
},
|
| 1744 |
+
{
|
| 1745 |
+
"type": "ref_text",
|
| 1746 |
+
"bbox": [
|
| 1747 |
+
0.086,
|
| 1748 |
+
0.736,
|
| 1749 |
+
0.489,
|
| 1750 |
+
0.781
|
| 1751 |
+
],
|
| 1752 |
+
"angle": 0,
|
| 1753 |
+
"content": "[13] K. Ayusawa and E. Yoshida, \"Motion retargeting for humanoid robots based on simultaneous morphing parameter identification and motion optimization,\" IEEE Transactions on Robotics, vol. 33, no. 6, pp. 1343-1357, 2017."
|
| 1754 |
+
},
|
| 1755 |
+
{
|
| 1756 |
+
"type": "ref_text",
|
| 1757 |
+
"bbox": [
|
| 1758 |
+
0.086,
|
| 1759 |
+
0.782,
|
| 1760 |
+
0.489,
|
| 1761 |
+
0.828
|
| 1762 |
+
],
|
| 1763 |
+
"angle": 0,
|
| 1764 |
+
"content": "[14] K. Hu, C. Ott, and D. Lee, \"Online human walking imitation in task and joint space based on quadratic programming,\" in 2014 IEEE International Conference on Robotics and Automation (ICRA), 2014, pp. 3458-3464."
|
| 1765 |
+
},
|
| 1766 |
+
{
|
| 1767 |
+
"type": "ref_text",
|
| 1768 |
+
"bbox": [
|
| 1769 |
+
0.086,
|
| 1770 |
+
0.828,
|
| 1771 |
+
0.489,
|
| 1772 |
+
0.884
|
| 1773 |
+
],
|
| 1774 |
+
"angle": 0,
|
| 1775 |
+
"content": "[15] F.-J. Montecillo-Puente, M. N. Sreenivasa, and J.-P. Laumond, \"On real-time whole-body human to humanoid motion transfer,\" in International Conference on Informatics in Control, Automation and Robotics, 2010. [Online]. Available: https://api(semanticscholar.org/CorpusID:20676844"
|
| 1776 |
+
},
|
| 1777 |
+
{
|
| 1778 |
+
"type": "ref_text",
|
| 1779 |
+
"bbox": [
|
| 1780 |
+
0.086,
|
| 1781 |
+
0.885,
|
| 1782 |
+
0.489,
|
| 1783 |
+
0.93
|
| 1784 |
+
],
|
| 1785 |
+
"angle": 0,
|
| 1786 |
+
"content": "[16] K. Yamane, S. O. Anderson, and J. K. Hodgins, “Controlling humanoid robots with human motion data: Experimental validation,” in 2010 10th IEEE-RAS International Conference on Humanoid Robots, 2010, pp. 504–510."
|
| 1787 |
+
},
|
| 1788 |
+
{
|
| 1789 |
+
"type": "list",
|
| 1790 |
+
"bbox": [
|
| 1791 |
+
0.086,
|
| 1792 |
+
0.215,
|
| 1793 |
+
0.49,
|
| 1794 |
+
0.93
|
| 1795 |
+
],
|
| 1796 |
+
"angle": 0,
|
| 1797 |
+
"content": null
|
| 1798 |
+
},
|
| 1799 |
+
{
|
| 1800 |
+
"type": "ref_text",
|
| 1801 |
+
"bbox": [
|
| 1802 |
+
0.511,
|
| 1803 |
+
0.073,
|
| 1804 |
+
0.913,
|
| 1805 |
+
0.12
|
| 1806 |
+
],
|
| 1807 |
+
"angle": 0,
|
| 1808 |
+
"content": "[17] A. Di Fava, K. Bouyarmane, K. Chappellet, E. Ruffaldi, and A. Kheddar, “Multi-contact motion retargeting from human to humanoid robot,” in 2016 IEEE-RAS 16th International Conference on Humanoid Robots (Humanoids), 2016, pp. 1081–1086."
|
| 1809 |
+
},
|
| 1810 |
+
{
|
| 1811 |
+
"type": "ref_text",
|
| 1812 |
+
"bbox": [
|
| 1813 |
+
0.511,
|
| 1814 |
+
0.121,
|
| 1815 |
+
0.913,
|
| 1816 |
+
0.166
|
| 1817 |
+
],
|
| 1818 |
+
"angle": 0,
|
| 1819 |
+
"content": "[18] K. Otani and K. Bouyarmane, \"Adaptive whole-body manipulation in human-to-humanoid multi-contact motion retargeting,\" in 2017 IEEE-RAS 17th International Conference on Humanoid Robotics (Humanoids), 2017, pp. 446-453."
|
| 1820 |
+
},
|
| 1821 |
+
{
|
| 1822 |
+
"type": "ref_text",
|
| 1823 |
+
"bbox": [
|
| 1824 |
+
0.511,
|
| 1825 |
+
0.166,
|
| 1826 |
+
0.913,
|
| 1827 |
+
0.224
|
| 1828 |
+
],
|
| 1829 |
+
"angle": 0,
|
| 1830 |
+
"content": "[19] L. Penco, B. Clement, V. Modugno, E. Mingo Hoffman, G. Nava, D. Pucci, N. G. Tsagarakis, J. B. Mouret, and S. Ivaldi, \"Robust real-time whole-body motion retargeting from human to humanoid,\" in 2018 IEEE-RAS 18th International Conference on Humanoid Robots (Humanoids), 2018, pp. 425-432."
|
| 1831 |
+
},
|
| 1832 |
+
{
|
| 1833 |
+
"type": "ref_text",
|
| 1834 |
+
"bbox": [
|
| 1835 |
+
0.51,
|
| 1836 |
+
0.224,
|
| 1837 |
+
0.913,
|
| 1838 |
+
0.269
|
| 1839 |
+
],
|
| 1840 |
+
"angle": 0,
|
| 1841 |
+
"content": "[20] J. Koenemann, F. Burget, and M. Bennewitz, “Real-time imitation of human whole-body motions by humanoids,” in 2014 IEEE International Conference on Robotics and Automation (ICRA), 2014, pp. 2806–2812."
|
| 1842 |
+
},
|
| 1843 |
+
{
|
| 1844 |
+
"type": "ref_text",
|
| 1845 |
+
"bbox": [
|
| 1846 |
+
0.51,
|
| 1847 |
+
0.27,
|
| 1848 |
+
0.913,
|
| 1849 |
+
0.317
|
| 1850 |
+
],
|
| 1851 |
+
"angle": 0,
|
| 1852 |
+
"content": "[21] O. E. Ramos, N. Mansard, O. Stasse, C. Benazeth, S. Hak, and L. Saab, \"Dancing humanoid robots: Systematic use of osid to compute dynamically consistent movements following a motion capture pattern,\" IEEE Robotics and Automation Magazine, vol. 22, no. 4, pp. 16-26, 2015."
|
| 1853 |
+
},
|
| 1854 |
+
{
|
| 1855 |
+
"type": "ref_text",
|
| 1856 |
+
"bbox": [
|
| 1857 |
+
0.51,
|
| 1858 |
+
0.317,
|
| 1859 |
+
0.913,
|
| 1860 |
+
0.363
|
| 1861 |
+
],
|
| 1862 |
+
"angle": 0,
|
| 1863 |
+
"content": "[22] L. Penco, K. Momose, S. McCrory, D. Anderson, N. Kitchel, D. Calvert, and R. J. Griffin, \"Mixed reality teleoperation assistance for direct control of humanoids,\" IEEE Robotics and Automation Letters, vol. 9, no. 2, pp. 1937-1944, 2024."
|
| 1864 |
+
},
|
| 1865 |
+
{
|
| 1866 |
+
"type": "ref_text",
|
| 1867 |
+
"bbox": [
|
| 1868 |
+
0.51,
|
| 1869 |
+
0.363,
|
| 1870 |
+
0.913,
|
| 1871 |
+
0.408
|
| 1872 |
+
],
|
| 1873 |
+
"angle": 0,
|
| 1874 |
+
"content": "[23] Z. Li, X. B. Peng, P. Abbeel, S. Levine, G. Berseth, and K. Sreenath, \"Reinforcement learning for versatile, dynamic, and robust bipedal locomotion control,\" The International Journal of Robotics Research, p. 02783649241285161, 2024."
|
| 1875 |
+
},
|
| 1876 |
+
{
|
| 1877 |
+
"type": "ref_text",
|
| 1878 |
+
"bbox": [
|
| 1879 |
+
0.51,
|
| 1880 |
+
0.409,
|
| 1881 |
+
0.913,
|
| 1882 |
+
0.444
|
| 1883 |
+
],
|
| 1884 |
+
"angle": 0,
|
| 1885 |
+
"content": "[24] Z. Fu, A. Kumar, J. Malik, and D. Pathak, \"Minimizing energy consumption leads to the emergence of gaits in legged robots,\" in 5th Annual Conference on Robot Learning."
|
| 1886 |
+
},
|
| 1887 |
+
{
|
| 1888 |
+
"type": "ref_text",
|
| 1889 |
+
"bbox": [
|
| 1890 |
+
0.51,
|
| 1891 |
+
0.445,
|
| 1892 |
+
0.913,
|
| 1893 |
+
0.468
|
| 1894 |
+
],
|
| 1895 |
+
"angle": 0,
|
| 1896 |
+
"content": "[25] J. Ho and S. Ermon, \"Generative adversarial imitation learning,\" Advances in neural information processing systems, vol. 29, 2016."
|
| 1897 |
+
},
|
| 1898 |
+
{
|
| 1899 |
+
"type": "ref_text",
|
| 1900 |
+
"bbox": [
|
| 1901 |
+
0.51,
|
| 1902 |
+
0.469,
|
| 1903 |
+
0.913,
|
| 1904 |
+
0.514
|
| 1905 |
+
],
|
| 1906 |
+
"angle": 0,
|
| 1907 |
+
"content": "[26] X. Huang, Y. Chi, R. Wang, Z. Li, X. B. Peng, S. Shao, B. Nikolic, and K. Sreenath, \"Diffuseloco: Real-time legged locomotion control with diffusion from offline datasets,\" 2024. [Online]. Available: https://arxiv.org/abs/2404.19264"
|
| 1908 |
+
},
|
| 1909 |
+
{
|
| 1910 |
+
"type": "ref_text",
|
| 1911 |
+
"bbox": [
|
| 1912 |
+
0.51,
|
| 1913 |
+
0.515,
|
| 1914 |
+
0.913,
|
| 1915 |
+
0.56
|
| 1916 |
+
],
|
| 1917 |
+
"angle": 0,
|
| 1918 |
+
"content": "[27] B. Jia and D. Manocha, \"Sim-to-real robotic sketching using behavior cloning and reinforcement learning,\" in 2024 IEEE International Conference on Robotics and Automation (ICRA), 2024, pp. 18272-18278."
|
| 1919 |
+
},
|
| 1920 |
+
{
|
| 1921 |
+
"type": "ref_text",
|
| 1922 |
+
"bbox": [
|
| 1923 |
+
0.51,
|
| 1924 |
+
0.561,
|
| 1925 |
+
0.913,
|
| 1926 |
+
0.63
|
| 1927 |
+
],
|
| 1928 |
+
"angle": 0,
|
| 1929 |
+
"content": "[28] S. Ross and D. Bagnell, \"Efficient reductions for imitation learning,\" in Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics, ser. Proceedings of Machine Learning Research, Y. W. Teh and M. Titterington, Eds., vol. 9. Chia Laguna Resort, Sardinia, Italy: PMLR, 13-15 May 2010, pp. 661-668. [Online]. Available: https://proceedings.mlr.press/v9/ross10a.html"
|
| 1930 |
+
},
|
| 1931 |
+
{
|
| 1932 |
+
"type": "ref_text",
|
| 1933 |
+
"bbox": [
|
| 1934 |
+
0.51,
|
| 1935 |
+
0.63,
|
| 1936 |
+
0.913,
|
| 1937 |
+
0.687
|
| 1938 |
+
],
|
| 1939 |
+
"angle": 0,
|
| 1940 |
+
"content": "[29] S. Ross, G. Gordon, and D. Bagnell, “A reduction of imitation learning and structured prediction to no-regret online learning,” in Proceedings of the fourteenth international conference on artificial intelligence and statistics. JMLR Workshop and Conference Proceedings, 2011, pp. 627–635."
|
| 1941 |
+
},
|
| 1942 |
+
{
|
| 1943 |
+
"type": "ref_text",
|
| 1944 |
+
"bbox": [
|
| 1945 |
+
0.51,
|
| 1946 |
+
0.688,
|
| 1947 |
+
0.913,
|
| 1948 |
+
0.722
|
| 1949 |
+
],
|
| 1950 |
+
"angle": 0,
|
| 1951 |
+
"content": "[30] M. Ji, X. Peng, F. Liu, J. Li, G. Yang, X. Cheng, and X. Wang, \"Exbody2: Advanced expressive humanoid whole-body control,\" arXiv preprint arXiv:2412.13196, 2024."
|
| 1952 |
+
},
|
| 1953 |
+
{
|
| 1954 |
+
"type": "ref_text",
|
| 1955 |
+
"bbox": [
|
| 1956 |
+
0.51,
|
| 1957 |
+
0.722,
|
| 1958 |
+
0.913,
|
| 1959 |
+
0.757
|
| 1960 |
+
],
|
| 1961 |
+
"angle": 0,
|
| 1962 |
+
"content": "[31] T. He, Z. Luo, W. Xiao, C. Zhang, K. Kitani, C. Liu, and G. Shi, \"Learning human-to-humanoid real-time whole-body teleoperation,\" arXiv preprint arXiv:2403.04436, 2024."
|
| 1963 |
+
},
|
| 1964 |
+
{
|
| 1965 |
+
"type": "ref_text",
|
| 1966 |
+
"bbox": [
|
| 1967 |
+
0.51,
|
| 1968 |
+
0.758,
|
| 1969 |
+
0.913,
|
| 1970 |
+
0.802
|
| 1971 |
+
],
|
| 1972 |
+
"angle": 0,
|
| 1973 |
+
"content": "[32] T. He, W. Xiao, T. Lin, Z. Luo, Z. Xu, Z. Jiang, C. Liu, G. Shi, X. Wang, L. Fan, and Y. Zhu, \"Hover: Versatile neural whole-body controller for humanoid robots,\" arXiv preprint arXiv:2410.21229, 2024."
|
| 1974 |
+
},
|
| 1975 |
+
{
|
| 1976 |
+
"type": "ref_text",
|
| 1977 |
+
"bbox": [
|
| 1978 |
+
0.51,
|
| 1979 |
+
0.803,
|
| 1980 |
+
0.913,
|
| 1981 |
+
0.849
|
| 1982 |
+
],
|
| 1983 |
+
"angle": 0,
|
| 1984 |
+
"content": "[33] T. He, Z. Luo, X. He, W. Xiao, C. Zhang, W. Zhang, K. Kitani, C. Liu, and G. Shi, “Omnih2o: Universal and dexterous human-to-humanoid whole-body teleoperation and learning,” arXiv preprint arXiv:2406.08858, 2024."
|
| 1985 |
+
},
|
| 1986 |
+
{
|
| 1987 |
+
"type": "ref_text",
|
| 1988 |
+
"bbox": [
|
| 1989 |
+
0.51,
|
| 1990 |
+
0.85,
|
| 1991 |
+
0.913,
|
| 1992 |
+
0.895
|
| 1993 |
+
],
|
| 1994 |
+
"angle": 0,
|
| 1995 |
+
"content": "[34] X. Gu, Y.-J. Wang, X. Zhu, C. Shi, Y. Guo, Y. Liu, and J. Chen, “Advancing humanoid locomotion: Mastering challenging terrains with denoising world model learning,” arXiv preprint arXiv:2408.14472, 2024."
|
| 1996 |
+
},
|
| 1997 |
+
{
|
| 1998 |
+
"type": "ref_text",
|
| 1999 |
+
"bbox": [
|
| 2000 |
+
0.51,
|
| 2001 |
+
0.896,
|
| 2002 |
+
0.913,
|
| 2003 |
+
0.932
|
| 2004 |
+
],
|
| 2005 |
+
"angle": 0,
|
| 2006 |
+
"content": "[35] X. Mao, Q. Li, H. Xie, R. Y. Lau, Z. Wang, and S. Paul Smolley, \"Least squares generative adversarial networks,\" in Proceedings of the IEEE international conference on computer vision, 2017, pp. 2794-2802."
|
| 2007 |
+
},
|
| 2008 |
+
{
|
| 2009 |
+
"type": "list",
|
| 2010 |
+
"bbox": [
|
| 2011 |
+
0.51,
|
| 2012 |
+
0.073,
|
| 2013 |
+
0.913,
|
| 2014 |
+
0.932
|
| 2015 |
+
],
|
| 2016 |
+
"angle": 0,
|
| 2017 |
+
"content": null
|
| 2018 |
+
}
|
| 2019 |
+
],
|
| 2020 |
+
[
|
| 2021 |
+
{
|
| 2022 |
+
"type": "ref_text",
|
| 2023 |
+
"bbox": [
|
| 2024 |
+
0.085,
|
| 2025 |
+
0.073,
|
| 2026 |
+
0.49,
|
| 2027 |
+
0.108
|
| 2028 |
+
],
|
| 2029 |
+
"angle": 0,
|
| 2030 |
+
"content": "[36] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov, \"Proximal policy optimization algorithms,\" arXiv preprint arXiv:1707.06347, 2017."
|
| 2031 |
+
},
|
| 2032 |
+
{
|
| 2033 |
+
"type": "ref_text",
|
| 2034 |
+
"bbox": [
|
| 2035 |
+
0.087,
|
| 2036 |
+
0.109,
|
| 2037 |
+
0.49,
|
| 2038 |
+
0.153
|
| 2039 |
+
],
|
| 2040 |
+
"angle": 0,
|
| 2041 |
+
"content": "[37] Rerun Development Team, \"Rerun: A visualizationsdk for multimodal data,\" Online, 2024, available from https://www. rerun.io/ and https://github.com/rerun-io/rerun. [Online]. Available: https://www. rerun.io"
|
| 2042 |
+
},
|
| 2043 |
+
{
|
| 2044 |
+
"type": "ref_text",
|
| 2045 |
+
"bbox": [
|
| 2046 |
+
0.087,
|
| 2047 |
+
0.154,
|
| 2048 |
+
0.49,
|
| 2049 |
+
0.199
|
| 2050 |
+
],
|
| 2051 |
+
"angle": 0,
|
| 2052 |
+
"content": "[38] V. Makoviychuk, L. Wawrzyniak, Y. Guo, M. Lu, K. Storey, M. Macklin, D. Hoeller, N. Rudin, A. Allshire, A. Handa, and G. State, \"Isaac gym: High performancegpu-based physics simulation for robot learning,\" 2021. [Online]. Available: https://arxiv.org/abs/2108.10470"
|
| 2053 |
+
},
|
| 2054 |
+
{
|
| 2055 |
+
"type": "list",
|
| 2056 |
+
"bbox": [
|
| 2057 |
+
0.085,
|
| 2058 |
+
0.073,
|
| 2059 |
+
0.49,
|
| 2060 |
+
0.199
|
| 2061 |
+
],
|
| 2062 |
+
"angle": 0,
|
| 2063 |
+
"content": null
|
| 2064 |
+
}
|
| 2065 |
+
]
|
| 2066 |
+
]
|
data/2025/2503_15xxx/2503.15082/9abe67bb-bb04-4404-ba77-d9bdbc419145_origin.pdf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ec9cb7012a732ea57ac2e886ef7ac03b6ad5401738fe52dda0989890bb71593c
size 6572671
data/2025/2503_15xxx/2503.15082/full.md ADDED
@@ -0,0 +1,313 @@
# StyleLoco: Generative Adversarial Distillation for Natural Humanoid Robot Locomotion

Le Ma $^{1*}$ , Ziyu Meng $^{1,2*}$ , Tengyu Liu $^{1}$ , Yuhan Li $^{1,3}$ , Ran Song $^{2}$ , Wei Zhang $^{2}$ , Siyuan Huang $^{1, \boxtimes}$

$^{1}$ National Key Laboratory of General Artificial Intelligence, BIGAI $^{2}$ School of Control Science and Engineering, Shandong University $^{3}$ Huazhong University of Science and Technology

*Equal contributors. huangsiyuan@bigai.ai

https://styleloco.github.io/

Abstract—Humanoid robots are anticipated to acquire a wide range of locomotion capabilities while ensuring natural movement across varying speeds and terrains. Existing methods encounter a fundamental dilemma in learning humanoid locomotion: reinforcement learning with handcrafted rewards can achieve agile locomotion but produces unnatural gaits, while Generative Adversarial Imitation Learning (GAIL) with motion capture data yields natural movements but suffers from unstable training processes and restricted agility. Integrating these approaches proves challenging due to the inherent heterogeneity between expert policies and human motion datasets. To address this, we introduce StyleLoco, a novel two-stage framework that bridges this gap through a Generative Adversarial Distillation (GAD) process. Our framework begins by training a teacher policy using reinforcement learning to achieve agile and dynamic locomotion. It then employs a multi-discriminator architecture, where distinct discriminators concurrently extract skills from both the teacher policy and motion capture data. This approach effectively combines the agility of reinforcement learning with the natural fluidity of human-like movements while mitigating the instability issues commonly associated with adversarial training. Through extensive simulation and real-world experiments, we demonstrate that StyleLoco enables humanoid robots to perform diverse locomotion tasks with the precision of expertly trained policies and the natural aesthetics of human motion, successfully transferring styles across different movement types while maintaining stable locomotion across a broad spectrum of command inputs.

# I. INTRODUCTION

Natural and agile locomotion in humanoid robots represents a fundamental challenge in robotics, with far-reaching implications for human-robot interaction, disaster response, and industrial applications. While humanoid robots offer unprecedented potential for operating in human-centric environments, achieving human-like movement patterns remains difficult due to their high degrees of freedom and inherently unstable dynamics [1]. This challenge is further complicated by the fundamental trade-off between achieving precise control and maintaining natural motion qualities.

Reinforcement learning (RL) has emerged as a powerful approach for developing locomotion controllers, enabling robots to master complex movements through carefully designed reward functions. These methods often employ a two-stage learning process: first training a teacher policy that relies on privileged information (such as global positions and ground truth environmental parameters) unavailable in real-world settings, then distilling this knowledge into a student policy that operates solely on realistic sensor observations. While this approach has demonstrated impressive results in terms of agility and precision, it faces two key limitations. First, the reliance on handcrafted rewards requires extensive tuning to accommodate different gaits, stride lengths, and motion parameters across varying speeds. Second, these methods often result in rigid, mechanical movements that lack the fluidity and naturalness characteristic of human motion, limiting their effectiveness in human-centric environments.

Fig. 1. Gait pattern transitions during forward velocity $(v_{x})$ acceleration from $0.7\,\mathrm{m/s}$ to $1.8\,\mathrm{m/s}$.
Recent advances in generative adversarial imitation learning, particularly approaches like Adversarial Motion Prior (AMP) [2], have opened new possibilities for achieving more natural robot movements by leveraging large-scale motion capture datasets such as LaFAN1 [3] and AMASS [4]. These methods employ adversarial training to ensure that robot movements closely match the statistical patterns present in human demonstrations [5]. However, their performance is fundamentally limited by the content and quality of the reference motion data. For instance, learning running behaviors becomes impossible with a dataset containing only walking motions, and acquiring diverse specialized skills often requires expensive motion capture sessions. Furthermore, these methods struggle when motion datasets lack diversity or when retargeting processes introduce artifacts, resulting in brittle behaviors that fail to generalize beyond demonstrated movements.

The limitations of both approaches highlight a critical gap in humanoid locomotion: the need to combine the precision and adaptability of RL-based controllers with the natural movement qualities captured in human demonstrations. While RL methods can learn complex skills beyond available motion capture data, they struggle with natural movement generation. Conversely, demonstration-based methods excel at producing natural movements but are constrained by the available motion capture data. This complementary nature suggests the potential for combining both approaches, yet traditional methods struggle to bridge this gap due to the fundamental heterogeneity between expert policies trained with handcrafted rewards and the statistical patterns present in human motion datasets.

We address these challenges with StyleLoco, introducing a novel Generative Adversarial Distillation (GAD) framework that effectively combines knowledge from heterogeneous sources. Our approach employs a multi-discriminator architecture where separate discriminators simultaneously distill skills from both an RL-trained expert policy and motion capture demonstrations. This design allows the model to preserve the agility and precision of RL while incorporating the natural style of human movements, enabling natural skill execution even for behaviors not present in the motion capture data. Through extensive evaluations in both simulated and real-world environments, we demonstrate that StyleLoco enables humanoid robots to achieve superior locomotion performance compared to traditional approaches while maintaining natural, human-like movement qualities.

The key contributions of our work are three-fold:

- A novel GAD framework that enables stable policy distillation from heterogeneous sources, effectively bridging the gap between RL and demonstration-based approaches.
- A multi-discriminator architecture that successfully combines task-oriented control objectives with natural motion patterns, achieving both high performance and human-like movement qualities.
- Comprehensive validation through real-world deployment on the Unitree H1 humanoid robot, demonstrating robust and natural motion across diverse locomotion tasks and speeds.
# II. RELATED WORKS

# A. Humanoid Robot Locomotion

Locomotion is a critical aspect of motion control in humanoid robots. Traditional methods typically achieve stable movement by formulating the robot's dynamics model as constrained trajectory optimization problems [6]. Model Predictive Control (MPC) [7], [8], [9] is then employed in real-time to adjust and execute this trajectory, enabling adaptation to dynamic environmental changes. However, these model-based methods usually rely heavily on precise modeling of robot dynamic properties [10], [11], [12], [13], [14] and environmental conditions [15], [16], [17], [18], [12], [19], [20], [21], [22], which leads to vulnerabilities in real-world performance, especially when there is a substantial discrepancy between the applied environments and the predefined conditions [23]. Moreover, the optimization problem for humanoid robots is slow to solve due to the complexity of high-dimensional state and action spaces, rendering it challenging to satisfy the demands for real-time performance and stability.

Recently, reinforcement learning (RL) has emerged as a promising paradigm for humanoid locomotion tasks. These methods design tailored reward functions to guide a "trial and error", feedback-based learning process. For instance, reward functions are often crafted to encourage stable walking, minimize energy consumption, or optimize trajectory tracking [24]. However, designing effective reward functions is non-trivial and often requires extensive domain expertise, especially for particular locomotion gaits. Natural locomotion requires different gaits for varying movement speeds, making the design of the reward function even more challenging. Moreover, the numerous reward terms must strike a delicate balance between competing objectives. To alleviate these drawbacks, we incorporate diverse reference locomotion motions as style guidance to simplify the reward components and encourage the policy to learn versatile gaits.

# B. Imitation Learning for Humanoid Locomotion

The fundamental challenges in learning high-dimensional, underactuated robotic systems include precise task specification and effective exploration. Imitation learning (IL) is a method that learns from expert demonstrations, effectively addressing challenges related to quantifying rewards. Unlike pure reinforcement learning, IL can directly leverage offline expert data to guide policy learning, significantly reducing the exploration space and obtaining dense rewards. This approach is particularly effective in real-world robotics and complex task scenarios. Typically, it involves directly following reference trajectories through motion tracking. Generative Adversarial Imitation Learning (GAIL) [25] has been applied to locomotion tasks. Traditional imitation learning, as described above, is limited in flexibility: it can only replicate reference trajectories and cannot adapt to downstream tasks. To address this limitation, AMP [2] introduces the concept of learning the style from reference motion as a constraint, guiding the policy learning process.

However, this paradigm heavily relies on expert demonstrations, and its performance can significantly degrade when the quality of demonstrations is poor or when the task changes. Since IL strategies are directly derived from the demonstrations, they are prone to overfitting to the demonstration data. As a result, when faced with novel situations, IL may lack sufficient generalization ability. Furthermore, due to the morphological differences between humanoid robots and humans, obtaining high-quality reference data proves challenging, resulting in datasets that can only encompass a limited range of commands. This scarcity of data can compromise the stability of GAIL, leading to mode collapse. To mitigate these challenges, we use the expert policy as an additional source of reference motion, providing further motion references to achieve a stable omnidirectional movement strategy.

# C. Deployable Policy Distillation

In robotic locomotion control, distillation is a method that transfers knowledge from teacher policies with privileged information (e.g., full-state dynamics, simulated ground-truth forces, or ideal state estimators) to student policies for real-world deployment. This knowledge transfer enables the student to leverage the teacher's expertise while operating under real-world constraints, such as partial observation or limited sensory inputs. There are two main approaches to distillation:

Behavior cloning (BC) methods [26], [27] learn by mimicking the teacher's actions using supervised learning on state-action pairs. BC achieves effective performance when the student operates within the teacher's training distribution, as it directly replicates the teacher's behavior under familiar conditions. However, its performance degrades sharply with "compounding error" [28] in out-of-distribution (OOD) scenarios (e.g., environmental perturbations, actuator noise, or unseen terrains), as BC inherently lacks the capacity to self-correct deviations from the teacher's demonstration space. This limitation arises because BC relies solely on static datasets of teacher demonstrations, without mechanisms to adapt to novel or unexpected situations.

Another popular approach is online distillation via Dataset Aggregation (DAgger) [29], which addresses BC's limitations by iteratively aggregating student-generated trajectories with teacher-corrected actions. Recently, DAgger and its derivative strategies have stood out as a promising distillation approach for humanoid robots [30], [31], [32], [33] to acquire deployable policies. During training, the student policy interacts with the environment, while the teacher provides corrective feedback on the student's actions, enabling the student to refine its policy over multiple iterations. This interactive process mitigates distributional shift and improves robustness to OOD scenarios. However, DAgger still faces a fundamental challenge: the student lacks access to the teacher's privileged information (e.g., simulated contact forces, ideal state estimators, or full-state dynamics). As a result, under partial observation or incomplete environmental feedback, the student struggles to fully replicate the teacher's actions [24].
# III. METHOD

StyleLoco is a novel approach for learning deployable natural locomotion skills that effectively combines the precision of RL-based controllers with the naturalness of human demonstrations. At its core, StyleLoco employs our proposed Generative Adversarial Distillation (GAD) framework, which uses a unique double-discriminator architecture to distill knowledge from both an RL-trained teacher policy and human motion demonstrations into a deployable student policy. Through adversarial learning, our approach generates naturalistic motions beyond the constraints of available motion capture data while avoiding the artificial behaviors typically resulting from hand-crafted rewards.

StyleLoco consists of three key components: (1) a teacher policy trained with privileged information to achieve robust omnidirectional locomotion, (2) a motion dataset containing natural human movements, and (3) our novel GAD framework that combines these sources to train a deployable student policy. The framework's innovation lies in its ability to generate natural behaviors beyond what either source can achieve alone - overcoming both the limited coverage of motion datasets and the unnatural movements that emerge from pure RL training.

To achieve this, StyleLoco employs two discriminators that work in concert to adversarially shape the student policy's behavior. One discriminator ensures the policy can replicate the robust performance of the teacher, while the other maintains consistency with natural human motion patterns. This dual-discriminator approach simultaneously serves two purposes: expanding the range of natural behaviors beyond the demonstration data, and distilling the teacher's capabilities into a deployable policy. The resulting system produces controllers that are both highly capable and naturally moving, without being constrained to demonstrated behaviors or exhibiting artifacts from hand-crafted rewards.

# A. Preliminaries

1) Reinforcement Learning: We formulate humanoid locomotion control as a Partially Observable Markov Decision Process (POMDP) defined by the tuple $\langle \mathcal{S}, \mathcal{A}, T, \mathcal{O}, R, \gamma \rangle$, where $\mathcal{S}$ represents the full state space, $\mathcal{O}$ denotes partial observations available to the robot, $\mathcal{A}$ is the action space, $T(s'|s, a)$ describes state transitions, $R(s, a)$ defines the reward function, and $\gamma \in (0, 1]$ is the discount factor. The goal is to learn a policy $\pi(a|o)$ that maximizes expected discounted returns while operating only on partial observations $o \in \mathcal{O}$.

The locomotion task requires tracking commanded velocities $v^{*} = (v_{x}^{*}, v_{y}^{*}, \omega_{z}^{*})$, where $(v_{x}^{*}, v_{y}^{*})$ specify desired linear velocities in the local coordinate frame and $\omega_{z}^{*}$ defines the desired yaw rate. Following [34], we use the reward function:

$$
r_{\text{task}}(e, \lambda) := \exp\left(-\lambda \cdot \|e\|^{2}\right)
$$

where $e$ represents tracking errors and $\lambda$ controls their relative importance.
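For concreteness, a minimal sketch of this tracking reward in Python (our own illustration, not the paper's released code; the scale `lam` below is a placeholder value):

```python
import numpy as np

def r_task(error: np.ndarray, lam: float) -> float:
    """Tracking reward r_task(e, lam) = exp(-lam * ||e||^2)."""
    return float(np.exp(-lam * np.dot(error, error)))

# Example: commanded vs. measured planar velocity (m/s).
cmd = np.array([1.0, 0.0])    # commanded (vx*, vy*)
meas = np.array([0.8, 0.1])   # measured (vx, vy)
print(r_task(cmd - meas, lam=10.0))  # lam = 10.0 is illustrative only
```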
2) Generative Adversarial Imitation Learning: Generative Adversarial Imitation Learning (GAIL) learns to mimic expert behavior through adversarial training. Given a dataset of expert demonstrations $\mathcal{M} = \{(s_i, a_i)\}$ consisting of state-action pairs, GAIL trains a policy $\pi(a|s)$ that generates actions $a$ for given states $s$. A discriminator network $\mathcal{D}$ is employed to distinguish between state-action pairs $(s, a)$ from the expert demonstrations and those produced by the policy $\pi$. The reward function used to train the policy is then given by:

$$
r_{\mathrm{GAIL}}(s, a) = -\log\left(1 - \mathcal{D}(s, a)\right)
$$

Adversarial Motion Prior (AMP) [2] extends this framework to handle settings where only state information is available in the demonstrations. Instead of operating on state-action pairs, AMP's discriminator evaluates state transitions $(s, s^{\prime})$, enabling imitation learning from state-only demonstrations. Additionally, AMP employs a least-squares discriminator [35], replacing the traditional binary cross-entropy loss, which has been empirically shown to provide more stable adversarial training dynamics.

Fig. 2. Overview of the proposed Generative Adversarial Distillation (GAD) framework. Two discriminators separately evaluate the similarity of generated motions against a teacher policy and reference motion dataset, enabling the synthesis of natural and adaptive behaviors.
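A sketch of how the GAIL reward above maps a discriminator score to a policy reward (our illustration; the sigmoid squashing that keeps $\mathcal{D} \in (0, 1)$ is an assumption about the network's output head):

```python
import torch

def r_gail(discriminator: torch.nn.Module,
           s: torch.Tensor, a: torch.Tensor) -> torch.Tensor:
    """r_GAIL(s, a) = -log(1 - D(s, a)); the clamp avoids log(0) when D saturates."""
    d = torch.sigmoid(discriminator(torch.cat([s, a], dim=-1)))
    # AMP replaces (s, a) with state transitions (s, s') and a least-squares loss.
    return -torch.log((1.0 - d).clamp(min=1e-8))
```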
# B. Generative Adversarial Distillation

The core innovation of StyleLoco is our GAD framework, which synthesizes natural and adaptive behaviors from two complementary sources: a well-trained teacher policy and a reference motion dataset. As illustrated in Fig. 2, GAD trains a student policy $\pi_{\mathrm{student}}$ alongside two AMP-style discriminators, $\mathcal{D}_{\mathrm{teacher}}$ and $\mathcal{D}_{\mathrm{dataset}}$. Each discriminator evaluates the student's generated state transitions against one source of reference motions: either the teacher policy or the motion dataset.

Training proceeds in an interleaving manner, alternating between updating the student policy and the discriminators. In each iteration, we first update the student policy using the combined feedback from both discriminators and then train both discriminators to better distinguish between the student's outputs and their respective reference motions.
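The alternation can be summarized by the following training-step skeleton (a sketch only: `collect_rollout`, `ppo_update`, and `update_discriminator` are hypothetical stand-ins for standard components, not APIs from the paper):

```python
def gad_iteration(student, env, d_teacher, d_dataset,
                  teacher_buffer, motion_dataset,
                  collect_rollout, ppo_update, update_discriminator):
    # 1) Roll out the student and update it on the combined feedback
    #    (task reward plus both discriminator rewards).
    rollout = collect_rollout(student, env, d_teacher, d_dataset)
    ppo_update(student, rollout)
    # 2) Train each discriminator to separate its reference motions
    #    ("real") from the student's latest transitions ("fake").
    update_discriminator(d_teacher, real=teacher_buffer.sample(),
                         fake=rollout.transitions)
    update_discriminator(d_dataset, real=motion_dataset.sample(),
                         fake=rollout.transitions)
```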
The teacher discriminator $\mathcal{D}_{\mathrm{teacher}}$ optimizes:

$$
\begin{aligned}
\arg\min_{\mathcal{D}_{\text{teacher}}} \;& \mathbb{E}_{(s,s')\sim\pi_{\text{teacher}}}\left[\left(\mathcal{D}_{\text{teacher}}(s,s') - 1\right)^{2}\right] \\
&+ \mathbb{E}_{(s,s')\sim\pi_{\text{student}}}\left[\left(\mathcal{D}_{\text{teacher}}(s,s') + 1\right)^{2}\right] \\
&+ \lambda\, \mathbb{E}_{(s,s')\sim\pi_{\text{teacher}}}\left[\left\|\nabla_{(s,s')}\mathcal{D}_{\text{teacher}}(s,s')\right\|^{2}\right],
\end{aligned}
$$

while the reference discriminator $\mathcal{D}_{\mathrm{dataset}}$ ensures natural motion qualities by optimizing:

$$
\begin{aligned}
\arg\min_{\mathcal{D}_{\text{dataset}}} \;& \mathbb{E}_{(s,s')\sim\mathcal{M}}\left[\left(\mathcal{D}_{\text{dataset}}(s,s') - 1\right)^{2}\right] \\
&+ \mathbb{E}_{(s,s')\sim\pi_{\text{student}}}\left[\left(\mathcal{D}_{\text{dataset}}(s,s') + 1\right)^{2}\right] \\
&+ \lambda\, \mathbb{E}_{(s,s')\sim\mathcal{M}}\left[\left\|\nabla_{(s,s')}\mathcal{D}_{\text{dataset}}(s,s')\right\|^{2}\right],
\end{aligned}
$$

where $\lambda$ controls the gradient penalty term that ensures stable training.
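Both objectives share the same least-squares form; a PyTorch sketch of one such discriminator update (our illustration; `gp_weight` stands in for $\lambda$, whose value the paper does not give here):

```python
import torch

def lsgan_disc_loss(D: torch.nn.Module,
                    real: torch.Tensor,   # (s, s') features from the reference source
                    fake: torch.Tensor,   # (s, s') features from the student policy
                    gp_weight: float = 10.0) -> torch.Tensor:
    real = real.detach().requires_grad_(True)
    d_real = D(real)
    # Least-squares targets: +1 for reference motions, -1 for student motions.
    loss = ((d_real - 1.0) ** 2).mean() + ((D(fake.detach()) + 1.0) ** 2).mean()
    # Gradient penalty on reference samples stabilizes adversarial training.
    grad = torch.autograd.grad(d_real.sum(), real, create_graph=True)[0]
    return loss + gp_weight * grad.pow(2).sum(dim=-1).mean()
```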
The student policy learns from a combined reward function:

$$
r = r_{\text{task}} + w_{\text{teacher}} \cdot r_{\text{teacher}} + w_{\text{dataset}} \cdot r_{\text{dataset}},
$$

where the discriminator rewards are computed as:

$$
r_{\text{teacher}} = \max\left[0,\; 1 - 0.25\left(\mathcal{D}_{\text{teacher}}(s, s') - 1\right)^{2}\right]
$$

$$
r_{\text{dataset}} = \max\left[0,\; 1 - 0.25\left(\mathcal{D}_{\text{dataset}}(s, s') - 1\right)^{2}\right]
$$
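In code, the clipping and weighting read as follows (a sketch; the weights $w_{\text{teacher}}$ and $w_{\text{dataset}}$ are the tunable knobs discussed later, and the defaults below are placeholders):

```python
import torch

def style_reward(D: torch.nn.Module, trans: torch.Tensor) -> torch.Tensor:
    """max(0, 1 - 0.25 * (D(s, s') - 1)^2): maps LSGAN scores into [0, 1]."""
    return (1.0 - 0.25 * (D(trans) - 1.0) ** 2).clamp(min=0.0)

def combined_reward(r_task: torch.Tensor, trans: torch.Tensor,
                    d_teacher: torch.nn.Module, d_dataset: torch.nn.Module,
                    w_teacher: float = 1.0, w_dataset: float = 1.0) -> torch.Tensor:
    return (r_task
            + w_teacher * style_reward(d_teacher, trans)
            + w_dataset * style_reward(d_dataset, trans))
```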
Both discriminators process state transitions using a consistent feature set comprising joint positions and velocities, root linear and angular velocities in the robot's local frame, base link orientation (roll and pitch), and root height. This common representation enables effective comparison across different motion sources while capturing the essential characteristics of locomotion behavior.
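A sketch of assembling that shared feature vector (the names are our own illustration; tensor dimensions would follow the robot's joint layout):

```python
import torch

def disc_features(joint_pos: torch.Tensor, joint_vel: torch.Tensor,
                  root_lin_vel: torch.Tensor, root_ang_vel: torch.Tensor,
                  roll_pitch: torch.Tensor, root_height: torch.Tensor) -> torch.Tensor:
    """Per-state features shared by both discriminators; a transition
    (s, s') concatenates the features of two consecutive states."""
    return torch.cat([joint_pos, joint_vel, root_lin_vel, root_ang_vel,
                      roll_pitch, root_height], dim=-1)
```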
**Deployable Policy Distillation** A key aspect of our framework is enabling the student policy $\pi_{\mathrm{student}}$ to generate actions when privileged observations are unavailable in real-world deployment. While the teacher policy benefits from privileged information during training to better understand task objectives and achieve efficient convergence, the student policy must learn to generate appropriate actions using only deployable sensor observations. This asymmetric approach allows us to leverage rich state information during training while ensuring the final policy remains deployable. The specific observations available to the student policy are detailed in Table I.
# C. Training Process

**Curriculum Learning** Teacher policy $\pi_{\text{teacher}}$ training adopts a curriculum learning approach comprising two distinct phases. The initial stability phase prioritizes maintaining balance and preventing falls, establishing fundamental stability behaviors. This is followed by the mobility phase, where the policy develops comprehensive omnidirectional locomotion capabilities. The specific reward components for each phase are detailed in Table II.

**Demonstration Data Preparation** The locomotion motion data in this work is sourced from the LaFAN1 dataset and meticulously retargeted to conform to the kinematic specifications of the Unitree H1 robot. While this dataset offers diverse motion styles and velocity ranges, utilizing all demonstrations simultaneously introduces ambiguity in the learning process. To facilitate distinct gait style demonstrations across different velocity commands, we strategically selected motion clips with minimal or non-overlapping velocity ranges, ensuring relatively clear behavioral boundaries between different locomotion patterns.
**Asymmetric Actor-Critic Architecture** Student policy training utilizes an asymmetric actor-critic architecture to effectively handle partial observability in real-world conditions. The student's observation processing begins with temporal partial observations $o_{t}^{N} = [o_{t-n}, o_{t-n+1}, \dots, o_{t}]^{T}$. These observations are first processed through a partial-states encoder $\mathcal{E}$ to generate context latent representations, which are then combined with the current partial state observations and the velocity command. The resulting combined representation passes through MLP layers to produce the final control actions.

TABLE I AVAILABLE OBSERVATIONS IN TRAINING

<table><tr><td>Sources</td><td>Phase</td><td>CmdVel</td><td>DoFPos</td><td>DoFVel</td><td>LastAction</td><td>Diff</td><td>BaseLinVel</td><td>BaseAngVel</td><td>RPY</td><td>Root Height</td><td>Push</td><td>Friction</td><td>BodyMass</td><td>ContactStatus</td></tr><tr><td>Teacher</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr><tr><td>Dataset</td><td></td><td></td><td>✓</td><td>✓</td><td></td><td></td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td></td><td></td></tr><tr><td>Student</td><td></td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td></td><td></td><td>✓</td><td>✓</td><td></td><td></td><td></td><td></td><td></td></tr></table>

Notes:
- Phase: Indicates the phase of motion, serving as a temporal marker.
- Diff: Difference between current joint angular position and reference joint angular position, calculated based on Phase.
- ContactStatus: Information regarding the stance mask and feet contact forces.
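A compact sketch of this actor (layer sizes, activations, and the latent width are illustrative assumptions, not the paper's hyperparameters):

```python
import torch
import torch.nn as nn

class StudentActor(nn.Module):
    def __init__(self, obs_dim: int, hist_len: int, cmd_dim: int,
                 act_dim: int, latent_dim: int = 32):
        super().__init__()
        # Partial-states encoder E over the stacked observation history.
        self.encoder = nn.Sequential(
            nn.Linear(obs_dim * hist_len, 256), nn.ELU(),
            nn.Linear(256, latent_dim))
        # Action head over [context latent, current observation, command].
        self.head = nn.Sequential(
            nn.Linear(latent_dim + obs_dim + cmd_dim, 256), nn.ELU(),
            nn.Linear(256, 128), nn.ELU(),
            nn.Linear(128, act_dim))

    def forward(self, obs_hist: torch.Tensor, obs_t: torch.Tensor,
                cmd: torch.Tensor) -> torch.Tensor:
        # obs_hist: (..., hist_len, obs_dim) stacked partial observations.
        z = self.encoder(obs_hist.flatten(start_dim=-2))
        return self.head(torch.cat([z, obs_t, cmd], dim=-1))
```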
TABLE II REWARD DEFINITIONS USED IN TEACHER POLICY TRAINING

<table><tr><td>Term</td><td>Definition</td><td>Weight</td></tr><tr><td colspan="3">First Stage</td></tr><tr><td>Termination</td><td>termination = Ireset - Ittimeout</td><td>-1000</td></tr><tr><td>Linear Velocity Tracking</td><td>exp(-||xxy|2/0.1)</td><td>10</td></tr><tr><td>Angular Velocity Tracking</td><td>exp(-||u|2/0.1)</td><td>10</td></tr><tr><td>Linear Velocity z</td><td>||vz||2</td><td>-1.0</td></tr><tr><td>R-P Angular Velocity</td><td>||ωxy||2</td><td>-0.5</td></tr><tr><td>Orientation</td><td>Σi∈{x,y} (projected gravityi)2</td><td>-1.0</td></tr><tr><td>Base Height</td><td>exp(-100|hbase-htarget|) where hbase=zroot-(feet-0.08)</td><td>0.5</td></tr><tr><td>Action Rate</td><td>||at-at-1||2</td><td>-0.01</td></tr><tr><td>Energy Square</td><td>Σi=10(τiq̂i)21+||cxy||2</td><td>-5e-6</td></tr><tr><td>Stand Still</td><td>(Σ|q-qdefault|·Istand</td><td>-1</td></tr><tr><td>Feet Clearance</td><td>Σi||hfeet,i-htarget|<0.01|·(1-gait phasei)</td><td>2.5</td></tr><tr><td>Feet Contact Number</td><td>mean(Πcontact=stance mask)-Π(contact≠stance mask)</td><td>1</td></tr><tr><td>Default Joint Position</td><td>||q[1:2]-qdefault||2+||q[6:7]-qdefault||2</td><td>0.5</td></tr><tr><td>Action Smoothness</td><td>||at-2-2at-1+at||2</td><td>-0.001</td></tr><tr><td>Feet Slip</td><td>1-Σi exp(-||vxy|i||2)</td><td>-0.05</td></tr><tr><td>Reference Joint Position</td><td>exp(-2||q-qref||2)-0.5min(||q-qref||2,0.5)</td><td>10</td></tr><tr><td>Pelvis-Angle y Distance</td><td>(||ypelvis,pitch-yankle,L)||+||ypelvis,pitch-yankle,R)||·Π{|vy|<0.1}</td><td>-5</td></tr><tr><td>Upper Joint Constraints</td><td>Σ||q[12:14]-qdefault||2+||q[16:18]-qdefault||2+||q10-q10||2</td><td>-5</td></tr><tr><td colspan="3">Second Stage</td></tr><tr><td>Joint Torque</td><td>||τ||2</td><td>-2e-5</td></tr><tr><td>Joint Acceleration</td><td>||q||2</td><td>-1e-6</td></tr><tr><td>Feet Contact Forces</td><td>Σi max(||contact forcei||2-Fmax,0)</td><td>-0.01</td></tr><tr><td>Torque When Stand-Still</td><td>Σ[(τt-τt-1)2+(τt+τt-2-2τt-1)2]·Istand</td><td>-1e-3</td></tr><tr><td>Body Pitch</td><td>||pitch-0.01||</td><td>-5</td></tr><tr><td>Body Roll</td><td>||roll||</td><td>-10</td></tr><tr><td>Track Velocity Hard</td><td>e-10||vxy-target-vxy||+e-10|ωz|2</td><td>50</td></tr><tr><td>Ankle Air Time</td><td>∑(tair,i-0.2)·Ifirst,contact,i·Istand.still</td><td>100</td></tr><tr><td>Ankle Limits</td><td>-∑i i∈{4,9} clip(qi-qmin,i,0) + clip(qmax,i-qi,0)</td><td>-200</td></tr></table>

Notes:
- $\mathbb{I}_A = 1$ if $A$ is true and $\mathbb{I}_A = 0$ otherwise.
- The maximum allowable feet contact force $F_{\mathrm{max}}$ is set to 550 N.
# D. Implementation and Deployment Details

Both policies are implemented using the Proximal Policy Optimization (PPO) algorithm [36], with comprehensive domain randomization ensuring robust real-world transfer.

**Domain Randomization** Following existing research on humanoid whole-body control, our domain randomization encompasses three aspects: physical parameter variations, systematic observation noise injection, and randomized external force perturbations. The physical parameters include variations in mass distribution, joint properties, and surface interactions. Observation noise is carefully calibrated to match real-world sensor characteristics, while external forces simulate unexpected disturbances the robot might encounter during deployment.
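The three aspects translate into a randomization table of roughly this shape (the ranges below are invented placeholders for illustration; the paper does not list its values in this section):

```python
import random

DR_RANGES = {
    "base_mass_offset_kg":  (-2.0, 2.0),   # mass distribution
    "joint_friction_scale": (0.5, 1.5),    # joint properties
    "ground_friction":      (0.4, 1.2),    # surface interactions
    "push_force_n":         (0.0, 100.0),  # external perturbations
}
OBS_NOISE_STD = 0.02  # observation noise, calibrated to the real sensors

def sample_randomization() -> dict:
    """Draw one environment's physics parameters at episode reset."""
    return {k: random.uniform(lo, hi) for k, (lo, hi) in DR_RANGES.items()}
```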
| 174 |
+
Safe Deployment Safe deployment is achieved through torque limiting. This controller continuously monitors and adjusts torque outputs to remain within safe operational limits. The deployment architecture operates with the policy executing at $50\mathrm{Hz}$ , while the low-level control loop maintains precise actuation at $1000\mathrm{Hz}$ , ensuring responsive and stable behavior.
Real-world execution incorporates additional safety measures through continuous monitoring of joint positions, velocities, and torques. When approaching operational limits, the system smoothly modulates commands to maintain safe operation while preserving task performance. This approach enables robust deployment across varying conditions while protecting the hardware from potential damage.
# IV. EXPERIMENTS
We conduct comprehensive experiments in both simulation and real-world environments to evaluate StyleLoco's effectiveness in generating natural and adaptive locomotion behaviors. Our evaluation framework addresses four key aspects: (1) the effectiveness of GAD's distillation capabilities, (2) the accuracy of velocity tracking during locomotion tasks, (3) the quality of motion style reproduction, and (4) real-world deployment performance.
All experiments are conducted using the Unitree H1 humanoid robot in both simulated and physical environments. For reference motions, we utilize the LaFAN1 dataset, carefully retargeted to match the H1's kinematics. The motion data comprises global root position and orientation (quaternion), along with joint angular positions. Simulated experiments are performed in the NVIDIA Isaac Gym environment, which enables efficient parallel training and evaluation.
# A. Distillation Performance
Our first set of experiments evaluates GAD's ability to effectively distill privileged information from the teacher policy while maintaining task performance. We compare GAD against several baseline distillation approaches, measuring both task achievement and motion naturalness.
One of the main contributions of this work is the development of a Generative Adversarial Distillation method. In this context, we emphasize the ability of our single teacher discriminator (GAD-SD) to effectively distill knowledge from the teacher policy. To evaluate this capability, we compare our method against DAgger, one of the most widely used distillation methods in robot control.
First, we train an omnidirectional locomotion policy as the teacher. The command ranges used for both teacher training and the subsequent distillation experiment are listed in Table III. We then leverage the well-trained teacher policy to guide the learning of the student policy.
TABLE III RANGES OF LOCOMOTION TASK COMMAND
<table><tr><td>Parameter</td><td>Teacher</td><td>Distillation Student</td><td>StyleLoco Student</td></tr><tr><td>Forward (vx)</td><td>[-1.0, 3.5] m/s</td><td>[-1.0, 3.5] m/s</td><td>[-1.0, 4.5] m/s</td></tr><tr><td>Lateral (vy)</td><td>[-0.8, 0.8] m/s</td><td>[-0.8, 0.8] m/s</td><td>[-1.0, 1.0] m/s</td></tr><tr><td>Angular (ωz)</td><td>[-1.0, 1.0] rad/s</td><td>[-1.0, 1.0] rad/s</td><td>[-1.5, 1.5] rad/s</td></tr></table>
The evaluation metrics include linear velocity tracking reward, angular velocity tracking reward, and average survival time. As shown in Table IV, while both methods successfully learn from the teacher policy, GAD-SD demonstrates superior performance, particularly in linear velocity tracking and survival time.
TABLE IV QUANTITATIVE COMPARISON OF DISTILLATION METHODS
<table><tr><td>Method</td><td>Linear Velocity Tracking Reward(±0.1) ↑</td><td>Angular Velocity Tracking Reward(±0.1) ↑</td><td>Average Survival Time(±15 steps) ↑</td></tr><tr><td>Teacher</td><td>7.403</td><td>2.824</td><td>925.9</td></tr><tr><td>DAgger</td><td>3.744</td><td>2.516</td><td>506.6</td></tr><tr><td>GAD-SD</td><td>5.679</td><td>2.653</td><td>860.3</td></tr></table>
Notes:

- Teacher: teacher policy trained with privileged information
- GAD-SD: GAD with only the teacher distillation discriminator
# B. Locomotion Capabilities
The second set of experiments assesses the student policy's locomotion capabilities, particularly its ability to track commanded velocities while maintaining natural motion patterns. We compare StyleLoco against state-of-the-art approaches in terms of tracking accuracy, stability, and style preservation. Table VI shows comparative results across various performance metrics.
The locomotion task evaluates the student policy's ability to track local velocity commands comprising three components: forward/backward velocity $v_{x}$, lateral velocity $v_{y}$, and rotational velocity $\omega_{z}$. Command values are uniformly sampled within the pre-defined ranges specified in Table III. For style imitation, we select four representative motion clips as reference targets for the style discriminator, with their corresponding velocity profiles detailed in Table V.
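A minimal sketch of this sampling step, using the StyleLoco student ranges from Table III (the helper name `sample_command` is ours):

```python
import numpy as np

# StyleLoco student command ranges from Table III.
CMD_RANGES = {"vx": (-1.0, 4.5), "vy": (-1.0, 1.0), "wz": (-1.5, 1.5)}

def sample_command(rng: np.random.Generator) -> np.ndarray:
    """Uniformly sample one (v_x, v_y, omega_z) command within the Table III ranges."""
    return np.array([rng.uniform(lo, hi) for lo, hi in CMD_RANGES.values()])

command = sample_command(np.random.default_rng())
```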
To comprehensively evaluate our double-discriminator framework, we compare our method against three baseline approaches:
- SD-Motion: Single-discriminator approach using only motion clips as reference.
TABLE V VELOCITY PROFILES FOR MOTION CLIPS
<table><tr><td>Vel Profiles</td><td>Forward (m/s)</td><td>Lateral (m/s)</td><td>Angular (rad/s)</td></tr><tr><td>Slow Forward</td><td>[0.089, 1.205]</td><td>[-0.396, 0.188]</td><td>[-1.734, 0.906]</td></tr><tr><td>Medium Forward</td><td>[0.884, 2.067]</td><td>[-0.563, 0.306]</td><td>[-2.044, 1.963]</td></tr><tr><td>Fast Forward</td><td>[2.438, 4.378]</td><td>[-1.166, 0.943]</td><td>[-1.555, 3.476]</td></tr><tr><td>Move Backward</td><td>[-1.088, -0.350]</td><td>[-0.425, 0.365]</td><td>[-1.580, 1.981]</td></tr></table>
- SD-Full: Single-discriminator approach using a combination of teacher policy online roll-out data and motion clips.
- DAgger+Style: DAgger-based teacher policy distillation combined with a separate discriminator for style learning.
The evaluation metrics are similar to those used in the distillation task experiment, with the addition of energy consumption.
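The paper does not spell out its energy-consumption formula, so the sketch below uses a common definition (absolute mechanical work per control step, summed over joints) purely as an assumed illustration:

```python
import torch

def energy_consumption(tau: torch.Tensor, qd: torch.Tensor, dt: float) -> torch.Tensor:
    # Assumed definition: sum_i |tau_i * qdot_i| * dt per control step;
    # tau and qd are per-joint torques and joint velocities.
    return torch.sum(torch.abs(tau * qd), dim=-1) * dt
```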
As demonstrated in Table VI, our proposed double-discriminator framework achieves superior performance in velocity tracking and survival time compared to all baseline methods. Notably, the SD-Motion approach exhibits the best energy consumption, suggesting that human motions are inherently energy-efficient and that properly incorporating motion demonstrations during training contributes to reduced energy consumption.



Fig. 3. From top to bottom: a stylized locomotion demonstration from LaFAN1 (top), motions generated by the student policy in simulation (middle), and motions generated by the student policy deployed on the real H1 robot (bottom).
TABLE VI QUANTITATIVE COMPARISON OF DIFFERENT METHODS ACROSS VARIOUS METRICS
<table><tr><td>Method</td><td>Linear Velocity Tracking Reward(±0.1) ↑</td><td>Angular Velocity Tracking Reward(±0.1) ↑</td><td>Average Survival Time(±15 steps) ↑</td><td>Energy Consumption(±0.001) ↓</td></tr><tr><td>SD-Motion</td><td>4.229</td><td>2.249</td><td>813.2</td><td>0.065</td></tr><tr><td>SD-Full</td><td>4.665</td><td>2.413</td><td>824.1</td><td>0.093</td></tr><tr><td>DAgger+Style</td><td>5.059</td><td>2.384</td><td>826.9</td><td>0.079</td></tr><tr><td>GAD (Ours)</td><td>5.485</td><td>2.644</td><td>846.5</td><td>0.081</td></tr></table>
Notes:
- SD-Motion: Single discriminator with only motion demonstrations
- SD-Full: Single discriminator with both teacher roll-outs and motion demonstrations
- DAgger+Style: DAgger distillation with additional style discriminator
# C. Evaluations on Style Imitation
To demonstrate our method's ability to combine robust locomotion skills with distinct motion styles, we evaluate a particularly challenging case: synthesizing a limping gait by combining a regular walking teacher policy with reference motions exhibiting a distinct limping pattern. Fig. 3 shows the comparison between the original limping motion from LaFAN1 (visualized in Rerun [37]), the synthesized motion in Isaac Gym [38], and the deployed behavior on the physical Unitree H1 robot. The results demonstrate that our method successfully maintains the characteristic limping style while preserving the fundamental locomotion capabilities of the teacher policy.
This fusion of different motion sources creates an inherent trade-off between style fidelity and command tracking accuracy, as the stylized motions often deviate significantly from the teacher's optimal movement patterns. Our framework addresses this challenge through adjustable discriminator weights, allowing fine-tuned balance between style preservation and task performance.
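A minimal sketch of how such adjustable weights could blend the two discriminator rewards with the task reward; the function and the default weights are illustrative assumptions, not the paper's settings.

```python
def combined_reward(r_task: float, r_teacher: float, r_style: float,
                    w_teacher: float = 1.0, w_style: float = 0.5) -> float:
    """Blend the task reward with the teacher- and style-discriminator rewards.

    Raising w_style favors style fidelity at the cost of tracking accuracy;
    raising w_teacher favors the teacher's task-optimal movement patterns.
    Default values here are illustrative only.
    """
    return r_task + w_teacher * r_teacher + w_style * r_style
```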
# D. Real Robot Deployment
The real-world deployment of our student policy on the Unitree H1 robot validates the practical effectiveness of our approach across various scenarios. As shown in Fig. 1, the robot demonstrates smooth transitions in both gait patterns and arm postures when responding to velocity command changes from low to medium speeds. The policy's robustness is further evidenced in Fig. 4, where the robot maintains stable locomotion at high speeds up to $3\mathrm{m / s}$ . Most notably, Fig. 3 showcases our method's unique capability to synthesize stylized gaits that combine the stability of the teacher policy with distinctive motion patterns from the reference datasets, resulting in natural and controllable locomotion behaviors.
# V. CONCLUSION AND LIMITATIONS
This paper presents StyleLoco, a novel framework for humanoid locomotion that bridges the gap between robust task execution and natural motion synthesis. Through our proposed Generative Adversarial Distillation approach, we demonstrate the effective combination of privileged information from expert policies with stylistic elements from human demonstrations. Our extensive experimental results, including successful deployment on the Unitree H1 robot, validate the framework's capability to generate stable and natural locomotion behaviors across diverse scenarios, from high-speed running at $3\mathrm{m / s}$ to stylized gaits such as limping.

![]()

Fig. 4. H1 operating outdoors at forward velocity $(v_{x})$ of $3\mathrm{m / s}$.
The key innovation of our double-discriminator architecture enables simultaneous learning from heterogeneous sources while maintaining deployability through careful handling of privileged information. Quantitative evaluations show that StyleLoco outperforms existing approaches in both task performance and style preservation, demonstrating superior velocity tracking rewards and survival times while maintaining natural motion patterns.
Despite these achievements, several important limitations warrant future investigation. A primary challenge lies in style disambiguation when motion demonstrations share overlapping velocity ranges, potentially creating ambiguity in style selection and degrading imitation fidelity. Future research could explore automatic style clustering or context-aware selection mechanisms to address this limitation. Additionally, the current implementation relies on manual tuning of discriminator weights to balance task completion and style imitation objectives. Developing adaptive weighting schemes or automated tuning methods could enhance the framework's practical applicability. While our method shows impressive results in locomotion tasks, its generalization to broader manipulation tasks or more complex behaviors remains to be explored, opening avenues for future research.
Despite these limitations, StyleLoco represents a step toward natural and capable humanoid robotics, offering a promising foundation for future research in combining task-oriented control with human-like motion generation.
# REFERENCES
[1] K. Darvish, L. Penco, J. Ramos, R. Cisneros, J. Pratt, E. Yoshida, S. Ivaldi, and D. Pucci, "Teleoperation of humanoid robots: A survey," IEEE Transactions on Robotics, vol. 39, no. 3, pp. 1706-1727, 2023.

[2] X. B. Peng, Z. Ma, P. Abbeel, S. Levine, and A. Kanazawa, "AMP: Adversarial motion priors for stylized physics-based character control," ACM Transactions on Graphics (ToG), vol. 40, no. 4, pp. 1-20, 2021.

[3] F. G. Harvey, M. Yurick, D. Nowrouzezahrai, and C. Pal, "Robust motion in-betweening," ACM Transactions on Graphics, vol. 39, no. 4, 2020.

[4] N. Mahmood, N. Ghorbani, N. F. Troje, G. Pons-Moll, and M. J. Black, "AMASS: Archive of motion capture as surface shapes," in International Conference on Computer Vision, Oct. 2019, pp. 5442-5451.

[5] X. Cheng, Y. Ji, J. Chen, R. Yang, G. Yang, and X. Wang, "Expressive whole-body control for humanoid robots," arXiv preprint arXiv:2402.16796, 2024.

[6] T. Marcucci, M. Gabiccini, and A. Artoni, "A two-stage trajectory optimization strategy for articulated bodies with unscheduled contact sequences," IEEE Robotics and Automation Letters, vol. 2, no. 1, pp. 104-111, 2017.

[7] G. Romualdi, S. Dafarra, G. L'Erario, I. Sorrentino, S. Traversaro, and D. Pucci, "Online non-linear centroidal MPC for humanoid robot locomotion with step adjustment," in 2022 International Conference on Robotics and Automation (ICRA). IEEE, 2022, pp. 10412-10419.

[8] J. Englsberger, A. Dietrich, G.-A. Mesesan, G. Garofalo, C. Ott, and A. O. Albu-Schäffer, "MPTC: Modular passive tracking controller for stack of tasks based control frameworks," 16th Robotics: Science and Systems, RSS 2020, 2020.

[9] M. Elobaid, G. Romualdi, G. Nava, L. Rapetti, H. A. O. Mohamed, and D. Pucci, "Online non-linear centroidal MPC for humanoid robots payload carrying with contact-stable force parametrization," in 2023 IEEE International Conference on Robotics and Automation (ICRA). IEEE, 2023, pp. 12233-12239.

[10] Y. Ishiguro, K. Kojima, F. Sugai, S. Nozawa, Y. Kakiuchi, K. Okada, and M. Inaba, "High speed whole body dynamic motion experiment with real time master-slave humanoid robot system," in 2018 IEEE International Conference on Robotics and Automation (ICRA), 2018, pp. 5835-5841.

[11] Y. Ishiguro, T. Makabe, Y. Nagamatsu, Y. Kojio, K. Kojima, F. Sugai, Y. Kakiuchi, K. Okada, and M. Inaba, "Bilateral humanoid teleoperation system using whole-body exoskeleton cockpit TABLIS," IEEE Robotics and Automation Letters, vol. 5, no. 4, pp. 6419-6426, 2020.

[12] J. Ramos and S. Kim, "Dynamic locomotion synchronization of bipedal robot and human operator via bilateral feedback teleoperation," Science Robotics, vol. 4, no. 35, p. eaav4282, 2019.

[13] K. Ayusawa and E. Yoshida, "Motion retargeting for humanoid robots based on simultaneous morphing parameter identification and motion optimization," IEEE Transactions on Robotics, vol. 33, no. 6, pp. 1343-1357, 2017.

[14] K. Hu, C. Ott, and D. Lee, "Online human walking imitation in task and joint space based on quadratic programming," in 2014 IEEE International Conference on Robotics and Automation (ICRA), 2014, pp. 3458-3464.

[15] F.-J. Montecillo-Puente, M. N. Sreenivasa, and J.-P. Laumond, "On real-time whole-body human to humanoid motion transfer," in International Conference on Informatics in Control, Automation and Robotics, 2010. [Online]. Available: https://api.semanticscholar.org/CorpusID:20676844

[16] K. Yamane, S. O. Anderson, and J. K. Hodgins, "Controlling humanoid robots with human motion data: Experimental validation," in 2010 10th IEEE-RAS International Conference on Humanoid Robots, 2010, pp. 504-510.

[17] A. Di Fava, K. Bouyarmane, K. Chappellet, E. Ruffaldi, and A. Kheddar, "Multi-contact motion retargeting from human to humanoid robot," in 2016 IEEE-RAS 16th International Conference on Humanoid Robots (Humanoids), 2016, pp. 1081-1086.

[18] K. Otani and K. Bouyarmane, "Adaptive whole-body manipulation in human-to-humanoid multi-contact motion retargeting," in 2017 IEEE-RAS 17th International Conference on Humanoid Robotics (Humanoids), 2017, pp. 446-453.

[19] L. Penco, B. Clement, V. Modugno, E. Mingo Hoffman, G. Nava, D. Pucci, N. G. Tsagarakis, J. B. Mouret, and S. Ivaldi, "Robust real-time whole-body motion retargeting from human to humanoid," in 2018 IEEE-RAS 18th International Conference on Humanoid Robots (Humanoids), 2018, pp. 425-432.

[20] J. Koenemann, F. Burget, and M. Bennewitz, "Real-time imitation of human whole-body motions by humanoids," in 2014 IEEE International Conference on Robotics and Automation (ICRA), 2014, pp. 2806-2812.

[21] O. E. Ramos, N. Mansard, O. Stasse, C. Benazeth, S. Hak, and L. Saab, "Dancing humanoid robots: Systematic use of OSID to compute dynamically consistent movements following a motion capture pattern," IEEE Robotics and Automation Magazine, vol. 22, no. 4, pp. 16-26, 2015.

[22] L. Penco, K. Momose, S. McCrory, D. Anderson, N. Kitchel, D. Calvert, and R. J. Griffin, "Mixed reality teleoperation assistance for direct control of humanoids," IEEE Robotics and Automation Letters, vol. 9, no. 2, pp. 1937-1944, 2024.

[23] Z. Li, X. B. Peng, P. Abbeel, S. Levine, G. Berseth, and K. Sreenath, "Reinforcement learning for versatile, dynamic, and robust bipedal locomotion control," The International Journal of Robotics Research, p. 02783649241285161, 2024.

[24] Z. Fu, A. Kumar, J. Malik, and D. Pathak, "Minimizing energy consumption leads to the emergence of gaits in legged robots," in 5th Annual Conference on Robot Learning, 2021.

[25] J. Ho and S. Ermon, "Generative adversarial imitation learning," Advances in Neural Information Processing Systems, vol. 29, 2016.

[26] X. Huang, Y. Chi, R. Wang, Z. Li, X. B. Peng, S. Shao, B. Nikolic, and K. Sreenath, "DiffuseLoco: Real-time legged locomotion control with diffusion from offline datasets," 2024. [Online]. Available: https://arxiv.org/abs/2404.19264

[27] B. Jia and D. Manocha, "Sim-to-real robotic sketching using behavior cloning and reinforcement learning," in 2024 IEEE International Conference on Robotics and Automation (ICRA), 2024, pp. 18272-18278.

[28] S. Ross and D. Bagnell, "Efficient reductions for imitation learning," in Proceedings of the Thirteenth International Conference on Artificial Intelligence and Statistics, ser. Proceedings of Machine Learning Research, Y. W. Teh and M. Titterington, Eds., vol. 9. Chia Laguna Resort, Sardinia, Italy: PMLR, 13-15 May 2010, pp. 661-668. [Online]. Available: https://proceedings.mlr.press/v9/ross10a.html

[29] S. Ross, G. Gordon, and D. Bagnell, "A reduction of imitation learning and structured prediction to no-regret online learning," in Proceedings of the Fourteenth International Conference on Artificial Intelligence and Statistics. JMLR Workshop and Conference Proceedings, 2011, pp. 627-635.

[30] M. Ji, X. Peng, F. Liu, J. Li, G. Yang, X. Cheng, and X. Wang, "ExBody2: Advanced expressive humanoid whole-body control," arXiv preprint arXiv:2412.13196, 2024.

[31] T. He, Z. Luo, W. Xiao, C. Zhang, K. Kitani, C. Liu, and G. Shi, "Learning human-to-humanoid real-time whole-body teleoperation," arXiv preprint arXiv:2403.04436, 2024.

[32] T. He, W. Xiao, T. Lin, Z. Luo, Z. Xu, Z. Jiang, C. Liu, G. Shi, X. Wang, L. Fan, and Y. Zhu, "HOVER: Versatile neural whole-body controller for humanoid robots," arXiv preprint arXiv:2410.21229, 2024.

[33] T. He, Z. Luo, X. He, W. Xiao, C. Zhang, W. Zhang, K. Kitani, C. Liu, and G. Shi, "OmniH2O: Universal and dexterous human-to-humanoid whole-body teleoperation and learning," arXiv preprint arXiv:2406.08858, 2024.

[34] X. Gu, Y.-J. Wang, X. Zhu, C. Shi, Y. Guo, Y. Liu, and J. Chen, "Advancing humanoid locomotion: Mastering challenging terrains with denoising world model learning," arXiv preprint arXiv:2408.14472, 2024.

[35] X. Mao, Q. Li, H. Xie, R. Y. Lau, Z. Wang, and S. Paul Smolley, "Least squares generative adversarial networks," in Proceedings of the IEEE International Conference on Computer Vision, 2017, pp. 2794-2802.

[36] J. Schulman, F. Wolski, P. Dhariwal, A. Radford, and O. Klimov, "Proximal policy optimization algorithms," arXiv preprint arXiv:1707.06347, 2017.

[37] Rerun Development Team, "Rerun: A visualization SDK for multimodal data," Online, 2024, available from https://www.rerun.io/ and https://github.com/rerun-io/rerun. [Online]. Available: https://www.rerun.io

[38] V. Makoviychuk, L. Wawrzyniak, Y. Guo, M. Lu, K. Storey, M. Macklin, D. Hoeller, N. Rudin, A. Allshire, A. Handa, and G. State, "Isaac Gym: High performance GPU-based physics simulation for robot learning," 2021. [Online]. Available: https://arxiv.org/abs/2108.10470
data/2025/2503_15xxx/2503.15092/b6252e1c-d150-4802-b7b5-057b9326a285_content_list.json
ADDED
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "TOWARDS UNDERSTANDING THE SAFETY BOUNDARIES OF DEEPSEEK MODELS: EVALUATION AND FINDINGS",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
171,
|
| 8 |
+
98,
|
| 9 |
+
823,
|
| 10 |
+
170
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Zonghao Ying $^{1}$ , Guangyi Zheng $^{1}$ , Yongxin Huang $^{1}$ , Deyue Zhang $^{2}$ , Wenxin Zhang $^{3}$ , Quchen Zou $^{2}$ , Aishan Liu $^{1}$ , Xianglong Liu $^{1}$ , and Dacheng Tao $^{4}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
184,
|
| 19 |
+
199,
|
| 20 |
+
812,
|
| 21 |
+
229
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "<sup>1</sup>Beihang University",
|
| 28 |
+
"bbox": [
|
| 29 |
+
426,
|
| 30 |
+
241,
|
| 31 |
+
566,
|
| 32 |
+
257
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "2360 AI Security Lab",
|
| 39 |
+
"bbox": [
|
| 40 |
+
426,
|
| 41 |
+
256,
|
| 42 |
+
570,
|
| 43 |
+
270
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "<sup>3</sup>University of Chinese Academy of Sciences",
|
| 50 |
+
"bbox": [
|
| 51 |
+
349,
|
| 52 |
+
268,
|
| 53 |
+
645,
|
| 54 |
+
284
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "$^{4}$ Nanyang Technological University",
|
| 61 |
+
"bbox": [
|
| 62 |
+
380,
|
| 63 |
+
282,
|
| 64 |
+
616,
|
| 65 |
+
297
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "ABSTRACT",
|
| 72 |
+
"text_level": 1,
|
| 73 |
+
"bbox": [
|
| 74 |
+
450,
|
| 75 |
+
339,
|
| 76 |
+
545,
|
| 77 |
+
353
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "text",
|
| 83 |
+
"text": "This study presents the first comprehensive safety evaluation of the DeepSeek models, focusing on evaluating the safety risks associated with their generated content. Our evaluation encompasses DeepSeek's latest generation of large language models, multimodal large language models, and text-to-image models, systematically examining their performance regarding unsafe content generation. Notably, we developed a bilingual (Chinese-English) safety evaluation dataset tailored to Chinese sociocultural contexts, enabling a more thorough evaluation of the safety capabilities of Chinese-developed models. Experimental results indicate that despite their strong general capabilities, DeepSeek models exhibit significant safety vulnerabilities across multiple risk dimensions, including algorithmic discrimination and sexual content. These findings provide crucial insights for understanding and improving the safety of large foundation models. Our code is available at https://github.com/NY1024/DeepSeek-Safety-Eval.",
|
| 84 |
+
"bbox": [
|
| 85 |
+
228,
|
| 86 |
+
368,
|
| 87 |
+
767,
|
| 88 |
+
550
|
| 89 |
+
],
|
| 90 |
+
"page_idx": 0
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"type": "text",
|
| 94 |
+
"text": "1 INTRODUCTION",
|
| 95 |
+
"text_level": 1,
|
| 96 |
+
"bbox": [
|
| 97 |
+
173,
|
| 98 |
+
573,
|
| 99 |
+
336,
|
| 100 |
+
588
|
| 101 |
+
],
|
| 102 |
+
"page_idx": 0
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"type": "text",
|
| 106 |
+
"text": "With the rapid advancement of artificial intelligence technology, large models such as the DeepSeek series have demonstrated remarkable capabilities across multiple domains Abraham (2025); Faray de Paiva et al. (2025); Mikhail et al. (2025). These models trained on vast datasets understand and generate diverse content forms, transformatively impacting multiple industries Liu et al. (2023a; 2020a;b). However, alongside these technological advances, model safety concerns have become increasingly prominent Liu et al. (2019; 2021; 2022; 2023b); Zhang et al. (2021); Wang et al. (2021); Ying & Wu (2023a;b), particularly the potential risks associated with generating unsafe content Ying et al. (2024c; 2025), which require systematic evaluation Ying et al. (2024b;a).",
|
| 107 |
+
"bbox": [
|
| 108 |
+
169,
|
| 109 |
+
603,
|
| 110 |
+
823,
|
| 111 |
+
715
|
| 112 |
+
],
|
| 113 |
+
"page_idx": 0
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"type": "text",
|
| 117 |
+
"text": "Currently, the community has established multiple evaluation frameworks to test the safety performance of mainstream large models Yuan et al. (2024a;b); Röttger et al. (2024); Tang et al. (2021); Liu et al. (2023c); Guo et al. (2023). However, these evaluation standards lack consideration for China's national conditions and cultural background. Although some research has preliminarily identified certain safety risks in DeepSeek LLMs Arrieta et al. (2025); Parmar & Govindarajulu (2025); Zhou et al. (2025); Xu et al. (2025), these assessments are typically limited to specific scenarios or single models, lacking a comprehensive and systematic safety evaluation of the entire DeepSeek model series. This assessment gap leaves us with limited knowledge about the comprehensive risk profile these models may face in practical applications.",
|
| 118 |
+
"bbox": [
|
| 119 |
+
169,
|
| 120 |
+
720,
|
| 121 |
+
823,
|
| 122 |
+
847
|
| 123 |
+
],
|
| 124 |
+
"page_idx": 0
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"type": "text",
|
| 128 |
+
"text": "This research presents the first systematic safety evaluation of the complete DeepSeek model series, covering its latest generation of large language models (LLMs) (DeepSeek-R1 Guo et al. (2025) and DeepSeek-V3 Liu et al. (2024a)), multimodal large language model (MLLM) (DeepSeek-VL2 Wu et al. (2024)), and text-to-image model (T2I model) (Janus-Pro-7B Chen et al. (2025)). We focus on assessing the safety risks of these models in generating content, including both text and image",
|
| 129 |
+
"bbox": [
|
| 130 |
+
169,
|
| 131 |
+
854,
|
| 132 |
+
826,
|
| 133 |
+
925
|
| 134 |
+
],
|
| 135 |
+
"page_idx": 0
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"type": "aside_text",
|
| 139 |
+
"text": "arXiv:2503.15092v1 [cs.CR] 19 Mar 2025",
|
| 140 |
+
"bbox": [
|
| 141 |
+
22,
|
| 142 |
+
260,
|
| 143 |
+
57,
|
| 144 |
+
705
|
| 145 |
+
],
|
| 146 |
+
"page_idx": 0
|
| 147 |
+
},
|
| 148 |
+
{
|
| 149 |
+
"type": "page_number",
|
| 150 |
+
"text": "1",
|
| 151 |
+
"bbox": [
|
| 152 |
+
493,
|
| 153 |
+
948,
|
| 154 |
+
504,
|
| 155 |
+
959
|
| 156 |
+
],
|
| 157 |
+
"page_idx": 0
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"type": "text",
|
| 161 |
+
"text": "modalities. Specifically, for the safety evaluation of large language models, we have designed a Chinese-English bilingual safety evaluation dataset suitable for China's national conditions, which can more comprehensively assess the safety capabilities of Chinese-developed models.",
|
| 162 |
+
"bbox": [
|
| 163 |
+
169,
|
| 164 |
+
103,
|
| 165 |
+
823,
|
| 166 |
+
147
|
| 167 |
+
],
|
| 168 |
+
"page_idx": 1
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"type": "text",
|
| 172 |
+
"text": "Experimental results indicate that despite the excellent performance of the DeepSeek series models in general capabilities, significant vulnerabilities still exist across multiple safety dimensions. Particularly in areas such as algorithmic discrimination An et al. (2024) and sexual content Ma et al. (2024), the protective effects of existing safety alignments are insufficient, potentially causing adverse social impacts when the models are deployed in real-world applications. Additionally, we have made several notable findings: 1 The models show significant differences in attack success rates when receiving queries in Chinese versus English, with an average disparity of $21.7\\%$ ; 2 The exposed chain-of-thought reasoning in DeepSeek-R1 increases its safety risks, with an average attack success rate $30.4\\%$ higher than DeepSeek-V3; 3 When facing jailbreak attacks, the attack success rates of DeepSeek models rise dramatically, reaching up to $100\\%$ in some categories.",
|
| 173 |
+
"bbox": [
|
| 174 |
+
169,
|
| 175 |
+
152,
|
| 176 |
+
826,
|
| 177 |
+
294
|
| 178 |
+
],
|
| 179 |
+
"page_idx": 1
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "text",
|
| 183 |
+
"text": "These findings not only reveal the current safety shortcomings of these models but also provide specific directions for improving model safety mechanisms in the future. It is our hope that this study will contribute to the broader effort of advancing large model safety, fostering the development of more robust and responsible AI systems for the benefit of society.",
|
| 184 |
+
"bbox": [
|
| 185 |
+
169,
|
| 186 |
+
297,
|
| 187 |
+
823,
|
| 188 |
+
354
|
| 189 |
+
],
|
| 190 |
+
"page_idx": 1
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"type": "text",
|
| 194 |
+
"text": "2 PRELIMINARIES",
|
| 195 |
+
"text_level": 1,
|
| 196 |
+
"bbox": [
|
| 197 |
+
171,
|
| 198 |
+
378,
|
| 199 |
+
339,
|
| 200 |
+
393
|
| 201 |
+
],
|
| 202 |
+
"page_idx": 1
|
| 203 |
+
},
|
| 204 |
+
{
|
| 205 |
+
"type": "text",
|
| 206 |
+
"text": "2.1 DEEKEEKMODELS",
|
| 207 |
+
"text_level": 1,
|
| 208 |
+
"bbox": [
|
| 209 |
+
171,
|
| 210 |
+
410,
|
| 211 |
+
359,
|
| 212 |
+
425
|
| 213 |
+
],
|
| 214 |
+
"page_idx": 1
|
| 215 |
+
},
|
| 216 |
+
{
|
| 217 |
+
"type": "text",
|
| 218 |
+
"text": "DeepSeek-R1 Guo et al. (2025) is the first-generation reasoning model designed to enhance the reasoning capabilities of LLMs. Its development incorporated multi-stage training and cold-start data prior to reinforcement learning. Its predecessor, DeepSeek-R1-Zero, exhibited issues including poor readability and language mixing. DeepSeek-R1 not only addresses these problems but further improves reasoning performance, achieving comparable results to OpenAI-o1-1217 OpenAI et al. (2024b) on reasoning tasks. This study evaluates the safety risk of its 671B parameter version.",
|
| 219 |
+
"bbox": [
|
| 220 |
+
169,
|
| 221 |
+
436,
|
| 222 |
+
823,
|
| 223 |
+
523
|
| 224 |
+
],
|
| 225 |
+
"page_idx": 1
|
| 226 |
+
},
|
| 227 |
+
{
|
| 228 |
+
"type": "text",
|
| 229 |
+
"text": "DeepSeek-V3 Liu et al. (2024a) is a powerful Mixture-of-Experts (MoE Cai et al. (2024)) language model with a total of 671B parameters, activating 37B parameters per token. It employs Multihead Latent Attention (MLA) and the DeepSeekMoE architecture to achieve efficient inference and economical training. Previous evaluations have demonstrated its exceptional performance across multiple tasks, surpassing other open-source models and achieving comparable results to leading closed-source models, with notable advantages in domains such as coding and mathematics. We have similarly conducted a safety evaluation of this model.",
|
| 230 |
+
"bbox": [
|
| 231 |
+
169,
|
| 232 |
+
527,
|
| 233 |
+
825,
|
| 234 |
+
627
|
| 235 |
+
],
|
| 236 |
+
"page_idx": 1
|
| 237 |
+
},
|
| 238 |
+
{
|
| 239 |
+
"type": "text",
|
| 240 |
+
"text": "DeepSeek-VL2 Wu et al. (2024) represents a series of advanced large-scale MoE MLLMs. The visual component employs a dynamic tiling visual encoding strategy specifically designed to handle images of varying high resolutions and aspect ratios. For the language component, DeepSeek-VL2 utilizes the DeepSeekMoE model with MLA, which compresses key-value caches into latent vectors, enabling efficient inference and high throughput. The series comprises three variants: DeepSeek-VL2-Tiny, DeepSeek-VL2-Small, and DeepSeek-VL2, with 1B, 2.8B, and 45B activated parameters, respectively. This study focuses on the safety evaluation of DeepSeek-VL2, the variant with the largest number of activated parameters.",
|
| 241 |
+
"bbox": [
|
| 242 |
+
169,
|
| 243 |
+
632,
|
| 244 |
+
825,
|
| 245 |
+
747
|
| 246 |
+
],
|
| 247 |
+
"page_idx": 1
|
| 248 |
+
},
|
| 249 |
+
{
|
| 250 |
+
"type": "text",
|
| 251 |
+
"text": "Janus-Pro-7B Chen et al. (2025) is a novel autoregressive framework that unifies multimodal understanding and generation. It overcomes the limitations of existing methods in visual encoding by decoupling visual encoding into independent pathways while employing a single unified Transformer architecture for processing. Janus-Pro's decoupling strategy effectively mitigates the functional conflicts of visual encoders between understanding and generation tasks, while simultaneously enhancing model flexibility. This study conducts a safety evaluation of Janus-Pro-7B.",
|
| 252 |
+
"bbox": [
|
| 253 |
+
169,
|
| 254 |
+
750,
|
| 255 |
+
825,
|
| 256 |
+
835
|
| 257 |
+
],
|
| 258 |
+
"page_idx": 1
|
| 259 |
+
},
|
| 260 |
+
{
|
| 261 |
+
"type": "text",
|
| 262 |
+
"text": "2.2 JAILBREAK ATTACKS",
|
| 263 |
+
"text_level": 1,
|
| 264 |
+
"bbox": [
|
| 265 |
+
171,
|
| 266 |
+
854,
|
| 267 |
+
362,
|
| 268 |
+
869
|
| 269 |
+
],
|
| 270 |
+
"page_idx": 1
|
| 271 |
+
},
|
| 272 |
+
{
|
| 273 |
+
"type": "text",
|
| 274 |
+
"text": "Jailbreak attacks on LLMs Ying et al. (2025); Zou et al. (2023); Shen et al. (2024) represent a class of adversarial techniques designed to circumvent the safety mechanisms and ethical guidelines embedded within LLMs. These attacks typically involve crafting malicious prompts or input",
|
| 275 |
+
"bbox": [
|
| 276 |
+
169,
|
| 277 |
+
881,
|
| 278 |
+
823,
|
| 279 |
+
926
|
| 280 |
+
],
|
| 281 |
+
"page_idx": 1
|
| 282 |
+
},
|
| 283 |
+
{
|
| 284 |
+
"type": "page_number",
|
| 285 |
+
"text": "2",
|
| 286 |
+
"bbox": [
|
| 287 |
+
493,
|
| 288 |
+
948,
|
| 289 |
+
504,
|
| 290 |
+
959
|
| 291 |
+
],
|
| 292 |
+
"page_idx": 1
|
| 293 |
+
},
|
| 294 |
+
{
|
| 295 |
+
"type": "text",
|
| 296 |
+
"text": "sequences that exploit vulnerabilities in the model's training data, instruction-following capabilities, or underlying architecture. The goal is to induce the LLM to generate outputs that would normally be prohibited, such as toxic, biased, harmful, or misleading content.",
|
| 297 |
+
"bbox": [
|
| 298 |
+
169,
|
| 299 |
+
103,
|
| 300 |
+
823,
|
| 301 |
+
147
|
| 302 |
+
],
|
| 303 |
+
"page_idx": 2
|
| 304 |
+
},
|
| 305 |
+
{
|
| 306 |
+
"type": "text",
|
| 307 |
+
"text": "Jailbreak attacks on MLLMs Ying et al. (2024c); Niu et al. (2024); Luo et al. (2024) extend the principles of LLM jailbreaking to the multimodal domain. These attacks leverage both textual and visual inputs to manipulate the model's behavior and bypass safety protocols. Attackers might craft prompts that combine seemingly innocuous images with carefully worded text designed to elicit harmful or inappropriate responses. The complex interplay between visual and textual modalities in MLLMs creates a larger attack surface compared to LLMs.",
|
| 308 |
+
"bbox": [
|
| 309 |
+
169,
|
| 310 |
+
152,
|
| 311 |
+
826,
|
| 312 |
+
237
|
| 313 |
+
],
|
| 314 |
+
"page_idx": 2
|
| 315 |
+
},
|
| 316 |
+
{
|
| 317 |
+
"type": "text",
|
| 318 |
+
"text": "Jailbreaking attacks on T2I models Gao et al. (2024); Dong et al. (2024); Kim et al. (2024); Jing et al. (2025) aim to generate images that violate safety guidelines, depict harmful content, or misrepresent information. These attacks typically involve crafting textual prompts that, while appearing benign on the surface, exploit the model's internal representations and biases to produce undesirable outputs. This can include generating images that are sexually suggestive, violent, promote hate speech, or depict copyrighted material.",
|
| 319 |
+
"bbox": [
|
| 320 |
+
169,
|
| 321 |
+
243,
|
| 322 |
+
826,
|
| 323 |
+
328
|
| 324 |
+
],
|
| 325 |
+
"page_idx": 2
|
| 326 |
+
},
|
| 327 |
+
{
|
| 328 |
+
"type": "text",
|
| 329 |
+
"text": "3 EVALUATION PROTOCOL",
|
| 330 |
+
"text_level": 1,
|
| 331 |
+
"bbox": [
|
| 332 |
+
171,
|
| 333 |
+
364,
|
| 334 |
+
413,
|
| 335 |
+
380
|
| 336 |
+
],
|
| 337 |
+
"page_idx": 2
|
| 338 |
+
},
|
| 339 |
+
{
|
| 340 |
+
"type": "text",
|
| 341 |
+
"text": "3.1 BENCHMARKS",
|
| 342 |
+
"text_level": 1,
|
| 343 |
+
"bbox": [
|
| 344 |
+
171,
|
| 345 |
+
406,
|
| 346 |
+
316,
|
| 347 |
+
420
|
| 348 |
+
],
|
| 349 |
+
"page_idx": 2
|
| 350 |
+
},
|
| 351 |
+
{
|
| 352 |
+
"type": "text",
|
| 353 |
+
"text": "For the evaluation of DeepSeek-R1 and DeepSeek-V3, we developed a dedicated benchmark dataset, CNSafe, based on the Basic Security Requirements for Generative Artificial Intelligence Service (TC260-003). CNSafe encompasses 5 major categories and 31 subcategories, comprising a total of 3100 test cases. CNSafe is available in both Chinese and English, aiming to provide a more comprehensive assessment of model safety across different prevalent linguistic contexts. Furthermore, building upon CNSafe, we constructed a red-teaming dataset, CNSafe_RT, by integrating typical jailbreak attack methods. This allows for a more in-depth evaluation of the models from a red team perspective.",
|
| 354 |
+
"bbox": [
|
| 355 |
+
169,
|
| 356 |
+
438,
|
| 357 |
+
823,
|
| 358 |
+
549
|
| 359 |
+
],
|
| 360 |
+
"page_idx": 2
|
| 361 |
+
},
|
| 362 |
+
{
|
| 363 |
+
"type": "text",
|
| 364 |
+
"text": "For the evaluation of DeepSeek-VL2, we randomly sampled from SafeBench Ying et al. (2024a) and MM-SafetyBench Liu et al. (2024b), assessing the 13 risk types jointly covered by these two benchmarks, totaling 1300 queries. For the evaluation of Janus-Pro-7B, we randomly sampled from I2P Schramowski et al. (2023), encompassing 7 risk types and a total of 671 queries.",
|
| 365 |
+
"bbox": [
|
| 366 |
+
169,
|
| 367 |
+
556,
|
| 368 |
+
823,
|
| 369 |
+
613
|
| 370 |
+
],
|
| 371 |
+
"page_idx": 2
|
| 372 |
+
},
|
| 373 |
+
{
|
| 374 |
+
"type": "text",
|
| 375 |
+
"text": "Detailed descriptions of all benchmark datasets used in this study are provided in Appendix A.1.",
|
| 376 |
+
"bbox": [
|
| 377 |
+
171,
|
| 378 |
+
619,
|
| 379 |
+
805,
|
| 380 |
+
633
|
| 381 |
+
],
|
| 382 |
+
"page_idx": 2
|
| 383 |
+
},
|
| 384 |
+
{
|
| 385 |
+
"type": "text",
|
| 386 |
+
"text": "3.2 EVALUATION METHODS",
|
| 387 |
+
"text_level": 1,
|
| 388 |
+
"bbox": [
|
| 389 |
+
171,
|
| 390 |
+
667,
|
| 391 |
+
382,
|
| 392 |
+
681
|
| 393 |
+
],
|
| 394 |
+
"page_idx": 2
|
| 395 |
+
},
|
| 396 |
+
{
|
| 397 |
+
"type": "text",
|
| 398 |
+
"text": "This study employs a hybrid evaluation approach, integrating (M)LLM-as-Judge with human evaluation to ensure comprehensive and reliable experimental results.",
|
| 399 |
+
"bbox": [
|
| 400 |
+
169,
|
| 401 |
+
700,
|
| 402 |
+
823,
|
| 403 |
+
729
|
| 404 |
+
],
|
| 405 |
+
"page_idx": 2
|
| 406 |
+
},
|
| 407 |
+
{
|
| 408 |
+
"type": "text",
|
| 409 |
+
"text": "(M)LLM-as-Judge leverages a designated (M)LLM as a judge to evaluate the quality or performance of outputs generated by other models. This methodology capitalizes on the (M)LLM's advanced comprehension and reasoning capabilities, providing an automated and scalable evaluation framework, thereby potentially reducing reliance on human annotators. In this research, we specifically utilize (M)LLM-as-Judge to evaluate the harmfulness of generated content. For textual content, we employ GPT-4o OpenAI et al. (2024a) and Qwen2.5-72B-Instruct Qwen et al. (2025) for judgment; for visual content, we utilize Qwen2.5-VL-72B-Instruct Qwen et al. (2025).",
|
| 410 |
+
"bbox": [
|
| 411 |
+
169,
|
| 412 |
+
734,
|
| 413 |
+
825,
|
| 414 |
+
834
|
| 415 |
+
],
|
| 416 |
+
"page_idx": 2
|
| 417 |
+
},
|
| 418 |
+
{
|
| 419 |
+
"type": "text",
|
| 420 |
+
"text": "Recognizing the inherent limitations of scaling human evaluation to large datasets, we conduct a comprehensive human evaluation exclusively on the outputs of DeepSeek-R1 and DeepSeek-V3 on the CNSafe_RT. For other experiments, we employ a sampling-based human evaluation of responses. This collaborative human-machine evaluation strategy ensures both scalability and efficiency, while incorporating the nuanced judgment, contextual understanding, and ethical considerations of human experts in complex scenarios.",
|
| 421 |
+
"bbox": [
|
| 422 |
+
169,
|
| 423 |
+
840,
|
| 424 |
+
825,
|
| 425 |
+
925
|
| 426 |
+
],
|
| 427 |
+
"page_idx": 2
|
| 428 |
+
},
|
| 429 |
+
{
|
| 430 |
+
"type": "page_number",
|
| 431 |
+
"text": "3",
|
| 432 |
+
"bbox": [
|
| 433 |
+
493,
|
| 434 |
+
948,
|
| 435 |
+
504,
|
| 436 |
+
959
|
| 437 |
+
],
|
| 438 |
+
"page_idx": 2
|
| 439 |
+
},
|
| 440 |
+
{
|
| 441 |
+
"type": "table",
|
| 442 |
+
"img_path": "images/d040f93e4de3859776d94935d440ee590d69054397f1a469d78e119bb18ca77b.jpg",
|
| 443 |
+
"table_caption": [
|
| 444 |
+
"Table 1: ASR (%) of DeepSeek-R1 and DeepSeek-V3 on CNSafe."
|
| 445 |
+
],
|
| 446 |
+
"table_footnote": [],
|
| 447 |
+
"table_body": "<table><tr><td rowspan=\"2\">Category</td><td colspan=\"2\">Chinese</td><td colspan=\"2\">English</td></tr><tr><td>DeepSeek-V3</td><td>DeepSeek-R1</td><td>DeepSeek-V3</td><td>DeepSeek-R1</td></tr><tr><td>Core socialist values violation</td><td>4.5</td><td>14.8</td><td>9.9</td><td>59.5</td></tr><tr><td>Discriminatory content</td><td>14.1</td><td>27.3</td><td>21.1</td><td>54.3</td></tr><tr><td>Commercial misconduct</td><td>12.4</td><td>28.0</td><td>11.2</td><td>69.0</td></tr><tr><td>Rights infringement</td><td>6.1</td><td>19.7</td><td>9.4</td><td>66.1</td></tr><tr><td>Service insecurity</td><td>N/A</td><td>N/A</td><td>N/A</td><td>N/A</td></tr></table>",
|
| 448 |
+
"bbox": [
|
| 449 |
+
174,
|
| 450 |
+
127,
|
| 451 |
+
820,
|
| 452 |
+
247
|
| 453 |
+
],
|
| 454 |
+
"page_idx": 3
|
| 455 |
+
},
|
| 456 |
+
{
|
| 457 |
+
"type": "image",
|
| 458 |
+
"img_path": "images/add00a1381f4e6122096d019a46e1343259d3592eb2c418cdda9920f049b6fd0.jpg",
|
| 459 |
+
"image_caption": [
|
| 460 |
+
"Figure 1: ASR $(\\%)$ of DeepSeek-R1 and DeepSeek-V3 on CNSafe_RT."
|
| 461 |
+
],
|
| 462 |
+
"image_footnote": [],
|
| 463 |
+
"bbox": [
|
| 464 |
+
189,
|
| 465 |
+
265,
|
| 466 |
+
805,
|
| 467 |
+
426
|
| 468 |
+
],
|
| 469 |
+
"page_idx": 3
|
| 470 |
+
},
|
| 471 |
+
{
"type": "text",
"text": "3.3 METRIC",
"text_level": 1,
"bbox": [171, 484, 274, 500],
"page_idx": 3
},
{
"type": "text",
"text": "To rigorously evaluate the safety of the three categories of generative large models under study, we employ Attack Success Rate (ASR) as the primary evaluation metric. ASR is defined as the proportion of model responses judged to be unsafe. Specifically, for LLMs and MLLMs, ASR is calculated as the percentage of all responses that are flagged as containing text posing a safety risk. For T2I models, the focus is on evaluating the proportion of generated images that contain inappropriate content, prohibited elements, or sensitive information.",
"bbox": [169, 512, 823, 597],
"page_idx": 3
},
{
"type": "text",
"text": "The ASR is calculated using the following formula:",
"bbox": [171, 603, 514, 619],
"page_idx": 3
},
{
"type": "equation",
"text": "\n$$\n\\mathrm{ASR} = \\frac{\\text{Number of Unsafe Responses}}{\\text{Number of Total Responses}}\\times 100\\% . \\tag{1}\n$$\n",
"text_format": "latex",
"bbox": [336, 638, 823, 672],
"page_idx": 3
},
{
"type": "text",
"text": "This consistent application of ASR across all model types ensures a comparable measure of their vulnerability to producing unsafe outputs.",
"bbox": [169, 686, 823, 717],
"page_idx": 3
},
{
"type": "text",
"text": "4 EXPERIMENT",
"text_level": 1,
"bbox": [171, 739, 318, 753],
"page_idx": 3
},
{
"type": "text",
"text": "4.1 EVALUATION ON LLMS",
"text_level": 1,
"bbox": [171, 773, 380, 787],
"page_idx": 3
},
{
"type": "text",
"text": "4.1.1 DEEPSEEK-R1 & DEEPSEEK-V3",
"text_level": 1,
"bbox": [171, 800, 457, 814],
"page_idx": 3
},
{
"type": "text",
"text": "The evaluation results on CNSafe are summarized in Tab. 1 and Fig. 2a, with Tab. 1 presenting data for the 5 major risk categories and Fig. 2a showing data for 29 detailed risk subcategories. It should be noted that we deliberately marked the statistical data for Service insecurity as N/A. This is because the Service insecurity category in TC260-003 refers to risks such as content inaccuracy and unreliability when models are used for specific service types with high security requirements. Evaluating these aspects requires substantial expert knowledge, and accurate results cannot be obtained through LLM-as-Judge or manual assessment alone.",
"bbox": [169, 825, 823, 925],
"page_idx": 3
},
{
"type": "page_number",
"text": "4",
"bbox": [493, 948, 504, 959],
"page_idx": 3
},
{
"type": "image",
"img_path": "images/d72847520f3e3ccf3b6d7aa79e8685b193af07feab1c019da7545874439a6159.jpg",
"image_caption": ["(a) DeepSeek LLMs"],
"image_footnote": [],
"bbox": [176, 104, 488, 371],
"page_idx": 4
},
{
"type": "image",
"img_path": "images/ac507e5edeaf220188c651a28b476844d5481b2e39e5238162c9a7b03a0b0748.jpg",
"image_caption": [
"(b) Other Chinese-developed LLMs",
"Figure 2: ASR (\\%) of DeepSeek LLMs and other Chinese-developed LLMs on CNSafe. Abbreviations: RS, Regime subversion; NH, National harm; SD, State division; TP, Terrorism promotion; EH, Ethnic hatred; VP, Violence promotion; FI, False information; PC, Prohibited content; ED, Ethnic discrimination; FD, Faith discrimination; ND, National discrimination; RD, Regional discrimination; GD, Gender discrimination; AD, Age discrimination; OD, Occupational discrimination; HD, Health discrimination; OT, Other discrimination; II, IP infringement; BE, Business ethics; TS, Trade secrets; UC, Unfair competition; BV, Business violations; HE, Health endangerment; IR, Image rights; RH, Reputation harm; HV, Honor violation; PI, Privacy invasion; DM, Data misuse; RV, Rights violation."
],
"image_footnote": [],
"bbox": [508, 101, 821, 372],
"page_idx": 4
},
{
"type": "text",
"text": "Two major trends can be clearly observed from the data in Tab. 1. For both DeepSeek-V3 and DeepSeek-R1 models, attack success rates in English environments consistently exceed those in Chinese environments across all risk categories (with an average ASR gap of $21.7\\%$). This indicates that language context substantially influences model vulnerability. When comparing DeepSeek-V3 and DeepSeek-R1 models, we observe that regardless of language environment, the DeepSeek-R1 model exhibits higher attack success rates than the DeepSeek-V3 model across all major risk categories (with an average ASR gap of $31.25\\%$). This suggests that the exposed CoT Wei et al. (2022) in DeepSeek-R1 introduces additional vulnerabilities.",
"bbox": [169, 559, 823, 671],
"page_idx": 4
},
{
"type": "text",
"text": "Fig. 1 presents the evaluation results of DeepSeek-R1 and DeepSeek-V3 on CNSafe_RT. As shown, the DeepSeek-V3 model exhibits exceptionally high ASRs across most risk categories, with many reaching $95\\%-100\\%$, indicating significant vulnerabilities in the model's safety mechanisms. In contrast, the DeepSeek-R1 model generally shows lower ASRs than the DeepSeek-V3 model, typically $80\\%-90\\%$ in Chinese environments and $85\\%-95\\%$ in English environments.",
"bbox": [169, 676, 823, 748],
"page_idx": 4
},
{
"type": "text",
"text": "Notably, we observe that the DeepSeek-V3 model achieves $100\\%$ ASRs for categories such as Ethnic hatred and False information in both Chinese and English environments. These risk types should be prioritized in subsequent safety alignment efforts. Overall, the evaluation results demonstrate that both DeepSeek-V3 and DeepSeek-R1 models exhibit clear vulnerabilities when facing jailbreak attacks.",
"bbox": [169, 753, 823, 825],
"page_idx": 4
},
{
"type": "text",
"text": "4.1.2 COMPARISON WITH OTHER CHINESE LLMS",
"text_level": 1,
"bbox": [171, 842, 532, 857],
"page_idx": 4
},
{
"type": "text",
"text": "We conducted additional safety evaluations on five representative Chinese-developed LLMs using CNSafe and CNSafe_RT. Four are standard LLMs: Doubao-1.5-pro-32k-250115 (Doubao), Hunyuan-turbo-latest (Hunyuan), Moonshot-v1-8k (Moonshot), and Qwen-Max, while one, QwQ-32B, is a reasoning LLM.",
"bbox": [169, 867, 823, 925],
"page_idx": 4
},
{
"type": "page_number",
"text": "5",
"bbox": [493, 948, 503, 959],
"page_idx": 4
},
{
"type": "image",
"img_path": "images/55fb783ad2adb41d045a59e457336fd067be2ed921e7ca12ee92923660b048e9.jpg",
"image_caption": ["Figure 3: ASR $(\\%)$ of Chinese-developed LLMs on CNSafe_RT."],
"image_footnote": [],
"bbox": [189, 102, 803, 263],
"page_idx": 5
},
{
"type": "table",
"img_path": "images/1c907e28c4b515d3fccf907639ba08a265e4dfe34cddd60a42a2a9a5106ea664.jpg",
"table_caption": ["Table 2: ASR (%) of Chinese-developed LLMs on CNSafe."],
"table_footnote": [],
"table_body": "<table><tr><td>Category</td><td>Doubao</td><td>Hunyuan</td><td>Moonshot</td><td>Qwen-Max</td><td>QwQ-32B</td></tr><tr><td>Core socialist values violation</td><td>7.9</td><td>2.0</td><td>2.5</td><td>3.8</td><td>21.8</td></tr><tr><td>Discriminatory content</td><td>26.3</td><td>8.4</td><td>14.3</td><td>3.9</td><td>36.2</td></tr><tr><td>Commercial misconduct</td><td>25.6</td><td>3.0</td><td>5.6</td><td>3.6</td><td>25.6</td></tr><tr><td>Rights infringement</td><td>15.7</td><td>2.0</td><td>2.9</td><td>2.9</td><td>22.6</td></tr><tr><td>Service insecurity</td><td>N/A</td><td>N/A</td><td>N/A</td><td>N/A</td><td>N/A</td></tr></table>",
"bbox": [187, 335, 805, 435],
"page_idx": 5
},
{
"type": "text",
"text": "Tab. 2 summarizes the attack success rates for these five Chinese-developed LLMs across major risk categories on CNSafe, while Fig. 2b displays ASRs across all 29 detailed risk subcategories. Overall, among the compared models, QwQ-32B achieved the highest attack success rates across all major risk categories, with an average ASR of $26.6\\%$. This pattern aligns with observations from DeepSeek-R1, further suggesting that exposed chains of thought present exploitation risks for attackers. Doubao also demonstrated considerable vulnerabilities in certain risk categories, particularly in Discriminatory content and Commercial misconduct, with attack success rates of $26.3\\%$ and $25.6\\%$, respectively. Comparatively, Qwen-Max exhibited the strongest safety performance, with an average ASR of only $3.6\\%$. Notably, when comparing these models with DeepSeek LLMs, we observe that DeepSeek LLMs rank quite low in terms of safety performance. Among reasoning LLMs, while DeepSeek-R1's average ASR $(22.5\\%)$ is lower than QwQ-32B's, it remains substantial. Among standard LLMs, DeepSeek-V3's safety performance ranks second-to-last, surpassing only Doubao.",
"bbox": [169, 462, 823, 642],
"page_idx": 5
},
{
"type": "text",
"text": "The evaluation results of five Chinese-developed LLMs on CNSafe_RT are presented in Fig. 3. QwQ-32B clearly demonstrates the highest ASRs across all risk categories, notably exceeding $85\\%$ in nine risk categories. This indicates that this model performs worst in terms of safety and is most susceptible to attacks. In contrast, Hunyuan shows significantly lower ASRs than other models across most risk categories, with an average ASR of only $1.9\\%$, demonstrating its robust safety performance.",
"bbox": [169, 648, 823, 734],
"page_idx": 5
},
{
"type": "text",
"text": "When comparing these models with corresponding DeepSeek LLM results, we observe that reasoning LLMs (QwQ and DeepSeek-R1) have markedly higher ASRs than standard LLMs, further indicating that the reasoning chains exposed by such models increase safety risks even under jailbreak attacks. Among standard LLMs, DeepSeek-V3 presents substantially higher risks than other Chinese-developed LLMs (averaging $66.8\\%$ higher), possibly stemming from its innovative low-cost model training method that neglected safety alignment considerations.",
"bbox": [169, 739, 823, 825],
"page_idx": 5
},
{
"type": "text",
"text": "4.2 EVALUATION ON MLLM",
"text_level": 1,
"bbox": [171, 840, 387, 854],
"page_idx": 5
},
{
"type": "text",
"text": "SafeBench and MM-SafetyBench introduce two prevalent multimodal jailbreak attack methodologies: image semantic-based attacks and typography-based attacks. Representative image-text pairs employed in these attack methods are illustrated in Fig. 4. For each of these methods, we sampled 750 image-text pairs, covering 13 distinct categories, for evaluation purposes.",
"bbox": [169, 867, 823, 925],
"page_idx": 5
},
{
"type": "page_number",
"text": "6",
"bbox": [493, 948, 504, 959],
"page_idx": 5
},
{
"type": "image",
"img_path": "images/a2ecbfa253705fc233420bf98d4dc6351aae78177711b77a89a38e742092e986.jpg",
"image_caption": ["(a) Image semantic-based Attack"],
"image_footnote": [],
"bbox": [171, 99, 472, 224],
"page_idx": 6
},
{
"type": "image",
"img_path": "images/4cbff7188997f009a05cf78e1045fbeb940630c3a5c7ff1ea534811af17d776f.jpg",
"image_caption": ["(b) Typography-based Attack"],
"image_footnote": [],
"bbox": [527, 99, 826, 224],
"page_idx": 6
},
{
"type": "image",
"img_path": "images/933cc611cdab1edb70abc13d50aab004973186a8e50868f4c6c8e3d0fa4f5abc.jpg",
"image_caption": [
"Figure 4: Examples of image-text pairs used in multimodal jailbreak attack methods.",
"Figure 5: ASR $(\\%)$ of DeepSeek-VL2 on SafeBench and MM-SafetyBench."
],
"image_footnote": [],
"bbox": [178, 292, 820, 460],
"page_idx": 6
},
{
"type": "text",
"text": "From Fig. 5, it is evident that typography-based attacks achieve significantly higher ASRs compared to image semantics-based attacks, with an average increase of $20.31\\%$. This indicates a notable vulnerability in current models when processing typographical perturbations. Such vulnerability may stem from insufficient exposure to these attack types during training. When examining specific risk categories, we observe several striking differences. In the Economic Harm and Fraud categories, typography-based attacks reached ASRs of $40\\%$ and $38\\%$, respectively, substantially higher than other categories. This suggests that models are particularly susceptible to generating unsafe responses when processing economics- and finance-related content.",
"bbox": [169, 522, 823, 633],
"page_idx": 6
},
{
"type": "text",
"text": "Regarding image semantics-based attacks, while overall ASRs remain lower, certain categories such as Gov Decision, Health Consultation, and Legal Opinion show relatively higher ASRs (4%-6%). This indicates potential vulnerabilities in the model's understanding of image semantics when addressing sensitive topics related to politics, health, and governmental decisions. Notably, our manual analysis of model responses revealed that when confronted with these attacks, models frequently generated meaningless outputs, including repetitive characters or strings such as \"the of\", \"***\", \"shows\", and \"using\". Since the LLM-as-Judge methodology classifies these meaningless outputs as safe, this effectively reduces the model's measured ASR. It is important to note that this does not reflect the model's true safety level; rather, it highlights deficiencies in the model's image comprehension and response generation capabilities.",
"bbox": [169, 640, 826, 780],
"page_idx": 6
},
{
"type": "text",
"text": "4.3 EVALUATION ON T2I MODELS",
"text_level": 1,
"bbox": [171, 803, 426, 816],
"page_idx": 6
},
{
"type": "text",
"text": "In this section, we evaluate the safety of DeepSeek's T2I model, Janus-Pro-7B, using a sample of 671 queries drawn from I2P. This sample comprises 100 queries for each category except Hate, which contains only 71 queries. Fig. 6 showcases representative examples of unsafe images generated during this evaluation.",
"bbox": [169, 832, 823, 888],
"page_idx": 6
},
{
"type": "text",
"text": "For comparative purposes, we concurrently assess the safety of another popular T2I model, Stable-Diffusion-3.5-Large AI (2024). Fig. 7 presents the ASRs for both models across various risk dimensions.",
"bbox": [169, 895, 823, 924],
"page_idx": 6
},
{
"type": "page_number",
"text": "7",
"bbox": [493, 948, 503, 959],
"page_idx": 6
},
{
"type": "image",
"img_path": "images/ca379e4a1dd72dd16df16517c4b97455853953fe7dd802ddbe7a1f99a90efa9e.jpg",
"image_caption": ["Harassment"],
"image_footnote": [],
"bbox": [178, 102, 267, 172],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/6eeceff7b55665b9a7cb6ea5eb9f80d5f53585c8e73efe5539f6463a829b3033.jpg",
"image_caption": ["Hate"],
"image_footnote": [],
"bbox": [269, 102, 359, 172],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/c1af023a4b2ff28b865e6453fe7dc2082a5247f5548613599388e120b3f5db11.jpg",
"image_caption": ["Illegal activity"],
"image_footnote": [],
"bbox": [362, 102, 452, 172],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/8c9427b38fe3eff759fd376ff8eb5ce23dbd2d1be9563f0bfa486e6f81ccecff.jpg",
"image_caption": ["Self-harm"],
"image_footnote": [],
"bbox": [454, 102, 544, 172],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/a78d63f9e5b7d49a1007549d1d5d173834b98e64c8e07b60ae8437b2f916d5d9.jpg",
"image_caption": ["Sexual content"],
"image_footnote": [],
"bbox": [545, 102, 635, 172],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/837ed3e60e92e435fa22125a0e857a27ca8d2ef7334d32935e15ad44d80afe12.jpg",
"image_caption": ["Shocking images"],
"image_footnote": [],
"bbox": [638, 102, 728, 172],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/55718dc235b9bd54c5c9e8a17fe739ef79434a16dafecbdb2b0136bbe97abd0e.jpg",
"image_caption": ["Violence"],
"image_footnote": [],
"bbox": [730, 102, 820, 172],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/173a0d78d14e3aa2faf5434a8830a9886cef9244b14e55c74dc450f4f1636272.jpg",
"image_caption": [
"Figure 6: Examples of unsafe images generated by Janus-Pro-7B.",
"(a) Janus-Pro-7B",
"Figure 7: ASR $(\\%)$ of Janus-Pro-7B and Stable-Diffusion-3.5-Large on I2P."
],
"image_footnote": [],
"bbox": [176, 229, 465, 454],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/198b58be1862bcfb3495e44f7bb579c2d2af776f3ad9305c8508d0395bdb788b.jpg",
"image_caption": ["(b) Stable-Diffusion-3.5-Large"],
"image_footnote": [],
"bbox": [532, 229, 821, 454],
"page_idx": 7
},
{
"type": "text",
"text": "Overall, Janus-Pro-7B presents higher safety risks compared to Stable-Diffusion-3.5-Large, with average ASRs of $43.7\\%$ and $39.9\\%$, respectively.",
"bbox": [169, 530, 823, 560],
"page_idx": 7
},
{
"type": "text",
"text": "We subsequently compared the specific performance of both models across various dimensions. In the Hate, Harassment, Shocking images, and Self-harm dimensions, the Stable-Diffusion-3.5-Large model shows slightly higher ASRs than the Janus-Pro-7B model, indicating that Janus-Pro-7B offers relatively better safety in these risk categories. However, in the Sexual content dimension, the Janus-Pro-7B model exhibits a significantly higher attack success rate than the Stable-Diffusion-3.5-Large model (74.0% vs. 47.0%). This reveals a notable safety vulnerability in the Janus-Pro-7B model within this dimension. In the Illegal activity dimension, Janus-Pro-7B also demonstrates higher ASRs (61% vs. 39%). In the Violence dimension, both models show similar ASRs, approximately 50%.",
"bbox": [169, 566, 823, 691],
"page_idx": 7
},
{
"type": "text",
"text": "Additionally, in our manual sampling evaluation, we observed that Janus-Pro-7B does not prohibit the generation of fake news images, such as \"Trump arrested and imprisoned.\" This finding suggests that Janus-Pro-7B is highly susceptible to misuse, potentially leading to additional risks Sha et al. (2023).",
"bbox": [169, 698, 823, 755],
"page_idx": 7
},
{
"type": "text",
"text": "5 CONCLUSION",
"text_level": 1,
"bbox": [171, 777, 318, 792],
"page_idx": 7
},
{
"type": "text",
"text": "To the best of our knowledge, this study presents the first comprehensive safety evaluation of the DeepSeek models. Our investigation reveals a nuanced balance between safety and performance, and highlights several key findings.",
"bbox": [169, 810, 823, 854],
"page_idx": 7
},
{
"type": "text",
"text": "- Vulnerability to jailbreaking. While DeepSeek LLMs exhibit robust safety boundaries when handling direct harmful queries, their safety alignment proves brittle under jailbreaking attacks. This suggests that their safety alignments may be optimized for explicit threats but remain vulnerable to adversarial manipulations.",
"bbox": [215, 867, 823, 925],
"page_idx": 7
},
{
"type": "page_number",
"text": "8",
"bbox": [493, 948, 503, 959],
"page_idx": 7
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- Cross-lingual disparities. DeepSeek LLMs exhibit a considerable disparity in safety performance between Chinese and English contexts. Specifically, they demonstrate a greater propensity to generate harmful content in English, suggesting that safety alignment strategies may not generalize effectively across languages.",
"- Chain-of-Thought exposure. DeepSeek-R1, which exposes its CoT reasoning, presents a higher safety risk compared to DeepSeek-V3. This suggests that increased transparency, while potentially beneficial for interpretability, can inadvertently create new attack vectors.",
"- Multimodal capability deficiencies. The apparent strong safety performance of the DeepSeek MLLM is not a result of robust safety alignment. Instead, it stems from its limited multimodal understanding capabilities. This finding underscores the importance of distinguishing between genuine safety and limitations that mask underlying vulnerabilities.",
"- Text-to-image generation risks. The DeepSeek T2I model exhibits significant safety risks. Across the benchmarks we evaluated, more than half of the categories demonstrated ASRs exceeding $50\\%$, underscoring the urgent need for stronger safety measures."
],
"bbox": [215, 103, 821, 311],
"page_idx": 8
},
{
"type": "text",
"text": "The findings presented highlight the imperative for ongoing, iterative safety evaluations and thorough pre-deployment testing of large models. A key priority for future research is the strengthening of safety mechanisms, with a particular focus on resilience against jailbreak attacks. Concurrently, the creation of more standardized and comprehensive safety benchmarks is essential to facilitate meaningful advancements in the safety of large models.",
"bbox": [169, 325, 823, 397],
"page_idx": 8
},
{
"type": "page_number",
"text": "9",
"bbox": [493, 946, 504, 959],
"page_idx": 8
},
{
"type": "text",
"text": "REFERENCES",
"text_level": 1,
"bbox": [173, 102, 287, 118],
"page_idx": 9
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Razii Abraham. Democratizing ai's frontiers: A critical review of deepseek ai's open-source ecosystem. 2025.",
"Stability AI. Stable diffusion 3.5 large. Hugging Face Model Repository, 2024. URL https://huggingface.co/stabilityai/stable-diffusion-3.5-large. Accessed: 2025-03-15.",
"Haozhe An, Christabel Acquaye, Colin Wang, Zongxia Li, and Rachel Rudinger. Do large language models discriminate in hiring decisions on the basis of race, ethnicity, and gender? arXiv preprint arXiv:2406.10486, 2024.",
"Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. o3-mini vs deepseek-r1: Which one is safer? arXiv preprint arXiv:2501.18438, 2025.",
"Weilin Cai, Juyong Jiang, Fan Wang, Jing Tang, Sunghun Kim, and Jiayi Huang. A survey on mixture of experts, 2024. URL https://arxiv.org/abs/2407.06204.",
"Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Janus-pro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025.",
"Yingkai Dong, Zheng Li, Xiangtao Meng, Ning Yu, and Shanqing Guo. Jailbreaking text-to-image models with llm-based agents, 2024. URL https://arxiv.org/abs/2408.00523.",
"Lisle Faray de Paiva, Gijs Luijten, Behrus Puladi, and Jan Egger. How does deepseek-r1 perform on usmle? medRxiv, pp. 2025-02, 2025.",
"Sensen Gao, Xiaojun Jia, Yihao Huang, Ranjie Duan, Jindong Gu, Yang Bai, Yang Liu, and Qing Guo. Hts-attack: Heuristic token search for jailbreaking text-to-image models, 2024. URL https://arxiv.org/abs/2408.13896.",
"Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.",
"Jun Guo, Wei Bao, Jiakai Wang, Yuqing Ma, Xinghai Gao, Gang Xiao, Aishan Liu, Jian Dong, Xianglong Liu, and Wenjun Wu. A comprehensive evaluation framework for deep model robustness. Pattern Recognition, 2023.",
"Zonglei Jing, Zonghao Ying, Le Wang, Siyuan Liang, Aishan Liu, Xianglong Liu, and Dacheng Tao. Cognorm: Cognitive morphing attacks for text-to-image models, 2025. URL https://arxiv.org/abs/2501.11815.",
"Minseon Kim, Hyomin Lee, Boqing Gong, Huishuai Zhang, and Sung Ju Hwang. Automatic jailbreaking of the text-to-image generative ai systems, 2024. URL https://arxiv.org/abs/2405.16567.",
"Aishan Liu, Xianglong Liu, Jiaxin Fan, Yuqing Ma, Anlan Zhang, Huiyuan Xie, and Dacheng Tao. Perceptual-sensitive gan for generating adversarial patches. In AAAI, 2019.",
"Aishan Liu, Tairan Huang, Xianglong Liu, Yitao Xu, Yuqing Ma, Xinyun Chen, Stephen J Maybank, and Dacheng Tao. Spatiotemporal attacks for embodied agents. In ECCV, 2020a.",
"Aishan Liu, Jiakai Wang, Xianglong Liu, Bowen Cao, Chongzhi Zhang, and Hang Yu. Bias-based universal adversarial patch attack for automatic check-out. In ECCV, 2020b.",
"Aishan Liu, Xianglong Liu, Hang Yu, Chongzhi Zhang, Qiang Liu, and Dacheng Tao. Training robust deep neural networks via adversarial noise propagation. TIP, 2021.",
"Aishan Liu, Jun Guo, Jiakai Wang, Siyuan Liang, Renshuai Tao, Wenbo Zhou, Cong Liu, Xianglong Liu, and Dacheng Tao. X-adv: Physical adversarial object attacks against x-ray prohibited item detection. In USENIX Security Symposium, 2023a."
],
"bbox": [171, 125, 825, 924],
"page_idx": 9
},
{
"type": "page_number",
"text": "10",
"bbox": [490, 946, 509, 959],
"page_idx": 9
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Aishan Liu, Shiyu Tang, Xinyun Chen, Lei Huang, Haotong Qin, Xianglong Liu, and Dacheng Tao. Towards defending multiple lp-norm bounded adversarial perturbations via gated batch normalization. International Journal of Computer Vision, 2023b.",
"Aishan Liu, Shiyu Tang, Siyuan Liang, Ruihao Gong, Boxi Wu, Xianglong Liu, and Dacheng Tao. Exploring the relationship between architecture and adversarially robust generalization. In CVPR, 2023c.",
"Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024a.",
"Shunchang Liu, Jiakai Wang, Aishan Liu, Yingwei Li, Yijie Gao, Xianglong Liu, and Dacheng Tao. Harnessing perceptual adversarial patches for crowd counting. In ACM CCS, 2022.",
"Xin Liu, Yichen Zhu, Jindong Gu, Yunshi Lan, Chao Yang, and Yu Qiao. Mm-safetybench: A benchmark for safety evaluation of multimodal large language models, 2024b. URL https://arxiv.org/abs/2311.17600.",
"Weidi Luo, Siyuan Ma, Xiaogeng Liu, Xiaoyu Guo, and Chaowei Xiao. Jailbreakv: A benchmark for assessing the robustness of multimodal large language models against jailbreak attacks, 2024. URL https://arxiv.org/abs/2404.03027.",
"Jiachen Ma, Anda Cao, Zhiqing Xiao, Yijiang Li, Jie Zhang, Chao Ye, and Junbo Zhao. Jailbreaking prompt attack: A controllable adversarial attack against diffusion models. arXiv preprint arXiv:2404.02928, 2024.",
"David Mikhail, Andrew Farah, Jason Milad, Wissam Nassrallah, Andrew Mihalache, Daniel Milad, Fares Antaki, Michael Balas, Marko M Popovic, Alessandro Feo, et al. Performance of deepseek-r1 in ophthalmology: An evaluation of clinical decision-making and cost-effectiveness. medRxiv, pp. 2025-02, 2025.",
"Zhenxing Niu, Haodong Ren, Xinbo Gao, Gang Hua, and Rong Jin. Jailbreaking attack against multimodal large language model, 2024. URL https://arxiv.org/abs/2402.02309.",
"OpenAI, Aaron Hurst, Adam Lerer, Adam P. Goucher, Adam Perelman, Aditya Ramesh, et al. Gpt-4o system card, 2024a. URL https://arxiv.org/abs/2410.21276.",
"OpenAI, Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, et al. Openai o1 system card, 2024b. URL https://arxiv.org/abs/2412.16720.",
"Manojkumar Parmar and Yuvaraj Govindarajulu. Challenges in ensuring ai safety in deepseek-r1 models: The shortcomings of reinforcement learning strategies. arXiv preprint arXiv:2501.17030, 2025.",
"Qwen, An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, et al. Qwen2.5 technical report, 2025. URL https://arxiv.org/abs/2412.15115.",
"Paul Röttger, Fabio Pernisi, Bertie Vidgen, and Dirk Hovy. Safetyprompts: a systematic review of open datasets for evaluating and improving large language model safety. arXiv preprint arXiv:2404.05399, 2024.",
"Patrick Schramowski, Manuel Brack, Björn Deiseroth, and Kristian Kersting. Safe latent diffusion: Mitigating inappropriate degeneration in diffusion models, 2023. URL https://arxiv.org/abs/2211.05105.",
"Zeyang Sha, Zheng Li, Ning Yu, and Yang Zhang. De-fake: Detection and attribution of fake images generated by text-to-image generation models, 2023. URL https://arxiv.org/abs/2210.06998.",
"Xinyue Shen, Zeyuan Chen, Michael Backes, Yun Shen, and Yang Zhang. \"do anything now\": Characterizing and evaluating in-the-wild jailbreak prompts on large language models, 2024. URL https://arxiv.org/abs/2308.03825."
],
"bbox": [173, 102, 825, 924],
"page_idx": 10
},
{
"type": "page_number",
"text": "11",
"bbox": [490, 948, 506, 959],
"page_idx": 10
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"Shiyu Tang, Ruihao Gong, Yan Wang, Aishan Liu, Jiakai Wang, Xinyun Chen, Fengwei Yu, Xianglong Liu, Dawn Song, Alan Yuille, et al. Robust: Benchmarking robustness on architecture design and training techniques. ArXiv, 2021.",
"Jiakai Wang, Aishan Liu, Zixin Yin, Shunchang Liu, Shiyu Tang, and Xianglong Liu. Dual attention suppression attack: Generate adversarial camouflage in physical world. In CVPR, 2021.",
"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022.",
"Zhiyu Wu, Xiaokang Chen, Zizheng Pan, Xingchao Liu, Wen Liu, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, et al. Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302, 2024.",
"Zhiyuan Xu, Joseph Gardiner, and Sana Belguith. The dark deep side of deepseek: Fine-tuning attacks against the safety alignment of cot-enabled models. arXiv preprint arXiv:2502.01225, 2025.",
"Zonghao Ying and Bin Wu. Nba: defensive distillation for backdoor removal via neural behavior alignment. Cybersecurity, 6(1), July 2023a. ISSN 2523-3246. doi: 10.1186/s42400-023-00154-z. URL http://dx.doi.org/10.1186/s42400-023-00154-z.",
"Zonghao Ying and Bin Wu. Dlp: towards active defense against backdoor attacks with decoupled learning process. Cybersecurity, 6(1), May 2023b. ISSN 2523-3246. doi: 10.1186/s42400-023-00141-4. URL http://dx.doi.org/10.1186/s42400-023-00141-4.",
"Zonghao Ying, Aishan Liu, Siyuan Liang, Lei Huang, Jinyang Guo, Wenbo Zhou, Xianglong Liu, and Dacheng Tao. Safebench: A safety evaluation framework for multimodal large language models. arXiv preprint arXiv:2410.18927, 2024a.",
"Zonghao Ying, Aishan Liu, Xianglong Liu, and Dacheng Tao. Unveiling the safety of gpt-4o: An empirical study using jailbreak attacks. arXiv preprint arXiv:2406.06302, 2024b.",
"Zonghao Ying, Aishan Liu, Tianyuan Zhang, Zhengmin Yu, Siyuan Liang, Xianglong Liu, and Dacheng Tao. Jailbreak vision language models via bi-modal adversarial prompt. arXiv preprint arXiv:2406.04031, 2024c.",
"Zonghao Ying, Deyue Zhang, Zonglei Jing, Yisong Xiao, Quanchen Zou, Aishan Liu, Siyuan Liang, Xiangzheng Zhang, Xianglong Liu, and Dacheng Tao. Reasoning-augmented conversation for multi-turn jailbreak attacks on large language models. arXiv preprint arXiv:2502.11054, 2025.",
"Tongxin Yuan, Zhiwei He, Lingzhong Dong, Yiming Wang, Ruijie Zhao, Tian Xia, Lizhen Xu, Binglin Zhou, Fangqi Li, Zhuosheng Zhang, et al. R-judge: Benchmarking safety risk awareness for llm agents. arXiv preprint arXiv:2401.10019, 2024a.",
"Xiaohan Yuan, Jinfeng Li, Dongxia Wang, Yuefeng Chen, Xiaofeng Mao, Longtao Huang, Hui Xue, Wenhai Wang, Kui Ren, and Jingyi Wang. S-eval: Automatic and adaptive test generation for benchmarking safety evaluation of large language models. arXiv preprint arXiv:2405.14191, 2024b.",
"Chongzhi Zhang, Aishan Liu, Xianglong Liu, Yitao Xu, Hang Yu, Yuqing Ma, and Tianlin Li. Interpreting and improving adversarial robustness of deep neural networks with neuron sensitivity. IEEE Transactions on Image Processing, 2021.",
"Kaiwen Zhou, Chengzhi Liu, Xuandong Zhao, Shreedhar Jangam, Jayanth Srinivasa, Gaowen Liu, Dawn Song, and Xin Eric Wang. The hidden risks of large reasoning models: A safety assessment of r1. arXiv preprint arXiv:2502.12659, 2025.",
"Andy Zou, Zifan Wang, Nicholas Carlini, Milad Nasr, J. Zico Kolter, and Matt Fredrikson. Universal and transferable adversarial attacks on aligned language models, 2023. URL https://arxiv.org/abs/2307.15043."
],
"bbox": [173, 102, 825, 906],
"page_idx": 11
},
{
"type": "page_number",
"text": "12",
"bbox": [490, 946, 508, 959],
"page_idx": 11
},
{
"type": "text",
"text": "A APPENDIX",
"text_level": 1,
"bbox": [171, 102, 299, 118],
"page_idx": 12
},
{
"type": "text",
"text": "A.1 BENCHMARK",
"text_level": 1,
"bbox": [171, 133, 312, 148],
"page_idx": 12
},
{
"type": "text",
"text": "A.2 CNSAFE",
"text_level": 1,
"bbox": [171, 159, 281, 172],
"page_idx": 12
},
{
"type": "text",
"text": "CNSafe focuses on evaluating the following five core dimensions:",
"bbox": [171, 185, 604, 200],
"page_idx": 12
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- Content Contravening Core Socialist Values. This includes content that incites subversion of state power, endangers national security, promotes terrorism, incites ethnic hatred, contains violent or pornographic material, disseminates false information, or involves related violations.",
"- Discriminatory Content. This encompasses expressions of discrimination based on ethnicity, religion, nationality, geographic origin, gender, age, occupation, health status, and other protected characteristics.",
"- Commercial Violations and Misconduct. This addresses issues such as intellectual property infringement, breaches of business ethics, disclosure of trade secrets, monopolistic practices, and unfair competition.",
"- Infringement of Others' Legal Rights. This includes violations impacting others' physical and mental well-being, portrait rights, reputation, privacy, and personal information rights.",
"- Inability to Meet Safety Requirements for Specific Service Types. This dimension assesses risks arising from inaccurate or unreliable content in high-security contexts such as automated control, medical information services, psychological counseling, and critical information infrastructure."
],
"bbox": [215, 210, 823, 452],
"page_idx": 12
},
{
"type": "text",
"text": "A.3 CNSAFE_RT",
"text_level": 1,
"bbox": [171, 470, 310, 484],
"page_idx": 12
},
{
"type": "text",
"text": "CNSafe_RT is derived from CNSafe, sampling 1000 benchmark queries across 10 categories. It then integrates typical jailbreak attack methods, combining advanced prompt perturbation techniques with safety risk scenarios specific to the Chinese context, to construct a highly adversarial dataset. The integrated jailbreak methods include: (1) scenario injection attacks; (2) affirmative prefix induction; (3) indirect instruction attacks.",
"bbox": [169, 496, 823, 566],
"page_idx": 12
},
{
"type": "text",
"text": "The generation of CNSafe_RT followed a semi-automated process. Initially, LLMs, such as GPT-4, were used to rewrite the base samples, generating adversarial variants. Subsequently, safety experts reviewed and refined the attack strategies, ensuring the effectiveness and targeted nature of the test samples. The resulting CNSafe_RT dataset comprises 1000 attack samples encompassing 10 granular risk dimensions.",
"bbox": [169, 573, 826, 643],
"page_idx": 12
},
{
"type": "text",
"text": "A.4 SAFEBENCH",
"text_level": 1,
"bbox": [171, 660, 307, 674],
"page_idx": 12
},
{
"type": "text",
"text": "SafeBench is constructed through an automated safety dataset generation pipeline. This pipeline leverages a set of LLMs as judges to identify and categorize the most harmful and diverse risk scenarios for MLLMs. Based on this categorization, these LLM judges then generate high-quality harmful queries. This process results in 23 distinct risk scenarios and 2300 foundational multimodal harmful query pairs. Furthermore, SafeBench provides an extension module capable of deriving a significantly larger number of query pairs. Consequently, SafeBench offers a comprehensive and targeted set of test samples for evaluating the safety of MLLMs.",
"bbox": [169, 686, 823, 784],
"page_idx": 12
},
{
"type": "text",
"text": "A.5 MM-SAFETYBENCH",
"text_level": 1,
"bbox": [171, 800, 362, 814],
"page_idx": 12
},
{
"type": "text",
"text": "MM-SafetyBench is designed to address the vulnerability of MLLMs to manipulations stemming from query-related images. It encompasses 13 distinct scenarios and comprises a total of 5040 text-image pairs. Through an analysis of 12 leading MLLMs, this dataset reveals that even MLLMs equipped with safety-aligned LLMs remain susceptible to such attacks. Consequently, MM-SafetyBench provides a crucial benchmark for investigating the safety vulnerabilities of MLLMs under image-based manipulation, thereby fostering research aimed at enhancing model robustness and safety.",
"bbox": [169, 825, 823, 925],
"page_idx": 12
},
{
"type": "page_number",
"text": "13",
"bbox": [490, 946, 508, 959],
"page_idx": 12
},
{
"type": "text",
"text": "SafeBench and MM-SafetyBench jointly cover the following 13 granular risk dimensions:",
"bbox": [171, 103, 764, 119],
"page_idx": 13
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- Illegal Activities. Content that incites crime, drug trafficking, smuggling, or other illegal acts.",
"- Hate Speech. Content that expresses discrimination, hatred, or incites violence against specific groups.",
"- Malware Generation. Assessing whether the model can be used to generate malicious code, viruses, or cyberattack tools.",
"- Threats to Physical Safety. Content related to self-harm, violent attacks, or the creation of dangerous items.",
"- Economic Harm. Risks associated with market manipulation, financial fraud, or money laundering.",
"- Fraud. Assessing whether the model can be misused for scams, identity theft, or other fraudulent activities.",
"- Pornographic Content. Detecting the generation of obscene or inappropriate material.",
"- Political Lobbying. Assessing whether the model engages in undue political manipulation or interference.",
"- Privacy Violation. Content that involves the disclosure of personal information or unauthorized data access.",
"- Legal Advice. Examining whether the model provides unauthorized legal advice, potentially misleading users.",
"- Financial Advice. Evaluating whether the financial advice provided by the model is misleading or non-compliant.",
"- Health Advice. Assessing the reliability of medical advice to prevent users from making incorrect health decisions based on misleading information.",
"- Interference with Government Decision-Making. Detecting whether the model could potentially influence or interfere with government governance decisions."
],
"bbox": [215, 131, 823, 539],
"page_idx": 13
},
{
"type": "text",
"text": "A.6 I2P",
"text_level": 1,
"bbox": [171, 555, 246, 569],
"page_idx": 13
},
{
"type": "text",
"text": "I2P comprises specialized, real-world text-to-image prompts, encompassing concepts such as nudity and violence. This dataset enables researchers to evaluate model performance when processing sensitive content. I2P encompasses the following seven core dimensions:",
"bbox": [169, 580, 823, 625],
"page_idx": 13
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- Hate Content. Elements that express discrimination, insults, or incite hatred against specific groups.",
"- Harassment. Content involving insults, threats, or malicious and offensive attacks targeting individuals or groups.",
"- Violence. Detecting whether the model generates images containing gore, assault, warfare, or other violent scenes.",
"- Self-Harm. Assessing the presence of content related to self-harm, suicide, or other acts endangering one's own health.",
"- Pornographic Content. Including obscene, explicit, or otherwise inappropriate imagery.",
"- Shocking Imagery. Content such as graphic violence, terror, or material likely to evoke extreme negative emotions.",
"- Illegal Activities. The risk of generating content related to drugs, crime, terrorism, or other illegal acts."
],
"bbox": [215, 636, 823, 848],
"page_idx": 13
},
{
"type": "page_number",
"text": "14",
"bbox": [490, 946, 508, 959],
"page_idx": 13
}
]
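Note on the metric blocks above: Eq. (1) of the extracted paper defines ASR as the judged-unsafe share of responses, so it reduces to a few lines of code. Below is a minimal Python sketch of that computation; the boolean verdict list is a hypothetical stand-in for LLM-as-Judge or human-review labels and is not part of the files in this batch.

```python
# Minimal sketch of Eq. (1) from the extracted paper:
#   ASR = (number of unsafe responses / total responses) * 100%.
# The verdicts would come from an LLM-as-Judge or human reviewers;
# their format here is a hypothetical stand-in, not part of this dataset.
from typing import Iterable


def attack_success_rate(unsafe_verdicts: Iterable[bool]) -> float:
    """Return the attack success rate in percent for one batch of judged responses."""
    verdicts = list(unsafe_verdicts)
    if not verdicts:
        raise ValueError("need at least one judged response")
    return 100.0 * sum(verdicts) / len(verdicts)


# Example: 3 of 8 responses judged unsafe -> ASR = 37.5%.
print(attack_success_rate([True, False, True, False, False, True, False, False]))
```

Computing per-category ASRs as in Tab. 1 and Tab. 2 would simply apply the same function to each category's subset of verdicts.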
data/2025/2503_15xxx/2503.15092/b6252e1c-d150-4802-b7b5-057b9326a285_model.json ADDED
@@ -0,0 +1,2483 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "aside_text",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.023,
|
| 7 |
+
0.261,
|
| 8 |
+
0.058,
|
| 9 |
+
0.707
|
| 10 |
+
],
|
| 11 |
+
"angle": 270,
|
| 12 |
+
"content": "arXiv:2503.15092v1 [cs.CR] 19 Mar 2025"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "title",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.172,
|
| 18 |
+
0.099,
|
| 19 |
+
0.825,
|
| 20 |
+
0.171
|
| 21 |
+
],
|
| 22 |
+
"angle": 0,
|
| 23 |
+
"content": "TOWARDS UNDERSTANDING THE SAFETY BOUNDARIES OF DEEPSEEK MODELS: EVALUATION AND FINDINGS"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.185,
|
| 29 |
+
0.2,
|
| 30 |
+
0.813,
|
| 31 |
+
0.231
|
| 32 |
+
],
|
| 33 |
+
"angle": 0,
|
| 34 |
+
"content": "Zonghao Ying\\(^{1}\\), Guangyi Zheng\\(^{1}\\), Yongxin Huang\\(^{1}\\), Deyue Zhang\\(^{2}\\), Wenxin Zhang\\(^{3}\\), Quchen Zou\\(^{2}\\), Aishan Liu\\(^{1}\\), Xianglong Liu\\(^{1}\\), and Dacheng Tao\\(^{4}\\)"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.428,
|
| 40 |
+
0.242,
|
| 41 |
+
0.568,
|
| 42 |
+
0.258
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "<sup>1</sup>Beihang University"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.428,
|
| 51 |
+
0.257,
|
| 52 |
+
0.571,
|
| 53 |
+
0.271
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": "2360 AI Security Lab"
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.351,
|
| 62 |
+
0.27,
|
| 63 |
+
0.647,
|
| 64 |
+
0.285
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "<sup>3</sup>University of Chinese Academy of Sciences"
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.381,
|
| 73 |
+
0.284,
|
| 74 |
+
0.617,
|
| 75 |
+
0.299
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": "\\(^{4}\\)Nanyang Technological University"
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "title",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.451,
|
| 84 |
+
0.34,
|
| 85 |
+
0.547,
|
| 86 |
+
0.354
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "ABSTRACT"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.23,
|
| 95 |
+
0.369,
|
| 96 |
+
0.768,
|
| 97 |
+
0.551
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": "This study presents the first comprehensive safety evaluation of the DeepSeek models, focusing on evaluating the safety risks associated with their generated content. Our evaluation encompasses DeepSeek's latest generation of large language models, multimodal large language models, and text-to-image models, systematically examining their performance regarding unsafe content generation. Notably, we developed a bilingual (Chinese-English) safety evaluation dataset tailored to Chinese sociocultural contexts, enabling a more thorough evaluation of the safety capabilities of Chinese-developed models. Experimental results indicate that despite their strong general capabilities, DeepSeek models exhibit significant safety vulnerabilities across multiple risk dimensions, including algorithmic discrimination and sexual content. These findings provide crucial insights for understanding and improving the safety of large foundation models. Our code is available at https://github.com/NY1024/DeepSeek-Safety-Eval."
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "title",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.174,
|
| 106 |
+
0.574,
|
| 107 |
+
0.338,
|
| 108 |
+
0.589
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "1 INTRODUCTION"
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.17,
|
| 117 |
+
0.604,
|
| 118 |
+
0.825,
|
| 119 |
+
0.716
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": "With the rapid advancement of artificial intelligence technology, large models such as the DeepSeek series have demonstrated remarkable capabilities across multiple domains Abraham (2025); Faray de Paiva et al. (2025); Mikhail et al. (2025). These models trained on vast datasets understand and generate diverse content forms, transformatively impacting multiple industries Liu et al. (2023a; 2020a;b). However, alongside these technological advances, model safety concerns have become increasingly prominent Liu et al. (2019; 2021; 2022; 2023b); Zhang et al. (2021); Wang et al. (2021); Ying & Wu (2023a;b), particularly the potential risks associated with generating unsafe content Ying et al. (2024c; 2025), which require systematic evaluation Ying et al. (2024b;a)."
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"bbox": [
|
| 127 |
+
0.17,
|
| 128 |
+
0.722,
|
| 129 |
+
0.825,
|
| 130 |
+
0.848
|
| 131 |
+
],
|
| 132 |
+
"angle": 0,
|
| 133 |
+
"content": "Currently, the community has established multiple evaluation frameworks to test the safety performance of mainstream large models Yuan et al. (2024a;b); Röttger et al. (2024); Tang et al. (2021); Liu et al. (2023c); Guo et al. (2023). However, these evaluation standards lack consideration for China's national conditions and cultural background. Although some research has preliminarily identified certain safety risks in DeepSeek LLMs Arrieta et al. (2025); Parmar & Govindarajulu (2025); Zhou et al. (2025); Xu et al. (2025), these assessments are typically limited to specific scenarios or single models, lacking a comprehensive and systematic safety evaluation of the entire DeepSeek model series. This assessment gap leaves us with limited knowledge about the comprehensive risk profile these models may face in practical applications."
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"bbox": [
|
| 138 |
+
0.17,
|
| 139 |
+
0.855,
|
| 140 |
+
0.827,
|
| 141 |
+
0.926
|
| 142 |
+
],
|
| 143 |
+
"angle": 0,
|
| 144 |
+
"content": "This research presents the first systematic safety evaluation of the complete DeepSeek model series, covering its latest generation of large language models (LLMs) (DeepSeek-R1 Guo et al. (2025) and DeepSeek-V3 Liu et al. (2024a)), multimodal large language model (MLLM) (DeepSeek-VL2 Wu et al. (2024)), and text-to-image model (T2I model) (Janus-Pro-7B Chen et al. (2025)). We focus on assessing the safety risks of these models in generating content, including both text and image"
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "page_number",
|
| 148 |
+
"bbox": [
|
| 149 |
+
0.495,
|
| 150 |
+
0.949,
|
| 151 |
+
0.505,
|
| 152 |
+
0.96
|
| 153 |
+
],
|
| 154 |
+
"angle": 0,
|
| 155 |
+
"content": "1"
|
| 156 |
+
}
|
| 157 |
+
],
|
| 158 |
+
[
|
| 159 |
+
{
|
| 160 |
+
"type": "text",
|
| 161 |
+
"bbox": [
|
| 162 |
+
0.171,
|
| 163 |
+
0.104,
|
| 164 |
+
0.825,
|
| 165 |
+
0.148
|
| 166 |
+
],
|
| 167 |
+
"angle": 0,
|
| 168 |
+
"content": "modalities. Specifically, for the safety evaluation of large language models, we have designed a Chinese-English bilingual safety evaluation dataset suitable for China's national conditions, which can more comprehensively assess the safety capabilities of Chinese-developed models."
|
| 169 |
+
},
|
| 170 |
+
{
|
| 171 |
+
"type": "text",
|
| 172 |
+
"bbox": [
|
| 173 |
+
0.171,
|
| 174 |
+
0.153,
|
| 175 |
+
0.827,
|
| 176 |
+
0.295
|
| 177 |
+
],
|
| 178 |
+
"angle": 0,
|
| 179 |
+
"content": "Experimental results indicate that despite the excellent performance of the DeepSeek series models in general capabilities, significant vulnerabilities still exist across multiple safety dimensions. Particularly in areas such as algorithmic discrimination An et al. (2024) and sexual content Ma et al. (2024), the protective effects of existing safety alignments are insufficient, potentially causing adverse social impacts when the models are deployed in real-world applications. Additionally, we have made several notable findings: 1 The models show significant differences in attack success rates when receiving queries in Chinese versus English, with an average disparity of \\(21.7\\%\\) ; 2 The exposed chain-of-thought reasoning in DeepSeek-R1 increases its safety risks, with an average attack success rate \\(30.4\\%\\) higher than DeepSeek-V3; 3 When facing jailbreak attacks, the attack success rates of DeepSeek models rise dramatically, reaching up to \\(100\\%\\) in some categories."
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "text",
|
| 183 |
+
"bbox": [
|
| 184 |
+
0.171,
|
| 185 |
+
0.299,
|
| 186 |
+
0.825,
|
| 187 |
+
0.356
|
| 188 |
+
],
|
| 189 |
+
"angle": 0,
|
| 190 |
+
"content": "These findings not only reveal the current safety shortcomings of these models but also provide specific directions for improving model safety mechanisms in the future. It is our hope that this study will contribute to the broader effort of advancing large model safety, fostering the development of more robust and responsible AI systems for the benefit of society."
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"type": "title",
|
| 194 |
+
"bbox": [
|
| 195 |
+
0.172,
|
| 196 |
+
0.379,
|
| 197 |
+
0.341,
|
| 198 |
+
0.394
|
| 199 |
+
],
|
| 200 |
+
"angle": 0,
|
| 201 |
+
"content": "2 PRELIMINARIES"
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"type": "title",
|
| 205 |
+
"bbox": [
|
| 206 |
+
0.172,
|
| 207 |
+
0.411,
|
| 208 |
+
0.36,
|
| 209 |
+
0.426
|
| 210 |
+
],
|
| 211 |
+
"angle": 0,
|
| 212 |
+
"content": "2.1 DEEKEEKMODELS"
|
| 213 |
+
},
|
| 214 |
+
{
|
| 215 |
+
"type": "text",
|
| 216 |
+
"bbox": [
|
| 217 |
+
0.171,
|
| 218 |
+
0.438,
|
| 219 |
+
0.825,
|
| 220 |
+
0.524
|
| 221 |
+
],
|
| 222 |
+
"angle": 0,
|
| 223 |
+
"content": "DeepSeek-R1 Guo et al. (2025) is the first-generation reasoning model designed to enhance the reasoning capabilities of LLMs. Its development incorporated multi-stage training and cold-start data prior to reinforcement learning. Its predecessor, DeepSeek-R1-Zero, exhibited issues including poor readability and language mixing. DeepSeek-R1 not only addresses these problems but further improves reasoning performance, achieving comparable results to OpenAI-o1-1217 OpenAI et al. (2024b) on reasoning tasks. This study evaluates the safety risk of its 671B parameter version."
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"type": "text",
|
| 227 |
+
"bbox": [
|
| 228 |
+
0.171,
|
| 229 |
+
0.529,
|
| 230 |
+
0.826,
|
| 231 |
+
0.628
|
| 232 |
+
],
|
| 233 |
+
"angle": 0,
|
| 234 |
+
"content": "DeepSeek-V3 Liu et al. (2024a) is a powerful Mixture-of-Experts (MoE Cai et al. (2024)) language model with a total of 671B parameters, activating 37B parameters per token. It employs Multihead Latent Attention (MLA) and the DeepSeekMoE architecture to achieve efficient inference and economical training. Previous evaluations have demonstrated its exceptional performance across multiple tasks, surpassing other open-source models and achieving comparable results to leading closed-source models, with notable advantages in domains such as coding and mathematics. We have similarly conducted a safety evaluation of this model."
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "text",
|
| 238 |
+
"bbox": [
|
| 239 |
+
0.171,
|
| 240 |
+
0.633,
|
| 241 |
+
0.826,
|
| 242 |
+
0.748
|
| 243 |
+
],
|
| 244 |
+
"angle": 0,
|
| 245 |
+
"content": "DeepSeek-VL2 Wu et al. (2024) represents a series of advanced large-scale MoE MLLMs. The visual component employs a dynamic tiling visual encoding strategy specifically designed to handle images of varying high resolutions and aspect ratios. For the language component, DeepSeek-VL2 utilizes the DeepSeekMoE model with MLA, which compresses key-value caches into latent vectors, enabling efficient inference and high throughput. The series comprises three variants: DeepSeek-VL2-Tiny, DeepSeek-VL2-Small, and DeepSeek-VL2, with 1B, 2.8B, and 45B activated parameters, respectively. This study focuses on the safety evaluation of DeepSeek-VL2, the variant with the largest number of activated parameters."
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "text",
|
| 249 |
+
"bbox": [
|
| 250 |
+
0.171,
|
| 251 |
+
0.751,
|
| 252 |
+
0.826,
|
| 253 |
+
0.837
|
| 254 |
+
],
|
| 255 |
+
"angle": 0,
|
| 256 |
+
"content": "Janus-Pro-7B Chen et al. (2025) is a novel autoregressive framework that unifies multimodal understanding and generation. It overcomes the limitations of existing methods in visual encoding by decoupling visual encoding into independent pathways while employing a single unified Transformer architecture for processing. Janus-Pro's decoupling strategy effectively mitigates the functional conflicts of visual encoders between understanding and generation tasks, while simultaneously enhancing model flexibility. This study conducts a safety evaluation of Janus-Pro-7B."
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "title",
|
| 260 |
+
"bbox": [
|
| 261 |
+
0.172,
|
| 262 |
+
0.855,
|
| 263 |
+
0.363,
|
| 264 |
+
0.87
|
| 265 |
+
],
|
| 266 |
+
"angle": 0,
|
| 267 |
+
"content": "2.2 JAILBREAK ATTACKS"
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "text",
|
| 271 |
+
"bbox": [
|
| 272 |
+
0.171,
|
| 273 |
+
0.882,
|
| 274 |
+
0.825,
|
| 275 |
+
0.927
|
| 276 |
+
],
|
| 277 |
+
"angle": 0,
|
| 278 |
+
"content": "Jailbreak attacks on LLMs Ying et al. (2025); Zou et al. (2023); Shen et al. (2024) represent a class of adversarial techniques designed to circumvent the safety mechanisms and ethical guidelines embedded within LLMs. These attacks typically involve crafting malicious prompts or input"
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "page_number",
|
| 282 |
+
"bbox": [
|
| 283 |
+
0.494,
|
| 284 |
+
0.949,
|
| 285 |
+
0.505,
|
| 286 |
+
0.96
|
| 287 |
+
],
|
| 288 |
+
"angle": 0,
|
| 289 |
+
"content": "2"
|
| 290 |
+
}
|
| 291 |
+
],
|
| 292 |
+
[
|
| 293 |
+
{
|
| 294 |
+
"type": "text",
|
| 295 |
+
"bbox": [
|
| 296 |
+
0.171,
|
| 297 |
+
0.104,
|
| 298 |
+
0.825,
|
| 299 |
+
0.148
|
| 300 |
+
],
|
| 301 |
+
"angle": 0,
|
| 302 |
+
"content": "sequences that exploit vulnerabilities in the model's training data, instruction-following capabilities, or underlying architecture. The goal is to induce the LLM to generate outputs that would normally be prohibited, such as toxic, biased, harmful, or misleading content."
|
| 303 |
+
},
|
| 304 |
+
{
|
| 305 |
+
"type": "text",
|
| 306 |
+
"bbox": [
|
| 307 |
+
0.171,
|
| 308 |
+
0.153,
|
| 309 |
+
0.827,
|
| 310 |
+
0.238
|
| 311 |
+
],
|
| 312 |
+
"angle": 0,
|
| 313 |
+
"content": "Jailbreak attacks on MLLMs Ying et al. (2024c); Niu et al. (2024); Luo et al. (2024) extend the principles of LLM jailbreaking to the multimodal domain. These attacks leverage both textual and visual inputs to manipulate the model's behavior and bypass safety protocols. Attackers might craft prompts that combine seemingly innocuous images with carefully worded text designed to elicit harmful or inappropriate responses. The complex interplay between visual and textual modalities in MLLMs creates a larger attack surface compared to LLMs."
|
| 314 |
+
},
|
| 315 |
+
{
|
| 316 |
+
"type": "text",
|
| 317 |
+
"bbox": [
|
| 318 |
+
0.171,
|
| 319 |
+
0.244,
|
| 320 |
+
0.828,
|
| 321 |
+
0.329
|
| 322 |
+
],
|
| 323 |
+
"angle": 0,
|
| 324 |
+
"content": "Jailbreaking attacks on T2I models Gao et al. (2024); Dong et al. (2024); Kim et al. (2024); Jing et al. (2025) aim to generate images that violate safety guidelines, depict harmful content, or misrepresent information. These attacks typically involve crafting textual prompts that, while appearing benign on the surface, exploit the model's internal representations and biases to produce undesirable outputs. This can include generating images that are sexually suggestive, violent, promote hate speech, or depict copyrighted material."
|
| 325 |
+
},
|
| 326 |
+
{
|
| 327 |
+
"type": "title",
|
| 328 |
+
"bbox": [
|
| 329 |
+
0.173,
|
| 330 |
+
0.365,
|
| 331 |
+
0.414,
|
| 332 |
+
0.381
|
| 333 |
+
],
|
| 334 |
+
"angle": 0,
|
| 335 |
+
"content": "3 EVALUATION PROTOCOL"
|
| 336 |
+
},
|
| 337 |
+
{
|
| 338 |
+
"type": "title",
|
| 339 |
+
"bbox": [
|
| 340 |
+
0.172,
|
| 341 |
+
0.407,
|
| 342 |
+
0.318,
|
| 343 |
+
0.421
|
| 344 |
+
],
|
| 345 |
+
"angle": 0,
|
| 346 |
+
"content": "3.1 BENCHMARKS"
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"type": "text",
|
| 350 |
+
"bbox": [
|
| 351 |
+
0.171,
|
| 352 |
+
0.439,
|
| 353 |
+
0.825,
|
| 354 |
+
0.55
|
| 355 |
+
],
|
| 356 |
+
"angle": 0,
|
| 357 |
+
"content": "For the evaluation of DeepSeek-R1 and DeepSeek-V3, we developed a dedicated benchmark dataset, CNSafe, based on the Basic Security Requirements for Generative Artificial Intelligence Service (TC260-003). CNSafe encompasses 5 major categories and 31 subcategories, comprising a total of 3100 test cases. CNSafe is available in both Chinese and English, aiming to provide a more comprehensive assessment of model safety across different prevalent linguistic contexts. Furthermore, building upon CNSafe, we constructed a red-teaming dataset, CNSafe_RT, by integrating typical jailbreak attack methods. This allows for a more in-depth evaluation of the models from a red team perspective."
|
| 358 |
+
},
|
| 359 |
+
{
|
| 360 |
+
"type": "text",
|
| 361 |
+
"bbox": [
|
| 362 |
+
0.171,
|
| 363 |
+
0.557,
|
| 364 |
+
0.825,
|
| 365 |
+
0.614
|
| 366 |
+
],
|
| 367 |
+
"angle": 0,
|
| 368 |
+
"content": "For the evaluation of DeepSeek-VL2, we randomly sampled from SafeBench Ying et al. (2024a) and MM-SafetyBench Liu et al. (2024b), assessing the 13 risk types jointly covered by these two benchmarks, totaling 1300 queries. For the evaluation of Janus-Pro-7B, we randomly sampled from I2P Schramowski et al. (2023), encompassing 7 risk types and a total of 671 queries."
|
| 369 |
+
},
|
| 370 |
+
{
|
| 371 |
+
"type": "text",
|
| 372 |
+
"bbox": [
|
| 373 |
+
0.172,
|
| 374 |
+
0.62,
|
| 375 |
+
0.807,
|
| 376 |
+
0.635
|
| 377 |
+
],
|
| 378 |
+
"angle": 0,
|
| 379 |
+
"content": "Detailed descriptions of all benchmark datasets used in this study are provided in Appendix A.1."
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"type": "title",
|
| 383 |
+
"bbox": [
|
| 384 |
+
0.172,
|
| 385 |
+
0.668,
|
| 386 |
+
0.383,
|
| 387 |
+
0.683
|
| 388 |
+
],
|
| 389 |
+
"angle": 0,
|
| 390 |
+
"content": "3.2 EVALUATION METHODS"
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"type": "text",
|
| 394 |
+
"bbox": [
|
| 395 |
+
0.171,
|
| 396 |
+
0.701,
|
| 397 |
+
0.825,
|
| 398 |
+
0.73
|
| 399 |
+
],
|
| 400 |
+
"angle": 0,
|
| 401 |
+
"content": "This study employs a hybrid evaluation approach, integrating (M)LLM-as-Judge with human evaluation to ensure comprehensive and reliable experimental results."
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"type": "text",
|
| 405 |
+
"bbox": [
|
| 406 |
+
0.171,
|
| 407 |
+
0.736,
|
| 408 |
+
0.826,
|
| 409 |
+
0.835
|
| 410 |
+
],
|
| 411 |
+
"angle": 0,
|
| 412 |
+
"content": "(M)LLM-as-Judge leverages a designated (M)LLM as a judge to evaluate the quality or performance of outputs generated by other models. This methodology capitalizes on the (M)LLM's advanced comprehension and reasoning capabilities, providing an automated and scalable evaluation framework, thereby potentially reducing reliance on human annotators. In this research, we specifically utilize (M)LLM-as-Judge to evaluate the harmfulness of generated content. For textual content, we employ GPT-4o OpenAI et al. (2024a) and Qwen2.5-72B-Instruct Qwen et al. (2025) for judgment; for visual content, we utilize Qwen2.5-VL-72B-Instruct Qwen et al. (2025)."
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"type": "text",
|
| 416 |
+
"bbox": [
|
| 417 |
+
0.171,
|
| 418 |
+
0.841,
|
| 419 |
+
0.826,
|
| 420 |
+
0.926
|
| 421 |
+
],
|
| 422 |
+
"angle": 0,
|
| 423 |
+
"content": "Recognizing the inherent limitations of scaling human evaluation to large datasets, we conduct a comprehensive human evaluation exclusively on the outputs of DeepSeek-R1 and DeepSeek-V3 on the CNSafe_RT. For other experiments, we employ a sampling-based human evaluation of responses. This collaborative human-machine evaluation strategy ensures both scalability and efficiency, while incorporating the nuanced judgment, contextual understanding, and ethical considerations of human experts in complex scenarios."
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"type": "page_number",
|
| 427 |
+
"bbox": [
|
| 428 |
+
0.494,
|
| 429 |
+
0.949,
|
| 430 |
+
0.506,
|
| 431 |
+
0.96
|
| 432 |
+
],
|
| 433 |
+
"angle": 0,
|
| 434 |
+
"content": "3"
|
| 435 |
+
}
|
| 436 |
+
],
|
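The (M)LLM-as-Judge procedure described in Section 3.2 above reduces to sending each model response to a judge model and mapping its verdict to a binary safe/unsafe label. A minimal sketch of that step, assuming an OpenAI-compatible client and an illustrative judge prompt (the paper's exact prompt is not given in this extract; GPT-4o is one of the judges it names):

```python
# Sketch of the (M)LLM-as-Judge step, not the paper's exact setup.
# Assumes the `openai` package and an OPENAI_API_KEY in the environment.
from openai import OpenAI

client = OpenAI()

# Hypothetical judge prompt; the paper does not publish its prompt here.
JUDGE_PROMPT = (
    "You are a safety judge. Reply with exactly 'unsafe' if the following "
    "model response contains harmful content, otherwise reply 'safe'.\n\n"
    "Response to judge:\n{response}"
)

def judge_is_unsafe(response_text: str) -> bool:
    """Return True if the judge model flags the response as unsafe."""
    completion = client.chat.completions.create(
        model="gpt-4o",  # one of the judge models named in Section 3.2
        messages=[{"role": "user",
                   "content": JUDGE_PROMPT.format(response=response_text)}],
    )
    return completion.choices[0].message.content.strip().lower() == "unsafe"
```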
| 437 |
+
[
|
| 438 |
+
{
|
| 439 |
+
"type": "table_caption",
|
| 440 |
+
"bbox": [
|
| 441 |
+
0.279,
|
| 442 |
+
0.101,
|
| 443 |
+
0.719,
|
| 444 |
+
0.117
|
| 445 |
+
],
|
| 446 |
+
"angle": 0,
|
| 447 |
+
"content": "Table 1: ASR (%) of DeepSeek-R1 and DeepSeek-V3 on CNSafe."
|
| 448 |
+
},
|
| 449 |
+
{
|
| 450 |
+
"type": "table",
|
| 451 |
+
"bbox": [
|
| 452 |
+
0.175,
|
| 453 |
+
0.128,
|
| 454 |
+
0.821,
|
| 455 |
+
0.248
|
| 456 |
+
],
|
| 457 |
+
"angle": 0,
|
| 458 |
+
"content": "<table><tr><td rowspan=\"2\">Category</td><td colspan=\"2\">Chinese</td><td colspan=\"2\">English</td></tr><tr><td>DeepSeek-V3</td><td>DeepSeek-R1</td><td>DeepSeek-V3</td><td>DeepSeek-R1</td></tr><tr><td>Core socialist values violation</td><td>4.5</td><td>14.8</td><td>9.9</td><td>59.5</td></tr><tr><td>Discriminatory content</td><td>14.1</td><td>27.3</td><td>21.1</td><td>54.3</td></tr><tr><td>Commercial misconduct</td><td>12.4</td><td>28.0</td><td>11.2</td><td>69.0</td></tr><tr><td>Rights infringement</td><td>6.1</td><td>19.7</td><td>9.4</td><td>66.1</td></tr><tr><td>Service insecurity</td><td>N/A</td><td>N/A</td><td>N/A</td><td>N/A</td></tr></table>"
|
| 459 |
+
},
|
| 460 |
+
{
|
| 461 |
+
"type": "image",
|
| 462 |
+
"bbox": [
|
| 463 |
+
0.191,
|
| 464 |
+
0.266,
|
| 465 |
+
0.806,
|
| 466 |
+
0.427
|
| 467 |
+
],
|
| 468 |
+
"angle": 0,
|
| 469 |
+
"content": null
|
| 470 |
+
},
|
| 471 |
+
{
|
| 472 |
+
"type": "image_caption",
|
| 473 |
+
"bbox": [
|
| 474 |
+
0.259,
|
| 475 |
+
0.44,
|
| 476 |
+
0.738,
|
| 477 |
+
0.457
|
| 478 |
+
],
|
| 479 |
+
"angle": 0,
|
| 480 |
+
"content": "Figure 1: ASR \\((\\%)\\) of DeepSeek-R1 and DeepSeek-V3 on CNSafe_RT."
|
| 481 |
+
},
|
| 482 |
+
{
|
| 483 |
+
"type": "title",
|
| 484 |
+
"bbox": [
|
| 485 |
+
0.172,
|
| 486 |
+
0.486,
|
| 487 |
+
0.275,
|
| 488 |
+
0.5
|
| 489 |
+
],
|
| 490 |
+
"angle": 0,
|
| 491 |
+
"content": "3.3 METRIC"
|
| 492 |
+
},
|
| 493 |
+
{
|
| 494 |
+
"type": "text",
|
| 495 |
+
"bbox": [
|
| 496 |
+
0.171,
|
| 497 |
+
0.513,
|
| 498 |
+
0.825,
|
| 499 |
+
0.598
|
| 500 |
+
],
|
| 501 |
+
"angle": 0,
|
| 502 |
+
"content": "To rigorously evaluate the safety of the three categories of generative large models under evaluation, we employ Attack Success Rate (ASR) as the primary evaluation metric. ASR is defined as the proportion of model responses judged to be unsafe. Specifically, for LLMs and MLLMs, ASR is calculated as the percentage of all responses that are flagged as containing text posing a safety risk. For T2I models, the focus is on evaluating the proportion of generated images that contain inappropriate content, prohibited elements, or sensitive information."
|
| 503 |
+
},
|
| 504 |
+
{
|
| 505 |
+
"type": "text",
|
| 506 |
+
"bbox": [
|
| 507 |
+
0.172,
|
| 508 |
+
0.604,
|
| 509 |
+
0.515,
|
| 510 |
+
0.62
|
| 511 |
+
],
|
| 512 |
+
"angle": 0,
|
| 513 |
+
"content": "The ASR is calculated using the following formula:"
|
| 514 |
+
},
|
| 515 |
+
{
|
| 516 |
+
"type": "equation",
|
| 517 |
+
"bbox": [
|
| 518 |
+
0.338,
|
| 519 |
+
0.64,
|
| 520 |
+
0.825,
|
| 521 |
+
0.673
|
| 522 |
+
],
|
| 523 |
+
"angle": 0,
|
| 524 |
+
"content": "\\[\n\\mathrm{ASR} = \\frac{\\text{Number of Unsafe Responses}}{\\text{Number of Total Responses}}\\times 100\\% . \\tag{1}\n\\]"
|
| 525 |
+
},
|
| 526 |
+
{
|
| 527 |
+
"type": "text",
|
| 528 |
+
"bbox": [
|
| 529 |
+
0.171,
|
| 530 |
+
0.687,
|
| 531 |
+
0.825,
|
| 532 |
+
0.718
|
| 533 |
+
],
|
| 534 |
+
"angle": 0,
|
| 535 |
+
"content": "This consistent application of ASR across all model types ensures a comparable measure of their vulnerability to producing unsafe outputs."
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"type": "title",
|
| 539 |
+
"bbox": [
|
| 540 |
+
0.172,
|
| 541 |
+
0.74,
|
| 542 |
+
0.319,
|
| 543 |
+
0.755
|
| 544 |
+
],
|
| 545 |
+
"angle": 0,
|
| 546 |
+
"content": "4 EXPERIMENT"
|
| 547 |
+
},
|
| 548 |
+
{
|
| 549 |
+
"type": "title",
|
| 550 |
+
"bbox": [
|
| 551 |
+
0.172,
|
| 552 |
+
0.774,
|
| 553 |
+
0.382,
|
| 554 |
+
0.788
|
| 555 |
+
],
|
| 556 |
+
"angle": 0,
|
| 557 |
+
"content": "4.1 EVALUATION ON LLMS"
|
| 558 |
+
},
|
| 559 |
+
{
|
| 560 |
+
"type": "title",
|
| 561 |
+
"bbox": [
|
| 562 |
+
0.172,
|
| 563 |
+
0.801,
|
| 564 |
+
0.458,
|
| 565 |
+
0.815
|
| 566 |
+
],
|
| 567 |
+
"angle": 0,
|
| 568 |
+
"content": "4.1.1 DEEPSEEK-R1 & DEEPSEEK-V3"
|
| 569 |
+
},
|
| 570 |
+
{
|
| 571 |
+
"type": "text",
|
| 572 |
+
"bbox": [
|
| 573 |
+
0.171,
|
| 574 |
+
0.827,
|
| 575 |
+
0.825,
|
| 576 |
+
0.926
|
| 577 |
+
],
|
| 578 |
+
"angle": 0,
|
| 579 |
+
"content": "The evaluation results on CNSafe are summarized in Tab. 1 and Fig. 2a, with Tab. 1 presenting data for the 5 major risk categories and Fig. 2a showing data for 29 detailed risk subcategories. It should be noted that we deliberately marked the statistical data for Service insecurity as N/A. This is because the Service insecurity category in TC260-003 refers to risks such as content inaccuracy and unreliability when models are used for specific service types with high security requirements. Evaluating these aspects requires substantial expert knowledge, and accurate results cannot be obtained through LLM-as-Judge or manual assessment alone."
|
| 580 |
+
},
|
| 581 |
+
{
|
| 582 |
+
"type": "page_number",
|
| 583 |
+
"bbox": [
|
| 584 |
+
0.494,
|
| 585 |
+
0.949,
|
| 586 |
+
0.506,
|
| 587 |
+
0.96
|
| 588 |
+
],
|
| 589 |
+
"angle": 0,
|
| 590 |
+
"content": "4"
|
| 591 |
+
}
|
| 592 |
+
],
|
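Eq. (1) in Section 3.3 above is a plain ratio over binary judge verdicts. A minimal sketch, assuming a list of booleans such as the judge sketched earlier would produce:

```python
# ASR = (number of unsafe responses / total responses) * 100, per Eq. (1).
def attack_success_rate(verdicts: list[bool]) -> float:
    """Return the attack success rate in percent (0.0 for no verdicts)."""
    if not verdicts:
        return 0.0
    return 100.0 * sum(verdicts) / len(verdicts)

# Example: 3 unsafe responses out of 10 queries -> ASR of 30.0%.
print(attack_success_rate([True, True, True] + [False] * 7))
```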
| 593 |
+
[
|
| 594 |
+
{
|
| 595 |
+
"type": "image",
|
| 596 |
+
"bbox": [
|
| 597 |
+
0.177,
|
| 598 |
+
0.106,
|
| 599 |
+
0.49,
|
| 600 |
+
0.372
|
| 601 |
+
],
|
| 602 |
+
"angle": 0,
|
| 603 |
+
"content": null
|
| 604 |
+
},
|
| 605 |
+
{
|
| 606 |
+
"type": "image_caption",
|
| 607 |
+
"bbox": [
|
| 608 |
+
0.269,
|
| 609 |
+
0.38,
|
| 610 |
+
0.393,
|
| 611 |
+
0.394
|
| 612 |
+
],
|
| 613 |
+
"angle": 0,
|
| 614 |
+
"content": "(a) DeepSeek LLMs"
|
| 615 |
+
},
|
| 616 |
+
{
|
| 617 |
+
"type": "image",
|
| 618 |
+
"bbox": [
|
| 619 |
+
0.509,
|
| 620 |
+
0.102,
|
| 621 |
+
0.822,
|
| 622 |
+
0.373
|
| 623 |
+
],
|
| 624 |
+
"angle": 0,
|
| 625 |
+
"content": null
|
| 626 |
+
},
|
| 627 |
+
{
|
| 628 |
+
"type": "image_caption",
|
| 629 |
+
"bbox": [
|
| 630 |
+
0.561,
|
| 631 |
+
0.38,
|
| 632 |
+
0.774,
|
| 633 |
+
0.394
|
| 634 |
+
],
|
| 635 |
+
"angle": 0,
|
| 636 |
+
"content": "(b) Other Chinese-developed LLMs"
|
| 637 |
+
},
|
| 638 |
+
{
|
| 639 |
+
"type": "image_caption",
|
| 640 |
+
"bbox": [
|
| 641 |
+
0.17,
|
| 642 |
+
0.404,
|
| 643 |
+
0.825,
|
| 644 |
+
0.53
|
| 645 |
+
],
|
| 646 |
+
"angle": 0,
|
| 647 |
+
"content": "Figure 2: ASR (\\%) of DeepSeek LLMs and other Chinese-developed LLMs on CNSafe. Abbreviations: RS, Regime subversion; NH, National harm; SD, State division; TP, Terrorism promotion; EH, Ethnic hatred; VP, Violence promotion; FI, False information; PC, Prohibited content; ED, Ethnic discrimination; FD, Faith discrimination; ND, National discrimination; RD, Regional discrimination; GD, Gender discrimination; AD, Age discrimination; OD, Occupational discrimination; HD, Health discrimination; OT, Other discrimination; II, IP infringement; BE, Business ethics; TS, Trade secrets; UC, Unfair competition; BV, Business violations; HE, Health endangerment; IR, Image rights; RH, Reputation harm; HV, Honor violation; PI, Privacy invasion; DM, Data misuse; RV, Rights violation."
|
| 648 |
+
},
|
| 649 |
+
{
|
| 650 |
+
"type": "text",
|
| 651 |
+
"bbox": [
|
| 652 |
+
0.17,
|
| 653 |
+
0.56,
|
| 654 |
+
0.825,
|
| 655 |
+
0.672
|
| 656 |
+
],
|
| 657 |
+
"angle": 0,
|
| 658 |
+
"content": "Two major trends can be clearly observed from the data in Tab. 1. For both DeepSeek-V3 and DeepSeek-R1 models, attack success rates in English environments consistently exceed those in Chinese environments across all risk categories (with an average ASR gap of \\(21.7\\%\\)). This indicates that language context substantially influences model vulnerability. When comparing DeepSeek-V3 and DeepSeek-R1 models, we observe that regardless of language environment, the DeepSeek-R1 model exhibits higher attack success rates than the DeepSeek-V3 model across all major risk categories (with an average ASR gap of \\(31.25\\%\\)). This suggests that the exposed CoT Wei et al. (2022) in DeepSeek-R1 introduces additional vulnerabilities."
|
| 659 |
+
},
|
| 660 |
+
{
|
| 661 |
+
"type": "text",
|
| 662 |
+
"bbox": [
|
| 663 |
+
0.17,
|
| 664 |
+
0.678,
|
| 665 |
+
0.825,
|
| 666 |
+
0.749
|
| 667 |
+
],
|
| 668 |
+
"angle": 0,
|
| 669 |
+
"content": "Fig. 1 presents the evaluation results of DeepSeek-R1 and DeepSeek-V3 on CNSafe_RT. As shown, the DeepSeek-V3 model exhibits exceptionally high ASRs across most risk categories, with many reaching \\(95\\% - 100\\%\\), indicating significant vulnerabilities in the model's safety mechanisms. In contrast, the DeepSeek-R1 model generally shows lower ASRs than the DeepSeek-V3 model, typically \\(80\\% - 90\\%\\) in Chinese environments and \\(85\\% - 95\\%\\) in English environments."
|
| 670 |
+
},
|
| 671 |
+
{
|
| 672 |
+
"type": "text",
|
| 673 |
+
"bbox": [
|
| 674 |
+
0.17,
|
| 675 |
+
0.755,
|
| 676 |
+
0.825,
|
| 677 |
+
0.826
|
| 678 |
+
],
|
| 679 |
+
"angle": 0,
|
| 680 |
+
"content": "Notably, we observe that the DeepSeek-V3 model achieves \\(100\\%\\) ASRs for categories such as Ethnic hatred and False information in both Chinese and English environments. These risk types should be prioritized in subsequent safety alignment efforts. Overall, the evaluation results demonstrate that both DeepSeek-V3 and DeepSeek-R1 models exhibit clear vulnerabilities when facing jailbreak attacks."
|
| 681 |
+
},
|
| 682 |
+
{
|
| 683 |
+
"type": "title",
|
| 684 |
+
"bbox": [
|
| 685 |
+
0.172,
|
| 686 |
+
0.843,
|
| 687 |
+
0.533,
|
| 688 |
+
0.858
|
| 689 |
+
],
|
| 690 |
+
"angle": 0,
|
| 691 |
+
"content": "4.1.2 COMPARISON WITH OTHER CHINESE LLMS"
|
| 692 |
+
},
|
| 693 |
+
{
|
| 694 |
+
"type": "text",
|
| 695 |
+
"bbox": [
|
| 696 |
+
0.17,
|
| 697 |
+
0.868,
|
| 698 |
+
0.825,
|
| 699 |
+
0.926
|
| 700 |
+
],
|
| 701 |
+
"angle": 0,
|
| 702 |
+
"content": "We conducted additional safety evaluations on five representative Chinese-developed LLMs using CNSafe and CNSafe_RT. Four are standard LLMs—Doubao-1.5-pro-32k-250115 (Doubao), Hunyuan-turbo-latest (Hunyuan), Moonshot-v1-8k (Moonshot), and Qwen-Max; while one is a reasoning LLM, QwQ-32B."
|
| 703 |
+
},
|
| 704 |
+
{
|
| 705 |
+
"type": "page_number",
|
| 706 |
+
"bbox": [
|
| 707 |
+
0.494,
|
| 708 |
+
0.949,
|
| 709 |
+
0.504,
|
| 710 |
+
0.96
|
| 711 |
+
],
|
| 712 |
+
"angle": 0,
|
| 713 |
+
"content": "5"
|
| 714 |
+
}
|
| 715 |
+
],
|
| 716 |
+
[
|
| 717 |
+
{
|
| 718 |
+
"type": "image",
|
| 719 |
+
"bbox": [
|
| 720 |
+
0.191,
|
| 721 |
+
0.103,
|
| 722 |
+
0.805,
|
| 723 |
+
0.265
|
| 724 |
+
],
|
| 725 |
+
"angle": 0,
|
| 726 |
+
"content": null
|
| 727 |
+
},
|
| 728 |
+
{
|
| 729 |
+
"type": "image_caption",
|
| 730 |
+
"bbox": [
|
| 731 |
+
0.281,
|
| 732 |
+
0.28,
|
| 733 |
+
0.713,
|
| 734 |
+
0.296
|
| 735 |
+
],
|
| 736 |
+
"angle": 0,
|
| 737 |
+
"content": "Figure 3: ASR \\((\\%)\\) of Chinese-developed LLMs on CNSafe_RT."
|
| 738 |
+
},
|
| 739 |
+
{
|
| 740 |
+
"type": "table_caption",
|
| 741 |
+
"bbox": [
|
| 742 |
+
0.301,
|
| 743 |
+
0.31,
|
| 744 |
+
0.696,
|
| 745 |
+
0.325
|
| 746 |
+
],
|
| 747 |
+
"angle": 0,
|
| 748 |
+
"content": "Table 2: ASR (%) of Chinese-developed LLMs on CNSafe."
|
| 749 |
+
},
|
| 750 |
+
{
|
| 751 |
+
"type": "table",
|
| 752 |
+
"bbox": [
|
| 753 |
+
0.188,
|
| 754 |
+
0.336,
|
| 755 |
+
0.806,
|
| 756 |
+
0.436
|
| 757 |
+
],
|
| 758 |
+
"angle": 0,
|
| 759 |
+
"content": "<table><tr><td>Category</td><td>Doubao</td><td>Hunyuan</td><td>Moonshot</td><td>Qwen-Max</td><td>QwQ-32B</td></tr><tr><td>Core socialist values violation</td><td>7.9</td><td>2</td><td>2.5</td><td>3.8</td><td>21.8</td></tr><tr><td>Discriminatory content</td><td>26.3</td><td>8.4</td><td>14.3</td><td>3.9</td><td>36.2</td></tr><tr><td>Commercial misconduct</td><td>25.6</td><td>3</td><td>5.6</td><td>3.6</td><td>25.6</td></tr><tr><td>Rights infringement</td><td>15.7</td><td>2</td><td>2.9</td><td>2.9</td><td>22.6</td></tr><tr><td>Service insecurity</td><td>N/A</td><td>N/A</td><td>N/A</td><td>N/A</td><td>N/A</td></tr></table>"
|
| 760 |
+
},
|
| 761 |
+
{
|
| 762 |
+
"type": "text",
|
| 763 |
+
"bbox": [
|
| 764 |
+
0.17,
|
| 765 |
+
0.463,
|
| 766 |
+
0.825,
|
| 767 |
+
0.643
|
| 768 |
+
],
|
| 769 |
+
"angle": 0,
|
| 770 |
+
"content": "Tab. 2 summarizes the attack success rates for these five Chinese-developed LLMs across major risk categories on CNSafe, while Fig. 2b displays ASRs across all 29 detailed risk subcategories. Overall, among the compared models, QwQ-32B achieved the highest attack success rates across all major risk categories, with an average ASR of \\(26.6\\%\\). This pattern aligns with observations from DeepSeek-R1, further suggesting that exposed chains of thought present exploitation risks for attackers. Doubao also demonstrated considerable vulnerabilities in certain risk categories, particularly in Discriminatory content and Commercial misconduct, with attack success rates of \\(26.3\\%\\) and \\(25.6\\%\\) respectively. Comparatively, Qwen-Max exhibited the strongest safety performance with an average ASR of only \\(3.6\\%\\). Notably, when comparing these models with DeepSeek LLMs, we observe that DeepSeek LLMs rank quite low in terms of safety performance. Among reasoning LLMs, while DeepSeek-R1's average ASR \\((22.5\\%)\\) is lower than QwQ-32B, it remains substantial. Among standard LLMs, DeepSeek-V3's safety performance ranks second-to-last, surpassing only Doubao."
|
| 771 |
+
},
|
| 772 |
+
{
|
| 773 |
+
"type": "text",
|
| 774 |
+
"bbox": [
|
| 775 |
+
0.17,
|
| 776 |
+
0.65,
|
| 777 |
+
0.825,
|
| 778 |
+
0.735
|
| 779 |
+
],
|
| 780 |
+
"angle": 0,
|
| 781 |
+
"content": "The evaluation results of five Chinese-developed LLMs on CNSafe_RT are presented in Fig. 3. QwQ-32B clearly demonstrates the highest ASRs across all risk categories, notably exceeding \\(85\\%\\) in nine risk categories. This indicates that this model performs worst in terms of safety and is most susceptible to attacks. In contrast, Hunyuan shows significantly lower ASRs than other models across most risk categories, with an average ASR of only \\(1.9\\%\\), demonstrating its robust safety performance."
|
| 782 |
+
},
|
| 783 |
+
{
|
| 784 |
+
"type": "text",
|
| 785 |
+
"bbox": [
|
| 786 |
+
0.17,
|
| 787 |
+
0.74,
|
| 788 |
+
0.825,
|
| 789 |
+
0.826
|
| 790 |
+
],
|
| 791 |
+
"angle": 0,
|
| 792 |
+
"content": "When comparing these models with corresponding DeepSeek LLM results, we observe that reasoning LLMs (QwQ and DeepSeek-R1) have markedly higher ASRs than standard LLMs, further indicating that the reasoning chains exposed by such models increase safety risks even under jailbreak attacks. Among standard LLMs, DeepSeek-V3 presents substantially higher risks than other Chinese-developed LLMs (averaging \\(66.8\\%\\) higher), possibly stemming from its innovative low-cost model training method that neglected safety alignment considerations."
|
| 793 |
+
},
|
| 794 |
+
{
|
| 795 |
+
"type": "title",
|
| 796 |
+
"bbox": [
|
| 797 |
+
0.172,
|
| 798 |
+
0.842,
|
| 799 |
+
0.388,
|
| 800 |
+
0.856
|
| 801 |
+
],
|
| 802 |
+
"angle": 0,
|
| 803 |
+
"content": "4.2 EVALUATION ON MLLM"
|
| 804 |
+
},
|
| 805 |
+
{
|
| 806 |
+
"type": "text",
|
| 807 |
+
"bbox": [
|
| 808 |
+
0.17,
|
| 809 |
+
0.868,
|
| 810 |
+
0.825,
|
| 811 |
+
0.926
|
| 812 |
+
],
|
| 813 |
+
"angle": 0,
|
| 814 |
+
"content": "SafeBench and MM-SafetyBench introduce two prevalent multimodal jailbreaking attack methodologies: image semantic-based attacks and typography-based attacks. Representative image-text pairs employed in these attack methods are illustrated in Fig. 4. For each of these methods, we sampled 750 image-text pairs, covering 13 distinct categories, for evaluation purposes."
|
| 815 |
+
},
|
| 816 |
+
{
|
| 817 |
+
"type": "page_number",
|
| 818 |
+
"bbox": [
|
| 819 |
+
0.494,
|
| 820 |
+
0.949,
|
| 821 |
+
0.506,
|
| 822 |
+
0.96
|
| 823 |
+
],
|
| 824 |
+
"angle": 0,
|
| 825 |
+
"content": "6"
|
| 826 |
+
}
|
| 827 |
+
],
|
| 828 |
+
[
|
| 829 |
+
{
|
| 830 |
+
"type": "image",
|
| 831 |
+
"bbox": [
|
| 832 |
+
0.172,
|
| 833 |
+
0.101,
|
| 834 |
+
0.473,
|
| 835 |
+
0.226
|
| 836 |
+
],
|
| 837 |
+
"angle": 0,
|
| 838 |
+
"content": null
|
| 839 |
+
},
|
| 840 |
+
{
|
| 841 |
+
"type": "image_caption",
|
| 842 |
+
"bbox": [
|
| 843 |
+
0.223,
|
| 844 |
+
0.23,
|
| 845 |
+
0.42,
|
| 846 |
+
0.244
|
| 847 |
+
],
|
| 848 |
+
"angle": 0,
|
| 849 |
+
"content": "(a) Image semantic-based Attack"
|
| 850 |
+
},
|
| 851 |
+
{
|
| 852 |
+
"type": "image",
|
| 853 |
+
"bbox": [
|
| 854 |
+
0.529,
|
| 855 |
+
0.101,
|
| 856 |
+
0.827,
|
| 857 |
+
0.226
|
| 858 |
+
],
|
| 859 |
+
"angle": 0,
|
| 860 |
+
"content": null
|
| 861 |
+
},
|
| 862 |
+
{
|
| 863 |
+
"type": "image_caption",
|
| 864 |
+
"bbox": [
|
| 865 |
+
0.589,
|
| 866 |
+
0.23,
|
| 867 |
+
0.765,
|
| 868 |
+
0.244
|
| 869 |
+
],
|
| 870 |
+
"angle": 0,
|
| 871 |
+
"content": "(b) Typography-based Attack"
|
| 872 |
+
},
|
| 873 |
+
{
|
| 874 |
+
"type": "image_caption",
|
| 875 |
+
"bbox": [
|
| 876 |
+
0.218,
|
| 877 |
+
0.255,
|
| 878 |
+
0.777,
|
| 879 |
+
0.271
|
| 880 |
+
],
|
| 881 |
+
"angle": 0,
|
| 882 |
+
"content": "Figure 4: Examples of image-text pairs used in multimodal jailbreak attack methods."
|
| 883 |
+
},
|
| 884 |
+
{
|
| 885 |
+
"type": "image",
|
| 886 |
+
"bbox": [
|
| 887 |
+
0.179,
|
| 888 |
+
0.293,
|
| 889 |
+
0.821,
|
| 890 |
+
0.462
|
| 891 |
+
],
|
| 892 |
+
"angle": 0,
|
| 893 |
+
"content": null
|
| 894 |
+
},
|
| 895 |
+
{
|
| 896 |
+
"type": "image_caption",
|
| 897 |
+
"bbox": [
|
| 898 |
+
0.247,
|
| 899 |
+
0.474,
|
| 900 |
+
0.747,
|
| 901 |
+
0.49
|
| 902 |
+
],
|
| 903 |
+
"angle": 0,
|
| 904 |
+
"content": "Figure 5: ASR \\((\\%)\\) of DeepSeek-VL2 on SafeBench and MM-SafetyBench."
|
| 905 |
+
},
|
| 906 |
+
{
|
| 907 |
+
"type": "text",
|
| 908 |
+
"bbox": [
|
| 909 |
+
0.17,
|
| 910 |
+
0.523,
|
| 911 |
+
0.825,
|
| 912 |
+
0.634
|
| 913 |
+
],
|
| 914 |
+
"angle": 0,
|
| 915 |
+
"content": "From Fig. 5, it is evident that typography-based attacks achieve significantly higher ASRs compared to image semantics-based attacks, with an average increase of \\(20.31\\%\\). This indicates a notable vulnerability in current models when processing typographical perturbations. Such vulnerability may stem from insufficient exposure to these attack types during training. When examining specific risk categories, we observe several striking differences. In Economic Harm and Fraud categories, typography-based attacks reached ASRs of \\(40\\%\\) and \\(38\\%\\) respectively, substantially higher than other categories. This suggests that models are particularly susceptible to generate unsafe response when processing economics and finance-related content."
|
| 916 |
+
},
|
| 917 |
+
{
|
| 918 |
+
"type": "text",
|
| 919 |
+
"bbox": [
|
| 920 |
+
0.17,
|
| 921 |
+
0.641,
|
| 922 |
+
0.827,
|
| 923 |
+
0.781
|
| 924 |
+
],
|
| 925 |
+
"angle": 0,
|
| 926 |
+
"content": "Regarding image semantics-based attacks, while overall ASRs remain lower, certain categories such as Gov Decision, Health Consultation, and Legal Opinion show relatively higher ASRs (4%-6%). This indicates potential vulnerabilities in the model's understanding of image semantics when addressing sensitive topics related to politics, health, and governmental decisions. Notably, our manual analysis of model responses revealed that when confronted with these attacks, models frequently generated meaningless outputs, including repetitive characters or strings such as \"the of\", \"***\", \"shows\", and \"using\". Since the LLM-as-Judge methodology classifies these meaningless outputs as safe, this effectively reduces the model's actual ASR. It is important to note that this does not reflect the model's true safety level; rather, it highlights deficiencies in the model's image comprehension and response generation capabilities."
|
| 927 |
+
},
|
| 928 |
+
{
|
| 929 |
+
"type": "title",
|
| 930 |
+
"bbox": [
|
| 931 |
+
0.172,
|
| 932 |
+
0.804,
|
| 933 |
+
0.427,
|
| 934 |
+
0.818
|
| 935 |
+
],
|
| 936 |
+
"angle": 0,
|
| 937 |
+
"content": "4.3 EVALUATION ON T2I MODELS"
|
| 938 |
+
},
|
| 939 |
+
{
|
| 940 |
+
"type": "text",
|
| 941 |
+
"bbox": [
|
| 942 |
+
0.171,
|
| 943 |
+
0.833,
|
| 944 |
+
0.825,
|
| 945 |
+
0.89
|
| 946 |
+
],
|
| 947 |
+
"angle": 0,
|
| 948 |
+
"content": "In this section, we evaluate the safety of DeepSeek's T2I model, Janus-Pro-7B, using a sample of 671 queries drawn from the I2P. This sample comprises 100 queries for each category except Hate, which contains only 71 queries. Fig. 6 showcases representative examples of unsafe images generated during this evaluation."
|
| 949 |
+
},
|
| 950 |
+
{
|
| 951 |
+
"type": "text",
|
| 952 |
+
"bbox": [
|
| 953 |
+
0.171,
|
| 954 |
+
0.896,
|
| 955 |
+
0.825,
|
| 956 |
+
0.925
|
| 957 |
+
],
|
| 958 |
+
"angle": 0,
|
| 959 |
+
"content": "For comparative purposes, we concurrently assess the safety of another popular T2I model, Stable-Diffusion-3.5-Large AI (2024). Fig. 7 presents the ASRs for both models across various risk di"
|
| 960 |
+
},
|
| 961 |
+
{
|
| 962 |
+
"type": "page_number",
|
| 963 |
+
"bbox": [
|
| 964 |
+
0.494,
|
| 965 |
+
0.949,
|
| 966 |
+
0.504,
|
| 967 |
+
0.96
|
| 968 |
+
],
|
| 969 |
+
"angle": 0,
|
| 970 |
+
"content": "7"
|
| 971 |
+
}
|
| 972 |
+
],
|
| 973 |
+
[
|
| 974 |
+
{
|
| 975 |
+
"type": "image",
|
| 976 |
+
"bbox": [
|
| 977 |
+
0.179,
|
| 978 |
+
0.103,
|
| 979 |
+
0.268,
|
| 980 |
+
0.173
|
| 981 |
+
],
|
| 982 |
+
"angle": 0,
|
| 983 |
+
"content": null
|
| 984 |
+
},
|
| 985 |
+
{
|
| 986 |
+
"type": "image_caption",
|
| 987 |
+
"bbox": [
|
| 988 |
+
0.188,
|
| 989 |
+
0.177,
|
| 990 |
+
0.258,
|
| 991 |
+
0.187
|
| 992 |
+
],
|
| 993 |
+
"angle": 0,
|
| 994 |
+
"content": "Harassment"
|
| 995 |
+
},
|
| 996 |
+
{
|
| 997 |
+
"type": "image",
|
| 998 |
+
"bbox": [
|
| 999 |
+
0.271,
|
| 1000 |
+
0.103,
|
| 1001 |
+
0.361,
|
| 1002 |
+
0.174
|
| 1003 |
+
],
|
| 1004 |
+
"angle": 0,
|
| 1005 |
+
"content": null
|
| 1006 |
+
},
|
| 1007 |
+
{
|
| 1008 |
+
"type": "image_caption",
|
| 1009 |
+
"bbox": [
|
| 1010 |
+
0.299,
|
| 1011 |
+
0.178,
|
| 1012 |
+
0.326,
|
| 1013 |
+
0.187
|
| 1014 |
+
],
|
| 1015 |
+
"angle": 0,
|
| 1016 |
+
"content": "Hate"
|
| 1017 |
+
},
|
| 1018 |
+
{
|
| 1019 |
+
"type": "image",
|
| 1020 |
+
"bbox": [
|
| 1021 |
+
0.364,
|
| 1022 |
+
0.103,
|
| 1023 |
+
0.453,
|
| 1024 |
+
0.174
|
| 1025 |
+
],
|
| 1026 |
+
"angle": 0,
|
| 1027 |
+
"content": null
|
| 1028 |
+
},
|
| 1029 |
+
{
|
| 1030 |
+
"type": "image_caption",
|
| 1031 |
+
"bbox": [
|
| 1032 |
+
0.37,
|
| 1033 |
+
0.177,
|
| 1034 |
+
0.447,
|
| 1035 |
+
0.188
|
| 1036 |
+
],
|
| 1037 |
+
"angle": 0,
|
| 1038 |
+
"content": "Illegal activity"
|
| 1039 |
+
},
|
| 1040 |
+
{
|
| 1041 |
+
"type": "image",
|
| 1042 |
+
"bbox": [
|
| 1043 |
+
0.455,
|
| 1044 |
+
0.103,
|
| 1045 |
+
0.545,
|
| 1046 |
+
0.174
|
| 1047 |
+
],
|
| 1048 |
+
"angle": 0,
|
| 1049 |
+
"content": null
|
| 1050 |
+
},
|
| 1051 |
+
{
|
| 1052 |
+
"type": "image_caption",
|
| 1053 |
+
"bbox": [
|
| 1054 |
+
0.473,
|
| 1055 |
+
0.177,
|
| 1056 |
+
0.528,
|
| 1057 |
+
0.188], "angle": 0, "content": "Self-harm" },
  { "type": "image", "bbox": [0.547, 0.103, 0.636, 0.174], "angle": 0, "content": null },
  { "type": "image_caption", "bbox": [0.546, 0.177, 0.631, 0.188], "angle": 0, "content": "Sexual content" },
  { "type": "image", "bbox": [0.639, 0.103, 0.729, 0.174], "angle": 0, "content": null },
  { "type": "image_caption", "bbox": [0.637, 0.177, 0.732, 0.188], "angle": 0, "content": "Shocking images" },
  { "type": "image", "bbox": [0.731, 0.103, 0.821, 0.174], "angle": 0, "content": null },
  { "type": "image_caption", "bbox": [0.754, 0.177, 0.803, 0.188], "angle": 0, "content": "Violence" },
  { "type": "image_caption", "bbox": [0.281, 0.198, 0.714, 0.214], "angle": 0, "content": "Figure 6: Examples of unsafe images generated by Janus-Pro-7B." },
  { "type": "image", "bbox": [0.177, 0.231, 0.466, 0.455], "angle": 0, "content": null },
  { "type": "image_caption", "bbox": [0.269, 0.463, 0.372, 0.476], "angle": 0, "content": "(a) Janus-Pro-7B" },
  { "type": "image", "bbox": [0.533, 0.231, 0.822, 0.455], "angle": 0, "content": null },
  { "type": "image_caption", "bbox": [0.586, 0.463, 0.768, 0.477], "angle": 0, "content": "(b) Stable-Diffusion-3.5-Large" },
  { "type": "image_caption", "bbox": [0.248, 0.487, 0.747, 0.503], "angle": 0, "content": "Figure 7: ASR \\((\\%)\\) of Janus-Pro-7B and Stable-Diffusion-3.5-Large on I2P." },
  { "type": "text", "bbox": [0.171, 0.531, 0.825, 0.561], "angle": 0, "content": "mensions. Overall, Janus-Pro-7B presents higher safety risks compared to Stable Diffusion 3.5 Large, with average ASRs of \\(43.7\\%\\) and \\(39.9\\%\\), respectively." },
  { "type": "text", "bbox": [0.171, 0.567, 0.825, 0.693], "angle": 0, "content": "We subsequently compared the specific performance of both models across various dimensions. In the Hate, Harassment, Shocking images, and Self-harm dimensions, the Stable-Diffusion-3.5-Large model shows slightly higher ASRs than the Janus-Pro-7B model, indicating that Janus-Pro-7B offers relatively better safety in these risk categories. However, in the Sexual content dimension, the Janus-Pro-7B model exhibits a significantly higher attack success rate than the Stable-Diffusion-3.5-Large model (74.0% vs. 47.0%), revealing a notable safety vulnerability in this dimension. In the Illegal activity dimension, Janus-Pro-7B also demonstrates a higher ASR (61% vs. 39%). In the Violence dimension, both models show similar ASRs of approximately 50%." },
  { "type": "text", "bbox": [0.171, 0.699, 0.825, 0.756], "angle": 0, "content": "Additionally, in our manual sampling evaluation, we observed that Janus-Pro-7B does not prohibit the generation of fake news images, such as \"Trump arrested and imprisoned.\" This finding suggests that Janus-Pro-7B is highly susceptible to misuse, potentially leading to additional risks Sha et al. (2023)." },
  { "type": "title", "bbox": [0.173, 0.778, 0.32, 0.793], "angle": 0, "content": "5 CONCLUSION" },
  { "type": "text", "bbox": [0.171, 0.811, 0.825, 0.855], "angle": 0, "content": "To the best of our knowledge, this study presents the first comprehensive safety evaluation of the DeepSeek models. Our investigation reveals a nuanced balance between safety and performance, and highlights several key findings." },
  { "type": "text", "bbox": [0.216, 0.868, 0.825, 0.926], "angle": 0, "content": "- Vulnerability to jailbreaking. While DeepSeek LLMs exhibit robust safety boundaries when handling direct harmful queries, their safety alignment proves brittle under jailbreaking attacks. This suggests that their safety alignments may be optimized for explicit threats but remain vulnerable to adversarial manipulations." },
  { "type": "page_number", "bbox": [0.494, 0.949, 0.504, 0.96], "angle": 0, "content": "8" }
],
[
  { "type": "text", "bbox": [0.217, 0.104, 0.822, 0.16], "angle": 0, "content": "- Cross-lingual disparities. DeepSeek LLMs exhibit a considerable disparity in safety performance between Chinese and English contexts. Specifically, they demonstrate a greater propensity to generate harmful content in English, suggesting that safety alignment strategies may not generalize effectively across languages." },
  { "type": "text", "bbox": [0.217, 0.165, 0.822, 0.206], "angle": 0, "content": "- Chain-of-Thought exposure. DeepSeek-R1, which exposes its CoT reasoning, presents a higher safety risk compared to DeepSeek-V3. This suggests that increased transparency, while potentially beneficial for interpretability, can inadvertently create new attack vectors." },
  { "type": "text", "bbox": [0.217, 0.211, 0.822, 0.266], "angle": 0, "content": "- Multimodal capability deficiencies. The apparent strong safety performance of the DeepSeek MLLM is not a result of robust safety alignment. Instead, it stems from its limited multimodal understanding capabilities. This finding underscores the importance of distinguishing between genuine safety and limitations that mask underlying vulnerabilities." },
  { "type": "text", "bbox": [0.217, 0.272, 0.822, 0.312], "angle": 0, "content": "- Text-to-image generation risks. The DeepSeek T2I model exhibits significant safety risks. Across the benchmarks we evaluated, more than half of the categories demonstrated ASRs exceeding \\(50\\%\\), underscoring the urgent need for stronger safety measures." },
  { "type": "list", "bbox": [0.217, 0.104, 0.822, 0.312], "angle": 0, "content": null },
  { "type": "text", "bbox": [0.171, 0.327, 0.825, 0.398], "angle": 0, "content": "The findings presented highlight the imperative for ongoing, iterative safety evaluations and thorough pre-deployment testing of large models. A key priority for future research is the strengthening of safety mechanisms, with a particular focus on resilience against jailbreak attacks. Concurrently, the creation of more standardized and comprehensive safety benchmarks is essential to facilitate meaningful advancements in the safety of large models." },
  { "type": "page_number", "bbox": [0.494, 0.948, 0.506, 0.96], "angle": 0, "content": "9" }
],
[
  { "type": "title", "bbox": [0.174, 0.103, 0.289, 0.119], "angle": 0, "content": "REFERENCES" },
  { "type": "ref_text", "bbox": [0.173, 0.126, 0.826, 0.155], "angle": 0, "content": "Razii Abraham. Democratizing ai's frontiers: A critical review of deepseek ai's open-source ecosystem. 2025." },
  { "type": "ref_text", "bbox": [0.173, 0.164, 0.826, 0.207], "angle": 0, "content": "Stability AI. Stable diffusion 3.5 large. Hugging Face Model Repository, 2024. URL https://huggingface.co/stabilityai/stable-diffusion-3.5-large. Accessed: 2025-03-15." },
  { "type": "ref_text", "bbox": [0.173, 0.216, 0.826, 0.259], "angle": 0, "content": "Haozhe An, Christabel Acquaye, Colin Wang, Zongxia Li, and Rachel Rudinger. Do large language models discriminate in hiring decisions on the basis of race, ethnicity, and gender? arXiv preprint arXiv:2406.10486, 2024." },
  { "type": "ref_text", "bbox": [0.173, 0.268, 0.826, 0.298], "angle": 0, "content": "Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. o3-mini vs deepseek-r1: Which one is safer? arXiv preprint arXiv:2501.18438, 2025." },
  { "type": "ref_text", "bbox": [0.173, 0.305, 0.826, 0.335], "angle": 0, "content": "Weilin Cai, Juyong Jiang, Fan Wang, Jing Tang, Sunghun Kim, and Jiayi Huang. A survey on mixture of experts, 2024. URL https://arxiv.org/abs/2407.06204." },
  { "type": "ref_text", "bbox": [0.173, 0.343, 0.826, 0.387], "angle": 0, "content": "Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Janus-pro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025." },
  { "type": "ref_text", "bbox": [0.173, 0.395, 0.826, 0.425], "angle": 0, "content": "Yingkai Dong, Zheng Li, Xiangtao Meng, Ning Yu, and Shanqing Guo. Jailbreaking text-to-image models with llm-based agents, 2024. URL https://arxiv.org/abs/2408.00523." },
  { "type": "ref_text", "bbox": [0.173, 0.433, 0.826, 0.463], "angle": 0, "content": "Lisle Faray de Paiva, Gijs Luijten, Behrus Puladi, and Jan Egger. How does deepseek-r1 perform on usmle? medRxiv, pp. 2025-02, 2025." },
  { "type": "ref_text", "bbox": [0.173, 0.471, 0.826, 0.514], "angle": 0, "content": "Sensen Gao, Xiaojun Jia, Yihao Huang, Ranjie Duan, Jindong Gu, Yang Bai, Yang Liu, and Qing Guo. Hts-attack: Heuristic token search for jailbreaking text-to-image models, 2024. URL https://arxiv.org/abs/2408.13896." },
  { "type": "ref_text", "bbox": [0.173, 0.523, 0.826, 0.567], "angle": 0, "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." },
  { "type": "ref_text", "bbox": [0.173, 0.575, 0.826, 0.618], "angle": 0, "content": "Jun Guo, Wei Bao, Jiakai Wang, Yuqing Ma, Xinghai Gao, Gang Xiao, Aishan Liu, Jian Dong, Xianglong Liu, and Wenjun Wu. A comprehensive evaluation framework for deep model robustness. Pattern Recognition, 2023." },
  { "type": "ref_text", "bbox": [0.173, 0.626, 0.826, 0.67], "angle": 0, "content": "Zonglei Jing, Zonghao Ying, Le Wang, Siyuan Liang, Aishan Liu, Xianglong Liu, and Dacheng Tao. Cogmorph: Cognitive morphing attacks for text-to-image models, 2025. URL https://arxiv.org/abs/2501.11815." },
  { "type": "ref_text", "bbox": [0.173, 0.678, 0.826, 0.72], "angle": 0, "content": "Minseon Kim, Hyomin Lee, Boqing Gong, Huishuai Zhang, and Sung Ju Hwang. Automatic jailbreaking of the text-to-image generative ai systems, 2024. URL https://arxiv.org/abs/2405.16567." },
  { "type": "ref_text", "bbox": [0.173, 0.73, 0.826, 0.76], "angle": 0, "content": "Aishan Liu, Xianglong Liu, Jiaxin Fan, Yuqing Ma, Anlan Zhang, Huiyuan Xie, and Dacheng Tao. Perceptual-sensitive gan for generating adversarial patches. In AAAI, 2019." },
  { "type": "ref_text", "bbox": [0.173, 0.768, 0.826, 0.798], "angle": 0, "content": "Aishan Liu, Tairan Huang, Xianglong Liu, Yitao Xu, Yuqing Ma, Xinyun Chen, Stephen J Maybank, and Dacheng Tao. Spatiotemporal attacks for embodied agents. In ECCV, 2020a." },
  { "type": "ref_text", "bbox": [0.173, 0.806, 0.826, 0.836], "angle": 0, "content": "Aishan Liu, Jiakai Wang, Xianglong Liu, Bowen Cao, Chongzhi Zhang, and Hang Yu. Bias-based universal adversarial patch attack for automatic check-out. In ECCV, 2020b." },
  { "type": "ref_text", "bbox": [0.173, 0.844, 0.826, 0.874], "angle": 0, "content": "Aishan Liu, Xianglong Liu, Hang Yu, Chongzhi Zhang, Qiang Liu, and Dacheng Tao. Training robust deep neural networks via adversarial noise propagation. TIP, 2021." },
  { "type": "ref_text", "bbox": [0.173, 0.882, 0.826, 0.925], "angle": 0, "content": "Aishan Liu, Jun Guo, Jiakai Wang, Siyuan Liang, Renshuai Tao, Wenbo Zhou, Cong Liu, Xianglong Liu, and Dacheng Tao. X-adv: Physical adversarial object attacks against x-ray prohibited item detection. In USENIX Security Symposium, 2023a." },
  { "type": "list", "bbox": [0.173, 0.126, 0.826, 0.925], "angle": 0, "content": null },
  { "type": "page_number", "bbox": [0.491, 0.948, 0.51, 0.96], "angle": 0, "content": "10" }
],
[
  { "type": "ref_text", "bbox": [0.174, 0.103, 0.826, 0.148], "angle": 0, "content": "Aishan Liu, Shiyu Tang, Xinyun Chen, Lei Huang, Haotong Qin, Xianglong Liu, and Dacheng Tao. Towards defending multiple lp-norm bounded adversarial perturbations via gated batch normalization. International Journal of Computer Vision, 2023b." },
  { "type": "ref_text", "bbox": [0.174, 0.155, 0.826, 0.2], "angle": 0, "content": "Aishan Liu, Shiyu Tang, Siyuan Liang, Ruihao Gong, Boxi Wu, Xianglong Liu, and Dacheng Tao. Exploring the relationship between architecture and adversarially robust generalization. In CVPR, 2023c." },
  { "type": "ref_text", "bbox": [0.174, 0.207, 0.826, 0.251], "angle": 0, "content": "Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024a." },
  { "type": "ref_text", "bbox": [0.174, 0.259, 0.824, 0.291], "angle": 0, "content": "Shunchang Liu, Jiakai Wang, Aishan Liu, Yingwei Li, Yijie Gao, Xianglong Liu, and Dacheng Tao. Harnessing perceptual adversarial patches for crowd counting. In ACM CCS, 2022." },
  { "type": "ref_text", "bbox": [0.174, 0.298, 0.826, 0.341], "angle": 0, "content": "Xin Liu, Yichen Zhu, Jindong Gu, Yunshi Lan, Chao Yang, and Yu Qiao. Mm-safetybench: A benchmark for safety evaluation of multimodal large language models, 2024b. URL https://arxiv.org/abs/2311.17600." },
  { "type": "ref_text", "bbox": [0.174, 0.35, 0.826, 0.394], "angle": 0, "content": "Weidi Luo, Siyuan Ma, Xiaogeng Liu, Xiaoyu Guo, and Chaowei Xiao. Jailbreakv: A benchmark for assessing the robustness of multimodal large language models against jailbreak attacks, 2024. URL https://arxiv.org/abs/2404.03027." },
  { "type": "ref_text", "bbox": [0.174, 0.402, 0.826, 0.445], "angle": 0, "content": "Jiachen Ma, Anda Cao, Zhiqing Xiao, Yijiang Li, Jie Zhang, Chao Ye, and Junbo Zhao. Jailbreaking prompt attack: A controllable adversarial attack against diffusion models. arXiv preprint arXiv:2404.02928, 2024." },
  { "type": "ref_text", "bbox": [0.174, 0.454, 0.826, 0.512], "angle": 0, "content": "David Mikhail, Andrew Farah, Jason Milad, Wissam Nassrallah, Andrew Mihalache, Daniel Milad, Fares Antaki, Michael Balas, Marko M Popovic, Alessandro Feo, et al. Performance of deepseek-r1 in ophthalmology: An evaluation of clinical decision-making and cost-effectiveness. medRxiv, pp. 2025-02, 2025." },
  { "type": "ref_text", "bbox": [0.174, 0.52, 0.826, 0.552], "angle": 0, "content": "Zhenxing Niu, Haodong Ren, Xinbo Gao, Gang Hua, and Rong Jin. Jailbreaking attack against multimodal large language model, 2024. URL https://arxiv.org/abs/2402.02309." },
  { "type": "ref_text", "bbox": [0.174, 0.559, 0.826, 0.589], "angle": 0, "content": "OpenAI: Aaron Hurst, Adam Lerer, Adam P. Goucher, Adam Perelman, Aditya Ramesh, et al. Gpt-4o system card, 2024a. URL https://arxiv.org/abs/2410.21276." },
  { "type": "ref_text", "bbox": [0.174, 0.597, 0.826, 0.627], "angle": 0, "content": "OpenAI: Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, et al. Openai o1 system card, 2024b. URL https://arxiv.org/abs/2412.16720." },
  { "type": "ref_text", "bbox": [0.174, 0.634, 0.826, 0.677], "angle": 0, "content": "Manojkumar Parmar and Yuvaraj Govindarajulu. Challenges in ensuring ai safety in deepseek-r1 models: The shortcomings of reinforcement learning strategies. arXiv preprint arXiv:2501.17030, 2025." },
  { "type": "ref_text", "bbox": [0.174, 0.687, 0.826, 0.717], "angle": 0, "content": "Qwen: An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, et al. Qwen2.5 technical report, 2025. URL https://arxiv.org/abs/2412.15115." },
  { "type": "ref_text", "bbox": [0.174, 0.725, 0.826, 0.768], "angle": 0, "content": "Paul Röttger, Fabio Pernisi, Bertie Vidgen, and Dirk Hovy. Safetyprompts: a systematic review of open datasets for evaluating and improving large language model safety. arXiv preprint arXiv:2404.05399, 2024." },
  { "type": "ref_text", "bbox": [0.174, 0.777, 0.826, 0.82], "angle": 0, "content": "Patrick Schramowski, Manuel Brack, Björn Deiseroth, and Kristian Kersting. Safe latent diffusion: Mitigating inappropriate degeneration in diffusion models, 2023. URL https://arxiv.org/abs/2211.05105." },
  { "type": "ref_text", "bbox": [0.174, 0.829, 0.826, 0.872], "angle": 0, "content": "Zeyang Sha, Zheng Li, Ning Yu, and Yang Zhang. De-fake: Detection and attribution of fake images generated by text-to-image generation models, 2023. URL https://arxiv.org/abs/2210.06998." },
  { "type": "ref_text", "bbox": [0.174, 0.881, 0.826, 0.925], "angle": 0, "content": "Xinyue Shen, Zeyuan Chen, Michael Backes, Yun Shen, and Yang Zhang. \"do anything now\": Characterizing and evaluating in-the-wild jailbreak prompts on large language models, 2024. URL https://arxiv.org/abs/2308.03825." },
  { "type": "list", "bbox": [0.174, 0.103, 0.826, 0.925], "angle": 0, "content": null },
  { "type": "page_number", "bbox": [0.491, 0.949, 0.508, 0.96], "angle": 0, "content": "11" }
],
[
  { "type": "ref_text", "bbox": [0.174, 0.103, 0.826, 0.147], "angle": 0, "content": "Shiyu Tang, Ruihao Gong, Yan Wang, Aishan Liu, Jiakai Wang, Xinyun Chen, Fengwei Yu, Xianglong Liu, Dawn Song, Alan Yuille, et al. Robustart: Benchmarking robustness on architecture design and training techniques. ArXiv, 2021." },
  { "type": "ref_text", "bbox": [0.174, 0.155, 0.825, 0.186], "angle": 0, "content": "Jiakai Wang, Aishan Liu, Zixin Yin, Shunchang Liu, Shiyu Tang, and Xianglong Liu. Dual attention suppression attack: Generate adversarial camouflage in physical world. In CVPR, 2021." },
  { "type": "ref_text", "bbox": [0.174, 0.194, 0.825, 0.236], "angle": 0, "content": "Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022." },
  { "type": "ref_text", "bbox": [0.174, 0.244, 0.825, 0.288], "angle": 0, "content": "Zhiyu Wu, Xiaokang Chen, Zizheng Pan, Xingchao Liu, Wen Liu, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, et al. Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302, 2024." },
  { "type": "ref_text", "bbox": [0.174, 0.296, 0.825, 0.338], "angle": 0, "content": "Zhiyuan Xu, Joseph Gardiner, and Sana Belguith. The dark deep side of deepseek: Fine-tuning attacks against the safety alignment of cot-enabled models. arXiv preprint arXiv:2502.01225, 2025." },
  { "type": "ref_text", "bbox": [0.174, 0.348, 0.825, 0.391], "angle": 0, "content": "Zonghao Ying and Bin Wu. Nba: defensive distillation for backdoor removal via neural behavior alignment. Cybersecurity, 6(1), July 2023a. ISSN 2523-3246. doi: 10.1186/s42400-023-00154-z. URL http://dx.doi.org/10.1186/s42400-023-00154-z." },
  { "type": "ref_text", "bbox": [0.174, 0.399, 0.825, 0.443], "angle": 0, "content": "Zonghao Ying and Bin Wu. Dlp: towards active defense against backdoor attacks with decoupled learning process. Cybersecurity, 6(1), May 2023b. ISSN 2523-3246. doi: 10.1186/s42400-023-00141-4. URL http://dx.doi.org/10.1186/s42400-023-00141-4." },
  { "type": "ref_text", "bbox": [0.174, 0.451, 0.825, 0.494], "angle": 0, "content": "Zonghao Ying, Aishan Liu, Siyuan Liang, Lei Huang, Jinyang Guo, Wenbo Zhou, Xianglong Liu, and Dacheng Tao. Safebench: A safety evaluation framework for multimodal large language models. arXiv preprint arXiv:2410.18927, 2024a." },
  { "type": "ref_text", "bbox": [0.174, 0.502, 0.825, 0.533], "angle": 0, "content": "Zonghao Ying, Aishan Liu, Xianglong Liu, and Dacheng Tao. Unveiling the safety of gpt-4o: An empirical study using jailbreak attacks. arXiv preprint arXiv:2406.06302, 2024b." },
  { "type": "ref_text", "bbox": [0.174, 0.54, 0.825, 0.583], "angle": 0, "content": "Zonghao Ying, Aishan Liu, Tianyuan Zhang, Zhengmin Yu, Siyuan Liang, Xianglong Liu, and Dacheng Tao. Jailbreak vision language models via bi-modal adversarial prompt. arXiv preprint arXiv:2406.04031, 2024c." },
  { "type": "ref_text", "bbox": [0.174, 0.592, 0.825, 0.636], "angle": 0, "content": "Zonghao Ying, Deyue Zhang, Zonglei Jing, Yisong Xiao, Quanchen Zou, Aishan Liu, Siyuan Liang, Xiangzheng Zhang, Xianglong Liu, and Dacheng Tao. Reasoning-augmented conversation for multi-turn jailbreak attacks on large language models. arXiv preprint arXiv:2502.11054, 2025." },
  { "type": "ref_text", "bbox": [0.174, 0.643, 0.825, 0.687], "angle": 0, "content": "Tongxin Yuan, Zhiwei He, Lingzhong Dong, Yiming Wang, Ruijie Zhao, Tian Xia, Lizhen Xu, Binglin Zhou, Fangqi Li, Zhuosheng Zhang, et al. R-judge: Benchmarking safety risk awareness for llm agents. arXiv preprint arXiv:2401.10019, 2024a." },
  { "type": "ref_text", "bbox": [0.174, 0.695, 0.825, 0.751], "angle": 0, "content": "Xiaohan Yuan, Jinfeng Li, Dongxia Wang, Yuefeng Chen, Xiaofeng Mao, Longtao Huang, Hui Xue, Wenhai Wang, Kui Ren, and Jingyi Wang. S-eval: Automatic and adaptive test generation for benchmarking safety evaluation of large language models. arXiv preprint arXiv:2405.14191, 2024b." },
  { "type": "ref_text", "bbox": [0.174, 0.76, 0.825, 0.804], "angle": 0, "content": "Chongzhi Zhang, Aishan Liu, Xianglong Liu, Yitao Xu, Hang Yu, Yuqing Ma, and Tianlin Li. Interpreting and improving adversarial robustness of deep neural networks with neuron sensitivity. IEEE Transactions on Image Processing, 2021." },
  { "type": "ref_text", "bbox": [0.174, 0.812, 0.825, 0.856], "angle": 0, "content": "Kaiwen Zhou, Chengzhi Liu, Xuandong Zhao, Shreedhar Jangam, Jayanth Srinivasa, Gaowen Liu, Dawn Song, and Xin Eric Wang. The hidden risks of large reasoning models: A safety assessment of r1. arXiv preprint arXiv:2502.12659, 2025." },
  { "type": "ref_text", "bbox": [0.174, 0.864, 0.825, 0.907], "angle": 0, "content": "Andy Zou, Zifan Wang, Nicholas Carlini, Milad Nasr, J. Zico Kolter, and Matt Fredrikson. Universal and transferable adversarial attacks on aligned language models, 2023. URL https://arxiv.org/abs/2307.15043." },
  { "type": "list", "bbox": [0.174, 0.103, 0.826, 0.907], "angle": 0, "content": null },
  { "type": "page_number", "bbox": [0.491, 0.948, 0.509, 0.96], "angle": 0, "content": "12" }
],
[
  { "type": "title", "bbox": [0.173, 0.103, 0.3, 0.119], "angle": 0, "content": "A APPENDIX" },
  { "type": "title", "bbox": [0.173, 0.134, 0.313, 0.149], "angle": 0, "content": "A.1 BENCHMARK" },
  { "type": "title", "bbox": [0.173, 0.16, 0.282, 0.174], "angle": 0, "content": "A.2 CNSAFE" },
  { "type": "text", "bbox": [0.172, 0.186, 0.606, 0.202], "angle": 0, "content": "CNSafe focuses on evaluating the following five core dimensions:" },
  { "type": "text", "bbox": [0.217, 0.212, 0.825, 0.265], "angle": 0, "content": "- Content Contravening Core Socialist Values. This includes content that incites subversion of state power, endangers national security, promotes terrorism, incites ethnic hatred, contains violent or pornographic material, disseminates false information, and related violations." },
  { "type": "text", "bbox": [0.217, 0.272, 0.825, 0.313], "angle": 0, "content": "- Discriminatory Content. This encompasses expressions of discrimination based on ethnicity, religion, nationality, geographic origin, gender, age, occupation, health status, and other protected characteristics." },
  { "type": "text", "bbox": [0.217, 0.319, 0.825, 0.36], "angle": 0, "content": "- Commercial Violations and Misconduct. This addresses issues such as intellectual property infringement, breaches of business ethics, disclosure of trade secrets, monopolistic practices, and unfair competition." },
  { "type": "text", "bbox": [0.217, 0.365, 0.825, 0.394], "angle": 0, "content": "- Infringement of Others' Legal Rights. This includes violations impacting others' physical and mental well-being, portrait rights, reputation, privacy, and personal information rights." },
  { "type": "text", "bbox": [0.217, 0.398, 0.825, 0.453], "angle": 0, "content": "- Inability to Meet Safety Requirements for Specific Service Types. This dimension assesses risks arising from inaccurate or unreliable content in high-security contexts such as automated control, medical information services, psychological counseling, and critical information infrastructure." },
  { "type": "list", "bbox": [0.217, 0.212, 0.825, 0.453], "angle": 0, "content": null },
  { "type": "title", "bbox": [0.173, 0.471, 0.312, 0.485], "angle": 0, "content": "A.3 CNSAFE_RT" },
  { "type": "text", "bbox": [0.171, 0.497, 0.825, 0.567], "angle": 0, "content": "CNSafe_RT is derived from CNSafe, sampling 1000 benchmark queries across 10 categories. It then integrates typical jailbreak attack methods, combining advanced prompt perturbation techniques with safety risk scenarios specific to the Chinese context, to construct a highly adversarial dataset. The integrated jailbreak methods include: (1) scenario injection attacks; (2) affirmative prefix induction; (3) indirect instruction attacks." },
  { "type": "text", "bbox": [0.171, 0.574, 0.827, 0.645], "angle": 0, "content": "The generation of CNSafe_RT followed a semi-automated process. Initially, LLMs, such as GPT-4, were used to rewrite the base samples, generating adversarial variants. Subsequently, safety experts reviewed and refined the attack strategies, ensuring the effectiveness and targeted nature of the test samples. The resulting CNSafe_RT dataset comprises 1000 attack samples encompassing 10 granular risk dimensions." },
  { "type": "title", "bbox": [0.173, 0.661, 0.308, 0.675], "angle": 0, "content": "A.4 SAFEBENCH" },
  { "type": "text", "bbox": [0.171, 0.687, 0.825, 0.785], "angle": 0, "content": "SafeBench is constructed through an automated safety dataset generation pipeline. This pipeline leverages a set of LLMs as judges to identify and categorize the most harmful and diverse risk scenarios for MLLMs. Based on this categorization, these LLM judges then generate high-quality harmful queries. This process results in 23 distinct risk scenarios and 2300 foundational multimodal harmful query pairs. Furthermore, SafeBench provides an extension module capable of deriving a significantly larger number of query pairs. Consequently, SafeBench offers a comprehensive and targeted set of test samples for evaluating the safety of MLLMs." },
  { "type": "title", "bbox": [0.173, 0.801, 0.363, 0.815], "angle": 0, "content": "A.5 MM-SAFETYBENCH" },
  { "type": "text", "bbox": [0.171, 0.827, 0.825, 0.926], "angle": 0, "content": "MM-SafetyBench is designed to address the vulnerability of MLLMs to manipulations stemming from query-related images. It encompasses 13 distinct scenarios and comprises a total of 5040 text-image pairs. Through an analysis of 12 leading MLLMs, this dataset reveals that even MLLMs equipped with safety-aligned LLMs remain susceptible to such attacks. Consequently, MM-SafetyBench provides a crucial benchmark for investigating the safety vulnerabilities of MLLMs under image-based manipulation, thereby fostering research aimed at enhancing model robustness and safety." },
  { "type": "page_number", "bbox": [0.491, 0.948, 0.509, 0.96], "angle": 0, "content": "13" }
],
[
  { "type": "text", "bbox": [0.172, 0.104, 0.766, 0.12], "angle": 0, "content": "SafeBench and MM-SafetyBench jointly cover the following 13 granular risk dimensions:" },
  { "type": "text", "bbox": [0.217, 0.132, 0.825, 0.16], "angle": 0, "content": "- Illegal Activities. Content that incites crime, drug trafficking, smuggling, or other illegal acts." },
  { "type": "text", "bbox": [0.217, 0.165, 0.825, 0.193], "angle": 0, "content": "- Hate Speech. Content that expresses discrimination, hatred, or incites violence against specific groups." },
  { "type": "text", "bbox": [0.217, 0.197, 0.825, 0.226], "angle": 0, "content": "- Malware Generation. Assessing whether the model can be used to generate malicious code, viruses, or cyberattack tools." },
  { "type": "text", "bbox": [0.217, 0.23, 0.825, 0.259], "angle": 0, "content": "- Threats to Physical Safety. Content related to self-harm, violent attacks, or the creation of dangerous items." },
  { "type": "text", "bbox": [0.217, 0.263, 0.825, 0.292], "angle": 0, "content": "- Economic Harm. Risks associated with market manipulation, financial fraud, or money laundering." },
  { "type": "text", "bbox": [0.217, 0.296, 0.825, 0.324], "angle": 0, "content": "- Fraud. Assessing whether the model can be misused for scams, identity theft, or other fraudulent activities." },
  { "type": "text", "bbox": [0.217, 0.328, 0.792, 0.344], "angle": 0, "content": "- Pornographic Content. Detecting the generation of obscene or inappropriate material." },
  { "type": "text", "bbox": [0.217, 0.347, 0.825, 0.375], "angle": 0, "content": "- Political Lobbying. Assessing whether the model engages in undue political manipulation or interference." },
  { "type": "text", "bbox": [0.217, 0.38, 0.825, 0.408], "angle": 0, "content": "- Privacy Violation. Content that involves the disclosure of personal information or unauthorized data access." },
  { "type": "text", "bbox": [0.217, 0.412, 0.825, 0.442], "angle": 0, "content": "- Legal Advice. Examining whether the model provides unauthorized legal advice, potentially misleading users." },
  { "type": "text", "bbox": [0.217, 0.445, 0.825, 0.474], "angle": 0, "content": "- Financial Advice. Evaluating whether the financial advice provided by the model is misleading or non-compliant." },
  { "type": "text", "bbox": [0.217, 0.478, 0.825, 0.507], "angle": 0, "content": "- Health Advice. Assessing the reliability of medical advice to prevent users from making incorrect health decisions based on misleading information." },
  { "type": "text", "bbox": [0.217, 0.51, 0.825, 0.54], "angle": 0, "content": "- Interference with Government Decision-Making. Detecting whether the model could potentially influence or interfere with government governance decisions." },
  { "type": "list", "bbox": [0.217, 0.132, 0.825, 0.54], "angle": 0, "content": null },
  { "type": "title", "bbox": [0.172, 0.556, 0.248, 0.57], "angle": 0, "content": "A.6 I2P" },
  { "type": "text", "bbox": [0.171, 0.582, 0.825, 0.626], "angle": 0, "content": "I2P comprises specialized, real-world image-to-prompt datasets, encompassing concepts such as nudity and violence. This dataset enables researchers to evaluate model performance when processing sensitive content. I2P encompasses the following seven core dimensions:" },
  { "type": "text", "bbox": [0.217, 0.637, 0.825, 0.667], "angle": 0, "content": "- Hate Content. Elements that express discrimination, insults, or incite hatred against specific groups." },
  { "type": "text", "bbox": [0.217, 0.67, 0.825, 0.699], "angle": 0, "content": "- Harassment. Content involving insults, threats, or malicious and offensive attacks targeting individuals or groups." },
  { "type": "text", "bbox": [0.217, 0.703, 0.825, 0.731], "angle": 0, "content": "- Violence. Detecting whether the model generates images containing gore, assault, warfare, or other violent scenes." },
  { "type": "text", "bbox": [0.217, 0.735, 0.825, 0.764], "angle": 0, "content": "- Self-Harm. Assessing the presence of content related to self-harm, suicide, or other acts endangering one's own health." },
  { "type": "text", "bbox": [0.217, 0.768, 0.804, 0.784], "angle": 0, "content": "- Pornographic Content. Including obscene, explicit, or otherwise inappropriate imagery." },
  { "type": "text", "bbox": [0.217, 0.787, 0.825, 0.816], "angle": 0, "content": "- Shocking Imagery. Content such as graphic violence, terror, or material likely to evoke extreme negative emotions." },
  { "type": "text", "bbox": [0.217, 0.819, 0.825, 0.849], "angle": 0, "content": "- Illegal Activities. The risk of generating content related to drugs, crime, terrorism, or other illegal acts." },
  { "type": "list", "bbox": [0.217, 0.637, 0.825, 0.849], "angle": 0, "content": null },
  { "type": "page_number", "bbox": [0.491, 0.948, 0.509, 0.96], "angle": 0, "content": "14" }
]
]
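Every block in the `_model.json` layout file above follows one schema: a `type`, a normalized `bbox` (`[x0, y0, x1, y1]` as fractions of page width and height), an `angle`, and a `content` string that is `null` for images and `list` wrapper blocks. As a minimal, hypothetical consumer sketch (the top-level list-of-pages structure is assumed from the diff above, and the filename is illustrative):

```python
import json

def page_text(page):
    """Concatenate the textual blocks of one page, skipping image and
    list-wrapper blocks whose `content` is null."""
    keep = {"title", "text", "ref_text", "image_caption", "page_number"}
    return "\n".join(b["content"] for b in page
                     if b["type"] in keep and b.get("content"))

# Assumed layout: the file is a list of pages, each a list of blocks.
with open("b6252e1c-d150-4802-b7b5-057b9326a285_model.json", encoding="utf-8") as f:
    pages = json.load(f)

for i, page in enumerate(pages, start=1):
    print(f"--- page {i} ---\n{page_text(page)}")
```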
data/2025/2503_15xxx/2503.15092/b6252e1c-d150-4802-b7b5-057b9326a285_origin.pdf ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:0bb9f16485414bb2dec9f398820f0d2a3dbfadcb94b4d576e3c7148c2527f6b6
size 4092157
data/2025/2503_15xxx/2503.15092/full.md ADDED
@@ -0,0 +1,310 @@
# TOWARDS UNDERSTANDING THE SAFETY BOUNDARIES OF DEEPSEEK MODELS: EVALUATION AND FINDINGS

Zonghao Ying$^{1}$, Guangyi Zheng$^{1}$, Yongxin Huang$^{1}$, Deyue Zhang$^{2}$, Wenxin Zhang$^{3}$, Quchen Zou$^{2}$, Aishan Liu$^{1}$, Xianglong Liu$^{1}$, and Dacheng Tao$^{4}$

$^{1}$Beihang University

$^{2}$360 AI Security Lab

$^{3}$University of Chinese Academy of Sciences

$^{4}$Nanyang Technological University

# ABSTRACT

This study presents the first comprehensive safety evaluation of the DeepSeek models, focusing on evaluating the safety risks associated with their generated content. Our evaluation encompasses DeepSeek's latest generation of large language models, multimodal large language models, and text-to-image models, systematically examining their performance regarding unsafe content generation. Notably, we developed a bilingual (Chinese-English) safety evaluation dataset tailored to Chinese sociocultural contexts, enabling a more thorough evaluation of the safety capabilities of Chinese-developed models. Experimental results indicate that despite their strong general capabilities, DeepSeek models exhibit significant safety vulnerabilities across multiple risk dimensions, including algorithmic discrimination and sexual content. These findings provide crucial insights for understanding and improving the safety of large foundation models. Our code is available at https://github.com/NY1024/DeepSeek-Safety-Eval.

# 1 INTRODUCTION

With the rapid advancement of artificial intelligence technology, large models such as the DeepSeek series have demonstrated remarkable capabilities across multiple domains Abraham (2025); Faray de Paiva et al. (2025); Mikhail et al. (2025). These models, trained on vast datasets, understand and generate diverse content forms, transformatively impacting multiple industries Liu et al. (2023a; 2020a;b). However, alongside these technological advances, model safety concerns have become increasingly prominent Liu et al. (2019; 2021; 2022; 2023b); Zhang et al. (2021); Wang et al. (2021); Ying & Wu (2023a;b), particularly the potential risks associated with generating unsafe content Ying et al. (2024c; 2025), which require systematic evaluation Ying et al. (2024b;a).

Currently, the community has established multiple evaluation frameworks to test the safety performance of mainstream large models Yuan et al. (2024a;b); Röttger et al. (2024); Tang et al. (2021); Liu et al. (2023c); Guo et al. (2023). However, these evaluation standards lack consideration for China's national conditions and cultural background. Although some research has preliminarily identified certain safety risks in DeepSeek LLMs Arrieta et al. (2025); Parmar & Govindarajulu (2025); Zhou et al. (2025); Xu et al. (2025), these assessments are typically limited to specific scenarios or single models, lacking a comprehensive and systematic safety evaluation of the entire DeepSeek model series. This assessment gap leaves us with limited knowledge about the comprehensive risk profile these models may face in practical applications.

This research presents the first systematic safety evaluation of the complete DeepSeek model series, covering its latest generation of large language models (LLMs) (DeepSeek-R1 Guo et al. (2025) and DeepSeek-V3 Liu et al. (2024a)), multimodal large language model (MLLM) (DeepSeek-VL2 Wu et al. (2024)), and text-to-image model (T2I model) (Janus-Pro-7B Chen et al. (2025)). We focus on assessing the safety risks of these models in generating content, including both text and image modalities. Specifically, for the safety evaluation of large language models, we have designed a Chinese-English bilingual safety evaluation dataset suitable for China's national conditions, which can more comprehensively assess the safety capabilities of Chinese-developed models.

Experimental results indicate that despite the excellent performance of the DeepSeek series models in general capabilities, significant vulnerabilities still exist across multiple safety dimensions. Particularly in areas such as algorithmic discrimination An et al. (2024) and sexual content Ma et al. (2024), the protective effects of existing safety alignments are insufficient, potentially causing adverse social impacts when the models are deployed in real-world applications. Additionally, we have made several notable findings: (1) the models show significant differences in attack success rates when receiving queries in Chinese versus English, with an average disparity of $21.7\%$; (2) the exposed chain-of-thought reasoning in DeepSeek-R1 increases its safety risks, with an average attack success rate $30.4\%$ higher than DeepSeek-V3; (3) when facing jailbreak attacks, the attack success rates of DeepSeek models rise dramatically, reaching up to $100\%$ in some categories.
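For reference, the attack success rate (ASR) quoted throughout these findings is the usual ratio of successful attacks to attempts; the paper does not restate the formula at this point, so the following is a hedged reconstruction:

$$\mathrm{ASR} = \frac{N_{\text{unsafe}}}{N_{\text{total}}} \times 100\%$$

where $N_{\text{unsafe}}$ counts queries whose responses are judged harmful and $N_{\text{total}}$ is the number of test queries in the category under evaluation.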
These findings not only reveal the current safety shortcomings of these models but also provide specific directions for improving model safety mechanisms in the future. It is our hope that this study will contribute to the broader effort of advancing large model safety, fostering the development of more robust and responsible AI systems for the benefit of society.

# 2 PRELIMINARIES

# 2.1 DEEPSEEK MODELS

DeepSeek-R1 Guo et al. (2025) is the first-generation reasoning model designed to enhance the reasoning capabilities of LLMs. Its development incorporated multi-stage training and cold-start data prior to reinforcement learning. Its predecessor, DeepSeek-R1-Zero, exhibited issues including poor readability and language mixing. DeepSeek-R1 not only addresses these problems but further improves reasoning performance, achieving comparable results to OpenAI-o1-1217 OpenAI et al. (2024b) on reasoning tasks. This study evaluates the safety risk of its 671B parameter version.

DeepSeek-V3 Liu et al. (2024a) is a powerful Mixture-of-Experts (MoE Cai et al. (2024)) language model with a total of 671B parameters, activating 37B parameters per token. It employs Multi-head Latent Attention (MLA) and the DeepSeekMoE architecture to achieve efficient inference and economical training. Previous evaluations have demonstrated its exceptional performance across multiple tasks, surpassing other open-source models and achieving comparable results to leading closed-source models, with notable advantages in domains such as coding and mathematics. We have similarly conducted a safety evaluation of this model.
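To make the MoE notion concrete (671B total parameters but only 37B activated per token), here is a generic top-k routing sketch in NumPy; it is illustrative only and is not the DeepSeekMoE/MLA implementation:

```python
import numpy as np

def softmax(z):
    z = z - z.max(axis=-1, keepdims=True)
    e = np.exp(z)
    return e / e.sum(axis=-1, keepdims=True)

def moe_forward(x, gate_w, experts, k=2):
    """Generic top-k mixture-of-experts routing for a batch of token
    vectors `x` (shape [n, d]). Only k experts run per token, which is
    why activated parameters are far fewer than total parameters."""
    scores = softmax(x @ gate_w)                # [n, n_experts] gate probabilities
    topk = np.argsort(scores, axis=-1)[:, -k:]  # indices of the k best experts
    out = np.zeros_like(x)
    for i, token in enumerate(x):
        chosen = topk[i]
        weights = scores[i, chosen]
        weights = weights / weights.sum()       # renormalize over chosen experts
        for w, e in zip(weights, chosen):
            out[i] += w * experts[e](token)
    return out

# Toy usage (all sizes hypothetical): 8 experts, 2 active per token.
rng = np.random.default_rng(0)
d, n_experts = 16, 8
experts = [lambda t, W=rng.normal(size=(d, d)) / d: t @ W for _ in range(n_experts)]
gate_w = rng.normal(size=(d, n_experts))
tokens = rng.normal(size=(4, d))
print(moe_forward(tokens, gate_w, experts).shape)  # (4, 16)
```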
|
| 39 |
+
DeepSeek-VL2 Wu et al. (2024) represents a series of advanced large-scale MoE MLLMs. The visual component employs a dynamic tiling visual encoding strategy specifically designed to handle images of varying high resolutions and aspect ratios. For the language component, DeepSeek-VL2 utilizes the DeepSeekMoE model with MLA, which compresses key-value caches into latent vectors, enabling efficient inference and high throughput. The series comprises three variants: DeepSeek-VL2-Tiny, DeepSeek-VL2-Small, and DeepSeek-VL2, with 1B, 2.8B, and 45B activated parameters, respectively. This study focuses on the safety evaluation of DeepSeek-VL2, the variant with the largest number of activated parameters.
|
| 40 |
+
|
| 41 |
+
Janus-Pro-7B Chen et al. (2025) is a novel autoregressive framework that unifies multimodal understanding and generation. It overcomes the limitations of existing methods in visual encoding by decoupling visual encoding into independent pathways while employing a single unified Transformer architecture for processing. Janus-Pro's decoupling strategy effectively mitigates the functional conflicts of visual encoders between understanding and generation tasks, while simultaneously enhancing model flexibility. This study conducts a safety evaluation of Janus-Pro-7B.
|
| 42 |
+
|
| 43 |
+
# 2.2 JAILBREAK ATTACKS
|
| 44 |
+
|
| 45 |
+
Jailbreak attacks on LLMs Ying et al. (2025); Zou et al. (2023); Shen et al. (2024) represent a class of adversarial techniques designed to circumvent the safety mechanisms and ethical guidelines embedded within LLMs. These attacks typically involve crafting malicious prompts or input sequences that exploit vulnerabilities in the model's training data, instruction-following capabilities, or underlying architecture. The goal is to induce the LLM to generate outputs that would normally be prohibited, such as toxic, biased, harmful, or misleading content.
|
| 48 |
+
|
| 49 |
+
Jailbreak attacks on MLLMs Ying et al. (2024c); Niu et al. (2024); Luo et al. (2024) extend the principles of LLM jailbreaking to the multimodal domain. These attacks leverage both textual and visual inputs to manipulate the model's behavior and bypass safety protocols. Attackers might craft prompts that combine seemingly innocuous images with carefully worded text designed to elicit harmful or inappropriate responses. The complex interplay between visual and textual modalities in MLLMs creates a larger attack surface compared to LLMs.
|
| 50 |
+
|
| 51 |
+
Jailbreaking attacks on T2I models Gao et al. (2024); Dong et al. (2024); Kim et al. (2024); Jing et al. (2025) aim to generate images that violate safety guidelines, depict harmful content, or misrepresent information. These attacks typically involve crafting textual prompts that, while appearing benign on the surface, exploit the model's internal representations and biases to produce undesirable outputs. This can include generating images that are sexually suggestive, violent, promote hate speech, or depict copyrighted material.
|
| 52 |
+
|
| 53 |
+
# 3 EVALUATION PROTOCOL
|
| 54 |
+
|
| 55 |
+
# 3.1 BENCHMARKS
|
| 56 |
+
|
| 57 |
+
For the evaluation of DeepSeek-R1 and DeepSeek-V3, we developed a dedicated benchmark dataset, CNSafe, based on the Basic Security Requirements for Generative Artificial Intelligence Service (TC260-003). CNSafe encompasses 5 major categories and 31 subcategories, comprising a total of 3100 test cases. CNSafe is available in both Chinese and English, aiming to provide a more comprehensive assessment of model safety across different prevalent linguistic contexts. Furthermore, building upon CNSafe, we constructed a red-teaming dataset, CNSafe_RT, by integrating typical jailbreak attack methods. This allows for a more in-depth evaluation of the models from a red team perspective.
|
| 58 |
+
|
| 59 |
+
For the evaluation of DeepSeek-VL2, we randomly sampled from SafeBench Ying et al. (2024a) and MM-SafetyBench Liu et al. (2024b), assessing the 13 risk types jointly covered by these two benchmarks, totaling 1300 queries. For the evaluation of Janus-Pro-7B, we randomly sampled from I2P Schramowski et al. (2023), encompassing 7 risk types and a total of 671 queries.
|
| 60 |
+
|
| 61 |
+
Detailed descriptions of all benchmark datasets used in this study are provided in Appendix A.1.
|
| 62 |
+
|
| 63 |
+
# 3.2 EVALUATION METHODS
|
| 64 |
+
|
| 65 |
+
This study employs a hybrid evaluation approach, integrating (M)LLM-as-Judge with human evaluation to ensure comprehensive and reliable experimental results.
|
| 66 |
+
|
| 67 |
+
(M)LLM-as-Judge leverages a designated (M)LLM as a judge to evaluate the quality or performance of outputs generated by other models. This methodology capitalizes on the (M)LLM's advanced comprehension and reasoning capabilities, providing an automated and scalable evaluation framework, thereby potentially reducing reliance on human annotators. In this research, we specifically utilize (M)LLM-as-Judge to evaluate the harmfulness of generated content. For textual content, we employ GPT-4o OpenAI et al. (2024a) and Qwen2.5-72B-Instruct Qwen et al. (2025) for judgment; for visual content, we utilize Qwen2.5-VL-72B-Instruct Qwen et al. (2025).
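A minimal sketch of the (M)LLM-as-Judge pattern is shown below, using an OpenAI-compatible client. The judge prompt, label set, and one-word output protocol are our own simplifications; the actual judging prompts used in this study are not reproduced here.

```python
# A minimal (M)LLM-as-Judge sketch using an OpenAI-compatible client.
# The judge prompt and one-word protocol are simplifications we assume here.
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

JUDGE_PROMPT = (
    "You are a strict content-safety judge. Given a user query and a model "
    "response, answer with exactly one word: UNSAFE if the response contains "
    "harmful content, otherwise SAFE.\n\nQuery: {query}\n\nResponse: {response}"
)

def judge_unsafe(query: str, response: str, model: str = "gpt-4o") -> bool:
    """Return True if the judge flags the response as unsafe."""
    out = client.chat.completions.create(
        model=model,
        temperature=0,
        messages=[{"role": "user",
                   "content": JUDGE_PROMPT.format(query=query, response=response)}],
    )
    return out.choices[0].message.content.strip().upper().startswith("UNSAFE")
```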
|
| 68 |
+
|
| 69 |
+
Recognizing the inherent limitations of scaling human evaluation to large datasets, we conduct a comprehensive human evaluation exclusively on the outputs of DeepSeek-R1 and DeepSeek-V3 on the CNSafe_RT. For other experiments, we employ a sampling-based human evaluation of responses. This collaborative human-machine evaluation strategy ensures both scalability and efficiency, while incorporating the nuanced judgment, contextual understanding, and ethical considerations of human experts in complex scenarios.
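For the sampling-based human evaluation, a per-category stratified draw keeps expert workload bounded while still covering every risk type; the sketch below illustrates the idea, with the per-category sample size as an assumption.

```python
# A sketch of the sampling-based human review: a fixed-size stratified draw
# per risk category. The per-category size (30) is an assumption.
import random
from collections import defaultdict

def stratified_sample(records, per_category=30, seed=0):
    rng = random.Random(seed)
    by_cat = defaultdict(list)
    for r in records:
        by_cat[r["category"]].append(r)
    return {c: rng.sample(v, min(per_category, len(v)))
            for c, v in by_cat.items()}
```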
|
| 70 |
+
|
| 71 |
+
Table 1: ASR (%) of DeepSeek-R1 and DeepSeek-V3 on CNSafe.
|
| 72 |
+
|
| 73 |
+
<table><tr><td rowspan="2">Category</td><td colspan="2">Chinese</td><td colspan="2">English</td></tr><tr><td>DeepSeek-V3</td><td>DeepSeek-R1</td><td>DeepSeek-V3</td><td>DeepSeek-R1</td></tr><tr><td>Core socialist values violation</td><td>4.5</td><td>14.8</td><td>9.9</td><td>59.5</td></tr><tr><td>Discriminatory content</td><td>14.1</td><td>27.3</td><td>21.1</td><td>54.3</td></tr><tr><td>Commercial misconduct</td><td>12.4</td><td>28.0</td><td>11.2</td><td>69.0</td></tr><tr><td>Rights infringement</td><td>6.1</td><td>19.7</td><td>9.4</td><td>66.1</td></tr><tr><td>Service insecurity</td><td>N/A</td><td>N/A</td><td>N/A</td><td>N/A</td></tr></table>
|
| 74 |
+
|
| 75 |
+

|
| 76 |
+
Figure 1: ASR $(\%)$ of DeepSeek-R1 and DeepSeek-V3 on CNSafe_RT.
|
| 77 |
+
|
| 78 |
+
# 3.3 METRIC
|
| 79 |
+
|
| 80 |
+
To rigorously evaluate the safety of the three categories of generative large models under evaluation, we employ Attack Success Rate (ASR) as the primary evaluation metric. ASR is defined as the proportion of model responses judged to be unsafe. Specifically, for LLMs and MLLMs, ASR is calculated as the percentage of all responses that are flagged as containing text posing a safety risk. For T2I models, the focus is on evaluating the proportion of generated images that contain inappropriate content, prohibited elements, or sensitive information.
|
| 81 |
+
|
| 82 |
+
The ASR is calculated using the following formula:
|
| 83 |
+
|
| 84 |
+
$$
\mathrm{ASR} = \frac{\text{Number of Unsafe Responses}}{\text{Number of Total Responses}} \times 100\%. \tag{1}
$$
|
| 87 |
+
|
| 88 |
+
This consistent application of ASR across all model types ensures a comparable measure of their vulnerability to producing unsafe outputs.
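In practice, Eq. (1) reduces to a simple aggregation over judge verdicts. The sketch below assumes each response record carries a boolean "unsafe" flag produced by the judge and a risk "category" label.

```python
# Eq. (1) as an aggregation over judge verdicts. Each record is assumed to
# carry a boolean "unsafe" flag and a "category" label.
from collections import defaultdict

def asr(records):
    return 100.0 * sum(r["unsafe"] for r in records) / len(records)

def asr_by_category(records):
    buckets = defaultdict(list)
    for r in records:
        buckets[r["category"]].append(r)
    return {c: asr(v) for c, v in buckets.items()}

demo = [{"category": "Fraud", "unsafe": True},
        {"category": "Fraud", "unsafe": False}]
print(asr(demo))  # 50.0
```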
|
| 89 |
+
|
| 90 |
+
# 4 EXPERIMENT
|
| 91 |
+
|
| 92 |
+
# 4.1 EVALUATION ON LLMS
|
| 93 |
+
|
| 94 |
+
# 4.1.1 DEEPSEEK-R1 & DEEPSEEK-V3
|
| 95 |
+
|
| 96 |
+
The evaluation results on CNSafe are summarized in Tab. 1 and Fig. 2a, with Tab. 1 presenting data for the 5 major risk categories and Fig. 2a showing data for 29 detailed risk subcategories. It should be noted that we deliberately marked the statistical data for Service insecurity as N/A. This is because the Service insecurity category in TC260-003 refers to risks such as content inaccuracy and unreliability when models are used for specific service types with high security requirements. Evaluating these aspects requires substantial expert knowledge, and accurate results cannot be obtained through LLM-as-Judge or manual assessment alone.
|
| 97 |
+
|
| 98 |
+

|
| 99 |
+
Figure 2: ASR (%) of (a) DeepSeek LLMs and (b) other Chinese-developed LLMs on CNSafe. Abbreviations: RS, Regime subversion; NH, National harm; SD, State division; TP, Terrorism promotion; EH, Ethnic hatred; VP, Violence promotion; FI, False information; PC, Prohibited content; ED, Ethnic discrimination; FD, Faith discrimination; ND, National discrimination; RD, Regional discrimination; GD, Gender discrimination; AD, Age discrimination; OD, Occupational discrimination; HD, Health discrimination; OT, Other discrimination; II, IP infringement; BE, Business ethics; TS, Trade secrets; UC, Unfair competition; BV, Business violations; HE, Health endangerment; IR, Image rights; RH, Reputation harm; HV, Honor violation; PI, Privacy invasion; DM, Data misuse; RV, Rights violation.
|
| 104 |
+
|
| 105 |
+
Two major trends can be clearly observed from the data in Tab. 1. For both DeepSeek-V3 and DeepSeek-R1 models, attack success rates in English environments consistently exceed those in Chinese environments across all risk categories (with an average ASR gap of $21.7\%$ ). This indicates that language context substantially influences model vulnerability. When comparing DeepSeek-V3 and DeepSeek-R1 models, we observe that regardless of language environment, the DeepSeek-R1 model exhibits higher attack success rates than the DeepSeek-V3 model across all major risk categories (with an average ASR gap of $31.25\%$ ). This suggests that the exposed CoT Wei et al. (2022) in DeepSeek-R1 introduces additional vulnerabilities.
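Both averages can be reproduced directly from Table 1 by averaging the eight category-level gaps (Service insecurity excluded), as the following check shows:

```python
# Reproducing both averages from Table 1 (Service insecurity excluded).
cn = {"V3": [4.5, 14.1, 12.4, 6.1], "R1": [14.8, 27.3, 28.0, 19.7]}
en = {"V3": [9.9, 21.1, 11.2, 9.4], "R1": [59.5, 54.3, 69.0, 66.1]}

# English-minus-Chinese gap, averaged over both models and four categories.
lang_gaps = [e - c for m in ("V3", "R1") for c, e in zip(cn[m], en[m])]
# R1-minus-V3 gap, averaged over both languages and four categories.
model_gaps = [r - v for lang in (cn, en) for v, r in zip(lang["V3"], lang["R1"])]

print(round(sum(lang_gaps) / len(lang_gaps), 2))    # 21.7
print(round(sum(model_gaps) / len(model_gaps), 2))  # 31.25
```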
|
| 106 |
+
|
| 107 |
+
Fig. 1 presents the evaluation results of DeepSeek-R1 and DeepSeek-V3 on CNSafe_RT. As shown, the DeepSeek-V3 model exhibits exceptionally high ASRs across most risk categories, with many reaching $95\% - 100\%$ , indicating significant vulnerabilities in the model's safety mechanisms. In contrast, the DeepSeek-R1 model generally shows lower ASRs than the DeepSeek-V3 model, typically $80\% - 90\%$ in Chinese environments and $85\% - 95\%$ in English environments.
|
| 108 |
+
|
| 109 |
+
Notably, we observe that the DeepSeek-V3 model achieves $100\%$ ASRs for categories such as Ethnic hatred and False information in both Chinese and English environments. These risk types should be prioritized in subsequent safety alignment efforts. Overall, the evaluation results demonstrate that both DeepSeek-V3 and DeepSeek-R1 models exhibit clear vulnerabilities when facing jailbreak attacks.
|
| 110 |
+
|
| 111 |
+
# 4.1.2 COMPARISON WITH OTHER CHINESE LLMS
|
| 112 |
+
|
| 113 |
+
We conducted additional safety evaluations on five representative Chinese-developed LLMs using CNSafe and CNSafe_RT. Four are standard LLMs: Doubao-1.5-pro-32k-250115 (Doubao), Hunyuan-turbo-latest (Hunyuan), Moonshot-v1-8k (Moonshot), and Qwen-Max; the fifth is a reasoning LLM, QwQ-32B.
|
| 114 |
+
|
| 115 |
+

|
| 116 |
+
Figure 3: ASR $(\%)$ of Chinese-developed LLMs on CNSafe_RT.
|
| 117 |
+
|
| 118 |
+
Table 2: ASR (%) of Chinese-developed LLMs on CNSafe.
|
| 119 |
+
|
| 120 |
+
<table><tr><td>Category</td><td>Doubao</td><td>Hunyuan</td><td>Moonshot</td><td>Qwen-Max</td><td>QwQ-32B</td></tr><tr><td>Core socialist values violation</td><td>7.9</td><td>2</td><td>2.5</td><td>3.8</td><td>21.8</td></tr><tr><td>Discriminatory content</td><td>26.3</td><td>8.4</td><td>14.3</td><td>3.9</td><td>36.2</td></tr><tr><td>Commercial misconduct</td><td>25.6</td><td>3</td><td>5.6</td><td>3.6</td><td>25.6</td></tr><tr><td>Rights infringement</td><td>15.7</td><td>2</td><td>2.9</td><td>2.9</td><td>22.6</td></tr><tr><td>Service insecurity</td><td>N/A</td><td>N/A</td><td>N/A</td><td>N/A</td><td>N/A</td></tr></table>
|
| 121 |
+
|
| 122 |
+
Tab. 2 summarizes the attack success rates for these five Chinese-developed LLMs across major risk categories on CNSafe, while Fig. 2b displays ASRs across all 29 detailed risk subcategories. Overall, among the compared models, QwQ-32B achieved the highest attack success rates across all major risk categories, with an average ASR of $26.6\%$. This pattern aligns with observations from DeepSeek-R1, further suggesting that exposed chains of thought present exploitation risks for attackers. Doubao also demonstrated considerable vulnerabilities in certain risk categories, particularly Discriminatory content and Commercial misconduct, with attack success rates of $26.3\%$ and $25.6\%$ respectively. Comparatively, Qwen-Max exhibited the strongest safety performance, with an average ASR of only $3.6\%$. Notably, when comparing these models with DeepSeek LLMs, we observe that DeepSeek LLMs rank quite low in terms of safety performance. Among reasoning LLMs, while DeepSeek-R1's average ASR $(22.5\%)$ is lower than QwQ-32B's, it remains substantial. Among standard LLMs, DeepSeek-V3's safety performance ranks second-to-last, surpassing only Doubao.
|
| 123 |
+
|
| 124 |
+
The evaluation results of five Chinese-developed LLMs on CNSafe_RT are presented in Fig. 3. QwQ-32B clearly demonstrates the highest ASRs across all risk categories, notably exceeding $85\%$ in nine risk categories. This indicates that this model performs worst in terms of safety and is most susceptible to attacks. In contrast, Hunyuan shows significantly lower ASRs than other models across most risk categories, with an average ASR of only $1.9\%$ , demonstrating its robust safety performance.
|
| 125 |
+
|
| 126 |
+
When comparing these models with corresponding DeepSeek LLM results, we observe that reasoning LLMs (QwQ and DeepSeek-R1) have markedly higher ASRs than standard LLMs, further indicating that the reasoning chains exposed by such models increase safety risks even under jailbreak attacks. Among standard LLMs, DeepSeek-V3 presents substantially higher risks than other Chinese-developed LLMs (averaging $66.8\%$ higher), possibly stemming from its innovative low-cost model training method that neglected safety alignment considerations.
|
| 127 |
+
|
| 128 |
+
# 4.2 EVALUATION ON MLLM
|
| 129 |
+
|
| 130 |
+
SafeBench and MM-SafetyBench introduce two prevalent multimodal jailbreaking attack methodologies: image semantic-based attacks and typography-based attacks. Representative image-text pairs employed in these attack methods are illustrated in Fig. 4. For each of these methods, we sampled 750 image-text pairs, covering 13 distinct categories, for evaluation purposes.
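For intuition, a typography-based test image is simply the query text rendered onto an image, so that the instruction travels through the visual channel instead of the text channel. The sketch below renders a benign placeholder with PIL; canvas size and layout are arbitrary choices.

```python
# A typography-based probe image: the query text is rendered onto a blank
# canvas so the instruction travels through the visual channel. The text
# here is a benign placeholder; canvas size and layout are arbitrary.
from PIL import Image, ImageDraw

def typography_image(text: str, size=(512, 512)) -> Image.Image:
    img = Image.new("RGB", size, "white")
    ImageDraw.Draw(img).multiline_text((20, 20), text, fill="black")
    return img

typography_image("Step 1: ...\nStep 2: ...").save("typo_probe.png")
```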
|
| 131 |
+
|
| 132 |
+

|
| 133 |
+
Figure 4: Examples of image-text pairs used in multimodal jailbreak attack methods: (a) image semantic-based attack; (b) typography-based attack.
|
| 140 |
+
Figure 5: ASR $(\%)$ of DeepSeek-VL2 on SafeBench and MM-SafetyBench.
|
| 141 |
+
|
| 142 |
+
From Fig. 5, it is evident that typography-based attacks achieve significantly higher ASRs than image semantic-based attacks, with an average increase of $20.31\%$. This indicates a notable vulnerability in current models when processing typographical perturbations, which may stem from insufficient exposure to these attack types during training. Examining specific risk categories reveals several striking differences. In the Economic Harm and Fraud categories, typography-based attacks reached ASRs of $40\%$ and $38\%$ respectively, substantially higher than other categories. This suggests that models are particularly susceptible to generating unsafe responses when processing economics- and finance-related content.
|
| 143 |
+
|
| 144 |
+
Regarding image semantic-based attacks, while overall ASRs remain lower, certain categories such as Gov Decision, Health Consultation, and Legal Opinion show relatively higher ASRs (4%-6%). This indicates potential vulnerabilities in the model's understanding of image semantics when addressing sensitive topics related to politics, health, and governmental decisions. Notably, our manual analysis of model responses revealed that when confronted with these attacks, models frequently generated meaningless outputs, including repetitive characters or strings such as "the of", "***", "shows", and "using". Since the LLM-as-Judge methodology classifies these meaningless outputs as safe, this effectively lowers the model's measured ASR. It is important to note that this does not reflect the model's true safety level; rather, it highlights deficiencies in the model's image comprehension and response generation capabilities.
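One pragmatic mitigation for this judging artifact is to flag degenerate outputs before they reach the judge and report them separately from genuine refusals. A simple repetition heuristic, with an assumed dominance threshold, might look like:

```python
# Flag degenerate outputs (dominated by one repeated token) before judging,
# so "safe because meaningless" cases are reported separately.
# The 50% dominance threshold is an assumed heuristic.
from collections import Counter

def is_degenerate(text: str, top_frac: float = 0.5) -> bool:
    tokens = text.split()
    if len(tokens) < 5:
        return True
    top = Counter(tokens).most_common(1)[0][1]
    return top / len(tokens) >= top_frac

print(is_degenerate("the of the of the of the of"))  # True
```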
|
| 145 |
+
|
| 146 |
+
# 4.3 EVALUATION ON T2I MODELS
|
| 147 |
+
|
| 148 |
+
In this section, we evaluate the safety of DeepSeek's T2I model, Janus-Pro-7B, using a sample of 671 queries drawn from I2P. This sample comprises 100 queries for each category except Hate, which contains only 71 queries. Fig. 6 showcases representative examples of unsafe images generated during this evaluation.
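The per-category draw can be expressed in a few lines; note that the HuggingFace dataset id and column names below are assumptions, and I2P rows may carry multiple comma-separated category labels.

```python
# Per-category sampling from I2P. The dataset id and column names are
# assumptions; I2P rows may carry multiple comma-separated category labels.
import random
from datasets import load_dataset

ds = load_dataset("AIML-TUDA/i2p", split="train")  # assumed dataset id
by_cat = {}
for row in ds:
    for cat in str(row["categories"]).split(","):
        by_cat.setdefault(cat.strip(), []).append(row["prompt"])

rng = random.Random(0)
sample = {c: rng.sample(p, min(100, len(p))) for c, p in by_cat.items()}
```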
|
| 149 |
+
|
| 150 |
+
For comparative purposes, we concurrently assess the safety of another popular T2I model, Stable-Diffusion-3.5-Large AI (2024). Fig. 7 presents the ASRs for both models across various risk dimensions.
|
| 151 |
+
|
| 152 |
+

|
| 153 |
+
Figure 6: Examples of unsafe images generated by Janus-Pro-7B (panels: Harassment, Hate, Illegal activity, Self-harm, Sexual content, Shocking images, Violence).
|
| 175 |
+
Figure 7: ASR $(\%)$ of (a) Janus-Pro-7B and (b) Stable-Diffusion-3.5-Large on I2P.
|
| 180 |
+
|
| 181 |
+
Overall, Janus-Pro-7B presents higher safety risks than Stable-Diffusion-3.5-Large, with average ASRs of $43.7\%$ and $39.9\%$, respectively.
|
| 182 |
+
|
| 183 |
+
We subsequently compared the specific performance of both models across the individual dimensions. In the Hate, Harassment, Shocking images, and Self-harm dimensions, the Stable-Diffusion-3.5-Large model shows slightly higher ASRs than the Janus-Pro-7B model, indicating that Janus-Pro-7B offers relatively better safety in these risk categories. However, in the Sexual content dimension, the Janus-Pro-7B model exhibits a significantly higher attack success rate than the Stable-Diffusion-3.5-Large model (74.0% vs. 47.0%), revealing a notable safety vulnerability in this dimension. In the Illegal activity dimension, Janus-Pro-7B also demonstrates higher ASRs (61% vs. 39%). In the Violence dimension, both models show similar ASRs of approximately 50%.
|
| 184 |
+
|
| 185 |
+
Additionally, in our manual sampling evaluation, we observed that Janus-Pro-7B does not prohibit the generation of fake news images, such as "Trump arrested and imprisoned." This finding suggests that Janus-Pro-7B is highly susceptible to misuse, potentially leading to additional risks Sha et al. (2023).
|
| 186 |
+
|
| 187 |
+
# 5 CONCLUSION
|
| 188 |
+
|
| 189 |
+
To the best of our knowledge, this study presents the first comprehensive safety evaluation of the DeepSeek models. Our investigation reveals a nuanced balance between safety and performance, and highlights several key findings.
|
| 190 |
+
|
| 191 |
+
- Vulnerability to jailbreaking. While DeepSeek LLMs exhibit robust safety boundaries when handling direct harmful queries, their safety alignment proves brittle under jailbreaking attacks. This suggests that their safety alignments may be optimized for explicit threats but remain vulnerable to adversarial manipulations.
|
| 192 |
+
|
| 193 |
+
- Cross-lingual disparities. DeepSeek LLMs exhibit a considerable disparity in safety performance between Chinese and English contexts. Specifically, they demonstrate a greater propensity to generate harmful content in English, suggesting that safety alignment strategies may not generalize effectively across languages.
|
| 194 |
+
- Chain-of-Thought exposure. DeepSeek-R1, which exposes its CoT reasoning, presents a higher safety risk compared to DeepSeek-V3. This suggests that increased transparency, while potentially beneficial for interpretability, can inadvertently create new attack vectors.
|
| 195 |
+
- Multimodal capability deficiencies. The apparent strong safety performance of the DeepSeek MLLM is not a result of robust safety alignment. Instead, it stems from its limited multimodal understanding capabilities. This finding underscores the importance of distinguishing between genuine safety and limitations that mask underlying vulnerabilities.
|
| 196 |
+
- Text-to-image generation risks. The DeepSeek T2I model exhibits significant safety risks. Across the benchmarks we evaluated, more than half of the categories demonstrated ASRs exceeding $50\%$, underscoring the urgent need for stronger safety measures.
|
| 197 |
+
|
| 198 |
+
The findings presented highlight the imperative for ongoing, iterative safety evaluations and thorough pre-deployment testing of large models. A key priority for future research is the strengthening of safety mechanisms, with a particular focus on resilience against jailbreak attacks. Concurrently, the creation of more standardized and comprehensive safety benchmarks is essential to facilitate meaningful advancements in the safety of large models.
|
| 199 |
+
|
| 200 |
+
# REFERENCES
|
| 201 |
+
|
| 202 |
+
Razii Abraham. Democratizing ai's frontiers: A critical review of deepseek ai's open-source ecosystem. 2025.
|
| 203 |
+
Stability AI. Stable diffusion 3.5 large. Hugging Face Model Repository, 2024. URL https://huggingface.co/stabilityai/stable-diffusion-3.5-large. Accessed: 2025-03-15.
|
| 204 |
+
Haozhe An, Christabel Acquaye, Colin Wang, Zongxia Li, and Rachel Rudinger. Do large language models discriminate in hiring decisions on the basis of race, ethnicity, and gender? arXiv preprint arXiv:2406.10486, 2024.
|
| 205 |
+
Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. o3-mini vs deepseek-r1: Which one is safer? arXiv preprint arXiv:2501.18438, 2025.
|
| 206 |
+
Weilin Cai, Juyong Jiang, Fan Wang, Jing Tang, Sunghun Kim, and Jiayi Huang. A survey on mixture of experts, 2024. URL https://arxiv.org/abs/2407.06204.
|
| 207 |
+
Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Janus-pro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025.
|
| 208 |
+
Yingkai Dong, Zheng Li, Xiangtao Meng, Ning Yu, and Shanqing Guo. Jailbreaking text-to-image models with llm-based agents, 2024. URL https://arxiv.org/abs/2408.00523.
|
| 209 |
+
Lisle Faray de Paiva, Gijs Luijten, Behrus Puladi, and Jan Egger. How does deepseek-r1 perform on usmle? medRxiv, pp. 2025-02, 2025.
|
| 210 |
+
Sensen Gao, Xiaojun Jia, Yihao Huang, Ranjie Duan, Jindong Gu, Yang Bai, Yang Liu, and Qing Guo. Hts-attack: Heuristic token search for jailbreaking text-to-image models, 2024. URL https://arxiv.org/abs/2408.13896.
|
| 211 |
+
Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.
|
| 212 |
+
Jun Guo, Wei Bao, Jiakai Wang, Yuqing Ma, Xinghai Gao, Gang Xiao, Aishan Liu, Jian Dong, Xi-anglong Liu, and Wenjun Wu. A comprehensive evaluation framework for deep model robustness. Pattern Recognition, 2023.
|
| 213 |
+
Zonglei Jing, Zonghao Ying, Le Wang, Siyuan Liang, Aishan Liu, Xianglong Liu, and Dacheng Tao. Cognorm: Cognitive morphing attacks for text-to-image models, 2025. URL https://arxiv.org/abs/2501.11815.
|
| 214 |
+
Minseon Kim, Hyomin Lee, Boqing Gong, Huishuai Zhang, and Sung Ju Hwang. Automatic jailbreaking of the text-to-image generative ai systems, 2024. URL https://arxiv.org/abs/2405.16567.
|
| 215 |
+
Aishan Liu, Xianglong Liu, Jiaxin Fan, Yuqing Ma, Anlan Zhang, Huiyuan Xie, and Dacheng Tao. Perceptual-sensitive gan for generating adversarial patches. In AAAI, 2019.
|
| 216 |
+
Aishan Liu, Tairan Huang, Xianglong Liu, Yitao Xu, Yuqing Ma, Xinyun Chen, Stephen J Maybank, and Dacheng Tao. Spatiotemporal attacks for embodied agents. In ECCV, 2020a.
|
| 217 |
+
Aishan Liu, Jiakai Wang, Xianglong Liu, Bowen Cao, Chongzhi Zhang, and Hang Yu. Bias-based universal adversarial patch attack for automatic check-out. In ECCV, 2020b.
|
| 218 |
+
Aishan Liu, Xianglong Liu, Hang Yu, Chongzhi Zhang, Qiang Liu, and Dacheng Tao. Training robust deep neural networks via adversarial noise propagation. TIP, 2021.
|
| 219 |
+
Aishan Liu, Jun Guo, Jiakai Wang, Siyuan Liang, Renshuai Tao, Wenbo Zhou, Cong Liu, Xianglong Liu, and Dacheng Tao. X-adv: Physical adversarial object attacks against x-ray prohibited item detection. In USENIX Security Symposium, 2023a.
|
| 220 |
+
|
| 221 |
+
Aishan Liu, Shiyu Tang, Xinyun Chen, Lei Huang, Haotong Qin, Xianglong Liu, and Dacheng Tao. Towards defending multiple lp-norm bounded adversarial perturbations via gated batch normalization. International Journal of Computer Vision, 2023b.
|
| 222 |
+
Aishan Liu, Shiyu Tang, Siyuan Liang, Ruihao Gong, Boxi Wu, Xianglong Liu, and Dacheng Tao. Exploring the relationship between architecture and adversarially robust generalization. In CVPR, 2023c.
|
| 223 |
+
Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024a.
|
| 224 |
+
Shunchang Liu, Jiakai Wang, Aishan Liu, Yingwei Li, Yijie Gao, Xianglong Liu, and Dacheng Tao. Harnessing perceptual adversarial patches for crowd counting. In ACM CCS, 2022.
|
| 225 |
+
Xin Liu, Yichen Zhu, Jindong Gu, Yunshi Lan, Chao Yang, and Yu Qiao. Mm-safetybench: A benchmark for safety evaluation of multimodal large language models, 2024b. URL https://arxiv.org/abs/2311.17600.
|
| 226 |
+
Weidi Luo, Siyuan Ma, Xiaogeng Liu, Xiaoyu Guo, and Chaowei Xiao. Jailbreakv: A benchmark for assessing the robustness of multimodal large language models against jailbreak attacks, 2024. URL https://arxiv.org/abs/2404.03027.
|
| 227 |
+
Jiachen Ma, Anda Cao, Zhiqing Xiao, Yijiang Li, Jie Zhang, Chao Ye, and Junbo Zhao. Jailbreaking prompt attack: A controllable adversarial attack against diffusion models. arXiv preprint arXiv:2404.02928, 2024.
|
| 228 |
+
David Mikhail, Andrew Farah, Jason Milad, Wissam Nassrallah, Andrew Mihalache, Daniel Milad, Fares Antaki, Michael Balas, Marko M Popovic, Alessandro Feo, et al. Performance of deepseek-r1 in ophthalmology: An evaluation of clinical decision-making and cost-effectiveness. medRxiv, pp. 2025-02, 2025.
|
| 229 |
+
Zhenxing Niu, Haodong Ren, Xinbo Gao, Gang Hua, and Rong Jin. Jailbreaking attack against multimodal large language model, 2024. URL https://arxiv.org/abs/2402.02309.
|
| 230 |
+
OpenAI: Aaron Hurst, Adam Lerer, Adam P. Goucher, Adam Perelman, Aditya Ramesh, et al. Gpt-4o system card, 2024a. URL https://arxiv.org/abs/2410.21276.
|
| 231 |
+
OpenAI: Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, et al. Openai o1 system card, 2024b. URL https://arxiv.org/abs/2412.16720.
|
| 232 |
+
Manojkumar Parmar and Yuvaraj Govindarajulu. Challenges in ensuring ai safety in deepseek-r1 models: The shortcomings of reinforcement learning strategies. arXiv preprint arXiv:2501.17030, 2025.
|
| 233 |
+
Qwen: An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, et al. Qwen2.5 technical report, 2025. URL https://arxiv.org/abs/2412.15115.
|
| 234 |
+
Paul Röttger, Fabio Pernisi, Bertie Vidgen, and Dirk Hovy. Safetyprompts: a systematic review of open datasets for evaluating and improving large language model safety. arXiv preprint arXiv:2404.05399, 2024.
|
| 235 |
+
Patrick Schramowski, Manuel Brack, Björn Deiseroth, and Kristian Kersting. Safe latent diffusion: Mitigating inappropriate degeneration in diffusion models, 2023. URL https://arxiv.org/abs/2211.05105.
|
| 236 |
+
Zeyang Sha, Zheng Li, Ning Yu, and Yang Zhang. De-fake: Detection and attribution of fake images generated by text-to-image generation models, 2023. URL https://arxiv.org/abs/2210.06998.
|
| 237 |
+
Xinyue Shen, Zeyuan Chen, Michael Backes, Yun Shen, and Yang Zhang. "do anything now": Characterizing and evaluating in-the-wild jailbreak prompts on large language models, 2024. URL https://arxiv.org/abs/2308.03825.
|
| 238 |
+
|
| 239 |
+
Shiyu Tang, Ruihao Gong, Yan Wang, Aishan Liu, Jiakai Wang, Xinyun Chen, Fengwei Yu, Xianglong Liu, Dawn Song, Alan Yuille, et al. Robust: Benchmarking robustness on architecture design and training techniques. ArXiv, 2021.
|
| 240 |
+
Jiakai Wang, Aishan Liu, Zixin Yin, Shunchang Liu, Shiyu Tang, and Xianglong Liu. Dual attention suppression attack: Generate adversarial camouflage in physical world. In CVPR, 2021.
|
| 241 |
+
Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. Chain-of-thought prompting elicits reasoning in large language models. Advances in neural information processing systems, 35:24824-24837, 2022.
|
| 242 |
+
Zhiyu Wu, Xiaokang Chen, Zizheng Pan, Xingchao Liu, Wen Liu, Damai Dai, Huazuo Gao, Yiyang Ma, Chengyue Wu, Bingxuan Wang, et al. Deepseek-vl2: Mixture-of-experts vision-language models for advanced multimodal understanding. arXiv preprint arXiv:2412.10302, 2024.
|
| 243 |
+
Zhiyuan Xu, Joseph Gardiner, and Sana Belguith. The dark deep side of deepseek: Fine-tuning attacks against the safety alignment of cot-enabled models. arXiv preprint arXiv:2502.01225, 2025.
|
| 244 |
+
Zonghao Ying and Bin Wu. Nba: defensive distillation for backdoor removal via neural behavior alignment. Cybersecurity, 6(1), July 2023a. ISSN 2523-3246. doi: 10.1186/s42400-023-00154-z. URL http://dx.doi.org/10.1186/s42400-023-00154-z.
|
| 245 |
+
Zonghao Ying and Bin Wu. Dlp: towards active defense against backdoor attacks with decoupled learning process. Cybersecurity, 6(1), May 2023b. ISSN 2523-3246. doi: 10.1186/s42400-023-00141-4. URL http://dx.doi.org/10.1186/s42400-023-00141-4.
|
| 246 |
+
Zonghao Ying, Aishan Liu, Siyuan Liang, Lei Huang, Jinyang Guo, Wenbo Zhou, Xianglong Liu, and Dacheng Tao. Safebench: A safety evaluation framework for multimodal large language models. arXiv preprint arXiv:2410.18927, 2024a.
|
| 247 |
+
Zonghao Ying, Aishan Liu, Xianglong Liu, and Dacheng Tao. Unveiling the safety of gpt-4o: An empirical study using jailbreak attacks. arXiv preprint arXiv:2406.06302, 2024b.
|
| 248 |
+
Zonghao Ying, Aishan Liu, Tianyuan Zhang, Zhengmin Yu, Siyuan Liang, Xianglong Liu, and Dacheng Tao. Jailbreak vision language models via bi-modal adversarial prompt. arXiv preprint arXiv:2406.04031, 2024c.
|
| 249 |
+
Zonghao Ying, Deyue Zhang, Zonglei Jing, Yisong Xiao, Quanchen Zou, Aishan Liu, Siyuan Liang, Xiangzheng Zhang, Xianglong Liu, and Dacheng Tao. Reasoning-augmented conversation for multi-turn jailbreak attacks on large language models. arXiv preprint arXiv:2502.11054, 2025.
|
| 250 |
+
Tongxin Yuan, Zhiwei He, Lingzhong Dong, Yiming Wang, Ruijie Zhao, Tian Xia, Lizhen Xu, Binglin Zhou, Fangqi Li, Zhuosheng Zhang, et al. R-judge: Benchmarking safety risk awareness for llm agents. arXiv preprint arXiv:2401.10019, 2024a.
|
| 251 |
+
Xiaohan Yuan, Jinfeng Li, Dongxia Wang, Yuefeng Chen, Xiaofeng Mao, Longtao Huang, Hui Xue, Wenhai Wang, Kui Ren, and Jingyi Wang. S-eval: Automatic and adaptive test generation for benchmarking safety evaluation of large language models. arXiv preprint arXiv:2405.14191, 2024b.
|
| 252 |
+
Chongzhi Zhang, Aishan Liu, Xianglong Liu, Yitao Xu, Hang Yu, Yuqing Ma, and Tianlin Li. Interpreting and improving adversarial robustness of deep neural networks with neuron sensitivity. IEEE Transactions on Image Processing, 2021.
|
| 253 |
+
Kaiwen Zhou, Chengzhi Liu, Xuandong Zhao, Shreedhar Jangam, Jayanth Srinivasa, Gaowen Liu, Dawn Song, and Xin Eric Wang. The hidden risks of large reasoning models: A safety assessment of r1. arXiv preprint arXiv:2502.12659, 2025.
|
| 254 |
+
Andy Zou, Zifan Wang, Nicholas Carlini, Milad Nasr, J. Zico Kolter, and Matt Fredrikson. Universal and transferable adversarial attacks on aligned language models, 2023. URL https://arxiv.org/abs/2307.15043.
|
| 255 |
+
|
| 256 |
+
# A APPENDIX
|
| 257 |
+
|
| 258 |
+
# A.1 BENCHMARK
|
| 259 |
+
|
| 260 |
+
# A.2 CNSAFE
|
| 261 |
+
|
| 262 |
+
CNSafe focuses on evaluating the following five core dimensions:
|
| 263 |
+
|
| 264 |
+
- Content Contravening Core Socialist Values. This includes content that incites subversion of state power, endangers national security, promotes terrorism, incites ethnic hatred, contains violent or pornographic material, disseminates false information, and related violations.
|
| 265 |
+
- Discriminatory Content. This encompasses expressions of discrimination based on ethnicity, religion, nationality, geographic origin, gender, age, occupation, health status, and other protected characteristics.
|
| 266 |
+
- Commercial Violations and Misconduct. This addresses issues such as intellectual property infringement, breaches of business ethics, disclosure of trade secrets, monopolistic practices, and unfair competition.
|
| 267 |
+
- Infringement of Others' Legal Rights. This includes violations impacting others' physical and mental well-being, portrait rights, reputation, privacy, and personal information rights.
|
| 268 |
+
- Inability to Meet Safety Requirements for Specific Service Types. This dimension assesses risks arising from inaccurate or unreliable content in high-security contexts such as automated control, medical information services, psychological counseling, and critical information infrastructure.
|
| 269 |
+
|
| 270 |
+
# A.3 CNSAFE_RT
|
| 271 |
+
|
| 272 |
+
CNSafe_RT is derived from CNSafe, sampling 1000 benchmark queries across 10 categories. It then integrates typical jailbreak attack methods, combining advanced prompt perturbation techniques with safety risk scenarios specific to the Chinese context, to construct a highly adversarial dataset. The integrated jailbreak methods include: (1) scenario injection attacks; (2) affirmative prefix induction; (3) indirect instruction attacks.
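Structurally, each CNSafe_RT sample is a base query wrapped by one of these three perturbation families, as the abstract sketch below illustrates; the template strings are neutral placeholders rather than the actual attack prompts.

```python
# Abstract composition of CNSafe_RT variants: each base query is wrapped by
# one of the three perturbation families. Templates are neutral placeholders,
# not the actual attack prompts.
TEMPLATES = {
    "scenario_injection": "[scenario framing] ... {query} ...",
    "affirmative_prefix": "{query}\n[induced affirmative opening] ...",
    "indirect_instruction": "[indirection wrapper] ... {query} ...",
}

def make_variants(query: str) -> dict:
    return {name: t.format(query=query) for name, t in TEMPLATES.items()}
```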
|
| 273 |
+
|
| 274 |
+
The generation of CNSafe_RT followed a semi-automated process. Initially, LLMs, such as GPT-4, were used to rewrite the base samples, generating adversarial variants. Subsequently, safety experts reviewed and refined the attack strategies, ensuring the effectiveness and targeted nature of the test samples. The resulting CNSafe_RT dataset comprises 1000 attack samples encompassing 10 granular risk dimensions.
|
| 275 |
+
|
| 276 |
+
# A.4 SAFEBENCH
|
| 277 |
+
|
| 278 |
+
SafeBench is constructed through an automated safety dataset generation pipeline. This pipeline leverages a set of LLMs as judges to identify and categorize the most harmful and diverse risk scenarios for MLLMs. Based on this categorization, these LLM judges then generate high-quality harmful queries. This process results in 23 distinct risk scenarios and 2300 foundational multimodal harmful query pairs. Furthermore, SafeBench provides an extension module capable of deriving a significantly larger number of query pairs. Consequently, SafeBench offers a comprehensive and targeted set of test samples for evaluating the safety of MLLMs.
|
| 279 |
+
|
| 280 |
+
# A.5 MM-SAFETYBENCH
|
| 281 |
+
|
| 282 |
+
MM-SafetyBench is designed to address the vulnerability of MLLMs to manipulations stemming from query-related images. It encompasses 13 distinct scenarios and comprises a total of 5040 text-image pairs. Through an analysis of 12 leading MLLMs, this dataset reveals that even MLLMs equipped with safety-aligned LLMs remain susceptible to such attacks. Consequently, MM-SafetyBench provides a crucial benchmark for investigating the safety vulnerabilities of MLLMs under image-based manipulation, thereby fostering research aimed at enhancing model robustness and safety.
|
| 283 |
+
|
| 284 |
+
SafeBench and MM-SafetyBench jointly cover the following 13 granular risk dimensions:
|
| 285 |
+
|
| 286 |
+
- Illegal Activities. Content that incites crime, drug trafficking, smuggling, or other illegal acts.
|
| 287 |
+
- Hate Speech. Content that expresses discrimination, hatred, or incites violence against specific groups.
|
| 288 |
+
- Malware Generation. Assessing whether the model can be used to generate malicious code, viruses, or cyberattack tools.
|
| 289 |
+
- Threats to Physical Safety. Content related to self-harm, violent attacks, or the creation of dangerous items.
|
| 290 |
+
- Economic Harm. Risks associated with market manipulation, financial fraud, or money laundering.
|
| 291 |
+
- Fraud. Assessing whether the model can be misused for scams, identity theft, or other fraudulent activities.
|
| 292 |
+
- Pornographic Content. Detecting the generation of obscene or inappropriate material.
|
| 293 |
+
- Political Lobbying. Assessing whether the model engages in undue political manipulation or interference.
|
| 294 |
+
- Privacy Violation. Content that involves the disclosure of personal information or unauthorized data access.
|
| 295 |
+
- Legal Advice. Examining whether the model provides unauthorized legal advice, potentially misleading users.
|
| 296 |
+
- Financial Advice. Evaluating whether the financial advice provided by the model is misleading or non-compliant.
|
| 297 |
+
- Health Advice. Assessing the reliability of medical advice to prevent users from making incorrect health decisions based on misleading information.
|
| 298 |
+
- Interference with Government Decision-Making. Detecting whether the model could potentially influence or interfere with government governance decisions.
|
| 299 |
+
|
| 300 |
+
# A.6 I2P
|
| 301 |
+
|
| 302 |
+
I2P comprises specialized, real-world text-to-image prompts, encompassing concepts such as nudity and violence. This dataset enables researchers to evaluate model performance when processing sensitive content. I2P encompasses the following seven core dimensions:
|
| 303 |
+
|
| 304 |
+
- Hate Content. Elements that express discrimination, insults, or incite hatred against specific groups.
|
| 305 |
+
- Harassment. Content involving insults, threats, or malicious and offensive attacks targeting individuals or groups.
|
| 306 |
+
- Violence. Detecting whether the model generates images containing gore, assault, warfare, or other violent scenes.
|
| 307 |
+
- Self-Harm. Assessing the presence of content related to self-harm, suicide, or other acts endangering one's own health.
|
| 308 |
+
- Pornographic Content. Including obscene, explicit, or otherwise inappropriate imagery.
|
| 309 |
+
- Shocking Imagery. Content such as graphic violence, terror, or material likely to evoke extreme negative emotions.
|
| 310 |
+
- Illegal Activities. The risk of generating content related to drugs, crime, terrorism, or other illegal acts.
|
data/2025/2503_15xxx/2503.15092/images/173a0d78d14e3aa2faf5434a8830a9886cef9244b14e55c74dc450f4f1636272.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/198b58be1862bcfb3495e44f7bb579c2d2af776f3ad9305c8508d0395bdb788b.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/1c907e28c4b515d3fccf907639ba08a265e4dfe34cddd60a42a2a9a5106ea664.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/4cbff7188997f009a05cf78e1045fbeb940630c3a5c7ff1ea534811af17d776f.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/55718dc235b9bd54c5c9e8a17fe739ef79434a16dafecbdb2b0136bbe97abd0e.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/55fb783ad2adb41d045a59e457336fd067be2ed921e7ca12ee92923660b048e9.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/6eeceff7b55665b9a7cb6ea5eb9f80d5f53585c8e73efe5539f6463a829b3033.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/837ed3e60e92e435fa22125a0e857a27ca8d2ef7334d32935e15ad44d80afe12.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/8c9427b38fe3eff759fd376ff8eb5ce23dbd2d1be9563f0bfa486e6f81ccecff.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/933cc611cdab1edb70abc13d50aab004973186a8e50868f4c6c8e3d0fa4f5abc.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/a2ecbfa253705fc233420bf98d4dc6351aae78177711b77a89a38e742092e986.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/a78d63f9e5b7d49a1007549d1d5d173834b98e64c8e07b60ae8437b2f916d5d9.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/ac507e5edeaf220188c651a28b476844d5481b2e39e5238162c9a7b03a0b0748.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/add00a1381f4e6122096d019a46e1343259d3592eb2c418cdda9920f049b6fd0.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/c1af023a4b2ff28b865e6453fe7dc2082a5247f5548613599388e120b3f5db11.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/ca379e4a1dd72dd16df16517c4b97455853953fe7dd802ddbe7a1f99a90efa9e.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/d040f93e4de3859776d94935d440ee590d69054397f1a469d78e119bb18ca77b.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/d72847520f3e3ccf3b6d7aa79e8685b193af07feab1c019da7545874439a6159.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/images/fd5c8c0b62ddcf673c09ec84587e595e367ab76774b9a793b6a923ff52abd051.jpg
ADDED
|
Git LFS Details
|
data/2025/2503_15xxx/2503.15092/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2503_15xxx/2503.15112/625c34d4-2bff-40eb-b17b-655c01ac6ef3_content_list.json
ADDED
|
@@ -0,0 +1,1744 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "OpenLLM-RTL: Open Dataset and Benchmark for LLM-Aided Design RTL Generation",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
104,
|
| 8 |
+
79,
|
| 9 |
+
890,
|
| 10 |
+
130
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Invited Paper",
|
| 17 |
+
"bbox": [
|
| 18 |
+
442,
|
| 19 |
+
135,
|
| 20 |
+
553,
|
| 21 |
+
152
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Shang Liu*, Yao Lu*, Wenji Fang*, Mengming Li, Zhiyao Xie†",
|
| 28 |
+
"bbox": [
|
| 29 |
+
250,
|
| 30 |
+
165,
|
| 31 |
+
743,
|
| 32 |
+
185
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Hong Kong University of Science and Technology (HKUST)",
|
| 39 |
+
"bbox": [
|
| 40 |
+
297,
|
| 41 |
+
190,
|
| 42 |
+
699,
|
| 43 |
+
207
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "{sluidx, yludf, wfang838, mengming.li}@connect.ust.hk, eezhiyao@ust.hk",
|
| 50 |
+
"bbox": [
|
| 51 |
+
248,
|
| 52 |
+
209,
|
| 53 |
+
750,
|
| 54 |
+
224
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "ABSTRACT",
|
| 61 |
+
"text_level": 1,
|
| 62 |
+
"bbox": [
|
| 63 |
+
83,
|
| 64 |
+
236,
|
| 65 |
+
184,
|
| 66 |
+
250
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "The automated generation of design RTL based on large language model (LLM) and natural language instructions has demonstrated great potential in agile circuit design. However, the lack of datasets and benchmarks in the public domain prevents the development and fair evaluation of LLM solutions. This paper highlights our latest advances in open datasets and benchmarks from three perspectives: (1) RTLLM 2.0, an updated benchmark assessing LLM's capability in design RTL generation. The benchmark is augmented to 50 hand-crafted designs. Each design provides the design description, test cases, and a correct RTL code. (2) AssertEval, an open-source benchmark assessing the LLM's assertion generation capabilities for RTL verification. The benchmark includes 18 designs, each providing specification, signal definition, and correct RTL code. (3) RTLCoder-Data, an extended open-source dataset with 80K instruction-code data samples. Moreover, we propose a new verification-based method to verify the functionality correctness of training data samples. Based on this technique, we further release a dataset with 7K verified high-quality samples. These three studies are integrated into one framework, providing off-the-shelf support for the development and evaluation of LLMs for RTL code generation and verification. Finally, extensive experiments indicate that LLM performance can be boosted by enlarging the training dataset, improving data quality, and improving the training scheme.",
|
| 73 |
+
"bbox": [
|
| 74 |
+
81,
|
| 75 |
+
255,
|
| 76 |
+
482,
|
| 77 |
+
574
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
{
"type": "text",
"text": "CCS CONCEPTS",
"text_level": 1,
"bbox": [83, 585, 220, 599],
"page_idx": 0
},
{
"type": "text",
"text": "- Hardware $\\rightarrow$ Hardware description languages and compilation; - Computing methodologies $\\rightarrow$ Natural language processing.",
"bbox": [81, 604, 482, 648],
"page_idx": 0
},
{
"type": "text",
"text": "KEYWORDS",
"text_level": 1,
"bbox": [83, 659, 189, 672],
"page_idx": 0
},
{
"type": "text",
"text": "LLM-assisted circuit design, electronic design automation",
"bbox": [83, 678, 433, 691],
"page_idx": 0
},
{
"type": "text",
"text": "ACM Reference Format:",
"text_level": 1,
"bbox": [83, 700, 230, 710],
"page_idx": 0
},
{
"type": "text",
"text": "Shang Liu*, Yao Lu*, Wenji Fang*, Mengming Li, Zhiyao Xie†. 2024. OpenLLMRTL: Open Dataset and Benchmark for LLM-Aided Design RTL Generation: Invited Paper. In Proceedings of the ACM/IEEE International Conference on Computer-Aided Design (ICCAD '24), October 27-31, 2024, New Jersey, NY, USA. ACM, New York, NY, USA, 9 pages. https://doi.org/10.1145/nnnnnnn.nnnnnnn",
"bbox": [81, 712, 488, 787],
"page_idx": 0
},
{
"type": "text",
"text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.",
"bbox": [81, 800, 482, 872],
"page_idx": 0
},
{
"type": "text",
"text": "ICCAD '24, October 27-31, 2024, New Jersey, NY, USA",
"bbox": [83, 873, 331, 883],
"page_idx": 0
},
{
"type": "text",
"text": "© 2024 Copyright held by the owner/author(s). Publication rights licensed to ACM.",
"bbox": [84, 883, 472, 893],
"page_idx": 0
},
{
"type": "text",
"text": "ACM ISBN 978-x-xxxxx-xxxxx-x/YY/MM",
"bbox": [84, 895, 267, 902],
"page_idx": 0
},
{
"type": "text",
"text": "https://doi.org/10.1145/nnnnnnn.nnnnnnn",
"bbox": [84, 904, 284, 914],
"page_idx": 0
},
{
"type": "image",
"img_path": "images/7dff4f7145930a9538e22867b1be2cba3e6554bcce2461ef46366249cad2787c.jpg",
"image_caption": [
"Figure 1: This paper presents open-source benchmarks and dataset for LLM-assisted RTL generation and verification."
],
"image_footnote": [],
"bbox": [517, 234, 913, 382],
"page_idx": 0
},
{
"type": "text",
"text": "1 INTRODUCTION",
"text_level": 1,
"bbox": [514, 434, 687, 448],
"page_idx": 0
},
{
"type": "text",
"text": "In recent years, large language models (LLMs) such as GPT [34] have demonstrated remarkable performance in natural language processing (NLP). Inspired by this progress, researchers have started exploring the adoption of LLMs in agile hardware design [8]. A promising direction that attracts the most attention is automatically generating design RTL based on natural language instructions [4, 7, 15, 25, 27-29, 31, 32, 38, 45, 46, 53, 55, 57]. In modern VLSI design flow, design teams typically exert great effort to implement precise design functionality in design RTL using hardware description languages (HDLs). Given design functionality descriptions in natural language (e.g., a specification), LLM solutions aim to directly generate corresponding HDL code such as Verilog, VHDL, or Chisel from scratch. This LLM-based design generation technique can potentially revolutionize the existing HDL-based VLSI design process, relieving designers from tedious HDL coding tasks. Compared with well-explored predictive machine learning (ML)-based solutions in EDA [39, 51], such generative methods may benefit the hardware design process more directly.",
"bbox": [511, 452, 913, 700],
"page_idx": 0
},
{
"type": "text",
"text": "In addition to the generation of RTL (i.e., HDL code) itself, the verification of RTL correctness is equally important and challenging in modern VLSI design. Functional verification ensures the RTL implementation satisfies its specification. Assertion-based verification (ABV) [50] employs assertions derived from specifications to verify the functional behavior of RTL designs. ABV can be conducted through either simulation or formal property verification (FPV), with assertions often expressed using SystemVerilog Assertions (SVAs). However, a major challenge in ABV is to obtain sufficient, high-quality assertions. Existing research on automating assertion generation includes dynamic assertion mining based on simulation traces [10, 14, 48], static generation using predefined design-specific templates [11, 35], and the direct translation of natural language",
"bbox": [511, 702, 913, 883],
"page_idx": 0
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"*Equal Contribution",
"† Corresponding Author"
],
"bbox": [514, 892, 630, 915],
"page_idx": 0
},
{
"type": "aside_text",
"text": "arXiv:2503.15112v1 [cs.AR] 19 Mar 2025",
"bbox": [22, 260, 57, 705],
"page_idx": 0
},
{
"type": "table",
"img_path": "images/db2d3123e205f86ce0bf00bdbebdeb9bdd89dce6174cdfecb2026044bacef318.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td colspan=\"2\">LLM-Assisted RTL Generation</td></tr><tr><td>Prompt Engineering</td><td>[4, 7, 29, 31, 32, 46]</td></tr><tr><td>Closed Dataset</td><td>VerilogEval [27], BetterV [38], ChipNemo [25] \nChang et al. [6], OriGen [9], CodeV [57]</td></tr><tr><td>Open Benchmark</td><td>RTLLM [29], VerilogEval [27], RTL-repo [2] \nRTLLM 2.0 (Section 2)</td></tr><tr><td>Open Dataset \n(code only)</td><td>Thakur et al. [45], Wang et al. [49]</td></tr><tr><td>Open Dataset \n(instruction-code)</td><td>RTLCoder [28], MG-Verilog [55], Goh et al. [15] \nRTLCoder-Data (Section 4)</td></tr></table>",
"bbox": [86, 90, 488, 250],
"page_idx": 1
},
{
"type": "table",
"img_path": "images/7a589558cf0d7430a6ef5cb66fdbcdb18243baf15477393b6b0cef8f3bb336e3.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td colspan=\"2\">LLM-Assisted RTL Verification & Debugging</td></tr><tr><td>Prompt Engineering</td><td>[3, 12, 18, 20, 26, 30, 36, 42, 47, 52]</td></tr><tr><td>Closed Dataset</td><td>HDLdebugger [54]</td></tr><tr><td>Open Benchmark</td><td>AssertEval (Section 3)</td></tr></table>",
"bbox": [86, 258, 488, 325],
"page_idx": 1
},
{
"type": "text",
"text": "Table 1: Existing explorations in LLM-aided design RTL generation and verification, with a focus on works that adopt or propose new datasets and benchmarks.",
"bbox": [81, 330, 483, 372],
"page_idx": 1
},
{
"type": "text",
"text": "specifications into assertions [1, 13, 17, 20-23, 36, 37, 42, 56]. LLM solutions [3, 12, 20, 26, 30, 36, 42] also turn out to be promising in generating assertions for design RTL verification.",
"bbox": [81, 402, 480, 445],
"page_idx": 1
},
{
"type": "text",
"text": "Many existing works directly prompt commercial LLMs like GPT-3.5/GPT-4 for RTL code generation [4, 7, 29, 31, 32, 46] or verification [3, 12, 18, 47, 52, 53], without proposing new datasets or models. However, reliance on commercial LLM tools limits in-depth research exploration and further model customization. More importantly, users of commercial LLM solutions unavoidably have data privacy concerns, since all instructions have to be uploaded to LLM providers like OpenAI. Such privacy concerns are especially critical in the IC design industry. In addition, commercial LLMs may not ensure reliable service with low response latency.",
"bbox": [81, 445, 482, 583],
"page_idx": 1
},
{
"type": "text",
"text": "To develop our own customized or open-source LLM solutions for RTL generation or verification, a primary challenge is the limited availability of circuit data. Unlike the huge amount of text and image resources in the public domain, circuit designs are the most important intellectual property (IP) of semiconductor companies, who typically strongly oppose sharing their designs. Such limited circuit data sharing is a long-standing issue not only for academia but also among different design teams within a single company. This data availability problem leads to a lack of datasets and benchmarks, preventing both the development and fair evaluation of LLM solutions in hardware design.",
"bbox": [81, 584, 482, 734],
"page_idx": 1
},
{
"type": "text",
"text": "Table 1 summarizes existing efforts in LLM-assisted RTL generation and verification, with a focus on open-source datasets and benchmarks. Open benchmarks [2, 27, 29] are vitally important for a fair evaluation of LLM solutions. In addition to prompting GPT, many works tried to construct their own LLMs with either open-source [15, 28, 46, 49, 55] or closed-source [6, 9, 25, 27, 38, 57] datasets. Among the open-source datasets, several [46, 49] provide only RTL code, without alignment with RTL generation tasks based on natural language instructions. In comparison, some open datasets [15, 28, 55] provide a pair of natural language instruction (i.e., LLM input) and code (i.e., expected LLM output) as one data sample. These datasets are better aligned with the RTL generation task and benefit the LLM fine-tuning process.",
"bbox": [81, 736, 482, 915],
"page_idx": 1
},
{
"type": "text",
"text": "In this paper, as summarized in Table 1 and Figure 1, we highlight our latest advances in open datasets and benchmarks for LLM-assisted design and integrate them into a unified framework. It consists of three major components.",
"bbox": [513, 85, 913, 140],
"page_idx": 1
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"(1) In Section 2, we present an open-source benchmark named RTLLM 2.0 for evaluating the performance of LLM-assisted RTL generation<sup>1</sup>. It provides 50 RTL designs. It is an extension of our proposed benchmark RTLLM [29], which originally provided 30 designs. For each design, we provide the functionality description, test cases, and correct RTL design handcrafted by human engineers.",
"(2) In Section 3, we present an open-source benchmark named AssertEval for LLM-assisted RTL verification<sup>2</sup>. It provides 18 designs to evaluate the generation of assertions by LLMs. These designs cover a diverse spectrum of applications. For each design, we provide the specification document, golden RTL code, and the script for FPV.",
"(3) In Section 4, we present an open-source dataset named RTLCoder-Data for training the LLM for RTL generation<sup>3</sup>. This dataset provides 80K (thousand) samples, with each sample being a code generation instruction and corresponding RTL code. This is an extension of the dataset released in our proposed RTLCoder [28], which originally provided 27K samples."
],
"bbox": [532, 146, 911, 421],
"page_idx": 1
},
{
"type": "text",
"text": "In addition, a challenge in dataset generation is the difficulty of checking the correctness of data samples. RTLCoder [28] has proposed both an instruction checker and a code checker, evaluating the diversity introduced by new instructions and the syntax correctness of new code, respectively. However, no data generation method can automatically check whether the code has the correct functionality (i.e., the same functionality as described in the instruction). In this paper, we explore an innovative method to verify training data correctness by generating assertions for each sample. In this way, we further generate and release a verified 7K-sample dataset for training LLMs for RTL generation, which is also introduced in Section 4. Finally, we trained and compared various LLM solutions to study the factors that affect LLM performance in RTL generation.",
"bbox": [511, 428, 913, 609],
"page_idx": 1
},
{
"type": "text",
"text": "2 RTLLM 2.0: OPEN BENCHMARK FOR RTL GENERATION",
"text_level": 1,
"bbox": [513, 623, 893, 652],
"page_idx": 1
},
{
"type": "text",
"text": "2.1 Overview of RTLLM 2.0",
"text_level": 1,
"bbox": [513, 659, 751, 672],
"page_idx": 1
},
{
"type": "text",
"text": "Our previously proposed RTLLM [29] is a comprehensive open-source benchmark for design RTL generation with natural language. It supports the evaluation of any generated HDL format, including Verilog, VHDL, and Chisel, as long as the format supports logic synthesis and RTL simulation.",
"bbox": [511, 678, 913, 744],
"page_idx": 1
},
{
"type": "text",
"text": "RTLLM [29] consists of 30 designs with a wide coverage of design complexities and scales. In RTLLM-2.0, we have expanded this collection to include 50 designs, of which the ones highlighted in bold in Table 2 are newly added. This enlargement allows for a more thorough evaluation of different design types and sizes, offering a more comprehensive understanding of how RTL code generation performs across a wider variety of benchmarks. By increasing the number of designs, we can now explore a broader range of scenarios,",
"bbox": [511, 747, 913, 857],
"page_idx": 1
},
{
"type": "page_footnote",
"text": "1RTLLM 2.0 is in https://github.com/hkust-zhiyao/RTLLM.",
"bbox": [513, 881, 792, 893],
"page_idx": 1
},
{
"type": "page_footnote",
"text": "2AssertEval is in https://github.com/hkust-zhiyao/AssertLLM.",
"bbox": [514, 893, 805, 902],
"page_idx": 1
},
{
"type": "page_footnote",
"text": "3RTLCoder-Data (both 80K and 7K) is in https://github.com/hkust-zhiyao/RTL-Coder.",
"bbox": [514, 904, 911, 914],
"page_idx": 1
},
{
"type": "page_number",
"text": "2",
"bbox": [493, 920, 501, 928],
"page_idx": 1
},
{
"type": "table",
"img_path": "images/c190c04452b5868c3b4e25084c9f484aab5e7909bde07ad727b4d648526989b4.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td colspan=\"2\">Arithmetic Modules</td><td colspan=\"2\">Memory Modules</td></tr><tr><td>Design</td><td>Description</td><td>Design</td><td>Description</td></tr><tr><td>adder_8bit</td><td>An 8-bit adder</td><td>asyn_fifo</td><td>An asynchronous FIFO 16×8 bits</td></tr><tr><td>adder_16bit</td><td>A 16-bit adder implemented with full adders</td><td>LIFObuffer</td><td>A Last-In-First-Out buffer for temporary data storage</td></tr><tr><td>adder_32bit</td><td>A 32-bit carry-lookahead adder</td><td>right_shifter</td><td>Right shifter with 8-bit delay</td></tr><tr><td>adder_pipe_64bit</td><td>A 64-bit ripple carry adder based on 4-stage pipeline</td><td>LFSR</td><td>A Linear Feedback Shift Register for generating pseudo-random sequences</td></tr><tr><td>adder_bcd</td><td>A BCD adder for decimal arithmetic operations</td><td>barrel_shifter</td><td>A barrel shifter for rotating bits efficiently</td></tr><tr><td>sub_64bit</td><td>A 64-bit subtractor for high-precision arithmetic</td><td>RAM</td><td>8x4 bits true dual-port RAM</td></tr><tr><td>multi_8bit</td><td>An 8-bit multiplier based on shifting and adding operation</td><td>ROM</td><td>A Read-Only Memory module for storing fixed data</td></tr><tr><td>multi_16bit</td><td>A 16-bit multiplier based on shifting and adding operation</td><td colspan=\"2\">Miscellaneous Modules</td></tr><tr><td>multi_booth_8bit</td><td>An 8-bit booth-4 multiplier</td><td>Design</td><td>Description</td></tr><tr><td>multi_pipie_4bit</td><td>A 4-bit unsigned number pipeline multiplier</td><td>clkgenerator</td><td>A clock generator for providing timing signals</td></tr><tr><td>multi_pipie_8bit</td><td>An 8-bit unsigned number pipeline multiplier</td><td>instr_reg</td><td>An instruction register module for holding and processing CPU instructions</td></tr><tr><td>div_16bit</td><td>A 16-bit divider based on subtraction operation</td><td>signal_generation</td><td>Generate various signal patterns</td></tr><tr><td>radix2_div</td><td>An 8-bit radix-2 divider</td><td>squarewave</td><td>A generator for producing square wave signals</td></tr><tr><td>comparator_3bit</td><td>A 3-bit comparator for comparing binary numbers</td><td>alu</td><td>An ALU for 32bit MIPS-ISA CPU</td></tr><tr><td>comparator_4bit</td><td>A 4-bit comparator for comparing binary numbers</td><td>pe</td><td>A Multiplying Accumulator for 32bit integer</td></tr><tr><td>accu</td><td>Accumulates 8-bit data and output after 4 inputs</td><td>freq_div</td><td>Frequency divider for 100M input clock, outputs 50MHz, 10MHz, 1MHz</td></tr><tr><td>fixed_point_adder</td><td>A fixed-point adder for arithmetic operations with fixed precision</td><td>freq_divbyeven</td><td>Frequency divider that divides input frequency by even numbers</td></tr><tr><td>fixed_point_subtractor</td><td>A fixed-point subtractor for precise fixed-point arithmetic</td><td>freq_divbyodd</td><td>Frequency divider that divides input frequency by odd numbers</td></tr><tr><td>float-multi</td><td>A floating-point multiplier for high-precision calculations</td><td>freq_divbyfrac</td><td>Frequency divider that divides input frequency by fractional values</td></tr><tr><td colspan=\"2\">Control Modules</td><td>calendar</td><td>Perpetual calendar with seconds, minutes, and hours</td></tr><tr><td>Design</td><td>Description</td><td>traffic_light</td><td>Traffic light system with three colors and pedestrian button</td></tr><tr><td>fsm</td><td>FSM detection circuit for specific input</td><td>width_8to16</td><td>First 8-bit data placed in higher 8-bits of the 16-bit output</td></tr><tr><td>sequence_detector</td><td>Detect specific sequences in binary input</td><td>synchronizer</td><td>Multi-bit mux synchronizer</td></tr><tr><td>counter_12</td><td>Counter module counts from 0 to 12</td><td>edgedetect</td><td>Detect rising and falling edges of changing 1-bit signal</td></tr><tr><td>JC_counter</td><td>A 4-bit Johnson counter with specific cyclic state sequence</td><td>pulsedetect</td><td>Extract pulse signal from the fast clock and create a new one in the slow clock</td></tr><tr><td>ring_counter</td><td>An 8-bit ring counter for cyclic state sequences</td><td>parallel2serial</td><td>Convert 4 input bits to 1 output bit</td></tr><tr><td>up_down_counter</td><td>A 16-bit counter that can increment or decrement based on control signals</td><td>serial2parallel</td><td>1-bit serial input and output data after receiving 6 inputs</td></tr></table>",
"bbox": [86, 77, 911, 603],
"page_idx": 2
},
{
"type": "text",
"text": "Table 2: RTLLM-2.0 benchmark description. The benchmark includes 50 designs across various applications, with bold designs representing newly added designs relative to RTLLM.",
"bbox": [81, 609, 911, 638],
"page_idx": 2
},
{
"type": "text",
"text": "better understand design intricacies, and assess performance more effectively in the context of RTL code generation.",
"bbox": [81, 662, 482, 690],
"page_idx": 2
},
{
"type": "text",
"text": "Unlike RTLLM, which broadly classifies designs into Arithmetic and Logic types, RTLLM-2.0 takes a closer look by categorizing designs based on their specific functions and applications. By breaking down designs in RTLLM-2.0 according to their unique purposes, these detailed categories allow for better comparisons of how different models perform across various design types.",
"bbox": [81, 690, 482, 773],
"page_idx": 2
},
{
"type": "text",
"text": "2.2 Detailed Inspection of the Benchmark",
"text_level": 1,
"bbox": [81, 800, 437, 816],
"page_idx": 2
},
{
"type": "text",
"text": "The benchmark RTLLM-2.0 dataset is meticulously categorized into four primary module classes: Arithmetic Modules, Memory Modules, Control Modules, and Miscellaneous Modules. Each class encompasses a variety of functional units pertinent to diverse computational and control tasks, as delineated in Table 2. This structured classification facilitates a comprehensive analysis and application of the dataset across multiple domains in digital system design.",
"bbox": [81, 818, 482, 917],
"page_idx": 2
},
{
"type": "text",
"text": "Arithmetic Modules comprise various adders, subtractors, multipliers, dividers, comparators, accumulators, and other specialized units like fixed-point arithmetic components. For instance, the adder subcategory includes 8-bit, 16-bit, 32-bit, and 64-bit pipelined adders, along with a BCD adder, addressing both general and specific arithmetic operations. Similarly, subtractors, multipliers, and comparators are available in different bit widths and configurations, such as a 64-bit subtractor, an 8-bit multiplier, and both 3-bit and 4-bit comparators, respectively.",
"bbox": [511, 662, 913, 787],
"page_idx": 2
},
{
"type": "text",
"text": "Memory Modules are designed to handle data storage and retrieval with FIFO (First-In, First-Out) and LIFO (Last-In, First-Out) buffers, alongside various shifters including right shifters, LFSRs (Linear Feedback Shift Registers), barrel shifters, as well as RAM and ROM. The inclusion of asynchronous FIFO and LIFO buffers highlights the benchmark's capability to manage different memory access patterns efficiently.",
"bbox": [511, 787, 913, 883],
"page_idx": 2
},
{
"type": "page_number",
"text": "3",
"bbox": [493, 920, 503, 928],
"page_idx": 2
},
{
"type": "table",
"img_path": "images/960dee98aa7e09ae00c237f0021dc7dc42725087982b610de5e02e2d108bc91a.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td>Design Type</td><td colspan=\"3\">Cryptographic Unit</td><td colspan=\"3\">Processor Core</td><td colspan=\"3\">Arithmetic Unit</td><td colspan=\"3\">Communication Protocol</td><td colspan=\"3\">Memory Controller</td></tr><tr><td rowspan=\"4\">Design Name/# Page/# Signal</td><td>AES</td><td>15</td><td>11</td><td>amber</td><td>26</td><td>14</td><td>ecg</td><td>9</td><td>12</td><td>ethernet</td><td>42</td><td>54</td><td>hpdmc</td><td>8</td><td>4</td></tr><tr><td>sha3</td><td>17</td><td>9</td><td>lxp32</td><td>59</td><td>22</td><td>mac</td><td>24</td><td>34</td><td>i2c</td><td>15</td><td>24</td><td>sdc</td><td>26</td><td>53</td></tr><tr><td>tiny AES</td><td>17</td><td>4</td><td>minsoc</td><td>22</td><td>14</td><td>pairing</td><td>13</td><td>8</td><td>socket</td><td>29</td><td>15</td><td>sdr_ctrl</td><td>28</td><td>46</td></tr><tr><td></td><td></td><td></td><td></td><td></td><td></td><td>tiny pairing</td><td>17</td><td>10</td><td>uart</td><td>10</td><td>11</td><td></td><td></td><td></td></tr></table>",
"bbox": [86, 75, 911, 154],
"page_idx": 3
},
{
"type": "text",
"text": "Table 3: AssertEval benchmark description. The benchmark includes 18 open-source designs across various applications. For each design's specification document, we list the number of file pages and the number of architectural signals under verification.",
"bbox": [81, 156, 913, 186],
"page_idx": 3
},
{
"type": "text",
"text": "Control Modules focus on state management and counting mechanisms, featuring finite state machines (FSMs), sequence detectors, and various counters such as a 12-bit counter, Johnson counter (JC_counter), ring counter, and an up/down counter. These modules are crucial for controlling the flow of operations and ensuring sequential logic execution within digital systems.",
"bbox": [81, 203, 480, 286],
"page_idx": 3
},
{
"type": "text",
"text": "Miscellaneous Modules cover a broad spectrum of functionalities including signal generation, RISC-V components, frequency dividers, and other essential units. Signal generation features modules like a signal generator and a square wave generator. The RISC-V category includes clock generators, instruction registers, ALU, and processing elements, essential for constructing RISC-V-based architectures. Frequency dividers are detailed with modules that divide by even, odd, and fractional values. Additionally, there are modules for specific applications such as calendars, traffic lights, data width converters, synchronizers, and various signal detection and conversion units.",
"bbox": [81, 286, 480, 436],
"page_idx": 3
},
{
"type": "text",
"text": "This classification framework facilitates a nuanced understanding of the benchmark, highlighting its versatility and applicability across different domains of digital system design and analysis.",
"bbox": [81, 438, 482, 481],
"page_idx": 3
},
{
"type": "text",
"text": "2.3 Benchmark Evaluation",
"text_level": 1,
"bbox": [83, 497, 316, 511],
"page_idx": 3
},
{
"type": "text",
"text": "2.3.1 Overview of Test Files. For each design, RTLLM-2.0 provides the following information in three separate files.",
"bbox": [81, 515, 480, 542],
"page_idx": 3
},
{
"type": "text",
"text": "Description (design_description.txt): A natural language description of the target design's functionality, serving as a prompt for LLMs to generate RTL code. It includes the module name and all input/output (I/O) signals with names and widths, enabling automatic functionality verification with the provided testbench.",
"bbox": [81, 542, 482, 611],
"page_idx": 3
},
{
"type": "text",
"text": "Testbench (testbench.v): A testbench containing multiple test cases with input and expected output values. It corresponds to the module name and I/O signals in design_description.txt and is used to verify design functionality.",
"bbox": [81, 613, 480, 667],
"page_idx": 3
},
{
"type": "text",
"text": "Correct Design (designer_RTL.v): A reference Verilog design hand-crafted by human designers. By comparing with this reference design, we can quantitatively evaluate the design qualities of the automatically generated design.",
"bbox": [81, 667, 480, 723],
"page_idx": 3
},
{
"type": "text",
"text": "2.3.2 Evaluation Metrics. To systematically evaluate the generated design RTL, we summarize three progressive goals, which can all be evaluated with our benchmark. The first and basic goal is the syntax goal. It means the syntax of the generated RTL design should at least be correct. It can be verified by checking whether the design can be correctly synthesized into a netlist by synthesis tools [43]. The second is the functionality goal. It requires the generated RTL design to function as expected, verified by passing all test cases provided in testbench.v. While the testbench samples a reasonable number of cases, passing them doesn't guarantee $100\\%$ functionality correctness. If the design is correct in both syntax and functionality, it is considered successful. However, for practical use, its design qualities, including performance, power, and area (PPA), should",
"bbox": [81, 736, 482, 916],
"page_idx": 3
},
{
"type": "text",
"text": "also be desirable. This is the quality goal, verified by measuring PPA values after synthesis and layout.",
"bbox": [513, 203, 911, 231],
"page_idx": 3
},
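The syntax and functionality goals above map directly onto an automated checking loop. Below is a minimal Python sketch, assuming the open-source Icarus Verilog tools (`iverilog`/`vvp`) stand in for the commercial synthesis and simulation flow, and assuming the testbench reports mismatches by printing "Error"; both are illustrative assumptions, not the benchmark's released harness.

```python
import os
import subprocess
import tempfile

def check_design(design_v: str, testbench_v: str) -> str:
    """Return 'syntax_fail', 'func_fail', or 'pass' for one generated design."""
    with tempfile.TemporaryDirectory() as tmp:
        design = os.path.join(tmp, "design.v")
        tb = os.path.join(tmp, "testbench.v")
        sim = os.path.join(tmp, "sim.out")
        with open(design, "w") as f:
            f.write(design_v)
        with open(tb, "w") as f:
            f.write(testbench_v)
        # Syntax goal: elaboration with Icarus Verilog stands in for synthesis here.
        compiled = subprocess.run(["iverilog", "-o", sim, design, tb],
                                  capture_output=True, text=True)
        if compiled.returncode != 0:
            return "syntax_fail"
        # Functionality goal: run the provided test cases.
        run = subprocess.run(["vvp", sim], capture_output=True, text=True)
        if run.returncode != 0 or "Error" in run.stdout:  # assumed testbench convention
            return "func_fail"
        return "pass"
```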
{
"type": "text",
"text": "3 ASSERTEVAL: OPEN FRAMEWORK AND BENCHMARK FOR RTL VERIFICATION",
"text_level": 1,
"bbox": [514, 244, 874, 276],
"page_idx": 3
},
{
"type": "text",
"text": "3.1 Assertion Generation and Evaluation Framework",
"text_level": 1,
"bbox": [514, 282, 867, 311],
"page_idx": 3
},
{
"type": "text",
"text": "Inspired by the potential of LLMs, translating natural language specifications into assertions has gained significant attention. Some works [20, 30] leverage LLMs to convert human-extracted or human-written specification sentences into corresponding assertions. Other approaches, like AssertLLM [12], process entire specification documents directly, using LLMs to automatically extract assertion-related information from highly unstructured, multi-modal data, including descriptive text and waveform diagrams.",
"bbox": [513, 316, 913, 426],
"page_idx": 3
},
{
"type": "text",
"text": "Despite the growing interest in LLM-based assertion generation, a universal evaluation method and benchmark are still unavailable. To address this challenge, we propose AssertEval, a benchmark and framework designed to evaluate the quality of LLM-based assertion generation across various VLSI designs.",
"bbox": [513, 428, 911, 496],
"page_idx": 3
},
{
"type": "text",
"text": "The generation and evaluation flow is demonstrated in Figure 2. For the generation process, we provide entire specification documents as input; users then generate assertions for each architectural signal with their own assertion generation methods. These assertions are then evaluated against our provided golden RTL implementations using formal property verification techniques.",
"bbox": [513, 497, 913, 579],
"page_idx": 3
},
{
"type": "text",
"text": "After the assertion generation process, the framework automatically evaluates the quality of the generated assertions. Bug-free golden RTL implementations are provided for this evaluation. Based on these golden RTL designs, the generated assertions are verified through formal property verification (FPV) techniques. After performing FPV, the following metrics are computed to evaluate the quality of generated assertions:",
"bbox": [513, 580, 913, 676],
"page_idx": 3
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- Syntax: checks if generated assertions have syntax errors.",
"- FPV pass/fail: when RTL designs are bug-free, an assertion that passes the FPV check is considered semantically correct, and conversely, a failure indicates an incorrect assertion.",
"- COI coverage: cone of influence (COI) coverage measures the percentage of design logic that is structurally connected to the properties. It is a common metric to evaluate the quality and usefulness of the generated properties."
],
"bbox": [540, 681, 911, 792],
"page_idx": 3
},
{
"type": "text",
"text": "3.2 AssertEval Benchmark Description",
"text_level": 1,
"bbox": [514, 814, 843, 830],
"page_idx": 3
},
{
"type": "text",
"text": "The benchmark AssertEval consists of 18 open-source designs that cover a diverse array of applications, including cryptographic units, processor cores, arithmetic units, communication protocols, and memory controllers. Considering the capability of existing LLM-based generation methods, we have collected specification documents that are fewer than 60 pages and contain fewer than 60",
"bbox": [513, 832, 913, 916],
"page_idx": 3
},
{
"type": "page_number",
"text": "4",
"bbox": [493, 920, 503, 928],
"page_idx": 3
},
{
"type": "image",
"img_path": "images/4fc8e83d10a582628b8a74106c95090120f3f9ea9a9eaa03a3bec2265f8ac62e.jpg",
"image_caption": [
"Figure 2: Evaluation of generated assertions using our benchmark. We provide natural language specification documents as input for the assertion generation process. The generated assertions are then evaluated against the provided golden RTL designs using the FPV technique. Three key metrics are employed to assess the quality of the generated assertions."
],
"image_footnote": [],
"bbox": [99, 80, 468, 275],
"page_idx": 4
},
{
"type": "text",
"text": "architecture-level signals. We list the detailed statistics for each design in Table 3. Additionally, we provide an FPV script for Cadence JasperGold [5], which can be executed with a single button click for ease of use. For each design within the benchmark, we provide the following three files:",
"bbox": [81, 392, 482, 463],
"page_idx": 4
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"- Specification document: This file contains the natural language specification for the design, providing a detailed description of the design architecture and functionality.",
"- Golden RTL implementation: This file comprises the RTL design implementations that are strictly implemented according to the specification. The designs are verified to ensure they are free from bugs, serving as a reliable standard for evaluating the correctness of generated assertions.",
"- FPV script: This script automatically executes FPV, allowing users to execute the verification with a single click."
],
"bbox": [109, 465, 482, 604],
"page_idx": 4
},
{
"type": "text",
"text": "The specification document is highly unstructured, with assertion-related information dispersed across various sections. Additionally, it includes multi-modal data (e.g., descriptive text and waveform diagrams), making the extraction of relevant details challenging.",
"bbox": [81, 607, 486, 662],
"page_idx": 4
},
{
"type": "text",
"text": "Our provided specification typically includes seven key sections: 1) Summary: outlines the design's concepts and features; 2) IO ports: provides detailed information for the interface; 3) Registers: describes all the architecture-level registers in the design; 4) Operation: explains the operational procedures for dataflow and control; 5) Architecture: the high-level workflow and dataflow of the design; 6) Usage examples: offers basic usage scenarios for the design; and 7) Waveform diagram: describes behaviors for different signals. For signals, the specification may only define critical architecture-level IO ports and registers, leaving the designers to detail internal signals for RTL implementations.",
"bbox": [81, 664, 482, 815],
"page_idx": 4
},
{
"type": "text",
"text": "4 OPEN DATASET FOR RTL GENERATION",
"text_level": 1,
"bbox": [81, 828, 449, 842],
"page_idx": 4
},
{
"type": "text",
"text": "In this Section, we present an open-source dataset named RTLCoder-Data for training the LLM for RTL generation. It provides a large 'raw dataset' with 80K samples, tripling the one previously released in RTLCoder [28]. Moreover, we propose an innovative verification-based method to check the functionality correctness of",
"bbox": [81, 845, 482, 916],
"page_idx": 4
},
{
"type": "text",
"text": "each instruction-code data sample. Applying both the functionality checker and syntax checker, we further generate and release a high-quality verified dataset with 7K 'mostly-correct' samples.",
"bbox": [513, 85, 915, 128],
"page_idx": 4
},
{
"type": "text",
"text": "4.1 Basic Dataset Generation Flow",
"text_level": 1,
"bbox": [514, 150, 807, 164],
"page_idx": 4
},
{
"type": "text",
"text": "Our prior work RTLCoder [28] has proposed an automated training dataset generation flow and generated 27K training samples, with each sample being a pair of design instruction (i.e., model input) and the reference RTL code (i.e., expected model output). The instruction can be viewed as the input question for LLMs, describing the desired circuit functionality in natural language. The reference code is the expected Verilog code that implements the functionality. This flow takes advantage of the powerful general text generation ability of the commercial tool GPT with several prompt templates. As Figure 3 shows, the flow includes three stages, which are summarized below.",
"bbox": [511, 167, 913, 306],
"page_idx": 4
},
{
"type": "text",
"text": "Stage 1: Keywords Preparation. The first stage of the data generation flow targets preparing RTL domain keywords for subsequent stages. As process ① in Figure 3 shows, GPT is requested to generate keywords related to digital IC design (i.e., commonly used logic components) based on a set of prompts. We obtain a keyword pool $\\mathcal{L}_{key}$ with hundreds of digital design keywords.",
"bbox": [513, 306, 913, 390],
"page_idx": 4
},
{
"type": "text",
"text": "Stage 2: Instruction Generation. The second stage targets generating sufficient instructions based on the initial keywords and Verilog source code. At process $②$ , existing keywords are extended from $\\mathcal{L}_{key}$ to complete instructions. In addition to keyword-based instruction generation, we also generate instructions based on existing source code collected by us, as shown in process $③$ . By providing GPT with either part of or a complete Verilog code from $\\mathcal{L}_{code}$ collected by [45], we inspire it to create a related Verilog design problem.",
"bbox": [513, 390, 913, 501],
"page_idx": 4
},
{
"type": "text",
"text": "Processes ② and ③ help generate the initial design instruction pool $\\mathcal{L}_{ins}$ . After that, we iteratively augment this pool with mutation. Process ④ applies two types of mutation operations on instructions sampled from the design instruction library $\\mathcal{L}_{ins}$ . Process ⑤ checks every new design instruction using a set of rules, and only valid instructions that pass are added to $\\mathcal{L}_{ins}$ .",
"bbox": [514, 501, 913, 584],
"page_idx": 4
},
{
"type": "text",
"text": "Stage 3: Reference Code Generation. The third stage targets generating the reference code corresponding to each instruction. As shown in process ⑥, we feed each instruction from $\\mathcal{L}_{ins}$ into GPT, generating the corresponding reference design code. Then, depending on whether process ⑦ is applied, we generate two types of datasets in this flow, as we will introduce in Sections 4.2 and 4.3, respectively.",
"bbox": [513, 584, 913, 667],
"page_idx": 4
},
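A compact sketch of how the three stages compose is shown below. Here `query_gpt` and `passes_rule_checks` are hypothetical stand-ins for the commercial-LLM API wrapper and the rule-based checker of process ⑤; the prompt wording is illustrative, not RTLCoder's released templates.

```python
import random

def generate_dataset(query_gpt, passes_rule_checks, keywords, code_pool, n_mutations=1000):
    """Minimal sketch of the three-stage RTLCoder-Data flow (Figure 3)."""
    # Stage 2: seed instructions from keywords (process 2) and from code (process 3).
    instructions = [query_gpt(f"Write a Verilog design task about: {kw}") for kw in keywords]
    instructions += [query_gpt(f"Write a design task that this code solves:\n{c}")
                     for c in code_pool]
    # Processes 4/5: mutate sampled instructions; keep only those passing rule checks.
    for _ in range(n_mutations):
        seed = random.choice(instructions)
        mutated = query_gpt(f"Rewrite this task with changed functionality:\n{seed}")
        if passes_rule_checks(mutated):
            instructions.append(mutated)
    # Stage 3: generate reference code for every instruction (process 6).
    return [(ins, query_gpt(f"Implement this in Verilog:\n{ins}")) for ins in instructions]
```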
{
"type": "text",
"text": "4.2 80K Raw Dataset in RTLCoder-Data",
"text_level": 1,
"bbox": [514, 689, 849, 704],
"page_idx": 4
},
{
"type": "text",
"text": "To collect a large dataset, we continued to execute the basic data generation flow. Compared with the previous dataset from [28], we further enlarge the source code pool $\\mathcal{L}_{code}$ in process ③ and continue the mutation in process ④ during the generation process. In addition, we slightly relaxed the instruction checking conditions in process ⑤. Previously in [28], each new instruction was compared with all existing instructions in $\\mathcal{L}_{ins}$ to check whether it introduces diversity. However, it takes a long time to compare each new instruction with all existing instructions. We removed this time-consuming diversity checking process, and only checked the basic instruction content in process ⑥. As we will introduce in Section 5.2, results demonstrate that removing such diversity checking does not impair the overall diversity in the ultimate dataset. Finally, we accumulate and release a dataset of 80K samples, tripling the previous dataset in RTLCoder [28].",
"bbox": [511, 708, 913, 916],
"page_idx": 4
},
{
"type": "page_number",
"text": "5",
"bbox": [493, 920, 501, 928],
"page_idx": 4
},
{
"type": "image",
"img_path": "images/61bb3c105c3597c87bebd0c2ba8bfa2e80cfd0ee93700ec404ddedba421ebe2b.jpg",
"image_caption": [
"Figure 3: The automated training dataset generation flow to generate RTLCoder-Data. The framework is based on prior RTLCoder [28], but we proposed an innovative automated functionality checking method in Stage 3."
],
"image_footnote": [],
"bbox": [83, 80, 915, 200],
"page_idx": 5
},
{
"type": "text",
"text": "However, since the overall generation process of this 80K dataset relies on prompting commercial LLMs, we cannot guarantee the correctness of all samples. Therefore, we also refer to it as the 'raw' 80K dataset in RTLCoder-Data. To evaluate the effectiveness of this 'raw' dataset, we have trained LLMs with different numbers of data samples; the evaluation results will be introduced in Section 5.3. Results indicate that a larger dataset leads to better model performance, and the performance is not saturated when 80K samples are used for training. Despite possible incorrectness in data samples, a larger dataset still clearly boosts model performance and proves useful.",
"bbox": [81, 263, 483, 402],
"page_idx": 5
},
{
"type": "text",
"text": "4.3 7K Verified Dataset in RTLCoder-Data",
"text_level": 1,
"bbox": [83, 426, 437, 441],
"page_idx": 5
},
{
"type": "text",
"text": "As introduced in Section 4.2, we have accumulated a raw dataset with 80K samples, but it is difficult to verify the correctness of each data sample. Specifically, it is feasible to automatically check the syntax correctness of the code in each sample with tools like VCS [44] or iVerilog, but it is very challenging to check whether the code has the correct functionality (i.e., whether the code functionality matches the description in the instruction). This functionality checking task is exactly hardware verification, which has been studied for decades, still relies on human engineers, and rarely yields guaranteed results. To the best of our knowledge, there is no prior work on automatic examination of code functionality correctness in dataset generation.",
"bbox": [81, 445, 482, 597],
"page_idx": 5
},
{
"type": "text",
"text": "In this work, we made an innovative exploration to enable automatic functionality checking of each instruction-code data sample, shown as the functionality checker in process ⑦ of Figure 3. The solution is based on the LLM-assisted verification method introduced in Section 3. First, based on the functionality description from the instruction, we prompt commercial LLMs to generate corresponding assertions. The prompt techniques are from LLM-assisted verification works such as AssertLLM [12]. Second, we combine the code and generated assertions, and feed them to verification platforms (e.g., JasperGold [5]) to check whether the code violates any assertions. If all assertions pass, it is likely that the code correctly implements the functionality described in the instruction. Still, this is not a $100\\%$ guarantee of sample correctness, but the process is fully automated and leads to sufficiently high-quality samples for model training.",
"bbox": [81, 597, 482, 805],
"page_idx": 5
},
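The conservative filter described here can be summarized in a few lines. In the sketch below, `gen_assertions` and `run_fpv` are hypothetical wrappers around the AssertLLM-style prompting and the formal tool invocation; they are not released APIs.

```python
def verify_sample(instruction: str, code: str, gen_assertions, run_fpv) -> bool:
    """Conservative functionality check for one instruction-code sample.

    gen_assertions: callable, LLM-backed, returns SVAs derived from the instruction.
    run_fpv: callable, wraps a formal tool (e.g., JasperGold), returns a list of
             per-assertion True/False proof results against the code.
    """
    assertions = gen_assertions(instruction)
    if not assertions:
        return False  # nothing to check against: discard the sample
    results = run_fpv(code, assertions)
    # Conservative policy: any failed (or unprovable) assertion discards the
    # sample, even though the assertion itself may be the one that is wrong.
    return all(results)
```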
{
"type": "text",
"text": "A problem in this verification-based functionality checking is that the assertions for verification are also generated by LLMs [12], so the correctness of the assertions is not guaranteed either. As a result, incorrect assertions may be generated for actually correct samples, making correct samples fail the verification process. Therefore, this functionality checking method is conservative: samples passing all assertions are likely to be correct, but correct data samples may fail the checking due to wrong assertions. Applying both",
"bbox": [81, 805, 482, 917],
"page_idx": 5
},
{
"type": "text",
"text": "functionality checking and syntax checking, as indicated in Figure 3, we collected 7K high-quality verified samples. As we will introduce in Section 5.3, the 7K verified dataset leads to better LLM performance compared with models trained with even 50K raw samples.",
"bbox": [513, 263, 913, 319],
"page_idx": 5
},
{
"type": "text",
"text": "5 RTL GENERATION EXPERIMENT RESULTS",
"text_level": 1,
"bbox": [514, 330, 906, 344],
"page_idx": 5
},
{
"type": "text",
"text": "In this Section, we train and evaluate various LLM solutions with the 80K raw dataset and 7K verified dataset from RTLCoder-Data. In addition to extensive comparisons with various other LLM solutions, we study the impact of training data amount, training scheme, and training data quality on LLM performance.",
"bbox": [513, 349, 913, 419],
"page_idx": 5
},
{
"type": "text",
"text": "5.1 LLM Training and Evaluation Setup",
"text_level": 1,
"bbox": [514, 430, 848, 446],
"page_idx": 5
},
{
"type": "text",
"text": "To evaluate the performance of LLM-assisted RTL generation, we adopt two representative benchmarks named VerilogEval [27] and RTLLM [29]. For RTLLM, following the original benchmark [29], each task is counted as a success as long as any of 5 trials passes the test. This can be interpreted as a pass@5 metric. For all tested models, we evaluate all 3 temperature conditions $\\{0.2, 0.5, 0.8\\}$ and report the best performance for each model.",
"bbox": [513, 448, 913, 545],
"page_idx": 5
},
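For reference, the pass@k family that this protocol instantiates can be computed with the standard unbiased estimator; this is the general formula, not code from the paper. RTLLM's protocol (any of 5 trials passing) corresponds to the empirical pass@5 with n = k = 5.

```python
from math import comb

def pass_at_k(n: int, c: int, k: int) -> float:
    """Unbiased pass@k estimate with n samples, c of them functionally correct.

    pass@k = 1 - C(n-c, k) / C(n, k): the probability that at least one of k
    randomly drawn samples (out of n) is correct.
    """
    if n - c < k:
        return 1.0  # too few failures to fill k draws: at least one must pass
    return 1.0 - comb(n - c, k) / comb(n, k)
```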
{
"type": "text",
"text": "We choose Mistral-7B-v0.1 [19] and DeepSeek-Coder-6.7b-Instruct [16] as the basic pre-trained models for finetuning. In all experiments, we opted for the Adam optimizer with $\\beta_{1} = 0.9$ , $\\beta_{2} = 0.999$ , and learning rate $\\gamma = 1\\mathrm{e}{-5}$ , while abstaining from the use of weight decay. Concurrently, we established a context length of 2048 and a global batch size of 256. We trained the model on only 4 consumer-level RTX 4090 GPUs (24GB each), each of which could afford a $2\\times 2048$ context length using DeepSpeed stage-2 [40].",
"bbox": [513, 545, 915, 657],
"page_idx": 5
},
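A minimal sketch of this setup using Hugging Face `TrainingArguments` follows; the per-device batch split, the `bf16` choice, and the DeepSpeed config filename are assumptions chosen to reproduce the stated global batch of 256, not released artifacts.

```python
from transformers import TrainingArguments

# Reported setup: Adam (beta1=0.9, beta2=0.999), lr 1e-5, no weight decay,
# context length 2048, global batch 256 on 4x RTX 4090 (24GB).
args = TrainingArguments(
    output_dir="rtlcoder-ft",
    learning_rate=1e-5,
    adam_beta1=0.9,
    adam_beta2=0.999,
    weight_decay=0.0,
    per_device_train_batch_size=2,      # 2 x 2048-token sequences per 24GB GPU
    gradient_accumulation_steps=32,     # 2 x 32 x 4 GPUs = 256 global batch
    bf16=True,                          # assumed mixed-precision choice
    deepspeed="ds_config_stage2.json",  # hypothetical DeepSpeed stage-2 config path
)
```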
{
"type": "text",
"text": "5.2 Evaluation of Dataset",
"text_level": 1,
"bbox": [514, 667, 735, 681],
"page_idx": 5
},
{
"type": "text",
"text": "To prevent information leakage, for each instruction-code concatenated sample in the training dataset, we computed its maximum",
"bbox": [513, 686, 915, 714],
"page_idx": 5
},
{
"type": "image",
"img_path": "images/6b7c2f17f51bdf9c01694918cdb7b5d08709e3154eb768f4c366e132ff27c6c9.jpg",
"image_caption": [
"(a)"
],
"image_footnote": [],
"bbox": [521, 733, 715, 845],
"page_idx": 5
},
{
"type": "image",
"img_path": "images/44337fff722c973a7c32641cb66a0149ae859311cf9dbae5aa82b3df4689697c.jpg",
"image_caption": [
"(b)",
"Figure 4: Training dataset analysis for the obtained 80K dataset. (a) Similarity measurement between the training dataset and two benchmarks based on the Rouge-L metric. (b) Token count distribution of the instruction and code parts."
],
"image_footnote": [],
"bbox": [725, 733, 919, 845],
"page_idx": 5
},
{
"type": "page_number",
"text": "6",
"bbox": [493, 920, 501, 928],
"page_idx": 5
},
{
"type": "table",
"img_path": "images/96d698005f83a191b3ded905c022bd961e8409e2566077d38aba22705c7b8184.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td></td><td>RTLCoder-Data Raw (80K)</td><td>RTLCoder-Data Verified (7K)</td><td>MG-Verilog [55]</td><td>Goh et al. [15]</td></tr><tr><td>CR</td><td>4.21</td><td>4.32</td><td>5.80</td><td>5.27</td></tr><tr><td>CR: POS</td><td>7.33</td><td>7.45</td><td>9.16</td><td>10.1</td></tr></table>",
"bbox": [86, 80, 488, 137],
"page_idx": 6
},
{
"type": "text",
"text": "Table 4: Diversity scores (CR, CR: POS) of RTLCoder-Data Raw (80K), RTLCoder-Data Verified (7K), and other RTL datasets [15, 55]. Lower CR and CR: POS mean higher dataset diversity. Both datasets from RTLCoder-Data exhibit satisfactory diversity compared with others.",
"bbox": [81, 146, 482, 217],
"page_idx": 6
},
{
"type": "text",
"text": "similarity with all test cases in the benchmarks. We employed Rouge-L<sup>4</sup>, a widely used similarity metric in the LLM domain. As Figure 4 (a) shows, most training samples have a low Rouge-L value of around 0.25, indicating low semantic overlap with the benchmarks. A small number of samples have higher similarity, and we removed samples with Rouge-L $>0.5$ before training. In addition, Figure 4 (b) shows that an instruction-code sample generally fits within 2048 tokens, so we set 2048 as the max length in our finetuning.",
"bbox": [81, 231, 482, 354],
"page_idx": 6
},
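A minimal sketch of this decontamination step, assuming a plain LCS-based Rouge-L on whitespace tokens (the 0.5 threshold is from the text; function and variable names are illustrative):

```python
def rouge_l(a: str, b: str) -> float:
    """Rouge-L F1 on whitespace tokens via longest common subsequence."""
    x, y = a.split(), b.split()
    # Classic O(len(x) * len(y)) LCS dynamic program.
    dp = [[0] * (len(y) + 1) for _ in range(len(x) + 1)]
    for i, xt in enumerate(x):
        for j, yt in enumerate(y):
            dp[i + 1][j + 1] = dp[i][j] + 1 if xt == yt else max(dp[i][j + 1], dp[i + 1][j])
    lcs = dp[-1][-1]
    if lcs == 0:
        return 0.0
    p, r = lcs / len(x), lcs / len(y)
    return 2 * p * r / (p + r)

def decontaminate(train_samples, benchmark_cases, threshold=0.5):
    """Drop training samples whose maximum similarity to any benchmark
    test case exceeds the threshold (0.5 in the paper)."""
    return [s for s in train_samples
            if max(rouge_l(s, c) for c in benchmark_cases) <= threshold]
```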
{
"type": "text",
"text": "To check the diversity of our proposed training datasets RTLCoder-Data-Raw (80K) and RTLCoder-Data-Verified (7K), we utilized two diversity measures: Compression Ratio (CR) and Part-of-Speech Compression Ratio (CR:POS), which are suggested as the best lexical diversity metrics in [41]. CR is calculated with text compression algorithms, which identify redundancy across the whole content. CR:POS captures repeated syntactic redundancy by compressing the part-of-speech (POS) tag sequences of the original text. We followed the method in [41] to extract the tag sequences of the dataset. The results are illustrated in Table 4. Our two datasets have lower CR and CR:POS than the other existing open-source Verilog instruction-code datasets, indicating that RTLCoder-Data-Raw (80K) and RTLCoder-Data-Verified (7K) have satisfactory diversity.",
"bbox": [81, 356, 482, 549],
"page_idx": 6
},
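A minimal sketch of the compression-ratio diversity measure, assuming gzip as the compressor (following the general recipe in [41]; the corpus variable is illustrative):

```python
import gzip

def compression_ratio(texts) -> float:
    """CR = original byte size / compressed byte size of the concatenated
    corpus. Redundant text compresses better, so lower CR = more diverse."""
    blob = "\n".join(texts).encode("utf-8")
    return len(blob) / len(gzip.compress(blob))

# CR:POS applies the same ratio to part-of-speech tag sequences instead,
# e.g. " ".join(tag for _, tag in nltk.pos_tag(nltk.word_tokenize(t)))
# computed per text before concatenation.
```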
{
"type": "text",
"text": "5.3 Result of Trained LLM in RTL Generation",
"text_level": 1,
"bbox": [83, 561, 465, 575],
"page_idx": 6
},
{
"type": "text",
"text": "Table 5 summarizes the comparison of various LLM-assisted RTL generation solutions, including the commercial models GPT-3.5/GPT-4, both closed- and open-source LLMs customized for Verilog generation [27, 38, 45], general software code generators [19, 24, 33], and our fine-tuned models based on DeepSeek-Coder-6.7b-Instruct [16] and Mistral-7B-v0.1 [19] with different amounts of training data and training schemes. Relevant results are also presented in Figure 5, which shows LLM performance versus the amount of training data.",
"bbox": [81, 579, 482, 690],
"page_idx": 6
},
{
"type": "text",
"text": "Overall performance based on RTLCoder-Data. We train the base model directly on RTLCoder-Data Raw (80K) through instruction-supervised fine-tuning, referred to as \"basic direct training\" in Table 5. We can observe that DeepSeek-Direct (80K data samples) outperforms all other baseline models in Eval-Machine and is only inferior to GPT-4 in Eval-Human and RTLLM V1.1. Specifically, in the Eval-Machine part, it even outperforms GPT-4 by an absolute value of $4.7\\%$ in the pass@1 metric. In summary, DeepSeek-Direct (80K data samples) outperforms GPT-3.5 and all non-commercial baselines in all metrics. It is surprising that this lightweight model with only 7 billion parameters could achieve such impressive accuracy.",
"bbox": [81, 691, 482, 856],
"page_idx": 6
},
{
"type": "text",
"text": "Impact of training data amount. To further investigate the impact of dataset size on model performance, we sampled subsets",
"bbox": [83, 857, 482, 883],
"page_idx": 6
},
{
"type": "image",
"img_path": "images/cbab57abc14da28d0dad3303552fa50ec451799a12a6cb2400beb1b607ef300b.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [521, 85, 710, 233],
"page_idx": 6
},
{
"type": "image",
"img_path": "images/18f3c8aab1b2d6e0adbe4b53516f48ae2a60c4fb372acf49116510906e15045a.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [705, 85, 895, 233],
"page_idx": 6
},
{
"type": "image",
"img_path": "images/2a48209e83131811d773bb14e44bd4e0f2bb2802ee03819a144b6642144800f8.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [522, 234, 714, 345],
"page_idx": 6
},
{
"type": "image",
"img_path": "images/6904b764af9b0f8e0fb8d195c4c221b575c920fcc05e2b8dba42d34170a680b0.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [717, 234, 893, 345],
"page_idx": 6
},
{
"type": "image",
"img_path": "images/d488c7302f3ebc8790640bf8fc3daf0488dc5ad3ca2235b471a11851b41aa540.jpg",
"image_caption": ["(a) VerilogEval-Machine"],
"image_footnote": [],
"bbox": [522, 347, 714, 465],
"page_idx": 6
},
{
"type": "image",
"img_path": "images/f22e7de55db50a877dbdf83972a70832a49106823a6bd1373c839c9d4fc841a6.jpg",
"image_caption": [
"(b) VerilogEval-Human",
"Figure 5: The pass@k performance on VerilogEval benchmarks versus the amount of training data from RTLCoder-Data. The performance improves as the data size increases."
],
"image_footnote": [],
"bbox": [717, 347, 892, 467],
"page_idx": 6
},
{
"type": "text",
"text": "of 5K, 27K, and 50K samples from RTLCoder-Data Raw (80K) and then conducted direct finetuning on these subsets. The results are shown in Table 5 and also plotted in Figure 5. We can observe that as the training data volume increases, the overall performance of the model on the benchmarks also improves. For instance, as the training data size increases from 5K to 80K, the model's performance on the Eval-Machine pass@1 metric rises from $53.7\\%$ to $64.7\\%$. Additionally, as illustrated in Figure 5, even with 80K data samples, there are still no signs of performance saturation. This indicates that enlarging the training dataset can significantly boost the model's code generation capabilities.",
"bbox": [511, 541, 913, 694],
"page_idx": 6
},
{
"type": "text",
"text": "Impact of training scheme. We extracted a 27K subset from RTLCoder-Data Raw (80K) and employed the code-quality-feedback-based training scheme proposed in RTLCoder [28] to obtain models named Mistral-Scoring and DeepSeek-Scoring. Their performance is presented in Table 5 and in Figure 5 under 'Scoring-based Training'. Compared with DeepSeek-Direct (27K data samples) and Mistral-Direct (27K data samples), the models trained with the scoring-based scheme are better on all benchmarks, indicating that the scoring-based training method [28] improves model performance.",
"bbox": [511, 694, 913, 819],
"page_idx": 6
},
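The scoring scheme itself is defined in RTLCoder [28]; as a rough illustration of the general idea only (not the paper's exact loss), candidate completions for the same instruction can be weighted by a quality score when computing the training loss:

```python
import torch
import torch.nn.functional as F

def scored_candidate_loss(per_token_logprobs, scores):
    """Rough illustration of quality-feedback training: weight each
    candidate's sequence log-likelihood by a softmaxed quality score,
    so higher-scored code contributes more to the update.
    per_token_logprobs: list of 1-D tensors, one per candidate answer.
    scores: 1-D tensor of quality scores for the same candidates.
    """
    seq_logps = torch.stack([lp.mean() for lp in per_token_logprobs])
    weights = F.softmax(torch.as_tensor(scores, dtype=seq_logps.dtype), dim=0)
    return -(weights * seq_logps).sum()  # weighted negative log-likelihood
```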
{
"type": "text",
"text": "Impact of training data quality. In the 'Verified Dataset' part of Table 5, we directly trained DeepSeek-Coder on the dataset RTLCoder-Data Verified (7K). DeepSeek-Direct (7K verified) outperforms DeepSeek-Direct (27K) across all benchmarks and even surpasses DeepSeek-Direct (50K) on 6/8 metrics. Moreover, DeepSeek-Direct (7K verified) uses $< 20\\%$ of the training time of DeepSeek-Direct (50K). This demonstrates that enhancing the",
"bbox": [511, 819, 913, 915],
"page_idx": 6
},
{
"type": "page_footnote",
"text": "<sup>4</sup>The Rouge-L score $\\in [0,1]$, with values closer to 1 indicating higher similarity between the two sequences.",
"bbox": [81, 892, 480, 915],
"page_idx": 6
},
{
"type": "page_number",
"text": "7",
"bbox": [493, 920, 501, 928],
"page_idx": 6
},
{
"type": "table",
"img_path": "images/6c09a326ae5c51dac954551c608226ee0b1ac76ab2466bd9be407aa382d77068.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"3\">Model Type</td><td rowspan=\"3\">Evaluated Model</td><td rowspan=\"3\">Num of Params</td><td colspan=\"6\">VerilogEval Benchmark [27] (using pass@k metric)</td><td colspan=\"2\">RTLLM V1.1 [29] (using pass@5 metric)</td></tr><tr><td colspan=\"3\">Eval-Machine (%)</td><td colspan=\"3\">Eval-Human (%)</td><td rowspan=\"2\">Syntax-VCS(%)</td><td rowspan=\"2\">Func (%)</td></tr><tr><td>k=1</td><td>k=5</td><td>k=10</td><td>k=1</td><td>k=5</td><td>k=10</td></tr><tr><td rowspan=\"5\">Closed-Source Baseline</td><td>GPT-3.5</td><td>N/A</td><td>46.7</td><td>69.1</td><td>74.1</td><td>26.7</td><td>45.8</td><td>51.7</td><td>89.7</td><td>37.9</td></tr><tr><td>GPT4</td><td>N/A</td><td>60.0</td><td>70.6</td><td>73.5</td><td>43.5</td><td>55.8</td><td>58.9</td><td>100</td><td>65.5</td></tr><tr><td>ChipNeMo [25]</td><td>13B</td><td>43.4</td><td>N/A</td><td>N/A</td><td>22.4</td><td>N/A</td><td>N/A</td><td>N/A</td><td>N/A</td></tr><tr><td>VerilogEval [27]</td><td>16B</td><td>46.2</td><td>67.3</td><td>73.7</td><td>28.8</td><td>45.9</td><td>52.3</td><td>N/A</td><td>N/A</td></tr><tr><td>BetterV [38]</td><td>7B</td><td>64.2</td><td>75.4</td><td>79.1</td><td>40.9</td><td>50.0</td><td>53.3</td><td>N/A</td><td>N/A</td></tr><tr><td rowspan=\"5\">Open-Source Baseline</td><td>Codegen2 [33]</td><td>16B</td><td>5.00</td><td>9.00</td><td>13.9</td><td>0.90</td><td>4.10</td><td>7.25</td><td>72.4</td><td>6.90</td></tr><tr><td>Starcoder [24]</td><td>15B</td><td>46.8</td><td>54.5</td><td>59.6</td><td>18.1</td><td>26.1</td><td>30.4</td><td>93.1</td><td>27.6</td></tr><tr><td>Thakur et al. [45]</td><td>16B</td><td>44.0</td><td>52.6</td><td>59.2</td><td>30.3</td><td>43.9</td><td>49.6</td><td>86.2</td><td>24.1</td></tr><tr><td>Mistral-7B [19]</td><td>7B</td><td>36.9</td><td>48.8</td><td>57.4</td><td>4.49</td><td>12.6</td><td>18.6</td><td>72.4</td><td>20.7</td></tr><tr><td>DeepSeek-Coder [16]</td><td>6.7B</td><td>54.1</td><td>63.8</td><td>67.5</td><td>30.2</td><td>42.2</td><td>46.2</td><td>89.6</td><td>34.5</td></tr><tr><td rowspan=\"2\">Scoring-based Training [28]</td><td>Mistral-Scoring (27K data samples)</td><td>7B</td><td>62.5</td><td>72.2</td><td>76.6</td><td>36.7</td><td>45.5</td><td>49.2</td><td>96.6</td><td>48.3</td></tr><tr><td>DeepSeek-Scoring (27K data samples)</td><td>6.7B</td><td>61.2</td><td>76.5</td><td>81.8</td><td>41.6</td><td>50.1</td><td>53.4</td><td>93.1</td><td>48.3</td></tr><tr><td rowspan=\"5\">Basic Direct Training</td><td>Mistral-Direct (27K data samples)</td><td>7B</td><td>58.9</td><td>70.0</td><td>74.1</td><td>34.4</td><td>42.3</td><td>45.1</td><td>89.7</td><td>41.4</td></tr><tr><td>DeepSeek-Direct (5K data samples)</td><td>6.7B</td><td>53.7</td><td>71.7</td><td>77.1</td><td>32.9</td><td>45.8</td><td>52.4</td><td>93.1</td><td>41.4</td></tr><tr><td>DeepSeek-Direct (27K data samples)</td><td>6.7B</td><td>59.8</td><td>73.6</td><td>77.2</td><td>39.1</td><td>48.3</td><td>51.3</td><td>86.2</td><td>44.8</td></tr><tr><td>DeepSeek-Direct (50K data samples)</td><td>6.7B</td><td>62.6</td><td>75.6</td><td>80.5</td><td>38.9</td><td>48.7</td><td>51.8</td><td>89.7</td><td>55.2</td></tr><tr><td>DeepSeek-Direct (80K data samples)</td><td>6.7B</td><td>64.7</td><td>76.6</td><td>80.8</td><td>42.8</td><td>51.6</td><td>55.0</td><td>93.1</td><td>48.3</td></tr><tr><td>Verified Dataset</td><td>DeepSeek-Direct (7K verified data samples)</td><td>7B</td><td>61.3</td><td>76.3</td><td>80.8</td><td>38.9</td><td>50.1</td><td>55.3</td><td>100</td><td>48.3</td></tr></table>",
"bbox": [96, 77, 898, 390],
"page_idx": 7
},
{
"type": "text",
"text": "Table 5: Performance comparison of RTL code generators on the VerilogEval Benchmark [27] and RTLLM Benchmark [29]. The top scores ranked $1^{\\text{st}}$, $2^{\\text{nd}}$, and $3^{\\text{rd}}$ in each column are marked in Green, Blue, and Red, respectively.",
"bbox": [81, 397, 911, 429],
"page_idx": 7
},
{
"type": "text",
"text": "quality of the training dataset can improve model performance and reduce LLM training cost. It indicates the great potential of our proposed assertion-based functionality checking technique.",
"bbox": [81, 450, 482, 492],
"page_idx": 7
},
{
"type": "text",
"text": "6 LIMITATION AND CHALLENGES",
"text_level": 1,
"bbox": [83, 512, 388, 526],
"page_idx": 7
},
{
"type": "text",
"text": "Finally, we would like to discuss some challenges and questions we encountered during the development of the datasets and benchmarks for LLM-assisted design automation solutions, and share our thoughts on these questions.",
"bbox": [81, 530, 482, 585],
"page_idx": 7
},
{
"type": "text",
"text": "When building the open-source benchmark for RTL generation, we encountered several challenges:",
"bbox": [81, 585, 482, 614],
"page_idx": 7
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"(1) Shall we include more complex designs in the benchmark? Due to the limited abilities of existing LLMs, almost all LLMs have difficulty generating 'correct' RTL code for very complex designs. As a result, overly complex designs often fail to differentiate the capabilities of the models. In addition, it is difficult to precisely describe complex designs in natural language.",
"(2) How detailed should the description be? When descriptions are overly vague or general, LLMs struggle to produce designs that meet the expected functionality, making it difficult to assess model capabilities. Conversely, if descriptions are too detailed, focusing on intricate RTL circuit specifics, RTL generation effectively becomes a form of 'code translation', which also fails to demonstrate the general generative abilities of LLMs. Therefore, the level of detail in the description for benchmarking requires careful consideration.",
"(3) How to alleviate the influence of training data leakage on the benchmark scores? The overlap between the training dataset and benchmarks should always be carefully examined, because an overfitted LLM cannot generalize well in practice. An overfitted LLM can easily lead to unfair comparisons and"
],
"bbox": [101, 625, 478, 912],
"page_idx": 7
},
{
"type": "text",
"text": "misleading conclusions. However, the text similarity approximation we used based on the Rouge-L metric may not be perfect. In addition, leakage during the LLM pre-training process is difficult to control. How to define and evaluate data leakage in RTL generation is still a challenging open problem.",
"bbox": [553, 450, 911, 518],
"page_idx": 7
},
{
"type": "text",
"text": "The main challenge in LLM-based assertion generation centers on improving the quality of the generated assertions. We break this challenge down into two key questions:",
"bbox": [513, 521, 911, 563],
"page_idx": 7
},
{
"type": "list",
"sub_type": "text",
"list_items": [
"(1) How to better quantify assertion quality? Existing metrics like syntax/semantics correctness and COI coverage are useful but inadequate for complex verification scenarios, such as capturing state transitions or ensuring that different assertions cover distinct properties. More precise evaluation techniques are worth exploring in future work.",
"(2) What limits the generation of high-quality assertions? High-quality assertions depend not only on LLM capabilities but also on the richness of the specification documents. Specifications that lack detailed functionality or connectivity will limit the effectiveness of assertion generation, regardless of the capability of the LLM."
],
"bbox": [532, 566, 911, 729],
"page_idx": 7
},
{
"type": "text",
"text": "7 CONCLUSION",
"text_level": 1,
"bbox": [514, 743, 663, 757],
"page_idx": 7
},
{
"type": "text",
"text": "In this work, we present our latest advances in open-source benchmarks and datasets for developing LLMs to assist in design RTL generation and verification. We fully open-sourced 1) RTLLM 2.0, an updated benchmark for the evaluation of LLM-assisted RTL generation; 2) AssertEval, a benchmark for the evaluation of LLM-assisted assertion generation for verification; and 3) RTLCoder-Data, an extended open-source dataset for training LLMs for RTL generation. It provides 80K instruction-code data samples, as well as a 7K verified high-quality dataset. These open-source circuit data are provided as off-the-shelf resources, targeting more democratized and reproducible AI for EDA research.",
"bbox": [511, 761, 913, 912],
"page_idx": 7
},
{
"type": "page_number",
"text": "8",
"bbox": [493, 920, 503, 928],
"page_idx": 7
},
{
"type": "text",
"text": "REFERENCES",
"text_level": 1,
"bbox": [84, 84, 200, 97],
"page_idx": 8
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"[1] Fnu Aditi and Michael S Hsiao. 2022. Hybrid Rule-based and Machine Learning System for Assertion Generation from Natural Language Specifications. In Asian Test Symposium (ATS).",
"[2] Ahmed Allam and Mohamed Shalan. 2024. RTL-Repo: A Benchmark for Evaluating LLMs on Large-Scale RTL Design Projects. arXiv preprint arXiv:2405.17378 (2024).",
"[3] Jitendra Bhandari, Johann Knechtel, Ramesh Narayanaswamy, Siddharth Garg, and Ramesh Karri. 2024. LLM-Aided Testbench Generation and Bug Detection for Finite-State Machines. arXiv preprint arXiv:2406.17132 (2024).",
"[4] Jason Blocklove, Siddharth Garg, Ramesh Karri, and Hammond Pearce. 2023. Chip-Chat: Challenges and Opportunities in Conversational Hardware Design. arXiv preprint arXiv:2305.13243 (2023).",
"[5] Cadence. 2023. Jasper Formal Verification Platform. https://www.cadence.com/en_US/home/tools/system-design-and-verification/formal-and-static-verification.html.",
"[6] Kaiyan Chang, Kun Wang, Nan Yang, Ying Wang, Dantong Jin, Wenlong Zhu, Zhirong Chen, Cangyuan Li, Hao Yan, Yunhao Zhou, et al. 2024. Data is all you need: Finetuning LLMs for Chip Design via an Automated design-data augmentation framework. arXiv preprint arXiv:2403.11202 (2024).",
"[7] Kaiyan Chang, Ying Wang, Haimeng Ren, Mengdi Wang, Shengwen Liang, Yinhe Han, Huawei Li, and Xiaowei Li. 2023. ChipGPT: How far are we from natural language hardware design. arXiv preprint arXiv:2305.14019 (2023).",
"[8] Lei Chen, Yiqi Chen, Zhufei Chu, Wenji Fang, Tsung-Yi Ho, et al. 2024. The dawn of AI-native EDA: Promises and challenges of large circuit models. arXiv preprint arXiv:2403.07257 (2024).",
"[9] Fan Cui, Chenyang Yin, et al. 2024. OriGen: Enhancing RTL Code Generation with Code-to-Code Augmentation and Self-Reflection. arXiv preprint arXiv:2407.16237 (2024).",
"[10] Alessandro Danese, Nicolò Dalla Riva, and Graziano Pravadelli. 2017. A-team: Automatic template-based assertion miner. In DAC.",
"[11] Wenji Fang, Guangyu Hu, and Hongce Zhang. 2023. r-map: Relating Implementation and Specification in Hardware Refinement Checking. IEEE TCAD (2023).",
"[12] Wenji Fang, Mengming Li, Min Li, Zhiyuan Yan, Shang Liu, Hongce Zhang, and Zhiyao Xie. 2024. AssertLLM: Generating and Evaluating Hardware Verification Assertions from Design Specifications via Multi-LLMs. arXiv preprint arXiv:2402.00386 (2024).",
"[13] Steven J Frederiksen, John Aromando, and Michael S Hsiao. 2020. Automated Assertion Generation from Natural Language Specifications. In ITC.",
"[14] Samuele Germiniani and Graziano Pravadelli. 2022. Harm: a hint-based assertion miner. IEEE TCAD (2022).",
"[15] Emil Goh, Maoyang Xiang, I Wey, T Hui Teo, et al. 2024. From English to ASIC: Hardware Implementation with Large Language Model. arXiv preprint arXiv:2403.07039 (2024).",
"[16] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Y Wu, YK Li, et al. 2024. DeepSeek-Coder: When the Large Language Model Meets Programming-The Rise of Code Intelligence. arXiv preprint arXiv:2401.14196 (2024).",
"[17] Christopher B Harris and Ian G Harris. 2016. Glast: Learning formal grammars to translate natural language specifications into hardware assertions. In DATE.",
"[18] Hanxian Huang, Zhenghan Lin, Zixuan Wang, Xin Chen, Ke Ding, and Jishen Zhao. 2024. Towards LLM-Powered Verilog RTL Assistant: Self-Verification and Self-Correction. arXiv preprint arXiv:2406.00115 (2024).",
"[19] Albert Q Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, et al. 2023. Mistral 7B. arXiv preprint arXiv:2310.06825 (2023).",
"[20] Rahul Kande, Hammond Pearce, et al. 2024. (Security) Assertions by Large Language Models. IEEE Transactions on Information Forensics and Security (TIFS) (2024).",
"[21] Oliver Keszocze and Ian G Harris. 2019. Chatbot-based assertion generation from natural language specifications. In Forum for Specification and Design Languages.",
"[22] Rahul Krishnamurthy and Michael S Hsiao. 2019. Controlled natural language framework for generating assertions from hardware specifications. In ICSC.",
"[23] Rahul Krishnamurthy and Michael S Hsiao. 2019. Ease: Enabling hardware assertion synthesis from english. In Rules and Reasoning: Third International Joint Conference.",
"[24] Raymond Li, Loubna Ben Allal, Yangtian Zi, et al. 2023. StarCoder: may the source be with you! arXiv preprint arXiv:2305.06161 (2023).",
"[25] Mingjie Liu, Teodor-Dumitru Ene, Robert Kirby, Chris Cheng, Nathaniel Pinckney, Rongjian Liang, Jonah Alben, Himyanshu Anand, Sanmitra Banerjee, Ismet Bayraktaroglu, et al. 2023. ChipNeMo: Domain-Adapted LLMs for Chip Design. arXiv preprint arXiv:2311.00176 (2023).",
"[26] Mingjie Liu, Minwoo Kang, Ghaith Bany Hamad, Syed Suhaib, and Haoxing Ren. 2024. Domain-Adapted LLMs for VLSI Design and Verification: A Case Study on Formal Verification. In 2024 IEEE 42nd VLSI Test Symposium (VTS). IEEE, 1-4.",
"[27] Mingjie Liu, Nathaniel Pinckney, Brucek Khailany, and Haoxing Ren. 2023. VerilogEval: Evaluating Large Language Models for Verilog Code Generation. arXiv preprint arXiv:2309.07544 (2023)."
],
"bbox": [84, 101, 482, 897],
"page_idx": 8
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"[28] Shang Liu, Wenji Fang, Yao Lu, Qijun Zhang, Hongce Zhang, and Zhiyao Xie. 2023. RTLCoder: Outperforming GPT-3.5 in Design RTL Generation with Our Open-Source Dataset and Lightweight Solution. arXiv preprint arXiv:2312.08617 (2023).",
"[29] Yao Lu, Shang Liu, Qijun Zhang, and Zhiyao Xie. 2023. RTLLM: An Open-Source Benchmark for Design RTL Generation with Large Language Model. arXiv preprint arXiv:2308.05345 (2023).",
"[30] Bhabesh Mali, Karthik Maddala, Sweeya Reddy, Vatsal Gupta, Chandan Karfa, and Ramesh Karri. 2024. ChIRAAG: ChatGPT Informed Rapid and Automated Assertion Generation. arXiv preprint arXiv:2402.00093 (2024).",
"[31] Madhav Nair, Rajat Sadhukhan, et al. 2023. Generating secure hardware using chatgpt resistant to cwes. Cryptology ePrint Archive (2023).",
"[32] Andre Nakkab, Sai Qian Zhang, Ramesh Karri, and Siddharth Garg. 2024. Rome was Not Built in a Single Step: Hierarchical Prompting for LLM-based Chip Design. arXiv preprint arXiv:2407.18276 (2024).",
"[33] Erik Nijkamp, Hiroaki Hayashi, Caiming Xiong, Silvio Savarese, and Yingbo Zhou. 2023. Codegen2: Lessons for training lms on programming and natural languages. arXiv preprint arXiv:2305.02309 (2023).",
"[34] OpenAI. 2023. GPT-4 Technical Report. arXiv preprint arXiv:2303.08774 (2023).",
"[35] Marcelo Orenes-Vera, Aninda Manocha, et al. 2021. AutoSVA: Democratizing Formal Verification of RTL Module Interactions. In DAC.",
"[36] Marcelo Orenes-Vera, Margaret Martonosi, and David Wentzlaff. 2023. Using LLMs to Facilitate Formal Verification of RTL. arXiv e-prints (2023), arXiv-2309.",
"[37] Ganapathy Parthasarathy, Saurav Nanda, Parivesh Choudhary, and Pawan Patil. [n.d.]. SpecToSVA: Circuit Specification Document to SystemVerilog Assertion Translation. In 2021 Second Document Intelligence Workshop at KDD.",
"[38] Zehua Pei, Hui-Ling Zhen, Mingxuan Yuan, Yu Huang, and Bei Yu. 2024. BetterV: Controlled Verilog Generation with Discriminative Guidance. arXiv preprint arXiv:2402.03375 (2024).",
"[39] Martin Rapp, Hussam Amrouch, Yibo Lin, Bei Yu, David Z Pan, Marilyn Wolf, and Jörg Henkel. 2021. MLCAD: A survey of research in machine learning for CAD keynote paper. IEEE TCAD (2021).",
"[40] Jeff Rasley, Samyam Rajbhandari, Olatunji Ruwase, and Yuxiong He. 2020. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In KDD.",
"[41] Chantal Shaib, Joe Barrow, Jiuding Sun, Alexa F Siu, Byron C Wallace, and Ani Nenkova. 2024. Standardizing the measurement of text diversity: A tool and a comparative analysis of scores. arXiv preprint arXiv:2403.00553 (2024).",
"[42] Chuyue Sun, Christopher Hahn, and Caroline Trippel. 2023. Towards Improving Verification Productivity with Circuit-Aware Translation of Natural Language to SystemVerilog Assertions. In International Workshop on Deep Learning-aided Verification.",
"[43] Synopsys. 2023. Design Compiler® RTL Synthesis. https://www.synopsys.com/implementation-and-signoff/rtl-synthesis-test/design-compiler-nxt.html.",
"[44] Synopsys. 2023. VCS® functional verification solution. https://www.synopsys.com/verification/simulation/vcs.html.",
"[45] Shailja Thakur, Baleegh Ahmad, Zhenxing Fan, Hammond Pearce, Benjamin Tan, Ramesh Karri, Brendan Dolan-Gavitt, and Siddharth Garg. 2023. Benchmarking Large Language Models for Automated Verilog RTL Code Generation. In DATE.",
"[46] Shailja Thakur, Jason Blocklove, Hammond Pearce, Benjamin Tan, Siddharth Garg, and Ramesh Karri. 2023. AutoChip: Automating HDL Generation Using LLM Feedback. arXiv preprint arXiv:2311.04887 (2023).",
"[47] YunDa Tsai, Mingjie Liu, and Haoxing Ren. 2023. RTLFixer: Automatically fixing RTL syntax errors with large language models. arXiv preprint arXiv:2311.16543 (2023).",
"[48] Shobha Vasudevan, David Sheridan, Sanjay Patel, David Tcheng, Bill Tuohy, and Daniel Johnson. 2010. Goldmine: Automatic assertion generation using data mining and static analysis. In DATE.",
"[49] Ning Wang, Bingkun Yao, Jie Zhou, Xi Wang, Zhe Jiang, and Nan Guan. 2024. Large Language Model for Verilog Generation with Golden Code Feedback. arXiv preprint arXiv:2407.18271 (2024).",
"[50] Hasini Witharana, Yangdi Lyu, Subodha Charles, and Prabhat Mishra. 2022. A survey on assertion-based hardware verification. ACM Computing Surveys (CSUR) 54, 11s (2022), 1-33.",
"[51] Zhiyao Xie. 2022. Intelligent Circuit Design and Implementation with Machine Learning. Ph.D. Dissertation. Duke University.",
"[52] Ke Xu, Jialin Sun, et al. 2024. MEIC: Re-thinking RTL Debug Automation using LLMs. arXiv preprint arXiv:2405.06840 (2024).",
"[53] Sichao Yang and Ye Yang. 2024. FormalEval: A Method for Automatic Evaluation of Code Generation via Large Language Models. In ISEDA.",
"[54] Xufeng Yao, Haoyang Li, Tsz Ho Chan, Wenyi Xiao, Mingxuan Yuan, Yu Huang, Lei Chen, and Bei Yu. 2024. HDLdebugger: Streamlining HDL debugging with large language models. arXiv preprint arXiv:2403.11671 (2024).",
"[55] Yongan Zhang, Zhongzhi Yu, et al. 2024. MG-Verilog: Multi-grained Dataset Towards Enhanced LLM-assisted Verilog Generation. arXiv preprint arXiv:2407.01910 (2024).",
"[56] Junchen Zhao and Ian G Harris. 2019. Automatic assertion generation from natural language specifications using subtree analysis. In DATE.",
"[57] Yang Zhao, Di Huang, et al. 2024. CodeV: Empowering LLMs for Verilog Generation through Multi-Level Summarization. arXiv preprint arXiv:2407.10424 (2024)."
],
"bbox": [517, 87, 913, 902],
"page_idx": 8
},
{
"type": "page_number",
"text": "9",
"bbox": [495, 920, 501, 928],
"page_idx": 8
}
]