Add Batch e2bd439e-48e5-4561-9912-b10fa4b5c117
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +64 -0
- 2501.13xxx/2501.13919/2ca1593e-d4fc-4339-9bba-11f11e8e6907_content_list.json +0 -0
- 2501.13xxx/2501.13919/2ca1593e-d4fc-4339-9bba-11f11e8e6907_model.json +0 -0
- 2501.13xxx/2501.13919/2ca1593e-d4fc-4339-9bba-11f11e8e6907_origin.pdf +3 -0
- 2501.13xxx/2501.13919/full.md +395 -0
- 2501.13xxx/2501.13919/images.zip +3 -0
- 2501.13xxx/2501.13919/layout.json +0 -0
- 2501.13xxx/2501.13925/b36151e7-51e9-449b-a4e2-a01cd46a04c4_content_list.json +0 -0
- 2501.13xxx/2501.13925/b36151e7-51e9-449b-a4e2-a01cd46a04c4_model.json +0 -0
- 2501.13xxx/2501.13925/b36151e7-51e9-449b-a4e2-a01cd46a04c4_origin.pdf +3 -0
- 2501.13xxx/2501.13925/full.md +423 -0
- 2501.13xxx/2501.13925/images.zip +3 -0
- 2501.13xxx/2501.13925/layout.json +0 -0
- 2501.13xxx/2501.13926/85005fd1-a0be-4094-9326-d9dc14bda98c_content_list.json +0 -0
- 2501.13xxx/2501.13926/85005fd1-a0be-4094-9326-d9dc14bda98c_model.json +0 -0
- 2501.13xxx/2501.13926/85005fd1-a0be-4094-9326-d9dc14bda98c_origin.pdf +3 -0
- 2501.13xxx/2501.13926/full.md +0 -0
- 2501.13xxx/2501.13926/images.zip +3 -0
- 2501.13xxx/2501.13926/layout.json +0 -0
- 2501.13xxx/2501.13928/fd24d2f4-5582-494f-93fc-ab143e630789_content_list.json +0 -0
- 2501.13xxx/2501.13928/fd24d2f4-5582-494f-93fc-ab143e630789_model.json +0 -0
- 2501.13xxx/2501.13928/fd24d2f4-5582-494f-93fc-ab143e630789_origin.pdf +3 -0
- 2501.13xxx/2501.13928/full.md +434 -0
- 2501.13xxx/2501.13928/images.zip +3 -0
- 2501.13xxx/2501.13928/layout.json +0 -0
- 2501.14xxx/2501.14143/dbe5f8b6-224b-4a5e-ae76-21a2b393dee5_content_list.json +0 -0
- 2501.14xxx/2501.14143/dbe5f8b6-224b-4a5e-ae76-21a2b393dee5_model.json +0 -0
- 2501.14xxx/2501.14143/dbe5f8b6-224b-4a5e-ae76-21a2b393dee5_origin.pdf +3 -0
- 2501.14xxx/2501.14143/full.md +0 -0
- 2501.14xxx/2501.14143/images.zip +3 -0
- 2501.14xxx/2501.14143/layout.json +0 -0
- 2501.14xxx/2501.14195/55154dce-d70c-43aa-a450-f39ef81abb90_content_list.json +0 -0
- 2501.14xxx/2501.14195/55154dce-d70c-43aa-a450-f39ef81abb90_model.json +0 -0
- 2501.14xxx/2501.14195/55154dce-d70c-43aa-a450-f39ef81abb90_origin.pdf +3 -0
- 2501.14xxx/2501.14195/full.md +0 -0
- 2501.14xxx/2501.14195/images.zip +3 -0
- 2501.14xxx/2501.14195/layout.json +0 -0
- 2501.14xxx/2501.14208/e2caf765-1c34-43b1-b3ae-52afa7b5b7ff_content_list.json +0 -0
- 2501.14xxx/2501.14208/e2caf765-1c34-43b1-b3ae-52afa7b5b7ff_model.json +0 -0
- 2501.14xxx/2501.14208/e2caf765-1c34-43b1-b3ae-52afa7b5b7ff_origin.pdf +3 -0
- 2501.14xxx/2501.14208/full.md +0 -0
- 2501.14xxx/2501.14208/images.zip +3 -0
- 2501.14xxx/2501.14208/layout.json +0 -0
- 2501.14xxx/2501.14240/3d87a418-e222-4e41-ac34-ee2f21e49a0d_content_list.json +1186 -0
- 2501.14xxx/2501.14240/3d87a418-e222-4e41-ac34-ee2f21e49a0d_model.json +1497 -0
- 2501.14xxx/2501.14240/3d87a418-e222-4e41-ac34-ee2f21e49a0d_origin.pdf +3 -0
- 2501.14xxx/2501.14240/full.md +250 -0
- 2501.14xxx/2501.14240/images.zip +3 -0
- 2501.14xxx/2501.14240/layout.json +0 -0
- 2501.14xxx/2501.14249/2102c0e9-a2a1-42c5-8617-430fd4e1c8e9_content_list.json +0 -0
.gitattributes
CHANGED
@@ -5037,3 +5037,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 2501.18xxx/2501.18438/213124ac-7a76-4984-99ca-571ebf97895b_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2501.18xxx/2501.18636/e30382d9-0f40-46a6-8508-f33971f11e94_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2501.18xxx/2501.18648/c00ff309-f82a-43c3-88a9-37f96fe485c9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.13xxx/2501.13919/2ca1593e-d4fc-4339-9bba-11f11e8e6907_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.13xxx/2501.13925/b36151e7-51e9-449b-a4e2-a01cd46a04c4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.13xxx/2501.13926/85005fd1-a0be-4094-9326-d9dc14bda98c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.13xxx/2501.13928/fd24d2f4-5582-494f-93fc-ab143e630789_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14143/dbe5f8b6-224b-4a5e-ae76-21a2b393dee5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14195/55154dce-d70c-43aa-a450-f39ef81abb90_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14208/e2caf765-1c34-43b1-b3ae-52afa7b5b7ff_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14240/3d87a418-e222-4e41-ac34-ee2f21e49a0d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14249/2102c0e9-a2a1-42c5-8617-430fd4e1c8e9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14257/be9000a2-0141-4dfa-85bb-5e3f9cb557d0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14275/032c7d3d-8882-4f99-83ff-f39d8eac1498_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14342/9b4c65dc-422c-498a-9e36-9c6fe5c856d7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14350/ae62e6d0-9e9a-4e72-b034-92dbbe79d978_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14387/6f5cd0b1-4220-4ac3-8663-55cc365c3b4d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14548/9e821bd7-0dde-4c61-bc7d-3854f97ccc82_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14607/f84c4c39-46aa-40a6-badd-3da4a89390b1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14723/2013a344-2c0f-4d86-b888-2441a0a73f9f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14729/51e55e0e-249b-4972-b45b-6be09739c215_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14914/ad394808-3709-40a6-a9bc-6db8a89029aa_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.14xxx/2501.14956/2022773e-c174-4e34-94bd-33f1dd5a6090_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15034/fc3a3f22-411a-45bf-81f2-f24f5ed94c56_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15038/89e1c9a1-a896-4ae3-9a68-a6d9d26d1834_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15061/efa888a6-9f2c-4511-9bde-7ac0dfc313fa_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15089/b98a1233-7f7b-4621-972d-e59ecc5ef495_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15091/a15f3492-2f8a-4b86-93b0-a27371537938_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15111/3b7472e6-6d07-4f84-8469-c96f78921d00_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15140/23a03dc7-d13c-49af-804b-4e85071347aa_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15145/9d377d21-5f1b-4aac-958a-3b029b31f79a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15167/c2d48ace-316d-42f7-bcc8-0620b12c40cd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15177/80268c56-f5a3-47a6-bc70-ef5c1fc7a273_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15187/2fc40895-ab06-4bd0-b95e-fe70179f0b18_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15228/bb07bbbe-bbaa-4708-a89a-613964c0600e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15368/d1eeeeb1-9c7d-4433-b4ba-d7bee6073781_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15383/4f9ea768-0704-4886-a426-a23907c72ee4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15408/aaed704f-ae84-4847-a5c4-9c09dc90c069_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15417/7d41bbc2-e60c-4888-9495-e700e393ef3a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15463/f8f11d35-bb6c-443c-9a04-513391dd8da1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15510/36830bbc-0645-4bb3-8c2d-1fb8cce73a3b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15542/2b3b9deb-bd26-4fe1-8522-cb17dbfb7581_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15564/b59cefad-3fa4-4d04-a6f4-187269d871c1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15587/3b83f3b9-a17b-4f32-bd9e-b3b7ffeff445_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15598/44076de4-b780-4885-ad0c-487a04159286_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15602/a3027fbe-da31-4b18-9cda-a5c87f67dcd5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15654/babf00c1-3697-4e10-a2b5-c822a8b6a7cf_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15749/74764f09-4e3e-46a0-85e2-3ef1f8f8aa48_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15785/ce4d6d24-ad68-430d-b712-8516bf521658_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15830/65063928-07c7-40e1-bc2e-d2626114b442_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15891/0a8c5369-9e28-4c61-939c-0988d66aff85_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15907/3817194b-dc03-483d-bcd2-a7b812130a00_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.15xxx/2501.15915/d91095db-b3db-440f-bff9-a4c66cb08aa0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.16xxx/2501.16142/eb92a180-bfd5-4703-bc39-3d4bb5b61c27_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.16xxx/2501.16207/bb2b9069-36fa-47bb-ae48-1e38837f7e78_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.16xxx/2501.16212/7c9ee7b4-bd82-4799-bd9a-cc02210f39db_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.16xxx/2501.16222/a40e1551-3c1a-4b7f-ad26-3d0405ba2d41_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.16xxx/2501.16265/dfd907ee-cb85-4e88-874f-bad07b4da737_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.16xxx/2501.16277/31c04007-6bd5-43bf-97b1-d79473710cfc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.16xxx/2501.16327/45a66108-09b9-4d0c-b583-4cc573ea1ca7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.16xxx/2501.16404/625b53d9-77d6-4825-a9f6-ea5c55c541e5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.17xxx/2501.17195/dfd5661c-4703-4aa5-ac63-5350524974bf_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.18xxx/2501.18616/fc0a2b6f-fd2d-476e-b012-b4aad70b4a59_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.18xxx/2501.18624/74e44bd4-1033-46ea-9524-a95b33a3aa45_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2501.18xxx/2501.18630/ffb3d067-d896-46bb-b42e-284fcc12db25_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2502.14xxx/2502.14868/f7d5c44b-b57d-4d91-bb15-4ac0da92f81c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2503.16xxx/2503.16431/968d32f8-eeb2-422a-852f-bd0c5fa8b55f_origin.pdf filter=lfs diff=lfs merge=lfs -text
2501.13xxx/2501.13919/2ca1593e-d4fc-4339-9bba-11f11e8e6907_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.
2501.13xxx/2501.13919/2ca1593e-d4fc-4339-9bba-11f11e8e6907_model.json
ADDED
The diff for this file is too large to render. See raw diff.
2501.13xxx/2501.13919/2ca1593e-d4fc-4339-9bba-11f11e8e6907_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e5cf5d82d8b3510f40593e775a5037a45f262c02d2a5a99c22f632f22748df93
+size 3652462
2501.13xxx/2501.13919/full.md
ADDED
@@ -0,0 +1,395 @@
# Temporal Preference Optimization of Large Multimodal Models

Rui Li* Xiaohan Wang* Yuhui Zhang Orr Zohar Zeyu Wang Serena Yeung-Levy

Stanford University

Links: Project Page | Code | Dataset & Checkpoints

![](images/c3a030cf24f16ac9d5b8e2b4e79629334b0a8dd563da13dd46ad15ddc5cbb222.jpg)

Figure 1: Temporal Preference Optimization (TPO). TPO is a post-training algorithm designed to enhance temporal comprehension in video-LMMs. In TPO, (a) preference data is generated by prompting the video-LMM with both well-grounded and manipulated (irrelevant or incomplete) video clips to collect contrastive response pairs. (b) These pairs undergo an LLM-based post-filtering process to remove noisy or misaligned samples. The generated preference data is then used in the preference optimization process, which improves the model's temporal reasoning by prioritizing preferred responses, ultimately enhancing overall video understanding.

![](images/e93f68becc4068916ea22afe736d0f772c284dee3a9a0c8875aee79ad4a72d34.jpg)

Abstract: Despite recent advancements in video Large Multimodal Models (video-LMMs), accurate temporal grounding remains a key challenge. In this work, we introduce Temporal Preference Optimization (TPO), a post-training framework that unlocks superior temporal reasoning in video-LMMs without requiring human annotations. TPO enables preference modeling by manipulating video inputs to generate contrastive responses, ensuring that preferred responses are more temporally grounded than dis-preferred ones. Through preference learning, TPO enhances the model's capability to distinguish and localize events with better temporal reasoning. Extensive experiments on LongVideoBench, MLVU, and Video-MME demonstrate that TPO significantly improves temporal grounding across multiple video-LMMs. Notably, LLaVA-Video-TPO achieves state-of-the-art performance among 7B models on Video-MME, establishing TPO as a scalable and effective solution for advancing temporal understanding in video analysis.

# 1. Introduction

Recent advancements in video Large Multimodal Models (video-LMMs) [2, 45, 52] have marked a significant step forward for generalizable video understanding. While image-based LMMs [5, 19, 39] primarily focus on spatial reasoning, video-LMMs face the additional complexity of modeling temporal dependencies, a critical aspect for capturing the dynamic nature of video content.

Most existing video-LMMs acquire temporal grounding implicitly during supervised finetuning by leveraging weak correspondences between input videos and textual responses [6, 67]. While some responses may reference specific segments of a video, they lack explicit temporal alignment under next-token prediction training, limiting the model's ability to learn precise temporal grounding. Recently, alternative approaches [7, 20, 29, 46, 51] have emerged that incorporate explicit temporal annotations into training, enriching textual responses with structured temporal information as supervision. However, these methods rely on additional temporal annotations, which are costly to obtain and scale to large training datasets.

In this work, we introduce Temporal Preference Optimization (TPO), a post-training framework designed to enhance the temporal grounding capabilities of video-LMMs. TPO systematically refines the pretrained video-LMMs' ability to distinguish temporally relevant content by leveraging contrastive responses from manipulated video inputs. As shown in Fig. 1, TPO first prompts a video-LMM with the same question on both the original and the corrupted video. Questions are formulated based on a set of video frames, with preferred responses generated using these frames. In contrast, dis-preferred responses are produced using the same question but paired with irrelevant or incomplete frame sets. This pipeline ensures that preferred responses contain richer and more temporally relevant information than dis-preferred ones, thereby establishing a clear preference hierarchy. By dynamically manipulating video inputs based on the query, TPO automatically injects temporal preferences into the preference dataset through simple input transformations. To further refine the dataset, a post-filtering process is applied to remove imperfect samples caused by errors from the pretrained video-LMMs and ambiguous preference data. The resulting preference dataset is then used to optimize the model's temporal grounding capabilities via Direct Preference Optimization (DPO) [44], chosen for its flexibility and stability. This structured pipeline enables TPO to enhance the temporal reasoning capabilities of the base video-LMM using a curated post-training dataset, while preserving its pre-trained knowledge. This makes TPO a scalable and robust solution for advancing video understanding tasks.

We conducted extensive experiments on three challenging multimodal video understanding benchmarks, and the results clearly demonstrate that TPO significantly enhances the temporal grounding capabilities of video-LMMs. Specifically, TPO achieves performance gains of $2.9\%$ on LongVideoBench [55], $3.1\%$ on MLVU [70], and $2.5\%$ on Video-MME [14], when applied to the strong base model LongVA-7B [65]. Furthermore, even when integrated with the state-of-the-art large-scale pretrained video-LMM, LLaVA-Video, TPO still delivers a $2.3\%$ improvement, establishing LLaVA-Video-TPO as the best-performing 7B model on the Video-MME benchmark.

# 2. Preliminaries

Preference learning [41, 49, 72] focuses on modeling human preferences to align model behavior with user expectations. In LLMs and image-LMMs, this involves training models to generate responses favored by users. This is typically achieved by collecting human feedback on pairs of model-generated outputs and learning a function that predicts which output is preferred. Formally, given an input $x$ and two outputs $y^{+}$ (preferred) and $y^{-}$ (dispreferred), the model aims to satisfy:

$$
\pi_{\theta}(y^{+} \mid x) > \pi_{\theta}(y^{-} \mid x) \tag{1}
$$

where $\pi_{\theta}(y \mid x)$ is the model's probability of generating output $y$ given input $x$ with parameters $\theta$. In the context of video-LMMs, a preference dataset $\mathcal{D}$ is constructed as a collection of tuples $(V, q, r^{+}, r^{-})$, where $V$ denotes a video, $q$ represents a query, $r^{+}$ is the preferred, temporally grounded response, and $r^{-}$ is the dispreferred response.
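
For concreteness, a record in $\mathcal{D}$ can be sketched as a plain tuple. The field names and the toy example below are illustrative, not taken from the paper's released data.

```python
# A minimal sketch of one record (V, q, r+, r-) in the preference dataset D.
# Field names and the example values are illustrative.
from typing import NamedTuple

class PreferencePair(NamedTuple):
    video: str          # identifier of the video V
    query: str          # question q about a specific segment
    preferred: str      # r+, generated from the relevant frames
    dispreferred: str   # r-, generated from manipulated frames

sample = PreferencePair(
    video="videos/cooking_demo.mp4",
    query="What does the chef add to the pan after the onions?",
    preferred="After the onions soften, the chef adds minced garlic.",
    dispreferred="The chef appears to be plating a finished salad.",
)
```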

Direct Preference Optimization (DPO) [44] is a methodology that directly integrates human preference data into the optimization of model parameters. Compared to Proximal Policy Optimization (PPO) [41, 49, 72], another popular preference learning implementation, DPO eliminates the need for explicit reward models or complex reinforcement learning algorithms. By leveraging human preference data as a guiding signal during optimization, DPO enhances the model's ability to generate outputs that are better aligned with human values and expectations.

# 3. Temporal Preference Optimization

While prior works focus primarily on aligning LLM outputs with human preferences, our approach uniquely aligns model outputs with intrinsic temporal preferences in videos. To achieve this, we propose Temporal Preference Optimization (TPO), illustrated in Fig. 2, which significantly enhances the video reasoning capabilities of video-LMMs. TPO systematically incorporates temporal grounding into the optimization process by creating preference pairs from the contrast between carefully manipulated video inputs (Sec. 3.1). To further enhance the quality of these preference data, we introduce a rule-based post-filtering step (Sec. 3.2). Finally, Direct Preference Optimization (Sec. 3.3) is leveraged to optimize the model towards temporally preferred outputs without compromising its original pretrained capabilities.

# 3.1. Temporal Preference Modeling

**Query Generation.** Given a video $\mathbf{V}$, we first sample a segment containing a set of frames $S_{a}$, which may be a subset of the video or the entire sequence of frames. To generate descriptive context, we employ an image-based vision-language model (CogVLM2 [19]) to generate captions for each frame in $S_{a}$. These captions serve as the foundation for constructing targeted questions. To ensure diversity and relevance, we design multiple question types and use a structured question-generation prompt, as shown in Fig. 8 (Appendix), incorporating the generated captions. This prompt is then processed by a large language model (GPT-4o-mini) to produce a set of candidate questions specifically tailored to the sampled video frames, resulting in a question set $S_{q}$. This approach ensures that the generated questions are contextually relevant, allowing precise control over subsequent response generation.
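
A minimal sketch of this query-generation step follows. It assumes `frame_captions` comes from a CogVLM2 captioning pass and uses the OpenAI chat API for GPT-4o-mini; the prompt is a paraphrase, not the exact prompt from Fig. 8.

```python
# Sketch of question generation from frame captions, assuming the OpenAI
# chat API for GPT-4o-mini. `frame_captions` would come from CogVLM2.
from openai import OpenAI

client = OpenAI()

def generate_questions(frame_captions: list[str], n_questions: int = 3) -> list[str]:
    prompt = (
        "Below are captions of frames sampled from one video segment:\n"
        + "\n".join(f"- {c}" for c in frame_captions)
        + f"\nWrite {n_questions} diverse questions that can be answered only "
        "from this segment, one per line."
    )
    resp = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": prompt}],
    )
    return [q.strip() for q in resp.choices[0].message.content.splitlines() if q.strip()]
```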

![](images/97e09de38c9a6a62fc8cc6a8224fba3c08b23bc4aaa0d4e166ed2744d9b8a68f.jpg)

Figure 2: In TPO, we introduce two approaches for temporal preference data generation. Preferred responses are generated using questions and their corresponding frames for strong temporal grounding. For dispreferred responses, we introduce: (a) Generation with Irrelevant Information, where all relevant frames are excluded. (b) Generation with Incomplete Information, where only a partial subset of relevant frames is used. These manipulated clips create contrastive response pairs, highlighting differences between well-grounded and manipulated video content. This contrast serves as a learning signal to enhance the model's temporal reasoning.

**Preferred Response Generation.** Preferred responses in the curated dataset are expected to be strongly grounded in the corresponding temporal content. To achieve this, we use the question set $S_{q}$ along with their corresponding frame set $S_{a}$ as input to the video-LMM. By ensuring that the provided video frames are highly relevant to the query, we create conditions that maximize the likelihood of the model generating a high-quality, temporally grounded response. This process guarantees that the preferred responses align with the ideal characteristics for effective temporal grounding in video-LMMs.

**Dis-Preferred Response Generation.** The dis-preferred responses in the preference dataset represent the type of outputs the model aims to discourage, specifically those where it fails to localize relevant information in the video. These responses expose shortcomings in temporal reasoning by highlighting cases where the model struggles to align its predictions with the actual video content. To generate these dis-preferred responses, we manipulate the video inputs to simulate imperfect temporal grounding. As illustrated in Fig. 2, we introduce two strategies for constructing the input frame set $S_{b}$ used in dis-preferred response generation:

(a) Generation with Irrelevant Information: To simulate an extreme failure case where the model misses all relevant frames, we construct $S_{b}$ by excluding the relevant frame set $S_{a}$ and instead sampling from the remaining frames of the video. This ensures that $S_{b}$ contains only irrelevant content, forcing the model to generate a response based on unrelated visual information.

(b) Generation with Incomplete Information: To mimic scenarios where the model has access to only partial relevant information, we construct $S_{b}$ by randomly sampling a subset of $S_{a}$. This setup introduces gaps in the temporal context, making it harder for the model to fully comprehend the event described in the query.

Unlike preferred responses, which are grounded in fully relevant video segments, these manipulated setups significantly increase ambiguity and noise by partially or completely omitting critical visual content. As a result, the model is forced to rely on incomplete or misleading information, making temporal reasoning errors and hallucinations more likely. This intentional contrast between preferred and dis-preferred responses serves as a strong learning signal, helping refine the model's ability to distinguish and accurately localize events in time, ultimately enhancing its temporal reasoning capabilities.
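
Both manipulations reduce to simple frame-index sampling. The sketch below assumes frames are addressed by integer index; the function names are ours.

```python
# Sketch of the two dis-preferred input constructions in Fig. 2.
import random

def irrelevant_frames(num_frames: int, s_a: set[int], k: int) -> list[int]:
    # (a) sample k frames strictly outside the relevant set S_a
    pool = [i for i in range(num_frames) if i not in s_a]
    return sorted(random.sample(pool, k))

def incomplete_frames(s_a: set[int], keep_ratio: float = 0.5) -> list[int]:
    # (b) keep only a random subset of the relevant frames
    k = max(1, int(len(s_a) * keep_ratio))
    return sorted(random.sample(sorted(s_a), k))

s_a = set(range(40, 60))                    # relevant segment S_a
s_b_a = irrelevant_frames(200, s_a, k=20)   # input for strategy (a)
s_b_b = incomplete_frames(s_a)              # input for strategy (b)
```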

# 3.2. LLM-based Post-Filtering

Although we design the preferred responses to be of higher quality than the dis-preferred responses, this distinction is not always guaranteed due to the imperfect video understanding capabilities of the base video-LMMs. In some cases, errors in response generation may lead to misaligned preference pairs, where the preferred response contains noise or the dis-preferred response is of better quality than expected.

To enhance data quality and reduce noise, we introduce a post-filtering pipeline leveraging an LLM (GPT-4o-mini). Specifically, we provide the model with the key frame captions of $S_{a}$, along with their corresponding queries and preference data pairs, and instruct it to filter out samples that meet predefined criteria (detailed prompts are shown in Fig. 9 in the Appendix). The filtering rules target cases where: 1) the dis-preferred response is of higher quality than the preferred response; 2) the preferred response is factually incorrect or misaligned with the video content; 3) the query itself is ambiguous, making preference ranking unreliable. By incorporating this post-filtering step, we effectively eliminate problematic cases that could introduce noise into training, resulting in a refined, higher-quality dataset that better supports effective model optimization and improves temporal grounding performance.
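
A sketch of the filtering call is shown below. The prompt paraphrases the three rules, and the JSON contract is our assumption; the paper's exact prompt is in its Fig. 9.

```python
# Sketch of the LLM-based post-filter; prompt text and JSON schema are ours.
import json
from openai import OpenAI

client = OpenAI()

FILTER_PROMPT = """Key-frame captions:
{captions}

Query: {query}
Preferred response: {preferred}
Dis-preferred response: {dispreferred}

Answer in JSON: {{"discard": true or false}}. Discard the sample if (1) the
dis-preferred response is better than the preferred one, (2) the preferred
response contradicts the captions, or (3) the query is ambiguous."""

def keep_sample(captions, query, preferred, dispreferred) -> bool:
    resp = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[{"role": "user", "content": FILTER_PROMPT.format(
            captions="\n".join(captions), query=query,
            preferred=preferred, dispreferred=dispreferred)}],
        response_format={"type": "json_object"},
    )
    return not json.loads(resp.choices[0].message.content)["discard"]
```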

# 3.3. Training Objective

The generated preference dataset is leveraged to optimize the temporal grounding capabilities of video-LMMs using Direct Preference Optimization (DPO) [44], selected for its robustness and effectiveness in preference-based learning.

Given the preference dataset $\mathcal{D}$ of tuples $(V, q, r^{+}, r^{-})$ and the video-LMM $\pi_{\theta}$, the DPO loss function is defined as:

$$
L_{DPO}(\pi_{\theta}; \pi_{ref}) = -\mathbb{E}_{(V, q, r^{+}, r^{-}) \sim \mathcal{D}} \left[ \log \sigma \left( \beta \left( \log \frac{\pi_{\theta}(r^{+} \mid V, q)}{\pi_{ref}(r^{+} \mid V, q)} - \log \frac{\pi_{\theta}(r^{-} \mid V, q)}{\pi_{ref}(r^{-} \mid V, q)} \right) \right) \right] \tag{2}
$$

where $\sigma$ is the sigmoid function. This objective drives the model to assign higher probabilities to preferred outputs, aligning its behavior more closely with human judgments, while preventing the model from deviating too much from its pretrained distribution.

To better align the model with the preferred responses, we incorporate a supervised fine-tuning objective into the DPO training framework. The combined objective is controlled by the hyperparameter $\alpha$, following [8, 11, 12]:

$$
L_{SFT}(\pi_{\theta}) = -\mathbb{E}_{(V, q, r^{+}, r^{-}) \sim \mathcal{D}} \log \pi_{\theta}(r^{+} \mid V, q) \tag{3}
$$

$$
L(\pi_{\theta}; \pi_{ref}) = L_{DPO} + \alpha L_{SFT} \tag{4}
$$
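
Eqs. (2)-(4) translate directly into a few lines of PyTorch. The sketch below assumes per-response log-probabilities have already been summed over tokens; it is our reading of the objective, not the authors' released code.

```python
# Minimal sketch of the combined objective in Eq. (4).
import torch
import torch.nn.functional as F

def tpo_loss(policy_logp_pos, policy_logp_neg,
             ref_logp_pos, ref_logp_neg,
             beta=0.3, alpha=0.5):
    """policy_logp_*: log pi_theta(r | V, q); ref_logp_*: log pi_ref(r | V, q).
    All inputs are 1-D tensors over a batch of preference pairs."""
    # Eq. (2): DPO term on the implicit reward margin between r+ and r-.
    margin = (policy_logp_pos - ref_logp_pos) - (policy_logp_neg - ref_logp_neg)
    loss_dpo = -F.logsigmoid(beta * margin).mean()
    # Eq. (3): SFT term that reinforces the preferred response.
    loss_sft = -policy_logp_pos.mean()
    # Eq. (4): combined objective.
    return loss_dpo + alpha * loss_sft

# toy usage with random log-probabilities
pairs = torch.randn(4), torch.randn(4), torch.randn(4), torch.randn(4)
print(tpo_loss(*pairs))
```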
+
# 4. Experiments
|
| 88 |
+
|
| 89 |
+
# 4.1. Experimental Settings
|
| 90 |
+
|
| 91 |
+
Evaluation Benchmarks We evaluate TPO and baselines on three widely recognized benchmarks in multimodal video understanding.
|
| 92 |
+
|
| 93 |
+
- Video-MME [14] offers a comprehensive multi-modal evaluation across diverse video lengths, spanning from 11 seconds to 1 hour.
|
| 94 |
+
- LongVideoBench [55] emphasizes reasoning tasks within extended video contexts.
|
| 95 |
+
- MLVU [70] supports multitask evaluation specifically designed for long-form video understanding.
|
| 96 |
+
|
| 97 |
+
Models We test the effectiveness of TPO on two popular video-LMMs, LongVA-7B [65] and LLaVA-Video-7B [68], deriving the following models:
|
| 98 |
+
|
| 99 |
+
1. LongVA-TPO: optimized based on LongVA-7B [65], a capable video-LMM with the long-context video understanding capability transferred from language.
|
| 100 |
+
2. LLaVA-Video-TPO: optimized based on LLaVA-Video-7B [68], the current state-of-the-art 7B videoLMM.
|
| 101 |
+
|
| 102 |
+
Without other states, our ablation study and analysis utilize LongVA-TPO by default.
|

**Implementation Details** For the video source of preference dataset generation, we manually curated 200 keywords, which we used to retrieve 8k videos from the internet to build a diverse and comprehensive dataset. From these crawled videos, we created 10k preference data pairs for LongVA-TPO using our established pipeline. For LLaVA-Video-TPO, we employ a subset of the original LLaVA-Video-178K dataset, which was used for supervised fine-tuning (SFT), to generate TPO data, resulting in a total of 10k preference data pairs.

<table><tr><td rowspan="2">Model</td><td rowspan="2">Size</td><td rowspan="2">LongVideoBench</td><td rowspan="2">MLVU (M-avg)</td><td colspan="4">Video-MME</td></tr><tr><td>Short</td><td>Medium</td><td>Long</td><td>Average</td></tr><tr><td>GPT-4o [2]</td><td>-</td><td>66.7</td><td>64.6</td><td>80.0/82.8</td><td>70.3/76.6</td><td>65.3/72.1</td><td>71.9/77.2</td></tr><tr><td>Video-LLaVA [31]</td><td>7B</td><td>39.1</td><td>47.3</td><td>45.3/46.1</td><td>38.0/40.7</td><td>36.2/38.1</td><td>39.9/41.6</td></tr><tr><td>LLaVA-1.5 [33]</td><td>7B</td><td>40.3</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>PLLaVA [57]</td><td>7B</td><td>40.2</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Qwen-VL-Max [5]</td><td>-</td><td>-</td><td>42.2</td><td>55.8/57.6</td><td>49.2/48.9</td><td>48.9/47.0</td><td>51.3/51.2</td></tr><tr><td>ShareGPT4Video [6]</td><td>8B</td><td>39.7</td><td>46.4</td><td>48.3/53.6</td><td>36.3/39.3</td><td>35.0/37.9</td><td>39.9/43.6</td></tr><tr><td>InternVL-Chat-V1.5 [9]</td><td>20B</td><td>51.2</td><td>50.4</td><td>60.2/61.7</td><td>46.4/49.1</td><td>45.6/46.6</td><td>50.7/52.4</td></tr><tr><td>VideoChat2 [28]</td><td>7B</td><td>39.3</td><td>47.9</td><td>48.3/52.8</td><td>37.0/39.4</td><td>33.2/39.2</td><td>39.5/43.8</td></tr><tr><td>LongLLaVA [61]</td><td>7B</td><td>-</td><td>56.3</td><td>61.9/66.2</td><td>51.4/54.7</td><td>45.4/50.3</td><td>52.9/57.1</td></tr><tr><td>Video-CCAM [13]</td><td>14B</td><td>-</td><td>63.1</td><td>62.2/66.0</td><td>50.6/56.3</td><td>46.7/49.9</td><td>53.2/57.4</td></tr><tr><td>NVILA [36]</td><td>7B</td><td>57.7</td><td>70.1</td><td>75.7/77.6</td><td>62.2/69.0</td><td>54.8/63.3</td><td>64.2/70.0</td></tr><tr><td>Qwen2-VL [52]</td><td>7B</td><td>55.6</td><td>-</td><td>-</td><td>-</td><td>-</td><td>63.3/69.0</td></tr><tr><td>Apollo [74]</td><td>7B</td><td>58.5</td><td>70.9</td><td>-</td><td>-</td><td>-</td><td>61.3/63.3</td></tr><tr><td>LongVA-7B [65]</td><td>7B</td><td>51.3</td><td>58.8</td><td>61.1/61.6</td><td>50.4/53.6</td><td>46.2/47.6</td><td>52.6/54.3</td></tr><tr><td>LLaVA-Video-7B [68]</td><td>7B</td><td>58.2</td><td>70.8</td><td>-</td><td>-</td><td>-</td><td>63.3/69.7</td></tr><tr><td>LongVA-TPO (ours)</td><td>7B</td><td>54.2</td><td>61.7</td><td>63.1/66.6</td><td>54.8/55.3</td><td>47.4/47.9</td><td>55.1/56.6</td></tr><tr><td>LLaVA-Video-TPO (ours)</td><td>7B</td><td>60.1</td><td>71.1</td><td>76.8/78.7</td><td>64.6/69.4</td><td>55.4/66.4</td><td>65.6/71.5</td></tr></table>

Table 1: Results on LongVideoBench [55], MLVU [70] and Video-MME [14] compared with state-of-the-art models. The Video-MME results are presented in the format "w/o subs / w/ subs".

The model is trained on 8 Nvidia A100 80GB GPUs with a batch size of 64. For the preference optimization on LongVA, we set the KL-divergence weight $\beta = 0.3$ and the SFT loss weight $\alpha = 0.5$, while for LLaVA-Video, we set $\beta = 0.2$ and $\alpha = 1$. We train the model on our curated data for 1 epoch. TPO takes about 4 hours on LongVA-7B with a learning rate of $4e^{-6}$, and also about 4 hours on LLaVA-Video-7B with a learning rate of $3e^{-7}$. During data preparation, we employ the GPT-4o-mini language model (text-only input) for question curation and post-filtering. This choice balances cost-effectiveness with efficiency, facilitating a streamlined and scalable data processing workflow.
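
For reference, the reported hyperparameters gathered in one place; the dict layout is ours, the values come from this paragraph.

```python
# TPO training configuration as reported above; layout is illustrative.
TPO_CONFIG = {
    "shared": {"gpus": "8x NVIDIA A100 80GB", "batch_size": 64,
               "epochs": 1, "preference_pairs": 10_000},
    "LongVA-7B": {"beta": 0.3, "alpha": 0.5, "lr": 4e-6},
    "LLaVA-Video-7B": {"beta": 0.2, "alpha": 1.0, "lr": 3e-7},
}
```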

# 4.2. Results

We conducted comprehensive experiments across three established datasets to rigorously assess the effectiveness of TPO in long-form video understanding tasks. We first compare TPO with three different training strategies:

- $\mathrm{SFT}_{\mathrm{Self}}$: Supervised fine-tuning using self-generated data. For a fair comparison, we utilize the same preferred responses from our curated preference dataset to optimize LongVA.
- $\mathrm{SFT}_{\mathrm{LLM}}$: Supervised fine-tuning using LLM-generated data. Following the commonly used data curation pipeline [6, 67], we employ an LLM (GPT-4o-mini) to generate responses given the query and the video captions, which are subsequently used to perform supervised fine-tuning on LongVA. We use the same video data as TPO for a fair comparison.
- Hound-DPO [66] is a previous method that applies Direct Preference Optimization (DPO) [44] to video-LMMs. Their approach leverages ChatGPT [2] to generate ratings for preference data, resulting in a dataset of 17k samples. In contrast, TPO employs a smaller preference dataset generated through a self-generation pipeline, offering a more streamlined alternative. In addition, to ablate the effect of the data source, we also implement Hound-DPO on our collected dataset at the same data scale.

<table><tr><td rowspan="2">Model</td><td rowspan="2">LongVideoBench</td><td rowspan="2">MLVU (M-avg)</td><td colspan="4">Video-MME</td></tr><tr><td>Short</td><td>Medium</td><td>Long</td><td>Average</td></tr><tr><td>LongVA-7B [65]</td><td>51.3</td><td>58.8</td><td>61.1/61.6</td><td>50.4/53.6</td><td>46.2/47.6</td><td>52.6/54.3</td></tr><tr><td>+SFT<sub>Self</sub></td><td>52.7</td><td>58.9</td><td>62.6/67.7</td><td>52.4/52.7</td><td>46.8/47.4</td><td>53.9/55.9</td></tr><tr><td>+SFT<sub>LLM</sub></td><td>53.1</td><td>59.9</td><td>63.7/64.9</td><td>52.6/54.3</td><td>46.3/47.9</td><td>54.2/55.7</td></tr><tr><td>+Hound-DPO† [65, 66]</td><td>52.8</td><td>59.1</td><td>62.2/65.8</td><td>52.4/54.8</td><td>46.1/46.3</td><td>53.6/55.6</td></tr><tr><td>+Hound-DPO* [65, 66]</td><td>52.6</td><td>59.3</td><td>63.1/65.9</td><td>50.8/54.7</td><td>47.2/47.0</td><td>53.7/55.9</td></tr><tr><td>LongVA-TPO (ours)</td><td>54.2</td><td>61.7</td><td>63.1/66.6</td><td>54.8/55.3</td><td>47.4/47.9</td><td>55.1/56.6</td></tr></table>

Table 2: Results of LongVA-TPO on the LongVideoBench [55], MLVU [70] and Video-MME [14] benchmarks compared to the three baseline methods described above. The Video-MME results are presented in the format "w/o subs / w/ subs". The results for LongVA and LongVA+Hound-DPO† are based on publicly available checkpoints, the results for LongVA+Hound-DPO* are based on our implementation on our collected video datasets, and the remaining results are evaluated using our trained models.

The primary experimental results, presented in Table 2, compare TPO against the baseline methods on LongVA. The results consistently indicate that LongVA-TPO achieves superior performance, with improvements of $2.9\%$, $3.1\%$, and $2.5\%$ on LongVideoBench [55], MLVU [70], and Video-MME [14], respectively. These findings underscore TPO's capacity to enhance the general video understanding capabilities of a pre-trained video-LMM.

Compared to $\mathrm{SFT}_{\mathrm{Self}}$, LongVA-TPO achieves a consistent performance gain of $1.2\%$ to $2.8\%$ by utilizing a carefully designed temporal dis-preferred response to contrast with the preferred response. Furthermore, LongVA-TPO outperforms $\mathrm{SFT}_{\mathrm{LLM}}$, demonstrating the effectiveness and stability of our self-training paradigm. When compared to Hound-DPO [66], LongVA-TPO achieves a significant performance improvement by injecting temporal preference priors into the dataset. However, LongVA-TPO underperforms SFT methods on the Video-MME-short subset, which is expected since LongVA-TPO primarily focuses on optimizing temporal reasoning for video understanding.

In addition, comparisons between TPO and current state-of-the-art video-LMMs are presented in Table 1. With the introduction of TPO, the LongVA-TPO and LLaVA-Video-TPO models significantly outperform their corresponding baselines by $2.5\%$ and $2.3\%$ on the Video-MME benchmark, demonstrating the efficacy of our TPO pipeline. After applying TPO to LLaVA-Video-7B, our LLaVA-Video-TPO model outperforms all 16 baseline models in the table, including the concurrent work NVILA [36] as well as several 14B and 20B models, achieving state-of-the-art results on video understanding. The original LongVA model performed worse than Video-CCAM [13] and LongLLaVA [61] on the Video-MME benchmark; after incorporating TPO, it outperforms these competitive baselines. Overall, LLaVA-Video-TPO is the strongest 7B model on Video-MME, setting a new state of the art in video comprehension.

![](images/c4767a2e1b9b5d641106a2BcOc0a12fb7c2747c54a986ec19a00975a41a23e06.jpg)

Figure 3: Qualitative comparison between the LongVA-TPO model and LongVA on two videos from the Video-MME benchmark.

# 4.3. Ablation Study

# 4.3.1. Performance with Different Input Frame Counts

We evaluate the performance of both our LongVA-TPO model and the original LongVA model across varying input lengths, ranging from 16 to 128 frames, as shown in Fig. 5. The results indicate that the LongVA model experiences performance degradation with 128 frames compared to 64 frames. In contrast, our LongVA-TPO model consistently benefits from longer input sequences, leveraging the additional information effectively. This demonstrates the LongVA-TPO model's robustness in handling extended inputs and its capacity to localize relevant information within long sequences, further validating the efficacy of TPO.

![](images/7a4a9d64b9dff7b46b412c40801a33c1de9a15dab9c219ee1a1071cdabb1b86c.jpg)

Figure 4: Performance comparison of LongVA and LongVA-TPO on the needle-in-a-haystack task across varying input video lengths (horizontal axis) and temporal depths (vertical axis). Heatmaps indicate improved temporal grounding capability of LongVA-TPO.

# 4.3.2. Effect of Dataset Sizes

Scalability is a critical metric in the evaluation of algorithms in the era of large-scale models, reflecting an algorithm's performance as data volume expands. To examine the scalability of the TPO algorithm, we conduct experiments with LongVA-TPO across incremental dataset sizes of 2k, 5k, and 10k (the complete preference dataset). The results, presented in Table 3, highlight the impact of dataset scaling. Our findings reveal that LongVA-TPO demonstrates superior scalability, achieving consistent performance gains with increasing dataset size across all three benchmarks. This pattern highlights TPO's robustness and adaptability in larger data contexts, suggesting its potential to deliver enhanced results when scaled to larger datasets.

<table><tr><td>Model</td><td>LongVideoBench</td><td>MLVU</td><td>Video-MME</td></tr><tr><td>LongVA</td><td>51.3</td><td>58.8</td><td>52.6</td></tr><tr><td>TPO<sub>2k</sub></td><td>52.5</td><td>57.8</td><td>52.8</td></tr><tr><td>TPO<sub>5k</sub></td><td>53.7</td><td>59.5</td><td>53.6</td></tr><tr><td>TPO<sub>10k</sub></td><td>54.2</td><td>61.7</td><td>55.1</td></tr></table>

Table 3: Results of LongVA-TPO (TPO) trained on different data scales. TPO achieves consistent performance improvements as the data scale increases. The performance on the Video-MME benchmark is evaluated without subtitles.

# 4.3.3. Effect of Post-Filtering

As a critical component of the TPO framework, post-filtering effectively reduces noise and enhances data quality. To further assess its impact, we conducted experiments comparing the performance of LongVA-TPO with and without post-filtering. The results, presented in Table 4, demonstrate that post-filtering consistently improves performance across multiple benchmarks.

<table><tr><td>Model</td><td>LongVideoBench</td><td>MLVU</td><td>Video-MME</td></tr><tr><td>LongVA</td><td>51.3</td><td>58.8</td><td>52.6</td></tr><tr><td>TPO<sub>w/o Post-Filtering</sub></td><td>52.5</td><td>60.1</td><td>53.6</td></tr><tr><td>TPO<sub>w/ Post-Filtering</sub></td><td>54.2</td><td>61.7</td><td>55.1</td></tr></table>

Table 4: Results of LongVA-TPO (TPO) with and without post-filtering. Post-filtering consistently improves performance across multiple benchmarks.

# 4.3.4. Effect of Different Data Mix Ratios

In TPO, we design two manipulation schemes for dis-preferred response generation: creating incomplete videos and creating irrelevant videos. To evaluate the individual contributions and limitations of each scheme, and the necessity of combining them, we conducted an ablation study. Keeping the overall dataset size the same as the full TPO, we assessed the performance of our TPO model under various mixing ratios between the negative responses generated from incomplete and irrelevant videos: 10:0, 8:2, 5:5, 2:8, and 0:10. The experimental results, summarized in Table 5, clearly demonstrate that the model achieves optimal performance on general video understanding tasks when equal proportions of negative responses generated from incomplete and irrelevant videos are used. This balanced data distribution effectively integrates different types of temporal information, leading to superior overall model performance.

<table><tr><td>Ratio</td><td>LongVideoBench</td><td>MLVU</td><td>Video-MME</td></tr><tr><td>10:0</td><td>53.5</td><td>58.7</td><td>54.0</td></tr><tr><td>8:2</td><td>53.8</td><td>59.9</td><td>54.0</td></tr><tr><td>5:5 (final model)</td><td>54.2</td><td>61.7</td><td>55.1</td></tr><tr><td>2:8</td><td>53.4</td><td>59.1</td><td>54.2</td></tr><tr><td>0:10</td><td>53.4</td><td>58.5</td><td>53.8</td></tr></table>

Table 5: Performance of TPO across different training data mix ratios, varying the proportion of negative responses generated from incomplete versus irrelevant video segments.

# 4.3.5. Needle-in-a-Haystack

The Needle-in-a-Haystack task refers to the challenge of identifying a rare or specific event within a large volume of unstructured video data. Building on the work of Zhang et al. [65], we frame the task using image-based question answering (QA), where images are embedded within a 3-hour-long video, and the model is tasked with answering the corresponding image QA questions. In our experiments, we adopt the same five image QAs as Zhang et al. [65], and present the results in Fig. 4. While LongVA, optimized for long-context processing, significantly outperforms LLaVA-NeXT-Video [67] on the Needle-in-a-Haystack task (refer to Fig. 4 in [65]), our LongVA-TPO model still demonstrates superior performance, achieving even better results in long-context temporal localization.

![](images/86126dad6f319cc32a4f0e771e9e5d913e88e53472f598300bd30ed00abdaf5c.jpg)

Figure 5: Performance comparison between LongVA-TPO and LongVA on MLVU across varying input lengths. LongVA-TPO consistently benefits from increased input length, whereas LongVA's performance declines when inputs exceed 64 frames.

# 4.4. Qualitative Analysis

The qualitative analysis of our LongVA-TPO model and the LongVA model on two videos from the Video-MME benchmark is provided in Fig. 3. In the first example, which involves a temporal localization and OCR task, our LongVA-TPO model demonstrates superior performance by accurately localizing the relevant information within the video and providing the correct answer to the OCR question. In the second example, a video discussing the Moon's formation, LongVA misinterprets the video content by relating it to the Earth's formation. In contrast, our LongVA-TPO model successfully comprehends and captures the key details of the video's content.

# 5. Related Work

**Video Large Multimodal Models** Recently, significant efforts have been devoted to extending the capabilities of large language models (LLMs) [2, 45] into the visual domain, developing various video large multimodal models (video-LMMs), including both proprietary [2, 45] and open-source models [1, 10, 15, 23, 26, 31, 34, 47, 52, 59]. Early approaches focused on curating high-quality video-text instruction-tuning datasets [6, 33, 42, 67, 68] to equip LLMs with video comprehension capabilities. However, these datasets often rely on synthetic data derived from video captions, which limits their effectiveness in capturing visual-temporal dynamics. Other studies have focused on extending pretrained video-LMMs to handle longer video contexts [22, 35-37, 48, 61, 65], while multimodal interleaved datasets [27, 32] and mixed training strategies [25, 74] have been explored to enhance video-LMM performance. Despite these advancements, the post-training stage for video-LMMs remains underexplored. Recent efforts like LLaVA-Hound [66] utilize ChatGPT to rank model outputs and create preference datasets but fall short in leveraging the temporal information inherent in video data. In contrast, our work pioneers post-training strategies that explicitly incorporate temporal priors to address these limitations.

**Temporal Grounding** is crucial for comprehending the video modality, particularly in long-form videos. Various efforts have been made to enhance temporal localization, including dense captioning [53, 58, 60], highlight detection [24, 40], and temporal video grounding [16, 56, 62], among others. Recent advancements have introduced temporal-aware designs in video-LMMs [7, 20, 29, 46, 51] and have explored the development of agentic systems with temporal grounding capabilities [54]. Unlike these existing approaches, our work focuses on temporal preference optimization during the post-training stage, offering a complementary enhancement to current methods.

**Preference Learning** Proximal Policy Optimization (PPO) [41, 49, 72] and Direct Preference Optimization (DPO) [44] are two widely used implementations of Reinforcement Learning from Human Feedback (RLHF) [41, 72], serving as key algorithms in preference learning and post-training. In the image-LMM domain, Sun et al. [50] enhanced model visual capabilities by incorporating image captions into the reward modeling process within RLHF. Similarly, Ahn et al. [4] fine-tuned multimodal foundation models using Reinforcement Learning from AI Feedback (RLAIF). Other approaches, such as those proposed by Li et al. [30] and Gunjal et al. [17], directly distilled GPT-4V's preferences from sampled model responses. A notable strategy involves using text as an intermediate modality, leveraging captions and other descriptive information to extract and distill LLM preferences for both images [69] and videos [66]. Furthermore, Pi et al. [43], Zhou et al. [71], and Deng et al. [12] advanced preference learning in image-LMMs by curating preference data through image input manipulation.

**Self-Training in Foundation Models** To address the challenge of scaling up annotated datasets, several works have explored self-improvement and self-training methods [18, 21]. Zelikman et al. [63] introduced Self-Taught Reasoners (STaR), which leverage generated chain-of-thought rationales to enhance LLMs' complex reasoning capabilities. In the image domain, BPO [43], STIC [12], and POVID [71] improve image-LMM responses by incorporating visual priors. In the video domain, Video-STaR [73] uses existing labels as weak supervision to guide model self-improvement, while Ahn et al. [3] explore iterative self-improvement in preference optimization.

# 6. Conclusion

We introduced Temporal Preference Optimization (TPO), a scalable post-training framework that enhances temporal grounding in video-LMMs. By contrasting preference responses generated from well-grounded and manipulated video clips, TPO effectively captures the intricate temporal dependencies required for video understanding. Extensive experiments across three challenging benchmarks (LongVideoBench, MLVU, and Video-MME) demonstrated TPO's robust improvements, achieving state-of-the-art performance. By integrating multi-granularity temporal preferences, TPO offers a robust and efficient solution for advancing temporal reasoning in multimodal tasks. One future direction is scaling the preference data to improve coverage and diversity, thereby enhancing TPO's generalizability. Additionally, while this work focuses on LongVA-7B and LLaVA-Video-7B as representative video-LMMs, applying TPO to a broader range and larger scale of video-LMMs would provide insights into its adaptability and performance across different architectures.

# References
| 201 |
+
|
| 202 |
+
[1] Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, et al. Phi-3 technical report: A highly capable language model
|
| 203 |
+
|
| 204 |
+
locally on your phone. arXiv preprint arXiv:2404.14219, 2024.
|
| 205 |
+
[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.
|
| 206 |
+
[3] Daechul Ahn, Yura Choi, San Kim, Youngjae Yu, Dongyeop Kang, and Jonghyun Choi. i-srt: Aligning large multimodal models for videos by iterative self-retrospective judgment. arXiv preprint arXiv:2406.11280, 2024.
|
| 207 |
+
[4] Daechul Ahn, Yura Choi, Youngjae Yu, Dongyeop Kang, and Jonghyun Choi. Tuning large multimodal models for videos using reinforcement learning from ai feedback. arXiv preprint arXiv:2402.03746, 2024.
|
| 208 |
+
[5] Jinze Bai, Shuai Bai, Shusheng Yang, Shijie Wang, Sinan Tan, Peng Wang, Junyang Lin, Chang Zhou, and Jingren Zhou. Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv preprint arXiv:2308.12966, 2023.
|
| 209 |
+
[6] Lin Chen, Xilin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, Li Yuan, Yu Qiao, Dahua Lin, Feng Zhao, and Jiaqi Wang. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024.
|
| 210 |
+
[7] Shimin Chen, Xiaohan Lan, Yitian Yuan, Zequn Jie, and Lin Ma. Timemarker: A versatile video-llm for long and short video understanding with superior temporal localization ability. arXiv preprint arXiv:2411.18211, 2024.
|
| 211 |
+
[8] Shuo Chen, Gang Niu, Chen Gong, Jun Li, Jian Yang, and Masashi Sugiyama. Large-margin contrastive learning with distance polarization regularizer. In International Conference on Machine Learning, pages 1673-1683. PMLR, 2021.
|
| 212 |
+
[9] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, Bin Li, Ping Luo, Tong Lu, Yu Qiao, and Jifeng Dai. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. arXiv preprint arXiv:2312.14238, 2023.
|
| 213 |
+
[10] Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Muyan Zhong, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 24185–24198, 2024.
|
| 214 |
+
[11] Zixiang Chen, Yihe Deng, Yuanzhi Li, and Quanquan Gu. Understanding transferable representation learning and zero-shot transfer in clip. arXiv preprint arXiv:2310.00927, 2023.
|
| 215 |
+
[12] Yihe Deng, Pan Lu, Fan Yin, Ziniu Hu, Sheng Shen, Quanquan Gu, James Zou, Kai-Wei Chang, and Wei Wang. Enhancing large vision language models with self-training on image comprehension. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=FZW7Ctyjm3.
|
| 216 |
+
[13] Jiajun Fei, Dian Li, Zhidong Deng, Zekun Wang, Gang Liu, and Hui Wang. Video-ccam: Enhancing video-language understanding with causal cross-attention masks for short and long videos. arXiv preprint arXiv:2408.14023, 2024.
|
| 217 |
+
[14] Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024.
|
| 218 |
+
[15] Chaoyou Fu, Haojia Lin, Xiong Wang, Yi-Fan Zhang, Yunhang Shen, Xiaoyu Liu, Yangze Li, Zuwei Long, Heting Gao, Ke Li, et al. Vita-1.5: Towards gpt-4o level real-time vision and speech interaction. arXiv preprint arXiv:2501.01957, 2025.
|
| 219 |
+
[16] Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. Tall: Temporal activity localization via language query. In Proceedings of the IEEE international conference on computer vision, pages 5267-5275, 2017.
|
| 220 |
+
|
| 221 |
+
[17] Anisha Gunjal, Jihan Yin, and Erhan Bas. Detecting and preventing hallucinations in large vision language models. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 18135-18143, 2024.
|
| 222 |
+
[18] Namgyu Ho, Laura Schmid, and Se-Young Yun. Large language models are reasoning teachers. arXiv preprint arXiv:2212.10071, 2022.
|
| 223 |
+
[19] Wenyi Hong, Weihan Wang, Ming Ding, Wenmeng Yu, Qingsong Lv, Yan Wang, Yean Cheng, Shiyu Huang, Junhui Ji, Zhao Xue, et al. Cogvlm2: Visual language models for image and video understanding. arXiv preprint arXiv:2408.16500, 2024.
|
| 224 |
+
[20] De-An Huang, Shijia Liao, Subhashree Radhakrishnan, Hongxu Yin, Pavlo Molchanov, Zhiding Yu, and Jan Kautz. Lita: Language instructed temporal-localization assistant. In ECCV, 2024.
|
| 225 |
+
[21] Jiaxin Huang, Shixiang Shane Gu, Le Hou, Yuexin Wu, Xuezhi Wang, Hongkun Yu, and Jiawei Han. Large language models can self-improve. arXiv preprint arXiv:2210.11610, 2022.
|
| 226 |
+
[22] Md Mohaiminul Islam, Tushar Nagarajan, Huiyu Wang, Gedas Bertasius, and Lorenzo Torresani. Bimba: Selective-scan compression for long-range video question answering. In Proceedings of the Computer Vision and Pattern Recognition Conference, pages 29096-29107, 2025.
|
| 227 |
+
[23] Hugo Laurençon, Léo Tronchon, Matthieu Cord, and Victor Sanh. What matters when building vision-language models?, 2024.
|
| 228 |
+
[24] Jie Lei, Tamara L Berg, and Mohit Bansal. Detecting moments and highlights in videos via natural language queries. Advances in Neural Information Processing Systems, 34:11846-11858, 2021.
|
| 229 |
+
[25] Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Yanwei Li, Ziwei Liu, and Chunyuan Li. Llava-onevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326, 2024.
|
| 230 |
+
[26] Dongxu Li, Yudong Liu, Haoning Wu, Yue Wang, Zhiqi Shen, Bowen Qu, Xinyao Niu, Guoyin Wang, Bei Chen, and Junnan Li. Aria: An open multimodal native mixture-of-experts model. arXiv preprint arXiv:2410.05993, 2024.
|
| 231 |
+
[27] Feng Li, Renrui Zhang, Hao Zhang, Yuanhan Zhang, Bo Li, Wei Li, Zejun Ma, and Chunyuan Li. Llava-next: Tackling multi-image, video, and 3d in large multimodal models, June 2024. URL https://llava-vl.github.io/blog/2024-06-16-llava-next-interleave/.
|
| 232 |
+
[28] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023.
|
| 233 |
+
[29] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023.
|
| 234 |
+
[30] Lei Li, Zhihui Xie, Mukai Li, Shunian Chen, Peiyi Wang, Liang Chen, Yazheng Yang, Benyou Wang, and Lingpeng Kong. Silkie: Preference distillation for large visual language models. arXiv preprint arXiv:2312.10665, 2023.
|
| 235 |
+
[31] Bin Lin, Bin Zhu, Yang Ye, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023.
|
| 236 |
+
[32] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pre-training for visual language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26689-26699, 2024.
|
| 237 |
+
[33] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. In NeurIPS, 2023.
|
| 238 |
+
[34] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36, 2024.
|
| 239 |
+
|
| 240 |
+
[35] Jiajun Liu, Yibing Wang, Hanghang Ma, Xiaoping Wu, Xiaoqi Ma, Xiaoming Wei, Jianbin Jiao, Enhua Wu, and Jie Hu. Kangaroo: A powerful video-language model supporting long-context video input. arXiv preprint arXiv:2408.15542, 2024.
|
| 241 |
+
[36] Zhijian Liu, Ligeng Zhu, Baifeng Shi, Zhuoyang Zhang, Yuming Lou, Shang Yang, Haocheng Xi, Shiyi Cao, Yuxian Gu, Dacheng Li, Xiuyu Li, Yunhao Fang, Yukang Chen, Cheng-Yu Hsieh, De-An Huang, An-Chieh Cheng, Vishwesh Nath, Jinyi Hu, Sifei Liu, Ranjay Krishna, Daguang Xu, Xiaolong Wang, Pavlo Molchanov, Jan Kautz, Hongxu Yin, Song Han, and Yao Lu. Nvila: Efficient frontier visual language models, 2024. URL https://arxiv.org/abs/2412.04468.
|
| 242 |
+
[37] Zuyan Liu, Yuhao Dong, Ziwei Liu, Winston Hu, Jiwen Lu, and Yongming Rao. Oryx mllm: On-demand spatial-temporal understanding at arbitrary resolution. arXiv preprint arXiv:2409.12961, 2024.
|
| 243 |
+
[38] Ilya Loshchilov and Frank Hutter. Sgdr: Stochastic gradient descent with warm restarts. arXiv preprint arXiv:1608.03983, 2016.
|
| 244 |
+
[39] Haoyu Lu, Wen Liu, Bo Zhang, Bingxuan Wang, Kai Dong, Bo Liu, Jingxiang Sun, Tongzheng Ren, Zhuoshu Li, Yaofeng Sun, Chengqi Deng, Hanwei Xu, Zhenda Xie, and Chong Ruan. Deepseek-vl: Towards real-world vision-language understanding, 2024.
|
| 245 |
+
[40] WonJun Moon, Sangeek Hyun, SangUk Park, Dongchan Park, and Jae-Pil Heo. Query-dependent video representation for moment retrieval and highlight detection. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23023-23033, 2023.
|
| 246 |
+
[41] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35:27730-27744, 2022.
|
| 247 |
+
[42] Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology, pages 1-22, 2023.
|
| 248 |
+
[43] Renjie Pi, Tianyang Han, Wei Xiong, Jipeng Zhang, Runtao Liu, Rui Pan, and Tong Zhang. Strengthening multimodal large language model with bootstrapped preference optimization. arXiv preprint arXiv:2403.08730, 2024.
|
| 249 |
+
[44] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36, 2024.
|
| 250 |
+
[45] Machel Reid, Nikolay Savinov, Denis Teplyashin, Dmitry Lepikhin, Timothy Lillicrap, Jean-baptiste Alayrac, Radu Soricut, Angeliki Lazaridou, Orhan Firat, Julian Schrittwieser, et al. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv preprint arXiv:2403.05530, 2024.
|
| 251 |
+
[46] Shuhuai Ren, Linli Yao, Shicheng Li, Xu Sun, and Lu Hou. Timechat: A time-sensitive multimodal large language model for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14313-14323, 2024.
|
| 252 |
+
[47] Xiaoqian Shen, Yunyang Xiong, Changsheng Zhao, Lemeng Wu, Jun Chen, Chenchen Zhu, Zechun Liu, Fanyi Xiao, Balakrishnan Varadarajan, Florian Bordes, Zhuang Liu, Hu Xu, Hyunwoo J. Kim, Bilge Soran, Raghuraman Krishnamoorthi, Mohamed Elhoseiny, and Vikas Chandra. Longvu: Spatiotemporal adaptive compression for long video-language understanding. arXiv:2410.17434, 2024.
|
| 253 |
+
[48] Yan Shu, Peitian Zhang, Zheng Liu, Minghao Qin, Junjie Zhou, Tiejun Huang, and Bo Zhao. Video-xl: Extra-long vision language model for hour-scale video understanding. arXiv preprint arXiv:2409.14485, 2024.
|
| 254 |
+
|
| 255 |
+
[49] Nisan Stiennon, Long Ouyang, Jeffrey Wu, Daniel Ziegler, Ryan Lowe, Chelsea Voss, Alec Radford, Dario Amodei, and Paul F Christiano. Learning to summarize with human feedback. Advances in Neural Information Processing Systems, 33:3008-3021, 2020.
|
| 256 |
+
[50] Zhiqing Sun, Sheng Shen, Shengcao Cao, Haotian Liu, Chunyuan Li, Yikang Shen, Chuang Gan, Liang-Yan Gui, Yu-Xiong Wang, Yiming Yang, et al. Aligning large multimodal models with factually augmented rlhf. arXiv preprint arXiv:2309.14525, 2023.
|
| 257 |
+
[51] Haibo Wang, Zhiyang Xu, Yu Cheng, Shizhe Diao, Yufan Zhou, Yixin Cao, Qifan Wang, Weifeng Ge, and Lifu Huang. Grounded-videollm: Sharpening fine-grained temporal grounding in video large language models. arXiv preprint arXiv:2410.03290, 2024.
|
| 258 |
+
[52] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024.
|
| 259 |
+
[53] Teng Wang, Ruimao Zhang, Zhichao Lu, Feng Zheng, Ran Cheng, and Ping Luo. End-to-end dense video captioning with parallel decoding. In Proceedings of the IEEE/CVF international conference on computer vision, pages 6847-6857, 2021.
|
| 260 |
+
[54] Xiaohan Wang, Yuhui Zhang, Orr Zohar, and Serena Yeung-Levy. Videoagent: Long-form video understanding with large language model as agent. In European Conference on Computer Vision, pages 58-76. Springer, 2025.
|
| 261 |
+
[55] Haoning Wu, Dongxu Li, Bei Chen, and Junnan Li. Longvideobench: A benchmark for long-context interleaved video-language understanding, 2024. URL https://arxiv.org/abs/2407.15754.
|
| 262 |
+
[56] Junbin Xiao, Angela Yao, Yicong Li, and Tat-Seng Chua. Can i trust your answer? visually grounded video question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13204-13214, 2024.
|
| 263 |
+
[57] Lin Xu, Yilin Zhao, Daquan Zhou, Zhijie Lin, See Kiong Ng, and Jiashi Feng. Pllava: Parameter-free llava extension from images to videos for video dense captioning, 2024.
|
| 264 |
+
[58] Antoine Yang, Arsha Nagrani, Paul Hongsuck Seo, Antoine Miech, Jordi Pont-Tuset, Ivan Laptev, Josef Sivic, and Cordelia Schmid. Vid2seq: Large-scale pretraining of a visual language model for dense video captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10714-10726, 2023.
|
| 265 |
+
[59] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, et al. Minicpm-v: A gpt-4v level mllm on your phone. arXiv preprint arXiv:2408.01800, 2024.
|
| 266 |
+
[60] Serena Yeung, Olga Russakovsky, Ning Jin, Mykhaylo Andriluka, Greg Mori, and Li Fei-Fei. Every moment counts: Dense detailed labeling of actions in complex videos. International Journal of Computer Vision, 126:375-389, 2018.
|
| 267 |
+
[61] Yin Song, Chen Wu, and Eden Duthie. aws-prototyping/long-llava-qwen2-7b, 2024. URL https://huggingface.co/aws-prototyping/long-llava-qwen2-7b.
|
| 268 |
+
[62] Yitian Yuan, Lin Ma, Jingwen Wang, Wei Liu, and Wenwu Zhu. Semantic conditioned dynamic modulation for temporal sentence grounding in videos. Advances in Neural Information Processing Systems, 32, 2019.
|
| 269 |
+
[63] Eric Zelikman, Yuhuai Wu, Jesse Mu, and Noah Goodman. Star: Bootstrapping reasoning with reasoning. Advances in Neural Information Processing Systems, 35:15476-15488, 2022.
|
| 270 |
+
[64] Kaichen Zhang, Bo Li, Peiyuan Zhang, Fanyi Pu, Joshua Adrian Cahyono, Kairui Hu, Shuai Liu, Yuanhan Zhang, Jingkang Yang, Chunyuan Li, and Ziwei Liu. Lmms-eval: Reality check on the evaluation of large multimodal models, 2024. URL https://arxiv.org/abs/2407.12772.
|
| 271 |
+
|
| 272 |
+
[65] Peiyuan Zhang, Kaichen Zhang, Bo Li, Guangtao Zeng, Jingkang Yang, Yuanhan Zhang, Ziyue Wang, Haoran Tan, Chunyuan Li, and Ziwei Liu. Long context transfer from language to vision. arXiv preprint arXiv:2406.16852, 2024.
|
| 273 |
+
[66] Ruohong Zhang, Liangke Gui, Zhiqing Sun, Yihao Feng, Keyang Xu, Yuanhan Zhang, Di Fu, Chunyuan Li, Alexander Hauptmann, Yonatan Bisk, et al. Direct preference optimization of video large multimodal models from language model reward. arXiv preprint arXiv:2404.01258, 2024.
|
| 274 |
+
[67] Yuanhan Zhang, Bo Li, Haotian Liu, Yong Jae Lee, Liangke Gui, Di Fu, Jiashi Feng, Ziwei Liu, and Chunyuan Li. Llava-next: A strong zero-shot video understanding model, April 2024. URL https://llava-vl.github.io/blog/2024-04-30-llava-next-video/.
|
| 275 |
+
[68] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data, 2024. URL https://arxiv.org/abs/2410.02713.
|
| 276 |
+
[69] Zhiyuan Zhao, Bin Wang, Linke Ouyang, Xiaoyi Dong, Jiaqi Wang, and Conghui He. Beyond hallucinations: Enhancing lvlms through hallucination-aware direct preference optimization. arXiv preprint arXiv:2311.16839, 2023.
|
| 277 |
+
[70] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024.
|
| 278 |
+
[71] Yiyang Zhou, Chenhang Cui, Rafael Rafailov, Chelsea Finn, and Huaxiu Yao. Aligning modalities in vision large language models via preference fine-tuning. arXiv preprint arXiv:2402.11411, 2024.
|
| 279 |
+
[72] Daniel M Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B Brown, Alec Radford, Dario Amodei, Paul Christiano, and Geoffrey Irving. Fine-tuning language models from human preferences. arXiv preprint arXiv:1909.08593, 2019.
|
| 280 |
+
[73] Orr Zohar, Xiaohan Wang, Yonatan Bitton, Idan Szpektor, and Serena Yeung-Levy. Video-star: Self-training enables video instruction tuning with any supervision. arXiv preprint arXiv:2407.06189, 2024.
|
| 281 |
+
[74] Orr Zohar, Xiaohan Wang, Yann Dubois, Nikhil Mehta, Tong Xiao, Philippe Hansen-Estruch, Licheng Yu, Xiaofang Wang, Felix Juefei-Xu, Ning Zhang, Serena Yeung-Levy, and Xide Xia. Apollo: An exploration of video understanding in large multimodal models. arXiv preprint arXiv:2412.10360, 2024.
|
| 282 |
+
|
| 283 |
+
# A. Reproducibility Statement
|
| 284 |
+
|
| 285 |
+
To ensure reproducibility, we have released the full scripts and source code of the TPO pipeline, accompanied by the curated preference dataset, which includes videos, associated queries, and corresponding preference responses, as well as the trained model weights. The release includes detailed implementations of all steps involved in the preference dataset curation and the preference optimization process. By providing these resources, we aim to facilitate the replication of our results and support further advancements in this area of research.
|
| 286 |
+
|
| 287 |
+
# B. Appendix overview
|
| 288 |
+
|
| 289 |
+
This document provides more details of our approach and additional experimental results, organized as follows:
|
| 290 |
+
|
| 291 |
+
- § C More Implementation Details of TPO.
|
| 292 |
+
- § D More Details of the Preference Dataset Curation.
|
| 293 |
+
- § E More Examples in the Preference Dataset.
|
| 294 |
+
- § F More Qualitative Examples.
|
| 295 |
+
|
| 296 |
+
# C. Implementation Details
|
| 297 |
+
|
| 298 |
+
We conduct Temporal Preference Optimization (TPO) on LongVA [65] and LLaVA-Video [68], two state-of-the-art video-LMMs. Both TPO models are trained on 8 Nvidia A100 80GB GPUs with a batch size of 64. For preference optimization, we set the KL-divergence weight $(\beta)$ to 0.3 and the supervised fine-tuning (SFT) loss weight $(\alpha)$ to 0.5 for LongVA-TPO, and $\beta = 0.2$ with $\alpha = 1$ for LLaVA-Video-TPO. We employ full fine-tuning for both the multimodal projector and the language model while keeping the visual encoder frozen, using a learning rate of $4\times 10^{-6}$ for LongVA-TPO and $3\times 10^{-7}$ for LLaVA-Video-TPO. Each model is trained on a curated dataset of 10k samples for one epoch. A cosine learning rate scheduler with a warm-up ratio of 0.1 is utilized [38]. The entire TPO fine-tuning process takes approximately 4 hours for each model.
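For concreteness, a minimal PyTorch sketch of the objective described above is given below: a DPO-style preference loss governed by the KL-divergence weight $\beta$, plus a supervised fine-tuning (NLL) term weighted by $\alpha$. We assume the two terms combine additively; all names are illustrative, and this is not the released TPO code.

```python
import torch
import torch.nn.functional as F

def tpo_loss(policy_chosen_logps, policy_rejected_logps,
             ref_chosen_logps, ref_rejected_logps,
             sft_nll, beta=0.3, alpha=0.5):
    # Log-ratios of the policy vs. the frozen reference for each response.
    chosen_ratio = policy_chosen_logps - ref_chosen_logps
    rejected_ratio = policy_rejected_logps - ref_rejected_logps
    # DPO term: -log sigmoid(beta * margin of preferred over dis-preferred).
    dpo = -F.logsigmoid(beta * (chosen_ratio - rejected_ratio)).mean()
    # Auxiliary SFT (negative log-likelihood) term on the preferred response.
    return dpo + alpha * sft_nll.mean()

# Dummy batch of 2 (summed response log-probs; values are illustrative only).
loss = tpo_loss(torch.tensor([-10.0, -12.0]), torch.tensor([-14.0, -13.0]),
                torch.tensor([-11.0, -12.5]), torch.tensor([-13.5, -12.8]),
                sft_nll=torch.tensor([10.0, 12.0]))
print(float(loss))
```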
|
| 299 |
+
|
| 300 |
+
For evaluation, we adopt the protocol outlined by LongVA [65] and LLaVA-Video [68], leveraging the official lmms-eval repository [64] to assess our model's performance on three benchmarks. For LongVA-TPO, we set the parameter max_frames_num = 128 across all three benchmarks. For LLaVA-Video-TPO, we set max_frames_num = 96 for the Video-MME benchmark and max_frames_num = 128 for the remaining benchmarks.
|
| 301 |
+
|
| 302 |
+
# D. Preference Dataset Curation
|
| 303 |
+
|
| 304 |
+
We manually curated a set of 200 keywords with the assistance of GPT-4o-mini [2], which were used to retrieve 8,000 videos from the internet, forming a diverse and comprehensive dataset. From this dataset, we further developed 10,000 queries paired with their corresponding preference responses, covering a broad range of tasks. The detailed prompts for preference dataset curation are provided in Fig. 8 and Fig. 9. For LLaVA-Video, we sampled a subset of 10k QA pairs from the LLaVA-Video-178k dataset, with negative responses curated solely from incomplete videos.
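The contrastive construction implied by this curation can be sketched as follows, assuming the preferred response is generated from the full (or query-relevant) video while the dis-preferred response comes from a deliberately incomplete or down-sampled view. `video_lmm` is a hypothetical callable standing in for the video-LMM; this is not the released pipeline.

```python
import random

def curate_preference_pair(video_frames, query, video_lmm):
    # Preferred response: generated from the complete frame sequence.
    preferred = video_lmm(video_frames, query)

    # Dis-preferred response: the same model queried on a degraded view,
    # either an incomplete video or sparsely down-sampled frames.
    if random.random() < 0.5:
        degraded = video_frames[: len(video_frames) // 2]  # incomplete video
    else:
        degraded = video_frames[::8]                       # down-sampled frames
    dispreferred = video_lmm(degraded, query)

    return {"query": query, "chosen": preferred, "rejected": dispreferred}
```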
|
| 305 |
+
|
| 306 |
+
The distribution of video lengths in our collected dataset is presented in Fig. 6. The distribution of tasks is illustrated in Fig. 7, encompassing Temporal Reasoning (8.7%), Action Reasoning (12.4%), Causal Reasoning
|
| 307 |
+
|
| 308 |
+

|
| 309 |
+
Figure 6: The distribution of lengths for the 8K crawled videos.
|
| 310 |
+
|
| 311 |
+

|
| 312 |
+
Figure 7: The distribution of question types in the 10K curated preference dataset for LongVA-TPO.
|
| 313 |
+
|
| 314 |
+
(11.1%), Information Extraction (18.0%), Descriptive Questions (12.8%), Summarization (7.5%), Object Reasoning (14.9%), and Spatial Reasoning (13.5%).
|
| 315 |
+
|
| 316 |
+
# E. Preference Dataset Examples
|
| 317 |
+
|
| 318 |
+
We provide three additional examples from the preference dataset, as illustrated in Fig. 10. For instance, in Example (a), the task involves an OCR-based query aimed at retrieving the quote located beneath a mural. The dis-preferred response incorrectly identifies the relevant frame, failing to locate the quote below the mural and instead referencing another frame containing the phrase "Forward, Warrior." In contrast, the preferred response accurately identifies the corresponding frame based on the question. This is achieved by leveraging the highly relevant sub-video segment provided to the video-LMM, enabling the correct extraction of both the quote and its attribution.
|
| 319 |
+
|
| 320 |
+
For Example (b), the task involves summarizing information by identifying the four levels depicted in a pyramid diagram. The dis-preferred response, based on irrelevant video clips, provides incorrect names and an incorrect order for the four levels. In contrast, the preferred response accurately identifies both the correct names and the proper order of the four levels, demonstrating a better understanding of the context and alignment with the video content.
|
| 321 |
+
|
| 322 |
+
For Example (c), the task involves a high-level descriptive query requiring a summary of the exercise routine depicted in the video. The dis-preferred response, relying only on down-sampled frames, omits significant key information and provides an incomplete summary. In contrast, the preferred response accurately summarizes the entire exercise routine, offering both detailed and correctly ordered information, thereby demonstrating a comprehensive understanding of the video content.
|
| 323 |
+
|
| 324 |
+
# F. Qualitative Analysis Examples
|
| 325 |
+
|
| 326 |
+
We provide three additional qualitative analysis examples from the Video-MME dataset [14], as illustrated in Fig. 11. Example (a) involves an information extraction and optical character recognition (OCR) task,
|
| 327 |
+
|
| 328 |
+
where the question asks for the total number of measurements involved in chip manufacturing. The original LongVA model failed to accurately locate the relevant frame containing the necessary information, resulting in an incorrect response. In contrast, our LongVA-TPO model, enhanced through temporal preference optimization, successfully identified the pertinent frame within the lengthy input and provided the correct answer to the question.
|
| 329 |
+
|
| 330 |
+
Example (b) involves a high-level video understanding and information extraction task, where the question asks for the main topic introduced in the video. The original LongVA model failed to capture the overarching theme, instead responding with an unrelated term, "Criminal Trial," mentioned elsewhere in the video. In contrast, our LongVA-TPO model effectively identified the video's central theme and accurately provided the correct topic introduced in the content.
|
| 331 |
+
|
| 332 |
+
Example (c) involves an object reasoning task, where the question asks what the three curved lines extending from the bottom upward symbolize. The original LongVA model failed to interpret the representation accurately, erroneously stating that the lines represent three stages of the water cycle, which was a hallucination. In contrast, our LongVA-TPO model successfully understood the symbolic meaning of the three curved lines as representing evaporation, providing a correct and detailed response.
|
| 333 |
+
|
| 334 |
+
```txt
|
| 335 |
+
<Video Caption>
|
| 336 |
+
Using the caption of a video, create a question-answer pair that focuses on <Task Prompt>.
|
| 337 |
+
Please generate a question tailored to the given caption. If it's inappropriate to generate such a question, please output None. Output format:
|
| 338 |
+
Q:
|
| 339 |
+
A:
|
| 340 |
+
```
|
| 341 |
+
|
| 342 |
+
Figure 8: Detailed prompt for the query generation given the video captions.
|
| 343 |
+
|
| 344 |
+
```txt
<Video Caption>
Question: <Query>
Answer1: <Preferred Answer>
Answer2: <Dis-Preferred Answer>

Task1: You are given a question, the golden answer, and related captions. Is Answer1 better than Answer2? Please answer with Yes, No, or Equally.
Task2: Please check whether this question and Answer1 contradict any part of the golden caption, or whether this question might have another answer different from the given one. Please respond with Yes or No.
Task3: Is Answer1 correct given the question and the golden caption? Please respond with Yes or No.
```
|
| 356 |
+
|
| 357 |
+
Figure 9: Detailed prompt for the post-filtering process for the preference data pairs.
|
| 358 |
+
|
| 359 |
+

|
| 360 |
+
|
| 361 |
+

|
| 362 |
+
|
| 363 |
+
Query: What quote is featured below the mural in the video, and who is it attributed to?
|
| 364 |
+
|
| 365 |
+
Dis-preferred Response: The quote below the mural is "Forward, Warrior!" and it is attributed to a woman.
|
| 366 |
+
|
| 367 |
+
Preferred Response: The quote below the mural in the video is "I appeal to all of you to get into this great revolution that's sweeping this nation. Get in and stay in the streets of every city, every village and hamlet of this nation until true freedom comes, until the revolution of 1776 is complete." This quote is attributed to John Lewis.
|
| 368 |
+
|
| 369 |
+

|
| 370 |
+
(a)
|
| 371 |
+
|
| 372 |
+

|
| 373 |
+
|
| 374 |
+
Query: What are the four levels represented in the pyramid diagram of the wine tasting guide, and how do they classify wine quality?
|
| 375 |
+
|
| 376 |
+
Dis-preferred Response: The pyramid diagram in the wine tasting guide represents four levels of wine quality classification: AOC, IGP, Vin de Pays, and Vins. These levels are arranged in a descending order of quality ...
|
| 377 |
+
|
| 378 |
+
Preferred Response: The pyramid diagram in the wine tasting guide represents four levels of wine quality classification: AOC, VDQS, IGP, and Vin. The pyramid structure indicates that each level is a subset of the one above it, with AOC being the highest and Vin being the lowest. ...
|
| 379 |
+
|
| 380 |
+

|
| 381 |
+
(b)
|
| 382 |
+
|
| 383 |
+

|
| 384 |
+
(c)
|
| 385 |
+
Figure 10: Examples from the preference dataset.
|
| 386 |
+
|
| 387 |
+
Query: Describe the exercise routine depicted in the video, including the name of the exercises, the number of repetitions, and any additional instructions provided.
|
| 388 |
+
|
| 389 |
+
Dis-preferred Response: The video shows a man performing a mountain climber exercise, which involves kicking the feet back as the hands touch the floor. He also does a walkout push-up, which involves lifting the body off the ground and then returning to the starting position. The video provides the number of repetitions for each exercise, which are 10 to 12 reps.
|
| 390 |
+
|
| 391 |
+
Preferred Response: The video shows a man performing a variety of exercises in a gym. He starts by lifting a dumbbell over his head, then proceeds to do mountain climbers, burpees, kettlebell swings, and push-ups. The video also includes a summary of the workout, which includes the following exercises and repetitions: 1. Dumbbell Squat Press (10-12 reps) 2. Mountain Climbers (10-12 reps) 3. Burpees (10-12 reps) 4. Kettlebell Swings (10-12 reps) 5. Walkout Push-ups (10-12 reps). The man is wearing a black shirt and shorts, and he appears to be in good physical shape.
|
| 392 |
+
|
| 393 |
+

|
| 394 |
+
(c)
|
| 395 |
+
Figure 11: Additional qualitative examples from Video-MME [14].
|
2501.13xxx/2501.13919/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bac1c94c05770410ba108ca8a268bec4d2c5fc450c8adec5c500f286b1603041
|
| 3 |
+
size 1244387
|
2501.13xxx/2501.13919/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2501.13xxx/2501.13925/b36151e7-51e9-449b-a4e2-a01cd46a04c4_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2501.13xxx/2501.13925/b36151e7-51e9-449b-a4e2-a01cd46a04c4_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2501.13xxx/2501.13925/b36151e7-51e9-449b-a4e2-a01cd46a04c4_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:044bc40b2ff2ed0e04555e70bbadcc2d7bc615e3cf11bfaba923bcf04de48b1c
|
| 3 |
+
size 28461067
|
2501.13xxx/2501.13925/full.md
ADDED
|
@@ -0,0 +1,423 @@
|
| 1 |
+
Akashah Shabbir<sup>1</sup> Mohammed Zumri<sup>1</sup> Mohammed Bennamoun<sup>2</sup> Fahad S. Khan<sup>13</sup> Salman Khan<sup>14</sup>
|
| 2 |
+
|
| 3 |
+
<sup>1</sup>Mohamed bin Zayed University of AI, <sup>2</sup>The University of Western Australia, <sup>3</sup>Linköping University, <sup>4</sup>Australian National University
|
| 4 |
+
|
| 5 |
+
{akashah.shabbir,mohammed.zumri}@mbzuai.ac.ae
|
| 6 |
+
|
| 7 |
+
https://github.com/mbzuai-oryx/GeoPixel
|
| 8 |
+
|
| 9 |
+
# Abstract
|
| 10 |
+
|
| 11 |
+
Recent advances in large multimodal models (LMMs) have recognized fine-grained grounding as an imperative factor of visual understanding and dialogue. However, the benefits of such representation in LMMs are limited to the natural image domain, and these models perform poorly for remote sensing (RS). The distinct overhead viewpoint, scale variation, and presence of small objects in high-resolution RS imagery present a unique challenge in region-level comprehension. Moreover, the development of the grounding conversation capability of LMMs within RS is hindered by the lack of granular, RS domain-specific grounded data. Addressing these limitations, we propose GeoPixel - the first end-to-end high-resolution RS-LMM that supports pixel-level grounding. This capability allows fine-grained visual perception by generating interleaved masks in conversation. GeoPixel supports up to 4K HD resolution in any aspect ratio, ideal for high-precision RS image analysis. To support the grounded conversation generation (GCG) in RS imagery, we curate a visually grounded dataset GeoPixelD through a semi-automated pipeline that utilizes set-of-marks prompting and spatial priors tailored for RS data to methodically control the data generation process. GeoPixel demonstrates superior performance in pixel-level comprehension, surpassing existing LMMs in both single-target and multi-target segmentation tasks. Our methodological ablation studies validate the effectiveness of each component in the overall architecture. Our code and data will be publicly released.
|
| 12 |
+
|
| 13 |
+
# 1. Introduction
|
| 14 |
+
|
| 15 |
+
Recent large multimodal models (LMMs) (Liu et al., 2024a; Dai et al., 2023; Bai et al., 2023b; Chen et al., 2024b) have utilized the foundational capabilities of Large Language Models (LLMs) (Touvron et al., 2023; Chiang et al., 2023; Javaheripi et al.; Bai et al., 2023a) and successfully
|
| 16 |
+
|
| 17 |
+
Can you give a thorough description of this image, including interleaved segmentation masks to highlight key objects?
|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
|
| 21 |
+
The image is an aerial view of a sports complex featuring a running track and a swimming pool. The track has multiple lanes, and the field is marked for various sports. The surrounding area includes roads, buildings, and green spaces. The center is a large athletic venue with a multi lane running track encircling a grassy field marked for various sports. The center at the top is large, rectangular, and filled with a blue substance, likely water. The center is set at different locations. A soccer field within the sports complex features a rectangular pitch with goals at each end, surrounded by a running track. The image gives a static and undisturbed appearance as no people or moving objects are visible.
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
Figure 1. An example of visually grounded detailed descriptions generated by the proposed GeoPixel, highlighting its ability to interpret and segment high-resolution remote sensing imagery with fine-grained precision. The model applies distinct masks to key objects (ground track field, swimming pool, soccer field) and semantic mask to smaller objects (vehicles). It effectively identifies spatial positions (e.g., center, top) and relationships (within the sports complex) while distinguishing between the global context (buildings, roads, green spaces) and localized structures.
|
| 25 |
+
|
| 26 |
+
expanded their horizon to the visual modality with promising capabilities. Recent LMMs can not only perform visual recognition, but also excel in the advanced perception and reasoning required for vision-language tasks such as visual question answering, image captioning, visual grounding, and referring expression segmentation. Grounding LMMs (Rasheed et al., 2024; Ma et al., 2025; Zhao et al., 2023) have further advanced the fine-grained, context-aware interpretation of complex visual information by allowing textual outputs to be associated with object instances. Facilitated by large-scale data in the natural image domain, grounding multimodal models pre-trained on extensive datasets have shown impressive capabilities, achieving performance levels comparable to specialist models.
|
| 27 |
+
|
| 28 |
+
However, with increasing granularity of vision and language understanding, these general domain models exhibit significant
|
| 29 |
+
|
| 30 |
+
limitations in adequately supporting complex earth observation tasks. The performance degradation is influenced not only by the unique vantage point inherent to remote sensing (RS) images but also by large variations in the objects' size and orientation. Moreover, in high-resolution remote sensing imagery, objects of interest may exhibit challenging-to-segment spatial footprints, such as narrow bridges that connect urban landscapes and play a critical role in city traffic planning, adding further complexity to the task.
|
| 31 |
+
|
| 32 |
+
Existing vision language models in RS (Luo et al., 2024; Zhang et al., 2024b; Kuckreja et al., 2024) use quantized coordinates in the form of bounding boxes to localize and ground objects in their response. Such a representation structure is not adequate to associate correct object semantics and also adds a computational burden to the LLM that scales with the number of distinguishable objects. Furthermore, monitoring the geospatial environment and its entities demands a broader spatial perspective, now increasingly achievable through advancements in RS technologies that provide high-resolution imagery. However, despite the availability of such rich data, current LMMs in RS struggle to fully exploit this spatial detail. These models often struggle with suboptimal resolution capabilities, hindering their ability to capture the intricate patterns present in high-resolution RS images. In addition, existing RS datasets often lack fine-grained spatial association between objects and their corresponding linguistic descriptions.
|
| 33 |
+
|
| 34 |
+
To address these issues, we present GeoPixel, a model that can generate a detailed natural language response for a high-resolution RS image with corresponding geospatial object segmentation masks. Our contributions are as follows:
|
| 35 |
+
|
| 36 |
+
- Our proposed LMM, GeoPixel, is explicitly designed for high-resolution RS image analysis with advanced multi-target pixel grounding capability. Our model adaptively divides the input images into local and global regions, enabling efficient encoding and analysis by accommodating up to 4k resolution.
|
| 37 |
+
- We create GeoPixelD, a multi-modal grounded conversation generation (GCG) dataset comprising 53,816 grounded phrases linked to 600,817 object masks, specifically tailored for RS image understanding. GeoPixelD offers hierarchically structured annotations, providing rich semantic descriptions that integrate both comprehensive, scene-level contextual information and precise, localized object-level details. Extensively granular annotations are created with segmentation masks through a semi-automated, scalable pipeline that integrates prior-informed visual prompting with state-of-the-art LMMs and ensures quality via rigorous verification and filtering steps.
|
| 38 |
+
- We introduce a comprehensive benchmark designed
|
| 39 |
+
|
| 40 |
+
for the systematic evaluation of RS LMMs in fine-grained visual understanding tasks. This benchmark includes 5,427 manually validated pairs of referring expressions and segmentation masks, encompassing 61,384 annotated objects in RS imagery within detailed descriptions having an average length of 647 characters. Our benchmark offers a robust basis for assessing the model's capabilities in interpreting and responding to complex, spatially grounded information.
|
| 41 |
+
|
| 42 |
+
# 2. Related Work
|
| 43 |
+
|
| 44 |
+
Large Multimodal Models (LMMs): LMMs build on the success of LLMs to acquire vision capabilities. Pioneering works such as LLaVA (Liu et al., 2024b), MiniGPT-4 (Zhu et al., 2023), InstructBLIP (Dai et al., 2023) and mPLUG-Owl (Ye et al., 2023b) aligned visual features with language representations through a vision-language connector, enhanced by instruction tuning to improve multimodal integration. Improving beyond image-level understanding, models such as GPT4RoI (Zhang et al., 2023), InternGPT (Liu et al., 2023b) and RegionGPT (Guo et al., 2024) introduce regional understanding by allowing inputs such as points, masks, and bounding boxes. Some models feed image coordinates directly into the language model, while others employ additional feature extraction modules to represent specific image regions' features effectively.
|
| 45 |
+
|
| 46 |
+
Grounding LMMs: Region-level comprehension is further expanded by models such as Kosmos-2 (Peng et al., 2024), Ferret (You et al., 2023), Shikra (Chen et al., 2023), Pink (Xuan et al., 2024) and LION (Chen et al., 2024a) that allow for the precise location of objects in their outputs based on textual descriptions, a capability known as grounding. These models localize objects on a coarse scale using bounding boxes. Recent models (Lai et al., 2024; Rasheed et al., 2024; Xia et al., 2024; Ren et al., 2024; Zhang et al., 2024d; Liu et al., 2023a) focus on achieving more fine-grained visual and linguistic semantic alignment, by exploring pixel grounding. LISA (Lai et al., 2024), PixelLM (Ren et al., 2024) and GLaMM (Rasheed et al., 2024) incorporate a [SEG] token into the LLM's vocabulary, leveraging its corresponding token embedding as a conditioning input for SAM (Kirillov et al., 2023) to enable segmentation. Additionally, GSVA (Xia et al., 2024) introduces a [REJ] token to explicitly learn to reject specified targets. Llava-plus (Liu et al., 2023a), in contrast, employs LLMs as agents to assign tasks to the segmentation expert.
|
| 47 |
+
|
| 48 |
+
Our work aligns with pixel-grounding approaches, such as those in (Lai et al., 2024; Ren et al., 2024; Rasheed et al., 2024). However, these models do not interpret the distinct top-down perspective and cannot differentiate complex spatial arrangements of remote sensing (RS) imagery. In addition, the models' restricted input size, typically limited
|
| 49 |
+
|
| 50 |
+
Table 1. Comparison of remote sensing large multimodal models (RS-LMMs), focusing on their grounding capabilities. The 'Region Output' column highlights the model's ability to associate objects with specific spatial regions. Existing models primarily utilize LLMs to generate bounding box coordinates for object grounding. However, none of the current RS-LMMs possess the capability for 'pixel grounding', i.e., generating detailed segmentation masks, which are crucial for fine-grained spatial interpretation.
|
| 51 |
+
|
| 52 |
+
<table><tr><td>MODELS</td><td>RESOLUTION</td><td>IMAGE</td><td>REGION OUTPUT</td><td>REGION DECODER</td><td>PIXEL GROUNDING</td><td>END TO END MODEL</td></tr><tr><td>RSGPT (HU ET AL., 2023)</td><td>224 × 224</td><td>✓</td><td>×</td><td>×</td><td>×</td><td>✓</td></tr><tr><td>H2RSVLM (PANG ET AL., 2024)</td><td>336 × 336</td><td>✓</td><td>×</td><td>×</td><td>×</td><td>✓</td></tr><tr><td>RS-LLAVA (BAZI ET AL., 2024)</td><td>336 × 336</td><td>✓</td><td>×</td><td>×</td><td>×</td><td>✓</td></tr><tr><td>GEOCHAT (KUCKREJA ET AL., 2024)</td><td>504 × 504</td><td>✓</td><td>✓</td><td>×</td><td>×</td><td>✓</td></tr><tr><td>SKYEYEGPT (ZHAN ET AL., 2024)</td><td>448 × 448</td><td>✓</td><td>✓</td><td>×</td><td>×</td><td>✓</td></tr><tr><td>EARTHGPT (ZHANG ET AL., 2024C)</td><td>-</td><td>✓</td><td>✓</td><td>×</td><td>×</td><td>✓</td></tr><tr><td>LHRS-BOT (MUHTAR ET AL., 2024)</td><td>224 × 224</td><td>✓</td><td>✓</td><td>×</td><td>×</td><td>✓</td></tr><tr><td>SKYSENSEGPT (LUO ET AL., 2024)</td><td>504 × 504</td><td>✓</td><td>✓</td><td>×</td><td>×</td><td>✓</td></tr><tr><td>GEOPIXEL</td><td>DYNAMIC, UP TO 4K</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr></table>
|
| 53 |
+
|
| 54 |
+
to dimensions such as $224 \times 224$ , exacerbates this issue by constraining the field of view and spatial perception.
|
| 55 |
+
|
| 56 |
+
High-Resolution Understanding: Vision encoders, such as CLIP ViT (Radford et al., 2021), are widely utilized for various vision tasks but are typically constrained by low resolution (e.g. $224 \times 224$), restricting their applicability in high-resolution (HR) scenarios. To address this limitation, some approaches (Dosovitskiy et al., 2021; Bai et al., 2023b; Li et al., 2023) scale positional encodings within the CLIP model through interpolation to accommodate larger input sizes, while others, such as CogAgent (Hong et al., 2024) and Vary (Wei et al., 2025), employ an additional HR branch. Models such as Monkey (Li et al., 2024), SPHINX (Lin et al., 2023), Llava-Next (Liu et al., 2024a), IXC2.5 (Zhang et al., 2024a), Textmonkey (Liu et al., 2024d) and Ureader (Ye et al., 2023a) divide the image into grids, encoding each section independently to enhance performance on HR text-centric tasks.
|
| 57 |
+
|
| 58 |
+
Remote Sensing (RS) LMMs: RSGPT (Hu et al., 2023) is a pioneering RS model that enables natural language conversation and generates detailed captions. This was followed by GeoChat (Kuckreja et al., 2024) that supports region-specific inputs and visual grounding through oriented bounding box coordinates in its responses. Furthermore, SkyEyeGPT (Zhan et al., 2024) extends its functionality to RS video captioning, while EarthGPT (Zhang et al., 2024c) and EarthDial (Soni et al., 2024) integrate various multisensor RS interpretation tasks within the LMM framework.
|
| 59 |
+
|
| 60 |
+
Models such as RS-LLaVA (Bazi et al., 2024) and H2RSVLM (Pang et al., 2024) improve the interpretation of RS data, with H2RSVLM uniquely recognizing and rejecting unanswerable questions. SkySenseGPT (Luo et al., 2024) contributes by implementing image-level scene graph generation and relation reasoning, while LHRS-Bot (Muhtar et al., 2024)
|
| 61 |
+
|
| 62 |
+
enhances multilevel vision-language alignment. However, these models operate on low resolution and lack pixel-level understanding and grounding capabilities.
|
| 63 |
+
|
| 64 |
+
# 3. Method
|
| 65 |
+
|
| 66 |
+
In the current remote sensing landscape, large multimodal models (LMMs) face significant limitations in terms of grounding and resolution capabilities (as seen in Table 1). Specifically, the outputs generated by these models lack precise spatial and semantic association with the imagery, leading to either ungrounded or only coarsely grounded text. Furthermore, most LMMs operate on relatively low-resolution data, which restricts their ability to perform fine-scale analysis essential for RS tasks such as detailed land use and transportation network extraction, infrastructure mapping, damage assessment, and environmental monitoring. To address these limitations, we present GeoPixel, a model designed to interpret high-resolution remote sensing images and generate finely detailed, pixel-grounded outputs that encompass multiple target objects.
|
| 67 |
+
|
| 68 |
+
# 3.1. GeoPixel Architecture Overview
|
| 69 |
+
|
| 70 |
+
GeoPixel primarily consists of five components (see Figure 2): (1) Adaptive Image Divider, (2) Vision Encoder, (3) Large Language Model, (4) Grounding Vision Encoder, and (5) Pixel Decoder. The first three components are discussed in Section 3.2, and the latter two in Section 3.3. Jointly, these modules enable high-resolution perception, fine-grained interpretation, and grounding, as detailed below.
|
| 71 |
+
|
| 72 |
+
# 3.2. High Resolution Understanding
|
| 73 |
+
|
| 74 |
+
For high resolution, we adopt the dynamic image partitioning strategy of IXC-2.5 (Zhang et al., 2024a). Initially, the
|
| 75 |
+
|
| 76 |
+

|
| 77 |
+
Figure 2. Overview of GeoPixel Architecture: Left: High-resolution RS images are dynamically partitioned into local patches and a resized global view, encoded by a frozen vision encoder. The encodings are projected into the language domain with separator tokens. Middle: Vision tokens, combined with text, are input into the LLM, where pLoRA is applied to vision tokens for efficient and effective multimodal alignment. Right: The corresponding embeddings for the [SEG] tokens are passed to a decoder through text projector, along with vision embeddings from the grounding vision encoders, to generate precise segmentation masks.
|
| 78 |
+
|
| 79 |
+
adaptive image divider processes the input image $x_{img}$ , with dimensions $[h_i \times w_i]$ , by up-scaling and padding it to align with the closest grid size denoted as $[g_h \times g_w]$ .
|
| 80 |
+
|
| 81 |
+
$$
|
| 82 |
+
g_h = k_1 \times \mathcal{B}, \quad g_w = k_2 \times \mathcal{B}, \tag{1}
|
| 83 |
+
$$
|
| 84 |
+
|
| 85 |
+
$$
|
| 86 |
+
\text{s.t. } k_1, k_2 \in \mathbb{N}, \quad k_1 \times k_2 \leq \mathcal{P}
|
| 87 |
+
$$
|
| 88 |
+
|
| 89 |
+
where $\mathcal{B}$ is the base resolution of the vision encoder and $\mathcal{P}$ is the number of maximum allowable image patches. Subsequently, the image is divided into $k_{1} \times k_{2}$ non-overlapping patches $x_{p_{i,j}}$ , where $p = 0, 1, 2, \ldots, (k_{1} \times k_{2} - 1)$ , and $i, j$ denote the row and column indices of each patch in the grid.
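A minimal sketch of the grid selection implied by Eq. (1) follows: among all $k_1, k_2$ with $k_1 k_2 \leq \mathcal{P}$, pick the grid whose aspect ratio best matches the input image. The scoring and tie-breaking rules of the actual IXC-2.5-style divider may differ, and the value of `max_patches` is an assumption.

```python
import math

def select_grid(h, w, base=560, max_patches=25):
    """Pick k1, k2 (k1 * k2 <= max_patches) so that a k1 x k2 grid of
    base-resolution patches best matches the image aspect ratio."""
    target = h / w
    best, best_err = (1, 1), float("inf")
    for k1 in range(1, max_patches + 1):
        for k2 in range(1, max_patches // k1 + 1):
            err = abs(math.log((k1 / k2) / target))
            # Among near-equal aspect ratios, prefer more patches (finer detail).
            if err < best_err - 1e-9 or (abs(err - best_err) < 1e-9
                                         and k1 * k2 > best[0] * best[1]):
                best, best_err = (k1, k2), err
    k1, k2 = best
    return k1, k2, (k1 * base, k2 * base)  # grid and padded target (g_h, g_w)

# A 2000 x 3000 image maps to a 4 x 6 grid, up-scaled/padded to 2240 x 3360.
print(select_grid(2000, 3000))
```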
|
| 90 |
+
|
| 91 |
+
We employ the scaled CLIP ViT-L/14 (Zhang et al., 2024a) as our vision encoder $(\mathcal{I})$, with a base resolution of $\mathcal{B} = 560$, facilitating large patches for enhanced visual representation. Furthermore, a global view $x_{glob}$ is generated by resizing $x_{img}$ to a fixed dimension of $560 \times 560$, aligned with the base resolution $\mathcal{B}$. Feature embeddings of patches $f_{p_{i,j}}$ are appended with a learnable token at the end of each row before flattening and merging (Dong et al., 2024b). Finally, global features $f_{glob}$ and patch features $f_p$ are concatenated $(\|)$ with a special separator $(s_g)$ inserted between them (Ding et al., 2019), effectively integrating global semantics with fine-grained local details.
|
| 92 |
+
|
| 93 |
+
$$
|
| 94 |
+
x_v = \mathcal{P}_v\left(f_{\text{glob}} \,\|\, s_g \,\|\, f_p\right) \tag{2}
|
| 95 |
+
$$
|
| 96 |
+
|
| 97 |
+
s.t. $f_{glob} = \mathcal{I}(x_{glob})$ , $f_{p_{i,j}} = \mathcal{I}(x_{p_{i,j}})$
|
| 98 |
+
|
| 99 |
+
We project the final unified image features into the LLM, the InternLM2 7B model (Cai et al., 2024), denoted $\mathcal{L}$, through a two-layer MLP serving as the vision projector $\mathcal{P}_v$. InternLM2 is an LLM designed to process sequences of text tokens, where its input consists of a sequence of discrete embeddings derived from textual data. These embeddings correspond either to natural language tokens or to special placeholders inserted to represent external modalities. The placeholder $<\text{IMAGE}>$ in the input text query $x_t$ is a special token marking the position of the image within the input sequence. When processing multimodal input, this placeholder is replaced with the visual features $x_v$ extracted from the image and projected into the same embedding space using $\mathcal{P}_v$.
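The placeholder substitution can be illustrated with a short sketch: one projected vision-feature row is written into each position reserved for the image. Real implementations typically expand a single $<\text{IMAGE}>$ token into the full sequence of vision tokens, so treat this only as an illustration of the mechanism.

```python
import torch

def splice_image_features(text_embeds, image_token_mask, vision_feats):
    """Replace the <IMAGE> placeholder positions of a token-embedding
    sequence with projected vision features x_v."""
    out = text_embeds.clone()
    out[image_token_mask] = vision_feats  # one feature row per reserved slot
    return out

# 10 token slots of width 32; positions 2..5 are reserved for vision tokens.
emb = torch.zeros(10, 32)
mask = torch.zeros(10, dtype=torch.bool)
mask[2:6] = True
print(splice_image_features(emb, mask, torch.randn(4, 32)).shape)  # (10, 32)
```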
|
| 100 |
+
|
| 101 |
+
Partial Low-Rank Adaptation (LoRA) (Dong et al., 2024a) is then applied to ensure efficient alignment of the vision tokens. Partial LoRA is a modality-specific plug-in module designed to align features from a new modality with the LLM, preserving the model's inherent capabilities while enriching it with modality-specific insights. By applying low-rank adaptations selectively to visual tokens, Partial LoRA enhances alignment efficiency while reducing computational cost. Formally, it introduces low-rank matrices $W_{A} \in \mathbb{R}^{C_{r} \times C_{in}}$ and $W_{B} \in \mathbb{R}^{C_{out} \times C_{r}}$ within each LLM linear layer, modifying the visual token outputs $x_{v}$ without altering the language token outputs $x_{t}$, thus achieving tailored cross-modal integration.
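A minimal sketch of such a layer is shown below, following the $W_A$, $W_B$ definitions above: the low-rank update is gated by a boolean mask over token positions so that only vision tokens are modified. The rank, layer placement, and initialization are assumptions, not the reference implementation.

```python
import torch
import torch.nn as nn

class PartialLoRALinear(nn.Module):
    """Linear layer whose low-rank update (W_B @ W_A) touches only
    visual-token positions, leaving language-token outputs unchanged."""

    def __init__(self, c_in, c_out, rank=8):
        super().__init__()
        self.base = nn.Linear(c_in, c_out)            # frozen LLM weight in practice
        self.w_a = nn.Linear(c_in, rank, bias=False)  # W_A in R^{C_r x C_in}
        self.w_b = nn.Linear(rank, c_out, bias=False) # W_B in R^{C_out x C_r}
        nn.init.zeros_(self.w_b.weight)               # update starts at zero

    def forward(self, x, vis_mask):
        # x: [batch, seq, c_in]; vis_mask: [batch, seq] bool (True = vision token)
        out = self.base(x)
        update = self.w_b(self.w_a(x))
        return out + update * vis_mask.unsqueeze(-1).to(out.dtype)

layer = PartialLoRALinear(16, 16)
x = torch.randn(1, 6, 16)
vis_mask = torch.tensor([[True, True, True, True, False, False]])
print(layer(x, vis_mask).shape)  # torch.Size([1, 6, 16])
```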
|
| 102 |
+
|
| 103 |
+

|
| 104 |
+
Figure 3. The GeoPixelD Annotation Pipeline provides detailed multi-tier descriptions of remote sensing imagery with object phrases aligned precisely with manually annotated masks. It begins with Holistic Image Annotation (bottom left), where an LMM generates concise scene descriptions. Individual Instance Annotation (bottom right) uses spatial ({pos}) and categorical ({category_name}) priors with SOM ({mark_number}) prompting to describe key objects. Cluster Annotation (top right) organizes smaller or dense objects using refined grids for precise spatial analysis.
|
| 105 |
+
|
| 106 |
+
# 3.3. Pixel Grounding
|
| 107 |
+
|
| 108 |
+
To establish grounding in the LMM, we initialize the grounding vision encoder $(\mathcal{I}_g)$ with a pre-trained SAM-2 (Ravi et al., 2024) encoder together with a dedicated pixel decoder module $(\mathcal{D})$. The SAM-2 visual encoder is an MAE (He et al., 2022) pre-trained Hiera (Ryali et al., 2023) image encoder whose hierarchical structure allows the use of multiscale features during decoding. The tokenizer's vocabulary is expanded by incorporating an additional $<\mathrm{SEG}>$ token, with its corresponding last-layer embedding $(E)$ mapped to the decoder through a text projection layer $\mathcal{P}_t$. The text projection is a two-layer MLP that receives embeddings of dimension 4096 and transforms them into the input space of the pixel decoder, which has a dimensionality of 256.
|
| 109 |
+
|
| 110 |
+
The pixel decoder processes the image features from the frozen grounding vision encoder, along with projected LLM embeddings, to generate segmentation masks $(M)$ . The grounding vision encoder (SAM-2) is already pre-trained on large-scale datasets, making it highly effective at extracting robust, generalized image features for segmentation. Freezing the encoder ensures that these pretrained features are preserved. However, the light-weight pixel decoder and projection layer are trained to adapt pretrained vision features for segmentation tasks in GeoPixel.
|
| 111 |
+
|
| 112 |
+
$$
|
| 113 |
+
M = \mathcal{D}\left[\mathcal{I}_g\left(x_{img}\right), \mathcal{P}_t(E)\right] \tag{3}
|
| 114 |
+
$$
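The $\mathcal{P}_t(E)$ term of Eq. (3) can be sketched as follows: gather the last-layer hidden states at $<\mathrm{SEG}>$ positions and project them from 4096 to 256 dimensions. How the pixel decoder then consumes these prompts together with the frozen SAM-2 features is model-specific, and the token id used here is an assumption.

```python
import torch
import torch.nn as nn

# Text projector P_t: two-layer MLP from the LLM's 4096-d hidden space
# to the pixel decoder's 256-d prompt space.
text_projector = nn.Sequential(
    nn.Linear(4096, 4096), nn.ReLU(), nn.Linear(4096, 256)
)

def project_seg_embeddings(hidden_states, input_ids, seg_token_id):
    """Gather last-layer hidden states at <SEG> positions and project them."""
    mask = input_ids == seg_token_id  # [batch, seq] bool
    e = hidden_states[mask]           # E: [num_seg, 4096]
    return text_projector(e)          # P_t(E): [num_seg, 256]

# One sequence with two <SEG> tokens (token id 32001 is assumed).
hs = torch.randn(1, 10, 4096)
ids = torch.full((1, 10), 5)
ids[0, 3] = ids[0, 7] = 32001
print(project_seg_embeddings(hs, ids, 32001).shape)  # torch.Size([2, 256])
```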
|
| 115 |
+
|
| 116 |
+
Given the variable length of the input image tokens, resulting from adaptive image partitioning, the output embedding
|
| 117 |
+
|
| 118 |
+
mask for $<\mathrm{SEG}>$ tokens is dynamically adjusted to align with these variations. This configuration ensures accurate detection of the $<\mathrm{SEG}>$ token and its associated embedding.
|
| 119 |
+
|
| 120 |
+
# 4. GeoPixelD-RS Pixel Grounding Dataset
|
| 121 |
+
|
| 122 |
+
Remote sensing imagery captures intricate semantic information and complex inter-object relationships across diverse spatial scales. To enable LMMs to acquire a detailed comprehension ability, it is essential to integrate broad contextual views with object-level distinction. Addressing the current deficit in datasets capable of facilitating a fine-grained understanding of top-down perspectives, we introduce GeoPixelD, a dataset established to provide hierarchical descriptions derived through automated multilevel image analysis. GeoPixelD structures its descriptions at three primary levels: (1) holistic scene representation, (2) individual instance observations, and (3) densely populated object group annotations (as depicted in Figure 3).
|
| 123 |
+
|
| 124 |
+
# 4.1. Holistic Image Annotation
|
| 125 |
+
|
| 126 |
+
Initially, we generated descriptive captions for RS images using a robust open-source model, IXC (Zhang et al., 2024a), to capture comprehensive and diverse image details. We chose the IXC model based on a comparative study conducted with other state-of-the-art vision-language models, in which IXC consistently outperformed its counterparts in terms of qualitative performance. These open-ended descriptions are constrained to a limited length, integrated in prompts like, "<image> Describe
|
| 127 |
+
|
| 128 |
+
the image in four short sentences." (Figure 3 (bottom left)). Thus, redundancy is effectively minimized in subsequent annotations, and the model is driven to provide a holistic, context-rich depiction of each image.
|
| 129 |
+
|
| 130 |
+
# 4.2. Individual Instance Annotation
|
| 131 |
+
|
| 132 |
+
Next, we identify prominent objects for the depiction and employ a technique known as set-of-mark (SOM) prompting (Yang et al., 2023). This approach involves adding a distinct set of visual markers over specific regions in an image, providing auxiliary information to obtain visually grounded outputs. However, directly employing this method for aerial imagery, which is characterized by expansive views and diverse objects and landscapes within a single frame, leads to challenges, such as the generation of hallucinated markers and incorrectly associated details (see Figure 6).
|
| 133 |
+
|
| 134 |
+
To address the challenge of accurate object description in complex RS images, we implemented an enhanced approach to spatially guide the model. We introduce prior knowledge in the query in the form of category name and location along with a marked number to accurately direct the model and create a comprehensive description of the target object.
|
| 135 |
+
|
| 136 |
+
Specifically, we partition each image into a $3 \times 3$ grid (nine quadrants). For each object, we calculate its positional reference by determining the degree of overlap with these quadrants, thereby localizing it within the grid structure. This quadrant-based localization, combined with categorical labels and marked numbers, is then fed as positional and categorical priors into the LMM, enabling it to focus more accurately on the intended object and retrieve relevant details, a process that proves effective given the densely packed and spatially complex nature of RS imagery, where objects often vary in scale, orientation, and proximity.
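A minimal sketch of this quadrant-based localization is given below: each object is assigned the $3 \times 3$ cell it overlaps most. The actual pipeline may weight overlaps differently (for instance, favoring the central cell), so this is illustrative only.

```python
def quadrant_position(box, img_w, img_h):
    """Assign a coarse 3x3-grid position label to a bounding box
    (x1, y1, x2, y2) by the cell it overlaps most."""
    labels = [["top left", "top", "top right"],
              ["left", "center", "right"],
              ["bottom left", "bottom", "bottom right"]]
    cw, ch = img_w / 3.0, img_h / 3.0
    best, best_area = None, -1.0
    for r in range(3):
        for c in range(3):
            # Intersection area between the box and grid cell (r, c).
            ix = max(0.0, min(box[2], (c + 1) * cw) - max(box[0], c * cw))
            iy = max(0.0, min(box[3], (r + 1) * ch) - max(box[1], r * ch))
            if ix * iy > best_area:
                best, best_area = labels[r][c], ix * iy
    return best

print(quadrant_position((700, 100, 900, 250), 1024, 1024))  # 'top right'
```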
|
| 137 |
+
|
| 138 |
+
In addition, we conducted a comprehensive evaluation of various open-source and proprietary models for prior-informed modified SOM prompting applied to RS imagery (see Figure 7). The analysis also included a comparative assessment of combined versus individual querying approaches. ChatGPT (OpenAI, 2023) demonstrated the ability to generate detailed descriptions while incorporating inferred information, whereas Gemini (Team et al., 2023) and InternVL (Chen et al., 2024b) exhibited repetitive output as the number of target objects within the image increased. InternLM-XComposer (Zhang et al., 2024a) achieved performance comparable to ChatGPT in terms of the proportion of accurate responses generated and diversity in details.
# 4.3. Cluster/Crowd Annotation
Once prominent large objects are identified, marked, and annotated, the remaining objects are grouped or identified along with their spatial properties, which are obtained through a structured three-stage positional analysis. In the first stage, the image is divided into a $3 \times 3$ grid, with each grid cell assigned a unique identifier corresponding to its spatial location. To enhance alignment with human perceptual tendencies, the central region of the grid is given a larger spatial weight. In the second stage, a $2 \times 2$ grid is considered for localizing more dispersed objects. Similarly, in the third stage, image halves ($1 \times 2$ and $2 \times 1$ grids) are considered to assign positional information. This gridding provides a systematic framework for analyzing the location of clusters as well as large groups of objects within the image. An LMM is then used to describe the group attributes given the quantitative information along with the determined positional information.
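One possible reading of this three-stage assignment is sketched below; the rule that a group takes the finest grid on which all member centroids agree is our own interpretation, and the central-region weighting is omitted for brevity.

```python
def grid_cell(cx: float, cy: float, w: int, h: int, nx: int, ny: int) -> tuple:
    """Return the (col, row) cell of an nx-by-ny grid containing (cx, cy)."""
    return min(int(cx / w * nx), nx - 1), min(int(cy / h * ny), ny - 1)

def assign_position(centroids, w: int, h: int):
    """Stage 1: 3x3 grid; Stage 2: 2x2 grid; Stage 3: image halves.

    A group is assigned at the finest stage where all of its member
    centroids fall into the same cell; otherwise coarser grids are tried.
    """
    stages = [(3, 3, "3x3"), (2, 2, "2x2"), (1, 2, "halves-h"), (2, 1, "halves-v")]
    for nx, ny, name in stages:
        cells = {grid_cell(cx, cy, w, h, nx, ny) for cx, cy in centroids}
        if len(cells) == 1:
            return name, cells.pop()
    return "full-image", None
```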
# 4.4. Unifying Annotations and Language Marking
For the preprocessed training subset of the iSAID (Waqas Zamir et al., 2019) dataset (Appendix A), we derive a total of 16,795 holistic image-level annotations, 36,793 instance-specific annotations, and 17,023 group annotations, collectively encompassing 600,817 objects within RS imagery. The annotations were rigorously filtered to eliminate aerial perspective inconsistencies, removing artifacts such as marker identifiers, fore/background references, distance perception, and contextually inconsistent descriptors. The key noun chunk corresponding to the object category in individual- and group-level annotations is tagged with unique identifiers ('phrase-number'), each linked to an instance or semantic mask, a process termed text marking. To unify these hierarchical annotations into a coherent description, the marked annotations are then combined with holistic scene representations to form a single descriptive narrative. We employ a Llama-3.1-instruct 8B (Dubey et al., 2024) LLM to paraphrase concatenated annotations while preserving their semantic integrity (see Figure 8). The LLM processes the concatenated text under strict constraints to retain all marked phrases unchanged, ensuring a consistent link to their associated visual masks. The outputs are rigorously evaluated for consistency, and iterative paraphrasing is applied if any marked phrases are not preserved. By adopting this language marking strategy, the GeoPixelD dataset achieves a robust framework to generate high-quality GCG descriptions that are contextually rich and precisely aligned with visual elements.
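The marked-phrase consistency check described above can be sketched as follows; the regular expression, the retry budget, and the `llm_paraphrase` callable (standing in for the Llama-3.1-Instruct-8B call) are our assumptions.

```python
import re

# Marked phrases look like "boat-7" or "swimming-pool-4" (assumed pattern).
PHRASE_RE = re.compile(r"\b[a-z]+(?:-[a-z]+)*-\d+\b")

def unify(holistic: str, instances: str, clusters: str,
          llm_paraphrase, max_tries: int = 5) -> str:
    """Paraphrase concatenated annotations while preserving marked phrases."""
    concatenated = " ".join([holistic, instances, clusters])
    required = set(PHRASE_RE.findall(concatenated))
    for _ in range(max_tries):
        draft = llm_paraphrase(concatenated)
        # Accept only if every marked phrase survives verbatim, keeping
        # the link between text spans and their masks intact.
        if required.issubset(set(PHRASE_RE.findall(draft))):
            return draft
    return concatenated  # fall back to the unparaphrased text
```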
A similar procedure is followed for the test-set GCG descriptions derived from the iSAID validation subset. Each GCG description within this set undergoes meticulous manual curation, an effort requiring approximately 350 man-hours to ensure annotation completeness. The process includes correcting any omissions, inaccuracies, or partial annotations, including adjustments to object attributes that do not align with the corresponding image, thereby establishing a high-quality evaluation benchmark.
Table 2. Performance Comparison on RS-GCG task. LISA† and PixelLM† denote the pretrained LISA and PixelLM models adopted for RS-GCG and finetuned on GeoPixelD training data. GLaMM represents the zero-shot performance, whereas GLaMM-FT refers to the pretrained model finetuned on GeoPixelD. GeoPixel outperforms other models across all metrics.
<table><tr><td rowspan="2">MODEL</td><td rowspan="2">CIDEr</td><td rowspan="2">METEOR</td><td colspan="3">UNI-TARGET</td><td colspan="3">MULTI-TARGET</td><td colspan="3">OVERALL</td></tr><tr><td>AP50</td><td>MIOU</td><td>RECALL</td><td>AP50</td><td>MIOU</td><td>RECALL</td><td>AP50</td><td>MIOU</td><td>RECALL</td></tr><tr><td>GLAMM (CVPR'24)</td><td>0.1</td><td>5.8</td><td>1.2</td><td>18.1</td><td>14.8</td><td>0.5</td><td>16.5</td><td>6.3</td><td>0.5</td><td>16.9</td><td>7.1</td></tr><tr><td>LISA† (CVPR'24)</td><td>14.6</td><td>22.3</td><td>9.5</td><td>41.7</td><td>43.1</td><td>8.3</td><td>43.1</td><td>27.5</td><td>8.5</td><td>42.7</td><td>29.0</td></tr><tr><td>PIXELLM† (CVPR'24)</td><td>18.3</td><td>22.5</td><td>13.5</td><td>41.2</td><td>44.0</td><td>10.4</td><td>42.9</td><td>28.1</td><td>10.5</td><td>42.4</td><td>29.6</td></tr><tr><td>GLAMM-FT (CVPR'24)</td><td>15.7</td><td>23.0</td><td>18.8</td><td>44.4</td><td>48.5</td><td>12.4</td><td>47.1</td><td>31.1</td><td>12.5</td><td>46.4</td><td>32.8</td></tr><tr><td>GEOPIXEL</td><td>21.6</td><td>24.0</td><td>25.5</td><td>50.8</td><td>55.6</td><td>18.0</td><td>52.9</td><td>37.0</td><td>19.0</td><td>52.3</td><td>38.8</td></tr></table>
# 5. Experiments
Here, we explain the implementation details, present a comparative performance analysis on Remote Sensing Grounded Conversation Generation (RS-GCG) and Referring Remote Sensing Image Segmentation (RRSIS), and include an ablation study to assess the impact of key components.
# 5.1. Implementation Details
The model weights are initialized using the pre-trained InternLM-XComposer-2.5 model (IXC-2.5) with 7B parameters, utilizing LoRA for efficient fine-tuning of the LLM. A fixed CLIP ViT-L vision encoder with a resolution of $560 \times 560$ is employed, along with a grounded vision encoder initialized from SAM2 weights. The trainable components of the architecture include a pixel decoder $(\mathcal{D})$, LoRA parameters $(\alpha = 8)$, a vision projector $\mathcal{P}_v$, and a language projector $\mathcal{P}_t$. For the adaptive image divider, we set the maximum patch number $\mathcal{P}$ to 9 for training. In our training process, we use an effective batch size of 20 over 10 epochs. The learning rate is scheduled to increase linearly to a maximum value of $3 \times 10^{-4}$ over the initial 100 training steps, followed by a gradual decrease governed by a cosine decay strategy. We train GeoPixel on the GeoPixelD dataset for the grounded conversation generation task on two NVIDIA A6000-48GB GPUs, which takes around 3 days.
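The learning-rate schedule amounts to a linear warmup followed by cosine decay; a minimal sketch is shown below (the zero floor after decay is our assumption).

```python
import math

def lr_at(step: int, total_steps: int,
          peak_lr: float = 3e-4, warmup: int = 100) -> float:
    """Linear warmup to peak_lr over `warmup` steps, then cosine decay."""
    if step < warmup:
        return peak_lr * (step + 1) / warmup
    progress = min(1.0, (step - warmup) / max(1, total_steps - warmup))
    return 0.5 * peak_lr * (1.0 + math.cos(math.pi * progress))
```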
# 5.2. Baselines
To rigorously evaluate the efficacy of GeoPixel, we introduce three robust baselines for comparative analysis on the GeoPixelD benchmark. The first baseline, LISA†, is an improved version of the LISA model, modified to incorporate multitarget segmentation masks within its output pipeline. Furthermore, the tokenizer is updated to include phrase tokens (<p> and </p>) essential for the GCG task, allowing precise identification of contextual phrases within descriptive outputs that correspond to the associated segmentation masks. The second baseline is derived from the PixelLM† model, configured without the SAM encoder. In this setup, the codebook is configured with image feature scaling fixed at a factor of 2, the number of segmentation tokens set to 3, and the vision tower resize parameter defined at 448. Phrase tokens are added, and the <SEG> token in the data is replaced with multiple codebook tokens according to the selected configuration. The third baseline, GLaMM, specifically focuses on the GLaMM-GCG variant, a model tailored for the Grounded Conversation Generation task. For LISA†, PixelLM†, and GLaMM-FT, model weights are initialized using pretrained LISA-7B-v1, PixelLM-7B, and GLaMM-GCG (7B), respectively, and additionally trained on GeoPixelD data for the RS-GCG task.
# 5.3. Results
Remote Sensing Grounded Conversation Generation: Table 2 provides a comparative analysis of the performance of various models on the RS-GCG task. The models are evaluated across different metrics, including CIDEr, METEOR, AP50, mIoU, and recall, segmented into Uni-Target, Multi-Target, and Overall categories. GeoPixel demonstrates superior performance on all metrics compared to the baselines, showing better fluency and text relevance in its textual outputs. In more complex multi-target scenarios, GeoPixel maintains strong performance. In contrast, LISA† struggles with segmentation-based tasks, as evidenced by its low AP50 scores in all categories. PixelLM† shows a moderate improvement over LISA†, benefiting from better image feature scaling and segmentation token adjustments. GLaMM-FT exhibits improved outcomes due to its dedicated grounding encoder and GCG pre-training; however, its performance remains inferior to that of GeoPixel. Figure 4 presents the qualitative results.
Referring Remote Sensing Image Segmentation: This task focuses on segmenting specific regions in aerial imagery guided by textual descriptions. The input prompt used is: "Could you provide a segmentation mask for {referring_expression} in this image?" The model generates the response, "Sure, it is <SEG>.", where the corresponding embedding of the <SEG> token is subsequently decoded to produce the segmentation mask. To address this task, we fine-tune the GeoPixel model on the RRSIS-D (Liu et al., 2024c) dataset.
Figure 4. Qualitative results of GeoPixel on RS-GCG. Contextually rich descriptions of RS imagery with grounded object annotations. Depending on object scale and density, it employs instance masks for precise delineation of individual objects (right and middle-right images) while semantic masks capture broader categories, such as large clusters of vehicles or small objects (middle-left and left images).
The image is a view of a sports complex, featuring a running track with a red running surface and green surrounding areas that may be grassy fields or additional sports facilities. The ground track field at the center is a well defined athletic track with a curved shape, surrounded by a grassy area with trees, and the soccer field at the bottom right is a well maintained grassy area with visible markings for gameplay. The structures, which could be seating areas or other facilities, are visible on one side of the track. The presence of trees and open spaces suggests that the complex is designed for outdoor activities and possibly community events, and the serene atmosphere is due to the absence of people in the scene.
The image is an aerial photograph of a residential area with several houses surrounded by trees. It features two prominent docks extending into a body of water, suggesting proximity to a lake or river. The layout of the roads and the positioning of the houses indicate a suburban setting, with a pier at the top being elongated, straight, and extending into the water with a perpendicular docking area at its end. A pier at the top is elongated, straight, and extends from the land into the water, with no visible structures or objects on it. A swimming pool at the bottom is rectangular, filled with blue water, and surrounded by a dark colored deck. A solitary small vehicle is parked on a driveway at the bottom left of the image, adjacent to a house with a dark roof. The presence of greenery and the absence of commercial buildings or high density housing structures suggest that this is a quiet, possibly affluent neighborhood.
The image is an aerial view of a parking lot with numerous cars parked in designated spaces, arranged in orderly rows, indicating a well organized parking system. The parking lot appears to be part of a larger facility, possibly a commercial or industrial complex, as suggested by the presence of trees and other structures. The image depicts a large parking area with multiple large vehicles, including buses and possibly coaches, parked in an organized manner. There are multiple small vehicles scattered across various regions. The absence of people in the image could imply that the photo was taken during a time of low activity or from a high vantage point where individuals are not easily discernible.
The image is an aerial photograph of a rural area with a road cutting through it, appearing to be a two lane highway with vehicles traveling on it. The surrounding landscape is predominantly dry and sparsely vegetated, indicative of a desert or arid environment. On the road, there are four small vehicles. The scene has a natural and undeveloped appearance, with no visible buildings or infrastructure other than the road itself.
Table 3. Performance Comparison of GeoPixel in Referring Expression Segmentation on RRSIS-D dataset. The segmentation accuracy based on referring expressions is expressed through the Precision at IoU threshold of 0.5 (P@0.5), Overall Intersection-over-Union (oIoU) and Mean Intersection-over-Union (mIoU).
<table><tr><td rowspan="2">METHOD</td><td colspan="3">VALIDATION SET</td><td colspan="3">TEST SET</td></tr><tr><td>P@0.5</td><td>oIOU</td><td>MIOU</td><td>P@0.5</td><td>oIOU</td><td>MIOU</td></tr><tr><td>RRN (LI ET AL., 2018)</td><td>51.09</td><td>66.53</td><td>46.06</td><td>51.07</td><td>66.43</td><td>45.64</td></tr><tr><td>CSMA (YE ET AL., 2019)</td><td>55.68</td><td>69.68</td><td>48.85</td><td>55.32</td><td>69.39</td><td>48.54</td></tr><tr><td>LSCM (HUI ET AL., 2020)</td><td>57.12</td><td>69.28</td><td>50.36</td><td>56.02</td><td>69.05</td><td>49.92</td></tr><tr><td>CMPC (HUANG ET AL., 2020)</td><td>57.93</td><td>70.15</td><td>50.41</td><td>55.83</td><td>69.22</td><td>49.24</td></tr><tr><td>BRINET (HU ET AL., 2020)</td><td>58.79</td><td>70.73</td><td>51.14</td><td>56.90</td><td>69.88</td><td>49.65</td></tr><tr><td>CMPC+ (LIU ET AL., 2022)</td><td>59.19</td><td>70.14</td><td>51.41</td><td>57.65</td><td>68.64</td><td>50.24</td></tr><tr><td>LGCE (YUAN ET AL., 2024)</td><td>68.10</td><td>76.68</td><td>60.16</td><td>67.65</td><td>76.34</td><td>59.37</td></tr><tr><td>LAVT (YANG ET AL., 2024)</td><td>69.54</td><td>77.59</td><td>61.46</td><td>69.52</td><td>77.19</td><td>61.04</td></tr><tr><td>RMSIN (LIU ET AL., 2024C)</td><td>74.66</td><td>78.27</td><td>65.10</td><td>74.26</td><td>77.79</td><td>64.20</td></tr><tr><td>GEOPIXEL-FT</td><td>80.00</td><td>81.77</td><td>67.99</td><td>83.33</td><td>84.90</td><td>67.30</td></tr></table>
The resulting GeoPixel-FT model demonstrates superior performance compared to recent approaches, as shown by the results on the RRSIS-D test and validation sets in Table 3. Qualitative results are provided in Figure 9.
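The referring-segmentation interface thus reduces to a fixed prompt plus decoding of the <SEG> embedding. A hedged sketch follows, where `geopixel_generate` and `decode_seg_embedding` are hypothetical stand-ins for the model's generation call and pixel decoder.

```python
PROMPT = "Could you provide a segmentation mask for {expr} in this image?"

def refer_segment(image, expr: str, geopixel_generate, decode_seg_embedding):
    """Return a binary mask for the object named by a referring expression."""
    text, seg_embeddings = geopixel_generate(image, PROMPT.format(expr=expr))
    # Expected response: "Sure, it is <SEG>." with one embedding per <SEG>.
    assert "<SEG>" in text, "model did not emit a segmentation token"
    return decode_seg_embedding(image, seg_embeddings[0])
```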
# 5.4. Ablation Study
Inference Resolution Effect: Increasing the number of inference patches yields a consistent improvement across all evaluation metrics, reflecting improved model comprehension of visual content (Table 4).
Table 4. Effect of Inference Resolution. Reported metrics show the relationship between resolution and overall performance.
<table><tr><td>TRAINING PATCHES</td><td>INFERENCE PATCHES</td><td>CIDER</td><td>METEOR</td><td>AP50</td><td>MIOU</td><td>RECALL</td></tr><tr><td rowspan="3">P = 9</td><td>P = 1</td><td>14.6</td><td>23.1</td><td>12.9</td><td>47.8</td><td>32.2</td></tr><tr><td>P = 4</td><td>17.7</td><td>23.9</td><td>16.6</td><td>51.8</td><td>37.1</td></tr><tr><td>P = 9</td><td>20.5</td><td>24.3</td><td>17.6</td><td>52.1</td><td>37.4</td></tr></table>
For example, at $\mathcal{P} = 9$, CIDEr increases from 14.6 to 20.5, and METEOR improves from 23.1 to 24.3, indicating improved semantic understanding as the number of image tokens scales up. The moderate gains observed in AP50 and mIoU suggest that while high-resolution inference contributes to superior localization accuracy, competitive performance can still be maintained at lower resolutions when the model is pretrained at higher resolutions. The superior results associated with training at a high patch count $(\mathcal{P} = 9)$ underscore the critical role of incorporating fine-grained spatial details during the training phase for generalized feature learning.
Annotation Complexity Effect: GeoPixel adjusts its masking output based on object size and distribution (as seen in Figure 4), utilizing instance masks for precise identification of individual objects, while semantic masks are generated to represent broader categories, such as clusters or small objects. In scenarios requiring both granularity and generalization, the model integrates hybrid annotations, blending instance-level and semantic mask representations (as seen in Figure 1).
Table 5. Effect of Annotation Complexity. Avg. Len is the average character length of captions.
<table><tr><td>DATA</td><td>OBJECTS</td><td>PHRASES</td><td>AVG. LEN</td><td>MIOU</td><td>RECALL</td></tr><tr><td>INSTANCES ONLY</td><td>1,740</td><td>1,740</td><td>634</td><td>58.4</td><td>48.8</td></tr><tr><td>SEMANTIC ONLY</td><td>21,483</td><td>698</td><td>518</td><td>44.1</td><td>37.7</td></tr><tr><td>MIX DATA</td><td>38,161</td><td>2,989</td><td>737</td><td>50.9</td><td>33.3</td></tr></table>
The effect of this annotation complexity is shown in Table 5, with the lowest mask recall observed in the case of mixed annotations.
Remote sensing images often contain visually similar objects with subtle variations in appearance, spatial arrangement, and positional proximity, yet exhibit significant scale variations across different images. This inherent complexity challenges the model's ability to accurately differentiate between object presence, quantity, and the corresponding type of annotation required (e.g., instance level or semantic level). The challenge is particularly evident in the semantic-only category, where the model exhibits the lowest mIoU scores. This points to two key difficulties: covering all instances within a category so that the semantic masks are complete, and grouping objects under a unified semantic mask rather than identifying them as individual instances. The comparatively low mask recall on mixed data also suggests that the most difficult scenario is generalizing masking decisions effectively in the presence of visually dense objects, owing to the scale and spatial variability of objects in the image.
Role of Data Complexity: In Table 6, we compare the performance of GeoPixel on different data partitions, segregated according to the level of complexity in masking. Set-1A is less complex, with no intra-class segmentation differences: each instance of a single class is either individually masked or represented using a semantic mask uniformly across the dataset. Set-1B introduces a higher level of complexity, where larger instances within the same class are assigned individual instance masks while smaller objects are grouped under a common semantic mask. For example, two larger boats may be individually described, while all smaller boats in the image could be grouped together under a single semantic description. This structured ablation helps evaluate how GeoPixel handles varying levels of annotation granularity, providing insights into its ability to generalize across different scales and segmentation strategies. The results indicate that the inclusion of more complex annotations (Set-1B) leads to improved performance, especially in terms of segmentation accuracy and descriptive detail, as the model is trained with more diverse mask configurations.
Table 6. Effect of Data Complexity and Training Vision Projection (VP) Layer. T stands for Trainable and F for Frozen.
<table><tr><td colspan="2">TRAINING DATA</td><td rowspan="2">VP</td><td rowspan="2">CIDER</td><td rowspan="2">METEOR</td><td rowspan="2">AP50</td><td rowspan="2">MIOU</td><td rowspan="2">RECALL</td></tr><tr><td>SET-1A</td><td>SET-1B</td></tr><tr><td>✓</td><td></td><td>T</td><td>19.3</td><td>23.6</td><td>18.2</td><td>48.0</td><td>33.6</td></tr><tr><td>✓</td><td>✓</td><td>T</td><td>20.5</td><td>24.0</td><td>17.8</td><td>51.7</td><td>36.7</td></tr><tr><td>✓</td><td>✓</td><td>F</td><td>18.7</td><td>24.4</td><td>15.3</td><td>51.6</td><td>35.1</td></tr></table>
Vision Projection: Next, we study the effect of training the vision projection layer by comparing performance when the vision projection layer is frozen versus trainable during the fine-tuning stage. Table 6 summarizes the results. Training the vision projection layer yields an improvement on several metrics, highlighting the role of feature alignment.
Figure 5. Failure case due to incorrect mask association (left) and wrong instance segmentation in the same spatial region (right).
The image is a black and white aerial photograph of an airport, showing multiple airplanes parked on the tarmac, with the layout including defined parking areas and a clear runway. There are no visible people or moving vehicles, giving the scene a static appearance. The plane at the top is a large commercial aircraft with visible engines and a long fuselage, positioned on the ground. Additionally, the plane at the top, the plane at the top, the plane at the bottom, the plane at the right, the plane at the left, the plane at the bottom right, and the plane at the bottom right are all large commercial aircraft with distinct vertical stabilizers, positioned on the ground with other planes. The airplane at the top is positioned in the middle of the right is characterized by its large size, rectangular shape, and the presence of what appears to be a flatbed for cargo or large vehicles. A camera can also be observed at the top left. The absence of color and activity indicates that the photo might be from an older time or taken during a period of low airport activity.
The image is a view of a cityscape, showcasing a dense arrangement of buildings and streets that are laid out in a grid pattern, with various roof colors and textures indicating a mix of architectural elements. The scene has a still and quiet atmosphere due to the absence of visible people or moving vehicles. A large vehicle, likely a truck or bus, is situated in the top right area, positioned perpendicular to the nearby buildings. Vehicles along the urban layout are visible at different locations, providing a comprehensive overview of the urban environment's layout and design.
# 5.5. Limitations and Challenges
While GeoPixel has demonstrated significant advances in pixel-level grounding for high-resolution RS images, several challenges remain, particularly evident in the failure cases illustrated in Figure 5. The model occasionally produces erroneous masks due to ambiguities in the masking strategy, particularly in determining object presence and quantity, as well as in deciding whether semantic segmentation or instance-level annotation is appropriate. An incorrect decision in this regard can result in repetitive descriptions of visually similar objects, leading to inconsistencies in the generated output. Furthermore, such errors may manifest as fragmented or overlapping masks, introducing confusion in object delineation and undermining the overall segmentation quality. Moreover, the model often confuses instance masks within the same spatial location, particularly in densely populated or crowded images.
Future work may focus on addressing these challenges by incorporating more robust masking strategies and dynamic resolution adjustment techniques to improve segmentation accuracy in complex scenes. Additionally, extending GeoPixel's capabilities to integrate multimodal data, such as Synthetic Aperture Radar (SAR) or infrared imagery, could significantly enhance its ability to analyze diverse remote sensing datasets. GeoPixel is a significant step forward in leveraging the potential of LMMs for remote sensing, opening new avenues for research in this critical domain.
# 6. Conclusion
We present GeoPixel, a large multimodal model (LMM) designed specifically for the unique challenges of high-resolution remote sensing (RS) image analysis. GeoPixel introduces a robust end-to-end architecture capable of adaptive image partitioning and pixel-level grounding, enabling the precise interpretation and generation of geospatially aware descriptions in RS imagery. By addressing key limitations of current LMMs, such as low-resolution constraints and coarse object-grounding, GeoPixel provides a fine-grained visual understanding that bridges the gap between language and high-resolution RS data.
# References
Bai, J., Bai, S., Chu, Y., Cui, Z., Dang, K., Deng, X., Fan, Y., Ge, W., Han, Y., Huang, F., Hui, B., Ji, L., Li, M., Lin, J., Lin, R., Liu, D., Liu, G., Lu, C., Lu, K., Ma, J., Men, R., Ren, X., Ren, X., Tan, C., Tan, S., Tu, J., Wang, P., Wang, S., Wang, W., Wu, S., Xu, B., Xu, J., Yang, A., Yang, H., Yang, J., Yang, S., Yao, Y., Yu, B., Yuan, H., Yuan, Z., Zhang, J., Zhang, X., Zhang, Y., Zhang, Z., Zhou, C., Zhou, J., Zhou, X., and Zhu, T. Qwen technical report. arXiv preprint arXiv:2309.16609, 2023a.
Bai, J., Bai, S., Yang, S., Wang, S., Tan, S., Wang, P., Lin, J., Zhou, C., and Zhou, J. Qwen-vl: A versatile vision-language model for understanding, localization, text reading, and beyond. arXiv preprint arXiv:2308.12966, 1(2): 3, 2023b.
Bazi, Y., Bashmal, L., Al Rahhal, M. M., Ricci, R., and Melgani, F. Rs-llava: A large vision-language model for joint captioning and question answering in remote sensing imagery. Remote Sensing, 16(9):1477, 2024.
Cai, Z., Cao, M., Chen, H., Chen, K., Chen, K., Chen, X., Chen, X., Chen, Z., Chen, Z., Chu, P., Dong, X., Duan, H., et al. Internlm2 technical report, 2024.
Chen, G., Shen, L., Shao, R., Deng, X., and Nie, L. Lion: Empowering multimodal large language model with dual-level visual knowledge. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 26540-26550, June 2024a.
Chen, K., Zhang, Z., Zeng, W., Zhang, R., Zhu, F., and Zhao, R. Shikra: Unleashing multimodal llm's referential dialogue magic. arXiv preprint arXiv:2306.15195, 2023.
Chen, Z., Wu, J., Wang, W., Su, W., Chen, G., Xing, S., Zhong, M., Zhang, Q., Zhu, X., Lu, L., Li, B., Luo, P., Lu, T., Qiao, Y., and Dai, J. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 24185-24198, June 2024b.
Chiang, W.-L., Li, Z., Lin, Z., Sheng, Y., Wu, Z., Zhang, H., Zheng, L., Zhuang, S., Zhuang, Y., Gonzalez, J. E., Stoica, I., and Xing, E. P. Vicuna: An open-source chatbot impressing gpt-4 with $90\%$* chatgpt quality, March 2023. URL https://lmsys.org/blog/2023-03-30-vicuna/.
Dai, W., Li, J., Li, D., Tiong, A., Zhao, J., Wang, W., Li, B., Fung, P. N., and Hoi, S. Instructblip: Towards general-purpose vision-language models with instruction tuning. In Oh, A., Naumann, T., Globerson, A., Saenko, K., Hardt, M., and Levine, S. (eds.), Advances in Neural Information Processing Systems, volume 36, pp. 49250-49267. Curran Associates, Inc., 2023.
Ding, G., Khan, S., Tang, Z., Zhang, J., and Porikli, F. Towards better validity: Dispersion based clustering for unsupervised person re-identification. arXiv preprint arXiv:1906.01308, 2019.
Dong, X., Zhang, P., Zang, Y., Cao, Y., Wang, B., Ouyang, L., Wei, X., Zhang, S., Duan, H., Cao, M., Zhang, W., Li, Y., Yan, H., Gao, Y., Zhang, X., Li, W., Li, J., Chen, K., He, C., Zhang, X., Qiao, Y., Lin, D., and Wang, J. Internlm-xcomposer2: Mastering free-form text-image composition and comprehension in vision-language large model. arXiv preprint arXiv:2401.16420, 2024a.
Dong, X., Zhang, P., Zang, Y., Cao, Y., Wang, B., Ouyang, L., Zhang, S., Duan, H., Zhang, W., Li, Y., Yan, H., Gao, Y., Chen, Z., Zhang, X., Li, W., Li, J., Wang, W., Chen, K., He, C., Zhang, X., Dai, J., Qiao, Y., Lin, D., and Wang, J. Internlm-xcomposer2-4khd: A pioneering large vision-language model handling resolutions from 336 pixels to 4k hd. arXiv preprint arXiv:2404.06512, 2024b.
Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., Gelly, S., Uszkoreit, J., and Houlsby, N. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. URL https://openreview.net/forum?id=YicbFdNTTy.
Dubey, A., Jauhri, A., Pandey, A., Kadian, A., Al-Dahle, A., Letman, A., Mathur, A., Schelten, A., Yang, A., Fan, A., et al. The llama 3 herd of models. arXiv preprint arXiv:2407.21783, 2024.
Guo, Q., De Mello, S., Yin, H., Byeon, W., Cheung, K. C., Yu, Y., Luo, P., and Liu, S. Regiongpt: Towards region understanding vision language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 13796-13806, June 2024.
He, K., Chen, X., Xie, S., Li, Y., Dollár, P., and Girshick, R. Masked autoencoders are scalable vision learners. In 2022 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 15979-15988, 2022. doi: 10.1109/CVPR52688.2022.01553.
Hong, W., Wang, W., Lv, Q., Xu, J., Yu, W., Ji, J., Wang, Y., Wang, Z., Dong, Y., Ding, M., et al. Cogagent: A visual language model for gui agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14281-14290, 2024.
Hu, Y., Yuan, J., Wen, C., Lu, X., and Li, X. Rsgpt: A remote sensing vision language model and benchmark. arXiv preprint arXiv:2307.15266, 2023.
Hu, Z., Feng, G., Sun, J., Zhang, L., and Lu, H. Bidirectional relationship inferring network for referring image segmentation. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 4423-4432, 2020. doi: 10.1109/CVPR42600.2020.00448.
Huang, S., Hui, T., Liu, S., Li, G., Wei, Y., Han, J., Liu, L., and Li, B. Referring image segmentation via cross-modal progressive comprehension. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 10488-10497, 2020.
Hui, T., Liu, S., Huang, S., Li, G., Yu, S., Zhang, F., and Han, J. Linguistic structure guided context modeling for referring image segmentation. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part X 16, pp. 59-75. Springer, 2020.
Javaheripi, M., Bubeck, S., et al. Phi-2: the surprising power of small language models (2023). URL https://www.microsoft.com/en-us/research/blog/phi-2-the-surprising-power-of-small-language-models.
Kirillov, A., Mintun, E., Ravi, N., Mao, H., Rolland, C., Gustafson, L., Xiao, T., Whitehead, S., Berg, A. C., Lo, W.-Y., et al. Segment anything. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pp. 4015-4026, 2023.
Kuckreja, K., Danish, M. S., Naseer, M., Das, A., Khan, S., and Khan, F. S. Geochat: grounded large vision-language model for remote sensing. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 27831-27840, 2024. doi: 10.1109/CVPR52733.2024.02629.
Lai, X., Tian, Z., Chen, Y., Li, Y., Yuan, Y., Liu, S., and Jia, J. Lisa: Reasoning segmentation via large language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 9579-9589, 2024.
Li, J., Li, D., Savarese, S., and Hoi, S. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pp. 19730-19742. PMLR, 2023.
Li, R., Li, K., Kuo, Y.-C., Shu, M., Qi, X., Shen, X., and Jia, J. Referring image segmentation via recurrent refinement networks. In 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 5745-5753, 2018. doi: 10.1109/CVPR.2018.00602.
Li, Z., Yang, B., Liu, Q., Ma, Z., Zhang, S., Yang, J., Sun, Y., Liu, Y., and Bai, X. Monkey: Image resolution and text label are important things for large multi-modal models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 26763-26773, 2024.
Lin, Z., Liu, C., Zhang, R., Gao, P., Qiu, L., Xiao, H., Qiu, H., Lin, C., Shao, W., Chen, K., et al. Sphinx: The joint mixing of weights, tasks, and visual embeddings for multi-modal large language models. arXiv preprint arXiv:2311.07575, 2023.
Liu, H., Li, C., Li, Y., Li, B., Zhang, Y., Shen, S., and Lee, Y. J. Llava-next: Improved reasoning, OCR, and world knowledge, January 2024a. URL https://llava-vl.github.io/blog/2024-01-30-llava-next/.
Liu, H., Li, C., Wu, Q., and Lee, Y. J. Visual instruction tuning. Advances in neural information processing systems, 36, 2024b.
Liu, S., Hui, T., Huang, S., Wei, Y., Li, B., and Li, G. Cross-modal progressive comprehension for referring segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(9):4761-4775, 2022. doi: 10.1109/TPAMI.2021.3079993.
Liu, S., Cheng, H., Liu, H., Zhang, H., Li, F., Ren, T., Zou, X., Yang, J., Su, H., Zhu, J., et al. Llava-plus: Learning to use tools for creating multimodal agents. arXiv preprint arXiv:2311.05437, 2023a.
Liu, S., Ma, Y., Zhang, X., Wang, H., Ji, J., Sun, X., and Ji, R. Rotated multi-scale interaction network for referring remote sensing image segmentation. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 26648-26658, 2024c. doi: 10.1109/CVPR52733.2024.02517.
Liu, Y., Yang, B., Liu, Q., Li, Z., Ma, Z., Zhang, S., and Bai, X. Textmonkey: An OCR-free large multimodal model for understanding document. arXiv preprint arXiv:2403.04473, 2024d.
Liu, Z., He, Y., Wang, W., Wang, W., Wang, Y., Chen, S., Zhang, Q., Lai, Z., Yang, Y., Li, Q., et al. Interngpt: Solving vision-centric tasks by interacting with chatgpt beyond language. arXiv preprint arXiv:2305.05662, 2023b.
Luo, J., Pang, Z., Zhang, Y., Wang, T., Wang, L., Dang, B., Lao, J., Wang, J., Chen, J., Tan, Y., et al. Skysensegpt: A fine-grained instruction tuning dataset and model for remote sensing vision-language understanding. arXiv preprint arXiv:2406.10100, 2024.
Ma, C., Jiang, Y., Wu, J., Yuan, Z., and Qi, X. Groma: Localized visual tokenization for grounding multimodal large language models. In European Conference on Computer Vision, pp. 417-435. Springer, 2025.
Muhtar, D., Li, Z., Gu, F., Zhang, X., and Xiao, P. Lhrs-bot: Empowering remote sensing with vgi-enhanced large multimodal language model. arXiv preprint arXiv:2402.02544, 2024.
OpenAI. Chatgpt: Language model for dialogue applications. https://openai.com/chatgpt, 2023. Accessed: 2024-12-31.
Pang, C., Wu, J., Li, J., Liu, Y., Sun, J., Li, W., Weng, X., Wang, S., Feng, L., Xia, G.-S., et al. H2rsvlm: Towards helpful and honest remote sensing large vision language model. arXiv preprint arXiv:2403.20213, 2024.
Peng, Z., Wang, W., Dong, L., Hao, Y., Huang, S., Ma, S., Ye, Q., and Wei, F. Grounding multimodal large language models to the world. In The Twelfth International Conference on Learning Representations, 2024.
Radford, A., Kim, J. W., Hallacy, C., Ramesh, A., Goh, G., Agarwal, S., Sastry, G., Askell, A., Mishkin, P., Clark, J., et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pp. 8748-8763. PMLR, 2021.
Rasheed, H., Maaz, M., Shaji, S., Shaker, A., Khan, S., Cholakkal, H., Anwer, R. M., Xing, E., Yang, M.-H., and Khan, F. S. Glamm: Pixel grounding large multimodal model. In 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 13009-13018, 2024. doi: 10.1109/CVPR52733.2024.01236.
Ravi, N., Gabeur, V., Hu, Y.-T., Hu, R., Ryali, C., Ma, T., Khedr, H., Rädle, R., Rolland, C., Gustafson, L., Mintun, E., Pan, J., Alwala, K. V., Carion, N., Wu, C.-Y., Girshick, R., Dollar, P., and Feichtenhofer, C. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024. URL https://arxiv.org/abs/2408.00714.
Ren, Z., Huang, Z., Wei, Y., Zhao, Y., Fu, D., Feng, J., and Jin, X. Pixellm: Pixel reasoning with large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 26374-26383, June 2024.
Ryali, C., Hu, Y.-T., Bolya, D., Wei, C., Fan, H., Huang, P.-Y., Aggarwal, V., Chowdhury, A., Poursaeed, O., Hoffman, J., Malik, J., Li, Y., and Feichtenhofer, C. Hiera: A hierarchical vision transformer without the bells-and-whistles. ICML, 2023.
Soni, S., Dudhane, A., Debary, H., Fiaz, M., Munir, M. A., Danish, M. S., Fraccaro, P., Watson, C. D., Klein, L. J., Khan, F. S., et al. Earthdial: Turning multi-sensory earth observations to interactive dialogues. arXiv preprint arXiv:2412.15190, 2024.
Team, G., Anil, R., Borgeaud, S., Alayrac, J.-B., Yu, J., Soricut, R., Schalkwyk, J., Dai, A. M., Hauth, A., Millican, K., et al. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805, 2023.
Touvron, H., Lavril, T., Izacard, G., Martinet, X., Lachaux, M.-A., Lacroix, T., Rozière, B., Goyal, N., Hambro, E., Azhar, F., et al. Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971, 2023.
Waqas Zamir, S., Arora, A., Gupta, A., Khan, S., Sun, G., Shahbaz Khan, F., Zhu, F., Shao, L., Xia, G.-S., and Bai, X. isaid: A large-scale dataset for instance segmentation in aerial images. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition Workshops, pp. 28-37, 2019.
Wei, H., Kong, L., Chen, J., Zhao, L., Ge, Z., Yang, J., Sun, J., Han, C., and Zhang, X. Vary: Scaling up the vision vocabulary for large vision-language model. In European Conference on Computer Vision, pp. 408-424. Springer, 2025.
Xia, Z., Han, D., Han, Y., Pan, X., Song, S., and Huang, G. Gsva: Generalized segmentation via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 3858-3869, June 2024.
Xuan, S., Guo, Q., Yang, M., and Zhang, S. Pink: Unveiling the power of referential comprehension for multi-modal llms. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 13838-13848, June 2024.
Yang, J., Zhang, H., Li, F., Zou, X., Li, C., and Gao, J. Set-of-mark prompting unleashes extraordinary visual grounding in gpt-4v. arXiv preprint arXiv:2310.11441, 2023.
Yang, Z., Wang, J., Ye, X., Tang, Y., Chen, K., Zhao, H., and Torr, P. H. Language-aware vision transformer for referring segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence, pp. 1-18, 2024. doi: 10.1109/TPAMI.2024.3468640.
Ye, J., Hu, A., Xu, H., Ye, Q., Yan, M., Xu, G., Li, C., Tian, J., Qian, Q., Zhang, J., et al. Ureader: Universal OCR-free visually-situated language understanding with multimodal large language model. arXiv preprint arXiv:2310.05126, 2023a.
Ye, L., Rochan, M., Liu, Z., and Wang, Y. Cross-modal self-attention network for referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pp. 10502-10511, 2019.
Ye, Q., Xu, H., Xu, G., Ye, J., Yan, M., Zhou, Y., Wang, J., Hu, A., Shi, P., Shi, Y., et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023b.
You, H., Zhang, H., Gan, Z., Du, X., Zhang, B., Wang, Z., Cao, L., Chang, S.-F., and Yang, Y. Ferret: Refer and ground anything anywhere at any granularity. arXiv preprint arXiv:2310.07704, 2023.
Yuan, Z., Mou, L., Hua, Y., and Zhu, X. X. Rrsis: Referring remote sensing image segmentation. IEEE Transactions on Geoscience and Remote Sensing, 2024.
Zhan, Y., Xiong, Z., and Yuan, Y. Skyeyegpt: Unifying remote sensing vision-language tasks via instruction tuning with large language model. arXiv preprint arXiv:2401.09712, 2024.
Zhang, P., Dong, X., Zang, Y., Cao, Y., Qian, R., Chen, L., Guo, Q., Duan, H., Wang, B., Ouyang, L., Zhang, S., Zhang, W., Li, Y., Gao, Y., Sun, P., Zhang, X., Li, W., Li, J., Wang, W., Yan, H., He, C., Zhang, X., Chen, K., Dai, J., Qiao, Y., Lin, D., and Wang, J. Internlm-xcomposer2.5: A versatile large vision language model supporting long-contextual input and output. arXiv preprint arXiv:2407.03320, 2024a.
Zhang, S., Sun, P., Chen, S., Xiao, M., Shao, W., Zhang, W., Liu, Y., Chen, K., and Luo, P. Gpt4roi: Instruction tuning large language model on region-of-interest. arXiv preprint arXiv:2307.03601, 2023.
Zhang, W., Cai, M., Zhang, T., Zhuang, Y., and Mao, X. Earthgpt: A universal multimodal large language model for multisensor image comprehension in remote sensing domain. IEEE Transactions on Geoscience and Remote Sensing, 62:1-20, 2024b. doi: 10.1109/TGRS.2024.3409624.
Zhang, W., Cai, M., Zhang, T., Zhuang, Y., and Mao, X. Earthgpt: A universal multi-modal large language model for multi-sensor image comprehension in remote sensing domain. IEEE Transactions on Geoscience and Remote Sensing, 2024c.
Zhang, Y., Ma, Z., Gao, X., Shakiah, S., Gao, Q., and Chai, J. Groundhog: Grounding large language models to holistic segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pp. 14227-14238, June 2024d.
Zhao, Y., Lin, Z., Zhou, D., Huang, Z., Feng, J., and Kang, B. Bubogpt: Enabling visual grounding in multi-modal llms. arXiv preprint arXiv:2307.08581, 2023.
Zhu, D., Chen, J., Shen, X., Li, X., and Elhoseiny, M. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023.
# A. GeoPixelD dataset
Preprocessing and Marking: We utilize the instance-level annotated dataset iSAID (Waqas Zamir et al., 2019) to generate grounded conversations through our annotation pipelines. The images undergo a preprocessing step in which they are cropped into $800 \times 800$ pixel patches. Objects for instance annotations are selected based on an area threshold to ensure a reasonable size, thereby preventing the marker from obscuring a significant portion of the object and maintaining its distinguishability. A fixed-size $14 \times 14$ pixel marker is used, regardless of the actual dimensions of the object. However, the marker's placement is determined by the segmentation mask's area and shape. For large objects, the marker is positioned at the center of the mask if the calculated center falls within the mask boundaries; otherwise, it is adjusted to the nearest point on the object's border. For small objects, the center of the bounding box is aligned with a point on the polygon mask boundary, which typically results in an average marker overlap of $50\%$ with the object.
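This placement rule can be sketched as follows; the centroid test and the nearest-mask-pixel fallback are our approximations of the border adjustment described above.

```python
import numpy as np

def marker_position(mask: np.ndarray) -> tuple:
    """Place a 14x14 marker on an object given its non-empty binary mask (H, W)."""
    ys, xs = np.nonzero(mask)
    cy, cx = int(ys.mean()), int(xs.mean())  # mask centroid
    if mask[cy, cx]:                          # centroid falls inside the mask
        return cy, cx
    # Otherwise snap to the nearest mask pixel (approximates the border point).
    d2 = (ys - cy) ** 2 + (xs - cx) ** 2
    i = int(d2.argmin())
    return int(ys[i]), int(xs[i])
```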
In addition, multiple marking techniques were also explored, including bounding boxes, masks, contours, and numerical markers, to determine their impact on model accuracy and object fidelity. Our findings reveal that bounding boxes and contours tend to introduce superfluous visual information that can obscure the fine details of the object. In contrast, simple numerical markers placed directly on the object effectively signal its presence without compromising visual clarity or introducing noise, thereby preserving the integrity of object details for enhanced model performance.
# Set-of-Mark Prompting without Prior
Describe the object/objects of given image marked by number {markers_str} as if analyzing a map. The description should be enumerated in the following format: 1. <description> 2. <description> ...
1. The roundabout at the center of the intersection is marked with a red star-shaped sculpture, which serves as a focal point for the traffic flow.
2. To the north of the roundabout, there is a multi-story building with a large proof, which appears to be an institutional or commercial structure.
3. To the east of the roundabout, there is another multi-story building with a similar architectural style to the one in the north.
4. To the south of the roundabout, there is a smaller building with a flat roof, which could be an office or a service building.
5. To the west of the roundabout, there is a parking lot with several cars, indicating the presence of a commercial or public facility nearby.
6. There is a grassy area to the southeast of the roundabout, which provides a green space amidst the urban environment.

# Set-of-Mark Prompting with Prior
List down all the attributes of the {category_name} at the {pos} that is marked with number {mark_number} in one short sentence. Do not mention the marked number in output.
1. The roundabout at the center is marked by a distinctive red and orange star pattern.
2. The vehicle at the top right is a dark-colored sedan.
3. The vehicle at the top is a small, dark-colored car.
4. The vehicle at the top right is a dark-colored sedan.
5. The vehicle at the top right is a small, dark-colored car parked perpendicular to the curb, adjacent to building number 4.
6. The vehicle at the top is a blue car parked in a designated parking spot.
Figure 6. Comparative effectiveness of SOM prompting methods, highlighting the critical role of priors. Without priors, SOM relies solely on the VLM to detect and describe marked objects independently, resulting in inaccurate descriptions and hallucinated markers in complex remote sensing scenes. In contrast, SOM with priors utilizes explicit marker positions ( $\{\mathrm{pos}\}$ ) and predefined object categories ( $\{\mathrm{category\_name}\}$ ) as priors, providing structured prompts that reduce ambiguity and guide the VLM to produce precise and reliable descriptions. Incorrect parts are noted in red whereas correct parts are noted in green.
List down all the attributes of the vehicle at the left that is marked with number 1 in one short sentence. Do not mention the marked number in the output.
Figure 7. Comparison of open-source and proprietary models for prior-informed set of marks (SOM) prompting for RS imagery. Incorrect parts are noted in red whereas correct parts are noted in green.

The vehicle on the left is blue, appears to be a compact car, and looks like a parking lot. On the right is a parking lot area with a shipping container.
The vehicle is a white truck driving on a curved road.
The vehicle at the left is a semitruck with a blue cab and an attached orange container.
The vehicle is a blue truck.


1. The plane in the center is parked at a gate, has a white fuselage with wings extended, appears to be a commercial airplane and shows the tarmac due to sunlight from the top left.
2. The plane on the right is parked at a gate, has a white fuselage with extended wings, appears to be a commercial airliner, and can be seen from the side of the tower's side due to the sunlight from the top left.
3. The plane in the center is parked at a gate, has a white fuselage with extended windows and a large front seat. The seatback and casts a shadow toward the lower left side due to sunlight coming from the upper
1. The plane at the center is white and parked on a tramac.
2. The tram is white and parked on a tram.
2. The plane at the right is white and parked on a tarmac.
3. The plane at the center is white and parked on a tarmac.
1. The plane at the center is white with a red stripe and is parked outside of the building.
2. The plane on the right is white with a blue stripe and is parked outside of town.
3. The plane at the center is white with red stripe and is parked at a gate.
1. The plane at the center is a large commercial aircraft with a predominantly white body and blue engines.
2. The plane at the right is a large commercial aircraft with a predominantly white body and blue engines.
3. The plane at the center is a large commercial aircraft with two engines positioned on the tarmac facing a terminal gate.
Prompt Template: Paraphrase the following description of an image viewed from a satellite in a single paragraph while ensuring that all the words enclosed in single quotes are included and preserved exactly as they are. Remove any mention of background, foreground, or camera position. Image Description: {Holistic Image Annotation} {Individual Instance Annotation} {Cluster/Crowd Annotation}
Prompt: Paraphrase the following description of an image viewed from a satellite in a single paragraph while ensuring that all the words enclosed in single quotes are included and preserved exactly as they are. Remove any mention of background, foreground, or camera position. Image Description: The image is an aerial photograph of a residential area with docks and boats. The water appears calm, and the docks are equipped with vehicles, suggesting recreational or transportation vehicles, surrounding buildings and statues generally used for private property, indicating a peaceful possibly affluent neighborhood. The "pier-1" at the center has a single boat docked to it, with a structure that includes a roof and appears to be made of wood. The "pier-2" at the right is equipped with a boat and has a structure extending over the water, connected to a docking area on land. The "pier-3" on the left is elongated with a small structure at its end, extending into the water and connected to the shore by a wooden plank walkway. The "swimming-pool-4" is rectangular, has a clear blue surface, and is surrounded by a deck area with lounge chairs. The "pier-4" at the bottom left is rectangular, filled with blue water, and is surrounded by a white wall with lounge chairs. The "boat-4" at the right is white, appears to be a motor boat, and is docked at a wooden pier. The "boat-7" at the right is white with a blue stripe, appears to be a motor boat, and is docked at a wooden pier. The "boat-8" at the center is a small motor boat with a single outboard motor, docked perpendicular to the pier. The "boat-9" at the right is white with a single cabin and is docked at a wooden pier. There is no visible activity, giving the scene a tranquil atmosphere.


GCG Description: The aerial photograph of a residential area features docks and boats, with calm water suggesting recreational or transportation use. The land surrounding the docks has greenery and private property, indicating a peaceful, possibly affluent neighborhood. The "pier-1" at the center has a single boat docked to it, with a structure including a roof made of wood. Nearby, the "pier-2" at the right is equipped with a boat and has a structure extending over the water, connected to a docking area on land. The "pier-3" on the left is elongated with a small structure at its end, extending into the water and connected to the shore by a wooden plank walkway. Two swimming pools, "swimming-pool-4" and "swimming-pool-5", are rectangular and surrounded by deck areas with lounge chairs. Various boats are visible at right, including a white motor boat "boat-6", "boat-7", a white motor boat, and "boat-9", a white boat with a single cabin, all docked at wooden piers. A small motor boat "boat-8" with a single outboard motor is also docked, perpendicular to the pier. The scene is tranquil, with no visible activity.
Prompt: Paraphrase the following description of an image viewed from a satellite in a single paragraph while ensuring that all the words enclosed in single quotes are included and preserved exactly as they are. Remove any mention of background, foreground, or camera position. Image Description: The image is an aerial view of a tennis court complex with multiple courts. The courts are arranged in a grid pattern, each with its own net and markings. The surrounding area includes tennis courts, including the tennis courts on top of the tennis courts or landscaping. An aerial view of "light-tennis-courts-1" with blue playing surfaces, marked for play, surrounded by greenery and adjacent to a parking area. There are no people visible on the courts, suggesting that play may not be in session at the time the photo was taken.


GCG Description: The image is a view of "eight-tennis-courts-" with blue playing surfaces, marked for play, surrounded by greenery and adjacent to a parking area. It features multiple tennis courts arranged in a grid pattern, each with its own net and markings, situated amidst greenery, indicating the presence of trees or landscaping.
Figure 8. Unifying Annotations through LLM Paraphrasing and Text Marking to track associated masks. Objects are indexed numerically (e.g., "object-N"), and holistic (blue), individual (teal), and cluster (green) annotations are concatenated into a single image description. Paraphrasing instructions with combined description produce a concise, consistent GCG description that eliminates redundancy while preserving object-mask associations, even with reordering.
Figure 9. Qualitative results of GeoPixel in referring remote sensing expression segmentation. The figure highlights GeoPixel's ability to interpret referring expressions of varying lengths and generate precise segmentation masks, adapting to scale variations, as shown in the ground track fields. Spatial descriptors (e.g., "right", "lower right") and object characteristics (e.g., "red") are interpreted with precision to achieve accurate segmentation.
2501.13xxx/2501.13925/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:59bf6fbc4355cd9e1a7a1afd6e892709db59fd7e14a5a520e34efc8f8668aeaa
size 803318
2501.13xxx/2501.13925/layout.json
ADDED
The diff for this file is too large to render. See raw diff
2501.13xxx/2501.13926/85005fd1-a0be-4094-9326-d9dc14bda98c_content_list.json
ADDED
The diff for this file is too large to render. See raw diff
2501.13xxx/2501.13926/85005fd1-a0be-4094-9326-d9dc14bda98c_model.json
ADDED
The diff for this file is too large to render. See raw diff
2501.13xxx/2501.13926/85005fd1-a0be-4094-9326-d9dc14bda98c_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b6e41a52ad5c34f2eb721c828e56c09f7e179b5d473e68c842d7daa6897d60ea
size 6710833
2501.13xxx/2501.13926/full.md
ADDED
The diff for this file is too large to render. See raw diff
2501.13xxx/2501.13926/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:46b58847eabf8254594168343d6ba72017e91d3ac6004752d3f8125221a11a63
size 1681758
2501.13xxx/2501.13926/layout.json
ADDED
The diff for this file is too large to render. See raw diff
2501.13xxx/2501.13928/fd24d2f4-5582-494f-93fc-ab143e630789_content_list.json
ADDED
The diff for this file is too large to render. See raw diff
2501.13xxx/2501.13928/fd24d2f4-5582-494f-93fc-ab143e630789_model.json
ADDED
The diff for this file is too large to render. See raw diff
2501.13xxx/2501.13928/fd24d2f4-5582-494f-93fc-ab143e630789_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b03c0e47786c52ee80ad4bce580d2be8dc747d29e0c7756846e9881d84239835
size 23642990
2501.13xxx/2501.13928/full.md
ADDED
@@ -0,0 +1,434 @@
# Fast3R: Towards 3D Reconstruction of $1000+$ Images in One Forward Pass



Figure 1. Fast3R is a method for 3D reconstruction of $1000+$ unordered, unposed images in a single forward pass.

# Abstract

Multi-view 3D reconstruction remains a core challenge in computer vision, particularly in applications requiring accurate and scalable representations across diverse perspectives. Current leading methods such as DUSt3R employ a fundamentally pairwise approach, processing images in pairs and necessitating costly global alignment procedures to reconstruct from multiple views. In this work, we propose Fast 3D Reconstruction (Fast3R), a novel multi-view generalization of DUSt3R that achieves efficient and scalable 3D reconstruction by processing many views in parallel. Fast3R's Transformer-based architecture forwards $N$ images in a single forward pass, bypassing the need for iterative alignment. Through extensive experiments on camera pose estimation and 3D reconstruction, Fast3R demonstrates state-of-the-art performance, with significant improvements in inference speed and reduced error accumulation. These results establish Fast3R as a robust alternative for multi-view applications, offering enhanced scalability without compromising reconstruction accuracy.

# 1. Introduction

3D reconstruction from multiple views has long been a foundational task across applications in autonomous navigation, augmented reality, and robotics [31, 53]. Establishing correspondences across images, known as multi-view matching, is central to these applications and enables an accurate scene representation. Traditional reconstruction pipelines, such as those based on Structure-from-Motion (SfM) [44] and Multi-View Stereo (MVS) [17], fundamentally rely on image pairs to reconstruct 3D geometry. While effective in some settings, these methods require extensive engineering to manage the sequential stages of feature extraction, correspondence matching, triangulation, and global alignment, limiting scalability and speed.

This traditional "pipeline" paradigm has recently been challenged by DUSt3R [61], which directly predicts 3D structure from RGB images. It achieves this with a design that "cast[s] the pairwise reconstruction problem as a regression of pointmaps, relaxing the hard constraints of usual projective camera models" [61], yielding impressive robustness across challenging viewpoints. This represents a radical shift in 3D reconstruction, as an end-to-end learnable solution is less prone to pipeline error accumulation, while also being dramatically simpler.

On the other hand, a fundamental limitation of DUSt3R is its restriction to two image inputs. While image pairs are an important use case, often one is interested in reconstructing from more than two views, as when scanning objects [39] or scenes [4, 6, 19, 55, 68], e.g. for asset generation or mapping. To process more than two images, DUSt3R computes $\mathcal{O}(N^2)$ pairs of pointmaps and performs a global alignment optimization procedure. This process can be computationally expensive, scaling poorly as the collection of images grows. For instance, it runs out of memory (OOM) with only 48 views on an A100 GPU.

Moreover, such a process is still fundamentally pairwise, which limits the model's context, affecting both learning during training and ultimate accuracy during inference. In this sense, DUSt3R suffers from the same pairwise bottleneck as traditional SfM and MVS methods.

We propose Fast3R, a novel multi-view reconstruction framework designed to overcome these limitations. Building on DUSt3R's foundations, Fast3R leverages a Transformer-based architecture [56] that processes multiple images in parallel, allowing $N$ images to be reconstructed in a single forward pass. By eliminating the need for sequential or pairwise processing, each frame can simultaneously attend to all other frames in the input set during reconstruction, significantly reducing error accumulation. Perhaps surprisingly, Fast3R also takes significantly less time.

**Our contributions are threefold:**

1. We introduce Fast3R, a Transformer-based model for multi-view pointmap estimation that obviates the need for global postprocessing, resulting in significant improvements in speed, computational overhead, and scalability.
2. We show empirically that the model performance improves by scaling along the view axis. For camera pose localization and reconstruction tasks, the model improves when trained on progressively larger sets of views. Per-view accuracy further improves when more views are used during inference, and the model can generalize to significantly more views than seen during training.
3. We demonstrate state-of-the-art performance in camera pose estimation with significant inference-time improvements. On CO3Dv2 [39], Fast3R gets $99.7\%$ accuracy within 15 degrees for pose estimation, over a $14\times$ error reduction compared to DUSt3R with global alignment.

Fast3R offers a scalable and accurate alternative for real-world applications, setting a new standard for efficient multi-view 3D reconstruction.

# 2. Related Work

Multi-view 3D reconstruction: Almost all modern 3D reconstruction approaches are based on the traditional multiview geometry (MVG) pipeline [20]. MVG-based methods first identify corresponding pixels between image pairs, and then use camera models and projective multiview geometry to lift these correspondences to 3D points. The process happens in sequential stages: feature extraction, finding pairwise image correspondences, triangulation to 3D and pairwise relative camera pose, and global bundle adjustment. However, any pipeline approach is prone to accumulating errors, which are especially common in hand-crafted components. Moreover, the sequential nature prevents parallelization, which limits speed and scalability.

MVG approaches have existed since the early days of computer vision, and are still in use for a reason: they can be highly accurate when they do not catastrophically fail. The latest multi-view geometry pipelines like COLMAP [44] or ORB-SLAM2 [30] incorporate nearly 60 years of compounding engineering improvements, but these approaches still catastrophically fail $>40\%$ of the time on static scenes like ETH-3D [52], which can actually be considered an easy case due to dense image coverage of the scene.

Much recent work has successfully addressed robustness and speed by replacing increasingly large components of MVG pipelines with end-to-end learned versions that are faster and reduce the rate of catastrophic failures [48, 58, 73]. For example, [13, 18, 24, 42, 51, 69] improve feature extraction and correspondences, [27, 50, 59, 72] learn to estimate camera pose, and [52] introduce a bundle adjustment layer. [61] contains an excellent and comprehensive survey of such efforts. Overall, the trend is towards replacing increasingly large components with end-to-end solutions.

Pointmap representation: DUSt3R [61] takes this evolution the furthest by proposing pointmap regression to replace everything in the MVG pipeline up to global pairwise alignment. Rather than first attempting to solve for camera parameters in order to triangulate corresponding pixels, DUSt3R trains a model to directly predict 3D pointmaps for pairs of images in a shared coordinate frame. Other MVG component tasks such as relative camera pose estimation and depth estimation can be recovered from the resulting pointmap representation. However, DUSt3R's pairwise assumption is a limitation, as it requires inference on $\mathcal{O}(N^2)$ image pairs and then a global alignment optimization, which is per-scene and does not improve with more data. Moreover, this process quickly becomes slow or crashes due to exceeded system memory, even for relatively modest numbers of images.

DUSt3R has inspired several follow-ups. MASt3R [24] adds a local feature head to each decoder's output, while MonST3R [70] does a data-driven exploration of dynamic scenes, but both are still fundamentally pairwise methods.

Figure 2. Model architecture of Fast3R. Built upon a novel Transformer-based architecture which supports bidirectional information flow, Fast3R is able to process dense input views simultaneously.

MASt3R in particular does not make any changes to the global alignment methodology. Concurrently with our work, Spann3R [57] treats images as an ordered sequence (e.g. from a video) and incrementally reconstructs a scene using a pairwise sliding window network, along with a learned spatial memory system. This extends DUSt3R to handle more images, but Spann3R's incremental pairwise processing cannot fix reconstructions from earlier frames, which can cause errors to accumulate. Crucially, Fast3R's transformer architecture uses all-to-all attention, allowing the model to reason simultaneously and jointly over all frames without any assumption of image order. Fast3R removes sequential dependencies, enabling parallelized inference across many devices in a single forward pass.

# 3. Model

Fast3R is a transformer-based model that predicts a 3D pointmap from a set of unordered and unposed images. The model architecture is designed to be scalable to over 1000 images during inference, though during training we use image masking to train it with far fewer. In this section, we detail our implementation of Fast3R, and discuss the design choices that enable its scalability.

# 3.1. Problem definition

Taking a set of $N$ unordered and unposed RGB images $\mathbf{I} \in \mathbb{R}^{N \times H \times W \times 3}$ as input, Fast3R reconstructs the 3D structure of the scene by predicting the corresponding pointmap $\mathbf{X}$, where $\mathbf{X} \in \mathbb{R}^{N \times H \times W \times 3}$. A pointmap is a set of 3D locations indexed by pixels in an image $\mathbf{I}$, enabling the derivation of camera poses, depths, and 3D structures.

Fast3R predicts two pointmaps: a local pointmap $\mathbf{X}_L$ and a global pointmap $\mathbf{X}_G$, with corresponding confidence maps $\Sigma_L$ and $\Sigma_G$ (of shape $\Sigma \in \mathbb{R}^{N\times H\times W}$). Overall, Fast3R maps $N$ RGB images to $N$ local and global pointmaps:

$$
\mathrm{Fast3R}: \mathbf{I} \rightarrow (\mathbf{X}_{\mathrm{L}}, \Sigma_{\mathrm{L}}, \mathbf{X}_{\mathrm{G}}, \Sigma_{\mathrm{G}})
$$

The global pointmap $\mathbf{X}_G$ is in the coordinate frame of the first camera, and $\mathbf{X}_L$ is in the coordinate frame of the viewing camera, as shown in Figure 2.

# 3.2. Training Objective

This section describes the loss, using the notation in Section 3.1 above. Fast3R's predictions of $(\hat{\mathbf{X}}_{\mathrm{L}},\hat{\Sigma}_{\mathrm{L}},\hat{\mathbf{X}}_{\mathrm{G}},\hat{\Sigma}_{\mathrm{G}})$ are trained using generalized versions of the pointmap loss in DUSt3R [61].

Our total loss is the combination of pointmap losses for the local and global pointmaps:

$$
\mathcal{L}_{\text{total}} = \mathcal{L}_{\mathbf{X}_{\mathrm{G}}} + \mathcal{L}_{\mathbf{X}_{\mathrm{L}}} \tag{1}
$$

which are confidence-weighted versions of the normalized 3D pointwise regression loss.

Normalized 3D pointwise regression loss: The normalized regression loss for $\mathbf{X}$ is a multi-view version of that in DUSt3R [61] or monocular depth estimation [14, 36, 66]. It is the $L_{2}$ loss between the normalized predicted pointmaps and normalized target pointmaps, rescaled by the mean Euclidean distance to the origin:

$$
\ell_{\mathrm{regr}}(\hat{\mathbf{X}}, \mathbf{X}) = \left\| \frac{1}{\hat{z}} \hat{\mathbf{X}} - \frac{1}{z} \mathbf{X} \right\|_{2}, \quad z = \frac{1}{|\mathbf{X}|} \sum_{x \in \mathbf{X}} \|x\|_{2} \tag{2}
$$

Note that the predictions and targets are independently normalized by the mean Euclidean distance to the origin (i.e., $\hat{z}$ is computed from $\hat{\mathbf{X}}$ in the same way that $z$ is computed from $\mathbf{X}$).
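
For concreteness, a minimal PyTorch-style sketch of Eq. (2) (illustrative, not the reference implementation):

```python
import torch

def regr_loss(X_hat: torch.Tensor, X: torch.Tensor) -> torch.Tensor:
    """Per-point normalized regression loss of Eq. (2).

    X_hat, X: (..., 3) predicted and ground-truth pointmaps in a shared frame.
    Returns a per-point loss map of shape (...,).
    """
    z_hat = X_hat.norm(dim=-1).mean()  # mean Euclidean distance to the origin
    z = X.norm(dim=-1).mean()          # computed independently for the target
    return (X_hat / z_hat - X / z).norm(dim=-1)
```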

Figure 3. Qualitative examples of Fast3R's output. The text on the yellow sign says "Caution, cleaning in progress" and is legible if zoomed in.
Pointmap loss: As in [61], we use a confidence-adjusted version of the loss above, using the confidence score $\hat{\Sigma}$ predicted by the model. The total loss for a pointmap is

$$
\mathcal{L}_{\mathbf{X}}(\hat{\Sigma}, \hat{\mathbf{X}}, \mathbf{X}) = \frac{1}{|\mathbf{X}|} \sum \hat{\Sigma}_{+} \cdot \ell_{\mathrm{regr}}(\hat{\mathbf{X}}, \mathbf{X}) + \alpha \log (\hat{\Sigma}_{+}) \tag{3}
$$
Since the log term requires the confidence scores to be positive, we enforce $\hat{\Sigma}_{+} = 1 + \exp (\hat{\Sigma})$ . Our intuition is that the confidence weighting helps the model deal with label noise. Like DUSt3R, we train on real-world scans typically containing systematic errors in the underlying pointmap labels. For example, glass or thin structures are often not reconstructed properly in the ground-truth laser scans [4, 68], and errors in camera registration will cause misalignments between the images and pointmap labels [66].
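
A sketch of the confidence-adjusted loss of Eq. (3), with the $\hat{\Sigma}_{+} = 1 + \exp(\hat{\Sigma})$ reparameterization (the weight `alpha` below is illustrative; its value is not given in this excerpt):

```python
import torch

def pointmap_loss(sigma_raw: torch.Tensor, X_hat: torch.Tensor,
                  X: torch.Tensor, alpha: float = 0.2) -> torch.Tensor:
    """Confidence-weighted pointmap loss of Eq. (3)."""
    sigma_plus = 1.0 + torch.exp(sigma_raw)  # strictly positive confidences
    per_point = regr_loss(X_hat, X)          # Eq. (2), sketched above
    # Average the weighted residual together with the log-confidence term.
    return (sigma_plus * per_point + alpha * torch.log(sigma_plus)).mean()
```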

# 3.3. Model architecture

The Fast3R meta-architecture is inspired by DUSt3R, and has three components: image encoding, fusion transformer, and pointmap decoding, as shown in Figure 2. We emphasize that Fast3R makes no assumptions on the ordering of images in $\mathbf{I}$, and all output pointmaps and confidence maps $(\mathbf{X}_{\mathrm{L}}, \Sigma_{\mathrm{L}}, \mathbf{X}_{\mathrm{G}}, \Sigma_{\mathrm{G}})$ are predicted simultaneously, not sequentially.

Image Encoder: Fast3R encodes each image $I_{i}\in \mathbf{I}$ to a set of patch features $H_{i}$, using a feature extractor $\mathcal{F}$. This is done independently per image, yielding a sequence of image patch features $H_{i} = \{h_{i,j}\}_{j = 1}^{HW / P^{2}}$ for each image:

$$
H_{i} = \mathcal{F}(I_{i}), \quad i \in \{1, \dots, N\} \tag{4}
$$

We follow DUSt3R's design and use CroCo ViT [63] as our encoder, though we found DINOv2 [33] works similarly.

Before passing the image patch features $\mathbf{H}$ to the fusion transformer, we add one-dimensional image index positional embeddings.

Index embeddings help the fusion transformer determine which patches come from the same image and are the mechanism for identifying $I_{1}$, which importantly defines the global coordinate frame. This is critical for allowing the model to implicitly reason about camera pose jointly for all images from an otherwise permutation-invariant set of tokens.

Fusion Transformer: Most of the computation in Fast3R happens in the fusion transformer. We use a 24-layer transformer similar to ViT-L [11]. This fusion transformer takes the concatenated encoded image patches from all views and performs all-to-all self-attention. This operation provides Fast3R with full context from all views, beyond the information provided in pairs alone.
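
The fusion step amounts to flattening the view axis so that standard self-attention becomes all-to-all across views. A minimal PyTorch sketch of this idea (module names are illustrative, and the layer count is reduced for brevity):

```python
import torch
import torch.nn as nn

N, P, D = 8, 192, 1024         # views, patches per view, embedding dim
tokens = torch.randn(N, P, D)  # per-view encoder outputs H_i (Eq. 4)

# Flatten the view axis into one long sequence of N*P tokens so every patch
# can attend to patches from every other view, not just from a paired view.
seq = tokens.reshape(1, N * P, D)

layer = nn.TransformerEncoderLayer(d_model=D, nhead=16,
                                   dim_feedforward=4 * D, batch_first=True)
fusion = nn.TransformerEncoder(layer, num_layers=2)  # 24 layers in the paper

fused = fusion(seq).reshape(N, P, D)  # split back into per-view token sets
```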
Pointmap Decoding Heads: Finally, Fast3R uses two separate DPT [37] decoding heads to map these tokens to local and global pointmaps $(\mathbf{X}_L, \mathbf{X}_G)$ , and confidence maps $(\Sigma_L, \Sigma_G)$ .

Image index positional embedding generalization: We would like Fast3R to be able to handle many views at inference, more than were used to train the model. A naive way to embed views during testing would be to embed them in the same way as during training: i.e. use the same Spherical Harmonic frequencies [49] to embed raw indices $SH(\{1,\dots,N\})$ during training, and $SH(\{1,\dots,N_{\mathrm{test}}\})$ during inference. In LLMs this causes poor performance, and in preliminary experiments, we also found that the resulting model did not work well when the number of input images exceeded that used during training (Sec. 5.3). We therefore adopt Position Interpolation [5], a solution from LLMs, where during training we randomly draw $N$ indices from a larger pool $N'$ of possible samples. [5] draws samples using a regular grid since the LLM inputs form a regular ordered sequence. Our images are unordered, so we draw $N \subset \{1, \dots, N'\}$ uniformly at random. To the transformer, this strategy looks indistinguishable from masking out images, and $N' \gg N$ controls the masking ratio. This strategy enables Fast3R to handle $N = 1000$ images during inference, even if only trained with $N = 20$ images.
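
A sketch of this index-sampling strategy. Two details here are assumptions for illustration and are not specified in this excerpt: the anchor view $I_1$ is pinned to index 0 so it stays identifiable, and indices are embedded with a standard sinusoidal table:

```python
import torch

def sample_view_indices(n_views: int, pool_size: int = 1000) -> torch.Tensor:
    """Draw N distinct indices from {0, ..., pool_size - 1}.

    Assumption: index 0 is reserved for the anchor view I_1, which
    defines the global coordinate frame.
    """
    rest = 1 + torch.randperm(pool_size - 1)[: n_views - 1]
    return torch.cat([torch.zeros(1, dtype=torch.long), rest])

def index_embedding(indices: torch.Tensor, dim: int = 1024) -> torch.Tensor:
    """Sinusoidal embedding over the full pool; training only ever sees a
    random subset of rows, which to the transformer looks like masking."""
    freqs = torch.exp(torch.arange(0, dim, 2).float()
                      * (-torch.log(torch.tensor(10000.0)) / dim))
    angles = indices[:, None].float() * freqs[None, :]
    return torch.cat([torch.sin(angles), torch.cos(angles)], dim=-1)
```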

# 3.4. Memory-Efficient Implementation

With a standard transformer architecture and a single-pass inference procedure, Fast3R is able to leverage many of the recent advances designed to improve scalability at train and inference time [2, 12, 22, 54].

For example, model size and throughput can be increased by sharding the model and/or data minibatch across multiple machines, such as through model parallelism [21, 45], data parallelism [25], and tensor parallelism [32, 46]. During training, optimizer weights, states, and gradients can also be sharded [35]. Systems-level advances have also been proposed, such as FlashAttention [7, 8], which uses highly optimized GPU kernels leveraging the hardware topology to compute attention in a time- and memory-efficient way. These are implemented in libraries such as FairScale [15], DeepSpeed [35] and HuggingFace [64], and require significant engineering effort.

The Fast3R meta-architecture is explicitly designed to take advantage of these efforts. We leverage two different forms of parallelism at training and inference time, as well as FlashAttention, described in more detail in Sec. 4. More broadly, we believe that our approach will continue to benefit in the longer term as transformer-based scaling infrastructure continues to mature.

# 4. Experiments

Training Data We train on a mix of real-world object-centric and scene scan data: CO3D [39], ScanNet++ [68], ARKitScenes [4], Habitat [43], BlendedMVS [67], and MegaDepth [26]. This is a subset of the datasets in DUSt3R, specifically 6 of the 9 datasets.

Baselines DUSt3R [61] is the closest approach to ours, and competitive on visual odometry and reconstruction benchmarks. That paper contains extensive comparisons against other methods, and we adopt it as our main baseline. We additionally consider DUSt3R's follow-up work, MASt3R [24], as well as a concurrent work Spann3R [57], which also seeks to replace DUSt3R's expensive global alignment stage by sequentially processing frames with an external spatial memory. For camera pose estimation and 3D reconstruction, we include comparisons to task-specific methods.

Architecture Details In our experiments, we use the following components for the meta-architecture:

1. The Image Encoder uses a ViT-Large [63] architecture, initialized with DUSt3R pretrained weights [61]. The ViT-L uses a $16 \times 16$ patch size, and has 24 layers, 16 heads, embedding dimension size 1024, and MLP ratio 4.0.
2. The Fusion Transformer is a ViT-Large model initialized from scratch. We use a pool size of $N' = 1000$ for image index embeddings.
3. The Pointmap Decoding Heads include two heads: global and local, both of which are DPT heads following [37]. We initialize the global head with DUSt3R pretrained weights, and initialize the local head from scratch.

Training Details Our models are trained on images with 512 resolution (512 on the longest side) using AdamW [29] for 174K steps, with a learning rate of 0.0001 and a cosine annealing schedule. Unlike DUSt3R, we do not use staged training. We implement multi-view dataloaders that can load $N$ images in each sample. We train with batch size 128, with each sample consisting of a tuple of $N = 20$ views. This process takes 6.13 days on 128 Nvidia A100-80GB GPUs. We additionally make use of several strategies to enable efficient training. First, we use FlashAttention [7, 8] to improve time and memory efficiency. Even so, a naive implementation runs out of memory even with batch size 1 when $N > 16$, so we use DeepSpeed ZeRO stage 2 training [35], whereby optimizer states, moment estimates, and gradients are partitioned across different machines. This enables us to train with up to $N = 28$ views per data sample, with a batch size of one per GPU.
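
As a rough illustration of the ZeRO stage-2 setup described above (field values are assumptions for the sketch, not the paper's exact configuration):

```python
import deepspeed  # training-time sharding of optimizer state and gradients

ds_config = {
    "train_micro_batch_size_per_gpu": 1,  # one N-view tuple per GPU
    "zero_optimization": {"stage": 2},    # partition optimizer state + grads
    "bf16": {"enabled": True},
}

# model and optimizer are assumed to be defined elsewhere:
# engine, optimizer, _, _ = deepspeed.initialize(
#     model=model, optimizer=optimizer, config=ds_config)
```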

# 4.1. Inference Efficiency

At inference time, we aim to handle $1000+$ views compared to 20 during training, which requires additional optimizations. We observe the memory bottleneck at inference is in the DPT heads producing the pointmaps: with 320 views on a single A100 GPU, over $60\%$ of VRAM is consumed by activations from the DPT heads, largely due to each needing to upsample 1024 tokens into a high-resolution $512 \times 512$ image. To address this, we implement a simple version of tensor parallelism, putting the model on GPU 0 and then copying the DPT heads to each of the $K - 1$ other GPUs. When processing a batch of $N \approx 1000$ images, we pass the entire batch through the ViT encoder and global fusion decoder, and then split the outputs across $K$ machines for parallel DPT head inference.
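
A sketch of the head parallelism described above: the encoder and fusion transformer run on one GPU, and the activation-heavy DPT decoding is sharded across replicas on the remaining GPUs (names are illustrative):

```python
import copy
import torch

def parallel_head_inference(tokens: torch.Tensor, head: torch.nn.Module,
                            num_gpus: int) -> torch.Tensor:
    """tokens: (N, ...) fused tokens for N views; head: a DPT-style decoder."""
    replicas = [copy.deepcopy(head).to(f"cuda:{k}") for k in range(num_gpus)]
    chunks = tokens.chunk(num_gpus, dim=0)  # shard along the view axis
    outs = [replicas[k](chunks[k].to(f"cuda:{k}")) for k in range(num_gpus)]
    return torch.cat([o.to("cuda:0") for o in outs], dim=0)
```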

Table 2 shows the inference time and memory usage as we increase the number of views. Fast3R is able to process up to 1500 views in a single pass, whereas DUSt3R runs out of memory past 32 views. Fast3R also has a significantly faster inference time, with gains that increase with more views.
<table><tr><td rowspan="2">Methods</td><td colspan="5">CO3Dv2 [39]</td><td>RealEstate10K [75]</td><td rowspan="2">FPS</td></tr><tr><td>RRA@15↑</td><td>RRA@5↑</td><td>RTA@15↑</td><td>RTA@5↑</td><td>mAA(30)↑</td><td>mAA(30)↑</td></tr><tr><td>Colmap+SG [9, 41]</td><td>36.1</td><td>24.4</td><td>27.3</td><td>17.2</td><td>25.3</td><td>45.2</td><td>0.056</td></tr><tr><td>PixSfM [28]</td><td>33.7</td><td>26.1</td><td>32.9</td><td>17.6</td><td>30.1</td><td>49.4</td><td>-</td></tr><tr><td>RelPose [71]</td><td>57.1</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.02</td></tr><tr><td>PosReg [59]</td><td>53.2</td><td>-</td><td>49.1</td><td>-</td><td>45.0</td><td>-</td><td>0.015</td></tr><tr><td>PoseDiff [59]</td><td>80.5</td><td>59.5</td><td>79.8</td><td>61.7</td><td>66.5</td><td>48.0</td><td>0.015</td></tr><tr><td>RelPose++ [27]</td><td>(85.5)</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.02</td></tr><tr><td>DUSt3R [60]</td><td>96.2</td><td>-</td><td>86.8</td><td>-</td><td>76.7</td><td>67.7</td><td>0.78</td></tr><tr><td>MASt3R [23]</td><td>94.6</td><td>93.2</td><td>91.9</td><td>86.2</td><td>81.8</td><td>76.4</td><td>0.23</td></tr><tr><td>Fast3R-no-outdoor (Ours)</td><td>99.7</td><td>97.4</td><td>87.1</td><td>76.1</td><td>82.5</td><td>-</td><td>251.1</td></tr><tr><td>Fast3R (Ours)</td><td>96.2</td><td>90.2</td><td>81.6</td><td>68.2</td><td>75.0</td><td>72.7</td><td>251.1</td></tr></table>
Table 1. Multi-view pose regression on the CO3D [39] and RealEstate10K [75] datasets. Parentheses denote methods that do not report results on the 10 views set; we report their best for comparison (8 views). Fast3R does not assume known camera intrinsics.
<table><tr><td rowspan="2"># Views</td><td colspan="2">Fast3R</td><td colspan="2">DUSt3R</td></tr><tr><td>Time (s)</td><td>Peak GPU Mem (GiB)</td><td>Time (s)</td><td>Peak GPU Mem (GiB)</td></tr><tr><td>2</td><td>0.065</td><td>3.84</td><td>0.092</td><td>3.52</td></tr><tr><td>8</td><td>0.122</td><td>6.33</td><td>8.386</td><td>24.59</td></tr><tr><td>32</td><td>0.509</td><td>13.25</td><td>129.0</td><td>67.61</td></tr><tr><td>48</td><td>0.84</td><td>20.8</td><td>OOM</td><td>OOM</td></tr><tr><td>320</td><td>15.938</td><td>41.90</td><td>OOM</td><td>OOM</td></tr><tr><td>800</td><td>89.569</td><td>55.97</td><td>OOM</td><td>OOM</td></tr><tr><td>1000</td><td>137.62</td><td>63.01</td><td>OOM</td><td>OOM</td></tr><tr><td>1500</td><td>308.85</td><td>78.59</td><td>OOM</td><td>OOM</td></tr></table>
Table 2. System performance metrics for different view counts on Fast3R and DUSt3R on a single A100. Time is measured in seconds (s), and memory is measured in gibibytes (GiB). Each view is $512 \times 384$ in resolution. For DUSt3R, at 48 views the $N^2$ pairwise reconstructions eventually consume all VRAM at its global alignment stage. Note that Fast3R's reported fastest FPS of 251.1 uses 108 views in $224 \times 224$ resolution.
<table><tr><td rowspan="2">Method</td><td rowspan="2">FPS</td><td colspan="2">7 scenes [47]</td><td colspan="2">NRGBD [3]</td></tr><tr><td>Acc↓</td><td>Comp↓</td><td>Acc↓</td><td>Comp↓</td></tr><tr><td>F-Recon [65]</td><td><0.1</td><td>7.62</td><td>2.31</td><td>20.59</td><td>6.31</td></tr><tr><td>DUSt3R†[61]</td><td>0.78</td><td>1.23</td><td>0.91</td><td>2.51</td><td>1.03</td></tr><tr><td>Spann3R [57]</td><td>65.4</td><td>1.48</td><td>0.85</td><td>3.15</td><td>1.10</td></tr><tr><td>Fast3R (Ours)</td><td>251.1</td><td>1.58</td><td>0.93</td><td>3.40</td><td>1.01</td></tr></table>

Table 3. Quantitative reconstruction results on scene datasets: The numbers indicate median distance to GT points on 7-Scenes [47] and NRGBD [3] datasets. These datasets contain video trajectories of 500-1500 frames, and we evaluate using the same frame skip as other baselines. For 7-Scenes this is skip=20, and NRGBD uses skip=40. DUSt3R† indicates using DUSt3R's final weights on $224 \times 224$ images, to fit within GPU memory. Distances are scaled $100 \times$ to remove the leading 0.00.
# 4.2. Camera Pose Estimation
We evaluate camera pose estimation on unseen trajectories from 41 object categories from CO3D [39]. Following [61], we sample 10 random views from each trajectory.

Inspired by DUSt3R [61], we estimate the focal length, camera rotation, and camera translation from the predicted global pointmaps.
<table><tr><td rowspan="2">Method</td><td rowspan="2">Views</td><td>Acc↓</td><td>Comp↓</td></tr><tr><td>Med.</td><td>Med.</td></tr><tr><td>DUSt3R [61]</td><td>all/5</td><td>1.159</td><td>0.914</td></tr><tr><td>DUSt3R† [61]</td><td>all/5</td><td>1.297</td><td>1.002</td></tr><tr><td>Spann3R [57]</td><td>all/5</td><td>2.268</td><td>1.295</td></tr><tr><td>Fast3R (Ours)</td><td>all/5</td><td>1.706</td><td>0.857</td></tr></table>
Table 4. Quantitative results on object-centric DTU [1] dataset. Using a skip=5 on trajectories of 49 frames.

We begin by initializing a set of random focal length guesses based on the image resolution, then use RANSAC-PnP to estimate the camera's rotation and translation based on the guessed focal lengths and the global pointmap. The count of outliers from RANSAC-PnP is used to score each guessed focal length (lower is better), and the best-scoring focal length is selected to compute the intrinsic and extrinsic camera matrices.
During RANSAC-PnP, we only use points with the top $15\%$ confidence scores predicted by Fast3R, ensuring efficient PnP processing and reducing outliers. If all images are known to originate from the same physical camera, we use the focal length estimated from the first view as the focal length for all cameras, as this initial estimate has been empirically found to be more reliable. Otherwise, we independently estimate the focal length for each input. It is worth noting that the camera pose estimation process is parallelized using multi-threading, ensuring minimal wall-clock time. Even for hundreds of views, the process completes in just a few seconds on standard CPUs.
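
The focal-length search plus confidence filtering described above can be sketched with OpenCV's RANSAC-PnP (the candidate focal scales and helper names are assumptions for illustration):

```python
import cv2
import numpy as np

def estimate_pose(points3d, pixels, conf, W, H, keep=0.15):
    """Score focal-length guesses by RANSAC-PnP outlier count (lower wins).

    points3d: (HW, 3) global pointmap; pixels: (HW, 2); conf: (HW,) scores.
    """
    top = np.argsort(conf)[::-1][: int(keep * conf.size)]  # top 15% confident
    obj = points3d[top].astype(np.float64)
    img = pixels[top].astype(np.float64)

    best = None
    for scale in (0.7, 1.0, 1.4, 2.0):  # focal guesses from image resolution
        f = scale * max(W, H)
        K = np.array([[f, 0, W / 2], [0, f, H / 2], [0, 0, 1]])
        ok, rvec, tvec, inliers = cv2.solvePnPRansac(obj, img, K, None)
        if ok:
            outliers = len(obj) - (0 if inliers is None else len(inliers))
            if best is None or outliers < best[0]:
                best = (outliers, K, rvec, tvec)
    return best  # (outlier count, intrinsics, rotation vec, translation vec)
```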

We report Relative Rotation Accuracy (RRA) and Relative Translation Accuracy (RTA) at a threshold of $15^{\circ}$, mean Average Accuracy (mAA) at threshold $30^{\circ}$, and model frames per second (FPS) in Table 1. On CO3D, Fast3R surpasses all other methods across the RRA and mAA metrics, achieving near-perfect RRA, while remaining competitive on RTA. Importantly, it is also orders of magnitude faster: $320\times$ faster than DUSt3R and $1000\times$ faster than MASt3R. Fast3R-no-outdoor is the same Fast3R model but trained without the BlendedMVS and MegaDepth datasets. We find these datasets help the model generalize to more diverse scenes (e.g., drone shots and large outdoor landmarks) but slightly hurt pose estimation performance on CO3D.
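
For reference, one common way to compute the pairwise RRA/RTA accuracies reported here (conventions for the translation error vary across papers; this sketch uses camera-center directions):

```python
import numpy as np

def rra_rta(R_pred, t_pred, R_gt, t_gt, thresh_deg=15.0):
    """R_*: (N, 3, 3) rotations; t_*: (N, 3) camera centers/translations."""
    n = len(R_pred)
    rra = rta = pairs = 0
    for i in range(n):
        for j in range(i + 1, n):
            # relative rotation error between predicted and GT pair transforms
            dR = (R_pred[j] @ R_pred[i].T) @ (R_gt[j] @ R_gt[i].T).T
            ang = np.degrees(np.arccos(np.clip((np.trace(dR) - 1) / 2, -1, 1)))
            rra += ang < thresh_deg
            # relative translation direction error (scale-free)
            u, v = t_pred[j] - t_pred[i], t_gt[j] - t_gt[i]
            cos = u @ v / (np.linalg.norm(u) * np.linalg.norm(v) + 1e-8)
            rta += np.degrees(np.arccos(np.clip(cos, -1, 1))) < thresh_deg
            pairs += 1
    return rra / pairs, rta / pairs
```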

Figure 4. Pose accuracy with more views: Fast3R improves with the context from more views. Fast3R saturates the orientation portion of the benchmark, even using 3-5 views.


Figure 5. DTU reconstruction quality vs. number of views at test time. Accuracy and Completion (lower is better) improve as we run inference with more views.

Figure 6. Increasing # views during training: camera pose estimation on CO3D. Estimates of both orientation (RRA@5) and translation (RTA@5) improve with more views.

Figure 4 and Figure 5 show that Fast3R's predictions improve with more views, indicating that the model is able to use the additional context from multiple images.

# 4.3. 3D Reconstruction

We evaluate Fast3R's 3D reconstruction on scene-level benchmarks: 7-Scenes [47] and Neural RGB-D [3], and the object-level benchmark DTU [1].

We found that the local pointmap head learns more accurate pointmaps than the global pointmap head (ablation in Sec. 5.4). We therefore use the local pointmaps for detail and the global pointmaps for the high-level structure. Specifically, we independently align each image's local pointmap to the global pointmap using ICP and use the aligned local pointmaps for evaluation.
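
Because the local and global pointmaps are pixel-aligned, a closed-form similarity fit (Umeyama) over corresponding points is a simple stand-in for the ICP alignment step used here:

```python
import numpy as np

def align_local_to_global(local_pts, global_pts):
    """Fit global ≈ s * R @ local + t from (M, 3) corresponding points."""
    mu_l, mu_g = local_pts.mean(0), global_pts.mean(0)
    P, Q = local_pts - mu_l, global_pts - mu_g
    U, S, Vt = np.linalg.svd(P.T @ Q)
    d = np.sign(np.linalg.det(Vt.T @ U.T))  # guard against reflections
    R = Vt.T @ np.diag([1.0, 1.0, d]) @ U.T
    s = (S * np.array([1.0, 1.0, d])).sum() / (P ** 2).sum()  # isotropic scale
    t = mu_g - s * (R @ mu_l)
    return s, R, t
```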

Fast3R is competitive with other pointmap reconstruction methods like DUSt3R and MASt3R, while being significantly faster, as shown in Table 3 and Table 4. We believe that Fast3R will continue to improve with better reconstruction data, more compute, and better training recipes. We show supporting scaling experiments in Sec. 5.1.


Figure 7. Increasing # views during training: reconstruction on 7-Scenes and NRGBD. Accuracy and Completion (lower is better) improve as we train with more views. Normal Consistency (higher is better) also improves as we train with more views.

# 5. Ablation Studies

# 5.1. Scaling the number of views

Fast3R is able to use all-to-all attention during training, which lets it learn from the global context. We hypothesize that the additional context provided by more views during training allows the model to learn higher-order correspondences between multiple frames, ultimately increasing model performance and increasing potential for scaling.
Figures 6 and 7 show that training on increasingly more views consistently improves RRA and RTA for visual odometry, and reconstruction accuracy—even when the number of views used during evaluation is held constant and the model is ultimately evaluated on fewer views than were seen during training. We further evaluate the model's ability to reason about additional views by increasing the number of images that Fast3R sees during inference. Figure 4 and Figure 5 indicate that as the model uses more views, the average per-view performance improves. This behavior holds for all evaluated metrics in both camera pose estimation and reconstruction. As shown in Figure 5, the model has a better per-view accuracy using 50 images than it does with 20, even though it was trained with 20. Many applications (e.g. reconstruction, odometry) require inference on many views, which is a major motivation for Fast3R removing the pairwise constraint.

# 5.2. Model scaling and data scaling

The Transformer architecture's scalability is a key advantage of Fast3R. Although full experiments are in Appendices A and B, our results show that increasing model size and data consistently boosts performance, promising better outcomes with more computational investment.

# 5.3. Training without position interpolation

In Section 3.3, we introduced a randomized version of [5] to enable inference on more views than seen during training. Without this technique, model accuracy quickly degrades for pointmaps corresponding to image indices outside the training range, as shown in Figure 8 (top). A version of Fast3R trained on $N = 4$ views still produces high-quality pointmaps for views in slots 5 to 24 (Figure 8, bottom).


Figure 8. Effect of sampling image index PE during training. If we train the model without sampling index embeddings, the regression loss spikes (orange) when testing with more views than seen at training (top). Our embedding strategy performs comparably even when tested with $6 \times$ the number of views seen during training.
<table><tr><td rowspan="2">Pointmap Type</td><td colspan="2">7-Scenes [47]</td><td colspan="2">NRGBD [3]</td><td colspan="2">DTU</td></tr><tr><td>Acc↓</td><td>Comp↓</td><td>Acc↓</td><td>Comp↓</td><td>Acc↓</td><td>Comp↓</td></tr><tr><td>local aligned to global</td><td>2.84</td><td>1.37</td><td>4.39</td><td>1.28</td><td>3.91</td><td>1.41</td></tr><tr><td>global</td><td>4.81</td><td>1.64</td><td>4.85</td><td>1.32</td><td>3.88</td><td>1.41</td></tr><tr><td>Δ</td><td>+1.97</td><td>+0.27</td><td>+0.46</td><td>+0.04</td><td>-0.03</td><td>0.00</td></tr></table>
Table 5. Ablation on the effect of local head on 3D reconstruction. Red/green indicate an increase/decrease in error compared to using the local pointmap aligned to the global pointmap.
# 5.4. Inference without local head
In Table 5, we perform an ablation experiment on the effect of using local vs. global pointmaps for 3D reconstruction (also see the visualization in Figure 14 in the appendix). Specifically, we compare doing 3D reconstruction directly with the predicted global pointmaps to reconstruction with the predicted local pointmaps aligned to the global coordinate system using ICP (Sec. 4.3). Qualitative and quantitative results show that the local head produces more accurate pointmaps (fewer floaters, less smearing, less distortion) compared to the global head. We attribute this behavior to the local head's targets being more invariant during training than the global head's. That is, the local head is supervised such that the 3D XYZ locations of pixels in an image do not change, no matter which view is selected as the anchor view $I_{1}$; whereas for the global head, the XYZ locations for pixels in a view are dependent on which view is selected as the anchor view. Conceptually, the global head needs to learn both 2D-to-3D geometry and the rigid transformation of 3D points in the global coordinate system, whereas the local head only needs to learn 2D-to-3D geometry.
# 6. Conclusion

We introduce Fast3R, a transformer that predicts 3D locations for all pixels in a common frame of reference, directly in a single forward pass. By replacing the whole SfM pipeline with a generic architecture trained end-to-end, Fast3R and similar approaches should benefit from the usual scaling rules for transformers: consistent improvement with better data and increased parameters. Since Fast3R uses global attention, it avoids two potentially artificial scaling limits due to bottlenecks in existing systems. First, the bottleneck of image pair reconstruction restricts the information available to the model. Second, pairwise global optimization can only make up for this so much and does not improve with more data.

With our efficient implementation, Fast3R can operate at $>250$ FPS, and process 1500 images in one forward pass, far exceeding other methods while achieving competitive results on 3D reconstruction and camera pose estimation benchmarks. We demonstrate that Fast3R can be finetuned to reconstruct videos by changing the data, without modifying the pointmap regression objective or architecture. In contrast with pipeline approaches bottlenecked by custom and slow operations, Fast3R inherits the benefits of future engineering improvements to efficiently serve and train large transformer-based models. For example, packages like DeepSpeed-Inference [38] and FlashAttention [7, 8] provide fused kernels, model parallelism, and data parallelism. These speed up inference and reduce memory requirements, allowing more images per device, and the number of images scales with the number of devices.
Limitations: A current limiting factor for scaling may be data accuracy and quantity. Synthetic data [34, 40] could be a solution as, broadly speaking, models trained for geometry estimation seem to generalize well from simulation data. Fast3R can successfully use simulated data to train for 4D reconstruction, showing generalization results on DAVIS. Similarly, DepthAnythingV2 [66] showed the potential of this approach to scale for monocular depth estimation.
The architecture of Fast3R allows for parallel processing of many views, and its positional embedding design enables "train short, test long" in terms of context length of views. However, we observed that for scenes with very large reconstruction areas, when the number of views becomes extreme (e.g., more than 300), the point map of some views (particularly those with a low confidence score) begins to exhibit drifting behavior. One current way to address this issue is to drop frames with low confidence scores. In dense reconstruction, this approach typically does not hurt reconstruction quality too much. However, to fundamentally address this problem, we hypothesize that future work could explore the following avenues: (1) incorporating more data containing large scenes to improve generalization to such cases; (2) designing better positional embeddings inspired by state-of-the-art long-context language models [76], which can handle very long context lengths and exploit the temporal structure of ordered image sequences (e.g., video).

# References

[1] Henrik Aanaes, Rasmus Ramsbøl Jensen, George Vogiatzis, Engin Tola, and Anders Bjorholm Dahl. Large-scale data for multiple-view stereopsis. International Journal of Computer Vision, 120:153-168, 2016. 6, 7
[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report, 2024. 5
[3] Dejan Azinović, Ricardo Martin-Brualla, Dan B Goldman, Matthias Nießner, and Justus Thies. Neural rgb-d surface reconstruction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6290-6301, 2022. 6, 7, 8
[4] Gilad Baruch, Zhuoyuan Chen, Afshin Dehghan, Tal Dimry, Yuri Feigin, Peter Fu, Thomas Gebauer, Brandon Joffe, Daniel Kurz, Arik Schwartz, and Elad Shulman. Arkitscenes: A diverse real-world dataset for 3d indoor scene understanding using mobile rgb-d data. arXiv preprint arXiv:2111.08897, 2021. 2, 4, 5
[5] Shouyuan Chen, Sherman Wong, Liangjian Chen, and Yuandong Tian. Extending context window of large language models via positional interpolation. In Proceedings of the International Conference on Learning Representations (ICLR), 2024. 4, 5, 7
[6] Angela Dai, Angel X. Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proc. Computer Vision and Pattern Recognition (CVPR), IEEE, 2017. 2
[7] Tri Dao. FlashAttention-2: Faster attention with better parallelism and work partitioning. In International Conference on Learning Representations (ICLR), 2024. 5, 8
[8] Tri Dao, Daniel Y. Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. FlashAttention: Fast and memory-efficient exact attention with IO-awareness. In Advances in Neural Information Processing Systems (NeurIPS), 2022. 5, 8
[9] Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superpoint: Self-supervised interest point detection and description. 2018 IEEE/CVF Conference on Computer Vision and Pattern Recognition Workshops (CVPRW), 2018. 6
[10] Carl Doersch, Ankush Gupta, Larisa Markeeva, Adrià Recasens, Lucas Smaira, Yusuf Aytar, João Carreira, Andrew Zisserman, and Yi Yang. Tap-vid: A benchmark for tracking any point in a video, 2022. 16
[11] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, Jakob Uszkoreit, and Neil Houlsby. An image is worth 16x16 words: Transformers for image recognition at scale. In International Conference on Learning Representations, 2021. 4, 12
[12] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models, 2024. 5
[13] Mihai Dusmanu, Ignacio Rocco, Tomás Pajdla, Marc Pollefeys, Josef Sivic, Akihiko Torii, and Torsten Sattler. D2-net: A trainable cnn for joint description and detection of local features. 2019 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 8084-8093, 2019. 2
[14] Ainaz Eftekhar, Alexander Sax, Jitendra Malik, and Amir Zamir. Omnidata: A scalable pipeline for making multitask mid-level vision datasets from 3d scans. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10786-10796, 2021. 3
[15] FairScale authors. Fairscale: A general purpose modular pytorch library for high performance and large scale training. https://github.com/facebookresearch/fairscale, 2021. 5
[16] Zhiwen Fan, Wenyan Cong, Kairun Wen, Kevin Wang, Jian Zhang, Xinghao Ding, Danfei Xu, Boris Ivanovic, Marco Pavone, Georgios Pavlakos, Zhangyang Wang, and Yue Wang. Instantsplat: Unbounded sparse-view pose-free gaussian splatting in 40 seconds, 2024. 12
[17] Silvano Galliani, Katrin Lasinger, and Konrad Schindler. Massively parallel multiview stereopsis by surface normal diffusion. In Proceedings of the IEEE international conference on computer vision, pages 873-881, 2015. 1
[18] Pierre Gleize, Weiyao Wang, and Matt Feiszli. Silk: Simple learned keypoints. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 10932-10942, 2023. 2
[19] Kristen Grauman, Andrew Westbury, Lorenzo Torresani, Kris Kitani, Jitendra Malik, Triantafyllos Afouras, Kumar Ashutosh, Vijay Baiyya, Siddhant Bansal, Bikram Boote, et al. Ego-exo4d: Understanding skilled human activity from first-and third-person perspectives. In Proceedings of the IEEE conference on computer vision and pattern recognition, 2024. 2
[20] Richard Hartley and Andrew Zisserman. Multiple View Geometry in Computer Vision. Cambridge University Press, New York, NY, USA, 2 edition, 2003. 2
[21] Yanping Huang, Youlong Cheng, Ankur Bapna, Orhan Firat, Dehao Chen, Mia Chen, HyoukJoong Lee, Jiquan Ngiam, Quoc V Le, Yonghui Wu, and Zhifeng Chen. Gpipe: Efficient training of giant neural networks using pipeline parallelism. In Advances in Neural Information Processing Systems. Curran Associates, Inc., 2019. 5
[22] Albert Q. Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, Gianna Lengyel, Guillaume Bour, Guillaume Lample, Lélio Renard Lavaud, Lucile Saulnier, Marie-Anne Lachaux, Pierre Stock, Sandeep Subramanian, Sophia Yang, Szymon Antoniak, Teven Le Scao, Théophile Gervet, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mixtral of experts, 2024. 5
[23] Vincent Leroy, Yohann Cabon, and Jérôme Revaud. Grounding image matching in 3d with mast3r. arXiv preprint arXiv:2406.09756, 2024. 6
[24] Vincent Leroy, Yohann Cabon, and Jerome Revaud. Grounding image matching in 3d with mast3r, 2024. 2, 5
[25] Shen Li, Yanli Zhao, Rohan Varma, Omkar Salpekar, Pieter Noordhuis, Teng Li, Adam Paszke, Jeff Smith, Brian Vaughan, Pritam Damania, and Soumith Chintala. Pytorch distributed: Experiences on accelerating data parallel training. CoRR, abs/2006.15704, 2020. 5
[26] Zhengqi Li and Noah Snavely. Megadepth: Learning single-view depth prediction from internet photos. In Computer Vision and Pattern Recognition (CVPR), 2018. 5
[27] Amy Lin, Jason Y Zhang, Deva Ramanan, and Shubham Tulsiani. Relpose++: Recovering 6d poses from sparse-view observations. arXiv preprint arXiv:2305.04926, 2023. 2, 6
[28] Philipp Lindenberger, Paul-Edouard Sarlin, Viktor Larsson, and Marc Pollefeys. Pixel-perfect structure-from-motion with featuremetric refinement. 2021 IEEE/CVF International Conference on Computer Vision (ICCV), pages 5967-5977, 2021. 6
[29] Ilya Loshchilov and Frank Hutter. Fixing weight decay regularization in adam. CoRR, abs/1711.05101, 2017. 5
[30] Raul Mur-Artal and Juan D. Tardos. ORB-SLAM2: an open-source SLAM system for monocular, stereo and RGB-D cameras. IEEE Transactions on Robotics, 33(5):1255-1262, 2017. 2
[31] Raul Mur-Artal, Jose Maria Martinez Montiel, and Juan D Tardos. Orb-slam: a versatile and accurate monocular slam system. IEEE transactions on robotics, 31(5):1147-1163, 2015. 1
[32] Deepak Narayanan, Mohammad Shoeybi, Jared Casper, Patrick LeGresley, Mostofa Patwary, Vijay Korthikanti, Dmitri Vainbrand, Prethvi Kashinkunti, Julie Bernauer, Bryan Catanzaro, Amar Phanishayee, and Matei Zaharia. Efficient large-scale language model training on GPU clusters. CoRR, abs/2104.04473, 2021. 5
[33] Maxime Oquab, Timothée Darcet, Theo Moutakanni, Huy V. Vo, Marc Szafraniec, Vasil Khalidov, Pierre Fernandez, Daniel Haziza, Francisco Massa, Alaaeldin El-Nouby, Russell Howes, Po-Yao Huang, Hu Xu, Vasu Sharma, Shang-Wen Li, Wojciech Galuba, Mike Rabbat, Mido Assran, Nicolas Ballas, Gabriel Synnaeve, Ishan Misra, Herve Jegou, Julien Mairal, Patrick Labatut, Armand Joulin, and Piotr Bojanowski. Dinov2: Learning robust visual features without supervision, 2023. 4
[34] Alexander Raistrick, Lahav Lipson, Zeyu Ma, Lingjie Mei, Mingzhe Wang, Yiming Zuo, Karhan Kayan, Hongyu Wen, Beining Han, Yihan Wang, Alejandro Newell, Hei Law, Ankit Goyal, Kaiyu Yang, and Jia Deng. Infinite photorealistic worlds using procedural generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12630-12641, 2023. 8
[35] Samyam Rajbhandari, Jeff Rasley, Olatunji Ruwase, and Yuxiong He. Zero: Memory optimizations toward training trillion parameter models. CoRR, abs/1910.02054, 2019. 5
[36] René Ranftl, Katrin Lasinger, David Hafner, Konrad Schindler, and Vladlen Koltun. Towards robust monocular depth estimation: Mixing datasets for zero-shot cross-dataset transfer. In IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI), 2020. 3
[37] René Ranftl, Alexey Bochkovskiy, and Vladlen Koltun. Vision transformers for dense prediction. arXiv preprint arXiv:2103.13413, 2021. 4, 5
[38] Jeff Rasley, Samyam Rajbhandari, Olatunji Ruwase, and Yuxiong He. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In Proceedings of the 26th ACM SIGKDD International Conference on Knowledge Discovery & Data Mining, pages 3505-3506, 2020. 8
[39] Jeremy Reizenstein, Roman Shapovalov, Philipp Henzler, Luca Sbordone, Patrick Labatut, and David Novotny. Common objects in 3d: Large-scale learning and evaluation of real-life 3d category reconstruction. In Proceedings of the IEEE/CVF international conference on computer vision, pages 10901-10911, 2021. 2, 5, 6
[40] Mike Roberts, Jason Ramapuram, Anurag Ranjan, Atulit Kumar, Miguel Angel Bautista, Nathan Paczan, Russ Webb, and Joshua M. Susskind. Hypersim: A photorealistic synthetic dataset for holistic indoor scene understanding. In International Conference on Computer Vision (ICCV) 2021, 2021. 8
[41] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. Superglue: Learning feature matching with graph neural networks. 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 4937-4946, 2020. 6
[42] Paul-Edouard Sarlin, Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. SuperGlue: Learning feature matching with graph neural networks. In CVPR, 2020. 2
[43] Manolis Savva, Abhishek Kadian, Oleksandr Maksymets, Yili Zhao, Erik Wijmans, Bhavana Jain, Julian Straub, Jia Liu, Vladlen Koltun, Jitendra Malik, Devi Parikh, and Dhruv Batra. Habitat: A platform for embodied AI research. CoRR, abs/1904.01201, 2019. 5
[44] Johannes Lutz Schonberger and Jan-Michael Frahm. Structure-from-motion revisited. In Conference on Computer Vision and Pattern Recognition (CVPR), 2016. 1, 2
[45] Noam Shazeer, Youlong Cheng, Niki Parmar, Dustin Tran, Ashish Vaswani, Penporn Koanantakool, Peter Hawkins, HyoukJoong Lee, Mingsheng Hong, Cliff Young, Ryan Sepassi, and Blake A. Hechtman. Mesh-tensorflow: Deep learning for supercomputers. CoRR, abs/1811.02084, 2018. 5
[46] Mohammad Shoeybi, Mostofa Patwary, Raul Puri, Patrick LeGresley, Jared Casper, and Bryan Catanzaro. Megatronlm: Training multi-billion parameter language models using model parallelism. CoRR, abs/1909.08053, 2019. 5
[47] Jamie Shotton, Ben Glocker, Christopher Zach, Shahram Izadi, Antonio Criminisi, and Andrew Fitzgibbon. Scene coordinate regression forests for camera relocalization in rgb-d images. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2930-2937, 2013. 6, 7, 8
[48] Cameron Smith, David Charatan, Ayush Kumar Tewari, and Vincent Sitzmann. Flowmap: High-quality camera poses, intrinsics, and depth via gradient descent. ArXiv, abs/2404.15259, 2024. 2
[49] Matthew Tancik, Pratul P. Srinivasan, Ben Mildenhall, Sara Fridovich-Keil, Nithin Raghavan, Utkarsh Singhal, Ravi Ramamoorthi, Jonathan T. Barron, and Ren Ng. Fourier features let networks learn high frequency functions in low dimensional domains. In Advances in Neural Information Processing Systems, pages 7537-7547, 2020. 4
[50] Hao Tang, Weiyao Wang, and Matt Feiszli. Aden: Adaptive density representations for sparse-view camera pose estimation. arXiv preprint arXiv:2408.09042, 2024. 2
[51] Zachary Teed and Jia Deng. Droid-slam: Deep visual slam for monocular, stereo, and rgb-d cameras. In Neural Information Processing Systems, 2021. 2
[52] Zachary Teed and Jia Deng. DROID-SLAM: Deep Visual SLAM for Monocular, Stereo, and RGB-D Cameras. Advances in neural information processing systems, 2021. 2
[53] Sebastian Thrun. Probabilistic robotics. Communications of the ACM, 45(3):52-57, 2002. 1
[54] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models, 2023. 5
[55] Vadim Tschernezki, Ahmad Darkhalil, Zhifan Zhu, David Fouhey, Iro Laina, Diane Larlus, Dima Damen, and Andrea Vedaldi. EPIC Fields: Marrying 3D Geometry and Video Understanding. In Proceedings of the Neural Information Processing Systems (NeurIPS), 2023. 2
[56] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in Neural Information Processing Systems, 2017. 2
[57] Hengyi Wang and Lourdes Agapito. 3d reconstruction with spatial memory. arXiv preprint arXiv:2408.16061, 2024. 3, 5, 6
[58] Jianyuan Wang, Nikita Karaev, Christian Rupprecht, and David Novotny. Vggsfm: Visual geometry grounded deep structure from motion. 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 21686-21697, 2024. 2
[59] Jianyuan Wang, C. Rupprecht, and David Novotny. Posediffusion: Solving pose estimation via diffusion-aided bundle adjustment. 2023 IEEE/CVF International Conference on Computer Vision (ICCV), pages 9739-9749, 2023. 2, 6
[60] Shuzhe Wang, Vincent Leroy, Yohann Cabon, Boris Chidlovskii, and Jérôme Revaud. Dust3r: Geometric 3d vision made easy. 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 20697-20709, 2024. 6
[61] Shuzhe Wang, Vincent Leroy, Yohann Cabon, Boris Chidlovskii, and Jerome Revaud. Dust3r: Geometric 3d vision made easy. In CVPR, 2024. 1, 2, 3, 4, 5, 6
[62] Wenshan Wang, Delong Zhu, Xiangwei Wang, Yaoyu Hu, Yuheng Qiu, Chen Wang, Yafei Hu, Ashish Kapoor, and Sebastian Scherer. Tartanair: A dataset to push the limits of visual slam. In 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), 2020. 12
[63] Philippe Weinzaepfel, Vincent Leroy, Thomas Lucas, Romain Brégier, Yohann Cabon, Vaibhav Arora, Leonid Antsfeld, Boris Chidlovskii, Gabriela Csurka, and Jérôme Revaud. Croco: Self-supervised pre-training for 3d vision tasks
|
| 317 |
+
|
| 318 |
+
by cross-view completion. Advances in Neural Information Processing Systems, 35:3502-3516, 2022. 4, 5
|
| 319 |
+
[64] Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chau-mond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, and Jamie Brew. Huggingface's transformers: State-of-the-art natural language processing. CoRR, abs/1910.03771, 2019. 5
|
| 320 |
+
[65] Guangkai Xu, Wei Yin, Hao Chen, Chunhua Shen, Kai Cheng, and Feng Zhao. Frozen recon: Pose-free 3d scene reconstruction with frozen depth models. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9310-9320, 2023. 6
|
| 321 |
+
[66] Lihe Yang, Bingyi Kang, Zilong Huang, Xiaogang Xu, Jiashi Feng, and Hengshuang Zhao. Depth anything: Unleashing the power of large-scale unlabeled data. In CVPR, 2024. 3, 4, 8
|
| 322 |
+
[67] Yao Yao, Zixin Luo, Shiwei Li, Jingyang Zhang, Yufan Ren, Lei Zhou, Tian Fang, and Long Quan. Blendedmvs: A large-scale dataset for generalized multi-view stereo networks. Computer Vision and Pattern Recognition (CVPR), 2020. 5
|
| 323 |
+
[68] Chandan Yeshwanth, Yueh-Cheng Liu, Matthias Nießner, and Angela Dai. Scannet++: A high-fidelity dataset of 3d indoor scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 12-22, 2023. 2, 4, 5
|
| 324 |
+
[69] Kwang Moo Yi, Eduard Trulls, Vincent Lepetit, and Pascal V. Fua. Lift: Learned invariant feature transform. In European Conference on Computer Vision, 2016. 2
|
| 325 |
+
[70] Junyi Zhang, Charles Herrmann, Junhwa Hur, Varun Jampani, Trevor Darrell, Forrester Cole, Deqing Sun, and Ming-Hsuan Yang. Monst3r: A simple approach for estimating geometry in the presence of motion. arXiv preprint arxiv:2410.03825, 2024. 2, 12
|
| 326 |
+
[71] Jason Y. Zhang, Deva Ramanan, and Shubham Tulsiani. Relpose: Predicting probabilistic relative rotation for single objects in the wild. ArXiv, abs/2208.05963, 2022. 6
|
| 327 |
+
[72] Jason Y. Zhang, Amy Lin, Moneish Kumar, Tzu-Hsuan Yang, Deva Ramanan, and Shubham Tulsiani. Cameras as rays: Pose estimation via ray diffusion. ArXiv, abs/2402.14817, 2024. 2
|
| 328 |
+
[73] Wang Zhao, Shaohui Liu, Hengkai Guo, Wenping Wang, and Y. Liu. *Particlesfm: Exploiting dense point trajectories for localizing moving cameras in the wild*. In European Conference on Computer Vision, 2022. 2
|
| 329 |
+
[74] Yang Zheng, Adam W Harley, Bokui Shen, Gordon Wetzstein, and Leonidas J Guibas. Pointodyssey: A large-scale synthetic dataset for long-term point tracking. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 19855-19865, 2023. 12
|
| 330 |
+
[75] Tinghui Zhou, Richard Tucker, John Flynn, Graham Fyffe, and Noah Snavely. Stereo magnification: learning view synthesis using multiplane images. ACM Trans. Graph., 37(4), 2018. 6
|
| 331 |
+
[76] Dawei Zhu, Nan Yang, Liang Wang, Yifan Song, Wenhao Wu, Furu Wei, and Sujian Li. Pose: Efficient context window extension of llms via positional skip-wise training. In The Twelfth International Conference on Learning Representations. 8
|
| 332 |
+
|
| 333 |
+
# Fast3R: Towards 3D Reconstruction of $1000+$ Images in One Forward Pass (Supplementary Material)

# A. Model Scaling Effect

We investigate the effect of scaling model size by trying three sizes for the Fusion Transformer: ViT-Base, ViT-Large, and ViT-Huge, following the settings of the original ViT paper [11]. The results are shown in Figure 9. This experiment demonstrates that larger model sizes continually benefit 3D tasks, including camera pose estimation and 3D reconstruction. Note that the Fusion Transformer used for all experiments in the main text is a ViT-Base.
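For concreteness, the three capacities correspond to the standard ViT configurations. The Python sketch below only records the dimensions from the original ViT paper [11]; the dataclass and names are illustrative, not Fast3R's actual configuration code.

```python
from dataclasses import dataclass

@dataclass
class FusionTransformerConfig:
    """Illustrative capacity settings; dimensions follow the original ViT paper [11]."""
    layers: int    # number of transformer blocks
    width: int     # hidden (embedding) dimension
    heads: int     # attention heads
    mlp_dim: int   # feed-forward hidden dimension

# Standard ViT sizes tried for the Fusion Transformer.
VIT_BASE  = FusionTransformerConfig(layers=12, width=768,  heads=12, mlp_dim=3072)
VIT_LARGE = FusionTransformerConfig(layers=24, width=1024, heads=16, mlp_dim=4096)
VIT_HUGE  = FusionTransformerConfig(layers=32, width=1280, heads=16, mlp_dim=5120)
```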
# B. Data Scaling Effect

We study the effect of scaling the data by training the model on four data scales: $12.5\%$, $25\%$, $50\%$, and $100\%$. The results are shown in Figure 10. All training settings are kept identical except for the amount of data available. The results demonstrate that Fast3R continually benefits from more data, suggesting that it could achieve even better results given additional data in the future.
# C. Gaussian Splatting

We qualitatively demonstrate the potential of using Fast3R's output for downstream novel view synthesis tasks. A visualization of the Gaussian Splatting generated by adopting the pipeline of InstantSplat [16] is shown in Figure 11.
# D. Bundle Adjustment (via Gaussian Splatting)

While not necessary, applying bundle adjustment at inference time can further improve Fast3R's performance. We show an example of bundle adjustment via Gaussian Splatting (GS-BA).
Specifically, we use InstantSplat [16] to optimize a set of Gaussians per scene, initialized from a point cloud, and update the point locations and camera poses to minimize reprojection error. Figure 11 shows an example Gaussian reconstruction on CO3D.
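The full GS-BA pipeline optimizes Gaussian parameters through a differentiable rasterizer. As a much-reduced illustration of the underlying idea, refining a camera pose by gradient descent on the reprojection error of an initial point cloud, here is a self-contained PyTorch sketch on synthetic data. The scene, pose parameterization, and optimizer settings are all assumptions for illustration; this is not InstantSplat's implementation.

```python
import torch

def hat(w):
    """Skew-symmetric matrix of a 3-vector (differentiable)."""
    zero = torch.zeros((), dtype=w.dtype)
    return torch.stack([
        torch.stack([zero, -w[2], w[1]]),
        torch.stack([w[2], zero, -w[0]]),
        torch.stack([-w[1], w[0], zero]),
    ])

def rotmat(w):
    """Axis-angle -> rotation matrix via Rodrigues' formula."""
    theta = w.norm()
    if theta < 1e-8:
        return torch.eye(3, dtype=w.dtype) + hat(w)  # first-order approx. near identity
    k = hat(w / theta)
    return torch.eye(3, dtype=w.dtype) + torch.sin(theta) * k + (1 - torch.cos(theta)) * (k @ k)

def project(points, R, t, K):
    """Pinhole projection of Nx3 world points into pixel coordinates."""
    cam = points @ R.T + t
    uv = cam @ K.T
    return uv[:, :2] / uv[:, 2:3]

# Synthetic stand-in for a reconstructed point cloud observed by one camera.
torch.manual_seed(0)
K = torch.tensor([[500., 0., 112.], [0., 500., 112.], [0., 0., 1.]])
points = torch.randn(200, 3) + torch.tensor([0., 0., 5.])        # points in front of the camera
target_uv = project(points, rotmat(torch.tensor([0.02, -0.01, 0.03])),
                    torch.tensor([0.10, -0.05, 0.00]), K)        # "observed" pixels

# Refine an identity pose initialization toward the true pose.
w = torch.zeros(3, requires_grad=True)   # axis-angle rotation update
t = torch.zeros(3, requires_grad=True)   # translation update
opt = torch.optim.Adam([w, t], lr=1e-2)
for step in range(500):
    opt.zero_grad()
    loss = ((project(points, rotmat(w), t, K) - target_uv) ** 2).mean()  # reprojection error
    loss.backward()
    opt.step()
print(f"reprojection MSE after refinement: {loss.item():.2e}")
```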
We can compare against ground-truth trajectories from COLMAP. We found that GS-BA significantly reduces both the rotation and translation error. Table 6 quantifies this, showing over a $2.5\mathrm{x}$ reduction in rotational error and a $4\mathrm{x}$ reduction in translation error ($27.9 \rightarrow 11.0$ and $7.64 \rightarrow 1.80$, respectively) on the "Family" scene from Tanks and Temples, which we found to be representative; the InstantSplat evaluation set contains only 8 scenes. Figure 12 visualizes the original reconstruction and the poses pre- and post-bundle-adjustment.
<table><tr><td>Method</td><td>RPE Rotation (↓)</td><td>RPE Translation (↓)</td></tr><tr><td>Fast3R</td><td>27.9</td><td>7.64</td></tr><tr><td>Fast3R w/ GS-BA</td><td>11.0</td><td>1.80</td></tr></table>

Table 6. Pose estimation can further improve with bundle adjustment. We show an example on the "Family" scene from Tanks and Temples, using InstantSplat [16].

<table><tr><td rowspan="2">Methods</td><td colspan="2">ScanNet</td><td colspan="2">ETH3D</td><td colspan="2">DTU</td><td colspan="2">T&T</td></tr><tr><td>rel ↓</td><td>τ ↑</td><td>rel ↓</td><td>τ ↑</td><td>rel ↓</td><td>τ ↑</td><td>rel ↓</td><td>τ ↑</td></tr><tr><td>COLMAP-DENSE</td><td>38.0</td><td>22.5</td><td>89.8</td><td>23.2</td><td>20.8</td><td>69.3</td><td>25.7</td><td>76.4</td></tr><tr><td>DUSt3R 224</td><td>5.86</td><td>50.84</td><td>4.71</td><td>61.74</td><td>2.76</td><td>77.32</td><td>5.54</td><td>56.38</td></tr><tr><td>DUSt3R 512</td><td>4.93</td><td>60.20</td><td>2.91</td><td>76.91</td><td>3.52</td><td>69.33</td><td>3.17</td><td>76.68</td></tr><tr><td>Fast3R</td><td>6.27</td><td>50.27</td><td>4.68</td><td>62.68</td><td>3.92</td><td>62.60</td><td>4.43</td><td>63.95</td></tr></table>

Table 7. Multi-view depth evaluation. DUSt3R and Fast3R perform on par, while both significantly outperform COLMAP-DENSE.

# E. Multi-view Depth Evaluation
We compare Fast3R (using the local pointmap prediction) with DUSt3R and COLMAP on multi-view depth estimation and report results in Table 7.
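For reference, the two metrics in Table 7 are the Absolute Relative error (rel, lower is better) and the inlier ratio τ (higher is better). A minimal NumPy sketch follows; the 1.03 inlier threshold and per-image median scale alignment are assumptions based on common multi-view depth benchmarking practice, so the exact protocol behind Table 7 may differ.

```python
import numpy as np

def depth_metrics(pred, gt, thresh=1.03):
    """Absolute relative error ('rel') and inlier ratio ('tau') for one depth map.

    Assumes median alignment of the (scale-ambiguous) prediction to the
    ground truth, as is common in multi-view depth benchmarks.
    """
    valid = gt > 0
    pred, gt = pred[valid], gt[valid]
    pred = pred * np.median(gt) / np.median(pred)   # per-image scale alignment
    rel = np.mean(np.abs(pred - gt) / gt) * 100     # percent, lower is better
    ratio = np.maximum(pred / gt, gt / pred)
    tau = np.mean(ratio < thresh) * 100             # percent, higher is better
    return rel, tau

# Toy usage with synthetic depth maps:
gt = np.random.uniform(1.0, 10.0, size=(240, 320))
pred = gt * np.random.uniform(0.97, 1.03, size=gt.shape)
print(depth_metrics(pred, gt))
```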
# F. More Visualizations

We show more visualizations of Fast3R's performance on indoor scenes in Figure 15. Fast3R learns the regularity of indoor rooms (square-like shapes) and demonstrates "loop closure" capabilities.

# F.1. 4D Reconstruction: Qualitative Results

Because Fast3R can handle multiple frames naturally, one may wonder how well it handles dynamic scenes. We qualitatively test Fast3R's 4D reconstruction ability, showing examples of dynamically aligned pointmaps at multiple time steps in Figure 16. Fast3R can be trained to achieve such results by fine-tuning a 16-view static checkpoint on the PointOdyssey [74] and TartanAir [62] datasets, consisting of 110 dynamic and 150 static scenes, respectively. We freeze the ViT encoder, use $224 \times 224$ image resolution, and swap in a newly initialized global DPT head. We fine-tune the model for 15 epochs with a frame length of 16 and a per-GPU batch size of 1, using the same learning rate schedule as Fast3R. Fine-tuning takes 45 hours on 2 Nvidia Quadro RTX A6000 GPUs.
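A minimal PyTorch sketch of this fine-tuning recipe follows. The module names and stand-in layers are purely illustrative (Fast3R's real encoder, fusion transformer, and DPT heads are not reproduced here); only the freeze/swap/optimize pattern mirrors the description above, and the cosine schedule and learning rate are assumptions.

```python
import torch
import torch.nn as nn

# Stand-ins for the real Fast3R modules (illustrative only).
model = nn.ModuleDict({
    "vit_encoder": nn.Linear(32, 32),   # placeholder for the pretrained ViT encoder
    "fusion": nn.Linear(32, 32),        # placeholder for the Fusion Transformer
    "global_head": nn.Linear(32, 3),    # placeholder for the global DPT head
})

# 1) Freeze the ViT encoder.
for p in model["vit_encoder"].parameters():
    p.requires_grad = False

# 2) Swap in a newly-initialized global DPT head.
model["global_head"] = nn.Linear(32, 3)

# 3) Optimize only the remaining trainable parameters for 15 epochs.
trainable = [p for p in model.parameters() if p.requires_grad]
opt = torch.optim.AdamW(trainable, lr=1e-4)                       # LR is illustrative
sched = torch.optim.lr_scheduler.CosineAnnealingLR(opt, T_max=15)

for epoch in range(15):
    # Iterate 16-frame clips from PointOdyssey / TartanAir at 224x224,
    # per-GPU batch size 1, regress dynamic pointmaps, and backprop here.
    sched.step()
```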
We see that our approach produces qualitatively reasonable reconstructions with minimal changes. MonST3R [70] is a concurrent work also tackling dynamic scene reconstruction that builds atop DUSt3R. However, like DUSt3R, it assumes a pairwise architecture and also uses a separate model to predict optical flow. We show that the same Fast3R architecture, trained end-to-end with the same many-view pointmap regression (just swapping the data to dynamic scenes), can also work for 4D reconstruction. Importantly, our method remains significantly faster, opening the potential for real-time applications.
Figure 9. Model scaling effect. Increasing the size of the Fusion Transformer leads to better camera pose estimation $(\uparrow)$ and 3D reconstruction $(\downarrow)$. All models are trained for 60k steps (equivalent to 60 epochs; the main paper uses 100 epochs).

Figure 10. Data scaling effect. More training data leads to better camera pose estimation $(\uparrow)$ and 3D reconstruction $(\downarrow)$. All models are trained for 60k steps (equivalent to 60 epochs; the main paper uses 100 epochs).

Figure 11. Visualization of Gaussians from unseen poses. The frames are ordered temporally along the direction of the arrows. The middle frames show poses very different from those used for reconstruction, as evidenced by the large areas with no Gaussians. The scene is fit from 7 images from CO3D.

Figure 12. Bundle adjustment further improves pose. Panels: Fast3R Reconstruction / Original Poses / with GS-BA. Left: reconstruction from Fast3R. Middle: original poses pre-GS-BA. Right: poses after GS-BA.

Figure 13. Large-scale reconstruction: Spann3R vs. Fast3R on the Lighthouse scene from the Tanks & Temples dataset.

Figure 14. Effect of using local vs. global pointmaps. Panels: Reconstruction using Global Point Map / Reconstruction using Local (aligned to Global) Point Map. Global pointmaps provide good anchors for point locations, while local pointmaps use those anchors (aligned to the global pointmap via ICP on the anchor points) to provide more accurate point locations. Best viewed zoomed in.

Figure 15. Visualizations of results on NRGBD scenes. Fast3R learns the regularity of indoor rooms (square-like shapes) and demonstrates loop closure capabilities.

Figure 16. Qualitative 4D reconstruction results on unseen dynamic scenes in DAVIS. Results are obtained with one forward pass. The tracks are visualized using ground-truth track annotations from TAP-Vid-DAVIS [10].
2501.13xxx/2501.13928/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:79e54b907a89ce94a1f7cef25de56fdf8b7c1deb31beadc0cf00552a303320df
size 1268418

2501.13xxx/2501.13928/layout.json
ADDED
The diff for this file is too large to render. See raw diff.

2501.14xxx/2501.14143/dbe5f8b6-224b-4a5e-ae76-21a2b393dee5_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.

2501.14xxx/2501.14143/dbe5f8b6-224b-4a5e-ae76-21a2b393dee5_model.json
ADDED
The diff for this file is too large to render. See raw diff.

2501.14xxx/2501.14143/dbe5f8b6-224b-4a5e-ae76-21a2b393dee5_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b09599661ac2933a05e2e518e4c4a6db1dbd41d0af75e29e9109078dbb6b4beb
size 3692438

2501.14xxx/2501.14143/full.md
ADDED
The diff for this file is too large to render. See raw diff.

2501.14xxx/2501.14143/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:94d1571ac0c9008667c3c4b9fb032e36176b6a8845a8b2a071b187aeb8f9f094
size 759250

2501.14xxx/2501.14143/layout.json
ADDED
The diff for this file is too large to render. See raw diff.

2501.14xxx/2501.14195/55154dce-d70c-43aa-a450-f39ef81abb90_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.

2501.14xxx/2501.14195/55154dce-d70c-43aa-a450-f39ef81abb90_model.json
ADDED
The diff for this file is too large to render. See raw diff.

2501.14xxx/2501.14195/55154dce-d70c-43aa-a450-f39ef81abb90_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:342a7a8c24c852a1074e697d6d99a604440a84092fc514d237b4d3a2dd091980
size 16383992

2501.14xxx/2501.14195/full.md
ADDED
The diff for this file is too large to render. See raw diff.

2501.14xxx/2501.14195/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2972697b96a4f64cdd884d90a9859462fc5b043cb1846c85d5ba960f4a6f52f5
size 2456510

2501.14xxx/2501.14195/layout.json
ADDED
The diff for this file is too large to render. See raw diff.

2501.14xxx/2501.14208/e2caf765-1c34-43b1-b3ae-52afa7b5b7ff_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.

2501.14xxx/2501.14208/e2caf765-1c34-43b1-b3ae-52afa7b5b7ff_model.json
ADDED
The diff for this file is too large to render. See raw diff.

2501.14xxx/2501.14208/e2caf765-1c34-43b1-b3ae-52afa7b5b7ff_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e3f10683d56f16c849b42802182642e46381f10438be680eefff4b56da86ec5f
size 26420561

2501.14xxx/2501.14208/full.md
ADDED
The diff for this file is too large to render. See raw diff.

2501.14xxx/2501.14208/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:be831037f95b8309d8369b67d05f5b953eb96a4613f4b250fbf4f3a365981491
size 2501615

2501.14xxx/2501.14208/layout.json
ADDED
The diff for this file is too large to render. See raw diff.
2501.14xxx/2501.14240/3d87a418-e222-4e41-ac34-ee2f21e49a0d_content_list.json
ADDED
@@ -0,0 +1,1186 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Generalizable Audio Deepfake Detection via Latent Space Refinement and Augmentation",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
106,
|
| 8 |
+
61,
|
| 9 |
+
890,
|
| 10 |
+
119
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Wen Huang $^{1,2}$ Yanmei Gu $^{3}$ Zhiming Wang $^{3}$ Huijia Zhu $^{3}$ Yanmin Qian $^{1\\dagger}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
200,
|
| 19 |
+
137,
|
| 20 |
+
800,
|
| 21 |
+
154
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "<sup>1</sup> Auditory Cognition and Computational Acoustics Lab, MoE Key Lab of Artificial Intelligence, AI Institute",
|
| 28 |
+
"bbox": [
|
| 29 |
+
176,
|
| 30 |
+
154,
|
| 31 |
+
825,
|
| 32 |
+
167
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China",
|
| 39 |
+
"bbox": [
|
| 40 |
+
192,
|
| 41 |
+
167,
|
| 42 |
+
803,
|
| 43 |
+
181
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "$^{2}$ SJTU Paris Elite Institute of Technology, $^{3}$ Ant Group, Shanghai, China",
|
| 50 |
+
"bbox": [
|
| 51 |
+
281,
|
| 52 |
+
181,
|
| 53 |
+
717,
|
| 54 |
+
196
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "Abstract—Advances in speech synthesis technologies, like text-to-speech (TTS) and voice conversion (VC), have made detecting deepfake speech increasingly challenging. Spoofing countermeasures often struggle to generalize effectively, particularly when faced with unseen attacks. To address this, we propose a novel strategy that integrates Latent Space Refinement (LSR) and Latent Space Augmentation (LSA) to improve the generalization of deepfake detection systems. LSR introduces multiple learnable prototypes for the spoof class, refining the latent space to better capture the intricate variations within spoofed data. LSA further diversifies spoofed data representations by applying augmentation techniques directly in the latent space, enabling the model to learn a broader range of spoofing patterns. We evaluated our approach on four representative datasets, i.e. ASVspoof 2019 LA, ASVspoof 2021 LA and DF, and In-The-Wild. The results show that LSR and LSA perform well individually, and their integration achieves competitive results, matching or surpassing current state-of-the-art methods.",
|
| 61 |
+
"bbox": [
|
| 62 |
+
73,
|
| 63 |
+
228,
|
| 64 |
+
491,
|
| 65 |
+
410
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "Index Terms—audio deepfake detection, anti-spoofing, generalization",
|
| 72 |
+
"bbox": [
|
| 73 |
+
88,
|
| 74 |
+
410,
|
| 75 |
+
485,
|
| 76 |
+
422
|
| 77 |
+
],
|
| 78 |
+
"page_idx": 0
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"text": "I. INTRODUCTION",
|
| 83 |
+
"text_level": 1,
|
| 84 |
+
"bbox": [
|
| 85 |
+
222,
|
| 86 |
+
435,
|
| 87 |
+
344,
|
| 88 |
+
448
|
| 89 |
+
],
|
| 90 |
+
"page_idx": 0
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"type": "text",
|
| 94 |
+
"text": "With advancements in speech synthesis systems such as text-to-speech (TTS) and voice conversion (VC), detecting deepfake speech has become increasingly challenging. Synthesized data can originate from a wide range of synthesis systems, each with its own distinct characteristics, making it difficult for spoofing countermeasures to generalize effectively. This challenge is exacerbated when detectors encounter unseen deepfake attacks, often leading to significant performance degradation [1], [2].",
|
| 95 |
+
"bbox": [
|
| 96 |
+
73,
|
| 97 |
+
452,
|
| 98 |
+
491,
|
| 99 |
+
565
|
| 100 |
+
],
|
| 101 |
+
"page_idx": 0
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"type": "text",
|
| 105 |
+
"text": "To enhance generalization in deepfake detectors, one key direction focuses on developing more robust classification models through improved architecture and learning strategies. Recent studies have utilized features extracted from self-supervised speech models such as Wav2vec [3], Whisper [4], and WavLM [5] as front-end inputs for deepfake detection. These models, trained on large-scale and diverse speech data, strengthen the detection process by providing reliable and domain-agnostic features [6]. Beyond improving feature extraction, researchers have also worked to improve the accuracy of back-end classifiers. Traditional binary classification methods often struggle with generalization, particularly when facing distribution mismatches. To address this, one-class learning approaches have been explored, focusing on creating a compact representation of bonafide speech while effectively pushing away spoofed speech, leading to a well-separated and more generalizable feature space [7], [8].",
|
| 106 |
+
"bbox": [
|
| 107 |
+
73,
|
| 108 |
+
565,
|
| 109 |
+
491,
|
| 110 |
+
773
|
| 111 |
+
],
|
| 112 |
+
"page_idx": 0
|
| 113 |
+
},
|
| 114 |
+
{
|
| 115 |
+
"type": "text",
|
| 116 |
+
"text": "Another promising direction is through data augmentation, which enhances the robustness of the model by exposing it to a wider range of data variations during training. Traditional techniques such as speed perturbation, SpecAugment [9], and codec augmentation have been shown to improve performance. More recent methods, such as Rawboost [10], use signal processing techniques to boost or distort raw audio, leading to significant improvements. There are also augmentation strategies specifically designed for audio deepfake",
|
| 117 |
+
"bbox": [
|
| 118 |
+
73,
|
| 119 |
+
773,
|
| 120 |
+
491,
|
| 121 |
+
885
|
| 122 |
+
],
|
| 123 |
+
"page_idx": 0
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"type": "text",
|
| 127 |
+
"text": "detection. For instance, CpAug [11] employs a copy-paste strategy to generate diverse training samples, while targeted augmentation methods [12] create pseudo-fakes that challenge the decision boundary, thereby increasing the diversity of fake samples. Furthermore, research has shown that using neural vocoders to augment data can further enhance detection performance [13], [14].",
|
| 128 |
+
"bbox": [
|
| 129 |
+
501,
|
| 130 |
+
227,
|
| 131 |
+
919,
|
| 132 |
+
311
|
| 133 |
+
],
|
| 134 |
+
"page_idx": 0
|
| 135 |
+
},
|
| 136 |
+
{
|
| 137 |
+
"type": "text",
|
| 138 |
+
"text": "Building on these two key directions, we propose a novel strategy of integrating latent space refinement and augmentation to further boost the generalization ability of deepfake detection, as shown in Fig. 1. First, to address the limitations of binary classification in capturing the diverse nature of spoofed audio, we introduce Latent Space Refinement (LSR). In binary classification, models typically assign a single prototype to each class, which oversimplifies the complex variability within spoofed audio. While one-class learning tries to address this by compactly representing the bonafide class and treating others as outliers, it often imposes a rigid boundary that fails to capture the diversity in spoofed data. In contrast, our LSR approach introduces multiple learnable prototypes specifically for the spoof class, refining the latent space to better model the intricate variations within spoofed data. This enhanced representation reduces intra-class variability and allows the model to generalize more effectively across different spoofing attacks.",
|
| 139 |
+
"bbox": [
|
| 140 |
+
501,
|
| 141 |
+
311,
|
| 142 |
+
921,
|
| 143 |
+
532
|
| 144 |
+
],
|
| 145 |
+
"page_idx": 0
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"type": "text",
|
| 149 |
+
"text": "Second, to further enhance generalization, we apply Latent Space Augmentation (LSA) to diversify spoofed data representations, inspired by successful applications in computer vision [15], [16]. Unlike traditional data augmentation, which focuses on manipulating input data, LSA directly targets the latent space, allowing it to be independent of specific audio-level operations. By applying techniques such as additive noise, affine transformation, batch mixup, and linear interpolation and extrapolation, LSA generates a wide range of spoofed examples that expand the latent space. This expansion helps the model capture more diverse patterns within spoofed data, thereby improving its ability to generalize across different spoofing attacks and enhancing overall detection performance.",
|
| 150 |
+
"bbox": [
|
| 151 |
+
501,
|
| 152 |
+
532,
|
| 153 |
+
921,
|
| 154 |
+
700
|
| 155 |
+
],
|
| 156 |
+
"page_idx": 0
|
| 157 |
+
},
|
| 158 |
+
{
|
| 159 |
+
"type": "text",
|
| 160 |
+
"text": "Our experimental results confirm the effectiveness of the proposed latent space refinement and augmentation. We evaluated the approach on four representative datasets: ASVspoof 2019 LA [17], ASVspoof 2021 LA and DF [1], and In-The-Wild [2]. The findings show that both LSR and LSA individually contribute to performance improvements, with the integrated system achieving competitive results, matching or surpassing the current state-of-the-art across these diverse benchmarks.",
|
| 161 |
+
"bbox": [
|
| 162 |
+
501,
|
| 163 |
+
699,
|
| 164 |
+
921,
|
| 165 |
+
810
|
| 166 |
+
],
|
| 167 |
+
"page_idx": 0
|
| 168 |
+
},
|
| 169 |
+
{
|
| 170 |
+
"type": "text",
|
| 171 |
+
"text": "II. METHODS",
|
| 172 |
+
"text_level": 1,
|
| 173 |
+
"bbox": [
|
| 174 |
+
666,
|
| 175 |
+
818,
|
| 176 |
+
759,
|
| 177 |
+
830
|
| 178 |
+
],
|
| 179 |
+
"page_idx": 0
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "text",
|
| 183 |
+
"text": "A. Latent Space Refinement",
|
| 184 |
+
"text_level": 1,
|
| 185 |
+
"bbox": [
|
| 186 |
+
503,
|
| 187 |
+
835,
|
| 188 |
+
679,
|
| 189 |
+
849
|
| 190 |
+
],
|
| 191 |
+
"page_idx": 0
|
| 192 |
+
},
|
| 193 |
+
{
|
| 194 |
+
"type": "text",
|
| 195 |
+
"text": "To capture the inherent variations within the spoof class, we introduce multiple learnable prototypes that refine the latent distribution. Assume there are $K$ prototypes for each class, denoted as $\\{c_1,\\dots ,c_K\\}$ . For the bonafide class, $K = 1$ , while for the spoof",
|
| 196 |
+
"bbox": [
|
| 197 |
+
503,
|
| 198 |
+
852,
|
| 199 |
+
921,
|
| 200 |
+
910
|
| 201 |
+
],
|
| 202 |
+
"page_idx": 0
|
| 203 |
+
},
|
| 204 |
+
{
|
| 205 |
+
"type": "aside_text",
|
| 206 |
+
"text": "arXiv:2501.14240v1 [eess.AS] 24 Jan 2025",
|
| 207 |
+
"bbox": [
|
| 208 |
+
22,
|
| 209 |
+
262,
|
| 210 |
+
57,
|
| 211 |
+
720
|
| 212 |
+
],
|
| 213 |
+
"page_idx": 0
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"type": "page_footnote",
|
| 217 |
+
"text": "† Corresponding author",
|
| 218 |
+
"bbox": [
|
| 219 |
+
88,
|
| 220 |
+
893,
|
| 221 |
+
220,
|
| 222 |
+
909
|
| 223 |
+
],
|
| 224 |
+
"page_idx": 0
|
| 225 |
+
},
|
| 226 |
+
{
|
| 227 |
+
"type": "image",
|
| 228 |
+
"img_path": "images/3b8a4f32eef6006afb33df5f0eef6a8888890813e063c00b73a56972628ee3a5.jpg",
|
| 229 |
+
"image_caption": [
|
| 230 |
+
"Fig. 1. The pipeline of the proposed method, illustrating the process of Latent Space Refinement (LSR) and Latent Space Augmentation (LSA)."
|
| 231 |
+
],
|
| 232 |
+
"image_footnote": [],
|
| 233 |
+
"bbox": [
|
| 234 |
+
76,
|
| 235 |
+
61,
|
| 236 |
+
439,
|
| 237 |
+
196
|
| 238 |
+
],
|
| 239 |
+
"page_idx": 1
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"type": "image",
|
| 243 |
+
"img_path": "images/b9eb39b2623a36a6c279c99cbd722c5a790c857fc47465455c8c56b4479b08ec.jpg",
|
| 244 |
+
"image_caption": [],
|
| 245 |
+
"image_footnote": [],
|
| 246 |
+
"bbox": [
|
| 247 |
+
446,
|
| 248 |
+
61,
|
| 249 |
+
620,
|
| 250 |
+
196
|
| 251 |
+
],
|
| 252 |
+
"page_idx": 1
|
| 253 |
+
},
|
| 254 |
+
{
|
| 255 |
+
"type": "image",
|
| 256 |
+
"img_path": "images/c184e408504afbc09c22af9a89cc2e4870fea0c7bbb08fd2e802d49cb0f560a4.jpg",
|
| 257 |
+
"image_caption": [],
|
| 258 |
+
"image_footnote": [],
|
| 259 |
+
"bbox": [
|
| 260 |
+
625,
|
| 261 |
+
61,
|
| 262 |
+
797,
|
| 263 |
+
196
|
| 264 |
+
],
|
| 265 |
+
"page_idx": 1
|
| 266 |
+
},
|
| 267 |
+
{
|
| 268 |
+
"type": "image",
|
| 269 |
+
"img_path": "images/d14b2ba64b94efadb3cd38f48975395f76e1f567fc888f0bfcb9cb1c29f064e0.jpg",
|
| 270 |
+
"image_caption": [],
|
| 271 |
+
"image_footnote": [],
|
| 272 |
+
"bbox": [
|
| 273 |
+
807,
|
| 274 |
+
61,
|
| 275 |
+
915,
|
| 276 |
+
79
|
| 277 |
+
],
|
| 278 |
+
"page_idx": 1
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "image",
|
| 282 |
+
"img_path": "images/15ac26bd913bdbfc3d52c60ffab2f04d795952b634c3eba5367c2e89aa772e2b.jpg",
|
| 283 |
+
"image_caption": [],
|
| 284 |
+
"image_footnote": [],
|
| 285 |
+
"bbox": [
|
| 286 |
+
808,
|
| 287 |
+
83,
|
| 288 |
+
903,
|
| 289 |
+
97
|
| 290 |
+
],
|
| 291 |
+
"page_idx": 1
|
| 292 |
+
},
|
| 293 |
+
{
|
| 294 |
+
"type": "image",
|
| 295 |
+
"img_path": "images/429954165e841c8e1b5b23ff42ba2e4f03198a87076ee014bdf488bdcfee02c4.jpg",
|
| 296 |
+
"image_caption": [],
|
| 297 |
+
"image_footnote": [],
|
| 298 |
+
"bbox": [
|
| 299 |
+
808,
|
| 300 |
+
99,
|
| 301 |
+
906,
|
| 302 |
+
113
|
| 303 |
+
],
|
| 304 |
+
"page_idx": 1
|
| 305 |
+
},
|
| 306 |
+
{
|
| 307 |
+
"type": "image",
|
| 308 |
+
"img_path": "images/bd43512fede10cbac007623d18f0cd525ba08d43a09c2a2eb4cb707d4f718c50.jpg",
|
| 309 |
+
"image_caption": [],
|
| 310 |
+
"image_footnote": [],
|
| 311 |
+
"bbox": [
|
| 312 |
+
808,
|
| 313 |
+
118,
|
| 314 |
+
893,
|
| 315 |
+
131
|
| 316 |
+
],
|
| 317 |
+
"page_idx": 1
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"type": "image",
|
| 321 |
+
"img_path": "images/bceb795e4c824e933f9394e2b5ce42bf86597211ccd3e1579fda810bd45d0fe4.jpg",
|
| 322 |
+
"image_caption": [],
|
| 323 |
+
"image_footnote": [],
|
| 324 |
+
"bbox": [
|
| 325 |
+
810,
|
| 326 |
+
136,
|
| 327 |
+
900,
|
| 328 |
+
157
|
| 329 |
+
],
|
| 330 |
+
"page_idx": 1
|
| 331 |
+
},
|
| 332 |
+
{
|
| 333 |
+
"type": "image",
|
| 334 |
+
"img_path": "images/35cf3f8870abda735a6c2dadf3cbe53a87265e8e250d1cf94a00ee7dcc75c43f.jpg",
|
| 335 |
+
"image_caption": [],
|
| 336 |
+
"image_footnote": [],
|
| 337 |
+
"bbox": [
|
| 338 |
+
810,
|
| 339 |
+
162,
|
| 340 |
+
895,
|
| 341 |
+
175
|
| 342 |
+
],
|
| 343 |
+
"page_idx": 1
|
| 344 |
+
},
|
| 345 |
+
{
|
| 346 |
+
"type": "image",
|
| 347 |
+
"img_path": "images/c1eead5f421dcdd05c5444b65a911583b9ab11838fbba2678f65a86d96c754e5.jpg",
|
| 348 |
+
"image_caption": [],
|
| 349 |
+
"image_footnote": [],
|
| 350 |
+
"bbox": [
|
| 351 |
+
810,
|
| 352 |
+
180,
|
| 353 |
+
905,
|
| 354 |
+
191
|
| 355 |
+
],
|
| 356 |
+
"page_idx": 1
|
| 357 |
+
},
|
| 358 |
+
{
|
| 359 |
+
"type": "text",
|
| 360 |
+
"text": "class, $K$ is a hyperparameter chosen based on the complexity of the data. To determine the probability of a sample $x$ belonging to a particular class, we compute the maximum cosine similarity between its embedding $z$ and each of the class prototypes:",
|
| 361 |
+
"bbox": [
|
| 362 |
+
73,
|
| 363 |
+
243,
|
| 364 |
+
491,
|
| 365 |
+
299
|
| 366 |
+
],
|
| 367 |
+
"page_idx": 1
|
| 368 |
+
},
|
| 369 |
+
{
|
| 370 |
+
"type": "equation",
|
| 371 |
+
"text": "\n$$\n\\cos \\theta = \\sum_ {i = 1} ^ {K} \\frac {e ^ {\\langle c _ {i} , z \\rangle \\cdot \\gamma}}{\\sum_ {j = 1} ^ {K} e ^ {\\langle c _ {i} , z \\rangle \\cdot \\gamma}} \\langle c _ {i}, z \\rangle \\tag {1}\n$$\n",
|
| 372 |
+
"text_format": "latex",
|
| 373 |
+
"bbox": [
|
| 374 |
+
176,
|
| 375 |
+
305,
|
| 376 |
+
491,
|
| 377 |
+
344
|
| 378 |
+
],
|
| 379 |
+
"page_idx": 1
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"type": "text",
|
| 383 |
+
"text": "where $\\langle x,y\\rangle = \\frac{x\\cdot y}{\\|x\\|\\|y\\|}$ represents the cosine similarity between two vectors, and $\\gamma$ is the scaling factor, set to 10. We smooth the maximum operator using a softmax-like operation to prevent sensitivity between multiple prototypes.",
|
| 384 |
+
"bbox": [
|
| 385 |
+
73,
|
| 386 |
+
347,
|
| 387 |
+
490,
|
| 388 |
+
404
|
| 389 |
+
],
|
| 390 |
+
"page_idx": 1
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"type": "text",
|
| 394 |
+
"text": "To guide the learning of these prototypes, we design a prototype-based classification loss, inspired by the additive angular margin loss [18]:",
|
| 395 |
+
"bbox": [
|
| 396 |
+
73,
|
| 397 |
+
404,
|
| 398 |
+
491,
|
| 399 |
+
445
|
| 400 |
+
],
|
| 401 |
+
"page_idx": 1
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"type": "equation",
|
| 405 |
+
"text": "\n$$\n\\mathcal {L} _ {\\text {p r o t o}} (z) = - \\log \\frac {e ^ {s \\left(\\cos \\left(\\theta_ {y} + m\\right)\\right)}}{e ^ {s \\left(\\cos \\left(\\theta_ {y} + m\\right)\\right)} + e ^ {s \\left(\\cos \\theta_ {1 - y}\\right)}} \\tag {2}\n$$\n",
|
| 406 |
+
"text_format": "latex",
|
| 407 |
+
"bbox": [
|
| 408 |
+
135,
|
| 409 |
+
449,
|
| 410 |
+
490,
|
| 411 |
+
481
|
| 412 |
+
],
|
| 413 |
+
"page_idx": 1
|
| 414 |
+
},
|
| 415 |
+
{
|
| 416 |
+
"type": "text",
|
| 417 |
+
"text": "Here, $y \\in \\{0,1\\}$ is the label of sample $x$ , $m$ is an angular margin penalty, and $s$ is a scaling factor. This loss function encourages the model to push the embeddings of genuine samples closer to the bonafide prototype and spoofed samples closer to their corresponding prototypes.",
|
| 418 |
+
"bbox": [
|
| 419 |
+
73,
|
| 420 |
+
486,
|
| 421 |
+
491,
|
| 422 |
+
556
|
| 423 |
+
],
|
| 424 |
+
"page_idx": 1
|
| 425 |
+
},
|
| 426 |
+
{
|
| 427 |
+
"type": "text",
|
| 428 |
+
"text": "While prototypes are learned during the training process, there's a risk that they may collapse to a single center. To mitigate this, we introduce an intra-class regularization for the spoof prototypes $\\{c^s\\}$ :",
|
| 429 |
+
"bbox": [
|
| 430 |
+
73,
|
| 431 |
+
556,
|
| 432 |
+
491,
|
| 433 |
+
599
|
| 434 |
+
],
|
| 435 |
+
"page_idx": 1
|
| 436 |
+
},
|
| 437 |
+
{
|
| 438 |
+
"type": "equation",
|
| 439 |
+
"text": "\n$$\n\\mathcal {L} _ {\\text {i n t r a}} \\left(\\left\\{c ^ {s} \\right\\}\\right) = \\frac {2}{K (K - 1)} \\sum_ {i = 1} ^ {K - 1} \\sum_ {j = i + 1} ^ {K} \\left\\langle c _ {i} ^ {s}, c _ {j} ^ {s} \\right\\rangle \\tag {3}\n$$\n",
|
| 440 |
+
"text_format": "latex",
|
| 441 |
+
"bbox": [
|
| 442 |
+
140,
|
| 443 |
+
603,
|
| 444 |
+
490,
|
| 445 |
+
642
|
| 446 |
+
],
|
| 447 |
+
"page_idx": 1
|
| 448 |
+
},
|
| 449 |
+
{
|
| 450 |
+
"type": "text",
|
| 451 |
+
"text": "This regularization term calculates the mean similarity between the spoof prototypes, encouraging them to spread out in the latent space, thereby preventing prototype collapse.",
|
| 452 |
+
"bbox": [
|
| 453 |
+
73,
|
| 454 |
+
647,
|
| 455 |
+
488,
|
| 456 |
+
689
|
| 457 |
+
],
|
| 458 |
+
"page_idx": 1
|
| 459 |
+
},
|
| 460 |
+
{
|
| 461 |
+
"type": "text",
|
| 462 |
+
"text": "To further enhance the distinction between spoof and bonafide prototypes, we introduce an inter-class regularization term. This term calculates the smoothed maximum cosine similarity between the spoof prototypes $\\{c^s\\}$ and the single bonafide prototype $c^b$ :",
|
| 463 |
+
"bbox": [
|
| 464 |
+
73,
|
| 465 |
+
690,
|
| 466 |
+
491,
|
| 467 |
+
746
|
| 468 |
+
],
|
| 469 |
+
"page_idx": 1
|
| 470 |
+
},
|
| 471 |
+
{
|
| 472 |
+
"type": "equation",
|
| 473 |
+
"text": "\n$$\n\\mathcal {L} _ {i n t e r} \\left(\\left\\{c ^ {s} \\right\\}, c ^ {b}\\right) = \\delta + \\sum_ {i = 1} ^ {K} \\frac {e ^ {\\langle c _ {i} ^ {s} , c ^ {b} \\rangle \\cdot \\gamma}}{\\sum_ {j = 1} ^ {K} e ^ {\\langle c _ {i} ^ {s} , c ^ {b} \\rangle \\cdot \\gamma}} \\left\\langle c _ {i} ^ {s}, c ^ {b} \\right\\rangle \\tag {4}\n$$\n",
|
| 474 |
+
"text_format": "latex",
|
| 475 |
+
"bbox": [
|
| 476 |
+
124,
|
| 477 |
+
751,
|
| 478 |
+
488,
|
| 479 |
+
789
|
| 480 |
+
],
|
| 481 |
+
"page_idx": 1
|
| 482 |
+
},
|
| 483 |
+
{
|
| 484 |
+
"type": "text",
|
| 485 |
+
"text": "here $\\delta$ is a regularization coefficient that prevents the loss from becoming negative.",
|
| 486 |
+
"bbox": [
|
| 487 |
+
73,
|
| 488 |
+
794,
|
| 489 |
+
488,
|
| 490 |
+
821
|
| 491 |
+
],
|
| 492 |
+
"page_idx": 1
|
| 493 |
+
},
|
| 494 |
+
{
|
| 495 |
+
"type": "text",
|
| 496 |
+
"text": "Hence, the overall objective function for LSR is defined as follows:",
|
| 497 |
+
"bbox": [
|
| 498 |
+
88,
|
| 499 |
+
821,
|
| 500 |
+
488,
|
| 501 |
+
835
|
| 502 |
+
],
|
| 503 |
+
"page_idx": 1
|
| 504 |
+
},
|
| 505 |
+
{
|
| 506 |
+
"type": "equation",
|
| 507 |
+
"text": "\n$$\n\\mathcal {L} _ {L S R} = \\mathcal {L} _ {\\text {p r o t o}} + \\mathcal {L} _ {\\text {i n t r a}} + \\mathcal {L} _ {\\text {i n t e r}} \\tag {5}\n$$\n",
|
| 508 |
+
"text_format": "latex",
|
| 509 |
+
"bbox": [
|
| 510 |
+
173,
|
| 511 |
+
844,
|
| 512 |
+
488,
|
| 513 |
+
859
|
| 514 |
+
],
|
| 515 |
+
"page_idx": 1
|
| 516 |
+
},
|
| 517 |
+
{
|
| 518 |
+
"type": "text",
|
| 519 |
+
"text": "In addition, the LSR loss can be incorporated alongside a binary classification loss, such as Weighted Cross Entropy (WCE), to refine the latent distribution and reduce intra-class variance.",
|
| 520 |
+
"bbox": [
|
| 521 |
+
73,
|
| 522 |
+
866,
|
| 523 |
+
488,
|
| 524 |
+
907
|
| 525 |
+
],
|
| 526 |
+
"page_idx": 1
|
| 527 |
+
},
|
| 528 |
+
{
|
| 529 |
+
"type": "text",
|
| 530 |
+
"text": "B. Latent Space Augmentation",
|
| 531 |
+
"text_level": 1,
|
| 532 |
+
"bbox": [
|
| 533 |
+
504,
|
| 534 |
+
244,
|
| 535 |
+
694,
|
| 536 |
+
258
|
| 537 |
+
],
|
| 538 |
+
"page_idx": 1
|
| 539 |
+
},
|
| 540 |
+
{
|
| 541 |
+
"type": "text",
|
| 542 |
+
"text": "While multi-prototypical refinement enhances the representation of the spoofed class, further generalization can be achieved by augmenting the diversity of the training data. Instead of solely augmenting raw input data, we apply augmentation directly in the latent space, where lower dimensionality allows for more targeted variations. By focusing these augmentations on spoofed latent features, we generate diverse spoofing variations. Notably, these augmentations are not applied to bonafide latent features, preserving their authenticity.",
|
| 543 |
+
"bbox": [
|
| 544 |
+
501,
|
| 545 |
+
262,
|
| 546 |
+
921,
|
| 547 |
+
375
|
| 548 |
+
],
|
| 549 |
+
"page_idx": 1
|
| 550 |
+
},
|
| 551 |
+
{
|
| 552 |
+
"type": "text",
|
| 553 |
+
"text": "Given $z$ a batch of embeddings, we denote the spoof embeddings in this batch as $z^s$ and the bonafide embeddings as $z^b$ . To create diverse variations of spoof embeddings, we design five latent augmentation patterns for $z^s$ :",
|
| 554 |
+
"bbox": [
|
| 555 |
+
501,
|
| 556 |
+
375,
|
| 557 |
+
919,
|
| 558 |
+
429
|
| 559 |
+
],
|
| 560 |
+
"page_idx": 1
|
| 561 |
+
},
|
| 562 |
+
{
|
| 563 |
+
"type": "text",
|
| 564 |
+
"text": "Additive Noise (AN). A simple yet efficient idea is to add random perturbation to latent features. Here we apply the additive noise drawn from a Gaussian distribution as follows:",
|
| 565 |
+
"bbox": [
|
| 566 |
+
503,
|
| 567 |
+
430,
|
| 568 |
+
919,
|
| 569 |
+
470
|
| 570 |
+
],
|
| 571 |
+
"page_idx": 1
|
| 572 |
+
},
|
| 573 |
+
{
|
| 574 |
+
"type": "equation",
|
| 575 |
+
"text": "\n$$\n\\hat {z} ^ {s} = z ^ {s} + \\beta \\cdot X, X \\sim \\mathcal {N} (0, \\mathbf {I}) \\tag {6}\n$$\n",
|
| 576 |
+
"text_format": "latex",
|
| 577 |
+
"bbox": [
|
| 578 |
+
616,
|
| 579 |
+
479,
|
| 580 |
+
919,
|
| 581 |
+
497
|
| 582 |
+
],
|
| 583 |
+
"page_idx": 1
|
| 584 |
+
},
|
| 585 |
+
{
|
| 586 |
+
"type": "text",
|
| 587 |
+
"text": "where $\\mathcal{N}(0,\\mathbf{I})$ is the standard normal distribution, $\\mathbf{I}$ is the identity matrix, and $\\beta$ is a scaling factor sampled from $\\mathcal{N}(0,1)$ .",
|
| 588 |
+
"bbox": [
|
| 589 |
+
503,
|
| 590 |
+
505,
|
| 591 |
+
919,
|
| 592 |
+
532
|
| 593 |
+
],
|
| 594 |
+
"page_idx": 1
|
| 595 |
+
},
|
| 596 |
+
{
|
| 597 |
+
"type": "text",
|
| 598 |
+
"text": "Affine Transformation (AT). This common transformation for 1D vectors involves scaling and translating the latent features:",
|
| 599 |
+
"bbox": [
|
| 600 |
+
503,
|
| 601 |
+
534,
|
| 602 |
+
919,
|
| 603 |
+
561
|
| 604 |
+
],
|
| 605 |
+
"page_idx": 1
|
| 606 |
+
},
|
| 607 |
+
{
|
| 608 |
+
"type": "equation",
|
| 609 |
+
"text": "\n$$\n\\hat {z} ^ {s} = a \\cdot z ^ {s} + b \\tag {7}\n$$\n",
|
| 610 |
+
"text_format": "latex",
|
| 611 |
+
"bbox": [
|
| 612 |
+
663,
|
| 613 |
+
570,
|
| 614 |
+
919,
|
| 615 |
+
585
|
| 616 |
+
],
|
| 617 |
+
"page_idx": 1
|
| 618 |
+
},
|
| 619 |
+
{
|
| 620 |
+
"type": "text",
|
| 621 |
+
"text": "where $a$ is sampled from $\\mathcal{U}(0.9,1.1)$ and $b$ is set to 0.",
|
| 622 |
+
"bbox": [
|
| 623 |
+
503,
|
| 624 |
+
595,
|
| 625 |
+
841,
|
| 626 |
+
609
|
| 627 |
+
],
|
| 628 |
+
"page_idx": 1
|
| 629 |
+
},
|
| 630 |
+
{
|
| 631 |
+
"type": "text",
|
| 632 |
+
"text": "Batch Mixup (BM). Inspired by data mixup strategies [19], we create new latent features by blending pairs of spoof features in the batch, creating smoother transitions and intermediate variations:",
|
| 633 |
+
"bbox": [
|
| 634 |
+
503,
|
| 635 |
+
609,
|
| 636 |
+
919,
|
| 637 |
+
651
|
| 638 |
+
],
|
| 639 |
+
"page_idx": 1
|
| 640 |
+
},
|
| 641 |
+
{
|
| 642 |
+
"type": "equation",
|
| 643 |
+
"text": "\n$$\nz _ {i} ^ {s} = \\alpha \\cdot z _ {i} ^ {s} + (1 - \\alpha) \\cdot z _ {\\pi (i)} ^ {s} \\tag {8}\n$$\n",
|
| 644 |
+
"text_format": "latex",
|
| 645 |
+
"bbox": [
|
| 646 |
+
622,
|
| 647 |
+
659,
|
| 648 |
+
919,
|
| 649 |
+
676
|
| 650 |
+
],
|
| 651 |
+
"page_idx": 1
|
| 652 |
+
},
|
| 653 |
+
{
|
| 654 |
+
"type": "text",
|
| 655 |
+
"text": "where $i$ indexes the batch, $\\pi$ denotes a random permutation of the batch indices and $\\alpha$ is a mixup coefficient sampled from Beta(0.5, 0.5).",
|
| 656 |
+
"bbox": [
|
| 657 |
+
501,
|
| 658 |
+
684,
|
| 659 |
+
919,
|
| 660 |
+
724
|
| 661 |
+
],
|
| 662 |
+
"page_idx": 1
|
| 663 |
+
},
|
| 664 |
+
{
|
| 665 |
+
"type": "text",
|
| 666 |
+
"text": "The following two techniques rely on the prototypes learned in latent space refinement:",
|
| 667 |
+
"bbox": [
|
| 668 |
+
503,
|
| 669 |
+
726,
|
| 670 |
+
919,
|
| 671 |
+
753
|
| 672 |
+
],
|
| 673 |
+
"page_idx": 1
|
| 674 |
+
},
|
| 675 |
+
{
|
| 676 |
+
"type": "text",
|
| 677 |
+
"text": "Linear Interpolation (LI). To create more challenging examples targeting the decision boundary, we perform linear interpolation on spoof embeddings towards bonafide prototype $c^b$ . Since the prototypes in LSR the prototypes are normalized to lie on a unit hypersphere due to the use of cosine similarity, the norm of the vectors is incorporated to adjust for the transition to Euclidean space:",
|
| 678 |
+
"bbox": [
|
| 679 |
+
501,
|
| 680 |
+
753,
|
| 681 |
+
919,
|
| 682 |
+
839
|
| 683 |
+
],
|
| 684 |
+
"page_idx": 1
|
| 685 |
+
},
|
| 686 |
+
{
|
| 687 |
+
"type": "equation",
|
| 688 |
+
"text": "\n$$\n\\hat {z} ^ {s} = z ^ {s} + \\lambda_ {i} \\cdot \\left(\\frac {\\| z ^ {s} \\|}{\\| c ^ {b} \\|} c ^ {b} - z ^ {s}\\right) \\tag {9}\n$$\n",
|
| 689 |
+
"text_format": "latex",
|
| 690 |
+
"bbox": [
|
| 691 |
+
619,
|
| 692 |
+
844,
|
| 693 |
+
919,
|
| 694 |
+
875
|
| 695 |
+
],
|
| 696 |
+
"page_idx": 1
|
| 697 |
+
},
|
| 698 |
+
{
|
| 699 |
+
"type": "text",
|
| 700 |
+
"text": "where $\\lambda_{i}$ is an interpolation coefficient sampled from $\\mathcal{U}(0,0.1)$ , and the norm term $\\| z^s\\| /\\| c^b\\|$ aligns the scales of the vectors.",
|
| 701 |
+
"bbox": [
|
| 702 |
+
503,
|
| 703 |
+
880,
|
| 704 |
+
919,
|
| 705 |
+
909
|
| 706 |
+
],
|
| 707 |
+
"page_idx": 1
|
| 708 |
+
},
|
| 709 |
+
{
|
| 710 |
+
"type": "text",
|
| 711 |
+
"text": "Linear Extrapolation (LE). In addition to interpolation, we also perform extrapolation from the nearest spoof prototype to create new features:",
|
| 712 |
+
"bbox": [
|
| 713 |
+
73,
|
| 714 |
+
61,
|
| 715 |
+
491,
|
| 716 |
+
99
|
| 717 |
+
],
|
| 718 |
+
"page_idx": 2
|
| 719 |
+
},
|
| 720 |
+
{
|
| 721 |
+
"type": "equation",
|
| 722 |
+
"text": "\n$$\n\\hat {z} ^ {s} = z ^ {s} + \\lambda_ {e} \\cdot \\left(z ^ {s} - \\frac {\\| z ^ {s} \\|}{\\| C _ {n} ^ {s} \\|} c _ {n} ^ {s}\\right) \\tag {10}\n$$\n",
|
| 723 |
+
"text_format": "latex",
|
| 724 |
+
"bbox": [
|
| 725 |
+
187,
|
| 726 |
+
97,
|
| 727 |
+
488,
|
| 728 |
+
127
|
| 729 |
+
],
|
| 730 |
+
"page_idx": 2
|
| 731 |
+
},
|
| 732 |
+
{
|
| 733 |
+
"type": "text",
|
| 734 |
+
"text": "where $c_{n}^{s}$ corresponds the nearest spoof prototype of $z^{s}$ and $\\lambda_{e}$ is an extrapolation coefficient sampled from $\\mathcal{U}(0,0.1)$ . Similarly, we use the norm $\\| z^{s}\\| /\\| c_{n}^{s}\\|$ to adjust for the Euclidean representation. This method extends the spoof features further away from the nearest prototype, generating more diverse variations.",
|
| 735 |
+
"bbox": [
|
| 736 |
+
73,
|
| 737 |
+
131,
|
| 738 |
+
491,
|
| 739 |
+
200
|
| 740 |
+
],
|
| 741 |
+
"page_idx": 2
|
| 742 |
+
},
|
| 743 |
+
{
|
| 744 |
+
"type": "text",
|
| 745 |
+
"text": "Finally, the augmented latent features $\\hat{z}^s$ are concatenated with the original features $z$ , forming $z' = [z \\parallel \\hat{z}^s]$ . These enhanced features are then used for loss calculation during subsequent training, allowing the model to learn from a more varied set of spoofed data.",
|
| 746 |
+
"bbox": [
|
| 747 |
+
73,
|
| 748 |
+
200,
|
| 749 |
+
491,
|
| 750 |
+
257
|
| 751 |
+
],
|
| 752 |
+
"page_idx": 2
|
| 753 |
+
},
|
| 754 |
+
{
|
| 755 |
+
"type": "text",
|
| 756 |
+
"text": "III. EXPERIMENTS",
|
| 757 |
+
"text_level": 1,
|
| 758 |
+
"bbox": [
|
| 759 |
+
220,
|
| 760 |
+
263,
|
| 761 |
+
346,
|
| 762 |
+
276
|
| 763 |
+
],
|
| 764 |
+
"page_idx": 2
|
| 765 |
+
},
|
| 766 |
+
{
|
| 767 |
+
"type": "text",
|
| 768 |
+
"text": "A. Experimental Settings",
|
| 769 |
+
"text_level": 1,
|
| 770 |
+
"bbox": [
|
| 771 |
+
73,
|
| 772 |
+
282,
|
| 773 |
+
230,
|
| 774 |
+
295
|
| 775 |
+
],
|
| 776 |
+
"page_idx": 2
|
| 777 |
+
},
|
| 778 |
+
{
|
| 779 |
+
"type": "text",
|
| 780 |
+
"text": "Datasets and metrics. We train all systems using the ASVspoof 2019 LA training set [17], which includes approximately $25\\mathrm{k}$ utterances and 6 spoofing attacks involving VC or TTS. To evaluate generalization performance, we test on multiple datasets: the ASVspoof 2019 LA evaluation set (19LA) [17], containing $71\\mathrm{k}$ utterances with 13 different spoofing attacks; the ASVspoof 2021 LA set (21LA) [1], comprising about $181\\mathrm{k}$ utterances with algorithms similar to 19LA but also reflecting telephony systems' encoding and transmission effects; the ASVspoof 2021 DF set (21DF) [1], with over $600\\mathrm{k}$ utterances and more than 100 spoofing attacks processed with various lossy codecs; and the In-The-Wild dataset (ITW) [2], which features approximately $32\\mathrm{k}$ utterances collected under real-world, non-controlled conditions, making it a more challenging dataset. Performance is measured using Equal Error Rate (EER).",
|
| 781 |
+
"bbox": [
|
| 782 |
+
73,
|
| 783 |
+
299,
|
| 784 |
+
491,
|
| 785 |
+
493
|
| 786 |
+
],
|
| 787 |
+
"page_idx": 2
|
| 788 |
+
},
|
| 789 |
+
{
|
| 790 |
+
"type": "text",
|
| 791 |
+
"text": "Training details. We adopt the model architecture from [6], utilizing Wav2Vec2.0 XLSR [3] as the frontend feature extractor and AASIST [20] as the backend classifier. Input speech is randomly chunked into 4-second segments, with Rawboost [10] applied as basic augmentation and codec augmentation as extra augmentation. The learning rate is set to 1e-6 for the backbone model and 1e-3 for the prototypes in LSR. For the LSR loss, we set the scaling factor $s = 32$ , angular margin $m = 0.2$ , and regularization coefficient $\\delta = 0.2$ . For the WCE loss, the weights for bonafide and spoof classes are set to 0.9 and 0.1, respectively. For LSA, we either fix one type of augmentation during training or randomly select from all augmentation types (denoted as All).",
|
| 792 |
+
"bbox": [
|
| 793 |
+
73,
|
| 794 |
+
494,
|
| 795 |
+
491,
|
| 796 |
+
661
|
| 797 |
+
],
|
| 798 |
+
"page_idx": 2
|
| 799 |
+
},
|
| 800 |
+
{
|
| 801 |
+
"type": "text",
|
| 802 |
+
"text": "B. Overall Performance Comparison",
|
| 803 |
+
"text_level": 1,
|
| 804 |
+
"bbox": [
|
| 805 |
+
75,
|
| 806 |
+
667,
|
| 807 |
+
303,
|
| 808 |
+
683
|
| 809 |
+
],
|
| 810 |
+
"page_idx": 2
|
| 811 |
+
},
|
| 812 |
+
{
|
| 813 |
+
"type": "text",
|
| 814 |
+
"text": "To evaluate the overall performance of the proposed methods, we tested the system on four datasets and compared the results with those from the literature that used the same training dataset, as shown in Table I. Across all datasets, LSR+LSA consistently outperforms LSR alone and often ranks among the top performers, highlighting the effectiveness of integrating latent space refinement with latent space augmentation. To further enhance the results, we applied additional data augmentation, which led to EERs of $0.12\\%$ on 19LA, $1.05\\%$ on 21LA, $1.86\\%$ on 21DF, and $5.54\\%$ on ITW. This places our method on par with, or ahead of, the current state-of-the-art methods. Notably, our method focuses on refining and augmenting the latent space, which contrasts with recent approaches that focus on modifying the model architecture [28], [29]. These two strategies—latent space manipulation and architectural improvements—target different aspects of the problem and could potentially be combined for even better results. This highlights the flexibility and advantage of our method,",
|
| 815 |
+
"bbox": [
|
| 816 |
+
73,
|
| 817 |
+
686,
|
| 818 |
+
491,
|
| 819 |
+
910
|
| 820 |
+
],
|
| 821 |
+
"page_idx": 2
|
| 822 |
+
},
|
| 823 |
+
{
|
| 824 |
+
"type": "table",
|
| 825 |
+
"img_path": "images/361823158fd852d2f25031eb7e47ed85400665c3fce1bb3460f79f568725908f.jpg",
|
| 826 |
+
"table_caption": [
|
| 827 |
+
"TABLE I OVERALL PERFORMANCE COMPARISON IN EER(%) ACROSS MULTIPLE DATASETS. ALL SYSTEMS ARE TRAINED ON THE ASVSPOOF2019 LA TRAINING SET. BEST RESULTS ARE HIGHLIGHTED IN BOLD, AND SECOND-BEST RESULTS ARE UNDERLINED."
|
| 828 |
+
],
|
| 829 |
+
"table_footnote": [
|
| 830 |
+
"\\* with extra data augmentation."
|
| 831 |
+
],
|
| 832 |
+
"table_body": "<table><tr><td>System</td><td>19LA</td><td>21LA</td><td>21DF</td><td>ITW</td></tr><tr><td>WavLM+AttM [21]</td><td>0.65</td><td>3.50</td><td>3.19</td><td>-</td></tr><tr><td>Wav2Vec+LogReg [22]</td><td>0.50</td><td>-</td><td>-</td><td>7.20</td></tr><tr><td>WavLM+MFA [23]</td><td>0.42</td><td>5.08</td><td>2.56</td><td>-</td></tr><tr><td>Wav2Vec+VIB [24]</td><td>0.40</td><td>4.92</td><td>-</td><td>-</td></tr><tr><td>OCKD [25]</td><td>0.39</td><td>0.90</td><td>2.27</td><td>7.68</td></tr><tr><td>GFL-FAD [26]</td><td>0.25</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Wav2Vec+Linear [13]</td><td>0.22</td><td>3.63</td><td>3.65</td><td>16.17</td></tr><tr><td>OC+ACS [8]</td><td>0.17</td><td>1.30</td><td>2.19</td><td>-</td></tr><tr><td>Wav2Vec+AASIST [6]</td><td>-</td><td>0.82</td><td>2.85</td><td>-</td></tr><tr><td>Wav2Vec+AASIST2 [27]</td><td>0.15</td><td>1.61</td><td>2.77</td><td>-</td></tr><tr><td>Wav2vec+Conformer+TCM [28]</td><td>-</td><td>1.03</td><td>2.06</td><td>-</td></tr><tr><td>Wav2vec+STJ-GAT+BLDL* [29]</td><td>0.06</td><td>0.56</td><td>1.89</td><td>-</td></tr><tr><td>LSR</td><td>0.19</td><td>2.35</td><td>3.01</td><td>6.58</td></tr><tr><td>LSR+LSA</td><td>0.15</td><td>1.19</td><td>2.43</td><td>5.92</td></tr><tr><td>LSR+LSA*</td><td>0.12</td><td>1.05</td><td>1.86</td><td>5.54</td></tr></table>",
|
| 833 |
+
"bbox": [
|
| 834 |
+
506,
|
| 835 |
+
128,
|
| 836 |
+
913,
|
| 837 |
+
325
|
| 838 |
+
],
|
| 839 |
+
"page_idx": 2
|
| 840 |
+
},
|
| 841 |
+
{
|
| 842 |
+
"type": "text",
|
| 843 |
+
"text": "as it enhances generalization without needing to alter the underlying model architecture. In summary, the proposed LSR+LSA method consistently delivers strong results, matching or outperforming state-of-the-art performance across various datasets, demonstrating its robustness and effectiveness in generalizing across diverse deepfake detection tasks.",
|
| 844 |
+
"bbox": [
|
| 845 |
+
501,
|
| 846 |
+
361,
|
| 847 |
+
921,
|
| 848 |
+
444
|
| 849 |
+
],
|
| 850 |
+
"page_idx": 2
|
| 851 |
+
},
|
| 852 |
+
{
|
| 853 |
+
"type": "text",
|
| 854 |
+
"text": "C. Ablation Study on Latent Space Refinement",
|
| 855 |
+
"text_level": 1,
|
| 856 |
+
"bbox": [
|
| 857 |
+
504,
|
| 858 |
+
450,
|
| 859 |
+
794,
|
| 860 |
+
465
|
| 861 |
+
],
|
| 862 |
+
"page_idx": 2
|
| 863 |
+
},
|
| 864 |
+
{
|
| 865 |
+
"type": "table",
|
| 866 |
+
"img_path": "images/ed98abd8b0d10fb914a56007eab7dbb40d95cf1b30cfc66ec921738a860b8157.jpg",
|
| 867 |
+
"table_caption": [
|
| 868 |
+
"TABLE II EER $(\\%)$ ACROSS DATASETS FOR SYSTEMS TRAINED WITH DIFFERENT LOSS CONFIGURATIONS. BEST RESULTS ARE IN BOLD, AND SECOND-BEST RESULTS ARE UNDERlined."
|
| 869 |
+
],
|
| 870 |
+
"table_footnote": [],
|
| 871 |
+
"table_body": "<table><tr><td>Loss Configuration</td><td>19LA</td><td>21LA</td><td>21DF</td><td>ITW</td><td>Avg.</td></tr><tr><td>WCE</td><td>0.30</td><td>2.64</td><td>4.74</td><td>8.09</td><td>3.94</td></tr><tr><td>OC Softmax</td><td>0.31</td><td>1.60</td><td>4.06</td><td>7.86</td><td>3.46</td></tr><tr><td>LSR</td><td>0.23</td><td>1.55</td><td>3.22</td><td>7.45</td><td>3.11</td></tr><tr><td>w/o Linter</td><td>0.23</td><td>1.84</td><td>3.30</td><td>7.84</td><td>3.30</td></tr><tr><td>w/o Lintra</td><td>0.27</td><td>2.62</td><td>4.02</td><td>7.75</td><td>3.67</td></tr><tr><td>w/o Lintra, Linter</td><td>0.32</td><td>2.86</td><td>4.11</td><td>8.05</td><td>3.84</td></tr><tr><td>WCE+LSR</td><td>0.19</td><td>2.35</td><td>3.01</td><td>6.58</td><td>3.03</td></tr></table>",
|
| 872 |
+
"bbox": [
|
| 873 |
+
519,
|
| 874 |
+
527,
|
| 875 |
+
908,
|
| 876 |
+
633
|
| 877 |
+
],
|
| 878 |
+
"page_idx": 2
|
| 879 |
+
},
|
| 880 |
+
{
|
| 881 |
+
"type": "text",
|
| 882 |
+
"text": "Table II presents the performance of various loss configurations during training. The baseline configuration uses weighted cross entropy (WCE) loss for binary classification, with OC Softmax [7] included for comparison. Incorporating Latent Space Refinement (LSR) improves performance over both WCE and OC Softmax. We further examine the effects of LSR's loss terms. Removing inter-class regularization results in minimal degradation, indicating that the core prototype-based loss sufficiently handles prototype separation. However, removing intra-class regularization significantly reduces performance, as this term is crucial for maintaining prototype diversity within the spoof class and preventing collapse. When both regularizations are removed, performance drops to baseline levels. Additionally, combining LSR with WCE yields the best overall results. While WCE provides a solid foundation for binary classification, LSR refines the latent space to better capture variations in spoofed data. This combination leads to improved generalization across the datasets.",
|
| 883 |
+
"bbox": [
|
| 884 |
+
501,
|
| 885 |
+
643,
|
| 886 |
+
921,
|
| 887 |
+
878
|
| 888 |
+
],
|
| 889 |
+
"page_idx": 2
|
| 890 |
+
},
|
| 891 |
+
{
|
| 892 |
+
"type": "text",
|
| 893 |
+
"text": "Meanwhile, we evaluated the impact of the number of prototypes on performance, as shown in Fig. 3. Increasing the prototypes",
|
| 894 |
+
"bbox": [
|
| 895 |
+
503,
|
| 896 |
+
880,
|
| 897 |
+
921,
|
| 898 |
+
910
|
| 899 |
+
],
|
| 900 |
+
"page_idx": 2
|
| 901 |
+
},
|
| 902 |
+
{
|
| 903 |
+
"type": "image",
|
| 904 |
+
"img_path": "images/dac261f9c5cdc8ccb283f99008d833d302e4b5f1abc3ffbc3e4c96e208197e3a.jpg",
|
| 905 |
+
"image_caption": [
|
| 906 |
+
"Fig. 2. t-SNE visualization of the training dataset featuring various latent space augmentations. The green, blue, and red points represent the 2D projections of embeddings for the bonafide, spoof, and augmented spoof classes, respectively."
|
| 907 |
+
],
|
| 908 |
+
"image_footnote": [],
|
| 909 |
+
"bbox": [
|
| 910 |
+
80,
|
| 911 |
+
61,
|
| 912 |
+
243,
|
| 913 |
+
161
|
| 914 |
+
],
|
| 915 |
+
"page_idx": 3
|
| 916 |
+
},
|
| 917 |
+
{
|
| 918 |
+
"type": "image",
|
| 919 |
+
"img_path": "images/c1dfc7590531c522c9b14663ebc1ae10b873fd0057a54dc15f7c28f14be225a7.jpg",
|
| 920 |
+
"image_caption": [],
|
| 921 |
+
"image_footnote": [],
|
| 922 |
+
"bbox": [
|
| 923 |
+
246,
|
| 924 |
+
61,
|
| 925 |
+
411,
|
| 926 |
+
162
|
| 927 |
+
],
|
| 928 |
+
"page_idx": 3
|
| 929 |
+
},
|
| 930 |
+
{
|
| 931 |
+
"type": "image",
|
| 932 |
+
"img_path": "images/50abdce448f7070f901640551f8045cb387ee8f0c40b525d177a64973814839c.jpg",
|
| 933 |
+
"image_caption": [],
|
| 934 |
+
"image_footnote": [],
|
| 935 |
+
"bbox": [
|
| 936 |
+
416,
|
| 937 |
+
61,
|
| 938 |
+
581,
|
| 939 |
+
161
|
| 940 |
+
],
|
| 941 |
+
"page_idx": 3
|
| 942 |
+
},
|
| 943 |
+
{
|
| 944 |
+
"type": "image",
|
| 945 |
+
"img_path": "images/d2cc2e0c5a25ed1cb6eefa84be53bc8e9ec666db4e1731c62cf11024eb8a5c44.jpg",
|
| 946 |
+
"image_caption": [],
|
| 947 |
+
"image_footnote": [],
|
| 948 |
+
"bbox": [
|
| 949 |
+
583,
|
| 950 |
+
61,
|
| 951 |
+
750,
|
| 952 |
+
161
|
| 953 |
+
],
|
| 954 |
+
"page_idx": 3
|
| 955 |
+
},
|
| 956 |
+
{
|
| 957 |
+
"type": "image",
|
| 958 |
+
"img_path": "images/bf409bafe79e02057e0415779603e4bfabd7c9a50f2cd4524f99011536d0c2fc.jpg",
|
| 959 |
+
"image_caption": [],
|
| 960 |
+
"image_footnote": [],
|
| 961 |
+
"bbox": [
|
| 962 |
+
751,
|
| 963 |
+
61,
|
| 964 |
+
916,
|
| 965 |
+
161
|
| 966 |
+
],
|
| 967 |
+
"page_idx": 3
|
| 968 |
+
},
|
| 969 |
+
{
|
| 970 |
+
"type": "image",
|
| 971 |
+
"img_path": "images/27d7430f53e6a7bd1b58c898ac28f88017dc3a8b86e893b6065ac04cbc9be186.jpg",
|
| 972 |
+
"image_caption": [
|
| 973 |
+
"Fig. 3. The effect of the number of spoofed prototypes on EER $(\\%)$ across different datasets (21LA, 21DF, and ITW)."
|
| 974 |
+
],
|
| 975 |
+
"image_footnote": [],
|
| 976 |
+
"bbox": [
|
| 977 |
+
83,
|
| 978 |
+
222,
|
| 979 |
+
485,
|
| 980 |
+
328
|
| 981 |
+
],
|
| 982 |
+
"page_idx": 3
|
| 983 |
+
},
|
| 984 |
+
{
|
| 985 |
+
"type": "text",
|
| 986 |
+
"text": "from 1 to 8 improves performance, but further increasing to 16 shows diminishing returns. At 20 prototypes, performance declines, suggesting that too many prototypes can hinder generalization.",
|
| 987 |
+
"bbox": [
|
| 988 |
+
73,
|
| 989 |
+
385,
|
| 990 |
+
491,
|
| 991 |
+
428
|
| 992 |
+
],
|
| 993 |
+
"page_idx": 3
|
| 994 |
+
},
|
| 995 |
+
{
|
| 996 |
+
"type": "text",
|
| 997 |
+
"text": "D. Ablation Study on Latent Space Augmentation",
|
| 998 |
+
"text_level": 1,
|
| 999 |
+
"bbox": [
|
| 1000 |
+
75,
|
| 1001 |
+
434,
|
| 1002 |
+
379,
|
| 1003 |
+
448
|
| 1004 |
+
],
|
| 1005 |
+
"page_idx": 3
|
| 1006 |
+
},
|
| 1007 |
+
{
|
| 1008 |
+
"type": "table",
|
| 1009 |
+
"img_path": "images/38a13e2ebe09f7147e5002a7a8d3f61cab462376971afe5f69211fc056283232.jpg",
|
| 1010 |
+
"table_caption": [
|
| 1011 |
+
"TABLE III EER $(\\%)$ ACROSS DATASETS FOR SYSTEMS TRAINED WITH DIFFERENT LATENT SPACE AUGMENTATION. BEST RESULTS ARE IN BOLD, AND SECOND-BEST RESULTS ARE UNDERLINED."
|
| 1012 |
+
],
|
| 1013 |
+
"table_footnote": [],
|
| 1014 |
+
"table_body": "<table><tr><td>Method</td><td>19LA</td><td>21LA</td><td>21DF</td><td>ITW</td><td>Avg.</td></tr><tr><td>LSR</td><td>0.19</td><td>2.35</td><td>3.01</td><td>6.58</td><td>3.03</td></tr><tr><td>+LSA(AN)</td><td>0.16</td><td>1.67</td><td>2.85</td><td>6.17</td><td>2.71</td></tr><tr><td>+LSA(AT)</td><td>0.19</td><td>1.62</td><td>2.57</td><td>6.69</td><td>2.77</td></tr><tr><td>+LSA(BM)</td><td>0.21</td><td>1.65</td><td>2.86</td><td>6.61</td><td>2.93</td></tr><tr><td>+LSA(LI)</td><td>0.23</td><td>1.92</td><td>2.65</td><td>7.05</td><td>2.96</td></tr><tr><td>+LSA(LE)</td><td>0.18</td><td>1.52</td><td>2.54</td><td>6.15</td><td>2.60</td></tr><tr><td>+LSA(All)</td><td>0.15</td><td>1.19</td><td>2.43</td><td>5.92</td><td>2.42</td></tr></table>",
|
| 1015 |
+
"bbox": [
|
| 1016 |
+
114,
|
| 1017 |
+
512,
|
| 1018 |
+
452,
|
| 1019 |
+
619
|
| 1020 |
+
],
|
| 1021 |
+
"page_idx": 3
|
| 1022 |
+
},
|
| 1023 |
+
{
|
| 1024 |
+
"type": "text",
|
| 1025 |
+
"text": "To assess the impact of different latent space augmentation methods, we conducted experiments for each method, as summarized in Table III, and visualized their effects using t-SNE in Fig. 2. Notably, since LI and LE rely on LSR prototypes, all systems were trained with $\\mathrm{LSR + WCE}$ loss. Among the first three augmentations that are independent of the prototypes, AN and AT produced more dispersed and varied distributions, leading to better performance. In contrast, BM's distribution remained closer to the original due to its mixup nature, which limited its effectiveness. For the prototype-dependent augmentations, LI, while beneficial, underperformed compared to the others, likely due to the consistent generation of challenging examples. LE, however, achieved the best results, as it effectively expanded the distribution into new regions of the latent space, offering a more balanced diversity. Ultimately, combining all augmentation methods led to the most diverse latent space, resulting in the highest overall performance.",
|
| 1026 |
+
"bbox": [
|
| 1027 |
+
73,
|
| 1028 |
+
631,
|
| 1029 |
+
490,
|
| 1030 |
+
851
|
| 1031 |
+
],
|
| 1032 |
+
"page_idx": 3
|
| 1033 |
+
},
|
| 1034 |
+
{
|
| 1035 |
+
"type": "text",
|
| 1036 |
+
"text": "While we have demonstrated the effectiveness of augmentation in latent space, we were curious whether applying the same augmentations in the input space could yield comparable or even better results. To explore this, we conducted comparison experiments between",
|
| 1037 |
+
"bbox": [
|
| 1038 |
+
73,
|
| 1039 |
+
853,
|
| 1040 |
+
488,
|
| 1041 |
+
909
|
| 1042 |
+
],
|
| 1043 |
+
"page_idx": 3
|
| 1044 |
+
},
|
| 1045 |
+
{
|
| 1046 |
+
"type": "table",
|
| 1047 |
+
"img_path": "images/b2516c157fb05f8944307d37b177e910bbbccf4a1a1e544515fdb3d88d6db8be.jpg",
|
| 1048 |
+
"table_caption": [
|
| 1049 |
+
"TABLE IV COMPARISON OF AUGMENTATION EFFECTS IN INPUT VS. LATENT SPACE ACROSS DATSETS (EER $\\%$"
|
| 1050 |
+
],
|
| 1051 |
+
"table_footnote": [],
|
| 1052 |
+
"table_body": "<table><tr><td>Method</td><td>Space</td><td>19LA</td><td>21LA</td><td>21DF</td><td>ITW</td><td>Avg.</td></tr><tr><td>None</td><td>-</td><td>0.30</td><td>2.64</td><td>4.74</td><td>8.09</td><td>3.94</td></tr><tr><td>AN</td><td>input</td><td>0.25</td><td>2.22</td><td>3.17</td><td>6.35</td><td>3.00</td></tr><tr><td>AN</td><td>latent</td><td>0.23</td><td>2.05</td><td>2.84</td><td>6.21</td><td>2.83</td></tr><tr><td>AT</td><td>input</td><td>0.27</td><td>2.43</td><td>3.44</td><td>6.81</td><td>3.24</td></tr><tr><td>AT</td><td>latent</td><td>0.25</td><td>2.03</td><td>2.91</td><td>6.72</td><td>2.98</td></tr><tr><td>BM</td><td>input</td><td>0.19</td><td>2.24</td><td>3.01</td><td>6.33</td><td>2.94</td></tr><tr><td>BM</td><td>latent</td><td>0.19</td><td>2.21</td><td>2.95</td><td>6.56</td><td>2.98</td></tr></table>",
|
| 1053 |
+
"bbox": [
|
| 1054 |
+
535,
|
| 1055 |
+
268,
|
| 1056 |
+
890,
|
| 1057 |
+
375
|
| 1058 |
+
],
|
| 1059 |
+
"page_idx": 3
|
| 1060 |
+
},
|
| 1061 |
+
{
|
| 1062 |
+
"type": "text",
|
| 1063 |
+
"text": "augmentations applied in the input space versus the latent space, focusing on three methods that do not depend on latent prototypes or embeddings: AN, AT, and BM. All experiments were conducted using WCE loss without LSR. As shown in Table IV, applying augmentation, whether in the input or latent space, improves the baseline to some extent. For AN and AT, augmentations performed in the latent space consistently yield better results than those in the input space. This suggests that latent space augmentations may more effectively capture the underlying data distributions that the model needs to learn. Interestingly, BM yields better results when applied in the input space than in the latent space. This outcome may be attributed to the nature of Mixup augmentation, which has been widely proven effective in various audio-related tasks when performed on the input data. The input space BM likely benefits from preserving more of the original data characteristics while still introducing beneficial variability.",
|
| 1064 |
+
"bbox": [
|
| 1065 |
+
501,
|
| 1066 |
+
401,
|
| 1067 |
+
919,
|
| 1068 |
+
623
|
| 1069 |
+
],
|
| 1070 |
+
"page_idx": 3
|
| 1071 |
+
},
|
| 1072 |
+
{
|
| 1073 |
+
"type": "text",
|
| 1074 |
+
"text": "IV. CONCLUSIONS",
|
| 1075 |
+
"text_level": 1,
|
| 1076 |
+
"bbox": [
|
| 1077 |
+
651,
|
| 1078 |
+
628,
|
| 1079 |
+
772,
|
| 1080 |
+
641
|
| 1081 |
+
],
|
| 1082 |
+
"page_idx": 3
|
| 1083 |
+
},
|
| 1084 |
+
{
|
| 1085 |
+
"type": "text",
|
| 1086 |
+
"text": "This paper presents a novel approach to enhance the generalization of audio deepfake detection systems by integrating Latent Space Refinement (LSR) and Latent Space Augmentation (LSA). LSR introduces multiple learnable prototypes to better capture the complex intra-class variability of spoofed audio, while LSA generates diverse representations in the latent space, further strengthening the model's robustness. Extensive experiments on multiple datasets, including ASVspoof 2019 LA, ASVspoof 2021 LA, ASVspoof 2021 DF, and In-The-Wild, demonstrate that each of the proposed LSR and LSA can improve system significantly.",
|
| 1087 |
+
"bbox": [
|
| 1088 |
+
503,
|
| 1089 |
+
651,
|
| 1090 |
+
919,
|
| 1091 |
+
790
|
| 1092 |
+
],
|
| 1093 |
+
"page_idx": 3
|
| 1094 |
+
},
|
| 1095 |
+
{
|
| 1096 |
+
"type": "text",
|
| 1097 |
+
"text": "ACKNOWLEDGMENT",
|
| 1098 |
+
"text_level": 1,
|
| 1099 |
+
"bbox": [
|
| 1100 |
+
645,
|
| 1101 |
+
804,
|
| 1102 |
+
779,
|
| 1103 |
+
816
|
| 1104 |
+
],
|
| 1105 |
+
"page_idx": 3
|
| 1106 |
+
},
|
| 1107 |
+
{
|
| 1108 |
+
"type": "text",
|
| 1109 |
+
"text": "This work was partially supported by the National Natural Science Foundation of China (NSFC) under Grants 62122050 and 62071288, and the Shanghai Municipal Science and Technology Commission under Grant 2021SHZDZX0102. Additional support was provided by the Pioneer R&D Program of Zhejiang Province (No. 2024C01024) and the Ant Group Research Intern Program.",
|
| 1110 |
+
"bbox": [
|
| 1111 |
+
503,
|
| 1112 |
+
825,
|
| 1113 |
+
919,
|
| 1114 |
+
909
|
| 1115 |
+
],
|
| 1116 |
+
"page_idx": 3
|
| 1117 |
+
},
|
| 1118 |
+
{
|
| 1119 |
+
"type": "text",
|
| 1120 |
+
"text": "REFERENCES",
|
| 1121 |
+
"text_level": 1,
|
| 1122 |
+
"bbox": [
|
| 1123 |
+
240,
|
| 1124 |
+
61,
|
| 1125 |
+
326,
|
| 1126 |
+
73
|
| 1127 |
+
],
|
| 1128 |
+
"page_idx": 4
|
| 1129 |
+
},
|
| 1130 |
+
{
|
| 1131 |
+
"type": "list",
|
| 1132 |
+
"sub_type": "ref_text",
|
| 1133 |
+
"list_items": [
|
| 1134 |
+
"[1] Junichi Yamagishi, Xin Wang, Massimiliano Todisco, Md Sahidullah, Jose Patino, Andreas Nautsch, Xuechen Liu, Kong Aik Lee, Tomi Kinnunen, Nicholas Evans, et al., \"Asvspoof 2021: accelerating progress in spoofed and deepfake speech detection,\" in ASVspoof 2021 Workshop-Automatic Speaker Verification and Spoofing Coutermeasures Challenge, 2021.",
|
| 1135 |
+
"[2] Nicolas M Müller, Pavel Czempin, Franziska Dieckmann, Adam Froghyar, and Konstantin Bötttinger, “Does audio deepfake detection generalize?,” Interspeech, 2022.",
|
| 1136 |
+
"[3] Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, and Michael Auli, \"XIs-r: Self-supervised cross-lingual speech representation learning at scale,\" arXiv, vol. abs/2111.09296, 2021.",
|
| 1137 |
+
"[4] Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever, “Robust speech recognition via large-scale weak supervision,” in International conference on machine learning. PMLR, 2023, pp. 28492-28518.",
|
| 1138 |
+
"[5] Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, et al., \"Wavlm: Large-scale self-supervised pre-training for full stack speech processing,\" IEEE Journal of Selected Topics in Signal Processing, vol. 16, no. 6, pp. 1505-1518, 2022.",
|
| 1139 |
+
"[6] Hemlata Tak, Massimiliano Todisco, Xin Wang, Jee-weon Jung, Junichi Yamagishi, and Nicholas Evans, \"Automatic speaker verification spoofing and deepfake detection using wav2vec 2.0 and data augmentation,\" in The Speaker and Language Recognition Workshop, 2022.",
|
| 1140 |
+
"[7] You Zhang, Fei Jiang, and Zhiyao Duan, \"One-class learning towards synthetic voice spoofing detection,\" IEEE Signal Processing Letters, vol. 28, pp. 937-941, 2021.",
|
| 1141 |
+
"[8] Hyun Myung Kim, Kangwook Jang, and Hoirin Kim, “One-class learning with adaptive centroid shift for audio deepfake detection,” in Interspeech 2024, 2024, pp. 4853–4857.",
|
| 1142 |
+
"[9] Daniel S Park, William Chan, Yu Zhang, Chung-Cheng Chiu, Barret Zoph, Ekin D Cubuk, and Quoc V Le, \"Specaugment: A simple data augmentation method for automatic speech recognition,\" arXiv preprint arXiv:1904.08779, 2019.",
|
| 1143 |
+
"[10] Hemlata Tak, Madhu Kamble, Jose Patino, Massimiliano Todisco, and Nicholas Evans, \"Rawboost: A raw data boosting and augmentation method applied to automatic speaker verification anti-spoofing,\" in ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2022, pp. 6382-6386.",
|
| 1144 |
+
"[11] Linjuan Zhang, Kong Aik Lee, Lin Zhang, Longbiao Wang, and Baoning Niu, \"Cpaug: Refining copy-paste augmentation for speech antispoofing,\" in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 10996-11000.",
|
| 1145 |
+
"[12] Marcella ASTRID, Enjie GHORBEL, and Djamila AOUADA, “Targeted augmented data for audio deepfake detection,” in 32nd European Signal Processing Conference (EUSIPCO 2024), 2024.",
|
| 1146 |
+
"[13] Xin Wang and Junichi Yamagishi, \"Spoofed training data for speech spoofing countermeasure can be efficiently created using neural vocoders,\" in ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2023, pp. 1-5.",
|
| 1147 |
+
"[14] Xin Wang and Junichi Yamagishi, \"Can large-scale vocoded spoofed data improve speech spoofing countermeasure with a self-supervised front end?\", in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 10311-10315.",
|
| 1148 |
+
"[15] Xiaofeng Liu, Yang Zou, Lingsheng Kong, Zhihui Diao, Junliang Yan, Jun Wang, Site Li, Ping Jia, and Jane You, \"Data augmentation via latent space interpolation for image classification,\" in 2018 24th International Conference on Pattern Recognition (ICPR). IEEE, 2018, pp. 728-733.",
|
| 1149 |
+
"[16] Zhiyuan Yan, Yuhao Luo, Siwei Lyu, Qingshan Liu, and Baoyuan Wu, \"Transcending forgery specificity with latent space augmentation for generalizable deepfake detection,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 8984-8994.",
|
| 1150 |
+
"[17] Xin Wang, Junichi Yamagishi, Massimiliano Todisco, Héctor Delgado, Andreas Nautsch, Nicholas Evans, Md Sahidullah, Ville Vestman, Tomi Kinnunen, Kong Aik Lee, et al., \"Asvspoof 2019: A large-scale public"
|
| 1151 |
+
],
|
| 1152 |
+
"bbox": [
|
| 1153 |
+
76,
|
| 1154 |
+
84,
|
| 1155 |
+
491,
|
| 1156 |
+
909
|
| 1157 |
+
],
|
| 1158 |
+
"page_idx": 4
|
| 1159 |
+
},
|
| 1160 |
+
{
|
| 1161 |
+
"type": "list",
|
| 1162 |
+
"sub_type": "ref_text",
|
| 1163 |
+
"list_items": [
|
| 1164 |
+
"database of synthesized, converted and replayed speech,\" Computer Speech & Language, vol. 64, pp. 101114, 2020.",
|
| 1165 |
+
"[18] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou, \"Arcface: Additive angular margin loss for deep face recognition,\" in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019, pp. 4690-4699.",
|
| 1166 |
+
"[19] Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz, “mixup: Beyond empirical risk minimization,” arXiv preprint arXiv:1710.09412, 2017.",
|
| 1167 |
+
"[20] Jee-weon Jung, Hee-Soo Heo, Hemlata Tak, Hye-jin Shim, Joon Son Chung, Bong-Jin Lee, Ha-Jin Yu, and Nicholas Evans, “Aasist: Audio anti-spoofing using integrated spectro-temporal graph attention networks,” in ICASSP 2022-2022 IEEE international conference on acoustics, speech and signal processing (ICASSP). IEEE, 2022, pp. 6367-6371.",
|
| 1168 |
+
"[21] Zihan Pan, Tianchi Liu, Hardik B. Sailor, and Qiongqiong Wang, \"Attentive merging of hidden embeddings from pre-trained speech model for anti-spoofing detection,\" in Interspeech 2024, 2024, pp. 2090-2094.",
|
| 1169 |
+
"[22] Octavian Pascu, Adriana Stan, Dan Oneata, Elisaba Oneata, and Horia Cucu, \"Towards generalisable and calibrated audio deepfake detection with self-supervised representations,\" in Interspeech 2024, 2024, pp. 4828-4832.",
|
| 1170 |
+
"[23] Yinlin Guo, Haofan Huang, Xi Chen, He Zhao, and Yuehai Wang, \"Audio deepfake detection with self-supervised wavlm and multi-fusion attentive classifier,\" in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 12702-12706.",
|
| 1171 |
+
"[24] Youngsik Eom, Yeonghyeon Lee, Ji Sub Um, and Hoi Rin Kim, “Antispoofing using transfer learning with variational information bottleneck,” in Interspeech 2022, 2022, pp. 3568-3572.",
|
| 1172 |
+
"[25] Jingze Lu, Yuxiang Zhang, Wenchao Wang, Zengqiang Shang, and Pengyuan Zhang, \"One-class knowledge distillation for spoofing speech detection,\" in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 11251-11255.",
|
| 1173 |
+
"[26] Xiaopeng Wang, Ruibo Fu, Zhengqi Wen, Zhiyong Wang, Yuankun Xie, Yukun Liu, Jianhua Tao, Xuefei Liu, Yongwei Li, Xin Qi, Yi Lu, and Shuchen Shi, \"Genuine-focused learning using mask autoencoder for generalized fake audio detection,\" in Interspeech 2024, 2024, pp. 4848-4852.",
|
| 1174 |
+
"[27] Yuxiang Zhang, Jingze Lu, Zengqiang Shang, Wenchao Wang, and Pengyuan Zhang, \"Improving short utterance anti-spoofing with aassist2,\" in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 11636-11640.",
|
| 1175 |
+
"[28] Duc-Tuan Truong, Ruijie Tao, Tuan Nguyen, Hieu-Thi Luong, Kong Aik Lee, and Eng Siong Chng, “Temporal-channel modeling in multi-head self-attention for synthetic speech detection,” in Interspeech 2024, 2024, pp. 537–541.",
|
| 1176 |
+
"[29] Haochen Wu, Wu Guo, Zhentao Zhang, Wenting Zhao, Shengyu Peng, and Jie Zhang, \"Spoofing speech detection by modeling local spectro-temporal and long-term dependency,\" in Interspeech 2024, 2024, pp. 507-511."
|
| 1177 |
+
],
|
| 1178 |
+
"bbox": [
|
| 1179 |
+
506,
|
| 1180 |
+
61,
|
| 1181 |
+
919,
|
| 1182 |
+
650
|
| 1183 |
+
],
|
| 1184 |
+
"page_idx": 4
|
| 1185 |
+
}
|
| 1186 |
+
]
|
2501.14xxx/2501.14240/3d87a418-e222-4e41-ac34-ee2f21e49a0d_model.json
ADDED
|
@@ -0,0 +1,1497 @@
|
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "aside_text",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.023,
|
| 7 |
+
0.263,
|
| 8 |
+
0.058,
|
| 9 |
+
0.722
|
| 10 |
+
],
|
| 11 |
+
"angle": 270,
|
| 12 |
+
"content": "arXiv:2501.14240v1 [eess.AS] 24 Jan 2025"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "title",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.107,
|
| 18 |
+
0.062,
|
| 19 |
+
0.892,
|
| 20 |
+
0.121
|
| 21 |
+
],
|
| 22 |
+
"angle": 0,
|
| 23 |
+
"content": "Generalizable Audio Deepfake Detection via Latent Space Refinement and Augmentation"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.201,
|
| 29 |
+
0.138,
|
| 30 |
+
0.802,
|
| 31 |
+
0.155
|
| 32 |
+
],
|
| 33 |
+
"angle": 0,
|
| 34 |
+
"content": "Wen Huang\\(^{1,2}\\) Yanmei Gu\\(^{3}\\) Zhiming Wang\\(^{3}\\) Huijia Zhu\\(^{3}\\) Yanmin Qian\\(^{1\\dagger}\\)"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.178,
|
| 40 |
+
0.155,
|
| 41 |
+
0.826,
|
| 42 |
+
0.169
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "<sup>1</sup> Auditory Cognition and Computational Acoustics Lab, MoE Key Lab of Artificial Intelligence, AI Institute"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.194,
|
| 51 |
+
0.169,
|
| 52 |
+
0.805,
|
| 53 |
+
0.183
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": "Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China"
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.282,
|
| 62 |
+
0.183,
|
| 63 |
+
0.718,
|
| 64 |
+
0.197
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "\\(^{2}\\)SJTU Paris Elite Institute of Technology, \\(^{3}\\)Ant Group, Shanghai, China"
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.075,
|
| 73 |
+
0.229,
|
| 74 |
+
0.493,
|
| 75 |
+
0.411
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": "Abstract—Advances in speech synthesis technologies, like text-to-speech (TTS) and voice conversion (VC), have made detecting deepfake speech increasingly challenging. Spoofing countermeasures often struggle to generalize effectively, particularly when faced with unseen attacks. To address this, we propose a novel strategy that integrates Latent Space Refinement (LSR) and Latent Space Augmentation (LSA) to improve the generalization of deepfake detection systems. LSR introduces multiple learnable prototypes for the spoof class, refining the latent space to better capture the intricate variations within spoofed data. LSA further diversifies spoofed data representations by applying augmentation techniques directly in the latent space, enabling the model to learn a broader range of spoofing patterns. We evaluated our approach on four representative datasets, i.e. ASVspoof 2019 LA, ASVspoof 2021 LA and DF, and In-The-Wild. The results show that LSR and LSA perform well individually, and their integration achieves competitive results, matching or surpassing current state-of-the-art methods."
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.089,
|
| 84 |
+
0.411,
|
| 85 |
+
0.486,
|
| 86 |
+
0.424
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "Index Terms—audio deepfake detection, anti-spoofing, generalization"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "title",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.223,
|
| 95 |
+
0.436,
|
| 96 |
+
0.345,
|
| 97 |
+
0.449
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": "I. INTRODUCTION"
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.074,
|
| 106 |
+
0.453,
|
| 107 |
+
0.492,
|
| 108 |
+
0.566
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "With advancements in speech synthesis systems such as text-to-speech (TTS) and voice conversion (VC), detecting deepfake speech has become increasingly challenging. Synthesized data can originate from a wide range of synthesis systems, each with its own distinct characteristics, making it difficult for spoofing countermeasures to generalize effectively. This challenge is exacerbated when detectors encounter unseen deepfake attacks, often leading to significant performance degradation [1], [2]."
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.074,
|
| 117 |
+
0.566,
|
| 118 |
+
0.493,
|
| 119 |
+
0.775
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": "To enhance generalization in deepfake detectors, one key direction focuses on developing more robust classification models through improved architecture and learning strategies. Recent studies have utilized features extracted from self-supervised speech models such as Wav2vec [3], Whisper [4], and WavLM [5] as front-end inputs for deepfake detection. These models, trained on large-scale and diverse speech data, strengthen the detection process by providing reliable and domain-agnostic features [6]. Beyond improving feature extraction, researchers have also worked to improve the accuracy of back-end classifiers. Traditional binary classification methods often struggle with generalization, particularly when facing distribution mismatches. To address this, one-class learning approaches have been explored, focusing on creating a compact representation of bonafide speech while effectively pushing away spoofed speech, leading to a well-separated and more generalizable feature space [7], [8]."
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"bbox": [
|
| 127 |
+
0.074,
|
| 128 |
+
0.774,
|
| 129 |
+
0.493,
|
| 130 |
+
0.886
|
| 131 |
+
],
|
| 132 |
+
"angle": 0,
|
| 133 |
+
"content": "Another promising direction is through data augmentation, which enhances the robustness of the model by exposing it to a wider range of data variations during training. Traditional techniques such as speed perturbation, SpecAugment [9], and codec augmentation have been shown to improve performance. More recent methods, such as Rawboost [10], use signal processing techniques to boost or distort raw audio, leading to significant improvements. There are also augmentation strategies specifically designed for audio deepfake"
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"bbox": [
|
| 138 |
+
0.503,
|
| 139 |
+
0.228,
|
| 140 |
+
0.921,
|
| 141 |
+
0.312
|
| 142 |
+
],
|
| 143 |
+
"angle": 0,
|
| 144 |
+
"content": "detection. For instance, CpAug [11] employs a copy-paste strategy to generate diverse training samples, while targeted augmentation methods [12] create pseudo-fakes that challenge the decision boundary, thereby increasing the diversity of fake samples. Furthermore, research has shown that using neural vocoders to augment data can further enhance detection performance [13], [14]."
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "text",
|
| 148 |
+
"bbox": [
|
| 149 |
+
0.503,
|
| 150 |
+
0.312,
|
| 151 |
+
0.922,
|
| 152 |
+
0.534
|
| 153 |
+
],
|
| 154 |
+
"angle": 0,
|
| 155 |
+
"content": "Building on these two key directions, we propose a novel strategy of integrating latent space refinement and augmentation to further boost the generalization ability of deepfake detection, as shown in Fig. 1. First, to address the limitations of binary classification in capturing the diverse nature of spoofed audio, we introduce Latent Space Refinement (LSR). In binary classification, models typically assign a single prototype to each class, which oversimplifies the complex variability within spoofed audio. While one-class learning tries to address this by compactly representing the bonafide class and treating others as outliers, it often imposes a rigid boundary that fails to capture the diversity in spoofed data. In contrast, our LSR approach introduces multiple learnable prototypes specifically for the spoof class, refining the latent space to better model the intricate variations within spoofed data. This enhanced representation reduces intra-class variability and allows the model to generalize more effectively across different spoofing attacks."
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"type": "text",
|
| 159 |
+
"bbox": [
|
| 160 |
+
0.503,
|
| 161 |
+
0.534,
|
| 162 |
+
0.922,
|
| 163 |
+
0.701
|
| 164 |
+
],
|
| 165 |
+
"angle": 0,
|
| 166 |
+
"content": "Second, to further enhance generalization, we apply Latent Space Augmentation (LSA) to diversify spoofed data representations, inspired by successful applications in computer vision [15], [16]. Unlike traditional data augmentation, which focuses on manipulating input data, LSA directly targets the latent space, allowing it to be independent of specific audio-level operations. By applying techniques such as additive noise, affine transformation, batch mixup, and linear interpolation and extrapolation, LSA generates a wide range of spoofed examples that expand the latent space. This expansion helps the model capture more diverse patterns within spoofed data, thereby improving its ability to generalize across different spoofing attacks and enhancing overall detection performance."
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"type": "text",
|
| 170 |
+
"bbox": [
|
| 171 |
+
0.503,
|
| 172 |
+
0.7,
|
| 173 |
+
0.922,
|
| 174 |
+
0.811
|
| 175 |
+
],
|
| 176 |
+
"angle": 0,
|
| 177 |
+
"content": "Our experimental results confirm the effectiveness of the proposed latent space refinement and augmentation. We evaluated the approach on four representative datasets: ASVspoof 2019 LA [17], ASVspoof 2021 LA and DF [1], and In-The-Wild [2]. The findings show that both LSR and LSA individually contribute to performance improvements, with the integrated system achieving competitive results, matching or surpassing the current state-of-the-art across these diverse benchmarks."
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"type": "title",
|
| 181 |
+
"bbox": [
|
| 182 |
+
0.667,
|
| 183 |
+
0.819,
|
| 184 |
+
0.76,
|
| 185 |
+
0.832
|
| 186 |
+
],
|
| 187 |
+
"angle": 0,
|
| 188 |
+
"content": "II. METHODS"
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"type": "title",
|
| 192 |
+
"bbox": [
|
| 193 |
+
0.504,
|
| 194 |
+
0.837,
|
| 195 |
+
0.681,
|
| 196 |
+
0.851
|
| 197 |
+
],
|
| 198 |
+
"angle": 0,
|
| 199 |
+
"content": "A. Latent Space Refinement"
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"type": "text",
|
| 203 |
+
"bbox": [
|
| 204 |
+
0.504,
|
| 205 |
+
0.853,
|
| 206 |
+
0.922,
|
| 207 |
+
0.911
|
| 208 |
+
],
|
| 209 |
+
"angle": 0,
|
| 210 |
+
"content": "To capture the inherent variations within the spoof class, we introduce multiple learnable prototypes that refine the latent distribution. Assume there are \\(K\\) prototypes for each class, denoted as \\(\\{c_1,\\dots ,c_K\\}\\). For the bonafide class, \\(K = 1\\), while for the spoof"
|
| 211 |
+
},
|
| 212 |
+
{
|
| 213 |
+
"type": "page_footnote",
|
| 214 |
+
"bbox": [
|
| 215 |
+
0.089,
|
| 216 |
+
0.895,
|
| 217 |
+
0.222,
|
| 218 |
+
0.91
|
| 219 |
+
],
|
| 220 |
+
"angle": 0,
|
| 221 |
+
"content": "† Corresponding author"
|
| 222 |
+
}
|
| 223 |
+
],
|
| 224 |
+
[
|
| 225 |
+
{
|
| 226 |
+
"type": "image",
|
| 227 |
+
"bbox": [
|
| 228 |
+
0.078,
|
| 229 |
+
0.062,
|
| 230 |
+
0.441,
|
| 231 |
+
0.197
|
| 232 |
+
],
|
| 233 |
+
"angle": 0,
|
| 234 |
+
"content": null
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "image",
|
| 238 |
+
"bbox": [
|
| 239 |
+
0.447,
|
| 240 |
+
0.062,
|
| 241 |
+
0.621,
|
| 242 |
+
0.197
|
| 243 |
+
],
|
| 244 |
+
"angle": 0,
|
| 245 |
+
"content": null
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "image",
|
| 249 |
+
"bbox": [
|
| 250 |
+
0.627,
|
| 251 |
+
0.062,
|
| 252 |
+
0.798,
|
| 253 |
+
0.197
|
| 254 |
+
],
|
| 255 |
+
"angle": 0,
|
| 256 |
+
"content": null
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "image",
|
| 260 |
+
"bbox": [
|
| 261 |
+
0.808,
|
| 262 |
+
0.062,
|
| 263 |
+
0.916,
|
| 264 |
+
0.08
|
| 265 |
+
],
|
| 266 |
+
"angle": 0,
|
| 267 |
+
"content": null
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "image",
|
| 271 |
+
"bbox": [
|
| 272 |
+
0.809,
|
| 273 |
+
0.084,
|
| 274 |
+
0.904,
|
| 275 |
+
0.098
|
| 276 |
+
],
|
| 277 |
+
"angle": 0,
|
| 278 |
+
"content": null
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "image",
|
| 282 |
+
"bbox": [
|
| 283 |
+
0.81,
|
| 284 |
+
0.101,
|
| 285 |
+
0.908,
|
| 286 |
+
0.114
|
| 287 |
+
],
|
| 288 |
+
"angle": 0,
|
| 289 |
+
"content": null
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "image",
|
| 293 |
+
"bbox": [
|
| 294 |
+
0.81,
|
| 295 |
+
0.119,
|
| 296 |
+
0.894,
|
| 297 |
+
0.132
|
| 298 |
+
],
|
| 299 |
+
"angle": 0,
|
| 300 |
+
"content": null
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "image",
|
| 304 |
+
"bbox": [
|
| 305 |
+
0.811,
|
| 306 |
+
0.137,
|
| 307 |
+
0.901,
|
| 308 |
+
0.158
|
| 309 |
+
],
|
| 310 |
+
"angle": 0,
|
| 311 |
+
"content": null
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "image",
|
| 315 |
+
"bbox": [
|
| 316 |
+
0.811,
|
| 317 |
+
0.163,
|
| 318 |
+
0.897,
|
| 319 |
+
0.176
|
| 320 |
+
],
|
| 321 |
+
"angle": 0,
|
| 322 |
+
"content": null
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "image",
|
| 326 |
+
"bbox": [
|
| 327 |
+
0.811,
|
| 328 |
+
0.181,
|
| 329 |
+
0.906,
|
| 330 |
+
0.193
|
| 331 |
+
],
|
| 332 |
+
"angle": 0,
|
| 333 |
+
"content": null
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "image_caption",
|
| 337 |
+
"bbox": [
|
| 338 |
+
0.105,
|
| 339 |
+
0.207,
|
| 340 |
+
0.89,
|
| 341 |
+
0.221
|
| 342 |
+
],
|
| 343 |
+
"angle": 0,
|
| 344 |
+
"content": "Fig. 1. The pipeline of the proposed method, illustrating the process of Latent Space Refinement (LSR) and Latent Space Augmentation (LSA)."
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
"type": "text",
|
| 348 |
+
"bbox": [
|
| 349 |
+
0.074,
|
| 350 |
+
0.244,
|
| 351 |
+
0.492,
|
| 352 |
+
0.3
|
| 353 |
+
],
|
| 354 |
+
"angle": 0,
|
| 355 |
+
"content": "class, \\( K \\) is a hyperparameter chosen based on the complexity of the data. To determine the probability of a sample \\( x \\) belonging to a particular class, we compute the maximum cosine similarity between its embedding \\( z \\) and each of the class prototypes:"
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"type": "equation",
|
| 359 |
+
"bbox": [
|
| 360 |
+
0.177,
|
| 361 |
+
0.306,
|
| 362 |
+
0.492,
|
| 363 |
+
0.345
|
| 364 |
+
],
|
| 365 |
+
"angle": 0,
|
| 366 |
+
"content": "\\[\n\\cos \\theta = \\sum_ {i = 1} ^ {K} \\frac {e ^ {\\langle c _ {i} , z \\rangle \\cdot \\gamma}}{\\sum_ {j = 1} ^ {K} e ^ {\\langle c _ {i} , z \\rangle \\cdot \\gamma}} \\langle c _ {i}, z \\rangle \\tag {1}\n\\]"
|
| 367 |
+
},
|
| 368 |
+
{
|
| 369 |
+
"type": "text",
|
| 370 |
+
"bbox": [
|
| 371 |
+
0.075,
|
| 372 |
+
0.348,
|
| 373 |
+
0.491,
|
| 374 |
+
0.405
|
| 375 |
+
],
|
| 376 |
+
"angle": 0,
|
| 377 |
+
"content": "where \\(\\langle x,y\\rangle = \\frac{x\\cdot y}{\\|x\\|\\|y\\|}\\) represents the cosine similarity between two vectors, and \\(\\gamma\\) is the scaling factor, set to 10. We smooth the maximum operator using a softmax-like operation to prevent sensitivity between multiple prototypes."
|
| 378 |
+
},
|
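A direct PyTorch transcription of Eq. (1), assuming row-wise embeddings and a (K, D) prototype matrix per class (a sketch, not the authors' code):

```python
import torch
import torch.nn.functional as F

def smoothed_max_cos(z: torch.Tensor, protos: torch.Tensor, gamma: float = 10.0):
    """Eq. (1): softmax-smoothed maximum cosine similarity between a batch
    of embeddings z (B, D) and one class's prototypes (K, D)."""
    sims = F.normalize(z, dim=-1) @ F.normalize(protos, dim=-1).T   # (B, K)
    weights = torch.softmax(gamma * sims, dim=-1)                   # smooth max
    return (weights * sims).sum(dim=-1)                             # (B,)
```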
| 379 |
+
{
|
| 380 |
+
"type": "text",
|
| 381 |
+
"bbox": [
|
| 382 |
+
0.074,
|
| 383 |
+
0.405,
|
| 384 |
+
0.492,
|
| 385 |
+
0.446
|
| 386 |
+
],
|
| 387 |
+
"angle": 0,
|
| 388 |
+
"content": "To guide the learning of these prototypes, we design a prototype-based classification loss, inspired by the additive angular margin loss [18]:"
|
| 389 |
+
},
|
| 390 |
+
{
|
| 391 |
+
"type": "equation",
|
| 392 |
+
"bbox": [
|
| 393 |
+
0.137,
|
| 394 |
+
0.45,
|
| 395 |
+
0.491,
|
| 396 |
+
0.482
|
| 397 |
+
],
|
| 398 |
+
"angle": 0,
|
| 399 |
+
"content": "\\[\n\\mathcal {L} _ {\\text {p r o t o}} (z) = - \\log \\frac {e ^ {s \\left(\\cos \\left(\\theta_ {y} + m\\right)\\right)}}{e ^ {s \\left(\\cos \\left(\\theta_ {y} + m\\right)\\right)} + e ^ {s \\left(\\cos \\theta_ {1 - y}\\right)}} \\tag {2}\n\\]"
|
| 400 |
+
},
|
| 401 |
+
{
|
| 402 |
+
"type": "text",
|
| 403 |
+
"bbox": [
|
| 404 |
+
0.075,
|
| 405 |
+
0.487,
|
| 406 |
+
0.492,
|
| 407 |
+
0.557
|
| 408 |
+
],
|
| 409 |
+
"angle": 0,
|
| 410 |
+
"content": "Here, \\( y \\in \\{0,1\\} \\) is the label of sample \\( x \\), \\( m \\) is an angular margin penalty, and \\( s \\) is a scaling factor. This loss function encourages the model to push the embeddings of genuine samples closer to the bonafide prototype and spoofed samples closer to their corresponding prototypes."
|
| 411 |
+
},
|
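A sketch of Eq. (2) on top of the similarity function above, using log-sum-exp for numerical stability (our choice, not stated in the paper):

```python
import torch

def proto_loss(cos_y: torch.Tensor, cos_other: torch.Tensor,
               s: float = 32.0, m: float = 0.2) -> torch.Tensor:
    """Eq. (2): additive-angular-margin loss. cos_y / cos_other are the
    Eq. (1) similarities to the true and opposite class prototypes."""
    theta_y = torch.acos(cos_y.clamp(-1 + 1e-7, 1 - 1e-7))
    logit_y = s * torch.cos(theta_y + m)      # margin-penalised true class
    logit_o = s * cos_other                   # opposite class, no margin
    return -(logit_y - torch.logaddexp(logit_y, logit_o)).mean()
```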
| 412 |
+
{
|
| 413 |
+
"type": "text",
|
| 414 |
+
"bbox": [
|
| 415 |
+
0.075,
|
| 416 |
+
0.557,
|
| 417 |
+
0.492,
|
| 418 |
+
0.6
|
| 419 |
+
],
|
| 420 |
+
"angle": 0,
|
| 421 |
+
"content": "While prototypes are learned during the training process, there's a risk that they may collapse to a single center. To mitigate this, we introduce an intra-class regularization for the spoof prototypes \\(\\{c^s\\}\\):"
|
| 422 |
+
},
|
| 423 |
+
{
|
| 424 |
+
"type": "equation",
|
| 425 |
+
"bbox": [
|
| 426 |
+
0.141,
|
| 427 |
+
0.604,
|
| 428 |
+
0.491,
|
| 429 |
+
0.643
|
| 430 |
+
],
|
| 431 |
+
"angle": 0,
|
| 432 |
+
"content": "\\[\n\\mathcal {L} _ {\\text {i n t r a}} \\left(\\left\\{c ^ {s} \\right\\}\\right) = \\frac {2}{K (K - 1)} \\sum_ {i = 1} ^ {K - 1} \\sum_ {j = i + 1} ^ {K} \\left\\langle c _ {i} ^ {s}, c _ {j} ^ {s} \\right\\rangle \\tag {3}\n\\]"
|
| 433 |
+
},
|
| 434 |
+
{
|
| 435 |
+
"type": "text",
|
| 436 |
+
"bbox": [
|
| 437 |
+
0.075,
|
| 438 |
+
0.648,
|
| 439 |
+
0.49,
|
| 440 |
+
0.69
|
| 441 |
+
],
|
| 442 |
+
"angle": 0,
|
| 443 |
+
"content": "This regularization term calculates the mean similarity between the spoof prototypes, encouraging them to spread out in the latent space, thereby preventing prototype collapse."
|
| 444 |
+
},
|
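Eq. (3) in PyTorch; note that the mean over the upper triangle equals the \(2/(K(K-1))\) normalisation (again a sketch under the same shape assumptions):

```python
import torch
import torch.nn.functional as F

def intra_reg(spoof_protos: torch.Tensor) -> torch.Tensor:
    """Eq. (3): mean pairwise cosine similarity among the K spoof prototypes.
    Minimising it spreads the prototypes out and prevents collapse."""
    c = F.normalize(spoof_protos, dim=-1)        # (K, D)
    sims = c @ c.T                               # (K, K)
    K = c.shape[0]
    iu = torch.triu_indices(K, K, offset=1)      # pairs with i < j
    return sims[iu[0], iu[1]].mean()
```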
| 445 |
+
{
|
| 446 |
+
"type": "text",
|
| 447 |
+
"bbox": [
|
| 448 |
+
0.075,
|
| 449 |
+
0.691,
|
| 450 |
+
0.492,
|
| 451 |
+
0.747
|
| 452 |
+
],
|
| 453 |
+
"angle": 0,
|
| 454 |
+
"content": "To further enhance the distinction between spoof and bonafide prototypes, we introduce an inter-class regularization term. This term calculates the smoothed maximum cosine similarity between the spoof prototypes \\(\\{c^s\\}\\) and the single bonafide prototype \\(c^b\\):"
|
| 455 |
+
},
|
| 456 |
+
{
|
| 457 |
+
"type": "equation",
|
| 458 |
+
"bbox": [
|
| 459 |
+
0.125,
|
| 460 |
+
0.752,
|
| 461 |
+
0.49,
|
| 462 |
+
0.79
|
| 463 |
+
],
|
| 464 |
+
"angle": 0,
|
| 465 |
+
"content": "\\[\n\\mathcal {L} _ {i n t e r} \\left(\\left\\{c ^ {s} \\right\\}, c ^ {b}\\right) = \\delta + \\sum_ {i = 1} ^ {K} \\frac {e ^ {\\langle c _ {i} ^ {s} , c ^ {b} \\rangle \\cdot \\gamma}}{\\sum_ {j = 1} ^ {K} e ^ {\\langle c _ {i} ^ {s} , c ^ {b} \\rangle \\cdot \\gamma}} \\left\\langle c _ {i} ^ {s}, c ^ {b} \\right\\rangle \\tag {4}\n\\]"
|
| 466 |
+
},
|
| 467 |
+
{
|
| 468 |
+
"type": "text",
|
| 469 |
+
"bbox": [
|
| 470 |
+
0.075,
|
| 471 |
+
0.795,
|
| 472 |
+
0.49,
|
| 473 |
+
0.822
|
| 474 |
+
],
|
| 475 |
+
"angle": 0,
|
| 476 |
+
"content": "here \\(\\delta\\) is a regularization coefficient that prevents the loss from becoming negative."
|
| 477 |
+
},
|
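Eq. (4) as a sketch, reusing the smoothed-max pattern of Eq. (1):

```python
import torch
import torch.nn.functional as F

def inter_reg(spoof_protos: torch.Tensor, bona_proto: torch.Tensor,
              gamma: float = 10.0, delta: float = 0.2) -> torch.Tensor:
    """Eq. (4): delta plus the smoothed max cosine similarity between the
    spoof prototypes (K, D) and the single bonafide prototype (D,)."""
    sims = F.normalize(spoof_protos, dim=-1) @ F.normalize(bona_proto, dim=-1)
    weights = torch.softmax(gamma * sims, dim=0)   # smooth max over K
    return delta + (weights * sims).sum()
```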
| 478 |
+
{
|
| 479 |
+
"type": "text",
|
| 480 |
+
"bbox": [
|
| 481 |
+
0.089,
|
| 482 |
+
0.823,
|
| 483 |
+
0.49,
|
| 484 |
+
0.837
|
| 485 |
+
],
|
| 486 |
+
"angle": 0,
|
| 487 |
+
"content": "Hence, the overall objective function for LSR is defined as follows:"
|
| 488 |
+
},
|
| 489 |
+
{
|
| 490 |
+
"type": "equation",
|
| 491 |
+
"bbox": [
|
| 492 |
+
0.174,
|
| 493 |
+
0.845,
|
| 494 |
+
0.49,
|
| 495 |
+
0.86
|
| 496 |
+
],
|
| 497 |
+
"angle": 0,
|
| 498 |
+
"content": "\\[\n\\mathcal {L} _ {L S R} = \\mathcal {L} _ {\\text {p r o t o}} + \\mathcal {L} _ {\\text {i n t r a}} + \\mathcal {L} _ {\\text {i n t e r}} \\tag {5}\n\\]"
|
| 499 |
+
},
|
| 500 |
+
{
|
| 501 |
+
"type": "text",
|
| 502 |
+
"bbox": [
|
| 503 |
+
0.075,
|
| 504 |
+
0.867,
|
| 505 |
+
0.49,
|
| 506 |
+
0.909
|
| 507 |
+
],
|
| 508 |
+
"angle": 0,
|
| 509 |
+
"content": "In addition, the LSR loss can be incorporated alongside a binary classification loss, such as Weighted Cross Entropy (WCE), to refine the latent distribution and reduce intra-class variance."
|
| 510 |
+
},
|
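Putting the pieces together, a sketch of the combined WCE+LSR objective, reusing the helper sketches above (equal weighting of the LSR terms follows Eq. (5); adding WCE on top mirrors the paper's WCE+LSR configuration):

```python
# proto_loss, intra_reg and inter_reg are the sketches given earlier;
# logits/labels feed a standard weighted cross entropy criterion `wce`.
def wce_plus_lsr(cos_y, cos_other, spoof_protos, bona_proto,
                 logits, labels, wce):
    l_lsr = (proto_loss(cos_y, cos_other)          # Eq. (2)
             + intra_reg(spoof_protos)             # Eq. (3)
             + inter_reg(spoof_protos, bona_proto))  # Eq. (4)
    return wce(logits, labels) + l_lsr             # WCE + Eq. (5)
```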
| 511 |
+
{
|
| 512 |
+
"type": "title",
|
| 513 |
+
"bbox": [
|
| 514 |
+
0.505,
|
| 515 |
+
0.245,
|
| 516 |
+
0.696,
|
| 517 |
+
0.259
|
| 518 |
+
],
|
| 519 |
+
"angle": 0,
|
| 520 |
+
"content": "B. Latent Space Augmentation"
|
| 521 |
+
},
|
| 522 |
+
{
|
| 523 |
+
"type": "text",
|
| 524 |
+
"bbox": [
|
| 525 |
+
0.503,
|
| 526 |
+
0.263,
|
| 527 |
+
0.922,
|
| 528 |
+
0.375
|
| 529 |
+
],
|
| 530 |
+
"angle": 0,
|
| 531 |
+
"content": "While multi-prototypical refinement enhances the representation of the spoofed class, further generalization can be achieved by augmenting the diversity of the training data. Instead of solely augmenting raw input data, we apply augmentation directly in the latent space, where lower dimensionality allows for more targeted variations. By focusing these augmentations on spoofed latent features, we generate diverse spoofing variations. Notably, these augmentations are not applied to bonafide latent features, preserving their authenticity."
|
| 532 |
+
},
|
| 533 |
+
{
|
| 534 |
+
"type": "text",
|
| 535 |
+
"bbox": [
|
| 536 |
+
0.503,
|
| 537 |
+
0.375,
|
| 538 |
+
0.921,
|
| 539 |
+
0.43
|
| 540 |
+
],
|
| 541 |
+
"angle": 0,
|
| 542 |
+
"content": "Given \\( z \\) a batch of embeddings, we denote the spoof embeddings in this batch as \\( z^s \\) and the bonafide embeddings as \\( z^b \\). To create diverse variations of spoof embeddings, we design five latent augmentation patterns for \\( z^s \\):"
|
| 543 |
+
},
|
| 544 |
+
{
|
| 545 |
+
"type": "text",
|
| 546 |
+
"bbox": [
|
| 547 |
+
0.504,
|
| 548 |
+
0.431,
|
| 549 |
+
0.921,
|
| 550 |
+
0.472
|
| 551 |
+
],
|
| 552 |
+
"angle": 0,
|
| 553 |
+
"content": "Additive Noise (AN). A simple yet efficient idea is to add random perturbation to latent features. Here we apply the additive noise drawn from a Gaussian distribution as follows:"
|
| 554 |
+
},
|
| 555 |
+
{
|
| 556 |
+
"type": "equation",
|
| 557 |
+
"bbox": [
|
| 558 |
+
0.617,
|
| 559 |
+
0.481,
|
| 560 |
+
0.921,
|
| 561 |
+
0.498
|
| 562 |
+
],
|
| 563 |
+
"angle": 0,
|
| 564 |
+
"content": "\\[\n\\hat {z} ^ {s} = z ^ {s} + \\beta \\cdot X, X \\sim \\mathcal {N} (0, \\mathbf {I}) \\tag {6}\n\\]"
|
| 565 |
+
},
|
| 566 |
+
{
|
| 567 |
+
"type": "text",
|
| 568 |
+
"bbox": [
|
| 569 |
+
0.504,
|
| 570 |
+
0.506,
|
| 571 |
+
0.92,
|
| 572 |
+
0.534
|
| 573 |
+
],
|
| 574 |
+
"angle": 0,
|
| 575 |
+
"content": "where \\(\\mathcal{N}(0,\\mathbf{I})\\) is the standard normal distribution, \\(\\mathbf{I}\\) is the identity matrix, and \\(\\beta\\) is a scaling factor sampled from \\(\\mathcal{N}(0,1)\\)."
|
| 576 |
+
},
|
| 577 |
+
{
|
| 578 |
+
"type": "text",
|
| 579 |
+
"bbox": [
|
| 580 |
+
0.504,
|
| 581 |
+
0.535,
|
| 582 |
+
0.921,
|
| 583 |
+
0.562
|
| 584 |
+
],
|
| 585 |
+
"angle": 0,
|
| 586 |
+
"content": "Affine Transformation (AT). This common transformation for 1D vectors involves scaling and translating the latent features:"
|
| 587 |
+
},
|
| 588 |
+
{
|
| 589 |
+
"type": "equation",
|
| 590 |
+
"bbox": [
|
| 591 |
+
0.665,
|
| 592 |
+
0.571,
|
| 593 |
+
0.921,
|
| 594 |
+
0.587
|
| 595 |
+
],
|
| 596 |
+
"angle": 0,
|
| 597 |
+
"content": "\\[\n\\hat {z} ^ {s} = a \\cdot z ^ {s} + b \\tag {7}\n\\]"
|
| 598 |
+
},
|
| 599 |
+
{
|
| 600 |
+
"type": "text",
|
| 601 |
+
"bbox": [
|
| 602 |
+
0.504,
|
| 603 |
+
0.596,
|
| 604 |
+
0.842,
|
| 605 |
+
0.61
|
| 606 |
+
],
|
| 607 |
+
"angle": 0,
|
| 608 |
+
"content": "where \\(a\\) is sampled from \\(\\mathcal{U}(0.9,1.1)\\) and \\(b\\) is set to 0."
|
| 609 |
+
},
|
| 610 |
+
{
|
| 611 |
+
"type": "text",
|
| 612 |
+
"bbox": [
|
| 613 |
+
0.504,
|
| 614 |
+
0.61,
|
| 615 |
+
0.921,
|
| 616 |
+
0.652
|
| 617 |
+
],
|
| 618 |
+
"angle": 0,
|
| 619 |
+
"content": "Batch Mixup (BM). Inspired by data mixup strategies [19], we create new latent features by blending pairs of spoof features in the batch, creating smoother transitions and intermediate variations:"
|
| 620 |
+
},
|
| 621 |
+
{
|
| 622 |
+
"type": "equation",
|
| 623 |
+
"bbox": [
|
| 624 |
+
0.623,
|
| 625 |
+
0.66,
|
| 626 |
+
0.921,
|
| 627 |
+
0.677
|
| 628 |
+
],
|
| 629 |
+
"angle": 0,
|
| 630 |
+
"content": "\\[\nz _ {i} ^ {s} = \\alpha \\cdot z _ {i} ^ {s} + (1 - \\alpha) \\cdot z _ {\\pi (i)} ^ {s} \\tag {8}\n\\]"
|
| 631 |
+
},
|
| 632 |
+
{
|
| 633 |
+
"type": "text",
|
| 634 |
+
"bbox": [
|
| 635 |
+
0.503,
|
| 636 |
+
0.685,
|
| 637 |
+
0.921,
|
| 638 |
+
0.726
|
| 639 |
+
],
|
| 640 |
+
"angle": 0,
|
| 641 |
+
"content": "where \\(i\\) indexes the batch, \\(\\pi\\) denotes a random permutation of the batch indices and \\(\\alpha\\) is a mixup coefficient sampled from Beta(0.5, 0.5)."
|
| 642 |
+
},
|
| 643 |
+
{
|
| 644 |
+
"type": "text",
|
| 645 |
+
"bbox": [
|
| 646 |
+
0.504,
|
| 647 |
+
0.727,
|
| 648 |
+
0.92,
|
| 649 |
+
0.754
|
| 650 |
+
],
|
| 651 |
+
"angle": 0,
|
| 652 |
+
"content": "The following two techniques rely on the prototypes learned in latent space refinement:"
|
| 653 |
+
},
|
| 654 |
+
{
|
| 655 |
+
"type": "text",
|
| 656 |
+
"bbox": [
|
| 657 |
+
0.503,
|
| 658 |
+
0.755,
|
| 659 |
+
0.921,
|
| 660 |
+
0.84
|
| 661 |
+
],
|
| 662 |
+
"angle": 0,
|
| 663 |
+
"content": "Linear Interpolation (LI). To create more challenging examples targeting the decision boundary, we perform linear interpolation on spoof embeddings towards bonafide prototype \\( c^b \\). Since the prototypes in LSR the prototypes are normalized to lie on a unit hypersphere due to the use of cosine similarity, the norm of the vectors is incorporated to adjust for the transition to Euclidean space:"
|
| 664 |
+
},
|
| 665 |
+
{
|
| 666 |
+
"type": "equation",
|
| 667 |
+
"bbox": [
|
| 668 |
+
0.62,
|
| 669 |
+
0.845,
|
| 670 |
+
0.921,
|
| 671 |
+
0.875
|
| 672 |
+
],
|
| 673 |
+
"angle": 0,
|
| 674 |
+
"content": "\\[\n\\hat {z} ^ {s} = z ^ {s} + \\lambda_ {i} \\cdot \\left(\\frac {\\| z ^ {s} \\|}{\\| c ^ {b} \\|} c ^ {b} - z ^ {s}\\right) \\tag {9}\n\\]"
|
| 675 |
+
},
|
| 676 |
+
{
|
| 677 |
+
"type": "text",
|
| 678 |
+
"bbox": [
|
| 679 |
+
0.504,
|
| 680 |
+
0.881,
|
| 681 |
+
0.921,
|
| 682 |
+
0.91
|
| 683 |
+
],
|
| 684 |
+
"angle": 0,
|
| 685 |
+
"content": "where \\(\\lambda_{i}\\) is an interpolation coefficient sampled from \\(\\mathcal{U}(0,0.1)\\), and the norm term \\(\\| z^s\\| /\\| c^b\\|\\) aligns the scales of the vectors."
|
| 686 |
+
}
|
| 687 |
+
],
|
| 688 |
+
[
|
| 689 |
+
{
|
| 690 |
+
"type": "text",
|
| 691 |
+
"bbox": [
|
| 692 |
+
0.075,
|
| 693 |
+
0.062,
|
| 694 |
+
0.493,
|
| 695 |
+
0.101
|
| 696 |
+
],
|
| 697 |
+
"angle": 0,
|
| 698 |
+
"content": "Linear Extrapolation (LE). In addition to interpolation, we also perform extrapolation from the nearest spoof prototype to create new features:"
|
| 699 |
+
},
|
| 700 |
+
{
|
| 701 |
+
"type": "equation",
|
| 702 |
+
"bbox": [
|
| 703 |
+
0.188,
|
| 704 |
+
0.098,
|
| 705 |
+
0.49,
|
| 706 |
+
0.128
|
| 707 |
+
],
|
| 708 |
+
"angle": 0,
|
| 709 |
+
"content": "\\[\n\\hat {z} ^ {s} = z ^ {s} + \\lambda_ {e} \\cdot \\left(z ^ {s} - \\frac {\\| z ^ {s} \\|}{\\| C _ {n} ^ {s} \\|} c _ {n} ^ {s}\\right) \\tag {10}\n\\]"
|
| 710 |
+
},
|
| 711 |
+
{
|
| 712 |
+
"type": "text",
|
| 713 |
+
"bbox": [
|
| 714 |
+
0.075,
|
| 715 |
+
0.132,
|
| 716 |
+
0.492,
|
| 717 |
+
0.201
|
| 718 |
+
],
|
| 719 |
+
"angle": 0,
|
| 720 |
+
"content": "where \\( c_{n}^{s} \\) corresponds the nearest spoof prototype of \\( z^{s} \\) and \\( \\lambda_{e} \\) is an extrapolation coefficient sampled from \\( \\mathcal{U}(0,0.1) \\). Similarly, we use the norm \\( \\| z^{s}\\| /\\| c_{n}^{s}\\| \\) to adjust for the Euclidean representation. This method extends the spoof features further away from the nearest prototype, generating more diverse variations."
|
| 721 |
+
},
|
| 722 |
+
{
|
| 723 |
+
"type": "text",
|
| 724 |
+
"bbox": [
|
| 725 |
+
0.075,
|
| 726 |
+
0.202,
|
| 727 |
+
0.492,
|
| 728 |
+
0.258
|
| 729 |
+
],
|
| 730 |
+
"angle": 0,
|
| 731 |
+
"content": "Finally, the augmented latent features \\(\\hat{z}^s\\) are concatenated with the original features \\(z\\), forming \\(z' = [z \\parallel \\hat{z}^s]\\). These enhanced features are then used for loss calculation during subsequent training, allowing the model to learn from a more varied set of spoofed data."
|
| 732 |
+
},
|
| 733 |
+
{
|
| 734 |
+
"type": "title",
|
| 735 |
+
"bbox": [
|
| 736 |
+
0.221,
|
| 737 |
+
0.265,
|
| 738 |
+
0.347,
|
| 739 |
+
0.277
|
| 740 |
+
],
|
| 741 |
+
"angle": 0,
|
| 742 |
+
"content": "III. EXPERIMENTS"
|
| 743 |
+
},
|
| 744 |
+
{
|
| 745 |
+
"type": "title",
|
| 746 |
+
"bbox": [
|
| 747 |
+
0.075,
|
| 748 |
+
0.283,
|
| 749 |
+
0.232,
|
| 750 |
+
0.296
|
| 751 |
+
],
|
| 752 |
+
"angle": 0,
|
| 753 |
+
"content": "A. Experimental Settings"
|
| 754 |
+
},
|
| 755 |
+
{
|
| 756 |
+
"type": "text",
|
| 757 |
+
"bbox": [
|
| 758 |
+
0.074,
|
| 759 |
+
0.3,
|
| 760 |
+
0.492,
|
| 761 |
+
0.494
|
| 762 |
+
],
|
| 763 |
+
"angle": 0,
|
| 764 |
+
"content": "Datasets and metrics. We train all systems using the ASVspoof 2019 LA training set [17], which includes approximately \\(25\\mathrm{k}\\) utterances and 6 spoofing attacks involving VC or TTS. To evaluate generalization performance, we test on multiple datasets: the ASVspoof 2019 LA evaluation set (19LA) [17], containing \\(71\\mathrm{k}\\) utterances with 13 different spoofing attacks; the ASVspoof 2021 LA set (21LA) [1], comprising about \\(181\\mathrm{k}\\) utterances with algorithms similar to 19LA but also reflecting telephony systems' encoding and transmission effects; the ASVspoof 2021 DF set (21DF) [1], with over \\(600\\mathrm{k}\\) utterances and more than 100 spoofing attacks processed with various lossy codecs; and the In-The-Wild dataset (ITW) [2], which features approximately \\(32\\mathrm{k}\\) utterances collected under real-world, non-controlled conditions, making it a more challenging dataset. Performance is measured using Equal Error Rate (EER)."
|
| 765 |
+
},
|
| 766 |
+
{
|
| 767 |
+
"type": "text",
|
| 768 |
+
"bbox": [
|
| 769 |
+
0.074,
|
| 770 |
+
0.495,
|
| 771 |
+
0.493,
|
| 772 |
+
0.662
|
| 773 |
+
],
|
| 774 |
+
"angle": 0,
|
| 775 |
+
"content": "Training details. We adopt the model architecture from [6], utilizing Wav2Vec2.0 XLSR [3] as the frontend feature extractor and AASIST [20] as the backend classifier. Input speech is randomly chunked into 4-second segments, with Rawboost [10] applied as basic augmentation and codec augmentation as extra augmentation. The learning rate is set to 1e-6 for the backbone model and 1e-3 for the prototypes in LSR. For the LSR loss, we set the scaling factor \\( s = 32 \\), angular margin \\( m = 0.2 \\), and regularization coefficient \\( \\delta = 0.2 \\). For the WCE loss, the weights for bonafide and spoof classes are set to 0.9 and 0.1, respectively. For LSA, we either fix one type of augmentation during training or randomly select from all augmentation types (denoted as All)."
|
| 776 |
+
},
|
| 777 |
+
{
|
| 778 |
+
"type": "title",
|
| 779 |
+
"bbox": [
|
| 780 |
+
0.076,
|
| 781 |
+
0.669,
|
| 782 |
+
0.305,
|
| 783 |
+
0.684
|
| 784 |
+
],
|
| 785 |
+
"angle": 0,
|
| 786 |
+
"content": "B. Overall Performance Comparison"
|
| 787 |
+
},
|
| 788 |
+
{
|
| 789 |
+
"type": "text",
|
| 790 |
+
"bbox": [
|
| 791 |
+
0.074,
|
| 792 |
+
0.687,
|
| 793 |
+
0.493,
|
| 794 |
+
0.911
|
| 795 |
+
],
|
| 796 |
+
"angle": 0,
|
| 797 |
+
"content": "To evaluate the overall performance of the proposed methods, we tested the system on four datasets and compared the results with those from the literature that used the same training dataset, as shown in Table I. Across all datasets, LSR+LSA consistently outperforms LSR alone and often ranks among the top performers, highlighting the effectiveness of integrating latent space refinement with latent space augmentation. To further enhance the results, we applied additional data augmentation, which led to EERs of \\(0.12\\%\\) on 19LA, \\(1.05\\%\\) on 21LA, \\(1.86\\%\\) on 21DF, and \\(5.54\\%\\) on ITW. This places our method on par with, or ahead of, the current state-of-the-art methods. Notably, our method focuses on refining and augmenting the latent space, which contrasts with recent approaches that focus on modifying the model architecture [28], [29]. These two strategies—latent space manipulation and architectural improvements—target different aspects of the problem and could potentially be combined for even better results. This highlights the flexibility and advantage of our method,"
|
| 798 |
+
},
|
| 799 |
+
{
|
| 800 |
+
"type": "table_caption",
|
| 801 |
+
"bbox": [
|
| 802 |
+
0.515,
|
| 803 |
+
0.062,
|
| 804 |
+
0.912,
|
| 805 |
+
0.121
|
| 806 |
+
],
|
| 807 |
+
"angle": 0,
|
| 808 |
+
"content": "TABLE I OVERALL PERFORMANCE COMPARISON IN EER(%) ACROSS MULTIPLE DATASETS. ALL SYSTEMS ARE TRAINED ON THE ASVSPOOF2019 LA TRAINING SET. BEST RESULTS ARE HIGHLIGHTED IN BOLD, AND SECOND-BEST RESULTS ARE UNDERLINED."
|
| 809 |
+
},
|
| 810 |
+
{
|
| 811 |
+
"type": "table",
|
| 812 |
+
"bbox": [
|
| 813 |
+
0.508,
|
| 814 |
+
0.13,
|
| 815 |
+
0.915,
|
| 816 |
+
0.327
|
| 817 |
+
],
|
| 818 |
+
"angle": 0,
|
| 819 |
+
"content": "<table><tr><td>System</td><td>19LA</td><td>21LA</td><td>21DF</td><td>ITW</td></tr><tr><td>WavLM+AttM [21]</td><td>0.65</td><td>3.50</td><td>3.19</td><td>-</td></tr><tr><td>Wav2Vec+LogReg [22]</td><td>0.50</td><td>-</td><td>-</td><td>7.20</td></tr><tr><td>WavLM+MFA [23]</td><td>0.42</td><td>5.08</td><td>2.56</td><td>-</td></tr><tr><td>Wav2Vec+VIB [24]</td><td>0.40</td><td>4.92</td><td>-</td><td>-</td></tr><tr><td>OCKD [25]</td><td>0.39</td><td>0.90</td><td>2.27</td><td>7.68</td></tr><tr><td>GFL-FAD [26]</td><td>0.25</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Wav2Vec+Linear [13]</td><td>0.22</td><td>3.63</td><td>3.65</td><td>16.17</td></tr><tr><td>OC+ACS [8]</td><td>0.17</td><td>1.30</td><td>2.19</td><td>-</td></tr><tr><td>Wav2Vec+AASIST [6]</td><td>-</td><td>0.82</td><td>2.85</td><td>-</td></tr><tr><td>Wav2Vec+AASIST2 [27]</td><td>0.15</td><td>1.61</td><td>2.77</td><td>-</td></tr><tr><td>Wav2vec+Conformer+TCM [28]</td><td>-</td><td>1.03</td><td>2.06</td><td>-</td></tr><tr><td>Wav2vec+STJ-GAT+BLDL* [29]</td><td>0.06</td><td>0.56</td><td>1.89</td><td>-</td></tr><tr><td>LSR</td><td>0.19</td><td>2.35</td><td>3.01</td><td>6.58</td></tr><tr><td>LSR+LSA</td><td>0.15</td><td>1.19</td><td>2.43</td><td>5.92</td></tr><tr><td>LSR+LSA*</td><td>0.12</td><td>1.05</td><td>1.86</td><td>5.54</td></tr></table>"
|
| 820 |
+
},
|
| 821 |
+
{
|
| 822 |
+
"type": "table_footnote",
|
| 823 |
+
"bbox": [
|
| 824 |
+
0.509,
|
| 825 |
+
0.328,
|
| 826 |
+
0.684,
|
| 827 |
+
0.341
|
| 828 |
+
],
|
| 829 |
+
"angle": 0,
|
| 830 |
+
"content": "\\* with extra data augmentation."
|
| 831 |
+
},
|
| 832 |
+
{
|
| 833 |
+
"type": "text",
|
| 834 |
+
"bbox": [
|
| 835 |
+
0.503,
|
| 836 |
+
0.362,
|
| 837 |
+
0.922,
|
| 838 |
+
0.445
|
| 839 |
+
],
|
| 840 |
+
"angle": 0,
|
| 841 |
+
"content": "as it enhances generalization without needing to alter the underlying model architecture. In summary, the proposed LSR+LSA method consistently delivers strong results, matching or outperforming state-of-the-art performance across various datasets, demonstrating its robustness and effectiveness in generalizing across diverse deepfake detection tasks."
|
| 842 |
+
},
|
| 843 |
+
{
|
| 844 |
+
"type": "title",
|
| 845 |
+
"bbox": [
|
| 846 |
+
0.505,
|
| 847 |
+
0.452,
|
| 848 |
+
0.795,
|
| 849 |
+
0.466
|
| 850 |
+
],
|
| 851 |
+
"angle": 0,
|
| 852 |
+
"content": "C. Ablation Study on Latent Space Refinement"
|
| 853 |
+
},
|
| 854 |
+
{
|
| 855 |
+
"type": "table_caption",
|
| 856 |
+
"bbox": [
|
| 857 |
+
0.506,
|
| 858 |
+
0.472,
|
| 859 |
+
0.92,
|
| 860 |
+
0.519
|
| 861 |
+
],
|
| 862 |
+
"angle": 0,
|
| 863 |
+
"content": "TABLE II EER \\((\\%)\\) ACROSS DATASETS FOR SYSTEMS TRAINED WITH DIFFERENT LOSS CONFIGURATIONS. BEST RESULTS ARE IN BOLD, AND SECOND-BEST RESULTS ARE UNDERlined."
|
| 864 |
+
},
|
| 865 |
+
{
|
| 866 |
+
"type": "table",
|
| 867 |
+
"bbox": [
|
| 868 |
+
0.52,
|
| 869 |
+
0.528,
|
| 870 |
+
0.909,
|
| 871 |
+
0.635
|
| 872 |
+
],
|
| 873 |
+
"angle": 0,
|
| 874 |
+
"content": "<table><tr><td>Loss Configuration</td><td>19LA</td><td>21LA</td><td>21DF</td><td>ITW</td><td>Avg.</td></tr><tr><td>WCE</td><td>0.30</td><td>2.64</td><td>4.74</td><td>8.09</td><td>3.94</td></tr><tr><td>OC Softmax</td><td>0.31</td><td>1.60</td><td>4.06</td><td>7.86</td><td>3.46</td></tr><tr><td>LSR</td><td>0.23</td><td>1.55</td><td>3.22</td><td>7.45</td><td>3.11</td></tr><tr><td>w/o Linter</td><td>0.23</td><td>1.84</td><td>3.30</td><td>7.84</td><td>3.30</td></tr><tr><td>w/o Lintra</td><td>0.27</td><td>2.62</td><td>4.02</td><td>7.75</td><td>3.67</td></tr><tr><td>w/o Lintra, Linter</td><td>0.32</td><td>2.86</td><td>4.11</td><td>8.05</td><td>3.84</td></tr><tr><td>WCE+LSR</td><td>0.19</td><td>2.35</td><td>3.01</td><td>6.58</td><td>3.03</td></tr></table>"
|
| 875 |
+
},
|
| 876 |
+
{
|
| 877 |
+
"type": "text",
|
| 878 |
+
"bbox": [
|
| 879 |
+
0.503,
|
| 880 |
+
0.645,
|
| 881 |
+
0.922,
|
| 882 |
+
0.88
|
| 883 |
+
],
|
| 884 |
+
"angle": 0,
|
| 885 |
+
"content": "Table II presents the performance of various loss configurations during training. The baseline configuration uses weighted cross entropy (WCE) loss for binary classification, with OC Softmax [7] included for comparison. Incorporating Latent Space Refinement (LSR) improves performance over both WCE and OC Softmax. We further examine the effects of LSR's loss terms. Removing inter-class regularization results in minimal degradation, indicating that the core prototype-based loss sufficiently handles prototype separation. However, removing intra-class regularization significantly reduces performance, as this term is crucial for maintaining prototype diversity within the spoof class and preventing collapse. When both regularizations are removed, performance drops to baseline levels. Additionally, combining LSR with WCE yields the best overall results. While WCE provides a solid foundation for binary classification, LSR refines the latent space to better capture variations in spoofed data. This combination leads to improved generalization across the datasets."
|
| 886 |
+
},
|
| 887 |
+
{
|
| 888 |
+
"type": "text",
|
| 889 |
+
"bbox": [
|
| 890 |
+
0.504,
|
| 891 |
+
0.881,
|
| 892 |
+
0.922,
|
| 893 |
+
0.911
|
| 894 |
+
],
|
| 895 |
+
"angle": 0,
|
| 896 |
+
"content": "Meanwhile, we evaluated the impact of the number of prototypes on performance, as shown in Fig. 3. Increasing the prototypes"
|
| 897 |
+
}
|
| 898 |
+
],
|
| 899 |
+
[
|
| 900 |
+
{
|
| 901 |
+
"type": "image",
|
| 902 |
+
"bbox": [
|
| 903 |
+
0.081,
|
| 904 |
+
0.062,
|
| 905 |
+
0.245,
|
| 906 |
+
0.162
|
| 907 |
+
],
|
| 908 |
+
"angle": 0,
|
| 909 |
+
"content": null
|
| 910 |
+
},
|
| 911 |
+
{
|
| 912 |
+
"type": "image",
|
| 913 |
+
"bbox": [
|
| 914 |
+
0.247,
|
| 915 |
+
0.062,
|
| 916 |
+
0.413,
|
| 917 |
+
0.163
|
| 918 |
+
],
|
| 919 |
+
"angle": 0,
|
| 920 |
+
"content": null
|
| 921 |
+
},
|
| 922 |
+
{
|
| 923 |
+
"type": "image",
|
| 924 |
+
"bbox": [
|
| 925 |
+
0.418,
|
| 926 |
+
0.062,
|
| 927 |
+
0.582,
|
| 928 |
+
0.162
|
| 929 |
+
],
|
| 930 |
+
"angle": 0,
|
| 931 |
+
"content": null
|
| 932 |
+
},
|
| 933 |
+
{
|
| 934 |
+
"type": "image",
|
| 935 |
+
"bbox": [
|
| 936 |
+
0.584,
|
| 937 |
+
0.062,
|
| 938 |
+
0.75,
|
| 939 |
+
0.162
|
| 940 |
+
],
|
| 941 |
+
"angle": 0,
|
| 942 |
+
"content": null
|
| 943 |
+
},
|
| 944 |
+
{
|
| 945 |
+
"type": "image",
|
| 946 |
+
"bbox": [
|
| 947 |
+
0.752,
|
| 948 |
+
0.062,
|
| 949 |
+
0.918,
|
| 950 |
+
0.162
|
| 951 |
+
],
|
| 952 |
+
"angle": 0,
|
| 953 |
+
"content": null
|
| 954 |
+
},
|
| 955 |
+
{
|
| 956 |
+
"type": "image_caption",
|
| 957 |
+
"bbox": [
|
| 958 |
+
0.074,
|
| 959 |
+
0.175,
|
| 960 |
+
0.921,
|
| 961 |
+
0.2
|
| 962 |
+
],
|
| 963 |
+
"angle": 0,
|
| 964 |
+
"content": "Fig. 2. t-SNE visualization of the training dataset featuring various latent space augmentations. The green, blue, and red points represent the 2D projections of embeddings for the bonafide, spoof, and augmented spoof classes, respectively."
|
| 965 |
+
},
|
| 966 |
+
{
|
| 967 |
+
"type": "image",
|
| 968 |
+
"bbox": [
|
| 969 |
+
0.084,
|
| 970 |
+
0.223,
|
| 971 |
+
0.486,
|
| 972 |
+
0.329
|
| 973 |
+
],
|
| 974 |
+
"angle": 0,
|
| 975 |
+
"content": null
|
| 976 |
+
},
|
| 977 |
+
{
|
| 978 |
+
"type": "image_caption",
|
| 979 |
+
"bbox": [
|
| 980 |
+
0.075,
|
| 981 |
+
0.34,
|
| 982 |
+
0.49,
|
| 983 |
+
0.364
|
| 984 |
+
],
|
| 985 |
+
"angle": 0,
|
| 986 |
+
"content": "Fig. 3. The effect of the number of spoofed prototypes on EER \\((\\%)\\) across different datasets (21LA, 21DF, and ITW)."
|
| 987 |
+
},
|
| 988 |
+
{
|
| 989 |
+
"type": "text",
|
| 990 |
+
"bbox": [
|
| 991 |
+
0.075,
|
| 992 |
+
0.386,
|
| 993 |
+
0.492,
|
| 994 |
+
0.429
|
| 995 |
+
],
|
| 996 |
+
"angle": 0,
|
| 997 |
+
"content": "from 1 to 8 improves performance, but further increasing to 16 shows diminishing returns. At 20 prototypes, performance declines, suggesting that too many prototypes can hinder generalization."
|
| 998 |
+
},
|
| 999 |
+
{
|
| 1000 |
+
"type": "title",
|
| 1001 |
+
"bbox": [
|
| 1002 |
+
0.076,
|
| 1003 |
+
0.435,
|
| 1004 |
+
0.38,
|
| 1005 |
+
0.449
|
| 1006 |
+
],
|
| 1007 |
+
"angle": 0,
|
| 1008 |
+
"content": "D. Ablation Study on Latent Space Augmentation"
|
| 1009 |
+
},
|
| 1010 |
+
{
|
| 1011 |
+
"type": "table_caption",
|
| 1012 |
+
"bbox": [
|
| 1013 |
+
0.089,
|
| 1014 |
+
0.457,
|
| 1015 |
+
0.478,
|
| 1016 |
+
0.503
|
| 1017 |
+
],
|
| 1018 |
+
"angle": 0,
|
| 1019 |
+
"content": "TABLE III EER \\((\\%)\\) ACROSS DATASETS FOR SYSTEMS TRAINED WITH DIFFERENT LATENT SPACE AUGMENTATION. BEST RESULTS ARE IN BOLD, AND SECOND-BEST RESULTS ARE UNDERLINED."
|
| 1020 |
+
},
|
| 1021 |
+
{
|
| 1022 |
+
"type": "table",
|
| 1023 |
+
"bbox": [
|
| 1024 |
+
0.115,
|
| 1025 |
+
0.513,
|
| 1026 |
+
0.454,
|
| 1027 |
+
0.62
|
| 1028 |
+
],
|
| 1029 |
+
"angle": 0,
|
| 1030 |
+
"content": "<table><tr><td>Method</td><td>19LA</td><td>21LA</td><td>21DF</td><td>ITW</td><td>Avg.</td></tr><tr><td>LSR</td><td>0.19</td><td>2.35</td><td>3.01</td><td>6.58</td><td>3.03</td></tr><tr><td>+LSA(AN)</td><td>0.16</td><td>1.67</td><td>2.85</td><td>6.17</td><td>2.71</td></tr><tr><td>+LSA(AT)</td><td>0.19</td><td>1.62</td><td>2.57</td><td>6.69</td><td>2.77</td></tr><tr><td>+LSA(BM)</td><td>0.21</td><td>1.65</td><td>2.86</td><td>6.61</td><td>2.93</td></tr><tr><td>+LSA(LI)</td><td>0.23</td><td>1.92</td><td>2.65</td><td>7.05</td><td>2.96</td></tr><tr><td>+LSA(LE)</td><td>0.18</td><td>1.52</td><td>2.54</td><td>6.15</td><td>2.60</td></tr><tr><td>+LSA(All)</td><td>0.15</td><td>1.19</td><td>2.43</td><td>5.92</td><td>2.42</td></tr></table>"
|
| 1031 |
+
},
|
| 1032 |
+
{
|
| 1033 |
+
"type": "text",
|
| 1034 |
+
"bbox": [
|
| 1035 |
+
0.074,
|
| 1036 |
+
0.632,
|
| 1037 |
+
0.491,
|
| 1038 |
+
0.852
|
| 1039 |
+
],
|
| 1040 |
+
"angle": 0,
|
| 1041 |
+
"content": "To assess the impact of different latent space augmentation methods, we conducted experiments for each method, as summarized in Table III, and visualized their effects using t-SNE in Fig. 2. Notably, since LI and LE rely on LSR prototypes, all systems were trained with \\(\\mathrm{LSR + WCE}\\) loss. Among the first three augmentations that are independent of the prototypes, AN and AT produced more dispersed and varied distributions, leading to better performance. In contrast, BM's distribution remained closer to the original due to its mixup nature, which limited its effectiveness. For the prototype-dependent augmentations, LI, while beneficial, underperformed compared to the others, likely due to the consistent generation of challenging examples. LE, however, achieved the best results, as it effectively expanded the distribution into new regions of the latent space, offering a more balanced diversity. Ultimately, combining all augmentation methods led to the most diverse latent space, resulting in the highest overall performance."
|
| 1042 |
+
},
|
| 1043 |
+
{
|
| 1044 |
+
"type": "text",
|
| 1045 |
+
"bbox": [
|
| 1046 |
+
0.075,
|
| 1047 |
+
0.854,
|
| 1048 |
+
0.49,
|
| 1049 |
+
0.91
|
| 1050 |
+
],
|
| 1051 |
+
"angle": 0,
|
| 1052 |
+
"content": "While we have demonstrated the effectiveness of augmentation in latent space, we were curious whether applying the same augmentations in the input space could yield comparable or even better results. To explore this, we conducted comparison experiments between"
|
| 1053 |
+
},
|
| 1054 |
+
{
|
| 1055 |
+
"type": "table_caption",
|
| 1056 |
+
"bbox": [
|
| 1057 |
+
0.514,
|
| 1058 |
+
0.224,
|
| 1059 |
+
0.914,
|
| 1060 |
+
0.26
|
| 1061 |
+
],
|
| 1062 |
+
"angle": 0,
|
| 1063 |
+
"content": "TABLE IV COMPARISON OF AUGMENTATION EFFECTS IN INPUT VS. LATENT SPACE ACROSS DATSETS (EER \\(\\%\\)"
|
| 1064 |
+
},
|
| 1065 |
+
{
|
| 1066 |
+
"type": "table",
|
| 1067 |
+
"bbox": [
|
| 1068 |
+
0.536,
|
| 1069 |
+
0.269,
|
| 1070 |
+
0.892,
|
| 1071 |
+
0.376
|
| 1072 |
+
],
|
| 1073 |
+
"angle": 0,
|
| 1074 |
+
"content": "<table><tr><td>Method</td><td>Space</td><td>19LA</td><td>21LA</td><td>21DF</td><td>ITW</td><td>Avg.</td></tr><tr><td>None</td><td>-</td><td>0.30</td><td>2.64</td><td>4.74</td><td>8.09</td><td>3.94</td></tr><tr><td>AN</td><td>input</td><td>0.25</td><td>2.22</td><td>3.17</td><td>6.35</td><td>3.00</td></tr><tr><td>AN</td><td>latent</td><td>0.23</td><td>2.05</td><td>2.84</td><td>6.21</td><td>2.83</td></tr><tr><td>AT</td><td>input</td><td>0.27</td><td>2.43</td><td>3.44</td><td>6.81</td><td>3.24</td></tr><tr><td>AT</td><td>latent</td><td>0.25</td><td>2.03</td><td>2.91</td><td>6.72</td><td>2.98</td></tr><tr><td>BM</td><td>input</td><td>0.19</td><td>2.24</td><td>3.01</td><td>6.33</td><td>2.94</td></tr><tr><td>BM</td><td>latent</td><td>0.19</td><td>2.21</td><td>2.95</td><td>6.56</td><td>2.98</td></tr></table>"
|
| 1075 |
+
},
|
| 1076 |
+
{
|
| 1077 |
+
"type": "text",
|
| 1078 |
+
"bbox": [
|
| 1079 |
+
0.503,
|
| 1080 |
+
0.402,
|
| 1081 |
+
0.921,
|
| 1082 |
+
0.624
|
| 1083 |
+
],
|
| 1084 |
+
"angle": 0,
|
| 1085 |
+
"content": "augmentations applied in the input space versus the latent space, focusing on three methods that do not depend on latent prototypes or embeddings: AN, AT, and BM. All experiments were conducted using WCE loss without LSR. As shown in Table IV, applying augmentation, whether in the input or latent space, improves the baseline to some extent. For AN and AT, augmentations performed in the latent space consistently yield better results than those in the input space. This suggests that latent space augmentations may more effectively capture the underlying data distributions that the model needs to learn. Interestingly, BM yields better results when applied in the input space than in the latent space. This outcome may be attributed to the nature of Mixup augmentation, which has been widely proven effective in various audio-related tasks when performed on the input data. The input space BM likely benefits from preserving more of the original data characteristics while still introducing beneficial variability."
|
| 1086 |
+
},
|
| 1087 |
+
{
|
| 1088 |
+
"type": "title",
|
| 1089 |
+
"bbox": [
|
| 1090 |
+
0.652,
|
| 1091 |
+
0.63,
|
| 1092 |
+
0.774,
|
| 1093 |
+
0.642
|
| 1094 |
+
],
|
| 1095 |
+
"angle": 0,
|
| 1096 |
+
"content": "IV. CONCLUSIONS"
|
| 1097 |
+
},
|
| 1098 |
+
{
|
| 1099 |
+
"type": "text",
|
| 1100 |
+
"bbox": [
|
| 1101 |
+
0.504,
|
| 1102 |
+
0.652,
|
| 1103 |
+
0.921,
|
| 1104 |
+
0.791
|
| 1105 |
+
],
|
| 1106 |
+
"angle": 0,
|
| 1107 |
+
"content": "This paper presents a novel approach to enhance the generalization of audio deepfake detection systems by integrating Latent Space Refinement (LSR) and Latent Space Augmentation (LSA). LSR introduces multiple learnable prototypes to better capture the complex intra-class variability of spoofed audio, while LSA generates diverse representations in the latent space, further strengthening the model's robustness. Extensive experiments on multiple datasets, including ASVspoof 2019 LA, ASVspoof 2021 LA, ASVspoof 2021 DF, and In-The-Wild, demonstrate that each of the proposed LSR and LSA can improve system significantly."
|
| 1108 |
+
},
|
| 1109 |
+
{
|
| 1110 |
+
"type": "title",
|
| 1111 |
+
"bbox": [
|
| 1112 |
+
0.647,
|
| 1113 |
+
0.805,
|
| 1114 |
+
0.78,
|
| 1115 |
+
0.817
|
| 1116 |
+
],
|
| 1117 |
+
"angle": 0,
|
| 1118 |
+
"content": "ACKNOWLEDGMENT"
|
| 1119 |
+
},
|
| 1120 |
+
{
|
| 1121 |
+
"type": "text",
|
| 1122 |
+
"bbox": [
|
| 1123 |
+
0.504,
|
| 1124 |
+
0.826,
|
| 1125 |
+
0.921,
|
| 1126 |
+
0.91
|
| 1127 |
+
],
|
| 1128 |
+
"angle": 0,
|
| 1129 |
+
"content": "This work was partially supported by the National Natural Science Foundation of China (NSFC) under Grants 62122050 and 62071288, and the Shanghai Municipal Science and Technology Commission under Grant 2021SHZDZX0102. Additional support was provided by the Pioneer R&D Program of Zhejiang Province (No. 2024C01024) and the Ant Group Research Intern Program."
|
| 1130 |
+
}
|
| 1131 |
+
],
|
| 1132 |
+
[
|
| 1133 |
+
{
|
| 1134 |
+
"type": "title",
|
| 1135 |
+
"bbox": [
|
| 1136 |
+
0.241,
|
| 1137 |
+
0.062,
|
| 1138 |
+
0.327,
|
| 1139 |
+
0.074
|
| 1140 |
+
],
|
| 1141 |
+
"angle": 0,
|
| 1142 |
+
"content": "REFERENCES"
|
| 1143 |
+
},
|
| 1144 |
+
{
|
| 1145 |
+
"type": "ref_text",
|
| 1146 |
+
"bbox": [
|
| 1147 |
+
0.085,
|
| 1148 |
+
0.085,
|
| 1149 |
+
0.492,
|
| 1150 |
+
0.153
|
| 1151 |
+
],
|
| 1152 |
+
"angle": 0,
|
| 1153 |
+
"content": "[1] Junichi Yamagishi, Xin Wang, Massimiliano Todisco, Md Sahidullah, Jose Patino, Andreas Nautsch, Xuechen Liu, Kong Aik Lee, Tomi Kinnunen, Nicholas Evans, et al., \"Asvspoof 2021: accelerating progress in spoofed and deepfake speech detection,\" in ASVspoof 2021 Workshop-Automatic Speaker Verification and Spoofing Coutermeasures Challenge, 2021."
|
| 1154 |
+
},
|
| 1155 |
+
{
|
| 1156 |
+
"type": "ref_text",
|
| 1157 |
+
"bbox": [
|
| 1158 |
+
0.085,
|
| 1159 |
+
0.154,
|
| 1160 |
+
0.492,
|
| 1161 |
+
0.188
|
| 1162 |
+
],
|
| 1163 |
+
"angle": 0,
|
| 1164 |
+
"content": "[2] Nicolas M Müller, Pavel Czempin, Franziska Dieckmann, Adam Froghyar, and Konstantin Bötttinger, “Does audio deepfake detection generalize?,” Interspeech, 2022."
|
| 1165 |
+
},
|
| 1166 |
+
{
|
| 1167 |
+
"type": "ref_text",
|
| 1168 |
+
"bbox": [
|
| 1169 |
+
0.085,
|
| 1170 |
+
0.189,
|
| 1171 |
+
0.492,
|
| 1172 |
+
0.245
|
| 1173 |
+
],
|
| 1174 |
+
"angle": 0,
|
| 1175 |
+
"content": "[3] Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, and Michael Auli, \"XIs-r: Self-supervised cross-lingual speech representation learning at scale,\" arXiv, vol. abs/2111.09296, 2021."
|
| 1176 |
+
},
|
| 1177 |
+
{
|
| 1178 |
+
"type": "ref_text",
|
| 1179 |
+
"bbox": [
|
| 1180 |
+
0.085,
|
| 1181 |
+
0.246,
|
| 1182 |
+
0.492,
|
| 1183 |
+
0.292
|
| 1184 |
+
],
|
| 1185 |
+
"angle": 0,
|
| 1186 |
+
"content": "[4] Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever, “Robust speech recognition via large-scale weak supervision,” in International conference on machine learning. PMLR, 2023, pp. 28492-28518."
|
| 1187 |
+
},
|
| 1188 |
+
{
|
| 1189 |
+
"type": "ref_text",
|
| 1190 |
+
"bbox": [
|
| 1191 |
+
0.085,
|
| 1192 |
+
0.293,
|
| 1193 |
+
0.492,
|
| 1194 |
+
0.348
|
| 1195 |
+
],
|
| 1196 |
+
"angle": 0,
|
| 1197 |
+
"content": "[5] Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, et al., \"Wavlm: Large-scale self-supervised pre-training for full stack speech processing,\" IEEE Journal of Selected Topics in Signal Processing, vol. 16, no. 6, pp. 1505-1518, 2022."
|
| 1198 |
+
},
|
| 1199 |
+
{
|
| 1200 |
+
"type": "ref_text",
|
| 1201 |
+
"bbox": [
|
| 1202 |
+
0.085,
|
| 1203 |
+
0.349,
|
| 1204 |
+
0.492,
|
| 1205 |
+
0.394
|
| 1206 |
+
],
|
| 1207 |
+
"angle": 0,
|
| 1208 |
+
"content": "[6] Hemlata Tak, Massimiliano Todisco, Xin Wang, Jee-weon Jung, Junichi Yamagishi, and Nicholas Evans, \"Automatic speaker verification spoofing and deepfake detection using wav2vec 2.0 and data augmentation,\" in The Speaker and Language Recognition Workshop, 2022."
|
| 1209 |
+
},
|
| 1210 |
+
{
|
| 1211 |
+
"type": "ref_text",
|
| 1212 |
+
"bbox": [
|
| 1213 |
+
0.085,
|
| 1214 |
+
0.395,
|
| 1215 |
+
0.492,
|
| 1216 |
+
0.429
|
| 1217 |
+
],
|
| 1218 |
+
"angle": 0,
|
| 1219 |
+
"content": "[7] You Zhang, Fei Jiang, and Zhiyao Duan, \"One-class learning towards synthetic voice spoofing detection,\" IEEE Signal Processing Letters, vol. 28, pp. 937-941, 2021."
|
| 1220 |
+
},
|
| 1221 |
+
{
|
| 1222 |
+
"type": "ref_text",
|
| 1223 |
+
"bbox": [
|
| 1224 |
+
0.085,
|
| 1225 |
+
0.429,
|
| 1226 |
+
0.492,
|
| 1227 |
+
0.463
|
| 1228 |
+
],
|
| 1229 |
+
"angle": 0,
|
| 1230 |
+
"content": "[8] Hyun Myung Kim, Kangwook Jang, and Hoirin Kim, “One-class learning with adaptive centroid shift for audio deepfake detection,” in Interspeech 2024, 2024, pp. 4853–4857."
|
| 1231 |
+
},
|
| 1232 |
+
{
|
| 1233 |
+
"type": "ref_text",
|
| 1234 |
+
"bbox": [
|
| 1235 |
+
0.085,
|
| 1236 |
+
0.464,
|
| 1237 |
+
0.492,
|
| 1238 |
+
0.508
|
| 1239 |
+
],
|
| 1240 |
+
"angle": 0,
|
| 1241 |
+
"content": "[9] Daniel S Park, William Chan, Yu Zhang, Chung-Cheng Chiu, Barret Zoph, Ekin D Cubuk, and Quoc V Le, \"Specaugment: A simple data augmentation method for automatic speech recognition,\" arXiv preprint arXiv:1904.08779, 2019."
|
| 1242 |
+
},
|
| 1243 |
+
{
|
| 1244 |
+
"type": "ref_text",
|
| 1245 |
+
"bbox": [
|
| 1246 |
+
0.078,
|
| 1247 |
+
0.509,
|
| 1248 |
+
0.492,
|
| 1249 |
+
0.566
|
| 1250 |
+
],
|
| 1251 |
+
"angle": 0,
|
| 1252 |
+
"content": "[10] Hemlata Tak, Madhu Kamble, Jose Patino, Massimiliano Todisco, and Nicholas Evans, \"Rawboost: A raw data boosting and augmentation method applied to automatic speaker verification anti-spoofing,\" in ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2022, pp. 6382-6386."
|
| 1253 |
+
},
|
| 1254 |
+
{
|
| 1255 |
+
"type": "ref_text",
|
| 1256 |
+
"bbox": [
|
| 1257 |
+
0.078,
|
| 1258 |
+
0.567,
|
| 1259 |
+
0.492,
|
| 1260 |
+
0.622
|
| 1261 |
+
],
|
| 1262 |
+
"angle": 0,
|
| 1263 |
+
"content": "[11] Linjuan Zhang, Kong Aik Lee, Lin Zhang, Longbiao Wang, and Baoning Niu, \"Cpaug: Refining copy-paste augmentation for speech antispoofing,\" in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 10996-11000."
|
| 1264 |
+
},
|
| 1265 |
+
{
|
| 1266 |
+
"type": "ref_text",
|
| 1267 |
+
"bbox": [
|
| 1268 |
+
0.078,
|
| 1269 |
+
0.623,
|
| 1270 |
+
0.492,
|
| 1271 |
+
0.657
|
| 1272 |
+
],
|
| 1273 |
+
"angle": 0,
|
| 1274 |
+
"content": "[12] Marcella ASTRID, Enjie GHORBEL, and Djamila AOUADA, “Targeted augmented data for audio deepfake detection,” in 32nd European Signal Processing Conference (EUSIPCO 2024), 2024."
|
| 1275 |
+
},
|
| 1276 |
+
{
|
| 1277 |
+
"type": "ref_text",
|
| 1278 |
+
"bbox": [
|
| 1279 |
+
0.078,
|
| 1280 |
+
0.659,
|
| 1281 |
+
0.492,
|
| 1282 |
+
0.713
|
| 1283 |
+
],
|
| 1284 |
+
"angle": 0,
|
| 1285 |
+
"content": "[13] Xin Wang and Junichi Yamagishi, \"Spoofed training data for speech spoofing countermeasure can be efficiently created using neural vocoders,\" in ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2023, pp. 1-5."
|
| 1286 |
+
},
|
| 1287 |
+
{
|
| 1288 |
+
"type": "ref_text",
|
| 1289 |
+
"bbox": [
|
| 1290 |
+
0.078,
|
| 1291 |
+
0.714,
|
| 1292 |
+
0.492,
|
| 1293 |
+
0.77
|
| 1294 |
+
],
|
| 1295 |
+
"angle": 0,
|
| 1296 |
+
"content": "[14] Xin Wang and Junichi Yamagishi, \"Can large-scale vocoded spoofed data improve speech spoofing countermeasure with a self-supervised front end?\", in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 10311-10315."
|
| 1297 |
+
},
|
| 1298 |
+
{
|
| 1299 |
+
"type": "ref_text",
|
| 1300 |
+
"bbox": [
|
| 1301 |
+
0.078,
|
| 1302 |
+
0.771,
|
| 1303 |
+
0.492,
|
| 1304 |
+
0.817
|
| 1305 |
+
],
|
| 1306 |
+
"angle": 0,
|
| 1307 |
+
"content": "[15] Xiaofeng Liu, Yang Zou, Lingsheng Kong, Zhihui Diao, Junliang Yan, Jun Wang, Site Li, Ping Jia, and Jane You, \"Data augmentation via latent space interpolation for image classification,\" in 2018 24th International Conference on Pattern Recognition (ICPR). IEEE, 2018, pp. 728-733."
|
| 1308 |
+
},
|
| 1309 |
+
{
|
| 1310 |
+
"type": "ref_text",
|
| 1311 |
+
"bbox": [
|
| 1312 |
+
0.078,
|
| 1313 |
+
0.818,
|
| 1314 |
+
0.492,
|
| 1315 |
+
0.873
|
| 1316 |
+
],
|
| 1317 |
+
"angle": 0,
|
| 1318 |
+
"content": "[16] Zhiyuan Yan, Yuhao Luo, Siwei Lyu, Qingshan Liu, and Baoyuan Wu, \"Transcending forgery specificity with latent space augmentation for generalizable deepfake detection,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 8984-8994."
|
| 1319 |
+
},
|
| 1320 |
+
{
|
| 1321 |
+
"type": "ref_text",
|
| 1322 |
+
"bbox": [
|
| 1323 |
+
0.078,
|
| 1324 |
+
0.874,
|
| 1325 |
+
0.492,
|
| 1326 |
+
0.91
|
| 1327 |
+
],
|
| 1328 |
+
"angle": 0,
|
| 1329 |
+
"content": "[17] Xin Wang, Junichi Yamagishi, Massimiliano Todisco, Héctor Delgado, Andreas Nautsch, Nicholas Evans, Md Sahidullah, Ville Vestman, Tomi Kinnunen, Kong Aik Lee, et al., \"Asvspoof 2019: A large-scale public"
|
| 1330 |
+
},
|
| 1331 |
+
{
|
| 1332 |
+
"type": "list",
|
| 1333 |
+
"bbox": [
|
| 1334 |
+
0.078,
|
| 1335 |
+
0.085,
|
| 1336 |
+
0.492,
|
| 1337 |
+
0.91
|
| 1338 |
+
],
|
| 1339 |
+
"angle": 0,
|
| 1340 |
+
"content": null
|
| 1341 |
+
},
|
| 1342 |
+
{
|
| 1343 |
+
"type": "ref_text",
|
| 1344 |
+
"bbox": [
|
| 1345 |
+
0.536,
|
| 1346 |
+
0.063,
|
| 1347 |
+
0.921,
|
| 1348 |
+
0.086
|
| 1349 |
+
],
|
| 1350 |
+
"angle": 0,
|
| 1351 |
+
"content": "database of synthesized, converted and replayed speech,\" Computer Speech & Language, vol. 64, pp. 101114, 2020."
|
| 1352 |
+
},
|
| 1353 |
+
{
|
| 1354 |
+
"type": "ref_text",
|
| 1355 |
+
"bbox": [
|
| 1356 |
+
0.508,
|
| 1357 |
+
0.087,
|
| 1358 |
+
0.921,
|
| 1359 |
+
0.131
|
| 1360 |
+
],
|
| 1361 |
+
"angle": 0,
|
| 1362 |
+
"content": "[18] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou, \"Arcface: Additive angular margin loss for deep face recognition,\" in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019, pp. 4690-4699."
|
| 1363 |
+
},
|
| 1364 |
+
{
|
| 1365 |
+
"type": "ref_text",
|
| 1366 |
+
"bbox": [
|
| 1367 |
+
0.508,
|
| 1368 |
+
0.132,
|
| 1369 |
+
0.921,
|
| 1370 |
+
0.164
|
| 1371 |
+
],
|
| 1372 |
+
"angle": 0,
|
| 1373 |
+
"content": "[19] Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz, “mixup: Beyond empirical risk minimization,” arXiv preprint arXiv:1710.09412, 2017."
|
| 1374 |
+
},
|
| 1375 |
+
{
|
| 1376 |
+
"type": "ref_text",
|
| 1377 |
+
"bbox": [
|
| 1378 |
+
0.508,
|
| 1379 |
+
0.165,
|
| 1380 |
+
0.921,
|
| 1381 |
+
0.233
|
| 1382 |
+
],
|
| 1383 |
+
"angle": 0,
|
| 1384 |
+
"content": "[20] Jee-weon Jung, Hee-Soo Heo, Hemlata Tak, Hye-jin Shim, Joon Son Chung, Bong-Jin Lee, Ha-Jin Yu, and Nicholas Evans, “Aasist: Audio anti-spoofing using integrated spectro-temporal graph attention networks,” in ICASSP 2022-2022 IEEE international conference on acoustics, speech and signal processing (ICASSP). IEEE, 2022, pp. 6367-6371."
|
| 1385 |
+
},
|
| 1386 |
+
{
|
| 1387 |
+
"type": "ref_text",
|
| 1388 |
+
"bbox": [
|
| 1389 |
+
0.508,
|
| 1390 |
+
0.234,
|
| 1391 |
+
0.921,
|
| 1392 |
+
0.268
|
| 1393 |
+
],
|
| 1394 |
+
"angle": 0,
|
| 1395 |
+
"content": "[21] Zihan Pan, Tianchi Liu, Hardik B. Sailor, and Qiongqiong Wang, \"Attentive merging of hidden embeddings from pre-trained speech model for anti-spoofing detection,\" in Interspeech 2024, 2024, pp. 2090-2094."
|
| 1396 |
+
},
|
| 1397 |
+
{
|
| 1398 |
+
"type": "ref_text",
|
| 1399 |
+
"bbox": [
|
| 1400 |
+
0.508,
|
| 1401 |
+
0.268,
|
| 1402 |
+
0.921,
|
| 1403 |
+
0.311
|
| 1404 |
+
],
|
| 1405 |
+
"angle": 0,
|
| 1406 |
+
"content": "[22] Octavian Pascu, Adriana Stan, Dan Oneata, Elisaba Oneata, and Horia Cucu, \"Towards generalisable and calibrated audio deepfake detection with self-supervised representations,\" in Interspeech 2024, 2024, pp. 4828-4832."
|
| 1407 |
+
},
|
| 1408 |
+
{
|
| 1409 |
+
"type": "ref_text",
|
| 1410 |
+
"bbox": [
|
| 1411 |
+
0.508,
|
| 1412 |
+
0.312,
|
| 1413 |
+
0.921,
|
| 1414 |
+
0.369
|
| 1415 |
+
],
|
| 1416 |
+
"angle": 0,
|
| 1417 |
+
"content": "[23] Yinlin Guo, Haofan Huang, Xi Chen, He Zhao, and Yuehai Wang, \"Audio deepfake detection with self-supervised wavlm and multi-fusion attentive classifier,\" in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 12702-12706."
|
| 1418 |
+
},
|
| 1419 |
+
{
|
| 1420 |
+
"type": "ref_text",
|
| 1421 |
+
"bbox": [
|
| 1422 |
+
0.508,
|
| 1423 |
+
0.37,
|
| 1424 |
+
0.921,
|
| 1425 |
+
0.403
|
| 1426 |
+
],
|
| 1427 |
+
"angle": 0,
|
| 1428 |
+
"content": "[24] Youngsik Eom, Yeonghyeon Lee, Ji Sub Um, and Hoi Rin Kim, “Antispoofing using transfer learning with variational information bottleneck,” in Interspeech 2022, 2022, pp. 3568-3572."
|
| 1429 |
+
},
|
| 1430 |
+
{
|
| 1431 |
+
"type": "ref_text",
|
| 1432 |
+
"bbox": [
|
| 1433 |
+
0.508,
|
| 1434 |
+
0.403,
|
| 1435 |
+
0.921,
|
| 1436 |
+
0.459
|
| 1437 |
+
],
|
| 1438 |
+
"angle": 0,
|
| 1439 |
+
"content": "[25] Jingze Lu, Yuxiang Zhang, Wenchao Wang, Zengqiang Shang, and Pengyuan Zhang, \"One-class knowledge distillation for spoofing speech detection,\" in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 11251-11255."
|
| 1440 |
+
},
|
| 1441 |
+
{
|
| 1442 |
+
"type": "ref_text",
|
| 1443 |
+
"bbox": [
|
| 1444 |
+
0.508,
|
| 1445 |
+
0.459,
|
| 1446 |
+
0.921,
|
| 1447 |
+
0.515
|
| 1448 |
+
],
|
| 1449 |
+
"angle": 0,
|
| 1450 |
+
"content": "[26] Xiaopeng Wang, Ruibo Fu, Zhengqi Wen, Zhiyong Wang, Yuankun Xie, Yukun Liu, Jianhua Tao, Xuefei Liu, Yongwei Li, Xin Qi, Yi Lu, and Shuchen Shi, \"Genuine-focused learning using mask autoencoder for generalized fake audio detection,\" in Interspeech 2024, 2024, pp. 4848-4852."
|
| 1451 |
+
},
|
| 1452 |
+
{
|
| 1453 |
+
"type": "ref_text",
|
| 1454 |
+
"bbox": [
|
| 1455 |
+
0.508,
|
| 1456 |
+
0.516,
|
| 1457 |
+
0.921,
|
| 1458 |
+
0.562
|
| 1459 |
+
],
|
| 1460 |
+
"angle": 0,
|
| 1461 |
+
"content": "[27] Yuxiang Zhang, Jingze Lu, Zengqiang Shang, Wenchao Wang, and Pengyuan Zhang, \"Improving short utterance anti-spoofing with aassist2,\" in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 11636-11640."
|
| 1462 |
+
},
|
| 1463 |
+
{
|
| 1464 |
+
"type": "ref_text",
|
| 1465 |
+
"bbox": [
|
| 1466 |
+
0.508,
|
| 1467 |
+
0.562,
|
| 1468 |
+
0.921,
|
| 1469 |
+
0.607
|
| 1470 |
+
],
|
| 1471 |
+
"angle": 0,
|
| 1472 |
+
"content": "[28] Duc-Tuan Truong, Ruijie Tao, Tuan Nguyen, Hieu-Thi Luong, Kong Aik Lee, and Eng Siong Chng, “Temporal-channel modeling in multi-head self-attention for synthetic speech detection,” in Interspeech 2024, 2024, pp. 537–541."
|
| 1473 |
+
},
|
| 1474 |
+
{
|
| 1475 |
+
"type": "ref_text",
|
| 1476 |
+
"bbox": [
|
| 1477 |
+
0.508,
|
| 1478 |
+
0.608,
|
| 1479 |
+
0.921,
|
| 1480 |
+
0.651
|
| 1481 |
+
],
|
| 1482 |
+
"angle": 0,
|
| 1483 |
+
"content": "[29] Haochen Wu, Wu Guo, Zhentao Zhang, Wenting Zhao, Shengyu Peng, and Jie Zhang, \"Spoofing speech detection by modeling local spectro-temporal and long-term dependency,\" in Interspeech 2024, 2024, pp. 507-511."
|
| 1484 |
+
},
|
| 1485 |
+
{
|
| 1486 |
+
"type": "list",
|
| 1487 |
+
"bbox": [
|
| 1488 |
+
0.508,
|
| 1489 |
+
0.063,
|
| 1490 |
+
0.921,
|
| 1491 |
+
0.651
|
| 1492 |
+
],
|
| 1493 |
+
"angle": 0,
|
| 1494 |
+
"content": null
|
| 1495 |
+
}
|
| 1496 |
+
]
|
| 1497 |
+
]
|
2501.14xxx/2501.14240/3d87a418-e222-4e41-ac34-ee2f21e49a0d_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:69a3bc55fee38a8516a5ed5a67ab5cdc2d8a0c62cfca1a1ebb9d6f0b320541be
|
| 3 |
+
size 1330725
|
2501.14xxx/2501.14240/full.md
ADDED
|
@@ -0,0 +1,250 @@
|
|
|
| 1 |
+
# Generalizable Audio Deepfake Detection via Latent Space Refinement and Augmentation
|
| 2 |
+
|
| 3 |
+
Wen Huang $^{1,2}$, Yanmei Gu $^{3}$, Zhiming Wang $^{3}$, Huijia Zhu $^{3}$, Yanmin Qian $^{1\dagger}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup> Auditory Cognition and Computational Acoustics Lab, MoE Key Lab of Artificial Intelligence, AI Institute
|
| 6 |
+
|
| 7 |
+
Department of Computer Science and Engineering, Shanghai Jiao Tong University, Shanghai, China
|
| 8 |
+
|
| 9 |
+
$^{2}$ SJTU Paris Elite Institute of Technology, $^{3}$ Ant Group, Shanghai, China
|
| 10 |
+
|
| 11 |
+
Abstract—Advances in speech synthesis technologies, like text-to-speech (TTS) and voice conversion (VC), have made detecting deepfake speech increasingly challenging. Spoofing countermeasures often struggle to generalize effectively, particularly when faced with unseen attacks. To address this, we propose a novel strategy that integrates Latent Space Refinement (LSR) and Latent Space Augmentation (LSA) to improve the generalization of deepfake detection systems. LSR introduces multiple learnable prototypes for the spoof class, refining the latent space to better capture the intricate variations within spoofed data. LSA further diversifies spoofed data representations by applying augmentation techniques directly in the latent space, enabling the model to learn a broader range of spoofing patterns. We evaluated our approach on four representative datasets, i.e. ASVspoof 2019 LA, ASVspoof 2021 LA and DF, and In-The-Wild. The results show that LSR and LSA perform well individually, and their integration achieves competitive results, matching or surpassing current state-of-the-art methods.
|
| 12 |
+
|
| 13 |
+
Index Terms—audio deepfake detection, anti-spoofing, generalization
|
| 14 |
+
|
| 15 |
+
# I. INTRODUCTION
|
| 16 |
+
|
| 17 |
+
With advancements in speech synthesis systems such as text-to-speech (TTS) and voice conversion (VC), detecting deepfake speech has become increasingly challenging. Synthesized data can originate from a wide range of synthesis systems, each with its own distinct characteristics, making it difficult for spoofing countermeasures to generalize effectively. This challenge is exacerbated when detectors encounter unseen deepfake attacks, often leading to significant performance degradation [1], [2].
|
| 18 |
+
|
| 19 |
+
To enhance generalization in deepfake detectors, one key direction focuses on developing more robust classification models through improved architecture and learning strategies. Recent studies have utilized features extracted from self-supervised speech models such as Wav2vec [3], Whisper [4], and WavLM [5] as front-end inputs for deepfake detection. These models, trained on large-scale and diverse speech data, strengthen the detection process by providing reliable and domain-agnostic features [6]. Beyond improving feature extraction, researchers have also worked to improve the accuracy of back-end classifiers. Traditional binary classification methods often struggle with generalization, particularly when facing distribution mismatches. To address this, one-class learning approaches have been explored, focusing on creating a compact representation of bonafide speech while effectively pushing away spoofed speech, leading to a well-separated and more generalizable feature space [7], [8].
|
| 20 |
+
|
| 21 |
+
Another promising direction is through data augmentation, which enhances the robustness of the model by exposing it to a wider range of data variations during training. Traditional techniques such as speed perturbation, SpecAugment [9], and codec augmentation have been shown to improve performance. More recent methods, such as Rawboost [10], use signal processing techniques to boost or distort raw audio, leading to significant improvements. There are also augmentation strategies specifically designed for audio deepfake
|
| 22 |
+
|
| 23 |
+
detection. For instance, CpAug [11] employs a copy-paste strategy to generate diverse training samples, while targeted augmentation methods [12] create pseudo-fakes that challenge the decision boundary, thereby increasing the diversity of fake samples. Furthermore, research has shown that using neural vocoders to augment data can further enhance detection performance [13], [14].
|
| 24 |
+
|
| 25 |
+
Building on these two key directions, we propose a novel strategy of integrating latent space refinement and augmentation to further boost the generalization ability of deepfake detection, as shown in Fig. 1. First, to address the limitations of binary classification in capturing the diverse nature of spoofed audio, we introduce Latent Space Refinement (LSR). In binary classification, models typically assign a single prototype to each class, which oversimplifies the complex variability within spoofed audio. While one-class learning tries to address this by compactly representing the bonafide class and treating others as outliers, it often imposes a rigid boundary that fails to capture the diversity in spoofed data. In contrast, our LSR approach introduces multiple learnable prototypes specifically for the spoof class, refining the latent space to better model the intricate variations within spoofed data. This enhanced representation reduces intra-class variability and allows the model to generalize more effectively across different spoofing attacks.
|
| 26 |
+
|
| 27 |
+
Second, to further enhance generalization, we apply Latent Space Augmentation (LSA) to diversify spoofed data representations, inspired by successful applications in computer vision [15], [16]. Unlike traditional data augmentation, which focuses on manipulating input data, LSA directly targets the latent space, allowing it to be independent of specific audio-level operations. By applying techniques such as additive noise, affine transformation, batch mixup, and linear interpolation and extrapolation, LSA generates a wide range of spoofed examples that expand the latent space. This expansion helps the model capture more diverse patterns within spoofed data, thereby improving its ability to generalize across different spoofing attacks and enhancing overall detection performance.
|
| 28 |
+
|
| 29 |
+
Our experimental results confirm the effectiveness of the proposed latent space refinement and augmentation. We evaluated the approach on four representative datasets: ASVspoof 2019 LA [17], ASVspoof 2021 LA and DF [1], and In-The-Wild [2]. The findings show that both LSR and LSA individually contribute to performance improvements, with the integrated system achieving competitive results, matching or surpassing the current state-of-the-art across these diverse benchmarks.
|
| 30 |
+
|
| 31 |
+
# II. METHODS
|
| 32 |
+
|
| 33 |
+
# A. Latent Space Refinement
|
| 34 |
+
|
| 35 |
+
To capture the inherent variations within the spoof class, we introduce multiple learnable prototypes that refine the latent distribution. Assume there are $K$ prototypes for each class, denoted as $\{c_1,\dots ,c_K\}$ . For the bonafide class, $K = 1$ , while for the spoof
|
| 36 |
+
|
| 37 |
+

|
| 38 |
+
Fig. 1. The pipeline of the proposed method, illustrating the process of Latent Space Refinement (LSR) and Latent Space Augmentation (LSA).
|
| 39 |
+
|
| 40 |
+

|
| 41 |
+
|
| 42 |
+

|
| 43 |
+
|
| 44 |
+

|
| 45 |
+
|
| 46 |
+

|
| 47 |
+
|
| 48 |
+

|
| 49 |
+
|
| 50 |
+

|
| 51 |
+
|
| 52 |
+

|
| 53 |
+
|
| 54 |
+

|
| 55 |
+
|
| 56 |
+

|
| 57 |
+
|
| 58 |
+
class, $K$ is a hyperparameter chosen based on the complexity of the data. To determine the probability of a sample $x$ belonging to a particular class, we compute the maximum cosine similarity between its embedding $z$ and each of the class prototypes:
|
| 59 |
+
|
| 60 |
+
$$
|
| 61 |
+
\cos \theta = \sum_{i = 1}^{K} \frac{e^{\langle c_{i}, z \rangle \cdot \gamma}}{\sum_{j = 1}^{K} e^{\langle c_{j}, z \rangle \cdot \gamma}} \langle c_{i}, z \rangle \tag{1}
|
| 62 |
+
$$
|
| 63 |
+
|
| 64 |
+
where $\langle x,y\rangle = \frac{x\cdot y}{\|x\|\|y\|}$ denotes the cosine similarity between two vectors, and $\gamma$ is a scaling factor, set to 10. The softmax-like weighting smooths the hard maximum operator, so the similarity varies continuously even when a sample is close to several prototypes.
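
For concreteness, Eq. (1) can be sketched in a few lines of PyTorch; the function name, the batched shapes, and the default $\gamma$ are illustrative assumptions on our part, not the authors' released code:

```python
import torch
import torch.nn.functional as F

def smoothed_max_cos(protos: torch.Tensor, z: torch.Tensor, gamma: float = 10.0) -> torch.Tensor:
    """Eq. (1): softmax-smoothed maximum cosine similarity between a batch of
    embeddings z (B, D) and K class prototypes (K, D); returns shape (B,)."""
    sims = F.normalize(z, dim=-1) @ F.normalize(protos, dim=-1).T  # (B, K) cosine similarities
    weights = torch.softmax(gamma * sims, dim=-1)                  # smooth stand-in for argmax
    return (weights * sims).sum(dim=-1)
```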
|
| 65 |
+
|
| 66 |
+
To guide the learning of these prototypes, we design a prototype-based classification loss, inspired by the additive angular margin loss [18]:
|
| 67 |
+
|
| 68 |
+
$$
|
| 69 |
+
\mathcal{L}_{\text{proto}}(z) = - \log \frac{e^{s \left(\cos \left(\theta_{y} + m\right)\right)}}{e^{s \left(\cos \left(\theta_{y} + m\right)\right)} + e^{s \left(\cos \theta_{1 - y}\right)}} \tag{2}
|
| 70 |
+
$$
|
| 71 |
+
|
| 72 |
+
Here, $y \in \{0,1\}$ is the label of sample $x$ , $m$ is an angular margin penalty, and $s$ is a scaling factor. This loss function encourages the model to push the embeddings of genuine samples closer to the bonafide prototype and spoofed samples closer to their corresponding prototypes.
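
A minimal sketch of Eq. (2), assuming the smoothed cosines to the true and opposite class come from `smoothed_max_cos` above; the defaults $s = 32$ and $m = 0.2$ follow the training settings in Sec. III:

```python
import torch
import torch.nn.functional as F

def proto_loss(cos_y: torch.Tensor, cos_other: torch.Tensor,
               s: float = 32.0, m: float = 0.2) -> torch.Tensor:
    """Eq. (2): additive-angular-margin loss over the two class scores, each (B,)."""
    theta_y = torch.acos(cos_y.clamp(-1 + 1e-7, 1 - 1e-7))
    logit_y = s * torch.cos(theta_y + m)  # margin-penalized true-class logit
    logit_other = s * cos_other
    # -log softmax over the two logits equals softplus of the logit gap
    return F.softplus(logit_other - logit_y).mean()
```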
|
| 73 |
+
|
| 74 |
+
While prototypes are learned during the training process, there's a risk that they may collapse to a single center. To mitigate this, we introduce an intra-class regularization for the spoof prototypes $\{c^s\}$ :
|
| 75 |
+
|
| 76 |
+
$$
|
| 77 |
+
\mathcal{L}_{\text{intra}}\left(\left\{c^{s}\right\}\right) = \frac{2}{K (K - 1)} \sum_{i = 1}^{K - 1} \sum_{j = i + 1}^{K} \left\langle c_{i}^{s}, c_{j}^{s} \right\rangle \tag{3}
|
| 78 |
+
$$
|
| 79 |
+
|
| 80 |
+
This regularization term calculates the mean similarity between the spoof prototypes, encouraging them to spread out in the latent space, thereby preventing prototype collapse.
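
Since Eq. (3) is simply the mean of the strictly upper-triangular entries of the prototype cosine matrix, a sketch is short (the naming is ours):

```python
import torch
import torch.nn.functional as F

def intra_reg(spoof_protos: torch.Tensor) -> torch.Tensor:
    """Eq. (3): mean pairwise cosine similarity among the K spoof prototypes (K, D)."""
    c = F.normalize(spoof_protos, dim=-1)
    sims = c @ c.T                           # (K, K) cosine matrix
    K = c.shape[0]
    iu = torch.triu_indices(K, K, offset=1)  # indices of the K(K-1)/2 distinct pairs
    return sims[iu[0], iu[1]].mean()
```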
|
| 81 |
+
|
| 82 |
+
To further enhance the distinction between spoof and bonafide prototypes, we introduce an inter-class regularization term. This term calculates the smoothed maximum cosine similarity between the spoof prototypes $\{c^s\}$ and the single bonafide prototype $c^b$ :
|
| 83 |
+
|
| 84 |
+
$$
|
| 85 |
+
\mathcal{L}_{\text{inter}}\left(\left\{c^{s}\right\}, c^{b}\right) = \delta + \sum_{i = 1}^{K} \frac{e^{\langle c_{i}^{s}, c^{b} \rangle \cdot \gamma}}{\sum_{j = 1}^{K} e^{\langle c_{j}^{s}, c^{b} \rangle \cdot \gamma}} \left\langle c_{i}^{s}, c^{b} \right\rangle \tag{4}
|
| 86 |
+
$$
|
| 87 |
+
|
| 88 |
+
where $\delta$ is a regularization coefficient that prevents the loss from becoming negative.
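
A corresponding sketch of Eq. (4), reusing the same softmax smoothing as Eq. (1); the default $\delta = 0.2$ follows Sec. III:

```python
import torch
import torch.nn.functional as F

def inter_reg(spoof_protos: torch.Tensor, bona_proto: torch.Tensor,
              gamma: float = 10.0, delta: float = 0.2) -> torch.Tensor:
    """Eq. (4): delta plus the smoothed max cosine between the K spoof
    prototypes (K, D) and the single bonafide prototype (D,)."""
    sims = F.normalize(spoof_protos, dim=-1) @ F.normalize(bona_proto, dim=-1)  # (K,)
    weights = torch.softmax(gamma * sims, dim=-1)
    return delta + (weights * sims).sum()
```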
|
| 89 |
+
|
| 90 |
+
Hence, the overall objective function for LSR is defined as follows:
|
| 91 |
+
|
| 92 |
+
$$
|
| 93 |
+
\mathcal{L}_{LSR} = \mathcal{L}_{\text{proto}} + \mathcal{L}_{\text{intra}} + \mathcal{L}_{\text{inter}} \tag{5}
|
| 94 |
+
$$
|
| 95 |
+
|
| 96 |
+
In addition, the LSR loss can be incorporated alongside a binary classification loss, such as Weighted Cross Entropy (WCE), to refine the latent distribution and reduce intra-class variance.
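
The combined objective might then be assembled as in the sketch below, which builds on the helpers above; the label convention (0 = bonafide, 1 = spoof) and the separate classifier logits for WCE are our assumptions, while the 0.9/0.1 class weights follow Sec. III:

```python
import torch
import torch.nn.functional as F

def lsr_wce_loss(z, y, spoof_protos, bona_proto, logits):
    """Eq. (5) plus WCE, with y in {0, 1} and 0 = bonafide (our convention)."""
    cos_b = smoothed_max_cos(bona_proto.unsqueeze(0), z)  # (B,) similarity to bonafide
    cos_s = smoothed_max_cos(spoof_protos, z)             # (B,) similarity to spoof set
    cos_y = torch.where(y == 0, cos_b, cos_s)             # smoothed cosine to the true class
    cos_o = torch.where(y == 0, cos_s, cos_b)             # ... and to the opposite class
    l_lsr = (proto_loss(cos_y, cos_o) + intra_reg(spoof_protos)
             + inter_reg(spoof_protos, bona_proto))
    l_wce = F.cross_entropy(logits, y, weight=torch.tensor([0.9, 0.1]))
    return l_lsr + l_wce
```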
|
| 97 |
+
|
| 98 |
+
# B. Latent Space Augmentation
|
| 99 |
+
|
| 100 |
+
While multi-prototypical refinement enhances the representation of the spoofed class, further generalization can be achieved by augmenting the diversity of the training data. Instead of solely augmenting raw input data, we apply augmentation directly in the latent space, where lower dimensionality allows for more targeted variations. By focusing these augmentations on spoofed latent features, we generate diverse spoofing variations. Notably, these augmentations are not applied to bonafide latent features, preserving their authenticity.
|
| 101 |
+
|
| 102 |
+
Given a batch of embeddings $z$, we denote the spoof embeddings in this batch as $z^s$ and the bonafide embeddings as $z^b$. To create diverse variations of spoof embeddings, we design five latent augmentation patterns for $z^s$:
|
| 103 |
+
|
| 104 |
+
Additive Noise (AN). A simple yet effective idea is to add random perturbations to the latent features. We draw the additive noise from a Gaussian distribution:
|
| 105 |
+
|
| 106 |
+
$$
|
| 107 |
+
\hat{z}^{s} = z^{s} + \beta \cdot X, \quad X \sim \mathcal{N}(0, \mathbf{I}) \tag{6}
|
| 108 |
+
$$
|
| 109 |
+
|
| 110 |
+
where $\mathcal{N}(0,\mathbf{I})$ is the standard normal distribution, $\mathbf{I}$ is the identity matrix, and $\beta$ is a scaling factor sampled from $\mathcal{N}(0,1)$ .
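As a sketch (the function name is ours), with $\beta$ redrawn on every call:

```python
import torch

def augment_an(z_s):
    """Additive-noise augmentation (Eq. 6) for a batch of spoof embeddings."""
    beta = torch.randn(())                     # scaling factor drawn from N(0, 1)
    return z_s + beta * torch.randn_like(z_s)  # standard Gaussian perturbation
```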
|
| 111 |
+
|
| 112 |
+
Affine Transformation (AT). This common transformation for 1D vectors involves scaling and translating the latent features:
|
| 113 |
+
|
| 114 |
+
$$
|
| 115 |
+
\hat{z}^{s} = a \cdot z^{s} + b \tag{7}
|
| 116 |
+
$$
|
| 117 |
+
|
| 118 |
+
where $a$ is sampled from $\mathcal{U}(0.9,1.1)$ and $b$ is set to 0.
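With these settings the transformation reduces to a random rescaling:

```python
import torch

def augment_at(z_s):
    """Affine-transformation augmentation (Eq. 7) with a ~ U(0.9, 1.1), b = 0."""
    a = torch.empty(()).uniform_(0.9, 1.1)  # per-call random scale
    return a * z_s                          # b = 0, so the translation term vanishes
```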
|
| 119 |
+
|
| 120 |
+
Batch Mixup (BM). Inspired by data mixup strategies [19], we create new latent features by blending pairs of spoof features in the batch, creating smoother transitions and intermediate variations:
|
| 121 |
+
|
| 122 |
+
$$
|
| 123 |
+
\hat{z}_{i}^{s} = \alpha \cdot z_{i}^{s} + (1 - \alpha) \cdot z_{\pi(i)}^{s} \tag{8}
|
| 124 |
+
$$
|
| 125 |
+
|
| 126 |
+
where $i$ indexes the batch, $\pi$ denotes a random permutation of the batch indices, and $\alpha$ is a mixup coefficient sampled from $\mathrm{Beta}(0.5, 0.5)$.
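A sketch of Eq. (8), drawing one $\alpha$ per batch (the text leaves open whether $\alpha$ is shared across the batch):

```python
import torch

def augment_bm(z_s):
    """Batch-mixup augmentation: blend each spoof embedding with a permuted partner."""
    alpha = torch.distributions.Beta(0.5, 0.5).sample()  # mixup coefficient
    perm = torch.randperm(z_s.shape[0])                  # random pairing pi
    return alpha * z_s + (1 - alpha) * z_s[perm]
```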
|
| 127 |
+
|
| 128 |
+
The following two techniques rely on the prototypes learned in latent space refinement:
|
| 129 |
+
|
| 130 |
+
Linear Interpolation (LI). To create more challenging examples that target the decision boundary, we linearly interpolate spoof embeddings toward the bonafide prototype $c^b$. Since the prototypes in LSR are normalized to lie on a unit hypersphere (a consequence of using cosine similarity), the vector norms are incorporated to adjust for the transition to Euclidean space:
|
| 131 |
+
|
| 132 |
+
$$
|
| 133 |
+
\hat{z}^{s} = z^{s} + \lambda_{i} \cdot \left(\frac{\|z^{s}\|}{\|c^{b}\|} c^{b} - z^{s}\right) \tag{9}
|
| 134 |
+
$$
|
| 135 |
+
|
| 136 |
+
where $\lambda_{i}$ is an interpolation coefficient sampled from $\mathcal{U}(0,0.1)$ , and the norm term $\| z^s\| /\| c^b\|$ aligns the scales of the vectors.
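A sketch of Eq. (9); we assume $\lambda_i$ is drawn independently per sample, which the text does not specify:

```python
import torch

def augment_li(z_s, c_bona):
    """Interpolate spoof embeddings toward the (rescaled) bonafide prototype."""
    lam = torch.empty(z_s.shape[0], 1).uniform_(0.0, 0.1)   # per-sample coefficient
    scale = z_s.norm(dim=-1, keepdim=True) / c_bona.norm()  # ||z^s|| / ||c^b||
    return z_s + lam * (scale * c_bona - z_s)
```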
|
| 137 |
+
|
| 138 |
+
Linear Extrapolation (LE). In addition to interpolation, we also perform extrapolation from the nearest spoof prototype to create new features:
|
| 139 |
+
|
| 140 |
+
$$
|
| 141 |
+
\hat{z}^{s} = z^{s} + \lambda_{e} \cdot \left(z^{s} - \frac{\|z^{s}\|}{\|c_{n}^{s}\|} c_{n}^{s}\right) \tag{10}
|
| 142 |
+
$$
|
| 143 |
+
|
| 144 |
+
where $c_{n}^{s}$ is the nearest spoof prototype to $z^{s}$ and $\lambda_{e}$ is an extrapolation coefficient sampled from $\mathcal{U}(0,0.1)$. As in LI, the norm ratio $\| z^{s}\| /\| c_{n}^{s}\|$ adjusts for the Euclidean representation. This method pushes the spoof features further away from their nearest prototype, generating more diverse variations.
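A sketch of Eq. (10), picking each sample's nearest prototype by cosine similarity (our assumption of how "nearest" is measured):

```python
import torch
import torch.nn.functional as F

def augment_le(z_s, c_spoof):
    """Extrapolate spoof embeddings away from their nearest spoof prototype."""
    sims = F.normalize(z_s, dim=-1) @ F.normalize(c_spoof, dim=-1).T  # (B, K)
    c_n = c_spoof[sims.argmax(dim=-1)]                                # nearest prototype per sample
    lam = torch.empty(z_s.shape[0], 1).uniform_(0.0, 0.1)
    scale = z_s.norm(dim=-1, keepdim=True) / c_n.norm(dim=-1, keepdim=True)
    return z_s + lam * (z_s - scale * c_n)
```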
|
| 145 |
+
|
| 146 |
+
Finally, the augmented latent features $\hat{z}^s$ are concatenated with the original features $z$ , forming $z' = [z \parallel \hat{z}^s]$ . These enhanced features are then used for loss calculation during subsequent training, allowing the model to learn from a more varied set of spoofed data.
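Putting the pieces together, one training step might look as follows, reusing the sketch functions above and assuming $y = 0$ marks spoof; this is our illustration of the flow, not the authors' implementation:

```python
import torch

def training_step(z, y, c_bona, c_spoof, wce_loss, augment):
    """z: (B, D) embeddings; y: (B,) labels (0 = spoof); wce_loss, augment: callables."""
    z_s_hat = augment(z[y == 0])               # augment only the spoof embeddings
    z_aug = torch.cat([z, z_s_hat], dim=0)     # z' = [z || z_hat^s]
    y_aug = torch.cat([y, torch.zeros(len(z_s_hat), dtype=y.dtype)])
    lsr = (proto_loss(z_aug, c_bona, c_spoof, y_aug)  # Eq. (2)
           + intra_reg(c_spoof)                       # Eq. (3)
           + inter_reg(c_spoof, c_bona))              # Eq. (4); sums to Eq. (5)
    return wce_loss(z_aug, y_aug) + lsr               # WCE + LSR, as described above
```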
|
| 147 |
+
|
| 148 |
+
# III. EXPERIMENTS
|
| 149 |
+
|
| 150 |
+
# A. Experimental Settings
|
| 151 |
+
|
| 152 |
+
Datasets and metrics. We train all systems using the ASVspoof 2019 LA training set [17], which includes approximately $25\mathrm{k}$ utterances and 6 spoofing attacks involving VC or TTS. To evaluate generalization performance, we test on multiple datasets: the ASVspoof 2019 LA evaluation set (19LA) [17], containing $71\mathrm{k}$ utterances with 13 different spoofing attacks; the ASVspoof 2021 LA set (21LA) [1], comprising about $181\mathrm{k}$ utterances with algorithms similar to 19LA but also reflecting telephony systems' encoding and transmission effects; the ASVspoof 2021 DF set (21DF) [1], with over $600\mathrm{k}$ utterances and more than 100 spoofing attacks processed with various lossy codecs; and the In-The-Wild dataset (ITW) [2], which features approximately $32\mathrm{k}$ utterances collected under real-world, non-controlled conditions, making it a more challenging dataset. Performance is measured using Equal Error Rate (EER).
|
| 153 |
+
|
| 154 |
+
Training details. We adopt the model architecture from [6], utilizing Wav2Vec2.0 XLSR [3] as the frontend feature extractor and AASIST [20] as the backend classifier. Input speech is randomly chunked into 4-second segments, with Rawboost [10] applied as basic augmentation and codec augmentation as extra augmentation. The learning rate is set to 1e-6 for the backbone model and 1e-3 for the prototypes in LSR. For the LSR loss, we set the scaling factor $s = 32$ , angular margin $m = 0.2$ , and regularization coefficient $\delta = 0.2$ . For the WCE loss, the weights for bonafide and spoof classes are set to 0.9 and 0.1, respectively. For LSA, we either fix one type of augmentation during training or randomly select from all augmentation types (denoted as All).
|
| 155 |
+
|
| 156 |
+
# B. Overall Performance Comparison
|
| 157 |
+
|
| 158 |
+
To evaluate the overall performance of the proposed methods, we tested the system on four datasets and compared the results with those from the literature that used the same training dataset, as shown in Table I. Across all datasets, LSR+LSA consistently outperforms LSR alone and often ranks among the top performers, highlighting the effectiveness of integrating latent space refinement with latent space augmentation. To further enhance the results, we applied additional data augmentation, which led to EERs of $0.12\%$ on 19LA, $1.05\%$ on 21LA, $1.86\%$ on 21DF, and $5.54\%$ on ITW. This places our method on par with, or ahead of, the current state-of-the-art methods. Notably, our method focuses on refining and augmenting the latent space, which contrasts with recent approaches that focus on modifying the model architecture [28], [29]. These two strategies—latent space manipulation and architectural improvements—target different aspects of the problem and could potentially be combined for even better results. This highlights the flexibility and advantage of our method,
|
| 159 |
+
|
| 160 |
+
TABLE I OVERALL PERFORMANCE COMPARISON IN EER(%) ACROSS MULTIPLE DATASETS. ALL SYSTEMS ARE TRAINED ON THE ASVSPOOF2019 LA TRAINING SET. BEST RESULTS ARE HIGHLIGHTED IN BOLD, AND SECOND-BEST RESULTS ARE UNDERLINED.
|
| 161 |
+
|
| 162 |
+
<table><tr><td>System</td><td>19LA</td><td>21LA</td><td>21DF</td><td>ITW</td></tr><tr><td>WavLM+AttM [21]</td><td>0.65</td><td>3.50</td><td>3.19</td><td>-</td></tr><tr><td>Wav2Vec+LogReg [22]</td><td>0.50</td><td>-</td><td>-</td><td>7.20</td></tr><tr><td>WavLM+MFA [23]</td><td>0.42</td><td>5.08</td><td>2.56</td><td>-</td></tr><tr><td>Wav2Vec+VIB [24]</td><td>0.40</td><td>4.92</td><td>-</td><td>-</td></tr><tr><td>OCKD [25]</td><td>0.39</td><td>0.90</td><td>2.27</td><td>7.68</td></tr><tr><td>GFL-FAD [26]</td><td>0.25</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Wav2Vec+Linear [13]</td><td>0.22</td><td>3.63</td><td>3.65</td><td>16.17</td></tr><tr><td>OC+ACS [8]</td><td>0.17</td><td>1.30</td><td>2.19</td><td>-</td></tr><tr><td>Wav2Vec+AASIST [6]</td><td>-</td><td>0.82</td><td>2.85</td><td>-</td></tr><tr><td>Wav2Vec+AASIST2 [27]</td><td>0.15</td><td>1.61</td><td>2.77</td><td>-</td></tr><tr><td>Wav2vec+Conformer+TCM [28]</td><td>-</td><td>1.03</td><td>2.06</td><td>-</td></tr><tr><td>Wav2vec+STJ-GAT+BLDL* [29]</td><td>0.06</td><td>0.56</td><td>1.89</td><td>-</td></tr><tr><td>LSR</td><td>0.19</td><td>2.35</td><td>3.01</td><td>6.58</td></tr><tr><td>LSR+LSA</td><td>0.15</td><td>1.19</td><td>2.43</td><td>5.92</td></tr><tr><td>LSR+LSA*</td><td>0.12</td><td>1.05</td><td>1.86</td><td>5.54</td></tr></table>
|
| 163 |
+
|
| 164 |
+
\* with extra data augmentation.
|
| 165 |
+
|
| 166 |
+
as it enhances generalization without needing to alter the underlying model architecture. In summary, the proposed LSR+LSA method consistently delivers strong results, matching or outperforming state-of-the-art performance across various datasets, demonstrating its robustness and effectiveness in generalizing across diverse deepfake detection tasks.
|
| 167 |
+
|
| 168 |
+
# C. Ablation Study on Latent Space Refinement
|
| 169 |
+
|
| 170 |
+
TABLE II EER $(\%)$ ACROSS DATASETS FOR SYSTEMS TRAINED WITH DIFFERENT LOSS CONFIGURATIONS. BEST RESULTS ARE IN BOLD, AND SECOND-BEST RESULTS ARE UNDERLINED.
|
| 171 |
+
|
| 172 |
+
<table><tr><td>Loss Configuration</td><td>19LA</td><td>21LA</td><td>21DF</td><td>ITW</td><td>Avg.</td></tr><tr><td>WCE</td><td>0.30</td><td>2.64</td><td>4.74</td><td>8.09</td><td>3.94</td></tr><tr><td>OC Softmax</td><td>0.31</td><td>1.60</td><td>4.06</td><td>7.86</td><td>3.46</td></tr><tr><td>LSR</td><td>0.23</td><td>1.55</td><td>3.22</td><td>7.45</td><td>3.11</td></tr><tr><td>w/o $\mathcal{L}_{\text{inter}}$</td><td>0.23</td><td>1.84</td><td>3.30</td><td>7.84</td><td>3.30</td></tr><tr><td>w/o $\mathcal{L}_{\text{intra}}$</td><td>0.27</td><td>2.62</td><td>4.02</td><td>7.75</td><td>3.67</td></tr><tr><td>w/o $\mathcal{L}_{\text{intra}}$, $\mathcal{L}_{\text{inter}}$</td><td>0.32</td><td>2.86</td><td>4.11</td><td>8.05</td><td>3.84</td></tr><tr><td>WCE+LSR</td><td>0.19</td><td>2.35</td><td>3.01</td><td>6.58</td><td>3.03</td></tr></table>
|
| 173 |
+
|
| 174 |
+
Table II presents the performance of various loss configurations during training. The baseline configuration uses weighted cross entropy (WCE) loss for binary classification, with OC Softmax [7] included for comparison. Incorporating Latent Space Refinement (LSR) improves performance over both WCE and OC Softmax. We further examine the effects of LSR's loss terms. Removing inter-class regularization results in minimal degradation, indicating that the core prototype-based loss sufficiently handles prototype separation. However, removing intra-class regularization significantly reduces performance, as this term is crucial for maintaining prototype diversity within the spoof class and preventing collapse. When both regularizations are removed, performance drops to baseline levels. Additionally, combining LSR with WCE yields the best overall results. While WCE provides a solid foundation for binary classification, LSR refines the latent space to better capture variations in spoofed data. This combination leads to improved generalization across the datasets.
|
| 175 |
+
|
| 176 |
+
Meanwhile, we evaluated the impact of the number of prototypes on performance, as shown in Fig. 3. Increasing the number of prototypes
|
| 177 |
+
|
| 178 |
+

|
| 179 |
+
Fig. 2. t-SNE visualization of the training dataset featuring various latent space augmentations. The green, blue, and red points represent the 2D projections of embeddings for the bonafide, spoof, and augmented spoof classes, respectively.
|
| 180 |
+
|
| 181 |
+

|
| 182 |
+
|
| 183 |
+

|
| 184 |
+
|
| 185 |
+

|
| 186 |
+
|
| 187 |
+

|
| 188 |
+
|
| 189 |
+

|
| 190 |
+
Fig. 3. The effect of the number of spoofed prototypes on EER $(\%)$ across different datasets (21LA, 21DF, and ITW).
|
| 191 |
+
|
| 192 |
+
from 1 to 8 improves performance, but further increasing to 16 shows diminishing returns. At 20 prototypes, performance declines, suggesting that too many prototypes can hinder generalization.
|
| 193 |
+
|
| 194 |
+
# D. Ablation Study on Latent Space Augmentation
|
| 195 |
+
|
| 196 |
+
TABLE III EER $(\%)$ ACROSS DATASETS FOR SYSTEMS TRAINED WITH DIFFERENT LATENT SPACE AUGMENTATION. BEST RESULTS ARE IN BOLD, AND SECOND-BEST RESULTS ARE UNDERLINED.
|
| 197 |
+
|
| 198 |
+
<table><tr><td>Method</td><td>19LA</td><td>21LA</td><td>21DF</td><td>ITW</td><td>Avg.</td></tr><tr><td>LSR</td><td>0.19</td><td>2.35</td><td>3.01</td><td>6.58</td><td>3.03</td></tr><tr><td>+LSA(AN)</td><td>0.16</td><td>1.67</td><td>2.85</td><td>6.17</td><td>2.71</td></tr><tr><td>+LSA(AT)</td><td>0.19</td><td>1.62</td><td>2.57</td><td>6.69</td><td>2.77</td></tr><tr><td>+LSA(BM)</td><td>0.21</td><td>1.65</td><td>2.86</td><td>6.61</td><td>2.93</td></tr><tr><td>+LSA(LI)</td><td>0.23</td><td>1.92</td><td>2.65</td><td>7.05</td><td>2.96</td></tr><tr><td>+LSA(LE)</td><td>0.18</td><td>1.52</td><td>2.54</td><td>6.15</td><td>2.60</td></tr><tr><td>+LSA(All)</td><td>0.15</td><td>1.19</td><td>2.43</td><td>5.92</td><td>2.42</td></tr></table>
|
| 199 |
+
|
| 200 |
+
To assess the impact of different latent space augmentation methods, we conducted experiments for each method, as summarized in Table III, and visualized their effects using t-SNE in Fig. 2. Notably, since LI and LE rely on LSR prototypes, all systems were trained with $\mathrm{LSR + WCE}$ loss. Among the first three augmentations that are independent of the prototypes, AN and AT produced more dispersed and varied distributions, leading to better performance. In contrast, BM's distribution remained closer to the original due to its mixup nature, which limited its effectiveness. For the prototype-dependent augmentations, LI, while beneficial, underperformed compared to the others, likely because it only generates challenging, boundary-directed examples rather than broadly diverse ones. LE, however, achieved the best results, as it effectively expanded the distribution into new regions of the latent space, offering a more balanced diversity. Ultimately, combining all augmentation methods led to the most diverse latent space, resulting in the highest overall performance.
|
| 201 |
+
|
| 202 |
+
While we have demonstrated the effectiveness of augmentation in latent space, we were curious whether applying the same augmentations in the input space could yield comparable or even better results. To explore this, we conducted comparison experiments between
|
| 203 |
+
|
| 204 |
+
TABLE IV COMPARISON OF AUGMENTATION EFFECTS IN INPUT VS. LATENT SPACE ACROSS DATASETS (EER $\%$).
|
| 205 |
+
|
| 206 |
+
<table><tr><td>Method</td><td>Space</td><td>19LA</td><td>21LA</td><td>21DF</td><td>ITW</td><td>Avg.</td></tr><tr><td>None</td><td>-</td><td>0.30</td><td>2.64</td><td>4.74</td><td>8.09</td><td>3.94</td></tr><tr><td>AN</td><td>input</td><td>0.25</td><td>2.22</td><td>3.17</td><td>6.35</td><td>3.00</td></tr><tr><td>AN</td><td>latent</td><td>0.23</td><td>2.05</td><td>2.84</td><td>6.21</td><td>2.83</td></tr><tr><td>AT</td><td>input</td><td>0.27</td><td>2.43</td><td>3.44</td><td>6.81</td><td>3.24</td></tr><tr><td>AT</td><td>latent</td><td>0.25</td><td>2.03</td><td>2.91</td><td>6.72</td><td>2.98</td></tr><tr><td>BM</td><td>input</td><td>0.19</td><td>2.24</td><td>3.01</td><td>6.33</td><td>2.94</td></tr><tr><td>BM</td><td>latent</td><td>0.19</td><td>2.21</td><td>2.95</td><td>6.56</td><td>2.98</td></tr></table>
|
| 207 |
+
|
| 208 |
+
augmentations applied in the input space versus the latent space, focusing on three methods that do not depend on latent prototypes or embeddings: AN, AT, and BM. All experiments were conducted using WCE loss without LSR. As shown in Table IV, applying augmentation, whether in the input or latent space, improves the baseline to some extent. For AN and AT, augmentations performed in the latent space consistently yield better results than those in the input space. This suggests that latent space augmentations may more effectively capture the underlying data distributions that the model needs to learn. Interestingly, BM yields better results when applied in the input space than in the latent space. This outcome may be attributed to the nature of Mixup augmentation, which has been widely proven effective in various audio-related tasks when performed on the input data. The input space BM likely benefits from preserving more of the original data characteristics while still introducing beneficial variability.
|
| 209 |
+
|
| 210 |
+
# IV. CONCLUSIONS
|
| 211 |
+
|
| 212 |
+
This paper presents a novel approach to enhance the generalization of audio deepfake detection systems by integrating Latent Space Refinement (LSR) and Latent Space Augmentation (LSA). LSR introduces multiple learnable prototypes to better capture the complex intra-class variability of spoofed audio, while LSA generates diverse representations in the latent space, further strengthening the model's robustness. Extensive experiments on multiple datasets, including ASVspoof 2019 LA, ASVspoof 2021 LA, ASVspoof 2021 DF, and In-The-Wild, demonstrate that LSR and LSA each yield significant improvements, and that their combination performs best.
|
| 213 |
+
|
| 214 |
+
# ACKNOWLEDGMENT
|
| 215 |
+
|
| 216 |
+
This work was partially supported by the National Natural Science Foundation of China (NSFC) under Grants 62122050 and 62071288, and the Shanghai Municipal Science and Technology Commission under Grant 2021SHZDZX0102. Additional support was provided by the Pioneer R&D Program of Zhejiang Province (No. 2024C01024) and the Ant Group Research Intern Program.
|
| 217 |
+
|
| 218 |
+
# REFERENCES
|
| 219 |
+
|
| 220 |
+
[1] Junichi Yamagishi, Xin Wang, Massimiliano Todisco, Md Sahidullah, Jose Patino, Andreas Nautsch, Xuechen Liu, Kong Aik Lee, Tomi Kinnunen, Nicholas Evans, et al., "ASVspoof 2021: Accelerating progress in spoofed and deepfake speech detection," in ASVspoof 2021 Workshop-Automatic Speaker Verification and Spoofing Countermeasures Challenge, 2021.
|
| 221 |
+
[2] Nicolas M Müller, Pavel Czempin, Franziska Dieckmann, Adam Froghyar, and Konstantin Böttinger, "Does audio deepfake detection generalize?," Interspeech, 2022.
|
| 222 |
+
[3] Arun Babu, Changhan Wang, Andros Tjandra, Kushal Lakhotia, Qiantong Xu, Naman Goyal, Kritika Singh, Patrick von Platen, Yatharth Saraf, Juan Pino, Alexei Baevski, Alexis Conneau, and Michael Auli, "XLS-R: Self-supervised cross-lingual speech representation learning at scale," arXiv, vol. abs/2111.09296, 2021.
|
| 223 |
+
[4] Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever, “Robust speech recognition via large-scale weak supervision,” in International conference on machine learning. PMLR, 2023, pp. 28492-28518.
|
| 224 |
+
[5] Sanyuan Chen, Chengyi Wang, Zhengyang Chen, Yu Wu, Shujie Liu, Zhuo Chen, Jinyu Li, Naoyuki Kanda, Takuya Yoshioka, Xiong Xiao, et al., "Wavlm: Large-scale self-supervised pre-training for full stack speech processing," IEEE Journal of Selected Topics in Signal Processing, vol. 16, no. 6, pp. 1505-1518, 2022.
|
| 225 |
+
[6] Hemlata Tak, Massimiliano Todisco, Xin Wang, Jee-weon Jung, Junichi Yamagishi, and Nicholas Evans, "Automatic speaker verification spoofing and deepfake detection using wav2vec 2.0 and data augmentation," in The Speaker and Language Recognition Workshop, 2022.
|
| 226 |
+
[7] You Zhang, Fei Jiang, and Zhiyao Duan, "One-class learning towards synthetic voice spoofing detection," IEEE Signal Processing Letters, vol. 28, pp. 937-941, 2021.
|
| 227 |
+
[8] Hyun Myung Kim, Kangwook Jang, and Hoirin Kim, “One-class learning with adaptive centroid shift for audio deepfake detection,” in Interspeech 2024, 2024, pp. 4853–4857.
|
| 228 |
+
[9] Daniel S Park, William Chan, Yu Zhang, Chung-Cheng Chiu, Barret Zoph, Ekin D Cubuk, and Quoc V Le, "Specaugment: A simple data augmentation method for automatic speech recognition," arXiv preprint arXiv:1904.08779, 2019.
|
| 229 |
+
[10] Hemlata Tak, Madhu Kamble, Jose Patino, Massimiliano Todisco, and Nicholas Evans, "Rawboost: A raw data boosting and augmentation method applied to automatic speaker verification anti-spoofing," in ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2022, pp. 6382-6386.
|
| 230 |
+
[11] Linjuan Zhang, Kong Aik Lee, Lin Zhang, Longbiao Wang, and Baoning Niu, "Cpaug: Refining copy-paste augmentation for speech antispoofing," in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 10996-11000.
|
| 231 |
+
[12] Marcella ASTRID, Enjie GHORBEL, and Djamila AOUADA, “Targeted augmented data for audio deepfake detection,” in 32nd European Signal Processing Conference (EUSIPCO 2024), 2024.
|
| 232 |
+
[13] Xin Wang and Junichi Yamagishi, "Spoofed training data for speech spoofing countermeasure can be efficiently created using neural vocoders," in ICASSP 2023-2023 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2023, pp. 1-5.
|
| 233 |
+
[14] Xin Wang and Junichi Yamagishi, "Can large-scale vocoded spoofed data improve speech spoofing countermeasure with a self-supervised front end?", in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 10311-10315.
|
| 234 |
+
[15] Xiaofeng Liu, Yang Zou, Lingsheng Kong, Zhihui Diao, Junliang Yan, Jun Wang, Site Li, Ping Jia, and Jane You, "Data augmentation via latent space interpolation for image classification," in 2018 24th International Conference on Pattern Recognition (ICPR). IEEE, 2018, pp. 728-733.
|
| 235 |
+
[16] Zhiyuan Yan, Yuhao Luo, Siwei Lyu, Qingshan Liu, and Baoyuan Wu, "Transcending forgery specificity with latent space augmentation for generalizable deepfake detection," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2024, pp. 8984-8994.
|
| 236 |
+
[17] Xin Wang, Junichi Yamagishi, Massimiliano Todisco, Héctor Delgado, Andreas Nautsch, Nicholas Evans, Md Sahidullah, Ville Vestman, Tomi Kinnunen, Kong Aik Lee, et al., "ASVspoof 2019: A large-scale public database of synthesized, converted and replayed speech," Computer Speech & Language, vol. 64, pp. 101114, 2020.
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
[18] Jiankang Deng, Jia Guo, Niannan Xue, and Stefanos Zafeiriou, "Arcface: Additive angular margin loss for deep face recognition," in Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, 2019, pp. 4690-4699.
|
| 240 |
+
[19] Hongyi Zhang, Moustapha Cisse, Yann N Dauphin, and David Lopez-Paz, “mixup: Beyond empirical risk minimization,” arXiv preprint arXiv:1710.09412, 2017.
|
| 241 |
+
[20] Jee-weon Jung, Hee-Soo Heo, Hemlata Tak, Hye-jin Shim, Joon Son Chung, Bong-Jin Lee, Ha-Jin Yu, and Nicholas Evans, “Aasist: Audio anti-spoofing using integrated spectro-temporal graph attention networks,” in ICASSP 2022-2022 IEEE international conference on acoustics, speech and signal processing (ICASSP). IEEE, 2022, pp. 6367-6371.
|
| 242 |
+
[21] Zihan Pan, Tianchi Liu, Hardik B. Sailor, and Qiongqiong Wang, "Attentive merging of hidden embeddings from pre-trained speech model for anti-spoofing detection," in Interspeech 2024, 2024, pp. 2090-2094.
|
| 243 |
+
[22] Octavian Pascu, Adriana Stan, Dan Oneata, Elisabeta Oneata, and Horia Cucu, "Towards generalisable and calibrated audio deepfake detection with self-supervised representations," in Interspeech 2024, 2024, pp. 4828-4832.
|
| 244 |
+
[23] Yinlin Guo, Haofan Huang, Xi Chen, He Zhao, and Yuehai Wang, "Audio deepfake detection with self-supervised wavlm and multi-fusion attentive classifier," in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 12702-12706.
|
| 245 |
+
[24] Youngsik Eom, Yeonghyeon Lee, Ji Sub Um, and Hoi Rin Kim, “Antispoofing using transfer learning with variational information bottleneck,” in Interspeech 2022, 2022, pp. 3568-3572.
|
| 246 |
+
[25] Jingze Lu, Yuxiang Zhang, Wenchao Wang, Zengqiang Shang, and Pengyuan Zhang, "One-class knowledge distillation for spoofing speech detection," in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 11251-11255.
|
| 247 |
+
[26] Xiaopeng Wang, Ruibo Fu, Zhengqi Wen, Zhiyong Wang, Yuankun Xie, Yukun Liu, Jianhua Tao, Xuefei Liu, Yongwei Li, Xin Qi, Yi Lu, and Shuchen Shi, "Genuine-focused learning using mask autoencoder for generalized fake audio detection," in Interspeech 2024, 2024, pp. 4848-4852.
|
| 248 |
+
[27] Yuxiang Zhang, Jingze Lu, Zengqiang Shang, Wenchao Wang, and Pengyuan Zhang, "Improving short utterance anti-spoofing with AASIST2," in ICASSP 2024-2024 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2024, pp. 11636-11640.
|
| 249 |
+
[28] Duc-Tuan Truong, Ruijie Tao, Tuan Nguyen, Hieu-Thi Luong, Kong Aik Lee, and Eng Siong Chng, “Temporal-channel modeling in multi-head self-attention for synthetic speech detection,” in Interspeech 2024, 2024, pp. 537–541.
|
| 250 |
+
[29] Haochen Wu, Wu Guo, Zhentao Zhang, Wenting Zhao, Shengyu Peng, and Jie Zhang, "Spoofing speech detection by modeling local spectro-temporal and long-term dependency," in Interspeech 2024, 2024, pp. 507-511.
|
2501.14xxx/2501.14240/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f0d89d2ff7fced07ca5b2ebc56230fa5450ab95cf561e73b394137939ede4da4
|
| 3 |
+
size 305890
|
2501.14xxx/2501.14240/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2501.14xxx/2501.14249/2102c0e9-a2a1-42c5-8617-430fd4e1c8e9_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|