Add MinerU batch 3990d73e-6cc2-4120-b9fe-f3637dc0140b
This view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +16 -0
- data/2025/2503_13xxx/2503.13268/27688151-5fde-4680-b981-ebf649a96c83_content_list.json +952 -0
- data/2025/2503_13xxx/2503.13268/27688151-5fde-4680-b981-ebf649a96c83_model.json +1090 -0
- data/2025/2503_13xxx/2503.13268/27688151-5fde-4680-b981-ebf649a96c83_origin.pdf +3 -0
- data/2025/2503_13xxx/2503.13268/full.md +188 -0
- data/2025/2503_13xxx/2503.13268/images.zip +3 -0
- data/2025/2503_13xxx/2503.13268/layout.json +0 -0
- data/2025/2503_13xxx/2503.13299/1182f1ca-70ec-4ed4-9c5b-10aa50e1a3e7_content_list.json +0 -0
- data/2025/2503_13xxx/2503.13299/1182f1ca-70ec-4ed4-9c5b-10aa50e1a3e7_model.json +0 -0
- data/2025/2503_13xxx/2503.13299/1182f1ca-70ec-4ed4-9c5b-10aa50e1a3e7_origin.pdf +3 -0
- data/2025/2503_13xxx/2503.13299/full.md +0 -0
- data/2025/2503_13xxx/2503.13299/images.zip +3 -0
- data/2025/2503_13xxx/2503.13299/layout.json +0 -0
- data/2025/2503_13xxx/2503.13360/f41871e7-052a-41b7-a381-f27188074bdf_content_list.json +1896 -0
- data/2025/2503_13xxx/2503.13360/f41871e7-052a-41b7-a381-f27188074bdf_model.json +0 -0
- data/2025/2503_13xxx/2503.13360/f41871e7-052a-41b7-a381-f27188074bdf_origin.pdf +3 -0
- data/2025/2503_13xxx/2503.13360/full.md +383 -0
- data/2025/2503_13xxx/2503.13360/images.zip +3 -0
- data/2025/2503_13xxx/2503.13360/layout.json +0 -0
- data/2025/2503_13xxx/2503.13377/48689f30-a9be-48a0-a682-282cc19b9dc2_content_list.json +0 -0
- data/2025/2503_13xxx/2503.13377/48689f30-a9be-48a0-a682-282cc19b9dc2_model.json +0 -0
- data/2025/2503_13xxx/2503.13377/48689f30-a9be-48a0-a682-282cc19b9dc2_origin.pdf +3 -0
- data/2025/2503_13xxx/2503.13377/full.md +716 -0
- data/2025/2503_13xxx/2503.13377/images.zip +3 -0
- data/2025/2503_13xxx/2503.13377/layout.json +0 -0
- data/2025/2503_13xxx/2503.13399/3216c4a9-6622-4f8f-9268-b62c8b700c4e_content_list.json +0 -0
- data/2025/2503_13xxx/2503.13399/3216c4a9-6622-4f8f-9268-b62c8b700c4e_model.json +0 -0
- data/2025/2503_13xxx/2503.13399/3216c4a9-6622-4f8f-9268-b62c8b700c4e_origin.pdf +3 -0
- data/2025/2503_13xxx/2503.13399/full.md +0 -0
- data/2025/2503_13xxx/2503.13399/images.zip +3 -0
- data/2025/2503_13xxx/2503.13399/layout.json +0 -0
- data/2025/2503_13xxx/2503.13415/d6bcf737-8470-4ed5-949a-6d04e53f6bf7_content_list.json +0 -0
- data/2025/2503_13xxx/2503.13415/d6bcf737-8470-4ed5-949a-6d04e53f6bf7_model.json +0 -0
- data/2025/2503_13xxx/2503.13415/d6bcf737-8470-4ed5-949a-6d04e53f6bf7_origin.pdf +3 -0
- data/2025/2503_13xxx/2503.13415/full.md +0 -0
- data/2025/2503_13xxx/2503.13415/images.zip +3 -0
- data/2025/2503_13xxx/2503.13415/layout.json +0 -0
- data/2025/2503_13xxx/2503.13423/ee02b8fd-a1b4-4f31-b152-93f5d36bdbe3_content_list.json +0 -0
- data/2025/2503_13xxx/2503.13423/ee02b8fd-a1b4-4f31-b152-93f5d36bdbe3_model.json +0 -0
- data/2025/2503_13xxx/2503.13423/ee02b8fd-a1b4-4f31-b152-93f5d36bdbe3_origin.pdf +3 -0
- data/2025/2503_13xxx/2503.13423/full.md +0 -0
- data/2025/2503_13xxx/2503.13423/images.zip +3 -0
- data/2025/2503_13xxx/2503.13423/layout.json +0 -0
- data/2025/2503_13xxx/2503.13436/53224727-d9f9-435a-ad74-c91800bf8f34_content_list.json +1985 -0
- data/2025/2503_13xxx/2503.13436/53224727-d9f9-435a-ad74-c91800bf8f34_model.json +0 -0
- data/2025/2503_13xxx/2503.13436/53224727-d9f9-435a-ad74-c91800bf8f34_origin.pdf +3 -0
- data/2025/2503_13xxx/2503.13436/full.md +380 -0
- data/2025/2503_13xxx/2503.13436/images.zip +3 -0
- data/2025/2503_13xxx/2503.13436/layout.json +0 -0
- data/2025/2503_13xxx/2503.13439/696dfadf-883e-4109-9010-e095246a341a_content_list.json +0 -0
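
Every paper directory in this batch ships the same six MinerU artifacts (three UUID-prefixed outputs plus `full.md`, `images.zip`, and `layout.json`), so downstream code can address them uniformly. A minimal sketch, assuming a local checkout; the root path and the helper name are illustrative, not part of the repository:

```python
# Minimal sketch: resolve the per-paper MinerU artifacts for one arXiv ID.
# Assumes a local checkout rooted at "."; helper name and paths are illustrative.
from pathlib import Path

def paper_artifacts(root: Path, year: str, bucket: str, arxiv_id: str) -> dict:
    """Collect the six MinerU outputs for one paper directory, e.g. 2503.13268."""
    d = root / "data" / year / bucket / arxiv_id
    return {
        # UUID-prefixed outputs: match by suffix rather than hard-coding the UUID.
        "content_list": next(d.glob("*_content_list.json"), None),
        "model": next(d.glob("*_model.json"), None),
        "origin_pdf": next(d.glob("*_origin.pdf"), None),
        # Fixed-name outputs.
        "full_md": d / "full.md",
        "images_zip": d / "images.zip",
        "layout": d / "layout.json",
    }

print(paper_artifacts(Path("."), "2025", "2503_13xxx", "2503.13268"))
```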
.gitattributes
CHANGED
@@ -1963,3 +1963,19 @@ data/2025/2503_19xxx/2503.19903/0906a2ad-9088-4fc5-8324-00e6a98a949f_origin.pdf
 data/2025/2503_19xxx/2503.19907/6d3e71a9-f367-43e8-9154-5a3f1c98eccc_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2503_20xxx/2503.20807/9709de1a-2664-4358-b8ae-7fae53399612_origin.pdf filter=lfs diff=lfs merge=lfs -text
 data/2025/2503_20xxx/2503.20823/460bf0cd-81d7-4046-966b-33a5a80b3f9b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13268/27688151-5fde-4680-b981-ebf649a96c83_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13299/1182f1ca-70ec-4ed4-9c5b-10aa50e1a3e7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13360/f41871e7-052a-41b7-a381-f27188074bdf_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13377/48689f30-a9be-48a0-a682-282cc19b9dc2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13399/3216c4a9-6622-4f8f-9268-b62c8b700c4e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13415/d6bcf737-8470-4ed5-949a-6d04e53f6bf7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13423/ee02b8fd-a1b4-4f31-b152-93f5d36bdbe3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13436/53224727-d9f9-435a-ad74-c91800bf8f34_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13439/696dfadf-883e-4109-9010-e095246a341a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13441/7dee0cb2-adf2-4076-a132-e57fc86ae441_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13444/1c5e9011-0450-44ee-b04b-75479b89c9af_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13446/40268637-4c49-45fd-874f-4b7639b520fb_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13646/ec61822f-80f5-4b4c-80e5-df7fb23ac8f4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13657/f9f0f12f-c46c-4f07-9710-e29462265246_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_13xxx/2503.13721/a97f1dd6-65e6-4086-81b6-b457f406ad62_origin.pdf filter=lfs diff=lfs merge=lfs -text
+data/2025/2503_16xxx/2503.16527/a8560f1b-d038-456e-8a05-1cf3f71e9afc_origin.pdf filter=lfs diff=lfs merge=lfs -text
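
Each `filter=lfs diff=lfs merge=lfs -text` line marks the PDF as Git LFS-tracked, so a plain checkout without LFS yields small pointer files rather than PDF bytes. A minimal sketch of fetching one tracked PDF via `huggingface_hub`; the repo id is an assumption, since the diff does not name the repository:

```python
# Minimal sketch: download one LFS-tracked PDF from the dataset repo.
# REPO_ID is a placeholder -- the actual repo id is not shown in this diff.
from huggingface_hub import hf_hub_download

REPO_ID = "example-org/mineru-batch"  # hypothetical
local_path = hf_hub_download(
    repo_id=REPO_ID,
    repo_type="dataset",
    filename="data/2025/2503_13xxx/2503.13268/27688151-5fde-4680-b981-ebf649a96c83_origin.pdf",
)
print(local_path)  # cached local path with the resolved PDF bytes
```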
data/2025/2503_13xxx/2503.13268/27688151-5fde-4680-b981-ebf649a96c83_content_list.json
ADDED
@@ -0,0 +1,952 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Channel Estimation for Pinching-Antenna Systems (PASS)",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
161,
|
| 8 |
+
69,
|
| 9 |
+
836,
|
| 10 |
+
140
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Jian Xiao, Ji Wang, Senior Member, IEEE, and Yuanwei Liu, Fellow, IEEE",
|
| 17 |
+
"bbox": [
|
| 18 |
+
215,
|
| 19 |
+
147,
|
| 20 |
+
781,
|
| 21 |
+
164
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Abstract—Pinching antennas (PAs) represent a revolutionary flexible antenna technology that leverages dielectric waveguides and electromagnetic coupling to mitigate large-scale path loss. This letter is the first to explore channel estimation for Pinching-Antenna SyStems (PASS), addressing their uniquely ill-conditioned and underdetermined channel characteristics. In particular, two efficient deep learning-based channel estimators are proposed. 1) PAMoE: This estimator incorporates dynamic padding, feature embedding, fusion, and mixture of experts (MoE) modules, which effectively leverage the positional information of PAs and exploit expert diversity. 2) PAformer: This Transformer-style estimator employs the self-attention mechanism to predict channel coefficients in a per-antenna manner, which offers more flexibility to adaptively deal with dynamic numbers of PAs in practical deployment. Numerical results demonstrate that 1) the proposed deep learning-based channel estimators outperform conventional methods and exhibit excellent zero-shot learning capabilities, and 2) PAMoE delivers higher channel estimation accuracy via MoE specialization, while PAformer natively handles an arbitrary number of PAs, trading self-attention complexity for superior scalability.",
|
| 28 |
+
"bbox": [
|
| 29 |
+
73,
|
| 30 |
+
220,
|
| 31 |
+
491,
|
| 32 |
+
487
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Index Terms—Channel estimation, mixture of experts, pinching antenna, Transformer.",
|
| 39 |
+
"bbox": [
|
| 40 |
+
73,
|
| 41 |
+
492,
|
| 42 |
+
491,
|
| 43 |
+
518
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "I. INTRODUCTION",
|
| 50 |
+
"text_level": 1,
|
| 51 |
+
"bbox": [
|
| 52 |
+
215,
|
| 53 |
+
541,
|
| 54 |
+
351,
|
| 55 |
+
555
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "ADVANCED multiple-input multiple-output (MIMO) is a critical enabling technology for sixth-generation (6G) wireless networks, targeting ultra-high-speed data transmission and seamless connectivity. Among emerging MIMO technologies, flexible antenna systems, e.g., fluid and movable antennas, offer dynamic channel reconfiguration capabilities by leveraging its positional or rotational adjustability to adapt to the evolving spatial distribution of users [1]. However, they still struggle with addressing large-scale path loss and the need for line-of-sight (LoS) links, both of which are essential for high-quality communication. To overcome these challenges, pinching antennas (PAs) offer a novel approach to creating controlled radiation sites. PAs were initially introduced and developed into a prototype by NTT DOCOMO in 2022 [2]. The fundamental concept of PAs relies on the transfer of electromagnetic (EM) waves from a dielectric waveguide to a nearby dielectric material. Compared to typical movable antennas, PAs can achieve movement ranges spanning thousands to tens of thousands of wavelengths and support a larger number of connected antennas on a waveguide. This technology allows",
|
| 62 |
+
"bbox": [
|
| 63 |
+
73,
|
| 64 |
+
560,
|
| 65 |
+
491,
|
| 66 |
+
864
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "Jian Xiao and Ji Wang are with the Department of Electronics and Information Engineering, College of Physical Science and Technology, Central China Normal University, Wuhan 430079, China (e-mail: jianx@mails.ccnu.edu.cn; jiwang@ccnu.edu.cn).",
|
| 73 |
+
"bbox": [
|
| 74 |
+
73,
|
| 75 |
+
875,
|
| 76 |
+
488,
|
| 77 |
+
920
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "text",
|
| 83 |
+
"text": "Yuanwei Liu is with the Department of Electrical and Electronic Engineering, The University of Hong Kong, Hong Kong (e-mail: yuanwei@hku.hk).",
|
| 84 |
+
"bbox": [
|
| 85 |
+
73,
|
| 86 |
+
920,
|
| 87 |
+
488,
|
| 88 |
+
945
|
| 89 |
+
],
|
| 90 |
+
"page_idx": 0
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"type": "text",
|
| 94 |
+
"text": "for the flexible positioning of PAs, enabling the establishment of adjustable and reliable LoS transceiver links in Pinching-Antenna SyStems (PASS), thereby effectively mitigating large-scale path loss.",
|
| 95 |
+
"bbox": [
|
| 96 |
+
501,
|
| 97 |
+
219,
|
| 98 |
+
921,
|
| 99 |
+
280
|
| 100 |
+
],
|
| 101 |
+
"page_idx": 0
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"type": "text",
|
| 105 |
+
"text": "The theoretical exploration of PA technology is still in its early stages, yet it has already garnered significant attention from both academia and industry. The authors of [3] first provided a comprehensive performance analysis for PASS and proposed a non-orthogonal multiple access (NOMA)-enhanced PASS. Subsequently, related works have explored performance analysis [4], physical modeling and beamforming [5], and minimum rate maximization for PASS [6]. It should be emphasized that the existing works on PA optimization strategies hinge crucially on precise channel estimation. However, the channel estimation in PASS has not been investigated in-depth due to new challenges. Firstly, PASS introduces a fundamental challenge in channel estimation due to the highly coupled nature of the in-waveguide channel and wireless propagation channel between PAs and users. Each waveguide is fed by only one radio frequency (RF) chain, while it may carry multiple PAs. This setup necessitates the recovery of high-dimensional information from low-dimensional observations, resulting in an ill-conditioned underdetermined recovery problem. Secondly, the in-waveguide channel is deterministic and completely depends on the positions of PAs in the waveguide. Hence, no stacking of pilot signals can produce additional independent linear equations to solve the high-dimensional wireless propagation channel. Moreover, the dynamic nature of PAs with the plug-and-play capability complicates the channel acquisition. The activation locations of a large number of PAs can be adjusted in a specified region to improve user service. Therefore, the wireless propagation channel presents near-field spatial non-stationarity effects due to the large array aperture.",
|
| 106 |
+
"bbox": [
|
| 107 |
+
501,
|
| 108 |
+
280,
|
| 109 |
+
921,
|
| 110 |
+
718
|
| 111 |
+
],
|
| 112 |
+
"page_idx": 0
|
| 113 |
+
},
|
| 114 |
+
{
|
| 115 |
+
"type": "text",
|
| 116 |
+
"text": "To fill this research gap, this letter presents the first investigation into channel estimation for PASS. First and foremost, we extend classic linear channel estimators by adapting the antenna switching strategy for PASS, which provide fundamental channel estimation benchmarks. Furthermore, inspired by recent advances in deep learning (DL)-enabled channel estimation [7]-[9], we propose two efficient DL models to improve channel estimation accuracy while reducing pilot overhead. The first DL estimator, termed $PAMoE$ , is built on the mixture of experts (MoE) architecture, which integrates PA positions and pilot signal features through multi-expert mechanisms to adaptively model the dynamic channel distributions in PASS. To enhance the flexibility and scalability of the channel estimation model for dynamic PA counts, we further propose a Transformer-style estimator, termed $PAformer$ , which lever-",
|
| 117 |
+
"bbox": [
|
| 118 |
+
501,
|
| 119 |
+
718,
|
| 120 |
+
921,
|
| 121 |
+
944
|
| 122 |
+
],
|
| 123 |
+
"page_idx": 0
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"type": "page_number",
|
| 127 |
+
"text": "1",
|
| 128 |
+
"bbox": [
|
| 129 |
+
911,
|
| 130 |
+
30,
|
| 131 |
+
919,
|
| 132 |
+
40
|
| 133 |
+
],
|
| 134 |
+
"page_idx": 0
|
| 135 |
+
},
|
| 136 |
+
{
|
| 137 |
+
"type": "aside_text",
|
| 138 |
+
"text": "arXiv:2503.13268v4 [cs.IT] 10 May 2025",
|
| 139 |
+
"bbox": [
|
| 140 |
+
22,
|
| 141 |
+
265,
|
| 142 |
+
58,
|
| 143 |
+
707
|
| 144 |
+
],
|
| 145 |
+
"page_idx": 0
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"type": "image",
|
| 149 |
+
"img_path": "images/1809a3d795957d4d22b8aaff957d2fc3fdf1e8341d4a39ed60030baef9bf4f24.jpg",
|
| 150 |
+
"image_caption": [
|
| 151 |
+
"Fig. 1. Pinching antennas assisted multi-user systems."
|
| 152 |
+
],
|
| 153 |
+
"image_footnote": [],
|
| 154 |
+
"bbox": [
|
| 155 |
+
133,
|
| 156 |
+
73,
|
| 157 |
+
434,
|
| 158 |
+
210
|
| 159 |
+
],
|
| 160 |
+
"page_idx": 1
|
| 161 |
+
},
|
| 162 |
+
{
|
| 163 |
+
"type": "text",
|
| 164 |
+
"text": "ages the self-attention mechanism to predict channel coefficients on a per-antenna basis. Numerical results demonstrate that the proposed DL estimators achieve superior channel estimation accuracy with significantly reduced pilot overhead compared to conventional channel estimators. In particular, both PAMoE and PAformer exhibit superior zero-shot learning capabilities on dynamic PA configurations without retraining operations.",
|
| 165 |
+
"bbox": [
|
| 166 |
+
73,
|
| 167 |
+
262,
|
| 168 |
+
491,
|
| 169 |
+
383
|
| 170 |
+
],
|
| 171 |
+
"page_idx": 1
|
| 172 |
+
},
|
| 173 |
+
{
|
| 174 |
+
"type": "text",
|
| 175 |
+
"text": "II. SYSTEM MODEL AND PROBLEM FORMULATION",
|
| 176 |
+
"text_level": 1,
|
| 177 |
+
"bbox": [
|
| 178 |
+
101,
|
| 179 |
+
397,
|
| 180 |
+
465,
|
| 181 |
+
411
|
| 182 |
+
],
|
| 183 |
+
"page_idx": 1
|
| 184 |
+
},
|
| 185 |
+
{
|
| 186 |
+
"type": "text",
|
| 187 |
+
"text": "As illustrated in Fig. 1, considering an uplink communication system that includes a base station (BS) equipped with $N$ PAs and $K$ single-antenna user equipments (UEs). Each PA has the same length $L$ on the waveguide. In a Cartesian system, the UEs are assumed to be randomly distributed within a rectangular region on the $x - y$ plane, with dimensions $D_{x}$ and $D_{y}$ . The position of the $k$ -th UE is represented by $\\psi_{k} = (x_{k},y_{k},0)$ . Suppose the waveguide extends parallel to the $x$ -axis. Its height is denoted by $d$ , and its length aligns with the rectangular dimension $D_{x}$ . Hence, the coordinates of PA $n$ are given by $\\psi_{n}^{\\mathrm{P}} = (x_{n}^{\\mathrm{P}},0,d)$ , where $x_{n}^{\\mathrm{P}}$ lies in the interval $[L,D_x]$ . In this work, the discrete activation deployment of PAs is adopted, which simplifies the hardware design and is more practical than continuous activation [5]. The PAs can only be activated at specific discrete positions along the waveguide, forming the feasible set $\\mathcal{S} = \\left\\{L + \\frac{D_x - L}{Q - 1} (q - 1) \\mid q = 1,2,\\dots ,Q\\right\\}$ , where $Q$ denotes the number of discrete positions available. Since all $N$ PAs lie along the same waveguide, the transmitted signal of each PA is essentially a phase-shifted version of the signal from the BS at the waveguide feed point. The in-waveguide channel $\\mathbf{g} \\in \\mathbb{C}^{N \\times 1}$ can be expressed as",
|
| 188 |
+
"bbox": [
|
| 189 |
+
73,
|
| 190 |
+
415,
|
| 191 |
+
491,
|
| 192 |
+
736
|
| 193 |
+
],
|
| 194 |
+
"page_idx": 1
|
| 195 |
+
},
|
| 196 |
+
{
|
| 197 |
+
"type": "equation",
|
| 198 |
+
"text": "\n$$\n\\mathbf {g} = \\left[ \\alpha_ {1} e ^ {- j \\frac {2 \\pi}{\\lambda_ {g}} \\left| \\psi_ {0} ^ {\\mathrm {P}} - \\psi_ {1} ^ {\\mathrm {P}} \\right|}, \\dots , \\alpha_ {N} e ^ {- j \\frac {2 \\pi}{\\lambda_ {g}} \\left| \\psi_ {0} ^ {\\mathrm {P}} - \\psi_ {N} ^ {\\mathrm {P}} \\right|} \\right] ^ {\\top}, \\tag {1}\n$$\n",
|
| 199 |
+
"text_format": "latex",
|
| 200 |
+
"bbox": [
|
| 201 |
+
116,
|
| 202 |
+
741,
|
| 203 |
+
488,
|
| 204 |
+
771
|
| 205 |
+
],
|
| 206 |
+
"page_idx": 1
|
| 207 |
+
},
|
| 208 |
+
{
|
| 209 |
+
"type": "text",
|
| 210 |
+
"text": "where $\\psi_0^{\\mathrm{P}}$ denotes the position of the waveguide's feed point. Parameter $\\lambda_{g} = \\frac{\\lambda}{n_{e}}$ is the guided wavelength. Here, $\\lambda$ is the wavelength in free space and $n_e$ is the effective refractive index of the dielectric waveguide. $\\alpha_{n}$ is the factor that determines the ratio of power exchanged between the waveguide and PA $n$ , governed by the coupling length of the PA [5].",
|
| 211 |
+
"bbox": [
|
| 212 |
+
73,
|
| 213 |
+
777,
|
| 214 |
+
488,
|
| 215 |
+
868
|
| 216 |
+
],
|
| 217 |
+
"page_idx": 1
|
| 218 |
+
},
|
| 219 |
+
{
|
| 220 |
+
"type": "text",
|
| 221 |
+
"text": "For the wireless propagation channel $\\mathbf{h}_k\\in \\mathbb{C}^{N\\times 1}$ between the $k$ -th UE and the PAs, $\\mathbf{h}_k$ is composed of the LoS component $\\mathbf{h}_k^{\\mathrm{LoS}}$ and the non-line-of-sight (NLoS) component $\\mathbf{h}_k^{\\mathrm{NLoS}}$ , i.e., $\\mathbf{h}_k = \\Upsilon_k\\odot \\mathbf{h}_k^{\\mathrm{LoS}} + \\mathbf{h}_k^{\\mathrm{NLoS}}$ . Here, the operator $\\odot$ denotes the Hadamard product. $\\Upsilon_{k} = [\\upsilon_{k,n},\\dots ,\\upsilon_{k,N}]\\in \\mathbb{C}^{N\\times 1}$",
|
| 222 |
+
"bbox": [
|
| 223 |
+
73,
|
| 224 |
+
869,
|
| 225 |
+
491,
|
| 226 |
+
946
|
| 227 |
+
],
|
| 228 |
+
"page_idx": 1
|
| 229 |
+
},
|
| 230 |
+
{
|
| 231 |
+
"type": "text",
|
| 232 |
+
"text": "is a Bernoulli random vector and the variable $\\nu_{k,n}$ takes values from the set $\\{0,1\\}$ , characterizing the existence of a LoS link between the $n$ -th PA and the $k$ -th UE<sup>1</sup>. The geometric spherical wavefront model-based LoS channel between the $k$ -th UE and the PAs is expressed as [3]",
|
| 233 |
+
"bbox": [
|
| 234 |
+
501,
|
| 235 |
+
69,
|
| 236 |
+
919,
|
| 237 |
+
143
|
| 238 |
+
],
|
| 239 |
+
"page_idx": 1
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"type": "equation",
|
| 243 |
+
"text": "\n$$\n\\mathbf {h} _ {k} ^ {\\mathrm {L o S}} = \\left[ \\frac {\\sqrt {\\eta} e ^ {- j \\frac {2 \\pi}{\\lambda} | \\psi_ {k} - \\psi_ {1} ^ {\\mathrm {P}} |}}{| \\psi_ {k} - \\psi_ {1} ^ {\\mathrm {P}} |}, \\dots , \\frac {\\sqrt {\\eta} e ^ {- j \\frac {2 \\pi}{\\lambda} | \\psi_ {k} - \\psi_ {N} ^ {\\mathrm {P}} |}}{| \\psi_ {k} - \\psi_ {N} ^ {\\mathrm {P}} |} \\right] ^ {\\top}, \\tag {2}\n$$\n",
|
| 244 |
+
"text_format": "latex",
|
| 245 |
+
"bbox": [
|
| 246 |
+
527,
|
| 247 |
+
148,
|
| 248 |
+
919,
|
| 249 |
+
193
|
| 250 |
+
],
|
| 251 |
+
"page_idx": 1
|
| 252 |
+
},
|
| 253 |
+
{
|
| 254 |
+
"type": "text",
|
| 255 |
+
"text": "where $\\eta = \\frac{\\lambda^2}{16\\pi^2}$ denotes the path loss at a reference distance of $1\\mathrm{m}$ . Considering $S$ scatterers in the $k$ -th UE $\\rightarrow$ PAs link, the NLoS channel $\\mathbf{h}_k^{\\mathrm{NLoS}} \\in \\mathbb{C}^{N \\times 1}$ is given by",
|
| 256 |
+
"bbox": [
|
| 257 |
+
501,
|
| 258 |
+
199,
|
| 259 |
+
919,
|
| 260 |
+
250
|
| 261 |
+
],
|
| 262 |
+
"page_idx": 1
|
| 263 |
+
},
|
| 264 |
+
{
|
| 265 |
+
"type": "equation",
|
| 266 |
+
"text": "\n$$\n\\mathbf {h} _ {k} ^ {\\mathrm {N L o S}} = \\sqrt {\\frac {1}{S}} \\sum_ {s = 1} ^ {S} \\beta_ {k, s} \\mathbf {a} _ {k, s} e ^ {j \\eta_ {k, s}}, \\tag {3}\n$$\n",
|
| 267 |
+
"text_format": "latex",
|
| 268 |
+
"bbox": [
|
| 269 |
+
604,
|
| 270 |
+
255,
|
| 271 |
+
919,
|
| 272 |
+
296
|
| 273 |
+
],
|
| 274 |
+
"page_idx": 1
|
| 275 |
+
},
|
| 276 |
+
{
|
| 277 |
+
"type": "text",
|
| 278 |
+
"text": "where $\\beta_{k,s} \\sim \\mathcal{CN}(0, \\sigma_s^2)$ and $\\eta_{k,s} \\sim \\mathcal{U}[0, 2\\pi]$ denote the complex gain and the random phase of the $s$ -th scatterer path, respectively. $\\mathbf{a}_{k,s} \\in \\mathbb{C}^{N \\times 1}$ represents the receiving array response at the PAs. Since the movable region of PAs is large, the equivalent array aperture of PA is likely to exceed the Rayleigh distance that is the criterion to determine the near-field boundary. Hence, the spherical wavefront is utilized to characterize array response $\\mathbf{a}_{k,s}$ and is given by",
|
| 279 |
+
"bbox": [
|
| 280 |
+
501,
|
| 281 |
+
301,
|
| 282 |
+
921,
|
| 283 |
+
425
|
| 284 |
+
],
|
| 285 |
+
"page_idx": 1
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"type": "equation",
|
| 289 |
+
"text": "\n$$\n\\mathbf {a} _ {k, s} = \\left[ \\frac {\\sqrt {\\eta} e ^ {- j 2 \\pi d _ {s , 1} / \\lambda}}{d _ {k , s} d _ {s , 1}}, \\dots , \\frac {\\sqrt {\\eta} e ^ {- j 2 \\pi d _ {s , N} / \\lambda}}{d _ {k , s} d _ {s , N}} \\right] ^ {\\top}, \\tag {4}\n$$\n",
|
| 290 |
+
"text_format": "latex",
|
| 291 |
+
"bbox": [
|
| 292 |
+
555,
|
| 293 |
+
430,
|
| 294 |
+
919,
|
| 295 |
+
474
|
| 296 |
+
],
|
| 297 |
+
"page_idx": 1
|
| 298 |
+
},
|
| 299 |
+
{
|
| 300 |
+
"type": "text",
|
| 301 |
+
"text": "where $d_{k,s}$ and $d_{s,n},n\\in \\{1,\\ldots ,N\\}$ denote the distances from UE $k$ to scatterer $s$ and from scatterer $s$ to PA $n$ , respectively.",
|
| 302 |
+
"bbox": [
|
| 303 |
+
503,
|
| 304 |
+
477,
|
| 305 |
+
919,
|
| 306 |
+
507
|
| 307 |
+
],
|
| 308 |
+
"page_idx": 1
|
| 309 |
+
},
|
| 310 |
+
{
|
| 311 |
+
"type": "text",
|
| 312 |
+
"text": "The received signal at the BS during the $t$ -th slot can be expressed as",
|
| 313 |
+
"bbox": [
|
| 314 |
+
503,
|
| 315 |
+
508,
|
| 316 |
+
919,
|
| 317 |
+
537
|
| 318 |
+
],
|
| 319 |
+
"page_idx": 1
|
| 320 |
+
},
|
| 321 |
+
{
|
| 322 |
+
"type": "equation",
|
| 323 |
+
"text": "\n$$\ny _ {t} = \\sum_ {k = 1} ^ {K} \\mathbf {g} ^ {\\top} \\mathbf {h} _ {k} s _ {k, t} + n _ {t}, \\tag {5}\n$$\n",
|
| 324 |
+
"text_format": "latex",
|
| 325 |
+
"bbox": [
|
| 326 |
+
633,
|
| 327 |
+
544,
|
| 328 |
+
919,
|
| 329 |
+
584
|
| 330 |
+
],
|
| 331 |
+
"page_idx": 1
|
| 332 |
+
},
|
| 333 |
+
{
|
| 334 |
+
"type": "text",
|
| 335 |
+
"text": "where $s_{k,t}$ represents the symbol transmitted by the $k$ -th UE, and $n_t \\sim CN(0, \\sigma_n^2)$ is the additive white Gaussian noise.",
|
| 336 |
+
"bbox": [
|
| 337 |
+
503,
|
| 338 |
+
590,
|
| 339 |
+
919,
|
| 340 |
+
619
|
| 341 |
+
],
|
| 342 |
+
"page_idx": 1
|
| 343 |
+
},
|
| 344 |
+
{
|
| 345 |
+
"type": "text",
|
| 346 |
+
"text": "In PASS, the in-waveguide channel $\\mathbf{g}$ can be regarded as the deterministic channel component that depends on the locations and the coupling lengths of $\\mathrm{PAs}^2$ . Consequently, we merely need to estimate the wireless propagation channel $\\mathbf{h}_k$ from the $k$ -th UE to the PAs. However, PASS introduces a fundamental challenge in channel estimation due to the highly coupled nature of $\\mathbf{g}$ and $\\mathbf{h}_k$ . Specifically, while each waveguide is connected to multiple PAs, the waveguide channel is fixed and cannot be reconfigured to perform diverse beam measurements as in conventional hybrid precoding. As a result, the system must infer a high-dimensional channel vector $\\mathbf{h}_k$ from inherently low-dimensional pilot observations, typically just one",
|
| 347 |
+
"bbox": [
|
| 348 |
+
501,
|
| 349 |
+
621,
|
| 350 |
+
921,
|
| 351 |
+
801
|
| 352 |
+
],
|
| 353 |
+
"page_idx": 1
|
| 354 |
+
},
|
| 355 |
+
{
|
| 356 |
+
"type": "text",
|
| 357 |
+
"text": "<sup>1</sup>In the existing works for PASS, the free space channel model between UEs and the PAs is commonly utilized by assuming the flexible PAs close to UEs [3]–[6]. However, in the practical wireless propagation environment, the NLoS channel component caused by the statical and dynamic scatters should be considered. In particular, all PAs are hard to provide the full coverage of LoS services for all UEs in practical deployment environment.",
|
| 358 |
+
"bbox": [
|
| 359 |
+
501,
|
| 360 |
+
816,
|
| 361 |
+
921,
|
| 362 |
+
886
|
| 363 |
+
],
|
| 364 |
+
"page_idx": 1
|
| 365 |
+
},
|
| 366 |
+
{
|
| 367 |
+
"type": "text",
|
| 368 |
+
"text": "2This work employs the electronic activation of pre-positioned PAs along the waveguide. The system controller only needs to determine the activation state to ascertain the exact positions of PAs. The reconfiguration of the activated PA set is usually several orders of magnitude faster than the channel coherence time, enabling it to respond efficiently to rapid channel variations.",
|
| 369 |
+
"bbox": [
|
| 370 |
+
501,
|
| 371 |
+
886,
|
| 372 |
+
921,
|
| 373 |
+
944
|
| 374 |
+
],
|
| 375 |
+
"page_idx": 1
|
| 376 |
+
},
|
| 377 |
+
{
|
| 378 |
+
"type": "page_number",
|
| 379 |
+
"text": "2",
|
| 380 |
+
"bbox": [
|
| 381 |
+
911,
|
| 382 |
+
30,
|
| 383 |
+
919,
|
| 384 |
+
40
|
| 385 |
+
],
|
| 386 |
+
"page_idx": 1
|
| 387 |
+
},
|
| 388 |
+
{
|
| 389 |
+
"type": "text",
|
| 390 |
+
"text": "scalar per waveguide per measurement. To elaborate, suppose the widely used orthogonal pilot transmission strategy, e.g., time-division, is adopted, and $T$ denotes the number of pilot transmission slots transmitted by UE $k$ . Note that even if the UE transmits $T \\geq N$ or more pilot slots, a single waveguide output may provide no more than one linearly independent measurement per slot. That is, each pilot symbol experiences the same fixed merging of $N$ PAs in the waveguide, yielding only repeated versions of the same scalar. Hence, the classic error-criterion-based algorithms, e.g., least square (LS) or linear minimum mean square error (LMMSE) estimators, are difficult to apply directly for channel estimation in $\\mathrm{PASS}^3$ .",
|
| 391 |
+
"bbox": [
|
| 392 |
+
73,
|
| 393 |
+
69,
|
| 394 |
+
491,
|
| 395 |
+
251
|
| 396 |
+
],
|
| 397 |
+
"page_idx": 2
|
| 398 |
+
},
|
| 399 |
+
{
|
| 400 |
+
"type": "text",
|
| 401 |
+
"text": "III. DEEP LEARNING BASED CHANNEL ESTIMATION",
|
| 402 |
+
"text_level": 1,
|
| 403 |
+
"bbox": [
|
| 404 |
+
96,
|
| 405 |
+
266,
|
| 406 |
+
470,
|
| 407 |
+
281
|
| 408 |
+
],
|
| 409 |
+
"page_idx": 2
|
| 410 |
+
},
|
| 411 |
+
{
|
| 412 |
+
"type": "text",
|
| 413 |
+
"text": "In this section, to improve the channel estimation performance, we leverage DL models to develop efficient channel estimation schemes for PASS. However, two critical challenges emerge when applying the DL approach to PASS. Firstly, the channel characteristics vary dramatically as PAs freely change positions within designated regions, which requires the efficient network architecture with sufficient capacity to learn the extended channel state space. Secondly, by dynamically activating candidate PAs along the waveguide in response to real-time communication demands, the system introduces time-varying channel dimension and heterogeneous signal distribution. The proposed DL-based channel estimator necessitates an innovative architecture capable of adaptively handling spatio-temporal variations in channel parameters, thereby fostering advancements in neural network design to enable robust estimation under non-stationary conditions.",
|
| 414 |
+
"bbox": [
|
| 415 |
+
73,
|
| 416 |
+
285,
|
| 417 |
+
491,
|
| 418 |
+
526
|
| 419 |
+
],
|
| 420 |
+
"page_idx": 2
|
| 421 |
+
},
|
| 422 |
+
{
|
| 423 |
+
"type": "text",
|
| 424 |
+
"text": "A. Dataset Construction",
|
| 425 |
+
"text_level": 1,
|
| 426 |
+
"bbox": [
|
| 427 |
+
73,
|
| 428 |
+
546,
|
| 429 |
+
243,
|
| 430 |
+
559
|
| 431 |
+
],
|
| 432 |
+
"page_idx": 2
|
| 433 |
+
},
|
| 434 |
+
{
|
| 435 |
+
"type": "text",
|
| 436 |
+
"text": "In the offline training stage, we collect $N_{s}$ paired samples, i.e., the pilot observation vector $\\widetilde{\\mathbf{y}}_k\\in \\mathbb{C}^{T\\times 1}$ at the BS, the position set $\\Psi^{\\mathrm{P}} = [\\psi_n^1,\\dots ,\\psi_N^{\\mathrm{P}}] = [(x_1^{\\mathrm{P}},0,d),\\dots ,(x_N^{\\mathrm{P}},0,d)]$ of PAs, and the corresponding channel sample $\\mathbf{h}_k$ . In the proposed channel estimation network, the input tensor is designed as the set of both PA positions and the pilot observation. Considering the waveguide is deployed parallel to the $x$ -axis, we merely need the $x$ -axis coordinate set $\\mathbf{x}^{\\mathrm{P}} = [x_1^{\\mathrm{P}},\\ldots ,x_N^{\\mathrm{P}}]^T\\in \\mathbb{R}^{N\\times 1}$ of PAs as the input feature. To enable the neural network to adaptively estimate channels across varying PA configurations, we utilize the in-waveguide channel $\\mathbf{g}$ to transform the observation $\\widetilde{\\mathbf{y}}_k\\in \\mathbb{C}^{T\\times 1}$ into $\\widetilde{\\mathbf{Y}}_k = \\mathbf{g}\\widetilde{\\mathbf{y}}_k^\\top \\in \\mathbb{C}^{N\\times T}$ . This transformation aligns the input dimension of neural network with the current number of PAs $N$ in PASS, which facilitates the subsequent channel estimation network design. The complex-value matrix $\\widetilde{\\mathbf{Y}}_k$ is converted into the real-value tensor $\\overline{\\mathbf{Y}}_k = \\{\\Re (\\widetilde{\\mathbf{Y}}_k),\\Im (\\widetilde{\\mathbf{Y}}_k)\\} \\in \\mathbb{R}^{N\\times 2T}$ for neural network processing. Accordingly, the label tensor in the network training is $\\bar{\\mathbf{H}}_k = \\{\\Re (\\mathbf{h}_k),\\Im (\\mathbf{h}_k)\\} \\in \\mathbb{R}^{N\\times 2}$ .",
|
| 437 |
+
"bbox": [
|
| 438 |
+
73,
|
| 439 |
+
564,
|
| 440 |
+
491,
|
| 441 |
+
853
|
| 442 |
+
],
|
| 443 |
+
"page_idx": 2
|
| 444 |
+
},
|
| 445 |
+
{
|
| 446 |
+
"type": "text",
|
| 447 |
+
"text": "3To collect independent pilot measurements for each PA, as required by LS and LMMSE, one feasible approach is to introduce an antenna switching matrix that selects subsets of PAs in different time slots to realize the signal separation. In Section IV of this letter, we provide antenna switching matrix-based LS and LMMSE estimators as channel estimation benchmarks. Note that switching each PA in different slots introduces hardware overhead, extended measurement time, and potential switch losses, all of which reduce practicality.",
|
| 448 |
+
"bbox": [
|
| 449 |
+
73,
|
| 450 |
+
862,
|
| 451 |
+
491,
|
| 452 |
+
946
|
| 453 |
+
],
|
| 454 |
+
"page_idx": 2
|
| 455 |
+
},
|
| 456 |
+
{
|
| 457 |
+
"type": "image",
|
| 458 |
+
"img_path": "images/241e3ab1863fe20fbdb6cad0c762ddc9234ff318ed7416f9440786b3f82e7423.jpg",
|
| 459 |
+
"image_caption": [
|
| 460 |
+
"Fig. 2. Proposed PAMoE model for channel estimation."
|
| 461 |
+
],
|
| 462 |
+
"image_footnote": [],
|
| 463 |
+
"bbox": [
|
| 464 |
+
568,
|
| 465 |
+
69,
|
| 466 |
+
866,
|
| 467 |
+
202
|
| 468 |
+
],
|
| 469 |
+
"page_idx": 2
|
| 470 |
+
},
|
| 471 |
+
{
|
| 472 |
+
"type": "text",
|
| 473 |
+
"text": "B. PAMoE: Mixture of Experts-Based Channel Estimation",
|
| 474 |
+
"text_level": 1,
|
| 475 |
+
"bbox": [
|
| 476 |
+
503,
|
| 477 |
+
252,
|
| 478 |
+
901,
|
| 479 |
+
266
|
| 480 |
+
],
|
| 481 |
+
"page_idx": 2
|
| 482 |
+
},
|
| 483 |
+
{
|
| 484 |
+
"type": "text",
|
| 485 |
+
"text": "We first propose a PAMoE estimator as illustrated in Fig. 2 to address high-dimensional dynamic channel estimation from low-dimensional received pilots, which incorporates the dynamic padding, feature embedding, fusion, and position-aware MoE modules [10].",
|
| 486 |
+
"bbox": [
|
| 487 |
+
501,
|
| 488 |
+
270,
|
| 489 |
+
919,
|
| 490 |
+
345
|
| 491 |
+
],
|
| 492 |
+
"page_idx": 2
|
| 493 |
+
},
|
| 494 |
+
{
|
| 495 |
+
"type": "text",
|
| 496 |
+
"text": "1) Dynamic Padding: To accommodate variable PA counts $N$ and handle variable-length inputs, $PAMoE$ employs dynamic padding up to a maximum $N_{\\mathrm{max}}$ . Let $B$ denote the batch size in the network training stage. The input tensor in a training batch can be expressed as PA positions $\\mathbf{P} \\in \\mathbb{R}^{B \\times N \\times 1}$ and pilot signals $\\mathbf{S} \\in \\mathbb{R}^{B \\times N \\times 2T}$ , which are the batch version of $\\mathbf{x}^{\\mathrm{P}}$ and $\\overline{\\mathbf{Y}}_k$ , respectively. If $N < N_{\\mathrm{max}}$ , the network pads along the PA dimension so that both $\\mathbf{P}$ and $\\mathbf{S}$ become length $N_{\\mathrm{max}}$ , i.e., $\\mathbf{P}' = [\\mathbf{P}, \\phi^1] \\in \\mathbb{R}^{B \\times N_{\\mathrm{max}} \\times 1}$ , $\\mathbf{S}' = [\\mathbf{S}, \\phi^2] \\in \\mathbb{R}^{B \\times N_{\\mathrm{max}} \\times d_{\\mathrm{sig}}}$ , where $\\phi^1$ and $\\phi^2 \\in \\mathbb{R}^{N_{\\mathrm{pad}} \\times 1}(N_{\\mathrm{pad}} = N_{\\mathrm{max}} - N)$ denote the learnable padding embeddings, respectively.",
|
| 497 |
+
"bbox": [
|
| 498 |
+
501,
|
| 499 |
+
345,
|
| 500 |
+
919,
|
| 501 |
+
512
|
| 502 |
+
],
|
| 503 |
+
"page_idx": 2
|
| 504 |
+
},
|
| 505 |
+
{
|
| 506 |
+
"type": "text",
|
| 507 |
+
"text": "2) Feature Embedding: To handle continuous spatial information and achieve efficient extrapolation of PAs, the Fourier basis function is employed, developing a Fourier positional embedding approach. Suppose a set of exponentially increasing frequency bases $\\mathbf{f} = \\left[2^{f}\\pi \\right]_{f = 0}^{F - 1}\\in \\mathbb{R}^{F}$ is defined, where $F$ is the number of frequency components. The scaled position encodings can be expressed as",
|
| 508 |
+
"bbox": [
|
| 509 |
+
501,
|
| 510 |
+
512,
|
| 511 |
+
919,
|
| 512 |
+
619
|
| 513 |
+
],
|
| 514 |
+
"page_idx": 2
|
| 515 |
+
},
|
| 516 |
+
{
|
| 517 |
+
"type": "equation",
|
| 518 |
+
"text": "\n$$\n\\boldsymbol {\\Theta} = \\mathbf {S} ^ {\\prime} \\odot \\mathbf {f} \\in \\mathbb {R} ^ {B \\times N _ {\\max } \\times F}. \\tag {6}\n$$\n",
|
| 519 |
+
"text_format": "latex",
|
| 520 |
+
"bbox": [
|
| 521 |
+
625,
|
| 522 |
+
627,
|
| 523 |
+
919,
|
| 524 |
+
645
|
| 525 |
+
],
|
| 526 |
+
"page_idx": 2
|
| 527 |
+
},
|
| 528 |
+
{
|
| 529 |
+
"type": "text",
|
| 530 |
+
"text": "Then, we apply sinusoidal transformations to obtain sine and cosine features, and then are concatenated as",
|
| 531 |
+
"bbox": [
|
| 532 |
+
503,
|
| 533 |
+
654,
|
| 534 |
+
919,
|
| 535 |
+
684
|
| 536 |
+
],
|
| 537 |
+
"page_idx": 2
|
| 538 |
+
},
|
| 539 |
+
{
|
| 540 |
+
"type": "equation",
|
| 541 |
+
"text": "\n$$\n\\mathbf {E} = \\operatorname {C o n c a t} \\left[ \\sin (\\boldsymbol {\\Theta}), \\cos (\\boldsymbol {\\Theta}) \\right] \\in \\mathbb {R} ^ {B \\times N _ {\\max } \\times 2 F}. \\tag {7}\n$$\n",
|
| 542 |
+
"text_format": "latex",
|
| 543 |
+
"bbox": [
|
| 544 |
+
565,
|
| 545 |
+
693,
|
| 546 |
+
919,
|
| 547 |
+
712
|
| 548 |
+
],
|
| 549 |
+
"page_idx": 2
|
| 550 |
+
},
|
| 551 |
+
{
|
| 552 |
+
"type": "text",
|
| 553 |
+
"text": "Further, $\\mathbf{E}$ is projected into the embedding space $\\mathbf{Z}_{\\mathrm{pos}} = \\mathbf{EW}_{\\mathrm{pos}} + \\mathbf{b}_{\\mathrm{pos}} \\in \\mathbb{R}^{B \\times N_{\\mathrm{max}} \\times d_{\\mathrm{embed}}}$ , where $\\mathbf{W}_{\\mathrm{pos}} \\in \\mathbb{R}^{2F \\times d_{\\mathrm{embed}}}$ is the learnable weight matrix and $\\mathbf{b}_{\\mathrm{pos}} \\in \\mathbb{R}^{d_{\\mathrm{embed}}}$ is the bias term. This Fourier embedding effectively captures multi-scale positional variations in the input space. The pilot signal data is embedded to the same hidden dimension $d_{\\mathrm{hid}}$ by the multilayer perceptron (MLP)-based linear mapping module $\\phi_{\\mathrm{sig}}$ , i.e., $\\mathbf{Z}_{\\mathrm{sig}} = \\phi_{\\mathrm{sig}}(\\mathbf{S}') \\in \\mathbb{R}^{B \\times N_{\\mathrm{max}} \\times d_{\\mathrm{hid}}}$ .",
|
| 554 |
+
"bbox": [
|
| 555 |
+
501,
|
| 556 |
+
720,
|
| 557 |
+
919,
|
| 558 |
+
840
|
| 559 |
+
],
|
| 560 |
+
"page_idx": 2
|
| 561 |
+
},
|
| 562 |
+
{
|
| 563 |
+
"type": "text",
|
| 564 |
+
"text": "3) Feature Fusion: To fuse positional features with pilot signal features, we employ a gating function to generate a gate from a control feature based on PA positions and apply it to the target feature based on pilot signal. The gating operation is formulated as",
|
| 565 |
+
"bbox": [
|
| 566 |
+
501,
|
| 567 |
+
842,
|
| 568 |
+
921,
|
| 569 |
+
915
|
| 570 |
+
],
|
| 571 |
+
"page_idx": 2
|
| 572 |
+
},
|
| 573 |
+
{
|
| 574 |
+
"type": "equation",
|
| 575 |
+
"text": "\n$$\n\\mathbf {G} = \\sigma \\left(\\mathbf {W} _ {g} \\mathbf {Z} _ {\\text {p o s}} + \\mathbf {b} _ {g}\\right) \\in \\mathbb {R} ^ {B \\times N _ {\\max } \\times d _ {\\text {h i d}}}, \\tag {8}\n$$\n",
|
| 576 |
+
"text_format": "latex",
|
| 577 |
+
"bbox": [
|
| 578 |
+
578,
|
| 579 |
+
925,
|
| 580 |
+
919,
|
| 581 |
+
944
|
| 582 |
+
],
|
| 583 |
+
"page_idx": 2
|
| 584 |
+
},
|
| 585 |
+
{
|
| 586 |
+
"type": "page_number",
|
| 587 |
+
"text": "3",
|
| 588 |
+
"bbox": [
|
| 589 |
+
911,
|
| 590 |
+
30,
|
| 591 |
+
919,
|
| 592 |
+
40
|
| 593 |
+
],
|
| 594 |
+
"page_idx": 2
|
| 595 |
+
},
|
| 596 |
+
{
|
| 597 |
+
"type": "image",
|
| 598 |
+
"img_path": "images/a8f19f86ad289a59e60b7ffa3eb8b3b7b38f5339d4b348c9f7164601efdf43a2.jpg",
|
| 599 |
+
"image_caption": [
|
| 600 |
+
"Fig. 3. Proposed PAformer model for channel estimation."
|
| 601 |
+
],
|
| 602 |
+
"image_footnote": [],
|
| 603 |
+
"bbox": [
|
| 604 |
+
138,
|
| 605 |
+
70,
|
| 606 |
+
433,
|
| 607 |
+
258
|
| 608 |
+
],
|
| 609 |
+
"page_idx": 3
|
| 610 |
+
},
|
| 611 |
+
{
|
| 612 |
+
"type": "equation",
|
| 613 |
+
"text": "\n$$\n\\mathbf {Z} _ {\\text {f u s e d}} = \\mathbf {G} \\odot \\mathbf {Z} _ {\\text {s i g}} + \\mathbf {Z} _ {\\text {p o s}} \\in \\mathbb {R} ^ {B \\times N _ {\\max } \\times d _ {\\text {h i d}}}, \\tag {9}\n$$\n",
|
| 614 |
+
"text_format": "latex",
|
| 615 |
+
"bbox": [
|
| 616 |
+
138,
|
| 617 |
+
305,
|
| 618 |
+
491,
|
| 619 |
+
325
|
| 620 |
+
],
|
| 621 |
+
"page_idx": 3
|
| 622 |
+
},
|
| 623 |
+
{
|
| 624 |
+
"type": "text",
|
| 625 |
+
"text": "where $\\mathbf{W}_g$ and $\\mathbf{b}_g$ denote the weight and bias of a linear layer with $d_{\\mathrm{hid}}$ neurons, respectively, and $\\sigma (\\cdot)$ is a Sigmoid activation function.",
|
| 626 |
+
"bbox": [
|
| 627 |
+
73,
|
| 628 |
+
332,
|
| 629 |
+
491,
|
| 630 |
+
377
|
| 631 |
+
],
|
| 632 |
+
"page_idx": 3
|
| 633 |
+
},
|
| 634 |
+
{
|
| 635 |
+
"type": "text",
|
| 636 |
+
"text": "4) MoE With Gating Network: Suppose there are $E$ experts and each expert takes $\\mathbf{Z}_{\\mathrm{fused}}$ and processes it with an MLP-Mixer block across both the feature dimension $d_{\\mathrm{hid}}$ and the spatial dimension $N_{\\mathrm{max}}$ . Let $\\mathbf{Z}_e = E_e(\\mathbf{Z}_{\\mathrm{fused}}) \\in \\mathbb{R}^{B \\times N_{\\mathrm{max}} \\times d_{\\mathrm{hid}}}$ denote the output of expert $e$ , $(e = 1, \\dots, E)$ . A gating network is employed to weight each expert's output. Typically, it pools $\\mathbf{Z}_{\\mathrm{fused}}$ over the PA dimension to get a global context $\\mathbf{z}_{\\mathrm{pool}} \\in \\mathbb{R}^{B \\times d_{\\mathrm{hid}}}$ , and then applies the softmax activation function to produce gating weights, which can be expressed as",
|
| 637 |
+
"bbox": [
|
| 638 |
+
73,
|
| 639 |
+
378,
|
| 640 |
+
490,
|
| 641 |
+
513
|
| 642 |
+
],
|
| 643 |
+
"page_idx": 3
|
| 644 |
+
},
|
| 645 |
+
{
|
| 646 |
+
"type": "equation",
|
| 647 |
+
"text": "\n$$\n\\boldsymbol {\\alpha} = \\operatorname {s o f t m a x} \\left(\\mathbf {W} _ {\\alpha} \\mathbf {z} _ {\\text {p o o l}} + \\mathbf {b} _ {\\alpha}\\right) \\in \\mathbb {R} ^ {B \\times E}, \\tag {10}\n$$\n",
|
| 648 |
+
"text_format": "latex",
|
| 649 |
+
"bbox": [
|
| 650 |
+
148,
|
| 651 |
+
522,
|
| 652 |
+
488,
|
| 653 |
+
542
|
| 654 |
+
],
|
| 655 |
+
"page_idx": 3
|
| 656 |
+
},
|
| 657 |
+
{
|
| 658 |
+
"type": "text",
|
| 659 |
+
"text": "where $\\sum_{e=1}^{E} \\alpha_{b,e} = 1$ , $\\forall b \\in \\{1, \\dots, B\\}$ , $\\mathbf{W}_{\\alpha}$ and $\\mathbf{b}_{\\alpha}$ are the weight and bias of a linear layer with $E$ neurons, respectively.",
|
| 660 |
+
"bbox": [
|
| 661 |
+
73,
|
| 662 |
+
549,
|
| 663 |
+
488,
|
| 664 |
+
580
|
| 665 |
+
],
|
| 666 |
+
"page_idx": 3
|
| 667 |
+
},
|
| 668 |
+
{
|
| 669 |
+
"type": "text",
|
| 670 |
+
"text": "Given the output $\\mathbf{Z}_e$ of each expert and the gating weights $\\alpha$ , we form a weighted sum over experts. Let $\\alpha$ be reshaped to $\\mathbb{R}^{B\\times 1\\times E}$ so it can broadcast over the $N_{\\mathrm{max}}$ dimension to obtain the following output of MoE",
|
| 671 |
+
"bbox": [
|
| 672 |
+
73,
|
| 673 |
+
580,
|
| 674 |
+
491,
|
| 675 |
+
642
|
| 676 |
+
],
|
| 677 |
+
"page_idx": 3
|
| 678 |
+
},
|
| 679 |
+
{
|
| 680 |
+
"type": "equation",
|
| 681 |
+
"text": "\n$$\n\\mathbf {Z} _ {\\mathrm {M o E}} = \\sum_ {e = 1} ^ {E} \\boldsymbol {\\alpha} _ {e} \\mathbf {Z} _ {e} \\in \\mathbb {R} ^ {B \\times N _ {\\max } \\times d _ {\\mathrm {h i d}}} \\tag {11}\n$$\n",
|
| 682 |
+
"text_format": "latex",
|
| 683 |
+
"bbox": [
|
| 684 |
+
161,
|
| 685 |
+
647,
|
| 686 |
+
488,
|
| 687 |
+
686
|
| 688 |
+
],
|
| 689 |
+
"page_idx": 3
|
| 690 |
+
},
|
| 691 |
+
{
|
| 692 |
+
"type": "text",
|
| 693 |
+
"text": "Finally, the network concatenates the positional feature $\\mathbf{Z}_{\\mathrm{pos}}$ and $\\mathbf{Z}_{\\mathrm{MoE}}$ along the last dimension, i.e., $\\mathbf{Z}_{\\mathrm{concat}} \\in \\mathbb{R}^{B \\times N_{\\mathrm{max}} \\times (2d_{\\mathrm{hid}})}$ . Then, a linear layer maps $\\mathbf{Z}_{\\mathrm{concat}}$ to the estimated channel $\\widetilde{\\mathbf{H}} \\in \\mathbb{R}^{B \\times N \\times 2}$ . In $PAMoE$ , the dynamic padding pattern requires the network to fix a predefined maximum number of PAs $N_{\\mathrm{max}}$ in the training stage, which is a limiting factor if PAs exceed the predefined maximum bound $N_{\\mathrm{max}}$ .",
|
| 694 |
+
"bbox": [
|
| 695 |
+
73,
|
| 696 |
+
691,
|
| 697 |
+
491,
|
| 698 |
+
799
|
| 699 |
+
],
|
| 700 |
+
"page_idx": 3
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"type": "text",
|
| 704 |
+
"text": "C. P Aformer: Transformer-Based Channel Estimation",
|
| 705 |
+
"text_level": 1,
|
| 706 |
+
"bbox": [
|
| 707 |
+
75,
|
| 708 |
+
819,
|
| 709 |
+
441,
|
| 710 |
+
834
|
| 711 |
+
],
|
| 712 |
+
"page_idx": 3
|
| 713 |
+
},
|
| 714 |
+
{
|
| 715 |
+
"type": "text",
|
| 716 |
+
"text": "We further propose a PAformer estimator as illustrated in Fig. 3 to address the scalability limitation of $PAMoE$ , which is inherently constrained by fixed $N_{\\mathrm{max}}$ during training. PAformer incorporates self-attention layers with permutation-equivariance, enabling the network to dynamically accommodate PAs of arbitrary size $N$ . The proposed PAformer predicts channel coefficients in a per-antenna manner, offering",
|
| 717 |
+
"bbox": [
|
| 718 |
+
73,
|
| 719 |
+
838,
|
| 720 |
+
491,
|
| 721 |
+
946
|
| 722 |
+
],
|
| 723 |
+
"page_idx": 3
|
| 724 |
+
},
|
| 725 |
+
{
|
| 726 |
+
"type": "text",
|
| 727 |
+
"text": "flexibility if new data has more antennas than seen before in the training stage.",
|
| 728 |
+
"bbox": [
|
| 729 |
+
503,
|
| 730 |
+
69,
|
| 731 |
+
919,
|
| 732 |
+
98
|
| 733 |
+
],
|
| 734 |
+
"page_idx": 3
|
| 735 |
+
},
|
| 736 |
+
{
|
| 737 |
+
"type": "text",
|
| 738 |
+
"text": "1) Input Embedding: In PAformer, we first concatenate the position and pilot signal features along their last dimension, i.e., $\\mathbf{V} = \\left[\\mathbf{P},\\mathbf{S}\\right]\\in \\mathbb{R}^{B\\times N\\times (2T + 1)}$ . Next, $\\mathbf{V}$ is mapped into a hidden representation $\\mathbf{Z}^{(0)}$ via an embedding network $\\phi$ based on MLP, i.e., $\\mathbf{Z}^{(0)} = \\phi (\\mathbf{V})\\in \\mathbb{R}^{B\\times N\\times d_{\\mathrm{hid}}}$ .",
|
| 739 |
+
"bbox": [
|
| 740 |
+
503,
|
| 741 |
+
99,
|
| 742 |
+
919,
|
| 743 |
+
175
|
| 744 |
+
],
|
| 745 |
+
"page_idx": 3
|
| 746 |
+
},
|
| 747 |
+
{
|
| 748 |
+
"type": "text",
|
| 749 |
+
"text": "2) Transformer Encoder: PAformer employs a pre-norm Transformer with $P$ stacked self-attention blocks (SABs), each comprising multi-head self-attention (MHA) and a feedforward network (FFN), both with residual connections and layer normalization. Let $\\mathbf{Z}^{(\\ell -1)}$ denote the input to the $\\ell$ -th SAB, and $\\overline{\\mathbf{Z}}^{(\\ell -1)} = \\mathrm{LayerNorm}(\\mathbf{Z}^{(\\ell -1)})$ is the pre-normalized feature representation by the layer normalization. The output of the MHA module is expressed as $\\mathbf{A}^{(\\ell)} = \\mathrm{MHA}(\\mathbf{Q},\\mathbf{K},\\mathbf{V})\\in$ $\\mathbb{R}^{B\\times N\\times d_{\\mathrm{hid}}}$ where Q, K, and V denote query, key, and value tokens of $\\overline{\\mathbf{Z}}^{(\\ell -1)}$ [11], respectively. The result is combined via a residual connection $\\overline{\\mathbf{Z}}^{\\prime (\\ell)} = \\overline{\\mathbf{Z}}^{(\\ell -1)} + \\mathbf{A}^{(\\ell)}$ . Then, the FFN is applied to obtain $\\mathbf{F}^{(\\ell)} = \\mathrm{FFN}(\\overline{\\mathbf{Z}}^{\\prime (\\ell)})$ , where FFN is a point-wise MLP operating on each PA token separately. Another residual connection completes this sub-layer $\\mathbf{Z}^{(\\ell)} =$ $\\overline{\\mathbf{Z}}^{\\prime (\\ell)} + \\mathbf{F}^{(\\ell)}$ . Stacking and applying $P$ blocks in sequence yields the final feature representation $\\mathbf{Z}^P = \\mathrm{SAB}^{(\\ell)}\\bigg(\\mathbf{Z}^{(\\ell -1)}\\bigg)\\in$ $\\mathbb{R}^{B\\times N\\times d_{\\mathrm{hid}}}$ , $\\ell = 1,\\dots ,P$ , starting from $\\mathbf{Z}^{(0)}$ . The final step maps each antenna-wise feature vector in $\\mathbf{Z}^{(P)}$ to the estimated channel $\\widehat{\\mathbf{H}}_i$ , where $i = 1,\\ldots ,N$ indexes PAs.",
|
| 750 |
+
"bbox": [
|
| 751 |
+
503,
|
| 752 |
+
176,
|
| 753 |
+
921,
|
| 754 |
+
479
|
| 755 |
+
],
|
| 756 |
+
"page_idx": 3
|
| 757 |
+
},
|
| 758 |
+
{
|
| 759 |
+
"type": "text",
|
| 760 |
+
"text": "For the proposed two DL estimators, during the test stage, the trained network can be applied to scenarios with dynamic numbers of PAs, even though the test dataset has a different data distribution and dimensionality compared to the training dataset. This property of the channel estimation network can also be termed as zero-shot learning.",
|
| 761 |
+
"bbox": [
|
| 762 |
+
501,
|
| 763 |
+
479,
|
| 764 |
+
921,
|
| 765 |
+
571
|
| 766 |
+
],
|
| 767 |
+
"page_idx": 3
|
| 768 |
+
},
|
| 769 |
+
{
|
| 770 |
+
"type": "text",
|
| 771 |
+
"text": "IV. NUMERICAL RESULTS",
|
| 772 |
+
"text_level": 1,
|
| 773 |
+
"bbox": [
|
| 774 |
+
617,
|
| 775 |
+
589,
|
| 776 |
+
807,
|
| 777 |
+
603
|
| 778 |
+
],
|
| 779 |
+
"page_idx": 3
|
| 780 |
+
},
|
| 781 |
+
{
|
| 782 |
+
"type": "text",
|
| 783 |
+
"text": "In simulation setups, we set $Q = 200$ , $K = 4$ , $n_e = 1.4$ , $d = 5$ , $D_x \\times D_y = 20 \\times 20 \\mathrm{~m}^2$ , $S = 6$ and the carrier frequency $f_c = 28 \\mathrm{GHz}$ . In the training dataset construction, we collect $N_s = 10^5$ training samples with the fixed number of PAs $N^{\\mathrm{tr}} = 16$ , while the test number of PAs is $N^{\\mathrm{te}} \\in \\{8, \\dots, 32\\}$ . In the hyper-parameter setups of the proposed DL estimators, we set $N_{\\mathrm{max}} = 32$ , $E = 4$ , $d_{\\mathrm{hid}} = 64$ , $P = 4$ , and $B = 256$ . The 1-norm $\\ell_1$ is used as the loss function in the network training, i.e., $\\ell_1 = ||\\widehat{\\mathbf{H}}_k - \\overline{\\mathbf{H}}_k||_1$ , while the normalized mean squared error (NMSE) is employed as the performance metric, i.e., $\\mathrm{NMSE} = \\mathbb{E}\\{||\\widehat{\\mathbf{H}}_k - \\overline{\\mathbf{H}}_k||_F^2 / ||\\overline{\\mathbf{H}}_k||_F^2\\}$ . The antenna switching-based LS and LMMSE estimators are used as the conventional channel estimation benchmarks. Furthermore, to establish relevant benchmarks incorporating state-of-the-art DL estimators, we further develop two comparison DL benchmarks drawing inspiration from recent attention-based channel estimation networks, i.e., SA-RN-CE [8] and DACEN [9], which are created by replacing the MoE module in PAMoE with spatial attention (SA) and dual attention (DA) mechanisms, respectively.",
|
| 784 |
+
"bbox": [
|
| 785 |
+
503,
|
| 786 |
+
609,
|
| 787 |
+
921,
|
| 788 |
+
898
|
| 789 |
+
],
|
| 790 |
+
"page_idx": 3
|
| 791 |
+
},
{
"type": "text",
"text": "In Fig. 4, we present the NMSE performance of different channel estimation schemes with $N^{\\mathrm{te}} = 32$. The proposed DL estimators with reduced pilot overhead $T$ outperform",
"bbox": [501, 898, 921, 945],
"page_idx": 3
},
{
"type": "page_number",
"text": "4",
"bbox": [911, 31, 919, 39],
"page_idx": 3
},
{
"type": "image",
"img_path": "images/693e462a34cadef9c1cfda5dfe90c288c6651c3f415570ed7ba8c78a5da6867a.jpg",
"image_caption": ["Fig. 4. NMSE vs. SNR for different algorithms."],
"image_footnote": [],
"bbox": [148, 78, 410, 239],
"page_idx": 4
},
{
"type": "image",
"img_path": "images/ceecef8ee6c03c7cbdd8591e5cd323baeb805da3fe491adbccdc3edcb814fd50.jpg",
"image_caption": ["Fig. 5. NMSE vs. number of PAs $N$ for different algorithms."],
"image_footnote": [],
"bbox": [148, 292, 411, 454],
"page_idx": 4
},
{
"type": "table",
"img_path": "images/a79046fc7708e5fdb8ab65be83e6202f3d9466f4cb785d6263c17bb30b971d73.jpg",
"table_caption": ["TABLE I COMPLEXITY ANALYSIS OF PROPOSED DL ESTIMATORS"],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Estimator</td><td rowspan=\"2\">N</td><td rowspan=\"2\">Params(K)</td><td rowspan=\"2\">FLOPs(M)</td><td colspan=\"2\">Runtime (μs)</td></tr><tr><td>CPU</td><td>GPU</td></tr><tr><td rowspan=\"3\">PAMoE</td><td>8</td><td></td><td></td><td></td><td></td></tr><tr><td>16</td><td>211.9</td><td>13.18</td><td>211.8</td><td>12.73</td></tr><tr><td>32</td><td></td><td></td><td></td><td></td></tr><tr><td rowspan=\"3\">PAformer</td><td>8</td><td></td><td>4.377</td><td>74.62</td><td>3.814</td></tr><tr><td>16</td><td>545.9</td><td>8.756</td><td>128.2</td><td>7.943</td></tr><tr><td>32</td><td></td><td>17.51</td><td>226.8</td><td>12.95</td></tr></table>",
"bbox": [104, 532, 462, 674],
"page_idx": 4
},
{
"type": "text",
"text": "the existing linear estimators and DL models. The superior accuracy of $PAMoE$ compared to $PA\\text{former}$ primarily stems from $PAMoE$'s more specialized design, particularly in its handling and exploitation of the pinching antenna positional information. $PA\\text{former}$, while robust and scalable due to its Transformer foundation, operates with a more general mechanism to support variable PA counts. In Fig. 5, we provide the NMSE performance of different channel estimation schemes for varying numbers of PAs, where the signal-to-noise ratio (SNR) is set to 0 dB. Compared to SA-RN-CE and DACEN, the proposed $PAMoE$ and $PA\\text{former}$ trained with a fixed PA configuration exhibit excellent robustness and generalization for dynamic PAs. The proposed DL estimators possess sufficient zero-shot learning capabilities to deal with the distinct data distributions in the test stage. Table I summarizes the number of trainable parameters (Params), floating point",
"bbox": [73, 703, 491, 946],
"page_idx": 4
},
{
"type": "text",
"text": "operations (FLOPs), and inference runtime of the proposed $PAMoE$ and $PA\\text{former}$, where an NVIDIA RTX 3090 GPU and a 12th Gen Intel(R) Core(TM) i9-12900K CPU are used as the inference platforms. The average inference runtime per channel estimation instance was calculated by averaging over 1000 Monte Carlo experiments. Firstly, for different numbers of PAs $N$, the Params of the proposed $PAMoE$ and $PA\\text{former}$ are the same due to the fixed network architecture. Secondly, the FLOPs of $PA\\text{former}$ increase linearly with the number of PAs $N$, as a higher-dimensional input tensor needs to be processed in the SABs of $PA\\text{former}$. In the $PAMoE$ model, the input tensor is uniformly padded to the same $N_{\\max}$ via padding interpolation, and hence the FLOPs of $PAMoE$ remain constant for different $N$. Finally, we observe that both $PAMoE$ and $PA\\text{former}$ can achieve channel estimation at the microsecond ($\\mu s$) level, demonstrating the practical feasibility of the proposed models for real-time deployment.",
"bbox": [501, 69, 921, 325],
"page_idx": 4
},
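The per-instance runtime figures above are averages over 1000 Monte Carlo runs. A plausible measurement sketch in PyTorch is shown below; the warm-up count and the helper name are assumptions, and `model`/`x` stand for any trained estimator and one input batch.

```python
import time
import torch

def average_runtime_us(model, x, runs: int = 1000) -> float:
    """Average per-inference runtime in microseconds over `runs` calls."""
    model.eval()
    with torch.no_grad():
        for _ in range(10):           # warm-up iterations (assumed count)
            model(x)
        if x.is_cuda:                 # GPU kernels launch asynchronously
            torch.cuda.synchronize()
        t0 = time.perf_counter()
        for _ in range(runs):
            model(x)
        if x.is_cuda:
            torch.cuda.synchronize()
    return (time.perf_counter() - t0) / runs * 1e6
```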
{
"type": "text",
"text": "V. CONCLUSIONS",
"text_level": 1,
"bbox": [647, 337, 779, 349],
"page_idx": 4
},
{
"type": "text",
"text": "In this letter, we investigated channel estimation approaches for PASS and proposed two DL estimators with advanced neural network architectures to infer a high-dimensional channel vector from inherently low-dimensional pilot observations. Specifically, the proposed PAMoE accommodates variable PA configurations and exploits multi-expert diversity in the MLP-Mixer for improved channel estimation. Meanwhile, the proposed PAformer is capable of handling an arbitrary number of PAs thanks to the self-attention mechanism. Numerical results demonstrated that the proposed DL estimators outperform conventional methods and significantly reduce the pilot overhead.",
"bbox": [501, 354, 921, 536],
"page_idx": 4
},
{
"type": "text",
"text": "REFERENCES",
"text_level": 1,
"bbox": [663, 547, 761, 561],
"page_idx": 4
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"[1] X. Shao, Q. Jiang, and R. Zhang, \"6D movable antenna based on user distribution: Modeling and optimization,\" IEEE Trans. Wireless Commun., vol. 24, no. 1, pp. 355-370, Jan. 2025.",
"[2] A. Fukuda, H. Yamamoto, H. Okazaki, Y. Suzuki, and K. Kawai, \"Pinching antenna: Using a dielectric waveguide as an antenna,\" NTT DOCOMO Technical J., vol. 23, no. 3, pp. 5-12, Jan. 2022.",
"[3] Z. Ding, R. Schober, and H. V. Poor, \"Flexible-antenna systems: A pinching-antenna perspective,\" IEEE Trans. Commun., 2025.",
"[4] D. Tyrovolas, S. A. Tegos, P. D. Diamantoulakis, S. Ioannidis, C. K. Liaskos, and G. K. Karagiannidis, \"Performance analysis of pinching-antenna systems,\" IEEE Trans. Cognit. Commun. Networking, 2025.",
"[5] Z. Wang, C. Ouyang, X. Mu, Y. Liu, and Z. Ding, \"Modeling and beamforming optimization for pinching-antenna systems,\" arXiv preprint arXiv:2502.05917, 2025.",
"[6] S. A. Tegos, P. D. Diamantoulakis, Z. Ding, and G. K. Karagiannidis, \"Minimum data rate maximization for uplink pinching-antenna systems,\" IEEE Wireless Commun. Lett., 2025.",
"[7] F. Rezaei, S. M. Marvasti-Zadeh, C. Tellambura, and A. Maaref, \"Adversarial score-based generative models for MMSE-achieving AmBC channel estimation,\" IEEE Wireless Commun. Lett., vol. 13, no. 4, pp. 1053-1057, Apr. 2024.",
"[8] X. Fan, Y. Zou, and L. Zhai, \"Spatial-attention-based channel estimation in IRS-assisted mmWave MU-MISO systems,\" IEEE Internet Things J., vol. 11, no. 6, pp. 9801-9813, Mar. 2024.",
"[9] B. Zhou, X. Yang, S. Ma, F. Gao, and G. Yang, \"Pay less but get more: A dual-attention-based channel estimation network for massive MIMO systems with low-density pilots,\" IEEE Trans. Wireless Commun., vol. 23, no. 6, pp. 6061-6076, Jun. 2024.",
"[10] W. Cai, J. Jiang, F. Wang, J. Tang, S. Kim, and J. Huang, \"A survey on mixture of experts,\" arXiv preprint arXiv:2407.06204, 2024.",
"[11] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin, \"Attention is all you need,\" in Proc. NeurIPS, vol. 30, Nov. 2017."
],
"bbox": [506, 566, 921, 941],
"page_idx": 4
},
{
"type": "page_number",
"text": "5",
"bbox": [911, 30, 919, 40],
"page_idx": 4
}
]
data/2025/2503_13xxx/2503.13268/27688151-5fde-4680-b981-ebf649a96c83_model.json
ADDED
@@ -0,0 +1,1090 @@
[
[
{
"type": "page_number",
"bbox": [0.912, 0.031, 0.921, 0.041],
"angle": 0,
"content": "1"
},
{
"type": "title",
"bbox": [0.163, 0.07, 0.838, 0.141],
"angle": 0,
"content": "Channel Estimation for Pinching-Antenna Systems (PASS)"
},
{
"type": "text",
"bbox": [0.217, 0.148, 0.782, 0.165],
"angle": 0,
"content": "Jian Xiao, Ji Wang, Senior Member, IEEE, and Yuanwei Liu, Fellow, IEEE"
},
{
"type": "aside_text",
"bbox": [0.023, 0.266, 0.06, 0.708],
"angle": 270,
"content": "arXiv:2503.13268v4 [cs.IT] 10 May 2025"
},
{
"type": "text",
"bbox": [0.074, 0.221, 0.493, 0.488],
"angle": 0,
"content": "Abstract—Pinching antennas (PAs) represent a revolutionary flexible antenna technology that leverages dielectric waveguides and electromagnetic coupling to mitigate large-scale path loss. This letter is the first to explore channel estimation for Pinching-Antenna SyStems (PASS), addressing their uniquely ill-conditioned and underdetermined channel characteristics. In particular, two efficient deep learning-based channel estimators are proposed. 1) PAMoE: This estimator incorporates dynamic padding, feature embedding, fusion, and mixture of experts (MoE) modules, which effectively leverage the positional information of PAs and exploit expert diversity. 2) PAformer: This Transformer-style estimator employs the self-attention mechanism to predict channel coefficients in a per-antenna manner, which offers more flexibility to adaptively deal with dynamic numbers of PAs in practical deployment. Numerical results demonstrate that 1) the proposed deep learning-based channel estimators outperform conventional methods and exhibit excellent zero-shot learning capabilities, and 2) PAMoE delivers higher channel estimation accuracy via MoE specialization, while PAformer natively handles an arbitrary number of PAs, trading self-attention complexity for superior scalability."
},
{
"type": "text",
"bbox": [0.075, 0.493, 0.492, 0.52],
"angle": 0,
"content": "Index Terms—Channel estimation, mixture of experts, pinching antenna, Transformer."
},
{
"type": "title",
"bbox": [0.217, 0.542, 0.352, 0.556],
"angle": 0,
"content": "I. INTRODUCTION"
},
{
"type": "text",
"bbox": [0.074, 0.561, 0.493, 0.865],
"angle": 0,
"content": "ADVANCED multiple-input multiple-output (MIMO) is a critical enabling technology for sixth-generation (6G) wireless networks, targeting ultra-high-speed data transmission and seamless connectivity. Among emerging MIMO technologies, flexible antenna systems, e.g., fluid and movable antennas, offer dynamic channel reconfiguration capabilities by leveraging their positional or rotational adjustability to adapt to the evolving spatial distribution of users [1]. However, they still struggle with addressing large-scale path loss and the need for line-of-sight (LoS) links, both of which are essential for high-quality communication. To overcome these challenges, pinching antennas (PAs) offer a novel approach to creating controlled radiation sites. PAs were initially introduced and developed into a prototype by NTT DOCOMO in 2022 [2]. The fundamental concept of PAs relies on the transfer of electromagnetic (EM) waves from a dielectric waveguide to a nearby dielectric material. Compared to typical movable antennas, PAs can achieve movement ranges spanning thousands to tens of thousands of wavelengths and support a larger number of connected antennas on a waveguide. This technology allows"
},
{
"type": "text",
"bbox": [0.074, 0.875, 0.49, 0.921],
"angle": 0,
"content": "Jian Xiao and Ji Wang are with the Department of Electronics and Information Engineering, College of Physical Science and Technology, Central China Normal University, Wuhan 430079, China (e-mail: jianx@mails.ccnu.edu.cn; jiwang@ccnu.edu.cn)."
},
{
"type": "text",
"bbox": [0.075, 0.921, 0.49, 0.946],
"angle": 0,
"content": "Yuanwei Liu is with the Department of Electrical and Electronic Engineering, The University of Hong Kong, Hong Kong (e-mail: yuanwei@hku.hk)."
},
{
"type": "text",
"bbox": [0.503, 0.22, 0.922, 0.281],
"angle": 0,
"content": "for the flexible positioning of PAs, enabling the establishment of adjustable and reliable LoS transceiver links in Pinching-Antenna SyStems (PASS), thereby effectively mitigating large-scale path loss."
},
{
"type": "text",
"bbox": [0.503, 0.281, 0.922, 0.719],
"angle": 0,
"content": "The theoretical exploration of PA technology is still in its early stages, yet it has already garnered significant attention from both academia and industry. The authors of [3] first provided a comprehensive performance analysis for PASS and proposed a non-orthogonal multiple access (NOMA)-enhanced PASS. Subsequently, related works have explored performance analysis [4], physical modeling and beamforming [5], and minimum rate maximization for PASS [6]. It should be emphasized that the existing works on PA optimization strategies hinge crucially on precise channel estimation. However, channel estimation in PASS has not been investigated in depth due to new challenges. Firstly, PASS introduces a fundamental challenge in channel estimation due to the highly coupled nature of the in-waveguide channel and the wireless propagation channel between PAs and users. Each waveguide is fed by only one radio frequency (RF) chain, while it may carry multiple PAs. This setup necessitates the recovery of high-dimensional information from low-dimensional observations, resulting in an ill-conditioned underdetermined recovery problem. Secondly, the in-waveguide channel is deterministic and completely depends on the positions of PAs in the waveguide. Hence, no stacking of pilot signals can produce additional independent linear equations to solve the high-dimensional wireless propagation channel. Moreover, the dynamic nature of PAs with the plug-and-play capability complicates the channel acquisition. The activation locations of a large number of PAs can be adjusted in a specified region to improve user service. Therefore, the wireless propagation channel presents near-field spatial non-stationarity effects due to the large array aperture."
},
{
"type": "text",
"bbox": [0.503, 0.719, 0.922, 0.945],
"angle": 0,
"content": "To fill this research gap, this letter presents the first investigation into channel estimation for PASS. First and foremost, we extend classic linear channel estimators by adapting the antenna switching strategy for PASS, which provides fundamental channel estimation benchmarks. Furthermore, inspired by recent advances in deep learning (DL)-enabled channel estimation [7]-[9], we propose two efficient DL models to improve channel estimation accuracy while reducing pilot overhead. The first DL estimator, termed \\(PAMoE\\), is built on the mixture of experts (MoE) architecture, which integrates PA positions and pilot signal features through multi-expert mechanisms to adaptively model the dynamic channel distributions in PASS. To enhance the flexibility and scalability of the channel estimation model for dynamic PA counts, we further propose a Transformer-style estimator, termed \\(PAformer\\), which lever-"
}
],
[
{
"type": "page_number",
"bbox": [0.912, 0.031, 0.921, 0.041],
"angle": 0,
"content": "2"
},
{
"type": "image",
"bbox": [0.134, 0.074, 0.435, 0.212],
"angle": 0,
"content": null
},
{
"type": "image_caption",
"bbox": [0.075, 0.226, 0.375, 0.24],
"angle": 0,
"content": "Fig. 1. Pinching antennas assisted multi-user systems."
},
{
"type": "text",
"bbox": [0.074, 0.263, 0.492, 0.385],
"angle": 0,
"content": "ages the self-attention mechanism to predict channel coefficients on a per-antenna basis. Numerical results demonstrate that the proposed DL estimators achieve superior channel estimation accuracy with significantly reduced pilot overhead compared to conventional channel estimators. In particular, both PAMoE and PAformer exhibit superior zero-shot learning capabilities on dynamic PA configurations without retraining operations."
},
{
"type": "title",
"bbox": [0.102, 0.398, 0.466, 0.412],
"angle": 0,
"content": "II. SYSTEM MODEL AND PROBLEM FORMULATION"
},
{
"type": "text",
"bbox": [0.074, 0.416, 0.493, 0.737],
"angle": 0,
"content": "As illustrated in Fig. 1, consider an uplink communication system that includes a base station (BS) equipped with \\(N\\) PAs and \\(K\\) single-antenna user equipments (UEs). Each PA has the same length \\(L\\) on the waveguide. In a Cartesian system, the UEs are assumed to be randomly distributed within a rectangular region on the \\(x - y\\) plane, with dimensions \\(D_{x}\\) and \\(D_{y}\\). The position of the \\(k\\)-th UE is represented by \\(\\psi_{k} = (x_{k},y_{k},0)\\). Suppose the waveguide extends parallel to the \\(x\\)-axis. Its height is denoted by \\(d\\), and its length aligns with the rectangular dimension \\(D_{x}\\). Hence, the coordinates of PA \\(n\\) are given by \\(\\psi_{n}^{\\mathrm{P}} = (x_{n}^{\\mathrm{P}},0,d)\\), where \\(x_{n}^{\\mathrm{P}}\\) lies in the interval \\([L,D_x]\\). In this work, the discrete activation deployment of PAs is adopted, which simplifies the hardware design and is more practical than continuous activation [5]. The PAs can only be activated at specific discrete positions along the waveguide, forming the feasible set \\(\\mathcal{S} = \\left\\{L + \\frac{D_x - L}{Q - 1} (q - 1) \\mid q = 1,2,\\dots ,Q\\right\\}\\), where \\(Q\\) denotes the number of discrete positions available. Since all \\(N\\) PAs lie along the same waveguide, the transmitted signal of each PA is essentially a phase-shifted version of the signal from the BS at the waveguide feed point. The in-waveguide channel \\(\\mathbf{g} \\in \\mathbb{C}^{N \\times 1}\\) can be expressed as"
},
{
"type": "equation",
"bbox": [0.117, 0.742, 0.49, 0.772],
"angle": 0,
"content": "\\[\n\\mathbf{g} = \\left[ \\alpha_{1} e^{-j\\frac{2\\pi}{\\lambda_{g}}\\left|\\psi_{0}^{\\mathrm{P}} - \\psi_{1}^{\\mathrm{P}}\\right|}, \\dots, \\alpha_{N} e^{-j\\frac{2\\pi}{\\lambda_{g}}\\left|\\psi_{0}^{\\mathrm{P}} - \\psi_{N}^{\\mathrm{P}}\\right|} \\right]^{\\top}, \\tag{1}\n\\]"
},
{
"type": "text",
"bbox": [0.075, 0.778, 0.49, 0.869],
"angle": 0,
"content": "where \\(\\psi_0^{\\mathrm{P}}\\) denotes the position of the waveguide's feed point. Parameter \\(\\lambda_{g} = \\frac{\\lambda}{n_{e}}\\) is the guided wavelength. Here, \\(\\lambda\\) is the wavelength in free space and \\(n_e\\) is the effective refractive index of the dielectric waveguide. \\(\\alpha_{n}\\) is the factor that determines the ratio of power exchanged between the waveguide and PA \\(n\\), governed by the coupling length of the PA [5]."
},
{
"type": "text",
"bbox": [0.075, 0.87, 0.492, 0.947],
"angle": 0,
"content": "For the wireless propagation channel \\(\\mathbf{h}_k\\in \\mathbb{C}^{N\\times 1}\\) between the \\(k\\)-th UE and the PAs, \\(\\mathbf{h}_k\\) is composed of the LoS component \\(\\mathbf{h}_k^{\\mathrm{LoS}}\\) and the non-line-of-sight (NLoS) component \\(\\mathbf{h}_k^{\\mathrm{NLoS}}\\), i.e., \\(\\mathbf{h}_k = \\Upsilon_k\\odot \\mathbf{h}_k^{\\mathrm{LoS}} + \\mathbf{h}_k^{\\mathrm{NLoS}}\\). Here, the operator \\(\\odot\\) denotes the Hadamard product. \\(\\Upsilon_{k} = [\\upsilon_{k,1},\\dots ,\\upsilon_{k,N}]\\in \\mathbb{C}^{N\\times 1}\\)"
},
{
"type": "text",
"bbox": [0.503, 0.07, 0.921, 0.145],
"angle": 0,
"content": "is a Bernoulli random vector and the variable \\(\\upsilon_{k,n}\\) takes values from the set \\(\\{0,1\\}\\), characterizing the existence of a LoS link between the \\(n\\)-th PA and the \\(k\\)-th UE<sup>1</sup>. The geometric spherical wavefront model-based LoS channel between the \\(k\\)-th UE and the PAs is expressed as [3]"
},
{
"type": "equation",
"bbox": [0.529, 0.15, 0.921, 0.194],
"angle": 0,
"content": "\\[\n\\mathbf{h}_{k}^{\\mathrm{LoS}} = \\left[ \\frac{\\sqrt{\\eta} e^{-j\\frac{2\\pi}{\\lambda}|\\psi_{k} - \\psi_{1}^{\\mathrm{P}}|}}{|\\psi_{k} - \\psi_{1}^{\\mathrm{P}}|}, \\dots, \\frac{\\sqrt{\\eta} e^{-j\\frac{2\\pi}{\\lambda}|\\psi_{k} - \\psi_{N}^{\\mathrm{P}}|}}{|\\psi_{k} - \\psi_{N}^{\\mathrm{P}}|} \\right]^{\\top}, \\tag{2}\n\\]"
},
{
"type": "text",
"bbox": [0.503, 0.2, 0.921, 0.25],
"angle": 0,
"content": "where \\(\\eta = \\frac{\\lambda^2}{16\\pi^2}\\) denotes the path loss at a reference distance of \\(1\\mathrm{m}\\). Considering \\(S\\) scatterers in the \\(k\\)-th UE \\(\\rightarrow\\) PAs link, the NLoS channel \\(\\mathbf{h}_k^{\\mathrm{NLoS}} \\in \\mathbb{C}^{N \\times 1}\\) is given by"
},
{
"type": "equation",
"bbox": [0.605, 0.256, 0.921, 0.297],
"angle": 0,
"content": "\\[\n\\mathbf{h}_{k}^{\\mathrm{NLoS}} = \\sqrt{\\frac{1}{S}} \\sum_{s=1}^{S} \\beta_{k,s} \\mathbf{a}_{k,s} e^{j\\eta_{k,s}}, \\tag{3}\n\\]"
},
{
"type": "text",
"bbox": [0.503, 0.303, 0.922, 0.426],
"angle": 0,
"content": "where \\(\\beta_{k,s} \\sim \\mathcal{CN}(0, \\sigma_s^2)\\) and \\(\\eta_{k,s} \\sim \\mathcal{U}[0, 2\\pi]\\) denote the complex gain and the random phase of the \\(s\\)-th scatterer path, respectively. \\(\\mathbf{a}_{k,s} \\in \\mathbb{C}^{N \\times 1}\\) represents the receiving array response at the PAs. Since the movable region of PAs is large, the equivalent array aperture of the PAs is likely to exceed the Rayleigh distance, which is the criterion for determining the near-field boundary. Hence, the spherical wavefront is utilized to characterize the array response \\(\\mathbf{a}_{k,s}\\), which is given by"
},
{
"type": "equation",
"bbox": [0.556, 0.431, 0.921, 0.475],
"angle": 0,
"content": "\\[\n\\mathbf{a}_{k,s} = \\left[ \\frac{\\sqrt{\\eta} e^{-j2\\pi d_{s,1}/\\lambda}}{d_{k,s} d_{s,1}}, \\dots, \\frac{\\sqrt{\\eta} e^{-j2\\pi d_{s,N}/\\lambda}}{d_{k,s} d_{s,N}} \\right]^{\\top}, \\tag{4}\n\\]"
},
{
"type": "text",
"bbox": [0.504, 0.478, 0.921, 0.508],
"angle": 0,
"content": "where \\(d_{k,s}\\) and \\(d_{s,n}, n\\in \\{1,\\ldots ,N\\}\\) denote the distances from UE \\(k\\) to scatterer \\(s\\) and from scatterer \\(s\\) to PA \\(n\\), respectively."
},
{
"type": "text",
"bbox": [0.504, 0.509, 0.921, 0.539],
"angle": 0,
"content": "The received signal at the BS during the \\(t\\)-th slot can be expressed as"
},
{
"type": "equation",
"bbox": [0.635, 0.545, 0.921, 0.585],
"angle": 0,
"content": "\\[\ny_{t} = \\sum_{k=1}^{K} \\mathbf{g}^{\\top} \\mathbf{h}_{k} s_{k,t} + n_{t}, \\tag{5}\n\\]"
},
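To make the signal model in (1)-(5) concrete, here is a toy NumPy sketch that builds the in-waveguide channel, a LoS-only propagation channel for one UE, and a single received pilot sample. All numeric values, the feed point at x = 0, and the equal coupling factors are illustrative assumptions, not parameters from the paper.

```python
import numpy as np

rng = np.random.default_rng(0)
c, fc, n_e = 3e8, 28e9, 1.4
lam = c / fc                      # free-space wavelength
lam_g = lam / n_e                 # guided wavelength, lambda_g = lambda / n_e
N, d = 4, 5.0                     # number of PAs and waveguide height (toy)

x_pa = np.sort(rng.uniform(0.5, 20.0, N))        # PA positions on the x-axis
psi_pa = np.stack([x_pa, np.zeros(N), np.full(N, d)], axis=1)
psi_ue = np.array([10.0, 8.0, 0.0])              # one UE on the x-y plane

# Eq. (1): g_n = alpha_n * exp(-j 2*pi/lam_g * |psi_0 - psi_n|); with the
# feed point assumed at (0, 0, d), the distance is simply x_pa.
alpha = np.full(N, 1.0 / np.sqrt(N))             # equal power coupling (toy)
g = alpha * np.exp(-1j * 2 * np.pi / lam_g * x_pa)

# Eq. (2): spherical-wavefront LoS channel with eta = lam^2 / (16 pi^2)
dist = np.linalg.norm(psi_ue - psi_pa, axis=1)
eta = lam**2 / (16 * np.pi**2)
h_los = np.sqrt(eta) * np.exp(-1j * 2 * np.pi / lam * dist) / dist

# Eq. (5) for a single UE: one scalar observation per pilot slot
s, sigma_n = 1.0, 1e-6
noise = sigma_n * (rng.standard_normal() + 1j * rng.standard_normal()) / np.sqrt(2)
y = g @ h_los * s + noise
print(abs(y))
```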
{
"type": "text",
"bbox": [0.504, 0.591, 0.92, 0.621],
"angle": 0,
"content": "where \\(s_{k,t}\\) represents the symbol transmitted by the \\(k\\)-th UE, and \\(n_t \\sim \\mathcal{CN}(0, \\sigma_n^2)\\) is the additive white Gaussian noise."
},
{
"type": "text",
"bbox": [0.503, 0.622, 0.922, 0.803],
"angle": 0,
"content": "In PASS, the in-waveguide channel \\(\\mathbf{g}\\) can be regarded as the deterministic channel component that depends on the locations and the coupling lengths of \\(\\mathrm{PAs}^2\\). Consequently, we merely need to estimate the wireless propagation channel \\(\\mathbf{h}_k\\) from the \\(k\\)-th UE to the PAs. However, PASS introduces a fundamental challenge in channel estimation due to the highly coupled nature of \\(\\mathbf{g}\\) and \\(\\mathbf{h}_k\\). Specifically, while each waveguide is connected to multiple PAs, the waveguide channel is fixed and cannot be reconfigured to perform diverse beam measurements as in conventional hybrid precoding. As a result, the system must infer a high-dimensional channel vector \\(\\mathbf{h}_k\\) from inherently low-dimensional pilot observations, typically just one"
},
{
"type": "text",
"bbox": [0.503, 0.817, 0.922, 0.887],
"angle": 0,
"content": "<sup>1</sup>In the existing works on PASS, a free-space channel model between the UEs and the PAs is commonly utilized by assuming that the flexible PAs are close to the UEs [3]–[6]. However, in practical wireless propagation environments, the NLoS channel component caused by static and dynamic scatterers should be considered. In particular, it is difficult for the PAs to provide full LoS coverage for all UEs in practical deployment environments."
},
{
"type": "text",
"bbox": [0.503, 0.887, 0.922, 0.945],
"angle": 0,
"content": "<sup>2</sup>This work employs the electronic activation of pre-positioned PAs along the waveguide. The system controller only needs to determine the activation state to ascertain the exact positions of PAs. The reconfiguration of the activated PA set is usually several orders of magnitude faster than the channel coherence time, enabling it to respond efficiently to rapid channel variations."
}
],
[
{
"type": "page_number",
"bbox": [0.912, 0.031, 0.921, 0.041],
"angle": 0,
"content": "3"
},
{
"type": "text",
"bbox": [0.074, 0.07, 0.493, 0.252],
"angle": 0,
"content": "scalar per waveguide per measurement. To elaborate, suppose the widely used orthogonal pilot transmission strategy, e.g., time-division, is adopted, and \\(T\\) denotes the number of pilot slots transmitted by UE \\(k\\). Note that even if the UE transmits \\(T \\geq N\\) or more pilot slots, a single waveguide output may provide no more than one linearly independent measurement per slot. That is, each pilot symbol experiences the same fixed merging of \\(N\\) PAs in the waveguide, yielding only repeated versions of the same scalar. Hence, the classic error-criterion-based algorithms, e.g., least squares (LS) or linear minimum mean square error (LMMSE) estimators, are difficult to apply directly for channel estimation in \\(\\mathrm{PASS}^3\\)."
},
{
"type": "title",
"bbox": [0.097, 0.267, 0.471, 0.282],
"angle": 0,
"content": "III. DEEP LEARNING BASED CHANNEL ESTIMATION"
},
{
"type": "text",
"bbox": [0.074, 0.286, 0.492, 0.527],
"angle": 0,
"content": "In this section, to improve the channel estimation performance, we leverage DL models to develop efficient channel estimation schemes for PASS. However, two critical challenges emerge when applying the DL approach to PASS. Firstly, the channel characteristics vary dramatically as PAs freely change positions within designated regions, which requires an efficient network architecture with sufficient capacity to learn the extended channel state space. Secondly, by dynamically activating candidate PAs along the waveguide in response to real-time communication demands, the system introduces time-varying channel dimensions and heterogeneous signal distributions. The proposed DL-based channel estimator therefore necessitates an innovative architecture capable of adaptively handling spatio-temporal variations in channel parameters, thereby fostering advancements in neural network design to enable robust estimation under non-stationary conditions."
},
{
"type": "title",
"bbox": [0.075, 0.547, 0.245, 0.56],
"angle": 0,
"content": "A. Dataset Construction"
},
{
"type": "text",
"bbox": [0.074, 0.565, 0.493, 0.854],
"angle": 0,
"content": "In the offline training stage, we collect \\(N_{s}\\) paired samples, i.e., the pilot observation vector \\(\\widetilde{\\mathbf{y}}_k\\in \\mathbb{C}^{T\\times 1}\\) at the BS, the position set \\(\\Psi^{\\mathrm{P}} = [\\psi_1^{\\mathrm{P}},\\dots ,\\psi_N^{\\mathrm{P}}] = [(x_1^{\\mathrm{P}},0,d),\\dots ,(x_N^{\\mathrm{P}},0,d)]\\) of PAs, and the corresponding channel sample \\(\\mathbf{h}_k\\). In the proposed channel estimation network, the input tensor is designed as the set of both PA positions and the pilot observation. Considering the waveguide is deployed parallel to the \\(x\\)-axis, we merely need the \\(x\\)-axis coordinate set \\(\\mathbf{x}^{\\mathrm{P}} = [x_1^{\\mathrm{P}},\\ldots ,x_N^{\\mathrm{P}}]^T\\in \\mathbb{R}^{N\\times 1}\\) of PAs as the input feature. To enable the neural network to adaptively estimate channels across varying PA configurations, we utilize the in-waveguide channel \\(\\mathbf{g}\\) to transform the observation \\(\\widetilde{\\mathbf{y}}_k\\in \\mathbb{C}^{T\\times 1}\\) into \\(\\widetilde{\\mathbf{Y}}_k = \\mathbf{g}\\widetilde{\\mathbf{y}}_k^\\top \\in \\mathbb{C}^{N\\times T}\\). This transformation aligns the input dimension of the neural network with the current number of PAs \\(N\\) in PASS, which facilitates the subsequent channel estimation network design. The complex-value matrix \\(\\widetilde{\\mathbf{Y}}_k\\) is converted into the real-value tensor \\(\\overline{\\mathbf{Y}}_k = \\{\\Re (\\widetilde{\\mathbf{Y}}_k),\\Im (\\widetilde{\\mathbf{Y}}_k)\\} \\in \\mathbb{R}^{N\\times 2T}\\) for neural network processing. Accordingly, the label tensor in the network training is \\(\\bar{\\mathbf{H}}_k = \\{\\Re (\\mathbf{h}_k),\\Im (\\mathbf{h}_k)\\} \\in \\mathbb{R}^{N\\times 2}\\)."
},
{
"type": "text",
"bbox": [0.074, 0.863, 0.493, 0.947],
"angle": 0,
"content": "<sup>3</sup>To collect independent pilot measurements for each PA, as required by LS and LMMSE, one feasible approach is to introduce an antenna switching matrix that selects subsets of PAs in different time slots to realize signal separation. In Section IV of this letter, we provide antenna switching matrix-based LS and LMMSE estimators as channel estimation benchmarks. Note that switching each PA in different slots introduces hardware overhead, extended measurement time, and potential switch losses, all of which reduce practicality."
},
{
"type": "image",
"bbox": [0.569, 0.07, 0.867, 0.203],
"angle": 0,
"content": null
},
{
"type": "image_caption",
"bbox": [0.504, 0.213, 0.813, 0.227],
"angle": 0,
"content": "Fig. 2. Proposed PAMoE model for channel estimation."
},
{
"type": "title",
"bbox": [0.504, 0.253, 0.902, 0.267],
"angle": 0,
"content": "B. PAMoE: Mixture of Experts-Based Channel Estimation"
},
{
"type": "text",
"bbox": [0.503, 0.271, 0.921, 0.346],
"angle": 0,
"content": "We first propose a PAMoE estimator as illustrated in Fig. 2 to address high-dimensional dynamic channel estimation from low-dimensional received pilots, which incorporates the dynamic padding, feature embedding, fusion, and position-aware MoE modules [10]."
},
{
"type": "text",
"bbox": [0.503, 0.347, 0.921, 0.513],
"angle": 0,
"content": "1) Dynamic Padding: To accommodate variable PA counts \\(N\\) and handle variable-length inputs, \\(PAMoE\\) employs dynamic padding up to a maximum \\(N_{\\mathrm{max}}\\). Let \\(B\\) denote the batch size in the network training stage. The input tensor in a training batch can be expressed as PA positions \\(\\mathbf{P} \\in \\mathbb{R}^{B \\times N \\times 1}\\) and pilot signals \\(\\mathbf{S} \\in \\mathbb{R}^{B \\times N \\times 2T}\\), which are the batch versions of \\(\\mathbf{x}^{\\mathrm{P}}\\) and \\(\\overline{\\mathbf{Y}}_k\\), respectively. If \\(N < N_{\\mathrm{max}}\\), the network pads along the PA dimension so that both \\(\\mathbf{P}\\) and \\(\\mathbf{S}\\) become length \\(N_{\\mathrm{max}}\\), i.e., \\(\\mathbf{P}' = [\\mathbf{P}, \\phi^1] \\in \\mathbb{R}^{B \\times N_{\\mathrm{max}} \\times 1}\\), \\(\\mathbf{S}' = [\\mathbf{S}, \\phi^2] \\in \\mathbb{R}^{B \\times N_{\\mathrm{max}} \\times d_{\\mathrm{sig}}}\\), where \\(\\phi^1 \\in \\mathbb{R}^{N_{\\mathrm{pad}} \\times 1}\\) and \\(\\phi^2 \\in \\mathbb{R}^{N_{\\mathrm{pad}} \\times d_{\\mathrm{sig}}}\\) (with \\(N_{\\mathrm{pad}} = N_{\\mathrm{max}} - N\\) and \\(d_{\\mathrm{sig}} = 2T\\)) denote the learnable padding embeddings, respectively."
},
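The dynamic padding step above can be sketched as a small PyTorch module that appends learnable rows along the PA dimension. A minimal sketch under the dimensions stated in the text; the module and attribute names are illustrative assumptions.

```python
import torch
import torch.nn as nn

class DynamicPadding(nn.Module):
    def __init__(self, n_max: int = 32, d_sig: int = 4):
        super().__init__()
        # one learnable row per padded PA slot, for positions and pilots
        self.pad_pos = nn.Parameter(torch.zeros(n_max, 1))
        self.pad_sig = nn.Parameter(torch.zeros(n_max, d_sig))
        self.n_max = n_max

    def forward(self, P: torch.Tensor, S: torch.Tensor):
        # P: (B, N, 1) PA positions; S: (B, N, 2T) pilot features, N <= N_max
        B, N = P.shape[0], P.shape[1]
        n_pad = self.n_max - N
        if n_pad == 0:
            return P, S
        pad_p = self.pad_pos[:n_pad].expand(B, -1, -1)
        pad_s = self.pad_sig[:n_pad].expand(B, -1, -1)
        return torch.cat([P, pad_p], dim=1), torch.cat([S, pad_s], dim=1)
```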
{
"type": "text",
"bbox": [0.503, 0.513, 0.92, 0.62],
"angle": 0,
"content": "2) Feature Embedding: To handle continuous spatial information and achieve efficient extrapolation of PAs, the Fourier basis function is employed, developing a Fourier positional embedding approach. Suppose a set of exponentially increasing frequency bases \\(\\mathbf{f} = \\left[2^{f}\\pi \\right]_{f = 0}^{F - 1}\\in \\mathbb{R}^{F}\\) is defined, where \\(F\\) is the number of frequency components. The scaled position encodings can be expressed as"
},
{
"type": "equation",
"bbox": [0.627, 0.628, 0.921, 0.646],
"angle": 0,
"content": "\\[\n\\boldsymbol{\\Theta} = \\mathbf{P}' \\odot \\mathbf{f} \\in \\mathbb{R}^{B \\times N_{\\max} \\times F}. \\tag{6}\n\\]"
},
{
"type": "text",
"bbox": [0.504, 0.655, 0.921, 0.685],
"angle": 0,
"content": "Then, we apply sinusoidal transformations to obtain sine and cosine features, which are then concatenated as"
},
{
"type": "equation",
"bbox": [0.566, 0.694, 0.921, 0.713],
"angle": 0,
"content": "\\[\n\\mathbf{E} = \\operatorname{Concat}\\left[ \\sin(\\boldsymbol{\\Theta}), \\cos(\\boldsymbol{\\Theta}) \\right] \\in \\mathbb{R}^{B \\times N_{\\max} \\times 2F}. \\tag{7}\n\\]"
},
{
"type": "text",
"bbox": [0.503, 0.721, 0.92, 0.842],
"angle": 0,
"content": "Further, \\(\\mathbf{E}\\) is projected into the embedding space \\(\\mathbf{Z}_{\\mathrm{pos}} = \\mathbf{E}\\mathbf{W}_{\\mathrm{pos}} + \\mathbf{b}_{\\mathrm{pos}} \\in \\mathbb{R}^{B \\times N_{\\mathrm{max}} \\times d_{\\mathrm{hid}}}\\), where \\(\\mathbf{W}_{\\mathrm{pos}} \\in \\mathbb{R}^{2F \\times d_{\\mathrm{hid}}}\\) is the learnable weight matrix and \\(\\mathbf{b}_{\\mathrm{pos}} \\in \\mathbb{R}^{d_{\\mathrm{hid}}}\\) is the bias term. This Fourier embedding effectively captures multi-scale positional variations in the input space. The pilot signal data is embedded into the same hidden dimension \\(d_{\\mathrm{hid}}\\) by the multilayer perceptron (MLP)-based linear mapping module \\(\\phi_{\\mathrm{sig}}\\), i.e., \\(\\mathbf{Z}_{\\mathrm{sig}} = \\phi_{\\mathrm{sig}}(\\mathbf{S}') \\in \\mathbb{R}^{B \\times N_{\\mathrm{max}} \\times d_{\\mathrm{hid}}}\\)."
},
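Equations (6)-(7) plus the linear projection amount to the following PyTorch sketch, assuming eq. (6) scales the padded PA positions \(\mathbf{P}'\); the class and parameter names are illustrative.

```python
import math
import torch
import torch.nn as nn

class FourierPosEmbed(nn.Module):
    def __init__(self, num_freqs: int = 8, d_hid: int = 64):
        super().__init__()
        # exponentially increasing frequency bases: [2^f * pi], f = 0..F-1
        freqs = (2.0 ** torch.arange(num_freqs)) * math.pi
        self.register_buffer("freqs", freqs)
        self.proj = nn.Linear(2 * num_freqs, d_hid)

    def forward(self, P: torch.Tensor) -> torch.Tensor:
        # P: (B, N_max, 1) padded positions -> Theta: (B, N_max, F), eq. (6)
        theta = P * self.freqs
        # eq. (7): concatenate sine and cosine features along the last dim
        E = torch.cat([torch.sin(theta), torch.cos(theta)], dim=-1)
        return self.proj(E)  # Z_pos: (B, N_max, d_hid)
```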
{
"type": "text",
"bbox": [0.503, 0.843, 0.922, 0.916],
"angle": 0,
"content": "3) Feature Fusion: To fuse positional features with pilot signal features, we employ a gating function to generate a gate from a control feature based on the PA positions and apply it to the target feature based on the pilot signal. The gating operation is formulated as"
},
{
"type": "equation",
"bbox": [0.58, 0.926, 0.921, 0.945],
"angle": 0,
"content": "\\[\n\\mathbf{G} = \\sigma\\left(\\mathbf{W}_{g} \\mathbf{Z}_{\\mathrm{pos}} + \\mathbf{b}_{g}\\right) \\in \\mathbb{R}^{B \\times N_{\\max} \\times d_{\\mathrm{hid}}}, \\tag{8}\n\\]"
}
],
[
{
"type": "page_number",
"bbox": [0.912, 0.032, 0.921, 0.04],
"angle": 0,
"content": "4"
},
{
"type": "image",
"bbox": [0.14, 0.071, 0.434, 0.26],
"angle": 0,
"content": null
},
{
"type": "image_caption",
"bbox": [0.075, 0.271, 0.393, 0.284],
"angle": 0,
"content": "Fig. 3. Proposed PAformer model for channel estimation."
},
{
"type": "equation",
"bbox": [0.14, 0.306, 0.492, 0.326],
"angle": 0,
"content": "\\[\n\\mathbf{Z}_{\\mathrm{fused}} = \\mathbf{G} \\odot \\mathbf{Z}_{\\mathrm{sig}} + \\mathbf{Z}_{\\mathrm{pos}} \\in \\mathbb{R}^{B \\times N_{\\max} \\times d_{\\mathrm{hid}}}, \\tag{9}\n\\]"
},
{
"type": "text",
"bbox": [0.075, 0.333, 0.492, 0.378],
"angle": 0,
"content": "where \\(\\mathbf{W}_g\\) and \\(\\mathbf{b}_g\\) denote the weight and bias of a linear layer with \\(d_{\\mathrm{hid}}\\) neurons, respectively, and \\(\\sigma(\\cdot)\\) is a sigmoid activation function."
},
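The gating operation (8) and fusion (9) can be sketched as a minimal PyTorch module; the names are illustrative.

```python
import torch
import torch.nn as nn

class GatedFusion(nn.Module):
    def __init__(self, d_hid: int = 64):
        super().__init__()
        self.gate = nn.Linear(d_hid, d_hid)

    def forward(self, z_pos: torch.Tensor, z_sig: torch.Tensor) -> torch.Tensor:
        # z_pos, z_sig: (B, N_max, d_hid)
        G = torch.sigmoid(self.gate(z_pos))   # eq. (8): gate from positions
        return G * z_sig + z_pos              # eq. (9): gated pilot features
```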
{
"type": "text",
"bbox": [0.075, 0.379, 0.491, 0.515],
"angle": 0,
"content": "4) MoE With Gating Network: Suppose there are \\(E\\) experts, and each expert takes \\(\\mathbf{Z}_{\\mathrm{fused}}\\) and processes it with an MLP-Mixer block across both the feature dimension \\(d_{\\mathrm{hid}}\\) and the spatial dimension \\(N_{\\mathrm{max}}\\). Let \\(\\mathbf{Z}_e = E_e(\\mathbf{Z}_{\\mathrm{fused}}) \\in \\mathbb{R}^{B \\times N_{\\mathrm{max}} \\times d_{\\mathrm{hid}}}\\) denote the output of expert \\(e\\) \\((e = 1, \\dots, E)\\). A gating network is employed to weight each expert's output. Typically, it pools \\(\\mathbf{Z}_{\\mathrm{fused}}\\) over the PA dimension to get a global context \\(\\mathbf{z}_{\\mathrm{pool}} \\in \\mathbb{R}^{B \\times d_{\\mathrm{hid}}}\\), and then applies the softmax activation function to produce gating weights, which can be expressed as"
},
{
"type": "equation",
"bbox": [0.15, 0.523, 0.49, 0.543],
"angle": 0,
"content": "\\[\n\\boldsymbol{\\alpha} = \\operatorname{softmax}\\left(\\mathbf{W}_{\\alpha} \\mathbf{z}_{\\mathrm{pool}} + \\mathbf{b}_{\\alpha}\\right) \\in \\mathbb{R}^{B \\times E}, \\tag{10}\n\\]"
},
{
"type": "text",
"bbox": [0.075, 0.55, 0.49, 0.581],
"angle": 0,
"content": "where \\(\\sum_{e=1}^{E} \\alpha_{b,e} = 1\\), \\(\\forall b \\in \\{1, \\dots, B\\}\\), and \\(\\mathbf{W}_{\\alpha}\\) and \\(\\mathbf{b}_{\\alpha}\\) are the weight and bias of a linear layer with \\(E\\) neurons, respectively."
},
{
"type": "text",
"bbox": [0.075, 0.581, 0.492, 0.643],
"angle": 0,
"content": "Given the output \\(\\mathbf{Z}_e\\) of each expert and the gating weights \\(\\boldsymbol{\\alpha}\\), we form a weighted sum over the experts. Let \\(\\boldsymbol{\\alpha}\\) be reshaped to \\(\\mathbb{R}^{B\\times 1\\times E}\\) so it can broadcast over the \\(N_{\\mathrm{max}}\\) dimension, which yields the following MoE output"
},
{
"type": "equation",
"bbox": [0.162, 0.648, 0.49, 0.688],
"angle": 0,
"content": "\\[\n\\mathbf{Z}_{\\mathrm{MoE}} = \\sum_{e=1}^{E} \\boldsymbol{\\alpha}_{e} \\mathbf{Z}_{e} \\in \\mathbb{R}^{B \\times N_{\\max} \\times d_{\\mathrm{hid}}}. \\tag{11}\n\\]"
},
{
"type": "text",
"bbox": [0.075, 0.693, 0.492, 0.8],
"angle": 0,
"content": "Finally, the network concatenates the positional feature \\(\\mathbf{Z}_{\\mathrm{pos}}\\) and \\(\\mathbf{Z}_{\\mathrm{MoE}}\\) along the last dimension, i.e., \\(\\mathbf{Z}_{\\mathrm{concat}} \\in \\mathbb{R}^{B \\times N_{\\mathrm{max}} \\times (2d_{\\mathrm{hid}})}\\). Then, a linear layer maps \\(\\mathbf{Z}_{\\mathrm{concat}}\\) to the estimated channel \\(\\widetilde{\\mathbf{H}} \\in \\mathbb{R}^{B \\times N \\times 2}\\). In \\(PAMoE\\), the dynamic padding pattern requires the network to fix a predefined maximum number of PAs \\(N_{\\mathrm{max}}\\) in the training stage, which is a limiting factor if the number of PAs exceeds the predefined maximum bound \\(N_{\\mathrm{max}}\\)."
},
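The gating network (10) and expert mixing (11) can be sketched as below. Each expert is stubbed as a per-token MLP rather than a full MLP-Mixer block, so this is an illustrative sketch of the pooling/softmax/broadcast structure only.

```python
import torch
import torch.nn as nn

class SimpleMoE(nn.Module):
    def __init__(self, d_hid: int = 64, n_experts: int = 4):
        super().__init__()
        # stub experts: per-token MLPs standing in for MLP-Mixer blocks
        self.experts = nn.ModuleList(
            [nn.Sequential(nn.Linear(d_hid, d_hid), nn.GELU(),
                           nn.Linear(d_hid, d_hid)) for _ in range(n_experts)]
        )
        self.gate = nn.Linear(d_hid, n_experts)

    def forward(self, z: torch.Tensor) -> torch.Tensor:
        # z: (B, N_max, d_hid)
        z_pool = z.mean(dim=1)                        # global context pooling
        alpha = torch.softmax(self.gate(z_pool), -1)  # eq. (10): (B, E)
        z_e = torch.stack([e(z) for e in self.experts], dim=-1)  # (B,N,d,E)
        # eq. (11): broadcast alpha over the PA and feature dimensions
        return (z_e * alpha[:, None, None, :]).sum(dim=-1)
```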
{
"type": "title",
"bbox": [0.076, 0.82, 0.442, 0.835],
"angle": 0,
"content": "C. PAformer: Transformer-Based Channel Estimation"
},
{
"type": "text",
"bbox": [0.075, 0.839, 0.492, 0.947],
"angle": 0,
"content": "We further propose a PAformer estimator as illustrated in Fig. 3 to address the scalability limitation of \\(PAMoE\\), which is inherently constrained by the fixed \\(N_{\\mathrm{max}}\\) during training. PAformer incorporates self-attention layers with permutation-equivariance, enabling the network to dynamically accommodate an arbitrary number \\(N\\) of PAs. The proposed PAformer predicts channel coefficients in a per-antenna manner, offering"
},
{
"type": "text",
"bbox": [0.504, 0.07, 0.921, 0.099],
"angle": 0,
"content": "flexibility when the test data contains more antennas than were seen in the training stage."
},
{
"type": "text",
"bbox": [0.504, 0.1, 0.921, 0.176],
"angle": 0,
"content": "1) Input Embedding: In PAformer, we first concatenate the position and pilot signal features along their last dimension, i.e., \\(\\mathbf{V} = \\left[\\mathbf{P},\\mathbf{S}\\right]\\in \\mathbb{R}^{B\\times N\\times (2T + 1)}\\). Next, \\(\\mathbf{V}\\) is mapped into a hidden representation \\(\\mathbf{Z}^{(0)}\\) via an MLP-based embedding network \\(\\phi\\), i.e., \\(\\mathbf{Z}^{(0)} = \\phi(\\mathbf{V})\\in \\mathbb{R}^{B\\times N\\times d_{\\mathrm{hid}}}\\)."
},
{
"type": "text",
"bbox": [0.504, 0.177, 0.922, 0.48],
"angle": 0,
"content": "2) Transformer Encoder: PAformer employs a pre-norm Transformer with \\(P\\) stacked self-attention blocks (SABs), each comprising multi-head self-attention (MHA) and a feedforward network (FFN), both with residual connections and layer normalization. Let \\(\\mathbf{Z}^{(\\ell -1)}\\) denote the input to the \\(\\ell\\)-th SAB, and let \\(\\overline{\\mathbf{Z}}^{(\\ell -1)} = \\mathrm{LayerNorm}(\\mathbf{Z}^{(\\ell -1)})\\) denote the pre-normalized feature representation. The output of the MHA module is expressed as \\(\\mathbf{A}^{(\\ell)} = \\mathrm{MHA}(\\mathbf{Q},\\mathbf{K},\\mathbf{V})\\in \\mathbb{R}^{B\\times N\\times d_{\\mathrm{hid}}}\\), where \\(\\mathbf{Q}\\), \\(\\mathbf{K}\\), and \\(\\mathbf{V}\\) denote the query, key, and value tokens of \\(\\overline{\\mathbf{Z}}^{(\\ell -1)}\\) [11], respectively. The result is combined via a residual connection \\(\\overline{\\mathbf{Z}}^{\\prime (\\ell)} = \\overline{\\mathbf{Z}}^{(\\ell -1)} + \\mathbf{A}^{(\\ell)}\\). Then, the FFN is applied to obtain \\(\\mathbf{F}^{(\\ell)} = \\mathrm{FFN}(\\overline{\\mathbf{Z}}^{\\prime (\\ell)})\\), where the FFN is a point-wise MLP operating on each PA token separately. Another residual connection completes this sub-layer, i.e., \\(\\mathbf{Z}^{(\\ell)} = \\overline{\\mathbf{Z}}^{\\prime (\\ell)} + \\mathbf{F}^{(\\ell)}\\). Stacking and applying the \\(P\\) blocks in sequence, i.e., \\(\\mathbf{Z}^{(\\ell)} = \\mathrm{SAB}^{(\\ell)}(\\mathbf{Z}^{(\\ell -1)})\\), \\(\\ell = 1,\\dots ,P\\), starting from \\(\\mathbf{Z}^{(0)}\\), yields the final feature representation \\(\\mathbf{Z}^{(P)} \\in \\mathbb{R}^{B\\times N\\times d_{\\mathrm{hid}}}\\). The final step maps each antenna-wise feature vector in \\(\\mathbf{Z}^{(P)}\\) to the estimated channel \\(\\widehat{\\mathbf{H}}_i\\), where \\(i = 1,\\ldots ,N\\) indexes PAs."
},
{
"type": "text",
"bbox": [0.503, 0.481, 0.922, 0.572],
"angle": 0,
"content": "For the proposed two DL estimators, during the test stage, the trained network can be applied to scenarios with dynamic numbers of PAs, even though the test dataset has a different data distribution and dimensionality compared to the training dataset. This property of the channel estimation network can also be termed zero-shot learning."
},
{
"type": "title",
"bbox": [0.619, 0.59, 0.808, 0.604],
"angle": 0,
"content": "IV. NUMERICAL RESULTS"
},
{
"type": "text",
"bbox": [0.504, 0.61, 0.922, 0.899],
"angle": 0,
"content": "In simulation setups, we set \\(Q = 200\\), \\(K = 4\\), \\(n_e = 1.4\\), \\(d = 5\\), \\(D_x \\times D_y = 20 \\times 20 \\mathrm{~m}^2\\), \\(S = 6\\), and the carrier frequency \\(f_c = 28 \\mathrm{GHz}\\). In the training dataset construction, we collect \\(N_s = 10^5\\) training samples with the fixed number of PAs \\(N^{\\mathrm{tr}} = 16\\), while the test number of PAs is \\(N^{\\mathrm{te}} \\in \\{8, \\dots, 32\\}\\). In the hyper-parameter setups of the proposed DL estimators, we set \\(N_{\\mathrm{max}} = 32\\), \\(E = 4\\), \\(d_{\\mathrm{hid}} = 64\\), \\(P = 4\\), and \\(B = 256\\). The 1-norm \\(\\ell_1\\) is used as the loss function in the network training, i.e., \\(\\ell_1 = ||\\widehat{\\mathbf{H}}_k - \\overline{\\mathbf{H}}_k||_1\\), while the normalized mean squared error (NMSE) is employed as the performance metric, i.e., \\(\\mathrm{NMSE} = \\mathbb{E}\\{||\\widehat{\\mathbf{H}}_k - \\overline{\\mathbf{H}}_k||_F^2 / ||\\overline{\\mathbf{H}}_k||_F^2\\}\\). The antenna switching-based LS and LMMSE estimators are used as the conventional channel estimation benchmarks. Furthermore, to establish relevant benchmarks incorporating state-of-the-art DL estimators, we develop two comparison DL benchmarks drawing inspiration from recent attention-based channel estimation networks, i.e., SA-RN-CE [8] and DACEN [9], which are created by replacing the MoE module in PAMoE with spatial attention (SA) and dual attention (DA) mechanisms, respectively."
},
{
"type": "text",
"bbox": [0.503, 0.9, 0.922, 0.946],
"angle": 0,
"content": "In Fig. 4, we present the NMSE performance of different channel estimation schemes with \\(N^{\\mathrm{te}} = 32\\). The proposed DL estimators with reduced pilot overhead \\(T\\) outperform"
}
],
[
{
"type": "page_number",
"bbox": [0.912, 0.031, 0.921, 0.041],
"angle": 0,
"content": "5"
},
{
"type": "image",
"bbox": [0.15, 0.079, 0.411, 0.24],
"angle": 0,
"content": null
},
{
"type": "image_caption",
"bbox": [0.075, 0.254, 0.345, 0.267
|
| 854 |
+
],
|
| 855 |
+
"angle": 0,
|
| 856 |
+
"content": "Fig. 4. NMSE vs. SNR for different algorithms."
|
| 857 |
+
},
|
| 858 |
+
{
|
| 859 |
+
"type": "image",
|
| 860 |
+
"bbox": [
|
| 861 |
+
0.149,
|
| 862 |
+
0.294,
|
| 863 |
+
0.412,
|
| 864 |
+
0.455
|
| 865 |
+
],
|
| 866 |
+
"angle": 0,
|
| 867 |
+
"content": null
|
| 868 |
+
},
|
| 869 |
+
{
|
| 870 |
+
"type": "image_caption",
|
| 871 |
+
"bbox": [
|
| 872 |
+
0.075,
|
| 873 |
+
0.469,
|
| 874 |
+
0.415,
|
| 875 |
+
0.483
|
| 876 |
+
],
|
| 877 |
+
"angle": 0,
|
| 878 |
+
"content": "Fig. 5. NMSE vs. number of PAs \\(N\\) for different algorithms."
|
| 879 |
+
},
|
| 880 |
+
{
|
| 881 |
+
"type": "table_caption",
|
| 882 |
+
"bbox": [
|
| 883 |
+
0.125,
|
| 884 |
+
0.502,
|
| 885 |
+
0.443,
|
| 886 |
+
0.526
|
| 887 |
+
],
|
| 888 |
+
"angle": 0,
|
| 889 |
+
"content": "TABLEI COMPLEXITY ANALYSIS OF PROPOSED DL ESTIMATORS"
|
| 890 |
+
},
|
| 891 |
+
{
|
| 892 |
+
"type": "table",
|
| 893 |
+
"bbox": [
|
| 894 |
+
0.106,
|
| 895 |
+
0.534,
|
| 896 |
+
0.464,
|
| 897 |
+
0.675
|
| 898 |
+
],
|
| 899 |
+
"angle": 0,
|
| 900 |
+
"content": "<table><tr><td rowspan=\"2\">Estimator</td><td rowspan=\"2\">N</td><td rowspan=\"2\">Params(K)</td><td rowspan=\"2\">FLOPs(M)</td><td colspan=\"2\">Runtime (μs)</td></tr><tr><td>CPU</td><td>GPU</td></tr><tr><td rowspan=\"3\">PAMoE</td><td>8</td><td></td><td></td><td></td><td></td></tr><tr><td>16</td><td>211.9</td><td>13.18</td><td>211.8</td><td>12.73</td></tr><tr><td>32</td><td></td><td></td><td></td><td></td></tr><tr><td rowspan=\"3\">PAformer</td><td>8</td><td></td><td>4.377</td><td>74.62</td><td>3.814</td></tr><tr><td>16</td><td>545.9</td><td>8.756</td><td>128.2</td><td>7.943</td></tr><tr><td>32</td><td></td><td>17.51</td><td>226.8</td><td>12.95</td></tr></table>"
|
| 901 |
+
},
|
| 902 |
+
{
|
| 903 |
+
"type": "text",
|
| 904 |
+
"bbox": [
|
| 905 |
+
0.074,
|
| 906 |
+
0.704,
|
| 907 |
+
0.493,
|
| 908 |
+
0.947
|
| 909 |
+
],
|
| 910 |
+
"angle": 0,
|
| 911 |
+
"content": "the existing linear estimators and DL models. The superior accuracy of \\(PAMoE\\) compared to \\(PA\\text{former}\\) primarily stems from \\(PAMoE\\)'s more specialized design, particularly in its handling and exploitation of the pinching antenna positional information. \\(PA\\text{former}\\), while robust and scalable due to its Transformer foundation, operates with a more general mechanism to support variable PA counts. In Fig. 5, we provide NMSE performance of different channel estimation schemes for varying numbers of PAs, where the signal-to-noise ratio (SNR) is set to 0 dB. Compared to SA-RN-CE and DACEN, the proposed \\(PAMoE\\) and \\(PA\\text{former}\\) trained by the fixed PA configuration exhibit excellent robustness and generalization for dynamic PAs. The proposed DL estimators possess sufficient zero-shot learning capabilities to deal with distinct data distribution in the test stage. Table I summarizes the number of trainable parameters (Params), floating point"
|
| 912 |
+
},
|
| 913 |
+
{
|
| 914 |
+
"type": "text",
|
| 915 |
+
"bbox": [
|
| 916 |
+
0.503,
|
| 917 |
+
0.07,
|
| 918 |
+
0.923,
|
| 919 |
+
0.327
|
| 920 |
+
],
|
| 921 |
+
"angle": 0,
|
| 922 |
+
"content": "operations (FLOPs) and inference runtime of the proposed \\(PAMoE\\) and \\(PA\\text{former}\\), where the NVIDIA RTX 3090 GPU and the 12th Gen Intel(R) Core(TM) i9- 12900K CPU are used as the inference platform. The average inference runtime per channel estimation instance was calculated by averaging over 1000 Monte Carlo experiments. Firstly, for different numbers of PAs \\(N\\), the Params of the proposed \\(PAMoE\\) and \\(PA\\text{former}\\) are the same due to the fixed network architecture. Secondly, the FLOPs of \\(PA\\text{former}\\) increase linearly with the number of PAs \\(N\\), as the higher-dimensional input tensor needs to be processed in the SAB of \\(PA\\text{former}\\). In the \\(PAMoE\\) model, the input tensor is uniformly padded to the same \\(N_{\\max}\\) via padding interpolation, and hence the FLOPs of the \\(PAMoE\\) remain constant for different \\(N\\). Finally, we observe that both \\(PAMoE\\) and \\(PA\\text{former}\\) can achieve the channel estimation at the microseconds (\\(\\mu s\\)) level, demonstrating the practical feasibility of the proposed models for real-time deployment."
|
| 923 |
+
},
|
| 924 |
+
{
|
| 925 |
+
"type": "title",
|
| 926 |
+
"bbox": [
|
| 927 |
+
0.648,
|
| 928 |
+
0.338,
|
| 929 |
+
0.78,
|
| 930 |
+
0.351
|
| 931 |
+
],
|
| 932 |
+
"angle": 0,
|
| 933 |
+
"content": "V. CONCLUSIONS"
|
| 934 |
+
},
|
| 935 |
+
{
|
| 936 |
+
"type": "text",
|
| 937 |
+
"bbox": [
|
| 938 |
+
0.503,
|
| 939 |
+
0.356,
|
| 940 |
+
0.923,
|
| 941 |
+
0.537
|
| 942 |
+
],
|
| 943 |
+
"angle": 0,
|
| 944 |
+
"content": "In this letter, we investigated the channel estimation approaches in PASS, and proposed two DL estimators with the advanced neural network architecture to infer a high-dimensional channel vector from inherently low-dimensional pilot observations, respectively. Specifically, the proposed PAMoE accommodates variable PA configurations and exploits multi-expert diversity in the MLP-Mixer for improved channel estimation. Accordingly, the proposed PAFomer is capable of handling arbitrary number of PAs thanks to the self-attention mechanism. Numerical results demonstrated that the proposed DL estimators outperform conventional methods, and significantly reduce the pilot overhead."
|
| 945 |
+
},
|
| 946 |
+
{
|
| 947 |
+
"type": "title",
|
| 948 |
+
"bbox": [
|
| 949 |
+
0.665,
|
| 950 |
+
0.549,
|
| 951 |
+
0.762,
|
| 952 |
+
0.562
|
| 953 |
+
],
|
| 954 |
+
"angle": 0,
|
| 955 |
+
"content": "REFERENCES"
|
| 956 |
+
},
|
| 957 |
+
{
|
| 958 |
+
"type": "ref_text",
|
| 959 |
+
"bbox": [
|
| 960 |
+
0.515,
|
| 961 |
+
0.567,
|
| 962 |
+
0.922,
|
| 963 |
+
0.603
|
| 964 |
+
],
|
| 965 |
+
"angle": 0,
|
| 966 |
+
"content": "[1] X. Shao, Q. Jiang, and R. Zhang, \"6D movable antenna based on user distribution: Modeling and optimization,\" IEEE Trans. Wireless Commun., vol. 24, no. 1, pp. 355-370, Jan. 2025."
|
| 967 |
+
},
|
| 968 |
+
{
|
| 969 |
+
"type": "ref_text",
|
| 970 |
+
"bbox": [
|
| 971 |
+
0.515,
|
| 972 |
+
0.603,
|
| 973 |
+
0.922,
|
| 974 |
+
0.637
|
| 975 |
+
],
|
| 976 |
+
"angle": 0,
|
| 977 |
+
"content": "[2] A. Fukuda, H. Yamamoto, H. Okazaki, Y. Suzuki, and K. Kawai, \"Pinching antenna: Using a dielectric waveguide as an antenna,\" NTT DOCOMO Technical J., vol. 23, no. 3, pp. 5-12, Jan. 2022."
|
| 978 |
+
},
|
| 979 |
+
{
|
| 980 |
+
"type": "ref_text",
|
| 981 |
+
"bbox": [
|
| 982 |
+
0.514,
|
| 983 |
+
0.637,
|
| 984 |
+
0.921,
|
| 985 |
+
0.659
|
| 986 |
+
],
|
| 987 |
+
"angle": 0,
|
| 988 |
+
"content": "[3] Z. Ding, R. Schober, and H. Vincent Poor, “Flexible-antenna systems: A pinching-antenna perspective,” IEEE Trans. Commun., 2025."
|
| 989 |
+
},
|
| 990 |
+
{
|
| 991 |
+
"type": "ref_text",
|
| 992 |
+
"bbox": [
|
| 993 |
+
0.514,
|
| 994 |
+
0.659,
|
| 995 |
+
0.921,
|
| 996 |
+
0.693
|
| 997 |
+
],
|
| 998 |
+
"angle": 0,
|
| 999 |
+
"content": "[4] D. Tyrovolas, S. A. Tegos, P. D. Diamantoulakis, S. Ioannidis, C. K. Liaskos, and G. K. Karagiannidis, “Performance analysis of pinching-antenna systems,” IEEE Trans. Cognit. Commun. Networking, 2025."
|
| 1000 |
+
},
|
| 1001 |
+
{
|
| 1002 |
+
"type": "ref_text",
|
| 1003 |
+
"bbox": [
|
| 1004 |
+
0.514,
|
| 1005 |
+
0.693,
|
| 1006 |
+
0.921,
|
| 1007 |
+
0.727
|
| 1008 |
+
],
|
| 1009 |
+
"angle": 0,
|
| 1010 |
+
"content": "[5] Z. Wang, C. Ouyang, X. Mu, Y. Liu, and Z. Ding, \"Modeling and beamforming optimization for pinching-antenna systems,\" arXiv preprint arXiv:2502.05917, 2025."
|
| 1011 |
+
},
|
| 1012 |
+
{
|
| 1013 |
+
"type": "ref_text",
|
| 1014 |
+
"bbox": [
|
| 1015 |
+
0.514,
|
| 1016 |
+
0.727,
|
| 1017 |
+
0.921,
|
| 1018 |
+
0.761
|
| 1019 |
+
],
|
| 1020 |
+
"angle": 0,
|
| 1021 |
+
"content": "[6] S. A. Tegos, P. D. Diamantoulakis, Z. Ding, and G. K. Karagiannidis, \"Minimum data rate maximization for uplink pinching-antenna systems,\" IEEE Wireless Commun. Lett., 2025."
|
| 1022 |
+
},
|
| 1023 |
+
{
|
| 1024 |
+
"type": "ref_text",
|
| 1025 |
+
"bbox": [
|
| 1026 |
+
0.514,
|
| 1027 |
+
0.761,
|
| 1028 |
+
0.921,
|
| 1029 |
+
0.806
|
| 1030 |
+
],
|
| 1031 |
+
"angle": 0,
|
| 1032 |
+
"content": "[7] F. Rezaei, S. M. Marvasti-Zadeh, C. Tellambura, and A. Maaref, \"Adversarial score-based generative models for MMSE-achieving AmBC channel estimation,\" IEEE Wireless Commun. Lett., vol. 13, no. 4, pp. 1053-1057, Apr. 2024."
|
| 1033 |
+
},
|
| 1034 |
+
{
|
| 1035 |
+
"type": "ref_text",
|
| 1036 |
+
"bbox": [
|
| 1037 |
+
0.514,
|
| 1038 |
+
0.806,
|
| 1039 |
+
0.921,
|
| 1040 |
+
0.841
|
| 1041 |
+
],
|
| 1042 |
+
"angle": 0,
|
| 1043 |
+
"content": "[8] X. Fan, Y. Zou, and L. Zhai, \"Spatial-attention-based channel estimation in IRS-assisted mmWave MU-MISO systems,\" IEEE Internet Things J., vol. 11, no. 6, pp. 9801-9813, Mar. 2024."
|
| 1044 |
+
},
|
| 1045 |
+
{
|
| 1046 |
+
"type": "ref_text",
|
| 1047 |
+
"bbox": [
|
| 1048 |
+
0.514,
|
| 1049 |
+
0.841,
|
| 1050 |
+
0.921,
|
| 1051 |
+
0.886
|
| 1052 |
+
],
|
| 1053 |
+
"angle": 0,
|
| 1054 |
+
"content": "[9] B. Zhou, X. Yang, S. Ma, F. Gao, and G. Yang, \"Pay less but get more: A dual-attention-based channel estimation network for massive MIMO systems with low-density pilots,\" IEEE Trans. Wireless Commun., vol. 23, no. 6, pp. 6061-6076, Jun. 2024."
|
| 1055 |
+
},
|
| 1056 |
+
{
|
| 1057 |
+
"type": "ref_text",
|
| 1058 |
+
"bbox": [
|
| 1059 |
+
0.508,
|
| 1060 |
+
0.886,
|
| 1061 |
+
0.921,
|
| 1062 |
+
0.908
|
| 1063 |
+
],
|
| 1064 |
+
"angle": 0,
|
| 1065 |
+
"content": "[10] W. Cai, J. Jiang, F. Wang, J. Tang, S. Kim, and J. Huang, “A survey on mixture of experts,” arXiv preprint arXiv:2407.06204, 2024."
|
| 1066 |
+
},
|
| 1067 |
+
{
|
| 1068 |
+
"type": "ref_text",
|
| 1069 |
+
"bbox": [
|
| 1070 |
+
0.508,
|
| 1071 |
+
0.908,
|
| 1072 |
+
0.921,
|
| 1073 |
+
0.942
|
| 1074 |
+
],
|
| 1075 |
+
"angle": 0,
|
| 1076 |
+
"content": "[11] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin, “Attention is all you need,” in Proc. NeurIPS, vol. 30, Nov. 2017."
|
| 1077 |
+
},
|
| 1078 |
+
{
|
| 1079 |
+
"type": "list",
|
| 1080 |
+
"bbox": [
|
| 1081 |
+
0.508,
|
| 1082 |
+
0.567,
|
| 1083 |
+
0.922,
|
| 1084 |
+
0.942
|
| 1085 |
+
],
|
| 1086 |
+
"angle": 0,
|
| 1087 |
+
"content": null
|
| 1088 |
+
}
|
| 1089 |
+
]
|
| 1090 |
+
]
|
data/2025/2503_13xxx/2503.13268/27688151-5fde-4680-b981-ebf649a96c83_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:36ad8569b4283436789ec0402d894921d4515dbd509930ed799b73ecbeefaef3
|
| 3 |
+
size 1493277
|
data/2025/2503_13xxx/2503.13268/full.md
ADDED
|
@@ -0,0 +1,188 @@
|
| 1 |
+
# Channel Estimation for Pinching-Antenna Systems (PASS)
|
| 2 |
+
|
| 3 |
+
Jian Xiao, Ji Wang, Senior Member, IEEE, and Yuanwei Liu, Fellow, IEEE
|
| 4 |
+
|
| 5 |
+
Abstract—Pinching antennas (PAs) represent a revolutionary flexible antenna technology that leverages dielectric waveguides and electromagnetic coupling to mitigate large-scale path loss. This letter is the first to explore channel estimation for Pinching-Antenna SyStems (PASS), addressing their uniquely ill-conditioned and underdetermined channel characteristics. In particular, two efficient deep learning-based channel estimators are proposed. 1) PAMoE: This estimator incorporates dynamic padding, feature embedding, fusion, and mixture of experts (MoE) modules, which effectively leverage the positional information of PAs and exploit expert diversity. 2) PAformer: This Transformer-style estimator employs the self-attention mechanism to predict channel coefficients in a per-antenna manner, which offers more flexibility to adaptively deal with dynamic numbers of PAs in practical deployment. Numerical results demonstrate that 1) the proposed deep learning-based channel estimators outperform conventional methods and exhibit excellent zero-shot learning capabilities, and 2) PAMoE delivers higher channel estimation accuracy via MoE specialization, while PAformer natively handles an arbitrary number of PAs, trading self-attention complexity for superior scalability.
|
| 6 |
+
|
| 7 |
+
Index Terms—Channel estimation, mixture of experts, pinching antenna, Transformer.
|
| 8 |
+
|
| 9 |
+
# I. INTRODUCTION
|
| 10 |
+
|
| 11 |
+
ADVANCED multiple-input multiple-output (MIMO) is a critical enabling technology for sixth-generation (6G) wireless networks, targeting ultra-high-speed data transmission and seamless connectivity. Among emerging MIMO technologies, flexible antenna systems, e.g., fluid and movable antennas, offer dynamic channel reconfiguration capabilities by leveraging their positional or rotational adjustability to adapt to the evolving spatial distribution of users [1]. However, they still struggle with addressing large-scale path loss and the need for line-of-sight (LoS) links, both of which are essential for high-quality communication. To overcome these challenges, pinching antennas (PAs) offer a novel approach to creating controlled radiation sites. PAs were initially introduced and developed into a prototype by NTT DOCOMO in 2022 [2]. The fundamental concept of PAs relies on the transfer of electromagnetic (EM) waves from a dielectric waveguide to a nearby dielectric material. Compared to typical movable antennas, PAs can achieve movement ranges spanning thousands to tens of thousands of wavelengths and support a larger number of connected antennas on a waveguide. This technology allows
|
| 12 |
+
|
| 13 |
+
Jian Xiao and Ji Wang are with the Department of Electronics and Information Engineering, College of Physical Science and Technology, Central China Normal University, Wuhan 430079, China (e-mail: jianx@mails.ccnu.edu.cn; jiwang@ccnu.edu.cn).
|
| 14 |
+
|
| 15 |
+
Yuanwei Liu is with the Department of Electrical and Electronic Engineering, The University of Hong Kong, Hong Kong (e-mail: yuanwei@hku.hk).
|
| 16 |
+
|
| 17 |
+
for the flexible positioning of PAs, enabling the establishment of adjustable and reliable LoS transceiver links in Pinching-Antenna SyStems (PASS), thereby effectively mitigating large-scale path loss.
|
| 18 |
+
|
| 19 |
+
The theoretical exploration of PA technology is still in its early stages, yet it has already garnered significant attention from both academia and industry. The authors of [3] first provided a comprehensive performance analysis for PASS and proposed a non-orthogonal multiple access (NOMA)-enhanced PASS. Subsequently, related works have explored performance analysis [4], physical modeling and beamforming [5], and minimum rate maximization for PASS [6]. It should be emphasized that the existing works on PA optimization strategies hinge crucially on precise channel estimation. However, the channel estimation in PASS has not been investigated in-depth due to new challenges. Firstly, PASS introduces a fundamental challenge in channel estimation due to the highly coupled nature of the in-waveguide channel and wireless propagation channel between PAs and users. Each waveguide is fed by only one radio frequency (RF) chain, while it may carry multiple PAs. This setup necessitates the recovery of high-dimensional information from low-dimensional observations, resulting in an ill-conditioned underdetermined recovery problem. Secondly, the in-waveguide channel is deterministic and completely depends on the positions of PAs in the waveguide. Hence, no stacking of pilot signals can produce additional independent linear equations to solve the high-dimensional wireless propagation channel. Moreover, the dynamic nature of PAs with the plug-and-play capability complicates the channel acquisition. The activation locations of a large number of PAs can be adjusted in a specified region to improve user service. Therefore, the wireless propagation channel presents near-field spatial non-stationarity effects due to the large array aperture.
|
| 20 |
+
|
| 21 |
+
To fill this research gap, this letter presents the first investigation into channel estimation for PASS. First and foremost, we extend classic linear channel estimators by adapting the antenna switching strategy for PASS, which provide fundamental channel estimation benchmarks. Furthermore, inspired by recent advances in deep learning (DL)-enabled channel estimation [7]-[9], we propose two efficient DL models to improve channel estimation accuracy while reducing pilot overhead. The first DL estimator, termed $PAMoE$ , is built on the mixture of experts (MoE) architecture, which integrates PA positions and pilot signal features through multi-expert mechanisms to adaptively model the dynamic channel distributions in PASS. To enhance the flexibility and scalability of the channel estimation model for dynamic PA counts, we further propose a Transformer-style estimator, termed $PAformer$ , which lever-
|
| 22 |
+
|
| 23 |
+

|
| 24 |
+
Fig. 1. Pinching antennas assisted multi-user systems.
|
| 25 |
+
|
| 26 |
+
ages the self-attention mechanism to predict channel coefficients on a per-antenna basis. Numerical results demonstrate that the proposed DL estimators achieve superior channel estimation accuracy with significantly reduced pilot overhead compared to conventional channel estimators. In particular, both PAMoE and PAformer exhibit superior zero-shot learning capabilities on dynamic PA configurations without retraining operations.
|
| 27 |
+
|
| 28 |
+
# II. SYSTEM MODEL AND PROBLEM FORMULATION
|
| 29 |
+
|
| 30 |
+
As illustrated in Fig. 1, we consider an uplink communication system that includes a base station (BS) equipped with $N$ PAs and $K$ single-antenna user equipments (UEs). Each PA has the same length $L$ on the waveguide. In a Cartesian coordinate system, the UEs are assumed to be randomly distributed within a rectangular region on the $x - y$ plane, with dimensions $D_{x}$ and $D_{y}$ . The position of the $k$ -th UE is represented by $\psi_{k} = (x_{k},y_{k},0)$ . Suppose the waveguide extends parallel to the $x$ -axis. Its height is denoted by $d$ , and its length aligns with the rectangular dimension $D_{x}$ . Hence, the coordinates of PA $n$ are given by $\psi_{n}^{\mathrm{P}} = (x_{n}^{\mathrm{P}},0,d)$ , where $x_{n}^{\mathrm{P}}$ lies in the interval $[L,D_x]$ . In this work, the discrete activation deployment of PAs is adopted, which simplifies the hardware design and is more practical than continuous activation [5]. The PAs can only be activated at specific discrete positions along the waveguide, forming the feasible set $\mathcal{S} = \left\{L + \frac{D_x - L}{Q - 1} (q - 1) \mid q = 1,2,\dots ,Q\right\}$ , where $Q$ denotes the number of discrete positions available. Since all $N$ PAs lie along the same waveguide, the transmitted signal of each PA is essentially a phase-shifted version of the signal from the BS at the waveguide feed point. The in-waveguide channel $\mathbf{g} \in \mathbb{C}^{N \times 1}$ can be expressed as
|
| 31 |
+
|
| 32 |
+
$$
|
| 33 |
+
\mathbf{g} = \left[ \alpha_1 e^{-j\frac{2\pi}{\lambda_g}\left|\psi_0^{\mathrm{P}} - \psi_1^{\mathrm{P}}\right|}, \dots, \alpha_N e^{-j\frac{2\pi}{\lambda_g}\left|\psi_0^{\mathrm{P}} - \psi_N^{\mathrm{P}}\right|} \right]^{\top}, \tag{1}
|
| 34 |
+
$$
|
| 35 |
+
|
| 36 |
+
where $\psi_0^{\mathrm{P}}$ denotes the position of the waveguide's feed point. Parameter $\lambda_{g} = \frac{\lambda}{n_{e}}$ is the guided wavelength. Here, $\lambda$ is the wavelength in free space and $n_e$ is the effective refractive index of the dielectric waveguide. $\alpha_{n}$ is the factor that determines the ratio of power exchanged between the waveguide and PA $n$ , governed by the coupling length of the PA [5].
|
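To make (1) concrete, the following minimal NumPy sketch builds $\mathbf{g}$ from the $x$-axis PA positions; the feed position `x_feed` and the unit coupling factors are illustrative assumptions, not values fixed by the letter.

```python
import numpy as np

def in_waveguide_channel(x_pa, fc=28e9, n_e=1.4, alpha=None, x_feed=0.0):
    """Sketch of Eq. (1): phase accumulated from the feed point to each PA."""
    lam = 3e8 / fc                    # free-space wavelength
    lam_g = lam / n_e                 # guided wavelength: lambda_g = lambda / n_e
    x_pa = np.asarray(x_pa, dtype=float)
    if alpha is None:
        alpha = np.ones_like(x_pa)    # assumed unit power-coupling factors alpha_n
    # distances |psi_0^P - psi_n^P| reduce to |x_feed - x_n^P| on a straight waveguide
    return alpha * np.exp(-1j * 2 * np.pi / lam_g * np.abs(x_pa - x_feed))
```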
| 37 |
+
|
| 38 |
+
For the wireless propagation channel $\mathbf{h}_k\in \mathbb{C}^{N\times 1}$ between the $k$ -th UE and the PAs, $\mathbf{h}_k$ is composed of the LoS component $\mathbf{h}_k^{\mathrm{LoS}}$ and the non-line-of-sight (NLoS) component $\mathbf{h}_k^{\mathrm{NLoS}}$ , i.e., $\mathbf{h}_k = \Upsilon_k\odot \mathbf{h}_k^{\mathrm{LoS}} + \mathbf{h}_k^{\mathrm{NLoS}}$ . Here, the operator $\odot$ denotes the Hadamard product. $\Upsilon_{k} = [\upsilon_{k,1},\dots ,\upsilon_{k,N}]\in \mathbb{C}^{N\times 1}$
|
| 39 |
+
|
| 40 |
+
is a Bernoulli random vector and the variable $\upsilon_{k,n}$ takes values from the set $\{0,1\}$ , characterizing the existence of a LoS link between the $n$ -th PA and the $k$ -th UE<sup>1</sup>. The geometric spherical wavefront model-based LoS channel between the $k$ -th UE and the PAs is expressed as [3]
|
| 41 |
+
|
| 42 |
+
$$
|
| 43 |
+
\mathbf{h}_k^{\mathrm{LoS}} = \left[ \frac{\sqrt{\eta}\, e^{-j\frac{2\pi}{\lambda}\left|\psi_k - \psi_1^{\mathrm{P}}\right|}}{\left|\psi_k - \psi_1^{\mathrm{P}}\right|}, \dots, \frac{\sqrt{\eta}\, e^{-j\frac{2\pi}{\lambda}\left|\psi_k - \psi_N^{\mathrm{P}}\right|}}{\left|\psi_k - \psi_N^{\mathrm{P}}\right|} \right]^{\top}, \tag{2}
|
| 44 |
+
$$
|
| 45 |
+
|
| 46 |
+
where $\eta = \frac{\lambda^2}{16\pi^2}$ denotes the path loss at a reference distance of $1\mathrm{m}$ . Considering $S$ scatterers in the $k$ -th UE $\rightarrow$ PAs link, the NLoS channel $\mathbf{h}_k^{\mathrm{NLoS}} \in \mathbb{C}^{N \times 1}$ is given by
|
| 47 |
+
|
| 48 |
+
$$
|
| 49 |
+
\mathbf{h}_k^{\mathrm{NLoS}} = \sqrt{\frac{1}{S}} \sum_{s=1}^{S} \beta_{k,s}\, \mathbf{a}_{k,s}\, e^{j\eta_{k,s}}, \tag{3}
|
| 50 |
+
$$
|
| 51 |
+
|
| 52 |
+
where $\beta_{k,s} \sim \mathcal{CN}(0, \sigma_s^2)$ and $\eta_{k,s} \sim \mathcal{U}[0, 2\pi]$ denote the complex gain and the random phase of the $s$ -th scatterer path, respectively. $\mathbf{a}_{k,s} \in \mathbb{C}^{N \times 1}$ represents the receiving array response at the PAs. Since the movable region of the PAs is large, the equivalent array aperture is likely to exceed the Rayleigh distance, which is the criterion determining the near-field boundary. Hence, the spherical wavefront is utilized to characterize the array response $\mathbf{a}_{k,s}$ , which is given by
|
| 53 |
+
|
| 54 |
+
$$
|
| 55 |
+
\mathbf{a}_{k,s} = \left[ \frac{\sqrt{\eta}\, e^{-j 2\pi d_{s,1}/\lambda}}{d_{k,s}\, d_{s,1}}, \dots, \frac{\sqrt{\eta}\, e^{-j 2\pi d_{s,N}/\lambda}}{d_{k,s}\, d_{s,N}} \right]^{\top}, \tag{4}
|
| 56 |
+
$$
|
| 57 |
+
|
| 58 |
+
where $d_{k,s}$ and $d_{s,n},n\in \{1,\ldots ,N\}$ denote the distances from UE $k$ to scatterer $s$ and from scatterer $s$ to PA $n$ , respectively.
|
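The pieces (2)-(4) combine into $\mathbf{h}_k = \Upsilon_k \odot \mathbf{h}_k^{\mathrm{LoS}} + \mathbf{h}_k^{\mathrm{NLoS}}$; a minimal sampling sketch is given below, where the LoS probability `p_los` and the unit-variance scatterer gains are illustrative assumptions.

```python
import numpy as np

def sample_channel(psi_pa, psi_ue, psi_sc, lam=3e8 / 28e9, p_los=0.8, rng=None):
    """Sketch of h_k = Upsilon ⊙ h_LoS + h_NLoS from Eqs. (2)-(4)."""
    rng = rng or np.random.default_rng(0)
    eta = lam**2 / (16 * np.pi**2)                        # path loss at 1 m
    d = np.linalg.norm(psi_pa - psi_ue, axis=1)           # |psi_k - psi_n^P|
    h_los = np.sqrt(eta) * np.exp(-1j * 2 * np.pi * d / lam) / d   # Eq. (2)
    ups = rng.random(len(psi_pa)) < p_los                 # Bernoulli LoS indicators
    h_nlos = np.zeros(len(psi_pa), dtype=complex)
    for sc in psi_sc:                                     # Eqs. (3)-(4)
        d_ks = np.linalg.norm(sc - psi_ue)                # UE -> scatterer
        d_sn = np.linalg.norm(psi_pa - sc, axis=1)        # scatterer -> PAs
        a = np.sqrt(eta) * np.exp(-1j * 2 * np.pi * d_sn / lam) / (d_ks * d_sn)
        beta = (rng.normal() + 1j * rng.normal()) / np.sqrt(2)  # assumed CN(0, 1) gain
        h_nlos += beta * a * np.exp(1j * rng.uniform(0, 2 * np.pi))
    return ups * h_los + np.sqrt(1 / len(psi_sc)) * h_nlos
```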
| 59 |
+
|
| 60 |
+
The received signal at the BS during the $t$ -th slot can be expressed as
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
y_t = \sum_{k=1}^{K} \mathbf{g}^{\top} \mathbf{h}_k s_{k,t} + n_t, \tag{5}
|
| 64 |
+
$$
|
| 65 |
+
|
| 66 |
+
where $s_{k,t}$ represents the symbol transmitted by the $k$ -th UE, and $n_t \sim \mathcal{CN}(0, \sigma_n^2)$ is the additive white Gaussian noise.
|
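The sketch below simulates (5) for a single UE with unit pilot symbols; it also makes the underdetermination tangible: every slot observes the same scalar $\mathbf{g}^{\top}\mathbf{h}_k$, so extra slots only average out noise. The SNR normalization here is an assumed convention, not one stated in the letter.

```python
import numpy as np

def receive_pilots(g, h_k, T, snr_db=0.0, rng=None):
    """Sketch of Eq. (5) over T slots with unit pilot symbols s_{k,t} = 1."""
    rng = rng or np.random.default_rng(1)
    y = g @ h_k                                   # the single scalar g^T h_k
    sigma = np.sqrt(np.abs(y) ** 2 / 10 ** (snr_db / 10))
    noise = sigma * (rng.normal(size=T) + 1j * rng.normal(size=T)) / np.sqrt(2)
    return y + noise                              # shape (T,): repeated scalar + noise
```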
| 67 |
+
|
| 68 |
+
In PASS, the in-waveguide channel $\mathbf{g}$ can be regarded as the deterministic channel component that depends on the locations and the coupling lengths of the PAs<sup>2</sup>. Consequently, we merely need to estimate the wireless propagation channel $\mathbf{h}_k$ from the $k$ -th UE to the PAs. However, PASS introduces a fundamental challenge in channel estimation due to the highly coupled nature of $\mathbf{g}$ and $\mathbf{h}_k$ . Specifically, while each waveguide is connected to multiple PAs, the waveguide channel is fixed and cannot be reconfigured to perform diverse beam measurements as in conventional hybrid precoding. As a result, the system must infer a high-dimensional channel vector $\mathbf{h}_k$ from inherently low-dimensional pilot observations, typically just one
|
| 69 |
+
|
| 70 |
+
<sup>1</sup>In the existing works for PASS, the free-space channel model between the UEs and the PAs is commonly adopted by assuming that the flexible PAs are close to the UEs [3]–[6]. However, in a practical wireless propagation environment, the NLoS channel component caused by static and dynamic scatterers should be considered. In particular, it is difficult for the PAs to provide full LoS coverage for all UEs in practical deployment environments.
|
| 71 |
+
|
| 72 |
+
<sup>2</sup>This work employs the electronic activation of pre-positioned PAs along the waveguide. The system controller only needs to determine the activation state to ascertain the exact positions of the PAs. The reconfiguration of the activated PA set is usually several orders of magnitude faster than the channel coherence time, enabling it to respond efficiently to rapid channel variations.
|
| 73 |
+
|
| 74 |
+
scalar per waveguide per measurement. To elaborate, suppose the widely used orthogonal pilot transmission strategy, e.g., time-division, is adopted, and $T$ denotes the number of pilot slots transmitted by UE $k$ . Note that even if the UE transmits $T \geq N$ pilot slots, a single waveguide output provides no more than one linearly independent measurement per slot. That is, each pilot symbol experiences the same fixed merging of the $N$ PAs in the waveguide, yielding only repeated versions of the same scalar. Hence, the classic error-criterion-based algorithms, e.g., least square (LS) or linear minimum mean square error (LMMSE) estimators, are difficult to apply directly for channel estimation in PASS<sup>3</sup>.
|
| 75 |
+
|
| 76 |
+
# III. DEEP LEARNING BASED CHANNEL ESTIMATION
|
| 77 |
+
|
| 78 |
+
In this section, to improve the channel estimation performance, we leverage DL models to develop efficient channel estimation schemes for PASS. However, two critical challenges emerge when applying the DL approach to PASS. Firstly, the channel characteristics vary dramatically as PAs freely change positions within designated regions, which requires the efficient network architecture with sufficient capacity to learn the extended channel state space. Secondly, by dynamically activating candidate PAs along the waveguide in response to real-time communication demands, the system introduces time-varying channel dimension and heterogeneous signal distribution. The proposed DL-based channel estimator necessitates an innovative architecture capable of adaptively handling spatio-temporal variations in channel parameters, thereby fostering advancements in neural network design to enable robust estimation under non-stationary conditions.
|
| 79 |
+
|
| 80 |
+
# A. Dataset Construction
|
| 81 |
+
|
| 82 |
+
In the offline training stage, we collect $N_{s}$ paired samples, i.e., the pilot observation vector $\widetilde{\mathbf{y}}_k\in \mathbb{C}^{T\times 1}$ at the BS, the position set $\Psi^{\mathrm{P}} = [\psi_1^{\mathrm{P}},\dots ,\psi_N^{\mathrm{P}}] = [(x_1^{\mathrm{P}},0,d),\dots ,(x_N^{\mathrm{P}},0,d)]$ of PAs, and the corresponding channel sample $\mathbf{h}_k$ . In the proposed channel estimation network, the input tensor is designed as the set of both PA positions and the pilot observation. Considering the waveguide is deployed parallel to the $x$ -axis, we merely need the $x$ -axis coordinate set $\mathbf{x}^{\mathrm{P}} = [x_1^{\mathrm{P}},\ldots ,x_N^{\mathrm{P}}]^T\in \mathbb{R}^{N\times 1}$ of PAs as the input feature. To enable the neural network to adaptively estimate channels across varying PA configurations, we utilize the in-waveguide channel $\mathbf{g}$ to transform the observation $\widetilde{\mathbf{y}}_k\in \mathbb{C}^{T\times 1}$ into $\widetilde{\mathbf{Y}}_k = \mathbf{g}\widetilde{\mathbf{y}}_k^\top \in \mathbb{C}^{N\times T}$ . This transformation aligns the input dimension of the neural network with the current number of PAs $N$ in PASS, which facilitates the subsequent channel estimation network design. The complex-valued matrix $\widetilde{\mathbf{Y}}_k$ is converted into the real-valued tensor $\overline{\mathbf{Y}}_k = \{\Re (\widetilde{\mathbf{Y}}_k),\Im (\widetilde{\mathbf{Y}}_k)\} \in \mathbb{R}^{N\times 2T}$ for neural network processing. Accordingly, the label tensor in the network training is $\bar{\mathbf{H}}_k = \{\Re (\mathbf{h}_k),\Im (\mathbf{h}_k)\} \in \mathbb{R}^{N\times 2}$ .
|
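A minimal sketch of this input construction, assuming `g` and the pilot observation `y_tilde` are given as NumPy arrays:

```python
import numpy as np

def build_input(g, y_tilde):
    """Transform the T observations into the real-valued N x 2T input tensor."""
    Y = np.outer(g, y_tilde)                          # \tilde{Y}_k = g y_k^T in C^{N x T}
    return np.concatenate([Y.real, Y.imag], axis=1)   # \bar{Y}_k in R^{N x 2T}
```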
| 83 |
+
|
| 84 |
+
<sup>3</sup>To collect independent pilot measurements for each PA, as required by LS and LMMSE, one feasible approach is to introduce an antenna switching matrix that selects subsets of PAs in different time slots to realize the signal separation. In Section IV of this letter, we provide antenna switching matrix-based LS and LMMSE estimators as channel estimation benchmarks. Note that switching each PA in different slots introduces hardware overhead, extended measurement time, and potential switch losses, all of which reduce practicality.
|
| 85 |
+
|
| 86 |
+

|
| 87 |
+
Fig. 2. Proposed PAMoE model for channel estimation.
|
| 88 |
+
|
| 89 |
+
# B. PAMoE: Mixture of Experts-Based Channel Estimation
|
| 90 |
+
|
| 91 |
+
We first propose a PAMoE estimator as illustrated in Fig. 2 to address high-dimensional dynamic channel estimation from low-dimensional received pilots, which incorporates the dynamic padding, feature embedding, fusion, and position-aware MoE modules [10].
|
| 92 |
+
|
| 93 |
+
1) Dynamic Padding: To accommodate variable PA counts $N$ and handle variable-length inputs, $PAMoE$ employs dynamic padding up to a maximum $N_{\mathrm{max}}$ . Let $B$ denote the batch size in the network training stage. The input tensor in a training batch can be expressed as PA positions $\mathbf{P} \in \mathbb{R}^{B \times N \times 1}$ and pilot signals $\mathbf{S} \in \mathbb{R}^{B \times N \times 2T}$ , which are the batch version of $\mathbf{x}^{\mathrm{P}}$ and $\overline{\mathbf{Y}}_k$ , respectively. If $N < N_{\mathrm{max}}$ , the network pads along the PA dimension so that both $\mathbf{P}$ and $\mathbf{S}$ become length $N_{\mathrm{max}}$ , i.e., $\mathbf{P}' = [\mathbf{P}, \phi^1] \in \mathbb{R}^{B \times N_{\mathrm{max}} \times 1}$ , $\mathbf{S}' = [\mathbf{S}, \phi^2] \in \mathbb{R}^{B \times N_{\mathrm{max}} \times d_{\mathrm{sig}}}$ (with $d_{\mathrm{sig}} = 2T$ ), where $\phi^1 \in \mathbb{R}^{N_{\mathrm{pad}} \times 1}$ and $\phi^2 \in \mathbb{R}^{N_{\mathrm{pad}} \times d_{\mathrm{sig}}}$ $(N_{\mathrm{pad}} = N_{\mathrm{max}} - N)$ denote the learnable padding embeddings, respectively.
|
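A PyTorch reading of this step might look as follows; storing the padding embeddings at full length $N_{\mathrm{max}}$ and slicing off the first $N_{\mathrm{pad}}$ rows is an implementation choice of this sketch, not a detail from the letter.

```python
import torch
import torch.nn as nn

class DynamicPadding(nn.Module):
    """Pad (P, S) along the PA dimension up to N_max with learnable embeddings."""
    def __init__(self, n_max, d_sig):
        super().__init__()
        self.phi1 = nn.Parameter(torch.zeros(n_max, 1))      # phi^1 for positions
        self.phi2 = nn.Parameter(torch.zeros(n_max, d_sig))  # phi^2 for pilot features
        self.n_max = n_max

    def forward(self, P, S):          # P: (B, N, 1), S: (B, N, 2T)
        B, N, _ = P.shape
        n_pad = self.n_max - N
        pad1 = self.phi1[:n_pad].unsqueeze(0).expand(B, -1, -1)
        pad2 = self.phi2[:n_pad].unsqueeze(0).expand(B, -1, -1)
        return torch.cat([P, pad1], dim=1), torch.cat([S, pad2], dim=1)
```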
| 94 |
+
|
| 95 |
+
2) Feature Embedding: To handle continuous spatial information and achieve efficient extrapolation of PAs, the Fourier basis function is employed, developing a Fourier positional embedding approach. Suppose a set of exponentially increasing frequency bases $\mathbf{f} = \left[2^{f}\pi \right]_{f = 0}^{F - 1}\in \mathbb{R}^{F}$ is defined, where $F$ is the number of frequency components. The scaled position encodings can be expressed as
|
| 96 |
+
|
| 97 |
+
$$
|
| 98 |
+
\boldsymbol{\Theta} = \mathbf{P}^{\prime} \odot \mathbf{f} \in \mathbb{R}^{B \times N_{\max} \times F}. \tag{6}
|
| 99 |
+
$$
|
| 100 |
+
|
| 101 |
+
Then, we apply sinusoidal transformations to obtain sine and cosine features, which are then concatenated as
|
| 102 |
+
|
| 103 |
+
$$
|
| 104 |
+
\mathbf{E} = \mathrm{Concat}\left[ \sin(\boldsymbol{\Theta}), \cos(\boldsymbol{\Theta}) \right] \in \mathbb{R}^{B \times N_{\max} \times 2F}. \tag{7}
|
| 105 |
+
$$
|
| 106 |
+
|
| 107 |
+
Further, $\mathbf{E}$ is projected into the embedding space $\mathbf{Z}_{\mathrm{pos}} = \mathbf{EW}_{\mathrm{pos}} + \mathbf{b}_{\mathrm{pos}} \in \mathbb{R}^{B \times N_{\mathrm{max}} \times d_{\mathrm{embed}}}$ , where $\mathbf{W}_{\mathrm{pos}} \in \mathbb{R}^{2F \times d_{\mathrm{embed}}}$ is the learnable weight matrix and $\mathbf{b}_{\mathrm{pos}} \in \mathbb{R}^{d_{\mathrm{embed}}}$ is the bias term. This Fourier embedding effectively captures multi-scale positional variations in the input space. The pilot signal data is embedded to the same hidden dimension $d_{\mathrm{hid}}$ by the multilayer perceptron (MLP)-based linear mapping module $\phi_{\mathrm{sig}}$ , i.e., $\mathbf{Z}_{\mathrm{sig}} = \phi_{\mathrm{sig}}(\mathbf{S}') \in \mathbb{R}^{B \times N_{\mathrm{max}} \times d_{\mathrm{hid}}}$ .
|
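Eqs. (6)-(7) and the projection amount to a few lines of PyTorch; the module name and the single linear projection depth are illustrative choices of this sketch.

```python
import math
import torch
import torch.nn as nn

class FourierPosEmbed(nn.Module):
    """Fourier positional embedding of Eqs. (6)-(7) plus the linear projection."""
    def __init__(self, num_freqs, d_embed):
        super().__init__()
        freqs = (2.0 ** torch.arange(num_freqs)) * math.pi   # f = [2^f * pi]
        self.register_buffer("freqs", freqs)
        self.proj = nn.Linear(2 * num_freqs, d_embed)        # W_pos, b_pos

    def forward(self, P):                      # P: (B, N_max, 1) padded x-coordinates
        theta = P * self.freqs                 # Eq. (6) by broadcasting: (B, N_max, F)
        E = torch.cat([torch.sin(theta), torch.cos(theta)], dim=-1)  # Eq. (7)
        return self.proj(E)                    # Z_pos: (B, N_max, d_embed)
```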
| 108 |
+
|
| 109 |
+
3) Feature Fusion: To fuse positional features with pilot signal features, we employ a gating function to generate a gate from a control feature based on PA positions and apply it to the target feature based on pilot signal. The gating operation is formulated as
|
| 110 |
+
|
| 111 |
+
$$
|
| 112 |
+
\mathbf{G} = \sigma\left( \mathbf{W}_g \mathbf{Z}_{\mathrm{pos}} + \mathbf{b}_g \right) \in \mathbb{R}^{B \times N_{\max} \times d_{\mathrm{hid}}}, \tag{8}
|
| 113 |
+
$$
|
| 114 |
+
|
| 115 |
+

|
| 116 |
+
Fig. 3. Proposed PAformer model for channel estimation.
|
| 117 |
+
|
| 118 |
+
$$
|
| 119 |
+
\mathbf{Z}_{\mathrm{fused}} = \mathbf{G} \odot \mathbf{Z}_{\mathrm{sig}} + \mathbf{Z}_{\mathrm{pos}} \in \mathbb{R}^{B \times N_{\max} \times d_{\mathrm{hid}}}, \tag{9}
|
| 120 |
+
$$
|
| 121 |
+
|
| 122 |
+
where $\mathbf{W}_g$ and $\mathbf{b}_g$ denote the weight and bias of a linear layer with $d_{\mathrm{hid}}$ neurons, respectively, and $\sigma (\cdot)$ is a Sigmoid activation function.
|
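Eqs. (8)-(9) then reduce to a position-conditioned sigmoid gate applied to the pilot features; a minimal sketch:

```python
import torch
import torch.nn as nn

class GatedFusion(nn.Module):
    """Fuse positional and pilot features via the gate of Eqs. (8)-(9)."""
    def __init__(self, d_hid):
        super().__init__()
        self.gate = nn.Linear(d_hid, d_hid)    # W_g, b_g

    def forward(self, z_pos, z_sig):           # both (B, N_max, d_hid)
        G = torch.sigmoid(self.gate(z_pos))    # Eq. (8): gate from PA positions
        return G * z_sig + z_pos               # Eq. (9): gated signal + positional skip
```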
| 123 |
+
|
| 124 |
+
4) MoE With Gating Network: Suppose there are $E$ experts and each expert takes $\mathbf{Z}_{\mathrm{fused}}$ and processes it with an MLP-Mixer block across both the feature dimension $d_{\mathrm{hid}}$ and the spatial dimension $N_{\mathrm{max}}$ . Let $\mathbf{Z}_e = E_e(\mathbf{Z}_{\mathrm{fused}}) \in \mathbb{R}^{B \times N_{\mathrm{max}} \times d_{\mathrm{hid}}}$ denote the output of expert $e$ , $(e = 1, \dots, E)$ . A gating network is employed to weight each expert's output. Typically, it pools $\mathbf{Z}_{\mathrm{fused}}$ over the PA dimension to get a global context $\mathbf{z}_{\mathrm{pool}} \in \mathbb{R}^{B \times d_{\mathrm{hid}}}$ , and then applies the softmax activation function to produce gating weights, which can be expressed as
|
| 125 |
+
|
| 126 |
+
$$
|
| 127 |
+
\boldsymbol{\alpha} = \mathrm{softmax}\left( \mathbf{W}_{\alpha} \mathbf{z}_{\mathrm{pool}} + \mathbf{b}_{\alpha} \right) \in \mathbb{R}^{B \times E}, \tag{10}
|
| 128 |
+
$$
|
| 129 |
+
|
| 130 |
+
where $\sum_{e=1}^{E} \alpha_{b,e} = 1$ , $\forall b \in \{1, \dots, B\}$ , $\mathbf{W}_{\alpha}$ and $\mathbf{b}_{\alpha}$ are the weight and bias of a linear layer with $E$ neurons, respectively.
|
| 131 |
+
|
| 132 |
+
Given the output $\mathbf{Z}_e$ of each expert and the gating weights $\alpha$ , we form a weighted sum over experts. Let $\alpha$ be reshaped to $\mathbb{R}^{B\times 1\times E}$ so it can broadcast over the $N_{\mathrm{max}}$ dimension to obtain the following output of MoE
|
| 133 |
+
|
| 134 |
+
$$
|
| 135 |
+
\mathbf{Z}_{\mathrm{MoE}} = \sum_{e=1}^{E} \boldsymbol{\alpha}_e \mathbf{Z}_e \in \mathbb{R}^{B \times N_{\max} \times d_{\mathrm{hid}}}, \tag{11}
|
| 136 |
+
$$
|
| 137 |
+
|
| 138 |
+
Finally, the network concatenates the positional feature $\mathbf{Z}_{\mathrm{pos}}$ and $\mathbf{Z}_{\mathrm{MoE}}$ along the last dimension, i.e., $\mathbf{Z}_{\mathrm{concat}} \in \mathbb{R}^{B \times N_{\mathrm{max}} \times (2d_{\mathrm{hid}})}$ . Then, a linear layer maps $\mathbf{Z}_{\mathrm{concat}}$ to the estimated channel $\widetilde{\mathbf{H}} \in \mathbb{R}^{B \times N \times 2}$ . In $PAMoE$ , the dynamic padding pattern requires the network to fix a predefined maximum number of PAs $N_{\mathrm{max}}$ in the training stage, which is a limiting factor if PAs exceed the predefined maximum bound $N_{\mathrm{max}}$ .
|
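The MoE stage of Eqs. (10)-(11) can be sketched as below; plain two-layer MLP experts stand in for the MLP-Mixer blocks of the letter, an assumption made to keep the example short.

```python
import torch
import torch.nn as nn

class MoEBlock(nn.Module):
    """Gated mixture of experts over the fused features, Eqs. (10)-(11)."""
    def __init__(self, d_hid, n_experts):
        super().__init__()
        self.experts = nn.ModuleList([
            nn.Sequential(nn.Linear(d_hid, d_hid), nn.GELU(), nn.Linear(d_hid, d_hid))
            for _ in range(n_experts)])           # stand-ins for MLP-Mixer experts
        self.gate = nn.Linear(d_hid, n_experts)   # W_alpha, b_alpha

    def forward(self, z_fused):                   # (B, N_max, d_hid)
        z_pool = z_fused.mean(dim=1)              # pool over the PA dimension
        alpha = torch.softmax(self.gate(z_pool), dim=-1)               # Eq. (10)
        z_e = torch.stack([e(z_fused) for e in self.experts], dim=-1)  # (B, N, d, E)
        return (z_e * alpha[:, None, None, :]).sum(dim=-1)             # Eq. (11)
```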
| 139 |
+
|
| 140 |
+
# C. PAformer: Transformer-Based Channel Estimation
|
| 141 |
+
|
| 142 |
+
We further propose a PAformer estimator as illustrated in Fig. 3 to address the scalability limitation of $PAMoE$ , which is inherently constrained by the fixed $N_{\mathrm{max}}$ during training. PAformer incorporates self-attention layers with permutation-equivariance, enabling the network to dynamically accommodate an arbitrary number $N$ of PAs. The proposed PAformer predicts channel coefficients in a per-antenna manner, offering
|
| 143 |
+
|
| 144 |
+
flexibility when the test data contains more antennas than were seen in the training stage.
|
| 145 |
+
|
| 146 |
+
1) Input Embedding: In PAformer, we first concatenate the position and pilot signal features along their last dimension, i.e., $\mathbf{V} = \left[\mathbf{P},\mathbf{S}\right]\in \mathbb{R}^{B\times N\times (2T + 1)}$ . Next, $\mathbf{V}$ is mapped into a hidden representation $\mathbf{Z}^{(0)}$ via an embedding network $\phi$ based on MLP, i.e., $\mathbf{Z}^{(0)} = \phi (\mathbf{V})\in \mathbb{R}^{B\times N\times d_{\mathrm{hid}}}$ .
|
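A minimal sketch of this embedding (the two-layer MLP depth for $\phi$ is an assumed choice):

```python
import torch
import torch.nn as nn

class PAformerEmbed(nn.Module):
    """Concatenate [P, S] and embed to d_hid; N can be arbitrary."""
    def __init__(self, T, d_hid):
        super().__init__()
        self.phi = nn.Sequential(nn.Linear(2 * T + 1, d_hid), nn.GELU(),
                                 nn.Linear(d_hid, d_hid))

    def forward(self, P, S):           # P: (B, N, 1), S: (B, N, 2T)
        V = torch.cat([P, S], dim=-1)  # V = [P, S] in R^{B x N x (2T+1)}
        return self.phi(V)             # Z^(0): (B, N, d_hid)
```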
| 147 |
+
|
| 148 |
+
2) Transformer Encoder: PAformer employs a pre-norm Transformer with $P$ stacked self-attention blocks (SABs), each comprising multi-head self-attention (MHA) and a feedforward network (FFN), both with residual connections and layer normalization. Let $\mathbf{Z}^{(\ell -1)}$ denote the input to the $\ell$ -th SAB, and $\overline{\mathbf{Z}}^{(\ell -1)} = \mathrm{LayerNorm}(\mathbf{Z}^{(\ell -1)})$ the pre-normalized feature representation. The output of the MHA module is expressed as $\mathbf{A}^{(\ell)} = \mathrm{MHA}(\mathbf{Q},\mathbf{K},\mathbf{V})\in \mathbb{R}^{B\times N\times d_{\mathrm{hid}}}$ , where $\mathbf{Q}$ , $\mathbf{K}$ , and $\mathbf{V}$ denote the query, key, and value tokens of $\overline{\mathbf{Z}}^{(\ell -1)}$ [11], respectively. The result is combined via a residual connection $\overline{\mathbf{Z}}^{\prime (\ell)} = \overline{\mathbf{Z}}^{(\ell -1)} + \mathbf{A}^{(\ell)}$ . Then, the FFN is applied to obtain $\mathbf{F}^{(\ell)} = \mathrm{FFN}(\overline{\mathbf{Z}}^{\prime (\ell)})$ , where the FFN is a point-wise MLP operating on each PA token separately. Another residual connection completes this sub-layer, $\mathbf{Z}^{(\ell)} = \overline{\mathbf{Z}}^{\prime (\ell)} + \mathbf{F}^{(\ell)}$ . Applying the $P$ blocks in sequence, i.e., $\mathbf{Z}^{(\ell)} = \mathrm{SAB}^{(\ell)}(\mathbf{Z}^{(\ell -1)})\in \mathbb{R}^{B\times N\times d_{\mathrm{hid}}}$ for $\ell = 1,\dots ,P$ , starting from $\mathbf{Z}^{(0)}$ , yields the final feature representation $\mathbf{Z}^{(P)}$ , as sketched below. The final step maps each antenna-wise feature vector in $\mathbf{Z}^{(P)}$ to the estimated channel $\widehat{\mathbf{H}}_i$ , where $i = 1,\ldots ,N$ indexes PAs.
|
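One SAB, following the residual placement described above (the head count and FFN width are assumed hyper-parameters):

```python
import torch
import torch.nn as nn

class SAB(nn.Module):
    """Pre-norm self-attention block; attention is agnostic to the number N of PAs."""
    def __init__(self, d_hid, n_heads=4):
        super().__init__()
        self.norm = nn.LayerNorm(d_hid)
        self.mha = nn.MultiheadAttention(d_hid, n_heads, batch_first=True)
        self.ffn = nn.Sequential(nn.Linear(d_hid, 4 * d_hid), nn.GELU(),
                                 nn.Linear(4 * d_hid, d_hid))

    def forward(self, z):              # (B, N, d_hid)
        zb = self.norm(z)              # pre-normalization
        a, _ = self.mha(zb, zb, zb)    # A = MHA(Q, K, V) over the PA tokens
        zp = zb + a                    # first residual connection
        return zp + self.ffn(zp)       # point-wise FFN + second residual
```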
| 149 |
+
|
| 150 |
+
For the proposed two DL estimators, during the test stage, the trained network can be applied to scenarios with dynamic numbers of PAs, even though the test dataset has a different data distribution and dimensionality compared to the training dataset. This property of the channel estimation network can also be termed zero-shot learning.
|
| 151 |
+
|
| 152 |
+
# IV. NUMERICAL RESULTS
|
| 153 |
+
|
| 154 |
+
In the simulation setup, we set $Q = 200$ , $K = 4$ , $n_e = 1.4$ , $d = 5$ , $D_x \times D_y = 20 \times 20 \mathrm{~m}^2$ , $S = 6$ , and the carrier frequency $f_c = 28 \mathrm{GHz}$ . In the training dataset construction, we collect $N_s = 10^5$ training samples with the fixed number of PAs $N^{\mathrm{tr}} = 16$ , while the test number of PAs is $N^{\mathrm{te}} \in \{8, \dots, 32\}$ . In the hyper-parameter setups of the proposed DL estimators, we set $N_{\mathrm{max}} = 32$ , $E = 4$ , $d_{\mathrm{hid}} = 64$ , $P = 4$ , and $B = 256$ . The 1-norm $\ell_1$ is used as the loss function in the network training, i.e., $\ell_1 = ||\widehat{\mathbf{H}}_k - \overline{\mathbf{H}}_k||_1$ , while the normalized mean squared error (NMSE) is employed as the performance metric, i.e., $\mathrm{NMSE} = \mathbb{E}\{||\widehat{\mathbf{H}}_k - \overline{\mathbf{H}}_k||_F^2 / ||\overline{\mathbf{H}}_k||_F^2\}$ . The antenna switching-based LS and LMMSE estimators are used as the conventional channel estimation benchmarks. Furthermore, to establish relevant benchmarks incorporating state-of-the-art DL estimators, we develop two comparison DL benchmarks drawing inspiration from recent attention-based channel estimation networks, i.e., SA-RN-CE [8] and DACEN [9], which are created by replacing the MoE module in PAMoE with spatial attention (SA) and dual attention (DA) mechanisms, respectively.
|
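In PyTorch terms, the loss and metric read as below, with $\widehat{\mathbf{H}}_k$ and $\overline{\mathbf{H}}_k$ given as real-valued $(B, N, 2)$ tensors (batching the expectation over samples is an assumed convention):

```python
import torch

def l1_loss(H_hat, H_bar):
    """Training loss: the 1-norm ||H_hat - H_bar||_1."""
    return (H_hat - H_bar).abs().sum()

def nmse(H_hat, H_bar):
    """NMSE = E{ ||H_hat - H_bar||_F^2 / ||H_bar||_F^2 }, averaged over the batch."""
    num = ((H_hat - H_bar) ** 2).sum(dim=(-2, -1))
    den = (H_bar ** 2).sum(dim=(-2, -1))
    return (num / den).mean()
```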
| 155 |
+
|
| 156 |
+
In Fig. 4, we present the NMSE performance of different channel estimation schemes with $N^{\mathrm{te}} = 32$ . The proposed DL estimators with reduced pilot overhead $T$ outperform
|
| 157 |
+
|
| 158 |
+

|
| 159 |
+
Fig. 4. NMSE vs. SNR for different algorithms.
|
| 160 |
+
|
| 161 |
+

|
| 162 |
+
Fig. 5. NMSE vs. number of PAs $N$ for different algorithms.
|
| 163 |
+
|
| 164 |
+
TABLE I COMPLEXITY ANALYSIS OF PROPOSED DL ESTIMATORS
|
| 165 |
+
|
| 166 |
+
<table><tr><td rowspan="2">Estimator</td><td rowspan="2">N</td><td rowspan="2">Params(K)</td><td rowspan="2">FLOPs(M)</td><td colspan="2">Runtime (μs)</td></tr><tr><td>CPU</td><td>GPU</td></tr><tr><td rowspan="3">PAMoE</td><td>8</td><td></td><td></td><td></td><td></td></tr><tr><td>16</td><td>211.9</td><td>13.18</td><td>211.8</td><td>12.73</td></tr><tr><td>32</td><td></td><td></td><td></td><td></td></tr><tr><td rowspan="3">PAformer</td><td>8</td><td></td><td>4.377</td><td>74.62</td><td>3.814</td></tr><tr><td>16</td><td>545.9</td><td>8.756</td><td>128.2</td><td>7.943</td></tr><tr><td>32</td><td></td><td>17.51</td><td>226.8</td><td>12.95</td></tr></table>
|
| 167 |
+
|
| 168 |
+
the existing linear estimators and DL models. The superior accuracy of $PAMoE$ compared to $PA\text{former}$ primarily stems from $PAMoE$ 's more specialized design, particularly in its handling and exploitation of the pinching antenna positional information. $PA\text{former}$ , while robust and scalable due to its Transformer foundation, operates with a more general mechanism to support variable PA counts. In Fig. 5, we provide the NMSE performance of different channel estimation schemes for varying numbers of PAs, where the signal-to-noise ratio (SNR) is set to 0 dB. Compared to SA-RN-CE and DACEN, the proposed $PAMoE$ and $PA\text{former}$ trained with a fixed PA configuration exhibit excellent robustness and generalization for dynamic PAs. The proposed DL estimators possess sufficient zero-shot learning capabilities to deal with the distinct data distribution in the test stage. Table I summarizes the number of trainable parameters (Params), floating point
|
| 169 |
+
|
| 170 |
+
operations (FLOPs) and inference runtime of the proposed $PAMoE$ and $PA\text{former}$ , where the NVIDIA RTX 3090 GPU and the 12th Gen Intel(R) Core(TM) i9-12900K CPU are used as the inference platforms. The average inference runtime per channel estimation instance was calculated by averaging over 1000 Monte Carlo experiments. Firstly, for different numbers of PAs $N$ , the Params of the proposed $PAMoE$ and $PA\text{former}$ are the same due to the fixed network architecture. Secondly, the FLOPs of $PA\text{former}$ increase linearly with the number of PAs $N$ , as the higher-dimensional input tensor needs to be processed in the SABs of $PA\text{former}$ . In the $PAMoE$ model, the input tensor is uniformly padded to the same $N_{\max}$ via padding interpolation, and hence the FLOPs of $PAMoE$ remain constant for different $N$ . Finally, we observe that both $PAMoE$ and $PA\text{former}$ achieve channel estimation at the microsecond ( $\mu s$ ) level, demonstrating the practical feasibility of the proposed models for real-time deployment.
|
| 171 |
+
|
| 172 |
+
# V. CONCLUSIONS
|
| 173 |
+
|
| 174 |
+
In this letter, we investigated channel estimation approaches for PASS and proposed two DL estimators with advanced neural network architectures to infer a high-dimensional channel vector from inherently low-dimensional pilot observations. Specifically, the proposed PAMoE accommodates variable PA configurations and exploits multi-expert diversity in the MLP-Mixer for improved channel estimation. Meanwhile, the proposed PAformer is capable of handling an arbitrary number of PAs thanks to the self-attention mechanism. Numerical results demonstrated that the proposed DL estimators outperform conventional methods and significantly reduce the pilot overhead.
|
| 175 |
+
|
| 176 |
+
# REFERENCES
|
| 177 |
+
|
| 178 |
+
[1] X. Shao, Q. Jiang, and R. Zhang, "6D movable antenna based on user distribution: Modeling and optimization," IEEE Trans. Wireless Commun., vol. 24, no. 1, pp. 355-370, Jan. 2025.
|
| 179 |
+
[2] A. Fukuda, H. Yamamoto, H. Okazaki, Y. Suzuki, and K. Kawai, "Pinching antenna: Using a dielectric waveguide as an antenna," NTT DOCOMO Technical J., vol. 23, no. 3, pp. 5-12, Jan. 2022.
|
| 180 |
+
[3] Z. Ding, R. Schober, and H. Vincent Poor, "Flexible-antenna systems: A pinching-antenna perspective," IEEE Trans. Commun., 2025.
|
| 181 |
+
[4] D. Tyrovolas, S. A. Tegos, P. D. Diamantoulakis, S. Ioannidis, C. K. Liaskos, and G. K. Karagiannidis, "Performance analysis of pinching-antenna systems," IEEE Trans. Cognit. Commun. Networking, 2025.
|
| 182 |
+
[5] Z. Wang, C. Ouyang, X. Mu, Y. Liu, and Z. Ding, "Modeling and beamforming optimization for pinching-antenna systems," arXiv preprint arXiv:2502.05917, 2025.
|
| 183 |
+
[6] S. A. Tegos, P. D. Diamantoulakis, Z. Ding, and G. K. Karagiannidis, "Minimum data rate maximization for uplink pinching-antenna systems," IEEE Wireless Commun. Lett., 2025.
|
| 184 |
+
[7] F. Rezaei, S. M. Marvasti-Zadeh, C. Tellambura, and A. Maaref, "Adversarial score-based generative models for MMSE-achieving AmBC channel estimation," IEEE Wireless Commun. Lett., vol. 13, no. 4, pp. 1053-1057, Apr. 2024.
|
| 185 |
+
[8] X. Fan, Y. Zou, and L. Zhai, "Spatial-attention-based channel estimation in IRS-assisted mmWave MU-MISO systems," IEEE Internet Things J., vol. 11, no. 6, pp. 9801-9813, Mar. 2024.
|
| 186 |
+
[9] B. Zhou, X. Yang, S. Ma, F. Gao, and G. Yang, "Pay less but get more: A dual-attention-based channel estimation network for massive MIMO systems with low-density pilots," IEEE Trans. Wireless Commun., vol. 23, no. 6, pp. 6061-6076, Jun. 2024.
|
| 187 |
+
[10] W. Cai, J. Jiang, F. Wang, J. Tang, S. Kim, and J. Huang, "A survey on mixture of experts," arXiv preprint arXiv:2407.06204, 2024.
|
| 188 |
+
[11] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin, "Attention is all you need," in Proc. NeurIPS, vol. 30, Nov. 2017.
|
data/2025/2503_13xxx/2503.13268/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:745f6751708c039e10059419ff108897dbdde37b9fc1b124093db9a75f9592f7
|
| 3 |
+
size 197989
|
data/2025/2503_13xxx/2503.13268/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2503_13xxx/2503.13299/1182f1ca-70ec-4ed4-9c5b-10aa50e1a3e7_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2503_13xxx/2503.13299/1182f1ca-70ec-4ed4-9c5b-10aa50e1a3e7_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2503_13xxx/2503.13299/1182f1ca-70ec-4ed4-9c5b-10aa50e1a3e7_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:423a121648d785faaa52084a77ca065a42235c5b2e8791412509b8649a4fec91
|
| 3 |
+
size 1874397
|
data/2025/2503_13xxx/2503.13299/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2503_13xxx/2503.13299/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ebacccde40d8c8a162ff9aaced1bb103b30fdfd0359c1e15ca4d47e8dd4e20ea
|
| 3 |
+
size 415843
|
data/2025/2503_13xxx/2503.13299/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
data/2025/2503_13xxx/2503.13360/f41871e7-052a-41b7-a381-f27188074bdf_content_list.json
ADDED
|
@@ -0,0 +1,1896 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
[
  {
    "type": "text",
    "text": "Mitigating Visual Forgetting via Take-along Visual Conditioning for Multi-modal Long CoT Reasoning",
    "text_level": 1,
    "bbox": [166, 89, 831, 130],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Hai-Long Sun$^{1,2}$, Zhun Sun$^{4,5}$, Houwen Peng$^{3}$, Han-Jia Ye$^{1,2}$†",
    "bbox": [221, 145, 776, 162],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "$^{1}$ School of Artificial Intelligence, Nanjing University",
    "bbox": [284, 162, 712, 180],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "$^{2}$ National Key Laboratory for Novel Software Technology, Nanjing University",
    "bbox": [181, 180, 813, 196],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "$^{3}$ Tencent $^{4}$ Center for Language AI Research, Tohoku University",
    "bbox": [238, 196, 757, 212],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "$^{5}$ RIKEN Center for Advanced Intelligence Project",
    "bbox": [294, 212, 702, 230],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Abstract",
    "text_level": 1,
    "bbox": [260, 260, 339, 275],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Recent advancements in Large Language Models (LLMs) have demonstrated enhanced reasoning capabilities, evolving from Chain-of-Thought (CoT) prompting to advanced, product-oriented solutions like OpenAI o1. During our re-implementation of this model, we noticed that in multimodal tasks requiring visual input (e.g., geometry problems), Multimodal LLMs (MLLMs) struggle to maintain focus on the visual information; in other words, MLLMs suffer from a gradual decline in attention to visual information as reasoning progresses, causing text-over-relied outputs. To investigate this, we ablate image inputs during long-chain reasoning. Concretely, we truncate the reasoning process midway, then re-complete the reasoning process with the input image removed. We observe only a $\\sim 2\\%$ accuracy drop on MathVista's test-hard subset, revealing that the model's textual outputs dominate the following reasoning process. Motivated by this, we propose Take-along Visual Conditioning (TVC), a strategy that shifts image input to critical reasoning stages and compresses redundant visual tokens via dynamic pruning. This methodology helps the model retain attention to the visual components throughout the reasoning. Our approach achieves state-of-the-art performance on average across five mathematical reasoning benchmarks (+3.4 points vs. the previous SOTA), demonstrating the effectiveness of TVC in enhancing multimodal reasoning systems. The project page is available at https://sun-hailong.github.io/projects/TVC.",
    "bbox": [142, 288, 460, 772],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "1 Introduction",
    "text_level": 1,
    "bbox": [114, 784, 258, 800],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "Large Language Models (LLMs) have achieved significant advancements in natural language processing (NLP), particularly in the area of reasoning. These models have evolved from simple",
    "bbox": [112, 810, 487, 875],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "prompt-based Chain-of-Thought (CoT) (Wei et al., 2022) techniques to sophisticated product-oriented solutions like OpenAI's o1 (OpenAI, 2024b), DeepSeek-R1 (DeepSeek-AI, 2024), and Qwen-QVQ (QwenTeam, 2024), demonstrating iterative reasoning capabilities for complex multi-step tasks, which enables them to handle tasks that require multi-step thinking, logic, and knowledge integration. Recently, several works also extended the CoT reasoning process to MLLM settings through data-centric innovations. For instance, Math-LLaVA (Shi et al., 2024) pioneers domain-specific training with the MathV360K dataset, while MAmmoTH-VL (Guo et al., 2024) scales up multimodal CoT data generation.",
    "bbox": [507, 260, 884, 501],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "While such progress is notable in text-based domains, extending these advancements to multimodal scenarios presents unique challenges that transcend traditional language model boundaries. Reasoning in MLLMs requires fused understanding across different modalities; for example, in geometric reasoning tasks the model should interpret and reason about images alongside text. Therefore, the model's ability to integrate and maintain focus on both types of information is critical. Unlike text-only LLMs that reinforce problem context through linguistic recurrence, MLLMs struggle to sustain visual attention across reasoning steps. That is, as the length of the context increases, the model is more inclined to conduct the next step of reasoning based on the previously outputted text rather than the information of the image itself, which eventually leads to the continuation of the wrong text reasoning process and degraded model performance. We term this phenomenon visual forgetting.",
    "bbox": [507, 502, 884, 824],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "In this work, we conduct a diagnostic analysis of the visual forgetting effect within a long-chain reasoning system. The system processes multimodal Q&A tasks through a series of interconnected reasoning steps. We demonstrate significantly reduced attentional allocation to visual inputs during multi",
    "bbox": [507, 825, 884, 921],
    "page_idx": 0
  },
  {
    "type": "aside_text",
    "text": "arXiv:2503.13360v2 [cs.CV] 3 Jun 2025",
    "bbox": [21, 296, 60, 700],
    "page_idx": 0
  },
  {
    "type": "page_footnote",
    "text": "*Work done during their internship/employment at Tencent Hunyuan",
    "bbox": [112, 881, 485, 906],
    "page_idx": 0
  },
  {
    "type": "page_footnote",
    "text": "†Corresponding author: yehj@lamda.nju.edu.cn",
    "bbox": [137, 906, 430, 920],
    "page_idx": 0
  },
  {
    "type": "text",
    "text": "stage reasoning processes. More formally, our analysis: 1) truncates the reasoning process midway and removes the image embeddings; 2) regenerates subsequent reasoning trajectories; 3) evaluates the reasoning outcomes of the pre/post-ablation inference trajectories. Intuitively, the performance gap between normal reasoning and diagnostic reasoning reveals the model's dependency on generated text over original visual evidence. Our results (see Section 2.1) on the MathVista-Hard dataset show that removing the image midway through the reasoning only causes an insignificant performance degradation (i.e., $\\sim 2\\%$), indicating that the model completes the reasoning process based primarily on its output text. More importantly, we also observe that the model's dependency on the visual evidence diminishes over time, since the early removal of the image inputs could hurt accuracy by $\\sim 20\\%$. This suggests the model's reasoning employs both visual and textual information in the early stage, then over-relies on text history, which limits its full visual reasoning potential.",
    "bbox": [110, 84, 492, 437],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "Motivated by this, we propose a novel strategy to mitigate the visual forgetting effect and maintain visual attention throughout the reasoning process. Our methodology compresses and shifts the image input to the later stages of the reasoning process, ensuring the model integrates sufficient visual evidence into its reasoning. This approach results in improved performance and achieves state-of-the-art results on average across five mathematical reasoning benchmarks (i.e., $+3.4\\%$ vs. previous SOTA). Our findings highlight the effectiveness of this strategy in enhancing the performance of multimodal reasoning systems, providing a robust solution to the problem of visual forgetting in long-chain reasoning tasks.",
    "bbox": [110, 439, 489, 682],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "2 Take-along Visual Conditioning: Sustaining Visual Evidence for Multi-modal Long CoT Reasoning",
    "text_level": 1,
    "bbox": [112, 689, 448, 740],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "In this section, we first discuss our motivation and observations of the visual forgetting phenomenon in MLLM reasoning systems (Section 2.1). Then, we propose the Take-along Visual Conditioning (TVC) approach to mitigate visual forgetting and enhance the model's long-chain reasoning capabilities (Section 2.2).",
    "bbox": [110, 746, 489, 858],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "2.1 Capturing the Visual Forgetting",
    "text_level": 1,
    "bbox": [112, 868, 413, 885],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "Text-based reasoning systems often repeat key terms or phrases (e.g., restating \"triangle ABC\" or",
    "bbox": [112, 889, 489, 921],
    "page_idx": 1
  },
  {
    "type": "image",
    "img_path": "images/886b0c6086c57d45b8ba57f43d20ef52e6a0a72d4e445a4baad3ac0cf0ac4cbd.jpg",
    "image_caption": [
      "Figure 1: The visual forgetting phenomenon by removing the image at different reasoning stages. It shows that by the midpoint of the reasoning process, the model becomes less dependent on the image, causing text-over-relied outputs."
    ],
    "image_footnote": [],
    "bbox": [534, 84, 858, 287],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "\"equation (1)\" to keep the problem's context clear and focused. By restating these critical details, they create strong connections between each step of the reasoning process and the original problem's requirements. This repetition ensures the logic stays on track and remains consistent and easy to follow.",
    "bbox": [505, 397, 884, 494],
    "page_idx": 1
  },
  {
    "type": "equation",
    "text": "\n$$\n\\mathcal{C}_{\\mathrm{MLLM}} = f\\left(V, T_{1}, \\dots, T_{n}\\right) \\tag{1}\n$$\n",
    "text_format": "latex",
    "bbox": [596, 506, 882, 521],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "On the other hand, MLLMs struggle with this approach due to their design. As formalized in Equation (1), visual inputs $V$ are confined to the initial processing stages. Unlike text, this visual evidence isn't revisited or reinforced later in the reasoning process. Because there's no built-in way to keep visual information \"active\" or relevant throughout the task, the system's ability to connect visual details with text or logic weakens over time, leading to a progressive visual attention decay. The model is more likely to reason over previously outputted text, which becomes particularly problematic in visual reasoning tasks that require continuous validation of spatial relationships.",
    "bbox": [505, 533, 882, 760],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "We conduct two diagnostic analyses using the QVQ-72B-Preview model (QwenTeam, 2024) to capture this visual forgetting phenomenon quantitatively and qualitatively. On one hand, we remove the visual inputs at eight different stages to observe their impact. On the other hand, we depict the attention matrix to directly track the attention decay of the visual evidence over time.",
    "bbox": [507, 760, 884, 887],
    "page_idx": 1
  },
  {
    "type": "text",
    "text": "Progressive Image Removing. To assess the extent to which the reasoning process depends on",
    "bbox": [507, 889, 882, 921],
    "page_idx": 1
  },
  {
    "type": "image",
    "img_path": "images/77eb70636c452e011386d11fd027206bb93759242176bc30a6b4ace266dfcda4.jpg",
    "image_caption": [],
    "image_footnote": [],
    "bbox": [137, 84, 460, 278],
    "page_idx": 2
  },
  {
    "type": "image",
    "img_path": "images/daaae34465bb2ee45fe7e1f6871bcb2fa3f7f2424896dbd8b372eed08af85a45.jpg",
    "image_caption": [
      "Figure 2: Illustration of layer-level and token-level attention weights. (a) The layer-level attention weights of image tokens across different response token positions. (b) The token-level attention weights at the middle layer. It shows that the model's attention to the image gradually decreases during the reasoning process."
    ],
    "image_footnote": [],
    "bbox": [144, 280, 460, 391],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "previously generated text, we first perform a normal reasoning process, then reset the KV cache at various stages of the reasoning process. This effectively removes image tokens and forces subsequent steps to rely solely on text-based information. Specifically, the reasoning process is divided into $K = 8$ intervals based on output token counts, regardless of the length of the normal reasoning process, with visual input progressively masked by resetting the KV cache at different cutoff positions $k \\in \\{0, 1, \\dots, K - 1\\}$. In other words, the first $k/8$ part of the normal reasoning process is now employed as a prompt, and the model re-completes the reasoning process without image inputs. Furthermore, we discovered that for some questions (30.9% of the MathVista dataset), the model could answer correctly using only the text-based prompt. Consequently, we excluded these questions and designated the remaining dataset as the MathVista-Hard dataset.",
    "bbox": [112, 517, 489, 838],
    "page_idx": 2
  },
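The cutoff mechanics of this ablation are easy to prototype. A minimal sketch, assuming a stubbed decoding call (`generate` is a hypothetical stand-in for the real MLLM inference, not the paper's code):

```python
# Sketch of the progressive image-removal ablation: keep the first k/8 of a
# normal reasoning trace as a text-only prompt, then re-complete without the
# image. `generate` is a stub so the control flow runs as-is.
from typing import List, Optional

K = 8  # number of cutoff positions, as described above

def generate(prompt_tokens: List[str], image: Optional[str]) -> List[str]:
    # Hypothetical decoding call; a real MLLM would continue from the prompt,
    # attending to image tokens only when `image` is provided.
    tail = "<no-image completion>" if image is None else "<with-image completion>"
    return prompt_tokens + [tail]

def ablate_at_cutoff(question: List[str], normal_trace: List[str],
                     k: int) -> List[str]:
    cut = len(normal_trace) * k // K          # first k/8 of the normal trace
    prompt = question + normal_trace[:cut]    # reused as a text-only prompt
    return generate(prompt, image=None)       # KV cache rebuilt without image

normal_trace = generate(["Q: example"], image="geometry.png")
ablated = [ablate_at_cutoff(["Q: example"], normal_trace, k) for k in range(K)]
```

Comparing the answer from each ablated run against the normal run, per cutoff `k`, yields the accuracy-versus-stage curve of Figure 1.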
  {
    "type": "text",
    "text": "As illustrated in Figure 1, we observe that the performance is 40.9 at $k = 4$ versus 43.1 for the normal reasoning. This minimal $2.2\\%$ gap suggests that the model overly relies on text outputs during the later reasoning stages rather than on the image.",
    "bbox": [112, 841, 489, 921],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "This indicates that once the model completes half of the reasoning stages, it becomes overly reliant on its own generated CoT. Over-reliance on this self-generated reasoning worsens the model's tendency to forget visual evidence over time, which can result in hallucinations during lengthy, multi-step reasoning tasks. This result also reveals a near-exponential forgetting effect in performance as $k$ increases, which we formalize as:",
    "bbox": [507, 84, 884, 229],
    "page_idx": 2
  },
  {
    "type": "equation",
    "text": "\n$$\n\\mathcal{R}(k) = \\mathcal{R}_{\\text{full}} - \\Delta_{\\text{visual}}(k), \\quad \\Delta_{\\text{visual}}(k) \\propto e^{-k}, \\tag{2}\n$$\n",
    "text_format": "latex",
    "bbox": [515, 241, 882, 274],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "where $\\mathcal{R}_{\\mathrm{full}}$ represents the full multimodal reasoning performance, and $\\Delta_{\\mathrm{visual}}(k)$ quantifies the performance degradation caused by visual masking at position $k$.",
    "bbox": [507, 275, 882, 338],
    "page_idx": 2
  },
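The claimed $\Delta_{\text{visual}}(k) \propto e^{-k}$ shape can be checked with a log-linear fit. A synthetic illustration (the deltas below are placeholders, not the paper's measurements):

```python
# Fit log Delta_visual(k) = a - b*k by least squares; b near 1 supports the
# e^{-k} form in Eq. (2). The deltas here are synthetic placeholders.
import numpy as np

k = np.arange(8)
delta_visual = 20.0 * np.exp(-k)                 # placeholder accuracy drops (%)
slope, intercept = np.polyfit(k, np.log(delta_visual), 1)
print(f"fitted decay rate b = {-slope:.2f}")     # ~1.00 for an e^{-k} curve
```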
  {
    "type": "text",
    "text": "Visual Attention Decay. To more intuitively observe the changes in visual feature attention across different stages of reasoning, we investigate the layer-level attention weights at various response token positions and the token-level attention weights at layer 16. Given that an MLLM's attention weights reflect the focus on tokens and influence the decision-making process, we first analyze the attention weights at each layer of the MLLM. Specifically, for each layer, we calculate the proportion of attention weights on all image tokens.",
    "bbox": [507, 340, 884, 517],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "As shown in Figure 2(a), we observe that at the $1/8$ position of the reasoning process, the model effectively focuses on the visual inputs. However, as reasoning progresses, despite fluctuations in attention to visual tokens across layers, the model's overall attention to visual evidence gradually decreases, leading to visual forgetting. Next, following the methodology of FastV (Chen et al., 2024a), we analyze the attention maps for several cases and find that the model predominantly focuses on previously generated text tokens rather than the input image. After approximately the first $20\\%$ of tokens, the attention on image inputs diminishes significantly, as illustrated in Figure 2(b). Both observations indicate a phenomenon of visual memory degradation, revealing the model's limitations in maintaining consistent attention to visual inputs throughout the reasoning process.",
    "bbox": [507, 518, 882, 808],
    "page_idx": 2
  },
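The layer-level statistic above is just the attention mass that response tokens place on image-token keys. A minimal sketch, with a random tensor standing in for real attention outputs and assumed image-token positions:

```python
# Fraction of attention mass on image tokens, per layer, averaged over heads.
# Shapes follow the common (layers, heads, queries, keys) convention.
import torch

layers, heads, n_resp, n_ctx = 4, 8, 32, 128
attn = torch.rand(layers, heads, n_resp, n_ctx).softmax(dim=-1)  # stand-in
image_keys = slice(0, 40)  # assumed positions of image tokens in the context

img_fraction = attn[..., image_keys].sum(-1).mean(1)  # (layers, n_resp)
per_layer = img_fraction.mean(-1)                     # average over responses
print(per_layer)  # one image-attention proportion per layer
```

Plotting `img_fraction` along the response axis, per layer, reproduces the kind of view shown in Figure 2(a).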
  {
    "type": "text",
    "text": "2.2 Take-along Visual Conditioning",
    "text_level": 1,
    "bbox": [507, 819, 806, 835],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "In this section, we introduce our solution to tackle this problem in detail. We propose Take-along Visual Conditioning (TVC), a dynamic image retention mechanism that re-introduces visual inputs at strategic intervals throughout the reasoning pro",
    "bbox": [507, 841, 884, 921],
    "page_idx": 2
  },
  {
    "type": "text",
    "text": "cess. TVC mitigates visual attention decay by periodically reaffirming visual information, akin to human problem-solving behaviors where individuals frequently refer back to visual inputs. Our approach enhances the model's ability to incorporate visual information continuously, improving its long-chain reasoning capacity by ensuring that visual evidence is revisited during critical decision-making moments.",
    "bbox": [112, 84, 489, 227],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "The TVC method consists of two key stages: training and testing. In the training stage, we introduce Dynamic Visual Reaffirmation (DVR), which guides the model through iterative reinforcement of visual evidence during long reasoning chains. In the testing phase, we present Periodic Visual Calibration (PVC), where visual reactivation is periodically triggered at self-reflection intervals. To prevent the model from forgetting previous text-based reasoning steps due to an excessive number of image tokens, we adopt image compression through adaptive pooling to reduce the image token size while preserving spatial semantics. This dual-modality engagement mechanism ensures consistent interaction between textual reasoning and visual evidence. We present an illustration of the TVC system in Figure 3.",
    "bbox": [115, 230, 489, 502],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "Dynamic Visual Reaffirmation. Our dynamic visual reaffirmation training strategy combines two key components: 1) Data Curation: We curate long-chain reasoning data using the pipeline described in Section 3, sourced from high-quality academic datasets (e.g., MathV360K, Geo170K, and LLaVA-OneVision). This process yields a high-quality dataset optimized for long-chain reasoning training. 2) Visual Content Injection: While the curated data ensures correctness, the QVQ model inherently lacks the ability to iteratively reference visual inputs during reasoning. Therefore, we manually re-inject the visual content (visual embeddings and bridging prompt) to trigger visual re-activation at predefined self-reflection intervals. Specifically, given the initial multimodal input $\\mathcal{M}_0 = (V,T_0)$, DVR performs visual reactivation at self-reflection intervals $\\{r_1,\\dots,r_m\\}$:",
    "bbox": [112, 505, 489, 795],
    "page_idx": 3
  },
  {
    "type": "equation",
    "text": "\n$$\n\\mathcal{M}_{i} = \\left(V, \\left[ T_{\\text{prev}}; \\underbrace{\\text{Prompt}}_{\\text{Re-activation}}; T_{\\text{new}} \\right]\\right) \\text{ at step } r_{i} \\tag{3}\n$$\n",
    "text_format": "latex",
    "bbox": [124, 809, 487, 871],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "where $T_{\\mathrm{prev}}$ represents the previous reasoning steps and $T_{\\mathrm{new}}$ denotes the new reasoning steps that are based on prior reasoning and reintroduce focus on",
    "bbox": [112, 873, 487, 921],
    "page_idx": 3
  },
  {
    "type": "image",
    "img_path": "images/a5c52460f9451914f821519dcfa5dc5a3c2c8dd7793690a978a99f9e91aaa6c5.jpg",
    "image_caption": [],
    "image_footnote": [],
    "bbox": [514, 82, 877, 153],
    "page_idx": 3
  },
  {
    "type": "image",
    "img_path": "images/8ded0c6d380ffc66b6e04e3afc988892b918477e055637ca5f1fcff5d86fce63.jpg",
    "image_caption": [
      "Figure 3: Overview of TVC System Design. We enable the model to have take-along visual conditioning capabilities through two stages: training and inference."
    ],
    "image_footnote": [],
    "bbox": [512, 155, 877, 280],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "visual information. The bridging prompt is employed to hint at the existence of the image, e.g., \"Let me see the image again.\" To improve efficiency, our initial implementation adopts midpoint reactivation ($m = 1$, $r_1 = 0.5L$ for $L$-step chains).",
    "bbox": [507, 366, 882, 445],
    "page_idx": 3
  },
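Splicing the bridging prompt into a training trace, as in Eq. (3), is simple list surgery. A minimal sketch; the placeholder token and prompt wording are assumptions for illustration:

```python
# Inject a visual-reaffirmation segment at the midpoint r1 = 0.5L of an
# L-step reasoning trace. "<image>" is a hypothetical marker that would be
# replaced by the re-injected (compressed) visual tokens downstream.
from typing import List

BRIDGE = "Let me see the image again."
IMAGE_PLACEHOLDER = "<image>"

def inject_reaffirmation(steps: List[str], ratio: float = 0.5) -> List[str]:
    r1 = max(1, int(len(steps) * ratio))          # reactivation step index
    return steps[:r1] + [BRIDGE, IMAGE_PLACEHOLDER] + steps[r1:]

trace = [f"Step {i}: ..." for i in range(1, 7)]
print(inject_reaffirmation(trace))  # T_prev ; Prompt ; T_new, per Eq. (3)
```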
  {
    "type": "text",
    "text": "During the self-reflection phase of the reasoning steps, we randomly select $m$ positions to inject visual content. Specifically, we inject reactivation prompts and regenerate visual captions using the model's intrinsic capabilities. This forces joint attention to both textual reasoning and visual evidence. By leveraging the model's intrinsic image captioning capability, we continuously reinforce visual information throughout the reasoning process. This ensures that the model incorporates image evidence during reflection, rather than relying solely on textual reasoning.",
    "bbox": [507, 449, 884, 642],
    "page_idx": 3
  },
  {
    "type": "text",
    "text": "Periodic Visual Calibration. Calibrating visual attention plays a crucial role in enhancing long-chain reasoning capabilities. After training our model, we further design the periodic visual calibration process. Specifically, we coordinate operations during reactivation as follows: 1) Token Compression: We first compress visual tokens using average pooling to prevent text-based reasoning from forgetting visual information. 2) Visual Cache Reset: We then prepend an instruction (bridging prompt) to reintroduce the image and re-inject image tokens by resetting the KV cache of the generation process.",
    "bbox": [507, 645, 882, 838],
    "page_idx": 3
  },
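The token-compression step maps naturally onto 2-D average pooling over the visual-token grid. A minimal sketch, assuming a 24x24 grid and hidden size 1024 (both illustrative):

```python
# 4x4 average pooling of visual tokens before re-injection (PVC step 1).
# avg_pool2d expects (N, C, H, W), so the (tokens, dim) matrix is reshaped.
import torch
import torch.nn.functional as F

H, W, D = 24, 24, 1024                  # assumed token grid and hidden size
tokens = torch.randn(H * W, D)          # (num_tokens, dim)

grid = tokens.reshape(H, W, D).permute(2, 0, 1).unsqueeze(0)  # (1, D, H, W)
pooled = F.avg_pool2d(grid, kernel_size=4)                    # (1, D, 6, 6)
compressed = pooled.squeeze(0).flatten(1).T                   # (36, dim)
print(tokens.shape, "->", compressed.shape)  # 16x fewer visual tokens
```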
  {
    "type": "text",
    "text": "We also provide an example of how PVC is implemented in the case study section (Section 4.5). PVC both improves reasoning efficiency and prevents the model from forgetting previous reasoning steps due to an overload of visual tokens.",
    "bbox": [507, 841, 884, 921],
    "page_idx": 3
  },
  {
    "type": "image",
    "img_path": "images/dcbf95c1a314d7c6dcc0dffd0f08a9b502ceb96620470fa2a70540736ef0cf49.jpg",
    "image_caption": [
      "Figure 4: Data Generation Pipeline of TVC. We use iterative distillation to collect long-chain reasoning data, followed by a comprehensive response filtering process to ensure high-quality reasoning."
    ],
    "image_footnote": [],
    "bbox": [119, 82, 480, 236],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "3 Data-Centric Implementation of Multimodal Reasoning System",
    "text_level": 1,
    "bbox": [114, 338, 428, 370],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "In this section, we briefly describe our implementation of the multimodal reasoning system through a scalable curated data generation pipeline.",
    "bbox": [112, 392, 489, 441],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "3.1 Long-Chain Reasoning Data Collection",
    "text_level": 1,
    "bbox": [114, 468, 468, 483],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "Prior research (Qin et al., 2024; Jiang et al., 2024) has identified two dominant paradigms for constructing long-chain reasoning systems: (1) explicit search-based methods, which utilize structures such as Monte Carlo Tree Search (MCTS) combined with specially trained reward models to guide the search process toward optimal solutions, and (2) instruction distillation approaches, which fine-tune models on curated long chain-of-thought (CoT) datasets. To efficiently develop an MLLM with long-chain reasoning capabilities, we adopt the distillation paradigm. In this section, we describe the distillation process and present the data generation pipeline aimed at enhancing the reasoning capability of MLLMs.",
    "bbox": [112, 497, 489, 739],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "Our distillation pipeline begins with aggregating publicly available reasoning datasets (e.g., MathV360K (Shi et al., 2024) and Geo170K (Gao et al., 2023)). Through an iterative process of distillation and response filtering, we progressively enhance the model's long-chain reasoning capabilities. Specifically, the teacher model, QVQ-72B-Preview, generates long CoT reasoning data, while the student model, Qwen2-VL, undergoes supervised fine-tuning on the filtered CoT data to enhance its reasoning performance.",
    "bbox": [112, 744, 490, 921],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "3.2 Iterative Distillation with Reject Sampling",
    "text_level": 1,
    "bbox": [507, 84, 880, 101],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "After obtaining the long-chain reasoning responses, we employ an assessment pipeline to ensure data quality. Specifically, we introduce a dual-temperature sampling mechanism to optimize data quality through variance exploitation.",
    "bbox": [507, 105, 882, 185],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "Deterministic Initial Sampling. For the first-stage data generation, we employ temperature $\\tau = 0$ to obtain QVQ's most confident reasoning paths:",
    "bbox": [507, 186, 880, 233],
    "page_idx": 4
  },
  {
    "type": "equation",
    "text": "\n$$\n\\mathcal{D}_{\\text{init}} = \\left\\{\\left(\\mathbf{x}, \\mathbf{y}_{\\mathrm{CoT}}\\right) \\sim \\mathcal{P}_{\\mathrm{QVQ}}(\\cdot \\mid \\mathbf{x}; \\tau = 0) \\right\\}, \\tag{4}\n$$\n",
    "text_format": "latex",
    "bbox": [529, 246, 882, 263],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "where $\\mathbf{y}_{\\mathrm{CoT}}$ represents the response generated by QVQ. This ensures that the model generates the most probable reasoning path for each input. Subsequently, we use these high-confidence responses as a foundation for further refinement in later stages of data generation.",
    "bbox": [507, 275, 882, 370],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "Answer-Centric Reject Sampling. To ensure the quality of the data, we employ an LLM-as-a-Judge approach for answer-centric reject sampling. Specifically, we use a strong LLM (e.g., Qwen2.5-72B-Instruct) as the verifier and prompt the model to output a 'yes' or 'no' in JSON format, indicating whether the long-chain reasoning answer is correct, i.e., $\\mathcal{V}_{\\mathrm{valid}} = \\{\\mathbf{y}_{\\mathrm{CoT}} \\mid \\mathrm{Match}(\\mathbf{y}_{\\mathrm{CoT}},\\mathbf{y}_{\\mathrm{gt}})\\}$. After this process, we obtain $\\sim 200\\mathrm{K}$ high-quality samples.",
    "bbox": [507, 372, 884, 517],
    "page_idx": 4
  },
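The verifier call reduces to prompting for a one-field JSON verdict and parsing it. A minimal sketch; the prompt wording is an assumption, and `judge` stubs the call to the verifier LLM:

```python
# LLM-as-a-Judge reject sampling: keep only responses the verifier marks
# correct. `judge` is a stub for a real call to a strong verifier model.
import json

JUDGE_TEMPLATE = (
    "Ground-truth answer: {gt}\nModel reasoning: {cot}\n"
    'Does the reasoning reach the ground-truth answer? '
    'Reply as JSON: {{"correct": "yes"}} or {{"correct": "no"}}'
)

def judge(prompt: str) -> str:
    return '{"correct": "yes"}'  # stubbed verifier response

def is_valid(cot: str, gt: str) -> bool:
    verdict = json.loads(judge(JUDGE_TEMPLATE.format(gt=gt, cot=cot)))
    return verdict.get("correct") == "yes"

samples = [("...long CoT trace...", "42")]
kept = [(cot, gt) for cot, gt in samples if is_valid(cot, gt)]
```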
  {
    "type": "text",
    "text": "Best-of-N Error Correction. While initial sampling with temperature $\\tau = 0$ yields a set of data, there are substantial residual errors $(\\mathcal{D}_{\\mathrm{error}})$ where QVQ's responses $(\\mathbf{y}_{\\mathrm{CoT}})$ do not match the ground-truth answers (55.8% of $\\mathcal{D}_{\\mathrm{init}}$). To fully leverage the available open-source data, we implement a contrastive regeneration strategy using $\\tau = 1$:",
    "bbox": [507, 517, 882, 630],
    "page_idx": 4
  },
  {
    "type": "equation",
    "text": "\n$$\n\\mathcal{D}_{\\text{corrected}} = \\bigcup_{\\mathbf{x} \\in \\mathcal{D}_{\\text{error}}} \\left\\{\\arg\\min_{\\mathbf{y}^{i} \\in \\mathcal{Y}_{\\text{valid}}} \\operatorname{Length}\\left(\\mathbf{y}^{i}\\right) \\right\\}_{i=1}^{8}, \\tag{5}\n$$\n",
    "text_format": "latex",
    "bbox": [507, 640, 884, 695],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "where $\\mathcal{Y}_{\\mathrm{valid}}$ denotes the subset of 8 regenerated responses (at $\\tau = 1$) that satisfy answer correctness. For cases with multiple valid responses, we prioritize minimal reasoning token length to enhance conciseness and efficiency:",
    "bbox": [507, 696, 882, 776],
    "page_idx": 4
  },
  {
    "type": "equation",
    "text": "\n$$\n\\operatorname{Length}(\\mathbf{y}) = \\sum_{t=1}^{T} \\mathbb{I}\\left(w_{t} \\notin \\{[\\mathrm{PAD}], [\\mathrm{SEP}]\\}\\right) \\tag{6}\n$$\n",
    "text_format": "latex",
    "bbox": [524, 787, 882, 831],
    "page_idx": 4
  },
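Eqs. (5) and (6) together amount to: sample 8 candidates at $\tau = 1$, keep the judged-correct ones, and return the shortest by non-special-token count. A minimal sketch with stubbed sampler and judge:

```python
# Best-of-N error correction: regenerate, filter by correctness, pick the
# shortest valid response. `sample` and `is_valid` are stand-ins for the
# real generator and the judge from the previous sketch.
from typing import Callable, List, Optional

SPECIAL = {"[PAD]", "[SEP]"}

def length(tokens: List[str]) -> int:
    return sum(tok not in SPECIAL for tok in tokens)    # Eq. (6)

def best_of_n(x: str,
              sample: Callable[..., List[str]],
              is_valid: Callable[[List[str]], bool],
              n: int = 8) -> Optional[List[str]]:
    candidates = [sample(x, temperature=1.0) for _ in range(n)]
    valid = [y for y in candidates if is_valid(y)]
    return min(valid, key=length) if valid else None    # Eq. (5)

recovered = best_of_n("question",
                      sample=lambda x, temperature: ["step", "[SEP]", "ans"],
                      is_valid=lambda y: True)
```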
  {
    "type": "text",
    "text": "This two-stage sampling achieves partial error recovery while reducing the average token count to minimize meaningless self-reflection, effectively balancing correctness and efficiency. To further enhance the quality of the dataset, we also perform",
    "bbox": [507, 841, 884, 921],
    "page_idx": 4
  },
  {
    "type": "text",
    "text": "dynamic token truncation and reflection word pruning. This approach helps reduce the ineffective reflection parts in the dataset, thereby mitigating the issue of the model being unable to complete its answers. More details are in Section B.",
    "bbox": [112, 84, 487, 164],
    "page_idx": 5
  },
  {
    "type": "text",
    "text": "4 Experiments",
    "text_level": 1,
    "bbox": [112, 175, 260, 192],
    "page_idx": 5
  },
  {
    "type": "text",
    "text": "We conduct comprehensive experiments across multiple vision-language benchmarks to demonstrate the effectiveness of our method. Section 4.1 provides implementation details for TVC. In Section 4.3, we present key results on visual reasoning tasks, supplemented with findings on general image understanding. Section 4.4 details ablation studies to evaluate the influence of critical design decisions. Finally, Section 4.5 presents visualizations and case studies to illustrate the method's operational characteristics and insights derived from it.",
    "bbox": [112, 200, 489, 376],
    "page_idx": 5
  },
  {
    "type": "text",
    "text": "4.1 Training Recipe",
    "text_level": 1,
    "bbox": [112, 387, 285, 403],
    "page_idx": 5
  },
  {
    "type": "text",
    "text": "We integrate the TVC system with MLLMs of varying scales to demonstrate the effectiveness and generalizability of our approach. Initial implementation with Qwen2-VL-7B-Instruct confirmed the method's validity. To further evaluate scalability and establish robust baselines against state-of-the-art MLLMs, we expanded the approach to a 72B model. Prior to training TVC, we follow the long-chain reasoning pipeline described earlier. We use the LLaMA-Factory (Zheng et al., 2024) framework, with a learning rate of 2e-5, a batch size of 256, and 5 training epochs. During optimization, only the LLM parameters and cross-modal connector are trained, while the visual encoder remains frozen. The training process requires 10 hours on a $64 \\times$ H20 GPU setup for the 7B model and approximately 4 days for the 72B model. Additional details are presented in Table 4.",
    "bbox": [112, 407, 489, 697],
    "page_idx": 5
  },
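The freezing scheme (train the LLM and connector, freeze the vision tower) is framework-agnostic. A generic torch sketch with illustrative module names, not the authors' LLaMA-Factory configuration:

```python
# Train only the LLM and cross-modal connector; freeze the visual encoder.
# Module names are illustrative; they vary across MLLM codebases.
import torch.nn as nn

class ToyMLLM(nn.Module):
    def __init__(self):
        super().__init__()
        self.visual_encoder = nn.Linear(16, 16)  # stands in for a ViT
        self.connector = nn.Linear(16, 16)       # cross-modal projector
        self.llm = nn.Linear(16, 16)             # stands in for the LLM

model = ToyMLLM()
for p in model.visual_encoder.parameters():
    p.requires_grad = False                      # freeze the vision tower

trainable = [n for n, p in model.named_parameters() if p.requires_grad]
print(trainable)  # connector.* and llm.* only
```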
|
| 831 |
+
{
|
| 832 |
+
"type": "text",
|
| 833 |
+
"text": "4.2 Evaluation Setup",
|
| 834 |
+
"text_level": 1,
|
| 835 |
+
"bbox": [
|
| 836 |
+
112,
|
| 837 |
+
708,
|
| 838 |
+
294,
|
| 839 |
+
722
|
| 840 |
+
],
|
| 841 |
+
"page_idx": 5
|
| 842 |
+
},
|
| 843 |
+
{
|
| 844 |
+
"type": "text",
|
| 845 |
+
"text": "We conduct a comprehensive experimental analysis across various visual reasoning benchmarks that require advanced visual reasoning skills. To ensure a well-rounded evaluation, we select several widely recognized and representative benchmarks, including MathVista (Lu et al., 2024a), MathVerse (Zhang et al., 2024a), MathVision (Wang et al., 2024a), Dynamath (Zou et al., 2024), and OlympiadBench (He et al., 2024). MathVista consists of 6,141 examples that require fine-grained, deep visual understanding and compositional reasoning. MathVerse contains 2,612 multi-subject",
|
| 846 |
+
"bbox": [
|
| 847 |
+
112,
|
| 848 |
+
728,
|
| 849 |
+
489,
|
| 850 |
+
921
|
| 851 |
+
],
|
| 852 |
+
"page_idx": 5
|
| 853 |
+
},
|
| 854 |
+
{
|
| 855 |
+
"type": "image",
|
| 856 |
+
"img_path": "images/b67cf65f88ddd239a7d839548d4229efad69558040285509805ad4ef26fee458.jpg",
|
| 857 |
+
"image_caption": [
|
| 858 |
+
"Figure 5: Ablations on the amount of training data. TVC benefits from data scaling, continually improving the reasoning capabilities."
|
| 859 |
+
],
|
| 860 |
+
"image_footnote": [],
|
| 861 |
+
"bbox": [
|
| 862 |
+
514,
|
| 863 |
+
84,
|
| 864 |
+
877,
|
| 865 |
+
274
|
| 866 |
+
],
|
| 867 |
+
"page_idx": 5
|
| 868 |
+
},
|
| 869 |
+
{
|
| 870 |
+
"type": "text",
|
| 871 |
+
"text": "math problems from a variety of sources. MathVision includes 3,040 high-quality mathematical problems sourced from established mathematics competitions. OlympiadBench features 8,476 bilingual multimodal problems tailored to Olympic-level mathematics and physics competitions. These benchmarks evaluate problem-solving abilities in mathematics, and following standard practice, we use GPT-4o-mini as the evaluator. Following the VLMEvalKit guidelines, we exclude the text-only split from MathVerse and the theorem-proof sections from OlympiadBench. For a fair comparison, we conduct evaluations using the testmini sets of MathVerse and MathVista. Fast evaluation is made possible through the use of the VLMEvalKit (Duan et al., 2024) and vLLM (Kwon et al., 2023).",
|
| 872 |
+
"bbox": [
|
| 873 |
+
505,
|
| 874 |
+
356,
|
| 875 |
+
884,
|
| 876 |
+
613
|
| 877 |
+
],
|
| 878 |
+
"page_idx": 5
|
| 879 |
+
},
|
| 880 |
+
{
|
| 881 |
+
"type": "text",
|
| 882 |
+
"text": "4.3 Main Results on Visual Reasoning",
|
| 883 |
+
"text_level": 1,
|
| 884 |
+
"bbox": [
|
| 885 |
+
507,
|
| 886 |
+
626,
|
| 887 |
+
823,
|
| 888 |
+
642
|
| 889 |
+
],
|
| 890 |
+
"page_idx": 5
|
| 891 |
+
},
|
| 892 |
+
{
|
| 893 |
+
"type": "text",
|
| 894 |
+
"text": "The results in Table 1 demonstrate the effectiveness and generalizability of TVC across multiple visual reasoning benchmarks. Notably, our model achieves a $16.7\\%$ improvement over QVQ-72B-Preview on MathVision and a $17.6\\%$ gain on MathVerse, highlighting enhanced reasoning capabilities. Unlike conventional datasets where textual descriptions may include implicit visual cues, MathVerse is an all-around visual math benchmark specifically designed for equitable, in-depth evaluation of MLLMs. The significant gains on MathVerse underscore the significance of TVC, given the benchmark's unique design principles. Furthermore, the TVC-7B model, despite its smaller size compared to counterparts, achieves competitive performance, even outperforming leading MLLMs in multiple cases. This demonstrates the robustness of our",
|
| 895 |
+
"bbox": [
|
| 896 |
+
505,
|
| 897 |
+
646,
|
| 898 |
+
884,
|
| 899 |
+
920
|
| 900 |
+
],
|
| 901 |
+
"page_idx": 5
|
| 902 |
+
},
|
| 903 |
+
{
|
| 904 |
+
"type": "table",
|
| 905 |
+
"img_path": "images/4e2812122af8c722544df25a6b2cabd42befc062c02de7576a7911f9529d2ab6.jpg",
|
| 906 |
+
"table_caption": [
|
| 907 |
+
"Table 1: Results on Visual Reasoning Tasks. We conduct evaluation experiments across 6 benchmarks, covering both general reasoning and task-specific reasoning assessments. TVC exhibits notable effectiveness and generalizability when applied to Qwen2-VL, surpassing other state-of-the-art MLLMs by a large margin."
|
| 908 |
+
],
|
| 909 |
+
"table_footnote": [],
|
| 910 |
+
"table_body": "<table><tr><td>Model</td><td>Size</td><td>MathVista</td><td>MathVision</td><td>MathVerse</td><td>Dynamath</td><td>OlympiadBench</td><td>Average</td></tr><tr><td>MiniCPM-V-2.6 (Yadav et al., 2025)</td><td>8B</td><td>60.8</td><td>18.4</td><td>17.6</td><td>9.8</td><td>-</td><td>-</td></tr><tr><td>VITA-1.5 (Fu et al., 2025a)</td><td>8B</td><td>66.2</td><td>19.5</td><td>23.4</td><td>9.6</td><td>-</td><td>-</td></tr><tr><td>LLaVA-COT (Xu et al., 2024)</td><td>11B</td><td>52.5</td><td>19.9</td><td>22.6</td><td>7.8</td><td>-</td><td>-</td></tr><tr><td>Qwen2-VL (Wang et al., 2024b)</td><td>7B</td><td>60.9</td><td>16.3</td><td>24.6</td><td>11.0</td><td>3.2</td><td>23.2</td></tr><tr><td>InternVL2.5 (Chen et al., 2024b)</td><td>8B</td><td>64.5</td><td>17.0</td><td>22.8</td><td>9.4</td><td>0.1</td><td>22.8</td></tr><tr><td>POINTS1.5 (Liu et al., 2024b)</td><td>8B</td><td>66.4</td><td>22.0</td><td>26.6</td><td>14.2</td><td>-</td><td>-</td></tr><tr><td>Ovis1.6-Gemma2 (Lu et al., 2024b)</td><td>27B</td><td>70.2</td><td>20.6</td><td>37.8</td><td>17.0</td><td>-</td><td>-</td></tr><tr><td>InternVL2.5-COT (Chen et al., 2024b)</td><td>78B</td><td>71.4</td><td>32.5</td><td>40.1</td><td>28.5</td><td>-</td><td>-</td></tr><tr><td>LLaVA-OneVision (Li et al., 2024)</td><td>72B</td><td>67.1</td><td>25.3</td><td>27.2</td><td>15.6</td><td>-</td><td>-</td></tr><tr><td>Qwen2-VL (Wang et al., 2024b)</td><td>72B</td><td>69.7</td><td>26.6</td><td>36.2</td><td>20.0</td><td>10.3</td><td>32.6</td></tr><tr><td>QVQ-72B-preview (QwenTeam, 2024)</td><td>72B</td><td>71.4</td><td>35.9</td><td>41.5</td><td>30.7</td><td>20.4</td><td>40.0</td></tr><tr><td>TVC</td><td>7B</td><td>68.1</td><td>22.7</td><td>38.9</td><td>15.1</td><td>9.8</td><td>30.9</td></tr><tr><td>TVC</td><td>72B</td><td>72.2</td><td>41.9</td><td>48.8</td><td>30.0</td><td>24.3</td><td>43.4</td></tr></table>",
|
| 911 |
+
"bbox": [
|
| 912 |
+
117,
|
| 913 |
+
135,
|
| 914 |
+
884,
|
| 915 |
+
331
|
| 916 |
+
],
|
| 917 |
+
"page_idx": 6
|
| 918 |
+
},
|
| 919 |
+
{
|
| 920 |
+
"type": "table",
|
| 921 |
+
"img_path": "images/56fb82a543aa388c80425d95ebaaed59088b49d07b6fa1bfe12dbb29bf397700.jpg",
|
| 922 |
+
"table_caption": [
|
| 923 |
+
"Table 2: Ablations on the TVC System. TVC enhances reasoning capabilities, showing significant improvements on both general and task-specific reasoning benchmarks."
|
| 924 |
+
],
|
| 925 |
+
"table_footnote": [],
|
| 926 |
+
"table_body": "<table><tr><td>Method</td><td>MathVista</td><td>MathVision</td><td>MathVerse</td><td>Avg</td></tr><tr><td>Baseline</td><td>60.9</td><td>16.3</td><td>24.6</td><td>33.9</td></tr><tr><td>Vanilla - Direct SFT</td><td>63.5</td><td>19.8</td><td>31.6</td><td>38.3</td></tr><tr><td>TVC w/o PVC</td><td>66.7</td><td>21.8</td><td>35.6</td><td>41.4</td></tr><tr><td>TVC w/o DVR</td><td>66.2</td><td>22.3</td><td>34.7</td><td>41.0</td></tr><tr><td>TVC Full</td><td>68.1</td><td>22.7</td><td>38.9</td><td>43.2</td></tr></table>",
|
| 927 |
+
"bbox": [
|
| 928 |
+
117,
|
| 929 |
+
420,
|
| 930 |
+
485,
|
| 931 |
+
500
|
| 932 |
+
],
|
| 933 |
+
"page_idx": 6
|
| 934 |
+
},
|
| 935 |
+
{
|
| 936 |
+
"type": "text",
|
| 937 |
+
"text": "methodology even with more compact model configurations. Beyond task-specific visual reasoning, we extend our evaluation to general reasoning benchmarks (e.g., MathVista), where TVC consistently delivers strong performance, achieving a $3.6\\%$ improvement over the original Qwen2-VL72B model. These results emphasize TVC's ability to excel in tasks requiring both perception and reasoning. Collectively, the findings indicate that TVC not only advances performance in specialized visual reasoning tasks but also offers substantial benefits in broader application scenarios.",
|
| 938 |
+
"bbox": [
|
| 939 |
+
112,
|
| 940 |
+
527,
|
| 941 |
+
489,
|
| 942 |
+
720
|
| 943 |
+
],
|
| 944 |
+
"page_idx": 6
|
| 945 |
+
},
|
| 946 |
+
{
|
| 947 |
+
"type": "text",
|
| 948 |
+
"text": "4.4 Further Analysis",
|
| 949 |
+
"text_level": 1,
|
| 950 |
+
"bbox": [
|
| 951 |
+
112,
|
| 952 |
+
734,
|
| 953 |
+
294,
|
| 954 |
+
751
|
| 955 |
+
],
|
| 956 |
+
"page_idx": 6
|
| 957 |
+
},
|
| 958 |
+
{
|
| 959 |
+
"type": "text",
|
| 960 |
+
"text": "In this section, we conduct comprehensive experiments to evaluate the design choices of TVC, emphasizing the key contributions of our approach. We also present a case study to further illustrate the qualitative effectiveness of TVC.",
|
| 961 |
+
"bbox": [
|
| 962 |
+
112,
|
| 963 |
+
758,
|
| 964 |
+
489,
|
| 965 |
+
839
|
| 966 |
+
],
|
| 967 |
+
"page_idx": 6
|
| 968 |
+
},
|
| 969 |
+
{
|
| 970 |
+
"type": "text",
|
| 971 |
+
"text": "Effectiveness of TVC system. To evaluate the effectiveness of the TVC system, we conduct comprehensive ablation experiments on various components using the Qwen2-VL-7B as the Baseline. We begin by performing supervised fine-tuning on the",
|
| 972 |
+
"bbox": [
|
| 973 |
+
112,
|
| 974 |
+
841,
|
| 975 |
+
489,
|
| 976 |
+
921
|
| 977 |
+
],
|
| 978 |
+
"page_idx": 6
|
| 979 |
+
},
|
| 980 |
+
{
|
| 981 |
+
"type": "table",
|
| 982 |
+
"img_path": "images/3bd77dbb748e6d5a4b9a4950ab47ef7ead5e90b2f52314ea31b2cfeb357c8418.jpg",
|
| 983 |
+
"table_caption": [
|
| 984 |
+
"Table 3: Ablations on Token Compression."
|
| 985 |
+
],
|
| 986 |
+
"table_footnote": [],
|
| 987 |
+
"table_body": "<table><tr><td>Method</td><td>MathVista</td><td>MathVision</td><td>MathVerse</td><td>Avg</td></tr><tr><td>TVC Baseline</td><td>68.3</td><td>21.5</td><td>39.6</td><td>43.1</td></tr><tr><td>+ 2x2 Avg Pooling</td><td>67.8</td><td>22.9</td><td>38.3</td><td>43.0</td></tr><tr><td>+ 4x4 Avg Pooling</td><td>68.1</td><td>22.7</td><td>38.9</td><td>43.2</td></tr></table>",
|
| 988 |
+
"bbox": [
|
| 989 |
+
512,
|
| 990 |
+
376,
|
| 991 |
+
878,
|
| 992 |
+
437
|
| 993 |
+
],
|
| 994 |
+
"page_idx": 6
|
| 995 |
+
},
|
| 996 |
+
{
|
| 997 |
+
"type": "text",
|
| 998 |
+
"text": "Qwen2-VL-7B model with the data from Section 3, referred to as Vanilla - Direct SFT. Furthermore, we apply the DVR training approach outlined in Section 2.2, which increases the focus on the visual information in the training data, enabling the model to implicitly learn visual conditioning capabilities. Additionally, during the testing phase, we experiment with resetting the visual KV cache midway through the reasoning process, and after compressing the visual tokens, we add them to the end of the reasoning steps. This strategy allows the model to further observe the image content during its thought process, mitigating the visual forgetting and suppressing hallucinations.",
|
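The test-time procedure just described can be sketched as follows; every method on `model` here is a hypothetical placeholder for a model-specific implementation, not a real API:

```python
def periodic_visual_calibration(model, prompt, image, trigger_pos):
    """Sketch of the testing-phase strategy above: decode until trigger_pos,
    reset the visual KV cache, then re-append a compressed copy of the
    visual tokens so later reasoning steps can still attend to the image."""
    state = model.prefill(prompt, image)                 # KV cache incl. image tokens
    text = model.decode(state, max_tokens=trigger_pos)   # first half of the reasoning
    state = model.reset_visual_kv(state)                 # drop cached image keys/values
    pooled = model.compress_visual_tokens(image)         # e.g. 4x4 average pooling
    state = model.append_tokens(state, pooled)           # re-inject visual evidence
    return text + model.decode(state)                    # finish the reasoning
```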
| 999 |
+
"bbox": [
|
| 1000 |
+
507,
|
| 1001 |
+
467,
|
| 1002 |
+
884,
|
| 1003 |
+
692
|
| 1004 |
+
],
|
| 1005 |
+
"page_idx": 6
|
| 1006 |
+
},
|
| 1007 |
+
{
|
| 1008 |
+
"type": "text",
|
| 1009 |
+
"text": "As demonstrated in Table 2, the results highlight that the TVC system is crucial for enhancing visual reasoning capabilities. When using only the SFT training data without the DVR strategy in TVC (Vanilla - Direct SFT), improvements in visual reasoning tasks are limited, as the model lacks the ability to reflect on visual information. Furthermore, models trained with the DVR strategy alone still produce sub-optimal results, underscoring the importance of a comprehensive approach that integrates PVC and DVR. The contributions of PVC and DVR are relatively balanced in enhancing the reasoning capabilities. To further validate the effectiveness of our visual token compression, we",
|
| 1010 |
+
"bbox": [
|
| 1011 |
+
507,
|
| 1012 |
+
696,
|
| 1013 |
+
884,
|
| 1014 |
+
921
|
| 1015 |
+
],
|
| 1016 |
+
"page_idx": 6
|
| 1017 |
+
},
|
| 1018 |
+
{
|
| 1019 |
+
"type": "image",
|
| 1020 |
+
"img_path": "images/0c4be083c4972b0ec6877c4318b42fb9003a36717e0940aeeddc10e6556d40f2.jpg",
|
| 1021 |
+
"image_caption": [
|
| 1022 |
+
"Figure 6: Case Study of TVC. TVC effectively re-examines the image during the reflection process to correct mistakes, guiding the model to the correct answer."
|
| 1023 |
+
],
|
| 1024 |
+
"image_footnote": [],
|
| 1025 |
+
"bbox": [
|
| 1026 |
+
115,
|
| 1027 |
+
82,
|
| 1028 |
+
884,
|
| 1029 |
+
428
|
| 1030 |
+
],
|
| 1031 |
+
"page_idx": 7
|
| 1032 |
+
},
|
| 1033 |
+
{
|
| 1034 |
+
"type": "text",
|
| 1035 |
+
"text": "conduct experiments with different pooling methods. As shown in Table 3, the TVC Baseline represents the method without image compression. We observe that the use of pooling methods has little impact on the model's capabilities. Utilizing $4 \\times 4$ average pooling for compression not only enhances the model's inference efficiency but also achieves a slight performance improvement.",
|
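A minimal, self-contained sketch of the $4 \times 4$ average-pooling compression compared in Table 3; the 32x32 token grid and hidden size 1536 are illustrative values, not the model's actual configuration:

```python
import torch
import torch.nn.functional as F

def compress_visual_tokens(tokens: torch.Tensor, grid: int, pool: int = 4) -> torch.Tensor:
    """Average-pool a (grid*grid, dim) sequence of visual tokens down to
    (grid/pool)**2 tokens while preserving the 2D spatial layout."""
    dim = tokens.shape[-1]
    x = tokens.view(grid, grid, dim).permute(2, 0, 1).unsqueeze(0)  # (1, dim, grid, grid)
    x = F.avg_pool2d(x, kernel_size=pool)                           # (1, dim, grid/pool, grid/pool)
    return x.squeeze(0).permute(1, 2, 0).reshape(-1, dim)

# Example: 1024 tokens on a 32x32 grid -> 64 tokens after 4x4 pooling.
vis = torch.randn(32 * 32, 1536)
print(compress_visual_tokens(vis, grid=32, pool=4).shape)  # torch.Size([64, 1536])
```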
| 1036 |
+
"bbox": [
|
| 1037 |
+
112,
|
| 1038 |
+
493,
|
| 1039 |
+
487,
|
| 1040 |
+
621
|
| 1041 |
+
],
|
| 1042 |
+
"page_idx": 7
|
| 1043 |
+
},
|
| 1044 |
+
{
|
| 1045 |
+
"type": "text",
|
| 1046 |
+
"text": "Data Scaling Law. To validate the scalability of our approach, we also conduct a data scaling law experiment for training. As shown in Figure 5, we compare models across various data size: 50K, 100K, 150K, and 200K samples, and present the performance relative to the base model (Qwen2-VL-7B). Our findings show that as the dataset size increases, the model's performance continues to improve. However, it is evident that with a small dataset, the model's reasoning ability cannot reach an optimal level solely through SFT. Therefore, data scaling proves highly effective during SFT training, and the results indicate that TVC benefits from increased data.",
|
| 1047 |
+
"bbox": [
|
| 1048 |
+
112,
|
| 1049 |
+
627,
|
| 1050 |
+
489,
|
| 1051 |
+
851
|
| 1052 |
+
],
|
| 1053 |
+
"page_idx": 7
|
| 1054 |
+
},
|
| 1055 |
+
{
|
| 1056 |
+
"type": "text",
|
| 1057 |
+
"text": "To prevent underfitting from becoming a performance bottleneck, we increase the number of training epochs with smaller datasets, which further improves model performance. This indicates that in",
|
| 1058 |
+
"bbox": [
|
| 1059 |
+
112,
|
| 1060 |
+
857,
|
| 1061 |
+
489,
|
| 1062 |
+
921
|
| 1063 |
+
],
|
| 1064 |
+
"page_idx": 7
|
| 1065 |
+
},
|
| 1066 |
+
{
|
| 1067 |
+
"type": "text",
|
| 1068 |
+
"text": "sufficient data or epochs lead to undertrained models, making it difficult to learn long-chain reasoning. Increasing both the data and training epochs can effectively alleviate this issue.",
|
| 1069 |
+
"bbox": [
|
| 1070 |
+
507,
|
| 1071 |
+
493,
|
| 1072 |
+
884,
|
| 1073 |
+
557
|
| 1074 |
+
],
|
| 1075 |
+
"page_idx": 7
|
| 1076 |
+
},
|
| 1077 |
+
{
|
| 1078 |
+
"type": "text",
|
| 1079 |
+
"text": "4.5 Case Study",
|
| 1080 |
+
"text_level": 1,
|
| 1081 |
+
"bbox": [
|
| 1082 |
+
507,
|
| 1083 |
+
569,
|
| 1084 |
+
643,
|
| 1085 |
+
586
|
| 1086 |
+
],
|
| 1087 |
+
"page_idx": 7
|
| 1088 |
+
},
|
| 1089 |
+
{
|
| 1090 |
+
"type": "text",
|
| 1091 |
+
"text": "We present a comprehensive case study in Figure 6 to illustrate the improvements of our TVC approach. Specifically, we provide an example that highlights advancements in the reasoning process. In this example, the model is tasked with carefully observing the objects within an image and, after eliminating certain shapes and attributes, providing the count of the remaining objects. During base CoT reasoning, the model fails to check the attributes of each object and only focuses on their shapes, leading to an incorrect final answer. Subsequently, the model learns to re-focus on the image and carefully describe the attributes of each object in detail. This process allows the model to identify the issue in its previous reasoning and provide the correct answer.",
|
| 1092 |
+
"bbox": [
|
| 1093 |
+
505,
|
| 1094 |
+
592,
|
| 1095 |
+
882,
|
| 1096 |
+
848
|
| 1097 |
+
],
|
| 1098 |
+
"page_idx": 7
|
| 1099 |
+
},
|
| 1100 |
+
{
|
| 1101 |
+
"type": "text",
|
| 1102 |
+
"text": "5 Conclusion",
|
| 1103 |
+
"text_level": 1,
|
| 1104 |
+
"bbox": [
|
| 1105 |
+
507,
|
| 1106 |
+
862,
|
| 1107 |
+
640,
|
| 1108 |
+
877
|
| 1109 |
+
],
|
| 1110 |
+
"page_idx": 7
|
| 1111 |
+
},
|
| 1112 |
+
{
|
| 1113 |
+
"type": "text",
|
| 1114 |
+
"text": "In this paper, we introduce Take-along Visual Conditioning (TVC), a novel strategy designed to en",
|
| 1115 |
+
"bbox": [
|
| 1116 |
+
507,
|
| 1117 |
+
889,
|
| 1118 |
+
884,
|
| 1119 |
+
921
|
| 1120 |
+
],
|
| 1121 |
+
"page_idx": 7
|
| 1122 |
+
},
|
| 1123 |
+
{
|
| 1124 |
+
"type": "text",
|
| 1125 |
+
"text": "hance the reasoning capabilities of MLLMs by addressing the challenge of visual attention degradation during long-chain reasoning. By dynamically shifting the image input to critical stages of reasoning and compressing redundant visual tokens, we ensure that the model maintains focus on the visual information throughout the process. Our extensive evaluation on several mathematical reasoning benchmarks demonstrates the effectiveness of TVC in improving multimodal reasoning, providing a robust approach to equip MLLMs with better visual grounding for complex tasks.",
|
| 1126 |
+
"bbox": [
|
| 1127 |
+
112,
|
| 1128 |
+
84,
|
| 1129 |
+
492,
|
| 1130 |
+
280
|
| 1131 |
+
],
|
| 1132 |
+
"page_idx": 8
|
| 1133 |
+
},
|
| 1134 |
+
{
|
| 1135 |
+
"type": "text",
|
| 1136 |
+
"text": "Acknowledgments",
|
| 1137 |
+
"text_level": 1,
|
| 1138 |
+
"bbox": [
|
| 1139 |
+
114,
|
| 1140 |
+
290,
|
| 1141 |
+
280,
|
| 1142 |
+
307
|
| 1143 |
+
],
|
| 1144 |
+
"page_idx": 8
|
| 1145 |
+
},
|
| 1146 |
+
{
|
| 1147 |
+
"type": "text",
|
| 1148 |
+
"text": "This work is partially supported by National Key R&D Program of China (2024YFE0202800), NSFC (62376118), Key Program of Jiangsu Science Foundation (BK20243012), CCF-Tencent Rhino-Bird Open Research Fund (RAGR20240101), AMED (JP25wm0625405), and Collaborative Innovation Center of Novel Software Technology and Industrialization.",
|
| 1149 |
+
"bbox": [
|
| 1150 |
+
112,
|
| 1151 |
+
316,
|
| 1152 |
+
489,
|
| 1153 |
+
447
|
| 1154 |
+
],
|
| 1155 |
+
"page_idx": 8
|
| 1156 |
+
},
|
| 1157 |
+
{
|
| 1158 |
+
"type": "text",
|
| 1159 |
+
"text": "Limitations",
|
| 1160 |
+
"text_level": 1,
|
| 1161 |
+
"bbox": [
|
| 1162 |
+
114,
|
| 1163 |
+
458,
|
| 1164 |
+
220,
|
| 1165 |
+
474
|
| 1166 |
+
],
|
| 1167 |
+
"page_idx": 8
|
| 1168 |
+
},
|
| 1169 |
+
{
|
| 1170 |
+
"type": "text",
|
| 1171 |
+
"text": "Despite advancements, our method may still exhibit several limitations. First, for highly complex reasoning tasks requiring sophisticated analytical capabilities, simply increasing visual revisits proves insufficient. In contrast, it is crucial to enhance the model's inherent reasoning capacity. Second, our method assumes the availability of delayed visual processing, making it potentially unsuitable for real-time applications requiring instantaneous visual feedback, such as robotic navigation or time-sensitive decision-making scenarios.",
|
| 1172 |
+
"bbox": [
|
| 1173 |
+
112,
|
| 1174 |
+
485,
|
| 1175 |
+
489,
|
| 1176 |
+
663
|
| 1177 |
+
],
|
| 1178 |
+
"page_idx": 8
|
| 1179 |
+
},
|
| 1180 |
+
{
|
| 1181 |
+
"type": "text",
|
| 1182 |
+
"text": "References",
|
| 1183 |
+
"text_level": 1,
|
| 1184 |
+
"bbox": [
|
| 1185 |
+
114,
|
| 1186 |
+
689,
|
| 1187 |
+
213,
|
| 1188 |
+
703
|
| 1189 |
+
],
|
| 1190 |
+
"page_idx": 8
|
| 1191 |
+
},
|
| 1192 |
+
{
|
| 1193 |
+
"type": "list",
|
| 1194 |
+
"sub_type": "ref_text",
|
| 1195 |
+
"list_items": [
|
| 1196 |
+
"Meta AI. 2024. Build the future of ai with meta llama 3. Technical report, Meta AI.",
|
| 1197 |
+
"anthropic. 2024. Introducing the next generation of claude. Technical report, anthropic.",
|
| 1198 |
+
"Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, et al. 2023. Qwen technical report. arXiv:2309.16609.",
|
| 1199 |
+
"Liang Chen, Haozhe Zhao, Tianyu Liu, Shuai Bai, Junyang Lin, Chang Zhou, and Baobao Chang. 2024a. An image is worth 1/2 tokens after layer 2: Plug-and-play inference acceleration for large vision-language models. Preprint, arXiv:2403.06764."
|
| 1200 |
+
],
|
| 1201 |
+
"bbox": [
|
| 1202 |
+
114,
|
| 1203 |
+
713,
|
| 1204 |
+
489,
|
| 1205 |
+
920
|
| 1206 |
+
],
|
| 1207 |
+
"page_idx": 8
|
| 1208 |
+
},
|
| 1209 |
+
{
|
| 1210 |
+
"type": "list",
|
| 1211 |
+
"sub_type": "ref_text",
|
| 1212 |
+
"list_items": [
|
| 1213 |
+
"Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. 2024b. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271.",
|
| 1214 |
+
"Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Zhong Muyan, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. 2023. Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. arXiv:2312.14238.",
|
| 1215 |
+
"DeepSeek-AI. 2024. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. Technical report, DeepSeek.",
|
| 1216 |
+
"Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. 2024. Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432.",
|
| 1217 |
+
"Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. 2024. Vlmealkit: An open-source toolkit for evaluating large multi-modality models. arXiv preprint arXiv:2407.11691.",
|
| 1218 |
+
"Chaoyou Fu, Haojia Lin, Xiong Wang, Yi-Fan Zhang, Yunhang Shen, Xiaoyu Liu, Yangze Li, Zuwei Long, Heting Gao, Ke Li, et al. 2025a. Vita-1.5: Towards gpt-4o level real-time vision and speech interaction. arXiv preprint arXiv:2501.01957.",
|
| 1219 |
+
"Jiale Fu, Yuchu Jiang, Junkai Chen, Jiaming Fan, Xin Geng, and Xu Yang. 2025b. Speculative ensemble: Fast large language model ensemble via speculation. arXiv preprint arXiv:2502.01662.",
|
| 1220 |
+
"Jiahui Gao, Renjie Pi, Jipeng Zhang, Jiacheng Ye, Wanjun Zhong, Yufei Wang, Lanqing Hong, Jianhua Han, Hang Xu, Zhenguo Li, et al. 2023. G-llava: Solving geometric problem with multi-modal large language model. arXiv preprint arXiv:2312.11370.",
|
| 1221 |
+
"Jarvis Guo, Tuney Zheng, Yuelin Bai, Bo Li, Yubo Wang, King Zhu, Yizhi Li, Graham Neubig, Wenhu Chen, and Xiang Yue. 2024. Mammoth-vl: Eliciting multimodal reasoning with instruction tuning at scale. arXiv preprint arXiv:2412.05237.",
|
| 1222 |
+
"Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. 2024. Olympiad-bench: A challenging benchmark for promoting agi with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008.",
|
| 1223 |
+
"Yushi Hu, Weijia Shi, Xingyu Fu, Dan Roth, Mari Ostendorf, Luke Zettlemoyer, Noah A Smith, and Ranjay Krishna. 2024. Visual sketchpad: Sketching as a visual chain of thought for multimodal language models. arXiv preprint arXiv:2406.09403."
|
| 1224 |
+
],
|
| 1225 |
+
"bbox": [
|
| 1226 |
+
510,
|
| 1227 |
+
85,
|
| 1228 |
+
884,
|
| 1229 |
+
920
|
| 1230 |
+
],
|
| 1231 |
+
"page_idx": 8
|
| 1232 |
+
},
|
| 1233 |
+
{
|
| 1234 |
+
"type": "list",
|
| 1235 |
+
"sub_type": "ref_text",
|
| 1236 |
+
"list_items": [
|
| 1237 |
+
"Jinhao Jiang, Zhipeng Chen, Yingqian Min, Jie Chen, Xiaoxue Cheng, Jiapeng Wang, Yiru Tang, Haoxiang Sun, Jia Deng, Wayne Xin Zhao, et al. 2024. Technical report: Enhancing llm reasoning with reward-guided tree search. arXiv preprint arXiv:2411.11694.",
|
| 1238 |
+
"Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. 2023. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles.",
|
| 1239 |
+
"Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Peiyuan Zhang, Yanwei Li, Ziwei Liu, et al. 2024. Llavaonevision: Easy visual task transfer. arXiv preprint arXiv:2408.03326.",
|
| 1240 |
+
"Chunyuan Li, Cliff Wong, Sheng Zhang, Naoto Usuyama, Haotian Liu, Jianwei Yang, Tristan Naumann, Hoifung Poon, and Jianfeng Gao. 2023. Llava-med: Training a large language-and-vision assistant for biomedicine in one day. arXiv:2306.00890.",
|
| 1241 |
+
"Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. 2024a. Llavanext: Improved reasoning,OCR, and world knowledge.",
|
| 1242 |
+
"Yuan Liu, Le Tian, Xiao Zhou, Xinyu Gao, Kavio Yu, Yang Yu, and Jie Zhou. 2024b. Points1. 5: Building a vision-language model towards real world applications. arXiv preprint arXiv:2412.08443.",
|
| 1243 |
+
"Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. 2024a. Mathvista: Evaluating mathematical reasoning of foundation models in visual contexts. In ICLR.",
|
| 1244 |
+
"Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. 2023. Chameleon: Plug-and-play compositional reasoning with large language models. Advances in Neural Information Processing Systems, 36:43447-43478.",
|
| 1245 |
+
"Shiyin Lu, Yang Li, Qing-Guo Chen, Zhao Xu, Weihua Luo, Kaifu Zhang, and Han-Jia Ye. 2024b. Ovis: Structural embedding alignment for multimodal large language model. arXiv preprint arXiv:2405.20797.",
|
| 1246 |
+
"Brandon McKinzie, Zhe Gan, Jean-Philippe Fauconnier, Sam Dodge, Bowen Zhang, Philipp Dufter, Dhruti Shah, Xianzhi Du, Futang Peng, Floris Weers, et al. 2024. Mm1: Methods, analysis & insights from multimodal llm pre-training. arXiv:2403.09611.",
|
| 1247 |
+
"OpenAI. 2024a. Gpt-4o: Hello gpt-4o. Technical report, OpenAI.",
|
| 1248 |
+
"OpenAI. 2024b. Learning to reason with llms. Technical report, OpenAI."
|
| 1249 |
+
],
|
| 1250 |
+
"bbox": [
|
| 1251 |
+
115,
|
| 1252 |
+
85,
|
| 1253 |
+
489,
|
| 1254 |
+
920
|
| 1255 |
+
],
|
| 1256 |
+
"page_idx": 9
|
| 1257 |
+
},
|
| 1258 |
+
{
|
| 1259 |
+
"type": "list",
|
| 1260 |
+
"sub_type": "ref_text",
|
| 1261 |
+
"list_items": [
|
| 1262 |
+
"Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. 2025. Lmmr1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536.",
|
| 1263 |
+
"Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, et al. 2024. O1 replication journey: A strategic progress report-part 1. arXiv preprint arXiv:2410.18982.",
|
| 1264 |
+
"QwenTeam. 2024. Qvq: To see the world with wisdom. Technical report, Alibaba.",
|
| 1265 |
+
"Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 2021. Learning transferable visual models from natural language supervision. In ICML.",
|
| 1266 |
+
"Machel Reid, Nikolay Savinov, Denis Teptyashin, Dmitry Lepikhin, Timothy Lillicrap, Jean-baptiste Alayrac, Radu Soricut, Angeliki Lazaridou, Orhan Firat, Julian Schrittwieser, et al. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv:2403.05530.",
|
| 1267 |
+
"Wenhao Shi, Zhiqiang Hu, Yi Bin, Junhua Liu, Yang Yang, See-Kiong Ng, Lidong Bing, and Roy Ka-Wei Lee. 2024. Math-llava: Bootstrapping mathematical reasoning for multimodal large language models. arXiv preprint arXiv:2406.17294.",
|
| 1268 |
+
"Hai-Long Sun, Da-Wei Zhou, Yang Li, Shiyin Lu, Chao Yi, Qing-Guo Chen, Zhao Xu, Weihua Luo, Kaifu Zhang, De-Chuan Zhan, et al. 2025a. Parrot: Multilingual visual instruction tuning. In ICML.",
|
| 1269 |
+
"Hai-Long Sun, Da-Wei Zhou, De-Chuan Zhan, and Han-Jia Ye. 2025b. Pilot: A pre-trained model-based continual learning toolbox. SCIENCE CHINA Information Sciences, 68(4):147101.",
|
| 1270 |
+
"Hai-Long Sun, Da-Wei Zhou, Hanbin Zhao, Le Gan, De-Chuan Zhan, and Han-Jia Ye. 2025c. Mos: Model surgery for pre-trained model-based class-incremental learning. In AAAI, volume 39, pages 20699-20707.",
|
| 1271 |
+
"Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M Dai, Anja Hauth, et al. 2023. Gemini: a family of highly capable multimodal models. arXiv preprint arXiv:2312.11805.",
|
| 1272 |
+
"Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. 2024. Cambrian-1: A fully open, vision-centric exploration of multimodal llms. arXiv preprint arXiv:2406.16860."
|
| 1273 |
+
],
|
| 1274 |
+
"bbox": [
|
| 1275 |
+
510,
|
| 1276 |
+
85,
|
| 1277 |
+
882,
|
| 1278 |
+
919
|
| 1279 |
+
],
|
| 1280 |
+
"page_idx": 9
|
| 1281 |
+
},
|
| 1282 |
+
{
|
| 1283 |
+
"type": "list",
|
| 1284 |
+
"sub_type": "ref_text",
|
| 1285 |
+
"list_items": [
|
| 1286 |
+
"Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. 2024a. Measuring multimodal mathematical reasoning with math-vision dataset. Preprint, arXiv:2402.14804.",
|
| 1287 |
+
"Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. 2024b. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191.",
|
| 1288 |
+
"Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Ed Chi, Quoc Le, and Denny Zhou. 2022. Chain of thought prompting elicits reasoning in large language models. arXiv:2201.11903.",
|
| 1289 |
+
"Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. 2024. Llava-o1: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440.",
|
| 1290 |
+
"Ankit Yadav, Lingqiao Liu, and Yuankai Qi. 2025. Exploring primitive visual measurement understanding and the role of output format in learning in vision-language models. arXiv preprint arXiv:2501.15144.",
|
| 1291 |
+
"Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. 2023. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11975-11986.",
|
| 1292 |
+
"Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Peng Gao, et al. 2024a. Math-verse: Does your multi-modal llm truly see the diagrams in visual math problems? arXiv preprint arXiv:2403.14624.",
|
| 1293 |
+
"Renrui Zhang, Xinyu Wei, Dongzhi Jiang, Ziyu Guo, Shicheng Li, Yichi Zhang, Chengzhuo Tong, Jiaming Liu, Aojun Zhou, Bin Wei, et al. 2024b. Mavis: Mathematical visual instruction tuning with an automatic data engine. arXiv preprint arXiv:2407.08739.",
|
| 1294 |
+
"Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 2024. Llamafactory: Unified efficient fine-tuning of $100+$ language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), Bangkok, Thailand. Association for Computational Linguistics.",
|
| 1295 |
+
"Wenwen Zhuang, Xin Huang, Xiantao Zhang, and Jin Zeng. 2024. Math-puma: Progressive upward multimodal alignment to enhance mathematical reasoning. arXiv preprint arXiv:2408.08640.",
|
| 1296 |
+
"Chengke Zou, Xingang Guo, Rui Yang, Junyu Zhang, Bin Hu, and Huan Zhang. 2024. Dynamath: A dynamic visual benchmark for evaluating mathematical reasoning robustness of vision language models. arXiv preprint arXiv:2411.00836."
|
| 1297 |
+
],
|
| 1298 |
+
"bbox": [
|
| 1299 |
+
115,
|
| 1300 |
+
85,
|
| 1301 |
+
485,
|
| 1302 |
+
914
|
| 1303 |
+
],
|
| 1304 |
+
"page_idx": 10
|
| 1305 |
+
},
|
| 1306 |
+
{
|
| 1307 |
+
"type": "text",
|
| 1308 |
+
"text": "A Related Work",
|
| 1309 |
+
"text_level": 1,
|
| 1310 |
+
"bbox": [
|
| 1311 |
+
115,
|
| 1312 |
+
83,
|
| 1313 |
+
273,
|
| 1314 |
+
98
|
| 1315 |
+
],
|
| 1316 |
+
"page_idx": 11
|
| 1317 |
+
},
|
| 1318 |
+
{
|
| 1319 |
+
"type": "text",
|
| 1320 |
+
"text": "Multimodal Large Language Models. Multimodal Large Language Models (MLLMs) (Li et al., 2023; Liu et al., 2024a; Sun et al., 2025a; Wang et al., 2024b; Lu et al., 2024b; McKinzie et al., 2024; Sun et al., 2025c,b; Fu et al., 2025b; Dong et al., 2024) integrate vision encoders (Radford et al., 2021; Zhai et al., 2023) with LLMs (AI, 2024; Bai et al., 2023), endowing them with robust capabilities across a wide range of domains. These include general visual understanding(OpenAI, 2024a; Li et al., 2024), mathematical reasoning (Shi et al., 2024; Gao et al., 2023), and answering college-level questions (Chen et al., 2023), demonstrating their versatility in real-world tasks. The rapid advancements in open-source models have also spurred the development of proprietary models, such as GPT-4o (OpenAI, 2024a), Gemini (Team et al., 2023; Reid et al., 2024), Qwen2-VL-MAX (Wang et al., 2024b), and Claude3 (anthropic, 2024). These models have demonstrated remarkable performance in both evaluation benchmarks and practical applications, solidifying their position at the forefront of AI research and deployment.",
|
| 1321 |
+
"bbox": [
|
| 1322 |
+
115,
|
| 1323 |
+
124,
|
| 1324 |
+
489,
|
| 1325 |
+
510
|
| 1326 |
+
],
|
| 1327 |
+
"page_idx": 11
|
| 1328 |
+
},
|
| 1329 |
+
{
|
| 1330 |
+
"type": "text",
|
| 1331 |
+
"text": "Reasoning with MLLMs. Recent advancements in MLLMs have significantly enhanced performance in reasoning tasks across both text and multimodal scenarios (OpenAI, 2024b; DeepSeek-AI, 2024; QwenTeam, 2024; Peng et al., 2025). Current methods primarily rely on CoT (Wei et al., 2022) to train MLLMs for step-by-step reasoning. Data-driven approaches include MathLLaVA (Shi et al., 2024), which introduced the MathV360K dataset, and MAmmoTH-VL (Guo et al., 2024), which curates a large-scale multimodal CoT dataset in a scalable manner. Another line of research explores vision-text alignment. MAVIS (Zhang et al., 2024b) fine-tunes a math-specific vision encoder with curated caption data, while Math-PUMA (Zhuang et al., 2024) leverages the Kullback-Leibler (KL) divergence of next-token prediction distributions for modality alignment. In a different paradigm, MLLMs act as coordinators, utilizing external tools such as LLMs, web search engines, and computer programs for complex reasoning. Chameleon (Lu et al., 2023) orchestrates tool-call sequences, and Visual Sketchpad (Hu et al., 2024) enables models to generate visual sketches to aid reasoning.",
|
| 1332 |
+
"bbox": [
|
| 1333 |
+
115,
|
| 1334 |
+
519,
|
| 1335 |
+
489,
|
| 1336 |
+
920
|
| 1337 |
+
],
|
| 1338 |
+
"page_idx": 11
|
| 1339 |
+
},
|
| 1340 |
+
{
|
| 1341 |
+
"type": "text",
|
| 1342 |
+
"text": "B More Details of Reasoning Dataset",
|
| 1343 |
+
"text_level": 1,
|
| 1344 |
+
"bbox": [
|
| 1345 |
+
510,
|
| 1346 |
+
83,
|
| 1347 |
+
845,
|
| 1348 |
+
99
|
| 1349 |
+
],
|
| 1350 |
+
"page_idx": 11
|
| 1351 |
+
},
|
| 1352 |
+
{
|
| 1353 |
+
"type": "text",
|
| 1354 |
+
"text": "In this section, we provide a detailed description of dynamic token truncation and reflection word pruning in the process of constructing the reasoning dataset. We also provide detailed information about the training data in Table 5.",
|
| 1355 |
+
"bbox": [
|
| 1356 |
+
510,
|
| 1357 |
+
116,
|
| 1358 |
+
880,
|
| 1359 |
+
197
|
| 1360 |
+
],
|
| 1361 |
+
"page_idx": 11
|
| 1362 |
+
},
|
| 1363 |
+
{
|
| 1364 |
+
"type": "text",
|
| 1365 |
+
"text": "B.1 Dynamic Token Truncation",
|
| 1366 |
+
"text_level": 1,
|
| 1367 |
+
"bbox": [
|
| 1368 |
+
510,
|
| 1369 |
+
218,
|
| 1370 |
+
773,
|
| 1371 |
+
234
|
| 1372 |
+
],
|
| 1373 |
+
"page_idx": 11
|
| 1374 |
+
},
|
| 1375 |
+
{
|
| 1376 |
+
"type": "text",
|
| 1377 |
+
"text": "To further improve the dataset quality, we analyze the distribution of token lengths after the answer-centric rejection sampling. We find that many samples are close to the maximum token limit, and manual checks show that these long reasoning chains often have problems—such as logical errors, mistakes in multi-step calculations, and reliance on shortcuts that don't work in general cases (e.g., substituting specific values). Motivated by the correlation between extreme token lengths and compromised solution quality, we implement adaptive truncation thresholds to keep the answers within the 200-8000 token range. This dynamic filtering not only eliminates the invalid cases (overly verbose or terse responses) but also enhances the overall quality of the data. The final length distribution matches how human experts solve problems and keeps the important reasoning steps intact.",
|
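A small sketch of the length filter described above; the `response` field name and the HuggingFace-style tokenizer are assumptions about the data format, not the paper's actual pipeline:

```python
def within_length_budget(sample, tokenizer, lo=200, hi=8000):
    """Keep only reasoning traces whose token count falls in [lo, hi],
    mirroring the adaptive truncation thresholds described above."""
    n_tokens = len(tokenizer.encode(sample["response"]))
    return lo <= n_tokens <= hi

# filtered = [s for s in dataset if within_length_budget(s, tokenizer)]
```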
| 1378 |
+
"bbox": [
|
| 1379 |
+
510,
|
| 1380 |
+
244,
|
| 1381 |
+
884,
|
| 1382 |
+
533
|
| 1383 |
+
],
|
| 1384 |
+
"page_idx": 11
|
| 1385 |
+
},
|
| 1386 |
+
{
|
| 1387 |
+
"type": "text",
|
| 1388 |
+
"text": "B.2 Reflection Word Pruning",
|
| 1389 |
+
"text_level": 1,
|
| 1390 |
+
"bbox": [
|
| 1391 |
+
510,
|
| 1392 |
+
556,
|
| 1393 |
+
756,
|
| 1394 |
+
571
|
| 1395 |
+
],
|
| 1396 |
+
"page_idx": 11
|
| 1397 |
+
},
|
| 1398 |
+
{
|
| 1399 |
+
"type": "text",
|
| 1400 |
+
"text": "Our analysis reveals a critical flaw in distilled reasoning chains: excessive metacognitive loops caused by uncontrolled reflection markers (e.g., 'Alternatively,' 'Wait'), which led to performance degradation through infinite loops or ungrounded speculation. Term frequency analysis of reflection density shows a heavy-tailed distribution— $95\\%$ of samples contained fewer than 10 reflection markers per chain, while $1\\%$ exhibited over 50 markers, with this extreme group strongly correlating to hallucination rates. To address this, we introduce a reflection token quota system that automatically prunes samples exceeding 25 reflection markers while retaining core reasoning logic using semantic-aware span detection. As shown in Figure 4, this approach significantly reduced infinite-loop instances in validation tasks while improving answer accuracy. The refined reflection pattern mirrors expert human problem-solving strategies, wherein targeted self-correction enhances, rather than disrupts, the continuity of the reasoning process.",
|
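The quota check can be sketched as below. The marker list beyond 'Alternatively' and 'Wait' is assumed, and the paper's semantic-aware span detection for retaining core logic is not reproduced here; this only counts markers and applies the quota:

```python
import re

# Hypothetical marker list; the text names 'Alternatively' and 'Wait' as examples.
REFLECTION_MARKERS = ("alternatively", "wait", "hmm", "on second thought")

def count_reflections(text):
    """Count reflection markers at word boundaries, case-insensitively."""
    pattern = r"\b(" + "|".join(re.escape(m) for m in REFLECTION_MARKERS) + r")\b"
    return len(re.findall(pattern, text, flags=re.IGNORECASE))

def passes_reflection_quota(text, quota=25):
    """Flag reasoning chains that stay within the 25-marker quota."""
    return count_reflections(text) <= quota
```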
| 1401 |
+
"bbox": [
|
| 1402 |
+
510,
|
| 1403 |
+
583,
|
| 1404 |
+
884,
|
| 1405 |
+
920
|
| 1406 |
+
],
|
| 1407 |
+
"page_idx": 11
|
| 1408 |
+
},
|
| 1409 |
+
{
|
| 1410 |
+
"type": "image",
|
| 1411 |
+
"img_path": "images/e6e818e208bce7e7184afc19e3effb07a641dfed7a0d213ac1fd5c32cde5e241.jpg",
|
| 1412 |
+
"image_caption": [
|
| 1413 |
+
"Figure 7: The token and reflection word distribution of the long-chain reasoning dataset."
|
| 1414 |
+
],
|
| 1415 |
+
"image_footnote": [],
|
| 1416 |
+
"bbox": [
|
| 1417 |
+
115,
|
| 1418 |
+
82,
|
| 1419 |
+
499,
|
| 1420 |
+
236
|
| 1421 |
+
],
|
| 1422 |
+
"page_idx": 12
|
| 1423 |
+
},
|
| 1424 |
+
{
|
| 1425 |
+
"type": "image",
|
| 1426 |
+
"img_path": "images/19734dcb34cce543e8463529115a85acd6bad9216134c3e1ed1179348600b169.jpg",
|
| 1427 |
+
"image_caption": [],
|
| 1428 |
+
"image_footnote": [],
|
| 1429 |
+
"bbox": [
|
| 1430 |
+
502,
|
| 1431 |
+
82,
|
| 1432 |
+
878,
|
| 1433 |
+
236
|
| 1434 |
+
],
|
| 1435 |
+
"page_idx": 12
|
| 1436 |
+
},
|
| 1437 |
+
{
|
| 1438 |
+
"type": "table",
|
| 1439 |
+
"img_path": "images/ed5183c30b072c3055cdbc52c33cbe7366f61b55f31d0f89626a003a8768d22c.jpg",
|
| 1440 |
+
"table_caption": [
|
| 1441 |
+
"Table 4: The detailed training hyperparameters."
|
| 1442 |
+
],
|
| 1443 |
+
"table_footnote": [],
|
| 1444 |
+
"table_body": "<table><tr><td>Config</td><td>SFT</td></tr><tr><td>Deepspeed</td><td>Zero3</td></tr><tr><td>Epoch</td><td>5</td></tr><tr><td>Warmup Ratio</td><td>0.1</td></tr><tr><td>Max Grad Norm</td><td>1.0</td></tr><tr><td>Optimizer</td><td>AdamW</td></tr><tr><td>Learning rate</td><td>2e-5</td></tr><tr><td>Learning rate scheduler</td><td>Cosine</td></tr><tr><td>Text max length</td><td>8192</td></tr><tr><td>Batch size per GPU</td><td>1</td></tr><tr><td>Gradient Accumulation Steps</td><td>4</td></tr><tr><td>GPU</td><td>64×H20-96G</td></tr><tr><td>Precision</td><td>Bf16</td></tr></table>",
|
| 1445 |
+
"bbox": [
|
| 1446 |
+
147,
|
| 1447 |
+
310,
|
| 1448 |
+
455,
|
| 1449 |
+
482
|
| 1450 |
+
],
|
| 1451 |
+
"page_idx": 12
|
| 1452 |
+
},
|
| 1453 |
+
{
|
| 1454 |
+
"type": "text",
|
| 1455 |
+
"text": "C Discussion",
|
| 1456 |
+
"text_level": 1,
|
| 1457 |
+
"bbox": [
|
| 1458 |
+
114,
|
| 1459 |
+
506,
|
| 1460 |
+
243,
|
| 1461 |
+
521
|
| 1462 |
+
],
|
| 1463 |
+
"page_idx": 12
|
| 1464 |
+
},
|
| 1465 |
+
{
|
| 1466 |
+
"type": "text",
|
| 1467 |
+
"text": "In this work, we investigate the phenomenon of visual information forgetting in MLLMs during long-chain reasoning. Through comprehensive analysis and experiments, we show that as reasoning chains progressively lengthen, models exhibit a gradual deterioration in retaining visual inputs, ultimately undermining their multimodal reasoning capabilities and exacerbating hallucination issues. To address this challenge, we propose a take-along visual conditioning mechanism that enables models to dynamically revisit visual inputs during reasoning steps, thereby enhancing content fidelity throughout the inference process.",
|
| 1468 |
+
"bbox": [
|
| 1469 |
+
112,
|
| 1470 |
+
533,
|
| 1471 |
+
489,
|
| 1472 |
+
743
|
| 1473 |
+
],
|
| 1474 |
+
"page_idx": 12
|
| 1475 |
+
},
|
| 1476 |
+
{
|
| 1477 |
+
"type": "text",
|
| 1478 |
+
"text": "However, as illustrated in Figure 8, we acknowledge several limitations. First, for highly complex reasoning tasks requiring sophisticated analytical capabilities, simply increasing visual revisits proves insufficient. In contrast, it is crucial to enhance the model's inherent reasoning capacity. Second, our method assumes the availability of delayed visual processing, making it potentially unsuitable for real-time applications requiring instantaneous visual feedback, such as robotic navigation or time-sensitive decision-making scenarios.",
|
| 1479 |
+
"bbox": [
|
| 1480 |
+
112,
|
| 1481 |
+
744,
|
| 1482 |
+
489,
|
| 1483 |
+
921
|
| 1484 |
+
],
|
| 1485 |
+
"page_idx": 12
|
| 1486 |
+
},
|
| 1487 |
+
{
|
| 1488 |
+
"type": "table",
|
| 1489 |
+
"img_path": "images/745a67a45e9a8fb99fc9a5e2ebcf5aebf3c4c0260658c3e43b3d580ec59cacbf.jpg",
|
| 1490 |
+
"table_caption": [
|
| 1491 |
+
"Table 5: Details on the TVC's training data, which is derived from publicly available datasets."
|
| 1492 |
+
],
|
| 1493 |
+
"table_footnote": [],
|
| 1494 |
+
"table_body": "<table><tr><td>Datasets</td><td>Samples</td></tr><tr><td>MathV360K (Shi et al., 2024)</td><td>221K</td></tr><tr><td>Geo170K (Gao et al., 2023)</td><td>22K</td></tr><tr><td>LLaVA-OneVision (Li et al., 2024)</td><td>97K</td></tr><tr><td>Cambrian-1 (Tong et al., 2024)</td><td>1K</td></tr></table>",
|
| 1495 |
+
"bbox": [
|
| 1496 |
+
519,
|
| 1497 |
+
323,
|
| 1498 |
+
868,
|
| 1499 |
+
420
|
| 1500 |
+
],
|
| 1501 |
+
"page_idx": 12
|
| 1502 |
+
},
|
| 1503 |
+
{
|
| 1504 |
+
"type": "text",
|
| 1505 |
+
"text": "Our work represents an initial exploration into mitigating visual forgetting in extended multimodal reasoning chains. We envision future research directions including: (1) Developing hybrid architectures that synergistically enhance both visual retention and intrinsic reasoning capabilities; (2) Investigating adaptive attention mechanisms for real-time multimodal applications; (3) Exploring curriculum learning strategies to progressively strengthen long-chain reasoning capacities. We hope this foundational study will inspire further advances in understanding and improving multimodal reasoning systems for complex real-world applications.",
|
| 1506 |
+
"bbox": [
|
| 1507 |
+
507,
|
| 1508 |
+
445,
|
| 1509 |
+
884,
|
| 1510 |
+
653
|
| 1511 |
+
],
|
| 1512 |
+
"page_idx": 12
|
| 1513 |
+
},
|
| 1514 |
+
{
|
| 1515 |
+
"type": "image",
|
| 1516 |
+
"img_path": "images/6a7493ff659617dcedca570aa164a80dbd4533998a3aa4bf6246b4c28549486d.jpg",
|
| 1517 |
+
"image_caption": [],
|
| 1518 |
+
"image_footnote": [],
|
| 1519 |
+
"bbox": [
|
| 1520 |
+
127,
|
| 1521 |
+
334,
|
| 1522 |
+
164,
|
| 1523 |
+
355
|
| 1524 |
+
],
|
| 1525 |
+
"page_idx": 13
|
| 1526 |
+
},
|
| 1527 |
+
{
|
| 1528 |
+
"type": "image",
|
| 1529 |
+
"img_path": "images/679522b38765370c555efed06eb171b6a97b036167f248e17a8a2a3a6956f88b.jpg",
|
| 1530 |
+
"image_caption": [
|
| 1531 |
+
"Figure 8: Qualitative Results of TVC."
|
| 1532 |
+
],
|
| 1533 |
+
"image_footnote": [],
|
| 1534 |
+
"bbox": [
|
| 1535 |
+
169,
|
| 1536 |
+
329,
|
| 1537 |
+
369,
|
| 1538 |
+
583
|
| 1539 |
+
],
|
| 1540 |
+
"page_idx": 13
|
| 1541 |
+
},
|
| 1542 |
+
{
|
| 1543 |
+
"type": "image",
|
| 1544 |
+
"img_path": "images/e4da8a755a3bfc4d8b87c6a8b3c35c5ae1c52093de2983e4e008244ee89b6da6.jpg",
|
| 1545 |
+
"image_caption": [],
|
| 1546 |
+
"image_footnote": [],
|
| 1547 |
+
"bbox": [
|
| 1548 |
+
127,
|
| 1549 |
+
596,
|
| 1550 |
+
169,
|
| 1551 |
+
621
|
| 1552 |
+
],
|
| 1553 |
+
"page_idx": 13
|
| 1554 |
+
},
|
| 1555 |
+
{
|
| 1556 |
+
"type": "text",
|
| 1557 |
+
"text": "Which of the cubes is not identical to the unfolded net?",
|
| 1558 |
+
"bbox": [
|
| 1559 |
+
173,
|
| 1560 |
+
600,
|
| 1561 |
+
361,
|
| 1562 |
+
618
|
| 1563 |
+
],
|
| 1564 |
+
"page_idx": 13
|
| 1565 |
+
},
|
| 1566 |
+
{
|
| 1567 |
+
"type": "text",
|
| 1568 |
+
"text": "Choices: (A) A (B) B (C) C (D) D (E) E",
|
| 1569 |
+
"bbox": [
|
| 1570 |
+
173,
|
| 1571 |
+
619,
|
| 1572 |
+
352,
|
| 1573 |
+
627
|
| 1574 |
+
],
|
| 1575 |
+
"page_idx": 13
|
| 1576 |
+
},
|
| 1577 |
+
{
|
| 1578 |
+
"type": "text",
|
| 1579 |
+
"text": "Let's see here. I've got this problem where there's an unfolded net of a cube, and then there are several cubes shown below with letters A through E. The question is asking which one of these cubes is not identical to the unfolded net. Okay, so I need to compare each cube to the net and see if they match or not.",
|
| 1580 |
+
"bbox": [
|
| 1581 |
+
418,
|
| 1582 |
+
331,
|
| 1583 |
+
843,
|
| 1584 |
+
357
|
| 1585 |
+
],
|
| 1586 |
+
"page_idx": 13
|
| 1587 |
+
},
|
| 1588 |
+
{
|
| 1589 |
+
"type": "text",
|
| 1590 |
+
"text": "Let me label the squares for clarity:",
|
| 1591 |
+
"bbox": [
|
| 1592 |
+
420,
|
| 1593 |
+
365,
|
| 1594 |
+
559,
|
| 1595 |
+
373
|
| 1596 |
+
],
|
| 1597 |
+
"page_idx": 13
|
| 1598 |
+
},
|
| 1599 |
+
{
|
| 1600 |
+
"type": "text",
|
| 1601 |
+
"text": "2. Back face (B)",
|
| 1602 |
+
"bbox": [
|
| 1603 |
+
420,
|
| 1604 |
+
378,
|
| 1605 |
+
485,
|
| 1606 |
+
388
|
| 1607 |
+
],
|
| 1608 |
+
"page_idx": 13
|
| 1609 |
+
},
|
| 1610 |
+
{
|
| 1611 |
+
"type": "text",
|
| 1612 |
+
"text": "3. Top face (T)",
|
| 1613 |
+
"bbox": [
|
| 1614 |
+
420,
|
| 1615 |
+
390,
|
| 1616 |
+
478,
|
| 1617 |
+
398
|
| 1618 |
+
],
|
| 1619 |
+
"page_idx": 13
|
| 1620 |
+
},
|
| 1621 |
+
{
|
| 1622 |
+
"type": "text",
|
| 1623 |
+
"text": "4. Bottom face (D)",
|
| 1624 |
+
"bbox": [
|
| 1625 |
+
420,
|
| 1626 |
+
399,
|
| 1627 |
+
495,
|
| 1628 |
+
407
|
| 1629 |
+
],
|
| 1630 |
+
"page_idx": 13
|
| 1631 |
+
},
|
| 1632 |
+
{
|
| 1633 |
+
"type": "text",
|
| 1634 |
+
"text": "5. Left face (L)",
|
| 1635 |
+
"bbox": [
|
| 1636 |
+
420,
|
| 1637 |
+
407,
|
| 1638 |
+
480,
|
| 1639 |
+
414
|
| 1640 |
+
],
|
| 1641 |
+
"page_idx": 13
|
| 1642 |
+
},
|
| 1643 |
+
{
|
| 1644 |
+
"type": "text",
|
| 1645 |
+
"text": "6. Right face (R)",
|
| 1646 |
+
"bbox": [
|
| 1647 |
+
420,
|
| 1648 |
+
414,
|
| 1649 |
+
487,
|
| 1650 |
+
423
|
| 1651 |
+
],
|
| 1652 |
+
"page_idx": 13
|
| 1653 |
+
},
|
| 1654 |
+
{
|
| 1655 |
+
"type": "text",
|
| 1656 |
+
"text": "Let's look at each cube option:",
|
| 1657 |
+
"bbox": [
|
| 1658 |
+
420,
|
| 1659 |
+
430,
|
| 1660 |
+
539,
|
| 1661 |
+
439
|
| 1662 |
+
],
|
| 1663 |
+
"page_idx": 13
|
| 1664 |
+
},
|
| 1665 |
+
{
|
| 1666 |
+
"type": "text",
|
| 1667 |
+
"text": "A) This is a cube with letters on its faces. Wait, no, it's a cube with colors, similar to the others. Wait, actually, all cubes have combinations of teal and yellow faces.",
|
| 1668 |
+
"bbox": [
|
| 1669 |
+
420,
|
| 1670 |
+
439,
|
| 1671 |
+
789,
|
| 1672 |
+
455
|
| 1673 |
+
],
|
| 1674 |
+
"page_idx": 13
|
| 1675 |
+
},
|
| 1676 |
+
{
|
| 1677 |
+
"type": "text",
|
| 1678 |
+
"text": "I need to carefully observe the color placement on each cube.",
|
| 1679 |
+
"bbox": [
|
| 1680 |
+
420,
|
| 1681 |
+
455,
|
| 1682 |
+
658,
|
| 1683 |
+
463
|
| 1684 |
+
],
|
| 1685 |
+
"page_idx": 13
|
| 1686 |
+
},
|
| 1687 |
+
{
|
| 1688 |
+
"type": "text",
|
| 1689 |
+
"text": "Option A",
|
| 1690 |
+
"bbox": [
|
| 1691 |
+
420,
|
| 1692 |
+
464,
|
| 1693 |
+
458,
|
| 1694 |
+
470
|
| 1695 |
+
],
|
| 1696 |
+
"page_idx": 13
|
| 1697 |
+
},
|
| 1698 |
+
{
|
| 1699 |
+
"type": "text",
|
| 1700 |
+
"text": "- Front face: teal",
|
| 1701 |
+
"bbox": [
|
| 1702 |
+
420,
|
| 1703 |
+
473,
|
| 1704 |
+
485,
|
| 1705 |
+
480
|
| 1706 |
+
],
|
| 1707 |
+
"page_idx": 13
|
| 1708 |
+
},
|
| 1709 |
+
{
|
| 1710 |
+
"type": "text",
|
| 1711 |
+
"text": "-Top face: yellow",
|
| 1712 |
+
"bbox": [
|
| 1713 |
+
420,
|
| 1714 |
+
482,
|
| 1715 |
+
492,
|
| 1716 |
+
489
|
| 1717 |
+
],
|
| 1718 |
+
"page_idx": 13
|
| 1719 |
+
},
|
| 1720 |
+
{
|
| 1721 |
+
"type": "text",
|
| 1722 |
+
"text": "-Right face:teal",
|
| 1723 |
+
"bbox": [
|
| 1724 |
+
420,
|
| 1725 |
+
489,
|
| 1726 |
+
485,
|
| 1727 |
+
497
|
| 1728 |
+
],
|
| 1729 |
+
"page_idx": 13
|
| 1730 |
+
},
|
| 1731 |
+
{
|
| 1732 |
+
"type": "text",
|
| 1733 |
+
"text": "- Left face: yellow",
|
| 1734 |
+
"bbox": [
|
| 1735 |
+
420,
|
| 1736 |
+
498,
|
| 1737 |
+
494,
|
| 1738 |
+
505
|
| 1739 |
+
],
|
| 1740 |
+
"page_idx": 13
|
| 1741 |
+
},
|
| 1742 |
+
{
|
| 1743 |
+
"type": "text",
|
| 1744 |
+
"text": "-Back face:teal",
|
| 1745 |
+
"bbox": [
|
| 1746 |
+
420,
|
| 1747 |
+
506,
|
| 1748 |
+
485,
|
| 1749 |
+
512
|
| 1750 |
+
],
|
| 1751 |
+
"page_idx": 13
|
| 1752 |
+
},
|
| 1753 |
+
{
|
| 1754 |
+
"type": "text",
|
| 1755 |
+
"text": "- Bottom face: yellow",
|
| 1756 |
+
"bbox": [
|
| 1757 |
+
420,
|
| 1758 |
+
514,
|
| 1759 |
+
505,
|
| 1760 |
+
521
|
| 1761 |
+
],
|
| 1762 |
+
"page_idx": 13
|
| 1763 |
+
},
|
| 1764 |
+
{
|
| 1765 |
+
"type": "text",
|
| 1766 |
+
"text": "Option E:",
|
| 1767 |
+
"bbox": [
|
| 1768 |
+
420,
|
| 1769 |
+
530,
|
| 1770 |
+
458,
|
| 1771 |
+
537
|
| 1772 |
+
],
|
| 1773 |
+
"page_idx": 13
|
| 1774 |
+
},
|
| 1775 |
+
{
|
| 1776 |
+
"type": "text",
|
| 1777 |
+
"text": "- Front: teal",
|
| 1778 |
+
"bbox": [
|
| 1779 |
+
420,
|
| 1780 |
+
538,
|
| 1781 |
+
495,
|
| 1782 |
+
545
|
| 1783 |
+
],
|
| 1784 |
+
"page_idx": 13
|
| 1785 |
+
},
|
| 1786 |
+
{
|
| 1787 |
+
"type": "text",
|
| 1788 |
+
"text": "-Top: yellow",
|
| 1789 |
+
"bbox": [
|
| 1790 |
+
420,
|
| 1791 |
+
546,
|
| 1792 |
+
470,
|
| 1793 |
+
552
|
| 1794 |
+
],
|
| 1795 |
+
"page_idx": 13
|
| 1796 |
+
},
|
| 1797 |
+
{
|
| 1798 |
+
"type": "text",
|
| 1799 |
+
"text": "-Right:teal",
|
| 1800 |
+
"bbox": [
|
| 1801 |
+
420,
|
| 1802 |
+
554,
|
| 1803 |
+
467,
|
| 1804 |
+
561
|
| 1805 |
+
],
|
| 1806 |
+
"page_idx": 13
|
| 1807 |
+
},
|
| 1808 |
+
{
|
| 1809 |
+
"type": "text",
|
| 1810 |
+
"text": "- Left: yellow",
|
| 1811 |
+
"bbox": [
|
| 1812 |
+
420,
|
| 1813 |
+
562,
|
| 1814 |
+
470,
|
| 1815 |
+
569
|
| 1816 |
+
],
|
| 1817 |
+
"page_idx": 13
|
| 1818 |
+
},
|
| 1819 |
+
{
|
| 1820 |
+
"type": "text",
|
| 1821 |
+
"text": "-Back:teal",
|
| 1822 |
+
"bbox": [
|
| 1823 |
+
420,
|
| 1824 |
+
571,
|
| 1825 |
+
467,
|
| 1826 |
+
577
|
| 1827 |
+
],
|
| 1828 |
+
"page_idx": 13
|
| 1829 |
+
},
|
| 1830 |
+
{
|
| 1831 |
+
"type": "text",
|
| 1832 |
+
"text": "- Bottom: yellow",
|
| 1833 |
+
"bbox": [
|
| 1834 |
+
420,
|
| 1835 |
+
579,
|
| 1836 |
+
487,
|
| 1837 |
+
586
|
| 1838 |
+
],
|
| 1839 |
+
"page_idx": 13
|
| 1840 |
+
},
|
| 1841 |
+
{
|
| 1842 |
+
"type": "text",
|
| 1843 |
+
"text": "Comparing to the net: F(t), T(y), R(t), L(y), B(y), D(t)",
|
| 1844 |
+
"bbox": [
|
| 1845 |
+
420,
|
| 1846 |
+
587,
|
| 1847 |
+
631,
|
| 1848 |
+
594
|
| 1849 |
+
],
|
| 1850 |
+
"page_idx": 13
|
| 1851 |
+
},
|
| 1852 |
+
{
|
| 1853 |
+
"type": "text",
|
| 1854 |
+
"text": "So, according to the net, B should be yellow, but in Option E, it's teal.",
|
| 1855 |
+
"bbox": [
|
| 1856 |
+
420,
|
| 1857 |
+
596,
|
| 1858 |
+
690,
|
| 1859 |
+
602
|
| 1860 |
+
],
|
| 1861 |
+
"page_idx": 13
|
| 1862 |
+
},
|
| 1863 |
+
{
|
| 1864 |
+
"type": "text",
|
| 1865 |
+
"text": "Therefore, Option E differs from the net in the back face color.",
|
| 1866 |
+
"bbox": [
|
| 1867 |
+
420,
|
| 1868 |
+
604,
|
| 1869 |
+
665,
|
| 1870 |
+
611
|
| 1871 |
+
],
|
| 1872 |
+
"page_idx": 13
|
| 1873 |
+
},
|
| 1874 |
+
{
|
| 1875 |
+
"type": "text",
|
| 1876 |
+
"text": "Hence, Option E is not identical to the unfolded net.",
|
| 1877 |
+
"bbox": [
|
| 1878 |
+
420,
|
| 1879 |
+
612,
|
| 1880 |
+
623,
|
| 1881 |
+
619
|
| 1882 |
+
],
|
| 1883 |
+
"page_idx": 13
|
| 1884 |
+
},
|
| 1885 |
+
{
|
| 1886 |
+
"type": "text",
|
| 1887 |
+
"text": "Therefore, the answer is E.",
|
| 1888 |
+
"bbox": [
|
| 1889 |
+
420,
|
| 1890 |
+
620,
|
| 1891 |
+
532,
|
| 1892 |
+
627
|
| 1893 |
+
],
|
| 1894 |
+
"page_idx": 13
|
| 1895 |
+
}
|
| 1896 |
+
]
|
data/2025/2503_13xxx/2503.13360/f41871e7-052a-41b7-a381-f27188074bdf_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
data/2025/2503_13xxx/2503.13360/f41871e7-052a-41b7-a381-f27188074bdf_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3cba925af02389613036cfcbad07505bcc142814739a062826fa0391255b0aa8
|
| 3 |
+
size 2762614
|
data/2025/2503_13xxx/2503.13360/full.md
ADDED
|
@@ -0,0 +1,383 @@
|
| 1 |
+
# Mitigating Visual Forgetting via Take-along Visual Conditioning for Multi-modal Long CoT Reasoning
|
| 2 |
+
|
| 3 |
+
Hai-Long Sun $^{1,2}$ , Zhun Sun $^{4,5}$ , Houwen Peng $^{3}$ , Han-Jia Ye $^{1,2}$ †
|
| 4 |
+
|
| 5 |
+
$^{1}$ School of Artificial Intelligence, Nanjing University
|
| 6 |
+
|
| 7 |
+
$^{2}$ National Key Laboratory for Novel Software Technology, Nanjing University
|
| 8 |
+
|
| 9 |
+
$^{3}$ Tencent $^{4}$ Center for Language AI Research, Tohoku University
|
| 10 |
+
|
| 11 |
+
$^{5}$ RIKEN Center for Advanced Intelligence Project
|
| 12 |
+
|
| 13 |
+
# Abstract
|
| 14 |
+
|
| 15 |
+
Recent advancements in Large Language Models (LLMs) have demonstrated enhanced reasoning capabilities, evolving from Chain-of-Thought (CoT) prompting to advanced, product-oriented solutions like OpenAI o1. During our re-implementation of this model, we noticed that in multimodal tasks requiring visual input (e.g., geometry problems), Multimodal LLMs (MLLMs) struggle to maintain focus on the visual information, in other words, MLLMs suffer from a gradual decline in attention to visual information as reasoning progresses, causing text-over-relied outputs. To investigate this, we ablate image inputs during long-chain reasoning. Concretely, we truncate the reasoning process midway, then re-complete the reasoning process with the input image removed. We observe only a $\sim 2\%$ accuracy drop on MathVista's test-hard subset, revealing the model's textual outputs dominate the following reasoning process. Motivated by this, we propose Take-along Visual Conditioning (TVC), a strategy that shifts image input to critical reasoning stages and compresses redundant visual tokens via dynamic pruning. This methodology helps the model retain attention to the visual components throughout the reasoning. Our approach achieves state-of-the-art performance on average across five mathematical reasoning benchmarks (+3.4 points vs previous sota), demonstrating the effectiveness of TVC in enhancing multimodal reasoning systems. The project page is available at https://sun-hailong.github.io/projects/TVC.
|
| 16 |
+
|
| 17 |
+
# 1 Introduction
|
| 18 |
+
|
| 19 |
+
Large Language Models (LLMs) have achieved significant advancements in natural language processing (NLP), particularly in the area of reasoning. These models have evolved from simple
|
| 20 |
+
|
| 21 |
+
prompt-based Chain-of-Thought (CoT) (Wei et al., 2022) techniques to sophisticated product-oriented solutions like OpenAI's o1 (OpenAI, 2024b), DeepSeek-R1 (DeepSeek-AI, 2024), and Qwen-QVQ (QwenTeam, 2024), demonstrating iterative reasoning capabilities that enable them to handle complex tasks requiring multi-step thinking, logic, and knowledge integration. Recently, several works also extended the CoT reasoning process to MLLM settings through data-centric innovations. For instance, Math-LLaVA (Shi et al., 2024) pioneers domain-specific training with the MathV360K dataset, while MAmmoTH-VL (Guo et al., 2024) scales up multimodal CoT data generation.
|
| 22 |
+
|
| 23 |
+
While such progress is notable in text-based domains, extending these advancements to multimodal scenarios presents unique challenges that transcend traditional language model boundaries. Reasoning in MLLMs requires fused understanding across modalities; for example, in geometric reasoning tasks the model must interpret and reason about images alongside text. Therefore, the model's ability to integrate and maintain focus on both types of information is critical. Unlike text-only LLMs that reinforce problem context through linguistic recurrence, MLLMs struggle to sustain visual attention across reasoning steps. That is, as the length of the context increases, the model is more inclined to conduct the next step of reasoning based on the previously outputted text rather than the information of the image itself, which eventually perpetuates an erroneous text reasoning process and degrades model performance. We term this phenomenon visual forgetting.
|
| 24 |
+
|
| 25 |
+
In this work, we conduct a diagnostic analysis of the visual forgetting effect within a long-chain reasoning system. The system processes multimodal Q&A tasks through a series of interconnected reasoning steps. We demonstrate significantly reduced attentional allocation to visual inputs during multi
|
| 26 |
+
|
| 27 |
+
stage reasoning processes. More formally, our analysis: 1) truncates the reasoning process midway and removes the image embeddings; 2) regenerates subsequent reasoning trajectories; 3) evaluates the reasoning outcomes of the pre/post-ablation inference trajectories. Intuitively, the performance gap between normal reasoning and diagnostic reasoning reveals the model's dependency on generated text over original visual evidence. Our results (see Section 2.1) on the MathVista-Hard dataset show that removing the image midway through the reasoning only causes an insignificant performance degradation (i.e., $\sim 2\%$), indicating that the model completes the reasoning process based primarily on its output text. More importantly, we also observe that the model's dependency on the visual evidence diminishes over time, since the early removal of the image inputs can hurt accuracy by $\sim 20\%$. This suggests the model's reasoning employs both visual and textual information in the early stages, then over-relies on text history, which limits its full visual reasoning potential.
|
| 28 |
+
|
| 29 |
+
Motivated by this, we propose a novel strategy to mitigate the visual forgetting effect and maintain visual attention throughout the reasoning process. Our methodology compresses and shifts the image input to the later stages of the reasoning process, ensuring the model integrates sufficient visual evidence into its reasoning. This approach results in improved performance and achieves state-of-the-art results on average across five mathematical reasoning benchmarks (i.e., $+3.4\%$ vs previous sota). Our findings highlight the effectiveness of this strategy in enhancing the performance of multimodal reasoning systems, providing a robust solution to the problem of visual forgetting in long-chain reasoning tasks.
|
| 30 |
+
|
| 31 |
+
# 2 Take-along Visual Conditioning: Sustaining Visual Evidence for Multi-modal Long CoT Reasoning
|
| 32 |
+
|
| 33 |
+
In this section, we first discuss our motivation and observations of the visual forgetting phenomenon in MLLM reasoning systems (Section 2.1). Then, we propose the Take-along Visual Conditioning (TVC) approach to mitigate visual forgetting and enhance the model's long-chain reasoning capabilities (Section 2.2).
|
| 34 |
+
|
| 35 |
+
# 2.1 Capturing the Visual Forgetting
|
| 36 |
+
|
| 37 |
+
Text-based reasoning systems often repeat key terms or phrases (e.g., restating "triangle ABC" or
|
| 38 |
+
|
| 39 |
+

|
| 40 |
+
Figure 1: The visual forgetting phenomenon by removing the image at different reasoning stages. It shows that by the midpoint of the reasoning process, the model becomes less dependent on the image, causing text-over-relied outputs.
|
| 41 |
+
|
| 42 |
+
"equation (1)" to keep the problem's context clear and focused. By restating these critical details, they create strong connections between each step of the reasoning process and the original problem's requirements. This repetition ensures the logic stays on track, and consistent to follow.
|
| 43 |
+
|
| 44 |
+
$$
|
| 45 |
+
\mathcal{C}_{\mathrm{MLLM}} = f\left(V, T_{1}, \dots, T_{n}\right) \tag{1}
|
| 46 |
+
$$
|
| 47 |
+
|
| 48 |
+
On the other hand, MLLMs struggle with this approach due to their design. As formalized in Equation (1), visual inputs $V$ are confined to the initial processing stages. Unlike text, this visual evidence isn't revisited or reinforced later in the reasoning process. Because there's no built-in way to keep visual information "active" or relevant throughout the task, the system's ability to connect visual details with text or logic weakens over time, leading to a progressive visual attention decay. The model is more likely to reason over its previously outputted text, which becomes particularly problematic in visual reasoning tasks that require continuous validation of spatial relationships.
|
| 49 |
+
|
| 50 |
+
We conduct two diagnostic analyses using the QVQ-72B-Preview model (QwenTeam, 2024) to capture this visual forgetting phenomenon quantitatively and qualitatively. On one hand, we remove the visual inputs at eight different stages to observe their impact. On the other hand, we depict the attention matrix to directly track the attention decay of the visual evidence over time.
|
| 51 |
+
|
| 52 |
+
Progressive Image Removing. To assess the extent to which the reasoning process depends on
|
| 53 |
+
|
| 54 |
+

|
| 55 |
+
|
| 56 |
+

|
| 57 |
+
Figure 2: Illustration of layer-level and token-level attention weights. (a) The layer-level attention weights of image tokens across different response token positions. (b) The token-level attention weights at the middle layer. It shows that the model's attention to the image gradually decreases during the reasoning process.
|
| 58 |
+
|
| 59 |
+
previously generated text, we first perform a normal reasoning process, then reset the KV cache at various stages of the reasoning process. This effectively removes image tokens and forces subsequent steps to rely solely on text-based information. Specifically, the reasoning process is divided into $K = 8$ intervals based on output token counts, regardless of the length of the normal reasoning process, with visual input progressively masked by resetting the KV cache at different cutoff positions $k \in \{0,1,\dots ,K - 1\}$. In other words, the first $k/8$ portion of the normal reasoning process is now employed as a prompt, and the model re-completes the reasoning process without image inputs. Furthermore, we discovered that for some questions (30.9% of the MathVista dataset), the model could answer correctly using only the text-based prompt. Consequently, we excluded these questions and designated the remaining dataset as the MathVista-Hard dataset.
|
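A sketch of this probe under stated assumptions: `model.generate` stands in for any chat-style multimodal generation API, and the whitespace split is a crude proxy for the output-token counts used in the paper:

```python
def ablated_rollout(model, question, image, k, K=8):
    """Keep the first k/K of a normal multimodal rollout as a text-only
    prompt, then re-complete the reasoning with the image removed."""
    trace = model.generate(question, image=image)        # normal reasoning
    words = trace.split()
    prefix = " ".join(words[: len(words) * k // K])      # first k/8 of the trace
    return model.generate(question + "\n" + prefix, image=None)
```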
| 60 |
+
|
| 61 |
+
As illustrated in Figure 1, we observe that the performance is 40.9 at $k = 4$ and 43.1 for the normal reasoning. This minimal $2.2\%$ gap suggests that the model overly relies on text outputs during the later reasoning stages rather than on the image.
|
| 62 |
+
|
| 63 |
+
This indicates that once the model completes the half stages of reasoning, it becomes overly reliant on its own generated CoT. Over-reliance on this self-generated reasoning worsens the model's tendency to forget visual evidence over time, which can result in hallucinations during lengthy, multi-step reasoning tasks. This result also reveals a near-exponential forgetting effect in performance as $k$ increases, which we formalize as:
$$
\mathcal{R}(k) = \mathcal{R}_{\mathrm{full}} - \Delta_{\mathrm{visual}}(k), \quad \Delta_{\mathrm{visual}}(k) \propto e^{-k}, \tag{2}
$$

where $\mathcal{R}_{\mathrm{full}}$ represents the full multimodal reasoning performance, and $\Delta_{\mathrm{visual}}(k)$ quantifies the performance degradation caused by visual masking at position $k$.
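For illustration, the claimed $e^{-k}$ shape can be checked with a log-linear fit. In the snippet below, only $\mathcal{R}(4) = 40.9$ and $\mathcal{R}_{\mathrm{full}} = 43.1$ come from the text; the other accuracies are placeholders to be replaced with measured values.

```python
import numpy as np

# Placeholder accuracies R(k) per cutoff k; only R[4] = 40.9 and
# R_full = 43.1 are quoted in the text, the rest are illustrative.
R = np.array([30.0, 34.0, 37.0, 39.5, 40.9, 41.8, 42.4, 42.9])
R_full = 43.1
delta = R_full - R                      # Delta_visual(k)
k = np.arange(len(R))
slope, _ = np.polyfit(k, np.log(delta), 1)
print(f"fitted decay rate {slope:.2f}; the e^-k model predicts about -1")
```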
Visual Attention Decay. To observe the changes in visual feature attention across different stages of reasoning more directly, we investigate the layer-level attention weights at various response token positions and the token-level attention weights at layer 16. Given that an MLLM's attention weights reflect its focus on tokens and influence the decision-making process, we first analyze the attention weights at each layer of the MLLM. Specifically, for each layer, we calculate the proportion of attention weights assigned to all image tokens.

As shown in Figure 2(a), at the $1/8$ position of the reasoning process the model effectively focuses on the visual inputs. However, as reasoning progresses, despite fluctuations in attention to visual tokens across layers, the model's overall attention to visual evidence gradually decreases, leading to visual forgetting. Next, following the methodology of FastV (Chen et al., 2024a), we analyze the attention maps for several cases and find that the model predominantly focuses on previously generated text tokens rather than the input image. After approximately $20\%$ of the output tokens, attention to the image inputs diminishes significantly, as illustrated in Figure 2(b). Both observations indicate visual memory degradation, revealing the model's limitations in maintaining consistent attention to visual inputs throughout the reasoning process.
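A sketch of the layer-level statistic, under the assumption of Hugging Face-style per-layer attention tensors (as returned with `output_attentions=True`), could look like this; it is our reconstruction, not the paper's code.

```python
# Sketch of the layer-level statistic: the share of the newest response
# token's attention mass that lands on the image tokens, per layer.
def image_attention_share(attentions, image_span):
    """attentions: per-layer tensors of shape [batch, heads, q, k];
    image_span: (start, end) indices of image tokens in the sequence."""
    start, end = image_span
    shares = []
    for layer_attn in attentions:
        head_avg = layer_attn[0].mean(dim=0)  # [q, k], averaged over heads
        newest = head_avg[-1]                 # attention of the newest token
        shares.append((newest[start:end].sum() / newest.sum()).item())
    return shares  # one proportion per layer
```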
# 2.2 Take-along Visual Conditioning

In this section, we introduce our solution to this problem in detail. We propose Take-along Visual Conditioning (TVC), a dynamic image retention mechanism that re-introduces visual inputs at strategic intervals throughout the reasoning process. TVC mitigates visual attention decay by periodically reaffirming visual information, akin to human problem-solving behavior, where individuals frequently refer back to visual inputs. Our approach enhances the model's ability to incorporate visual information continuously, improving its long-chain reasoning capacity by ensuring that visual evidence is revisited during critical decision-making moments.

The TVC method consists of two key stages: training and testing. In the training stage, we introduce Dynamic Visual Reaffirmation (DVR), which guides the model through iterative reinforcement of visual evidence during long reasoning chains. In the testing phase, we present Periodic Visual Calibration (PVC), where visual reactivation is periodically triggered at self-reflection intervals. To prevent the model from forgetting previous text-based reasoning steps due to an excessive number of image tokens, we compress the image through adaptive pooling, reducing the number of image tokens while preserving spatial semantics. This dual-modality engagement mechanism ensures consistent interaction between textual reasoning and visual evidence. We present an illustration of the TVC system in Figure 3.

Dynamic Visual Reaffirmation. Our dynamic visual reaffirmation training strategy combines two key components: 1) Data Curation: We curate long-chain reasoning data using the pipeline described in Section 3, sourced from high-quality academic datasets (e.g., MathV360K, Geo170K, and LLaVA-OneVision). This process yields a high-quality dataset optimized for long-chain reasoning training. 2) Visual Content Injection: While the curated data ensures correctness, the QVQ model inherently lacks the ability to iteratively reference visual inputs during reasoning. Therefore, we manually re-inject the visual content (visual embeddings and a bridging prompt) to trigger visual re-activation at predefined self-reflection intervals. Specifically, given the initial multimodal input $\mathcal{M}_0 = (V, T_0)$, DVR performs visual reactivation at self-reflection intervals $\{r_1, \dots, r_m\}$:
$$
\mathcal{M}_{i} = \left(V, \left[T_{\mathrm{prev}};\ \underbrace{\mathrm{Prompt}}_{\text{Re-activation}};\ T_{\mathrm{new}}\right]\right) \quad \text{at step } r_{i} \tag{3}
$$

where $T_{\mathrm{prev}}$ represents the previous reasoning steps and $T_{\mathrm{new}}$ denotes the new reasoning steps that build on prior reasoning and reintroduce focus on visual information. The bridging prompt is employed to signal the presence of the image, e.g., "Let me see the image again." To improve efficiency, our initial implementation adopts midpoint reactivation ($m = 1$, $r_1 = 0.5L$ for $L$-step chains).

Figure 3: Overview of TVC System Design. We enable the model to have take-along visual conditioning capabilities through two stages: training and inference.

During the self-reflection phase of the reasoning steps, we randomly select $m$ positions to inject visual content. Specifically, we inject reactivation prompts and regenerate visual captions using the model's intrinsic capabilities. This forces joint attention to both textual reasoning and visual evidence. By leveraging the model's intrinsic image captioning capability, we continuously reinforce visual information throughout the reasoning process. This ensures that the model incorporates image evidence during reflection, rather than relying solely on textual reasoning.
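A minimal sketch of how such a DVR training sample might be assembled is shown below, under the assumptions that self-reflection positions have already been located in the curated trace and that a model-generated caption is available; the `BRIDGE` wording follows the bridging-prompt example quoted above.

```python
import random

# Hypothetical sketch of DVR's visual content injection: at m randomly
# chosen self-reflection positions, a bridging prompt plus a regenerated
# image caption is spliced into the reasoning trace.
BRIDGE = "Let me see the image again."  # bridging prompt from the text

def inject_visual_reaffirmation(steps, reflection_positions, caption, m=1):
    """steps: list of reasoning-step strings; reflection_positions: step
    indices flagged as self-reflection; caption: model-written description
    of the image used to re-ground the chain."""
    chosen = set(random.sample(reflection_positions,
                               k=min(m, len(reflection_positions))))
    out = []
    for i, step in enumerate(steps):
        out.append(step)
        if i in chosen:
            out.append(f"{BRIDGE} {caption}")
    return "\n".join(out)
```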
Periodic Visual Calibration. Calibrating visual attention plays a crucial role in enhancing long-chain reasoning capabilities. After training our model, we further design the periodic visual calibration process. Specifically, we coordinate operations during reactivation as follows: 1) Token Compression: We first compress the visual tokens using average pooling, preventing the text-based reasoning from being crowded out by an excess of visual tokens. 2) Visual Cache Reset: We then prepend an instruction (the bridging prompt) to reintroduce the image and re-inject the image tokens by resetting the KV cache of the generation process.

We also provide an example of how PVC is implemented in the case study section (Section 4.5). PVC both improves reasoning efficiency and prevents the model from forgetting previous reasoning steps due to an overload of visual tokens.
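The compression step has a straightforward reading, sketched below under the assumption that the visual tokens form an $h \times w$ grid of features; this is our reconstruction, not the released implementation.

```python
import torch
import torch.nn.functional as F

# A minimal sketch of PVC's token compression: 4x4 average pooling over
# the spatial grid of visual tokens, shrinking their count 16x while
# preserving the coarse spatial layout.
def compress_visual_tokens(vis: torch.Tensor, h: int, w: int, pool: int = 4):
    """vis: [h * w, dim] image features laid out on an h x w grid."""
    grid = vis.reshape(h, w, -1).permute(2, 0, 1).unsqueeze(0)  # [1, d, h, w]
    pooled = F.avg_pool2d(grid, kernel_size=pool)               # [1, d, h/4, w/4]
    return pooled.flatten(2).squeeze(0).transpose(0, 1)         # [h*w/16, dim]
```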
Figure 4: Data Generation Pipeline of TVC. We use iterative distillation to collect long-chain reasoning data, followed by a comprehensive response filtering process to ensure high-quality reasoning.

# 3 Data-Centric Implementation of Multimodal Reasoning System

In this section, we briefly describe our implementation of the multimodal reasoning system through a scalable, curated data generation pipeline.

# 3.1 Long-Chain Reasoning Data Collection

Prior research (Qin et al., 2024; Jiang et al., 2024) has identified two dominant paradigms for constructing long-chain reasoning systems: (1) explicit search-based methods, which utilize structures such as Monte Carlo Tree Search (MCTS) combined with specially trained reward models to guide the search toward optimal solutions, and (2) instruction distillation approaches, which fine-tune models on curated long chain-of-thought (CoT) datasets. To efficiently develop an MLLM with long-chain reasoning capabilities, we adopt the distillation paradigm. In this section, we describe the distillation process and present the data generation pipeline aimed at enhancing the reasoning capability of MLLMs.

Our distillation pipeline begins with aggregating publicly available reasoning datasets (e.g., MathV360K (Shi et al., 2024) and Geo170K (Gao et al., 2023)). Through an iterative process of distillation and response filtering, we progressively enhance the model's long-chain reasoning capabilities. Specifically, the teacher model, QVQ-72B-Preview, generates long CoT reasoning data, while the student model, Qwen2-VL, undergoes supervised fine-tuning on the filtered CoT data to improve its reasoning performance.
# 3.2 Iterative Distillation with Reject Sampling

After obtaining the long-chain reasoning responses, we employ an assessment pipeline to ensure data quality. Specifically, we introduce a dual-temperature sampling mechanism that optimizes data quality by exploiting sampling variance.

Deterministic Initial Sampling. For the first-stage data generation, we employ temperature $\tau = 0$ to obtain QVQ's most confident reasoning paths:

$$
\mathcal{D}_{\mathrm{init}} = \left\{\left(\mathbf{x}, \mathbf{y}_{\mathrm{CoT}}\right) \sim \mathcal{P}_{\mathrm{QVQ}}(\cdot \mid \mathbf{x}; \tau = 0)\right\}, \tag{4}
$$

where $\mathbf{y}_{\mathrm{CoT}}$ represents the response generated by QVQ. This ensures that the model generates the most probable reasoning path for each input. Subsequently, we use these high-confidence responses as a foundation for further refinement in later stages of data generation.

Answer-Centric Reject Sampling. To ensure data quality, we employ an LLM-as-a-Judge approach for answer-centric reject sampling. Specifically, we use a strong LLM (e.g., Qwen2.5-72B-Instruct) as the verifier and prompt it to output 'yes' or 'no' in JSON format, indicating whether the long-chain reasoning answer is correct, i.e., $\mathcal{Y}_{\mathrm{valid}} = \{\mathbf{y}_{\mathrm{CoT}} \mid \mathrm{Match}(\mathbf{y}_{\mathrm{CoT}}, \mathbf{y}_{\mathrm{gt}})\}$. After this process, we obtain $\sim 200\mathrm{K}$ high-quality samples.
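A hedged sketch of the judge call follows; `llm` is an assumed chat-completion callable and the prompt wording is ours, not the paper's.

```python
import json

# Sketch of the answer-centric judge: the verifier LLM returns a JSON
# yes/no verdict on whether the chain's final answer matches the target.
JUDGE_PROMPT = (
    "Judge whether the final answer of the reasoning chain matches the "
    "ground truth. Reply only with JSON {{\"correct\": \"yes\"}} or "
    "{{\"correct\": \"no\"}}.\nReasoning: {cot}\nGround truth: {gt}"
)

def is_valid(llm, cot, gt):
    reply = llm(JUDGE_PROMPT.format(cot=cot, gt=gt))
    try:
        return json.loads(reply).get("correct") == "yes"
    except json.JSONDecodeError:
        return False  # malformed verdicts are treated as rejections
```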
Best-of-N Error Correction. While initial sampling with temperature $\tau = 0$ yields a substantial dataset, there remain residual errors $\mathcal{D}_{\mathrm{error}}$ where QVQ's responses $\mathbf{y}_{\mathrm{CoT}}$ do not match the ground-truth answers (55.8% of $\mathcal{D}_{\mathrm{init}}$). To fully leverage the available open-source data, we implement a contrastive regeneration strategy using $\tau = 1$:

$$
\mathcal{D}_{\mathrm{corrected}} = \bigcup_{\mathbf{x} \in \mathcal{D}_{\mathrm{error}}} \left\{\arg\min_{\mathbf{y}^{i} \in \mathcal{Y}_{\mathrm{valid}}} \operatorname{Length}\left(\mathbf{y}^{i}\right)\right\}_{i=1}^{8}, \tag{5}
$$

where $\mathcal{Y}_{\mathrm{valid}}$ denotes the subset of the 8 regenerated responses (at $\tau = 1$) that satisfy answer correctness. For cases with multiple valid responses, we prioritize minimal reasoning token length to enhance conciseness and efficiency:
$$
\operatorname{Length}(\mathbf{y}) = \sum_{t=1}^{T} \mathbb{I}\left(w_{t} \notin \{[\mathrm{PAD}], [\mathrm{SEP}]\}\right) \tag{6}
$$

This two-stage sampling achieves partial error recovery while reducing the average token count to minimize meaningless self-reflection, effectively balancing correctness and efficiency. To further enhance the quality of the dataset, we also perform dynamic token truncation and reflection word pruning. These steps reduce the ineffective reflection spans in the dataset, thereby mitigating the issue of the model being unable to complete its answers. More details are provided in Section B.
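Putting Equations (5) and (6) together, the correction loop reduces to a short routine; the sketch below uses assumed `generate` and `judge` callables, with a whitespace split standing in for the non-[PAD]/[SEP] token count of Equation (6).

```python
# Sketch of Best-of-N error correction (Eqs. 5-6): regenerate 8 candidates
# at temperature 1, keep those the judge accepts, and return the shortest.
def best_of_n_correct(generate, judge, x, gt, n=8):
    candidates = [generate(x, temperature=1.0) for _ in range(n)]
    valid = [y for y in candidates if judge(y, gt)]
    if not valid:
        return None  # the sample remains uncorrected in D_error
    return min(valid, key=lambda y: len(y.split()))  # length proxy
```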
# 4 Experiments

We conduct comprehensive experiments across multiple vision-language benchmarks to demonstrate the effectiveness of our method. Section 4.1 provides implementation details for TVC. Section 4.3 presents key results on visual reasoning tasks, supplemented with findings on general image understanding. Section 4.4 details ablation studies that evaluate the influence of critical design decisions. Finally, Section 4.5 presents visualizations and case studies that illustrate the method's operational characteristics and the insights derived from them.

# 4.1 Training Recipe

We integrate the TVC system with MLLMs of varying scales to demonstrate the effectiveness and generalizability of our approach. An initial implementation with Qwen2-VL-7B-Instruct confirmed the method's validity. To further evaluate scalability and establish robust baselines against state-of-the-art MLLMs, we extended the approach to a 72B model. Prior to training TVC, we follow the long-chain reasoning pipeline described earlier. We use the LLaMA-Factory (Zheng et al., 2024) framework, with a learning rate of 2e-5, a batch size of 256, and 5 training epochs. During optimization, only the LLM parameters and the cross-modal connector are trained, while the visual encoder remains frozen. Training requires 10 hours on a 64-GPU H20 setup for the 7B model and approximately 4 days for the 72B model. Additional details are presented in Table 4.
# 4.2 Evaluation Setup

We conduct a comprehensive experimental analysis across visual reasoning benchmarks that require advanced visual reasoning skills. To ensure a well-rounded evaluation, we select several widely recognized and representative benchmarks, including MathVista (Lu et al., 2024a), MathVerse (Zhang et al., 2024a), MathVision (Wang et al., 2024a), Dynamath (Zou et al., 2024), and OlympiadBench (He et al., 2024). MathVista consists of 6,141 examples that require fine-grained, deep visual understanding and compositional reasoning. MathVerse contains 2,612 multi-subject math problems from a variety of sources. MathVision includes 3,040 high-quality mathematical problems sourced from established mathematics competitions. OlympiadBench features 8,476 bilingual multimodal problems tailored to Olympic-level mathematics and physics competitions. These benchmarks evaluate problem-solving abilities in mathematics, and following standard practice, we use GPT-4o-mini as the evaluator. Following the VLMEvalKit guidelines, we exclude the text-only split from MathVerse and the theorem-proof sections from OlympiadBench. For a fair comparison, we conduct evaluations using the testmini sets of MathVerse and MathVista. Fast evaluation is enabled by VLMEvalKit (Duan et al., 2024) and vLLM (Kwon et al., 2023).

Figure 5: Ablations on the amount of training data. TVC benefits from data scaling, continually improving the reasoning capabilities.
# 4.3 Main Results on Visual Reasoning

The results in Table 1 demonstrate the effectiveness and generalizability of TVC across multiple visual reasoning benchmarks. Notably, our model achieves a $16.7\%$ improvement over QVQ-72B-Preview on MathVision and a $17.6\%$ gain on MathVerse, highlighting enhanced reasoning capabilities. Unlike conventional datasets whose textual descriptions may include implicit visual cues, MathVerse is an all-around visual math benchmark specifically designed for equitable, in-depth evaluation of MLLMs; the significant gains on MathVerse therefore underscore the significance of TVC, given the benchmark's unique design principles. Furthermore, the TVC-7B model, despite being smaller than its counterparts, achieves competitive performance, even outperforming leading MLLMs in multiple cases. This demonstrates the robustness of our methodology even with more compact model configurations. Beyond task-specific visual reasoning, we extend our evaluation to general reasoning benchmarks (e.g., MathVista), where TVC consistently delivers strong performance, achieving a $3.6\%$ improvement over the original Qwen2-VL-72B model. These results emphasize TVC's ability to excel in tasks requiring both perception and reasoning. Collectively, the findings indicate that TVC not only advances performance in specialized visual reasoning tasks but also offers substantial benefits in broader application scenarios.

Table 1: Results on Visual Reasoning Tasks. We conduct evaluation experiments across 6 benchmarks, covering both general reasoning and task-specific reasoning assessments. TVC exhibits notable effectiveness and generalizability when applied to Qwen2-VL, surpassing other state-of-the-art MLLMs by a large margin.

<table><tr><td>Model</td><td>Size</td><td>MathVista</td><td>MathVision</td><td>MathVerse</td><td>Dynamath</td><td>OlympiadBench</td><td>Average</td></tr><tr><td>MiniCPM-V-2.6 (Yadav et al., 2025)</td><td>8B</td><td>60.8</td><td>18.4</td><td>17.6</td><td>9.8</td><td>-</td><td>-</td></tr><tr><td>VITA-1.5 (Fu et al., 2025a)</td><td>8B</td><td>66.2</td><td>19.5</td><td>23.4</td><td>9.6</td><td>-</td><td>-</td></tr><tr><td>LLaVA-COT (Xu et al., 2024)</td><td>11B</td><td>52.5</td><td>19.9</td><td>22.6</td><td>7.8</td><td>-</td><td>-</td></tr><tr><td>Qwen2-VL (Wang et al., 2024b)</td><td>7B</td><td>60.9</td><td>16.3</td><td>24.6</td><td>11.0</td><td>3.2</td><td>23.2</td></tr><tr><td>InternVL2.5 (Chen et al., 2024b)</td><td>8B</td><td>64.5</td><td>17.0</td><td>22.8</td><td>9.4</td><td>0.1</td><td>22.8</td></tr><tr><td>POINTS1.5 (Liu et al., 2024b)</td><td>8B</td><td>66.4</td><td>22.0</td><td>26.6</td><td>14.2</td><td>-</td><td>-</td></tr><tr><td>Ovis1.6-Gemma2 (Lu et al., 2024b)</td><td>27B</td><td>70.2</td><td>20.6</td><td>37.8</td><td>17.0</td><td>-</td><td>-</td></tr><tr><td>InternVL2.5-COT (Chen et al., 2024b)</td><td>78B</td><td>71.4</td><td>32.5</td><td>40.1</td><td>28.5</td><td>-</td><td>-</td></tr><tr><td>LLaVA-OneVision (Li et al., 2024)</td><td>72B</td><td>67.1</td><td>25.3</td><td>27.2</td><td>15.6</td><td>-</td><td>-</td></tr><tr><td>Qwen2-VL (Wang et al., 2024b)</td><td>72B</td><td>69.7</td><td>26.6</td><td>36.2</td><td>20.0</td><td>10.3</td><td>32.6</td></tr><tr><td>QVQ-72B-preview (QwenTeam, 2024)</td><td>72B</td><td>71.4</td><td>35.9</td><td>41.5</td><td>30.7</td><td>20.4</td><td>40.0</td></tr><tr><td>TVC</td><td>7B</td><td>68.1</td><td>22.7</td><td>38.9</td><td>15.1</td><td>9.8</td><td>30.9</td></tr><tr><td>TVC</td><td>72B</td><td>72.2</td><td>41.9</td><td>48.8</td><td>30.0</td><td>24.3</td><td>43.4</td></tr></table>

Table 2: Ablations on the TVC System. TVC enhances reasoning capabilities, showing significant improvements on both general and task-specific reasoning benchmarks.

<table><tr><td>Method</td><td>MathVista</td><td>MathVision</td><td>MathVerse</td><td>Avg</td></tr><tr><td>Baseline</td><td>60.9</td><td>16.3</td><td>24.6</td><td>33.9</td></tr><tr><td>Vanilla - Direct SFT</td><td>63.5</td><td>19.8</td><td>31.6</td><td>38.3</td></tr><tr><td>TVC w/o PVC</td><td>66.7</td><td>21.8</td><td>35.6</td><td>41.4</td></tr><tr><td>TVC w/o DVR</td><td>66.2</td><td>22.3</td><td>34.7</td><td>41.0</td></tr><tr><td>TVC Full</td><td>68.1</td><td>22.7</td><td>38.9</td><td>43.2</td></tr></table>
# 4.4 Further Analysis

In this section, we conduct comprehensive experiments to evaluate the design choices of TVC, emphasizing the key contributions of our approach. We also present a case study to further illustrate the qualitative effectiveness of TVC.

Effectiveness of the TVC System. To evaluate the effectiveness of the TVC system, we conduct comprehensive ablation experiments on its components, using Qwen2-VL-7B as the Baseline. We begin by performing supervised fine-tuning on the Qwen2-VL-7B model with the data from Section 3, referred to as Vanilla - Direct SFT. Furthermore, we apply the DVR training approach outlined in Section 2.2, which increases the focus on visual information in the training data, enabling the model to implicitly learn visual conditioning capabilities. Additionally, during the testing phase, we experiment with resetting the visual KV cache midway through the reasoning process: after compressing the visual tokens, we append them to the end of the reasoning steps so far. This strategy allows the model to re-examine the image content during its thought process, mitigating visual forgetting and suppressing hallucinations.

Table 3: Ablations on Token Compression.

<table><tr><td>Method</td><td>MathVista</td><td>MathVision</td><td>MathVerse</td><td>Avg</td></tr><tr><td>TVC Baseline</td><td>68.3</td><td>21.5</td><td>39.6</td><td>43.1</td></tr><tr><td>+ 2x2 Avg Pooling</td><td>67.8</td><td>22.9</td><td>38.3</td><td>43.0</td></tr><tr><td>+ 4x4 Avg Pooling</td><td>68.1</td><td>22.7</td><td>38.9</td><td>43.2</td></tr></table>
As demonstrated in Table 2, the results highlight that the TVC system is crucial for enhancing visual reasoning capabilities. When using only the SFT training data without the DVR strategy (Vanilla - Direct SFT), improvements on visual reasoning tasks are limited, as the model lacks the ability to reflect on visual information. Furthermore, models trained with the DVR strategy alone still produce sub-optimal results, underscoring the importance of a comprehensive approach that integrates PVC and DVR; the two contribute roughly equally to the gains in reasoning capability. To further validate the effectiveness of our visual token compression, we conduct experiments with different pooling methods. As shown in Table 3, where the TVC Baseline denotes the method without image compression, the choice of pooling method has little impact on the model's capabilities. Utilizing $4 \times 4$ average pooling for compression not only enhances the model's inference efficiency but also yields a slight performance improvement.

Figure 6: Case Study of TVC. TVC effectively re-examines the image during the reflection process to correct mistakes, guiding the model to the correct answer.
Data Scaling Law. To validate the scalability of our approach, we also conduct a data scaling experiment for training. As shown in Figure 5, we compare models trained on various data sizes: 50K, 100K, 150K, and 200K samples, and report performance relative to the base model (Qwen2-VL-7B). Our findings show that as the dataset size increases, the model's performance continues to improve. However, it is evident that with a small dataset, the model's reasoning ability cannot reach an optimal level through SFT alone. Data scaling therefore proves highly effective during SFT training, and the results indicate that TVC benefits from increased data.

To prevent underfitting from becoming a performance bottleneck, we increase the number of training epochs with smaller datasets, which further improves model performance. This indicates that insufficient data or epochs lead to undertrained models, making it difficult to learn long-chain reasoning. Increasing both the data and the training epochs can effectively alleviate this issue.
# 4.5 Case Study

We present a comprehensive case study in Figure 6 to illustrate the improvements of our TVC approach. Specifically, we provide an example that highlights advancements in the reasoning process. In this example, the model is tasked with carefully observing the objects within an image and, after eliminating certain shapes and attributes, reporting the count of the remaining objects. During base CoT reasoning, the model fails to check the attributes of each object and only focuses on their shapes, leading to an incorrect final answer. With TVC, the model learns to re-focus on the image and carefully describe the attributes of each object in detail. This process allows the model to identify the issue in its previous reasoning and provide the correct answer.
# 5 Conclusion

In this paper, we introduce Take-along Visual Conditioning (TVC), a novel strategy designed to enhance the reasoning capabilities of MLLMs by addressing the challenge of visual attention degradation during long-chain reasoning. By dynamically shifting the image input to critical stages of reasoning and compressing redundant visual tokens, we ensure that the model maintains focus on the visual information throughout the process. Our extensive evaluation on several mathematical reasoning benchmarks demonstrates the effectiveness of TVC in improving multimodal reasoning, providing a robust approach to equip MLLMs with better visual grounding for complex tasks.
# Acknowledgments

This work is partially supported by the National Key R&D Program of China (2024YFE0202800), NSFC (62376118), the Key Program of Jiangsu Science Foundation (BK20243012), the CCF-Tencent Rhino-Bird Open Research Fund (RAGR20240101), AMED (JP25wm0625405), and the Collaborative Innovation Center of Novel Software Technology and Industrialization.

# Limitations

Despite these advancements, our method may still exhibit several limitations. First, for highly complex reasoning tasks requiring sophisticated analytical capabilities, simply increasing visual revisits proves insufficient; it is also crucial to enhance the model's inherent reasoning capacity. Second, our method assumes the availability of delayed visual processing, making it potentially unsuitable for real-time applications requiring instantaneous visual feedback, such as robotic navigation or time-sensitive decision-making scenarios.
# References

Meta AI. 2024. Build the future of AI with Meta Llama 3. Technical report, Meta AI.

Anthropic. 2024. Introducing the next generation of Claude. Technical report, Anthropic.

Jinze Bai, Shuai Bai, Yunfei Chu, Zeyu Cui, Kai Dang, Xiaodong Deng, Yang Fan, Wenbin Ge, Yu Han, Fei Huang, et al. 2023. Qwen technical report. arXiv:2309.16609.

Liang Chen, Haozhe Zhao, Tianyu Liu, Shuai Bai, Junyang Lin, Chang Zhou, and Baobao Chang. 2024a. An image is worth 1/2 tokens after layer 2: Plug-and-play inference acceleration for large vision-language models. Preprint, arXiv:2403.06764.

Zhe Chen, Weiyun Wang, Yue Cao, Yangzhou Liu, Zhangwei Gao, Erfei Cui, Jinguo Zhu, Shenglong Ye, Hao Tian, Zhaoyang Liu, et al. 2024b. Expanding performance boundaries of open-source multimodal models with model, data, and test-time scaling. arXiv preprint arXiv:2412.05271.

Zhe Chen, Jiannan Wu, Wenhai Wang, Weijie Su, Guo Chen, Sen Xing, Zhong Muyan, Qinglong Zhang, Xizhou Zhu, Lewei Lu, et al. 2023. InternVL: Scaling up vision foundation models and aligning for generic visual-linguistic tasks. arXiv:2312.14238.

DeepSeek-AI. 2024. DeepSeek-R1: Incentivizing reasoning capability in LLMs via reinforcement learning. Technical report, DeepSeek.

Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. 2024. Insight-V: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432.

Haodong Duan, Junming Yang, Yuxuan Qiao, Xinyu Fang, Lin Chen, Yuan Liu, Xiaoyi Dong, Yuhang Zang, Pan Zhang, Jiaqi Wang, et al. 2024. VLMEvalKit: An open-source toolkit for evaluating large multi-modality models. arXiv preprint arXiv:2407.11691.

Chaoyou Fu, Haojia Lin, Xiong Wang, Yi-Fan Zhang, Yunhang Shen, Xiaoyu Liu, Yangze Li, Zuwei Long, Heting Gao, Ke Li, et al. 2025a. VITA-1.5: Towards GPT-4o level real-time vision and speech interaction. arXiv preprint arXiv:2501.01957.

Jiale Fu, Yuchu Jiang, Junkai Chen, Jiaming Fan, Xin Geng, and Xu Yang. 2025b. Speculative ensemble: Fast large language model ensemble via speculation. arXiv preprint arXiv:2502.01662.

Jiahui Gao, Renjie Pi, Jipeng Zhang, Jiacheng Ye, Wanjun Zhong, Yufei Wang, Lanqing Hong, Jianhua Han, Hang Xu, Zhenguo Li, et al. 2023. G-LLaVA: Solving geometric problems with multi-modal large language model. arXiv preprint arXiv:2312.11370.

Jarvis Guo, Tuney Zheng, Yuelin Bai, Bo Li, Yubo Wang, King Zhu, Yizhi Li, Graham Neubig, Wenhu Chen, and Xiang Yue. 2024. MAmmoTH-VL: Eliciting multimodal reasoning with instruction tuning at scale. arXiv preprint arXiv:2412.05237.

Chaoqun He, Renjie Luo, Yuzhuo Bai, Shengding Hu, Zhen Leng Thai, Junhao Shen, Jinyi Hu, Xu Han, Yujie Huang, Yuxiang Zhang, et al. 2024. OlympiadBench: A challenging benchmark for promoting AGI with olympiad-level bilingual multimodal scientific problems. arXiv preprint arXiv:2402.14008.

Yushi Hu, Weijia Shi, Xingyu Fu, Dan Roth, Mari Ostendorf, Luke Zettlemoyer, Noah A. Smith, and Ranjay Krishna. 2024. Visual Sketchpad: Sketching as a visual chain of thought for multimodal language models. arXiv preprint arXiv:2406.09403.

Jinhao Jiang, Zhipeng Chen, Yingqian Min, Jie Chen, Xiaoxue Cheng, Jiapeng Wang, Yiru Tang, Haoxiang Sun, Jia Deng, Wayne Xin Zhao, et al. 2024. Technical report: Enhancing LLM reasoning with reward-guided tree search. arXiv preprint arXiv:2411.11694.

Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. 2023. Efficient memory management for large language model serving with PagedAttention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles.

Bo Li, Yuanhan Zhang, Dong Guo, Renrui Zhang, Feng Li, Hao Zhang, Kaichen Zhang, Peiyuan Zhang, Yanwei Li, Ziwei Liu, et al. 2024. LLaVA-OneVision: Easy visual task transfer. arXiv preprint arXiv:2408.03326.

Chunyuan Li, Cliff Wong, Sheng Zhang, Naoto Usuyama, Haotian Liu, Jianwei Yang, Tristan Naumann, Hoifung Poon, and Jianfeng Gao. 2023. LLaVA-Med: Training a large language-and-vision assistant for biomedicine in one day. arXiv:2306.00890.

Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. 2024a. LLaVA-NeXT: Improved reasoning, OCR, and world knowledge.

Yuan Liu, Le Tian, Xiao Zhou, Xinyu Gao, Kavio Yu, Yang Yu, and Jie Zhou. 2024b. POINTS1.5: Building a vision-language model towards real world applications. arXiv preprint arXiv:2412.08443.

Pan Lu, Hritik Bansal, Tony Xia, Jiacheng Liu, Chunyuan Li, Hannaneh Hajishirzi, Hao Cheng, Kai-Wei Chang, Michel Galley, and Jianfeng Gao. 2024a. MathVista: Evaluating mathematical reasoning of foundation models in visual contexts. In ICLR.

Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. 2023. Chameleon: Plug-and-play compositional reasoning with large language models. Advances in Neural Information Processing Systems, 36:43447-43478.

Shiyin Lu, Yang Li, Qing-Guo Chen, Zhao Xu, Weihua Luo, Kaifu Zhang, and Han-Jia Ye. 2024b. Ovis: Structural embedding alignment for multimodal large language model. arXiv preprint arXiv:2405.20797.

Brandon McKinzie, Zhe Gan, Jean-Philippe Fauconnier, Sam Dodge, Bowen Zhang, Philipp Dufter, Dhruti Shah, Xianzhi Du, Futang Peng, Floris Weers, et al. 2024. MM1: Methods, analysis & insights from multimodal LLM pre-training. arXiv:2403.09611.

OpenAI. 2024a. GPT-4o: Hello GPT-4o. Technical report, OpenAI.

OpenAI. 2024b. Learning to reason with LLMs. Technical report, OpenAI.

Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. 2025. LMM-R1: Empowering 3B LMMs with strong reasoning abilities through two-stage rule-based RL. arXiv preprint arXiv:2503.07536.

Yiwei Qin, Xuefeng Li, Haoyang Zou, Yixiu Liu, Shijie Xia, Zhen Huang, Yixin Ye, Weizhe Yuan, Hector Liu, Yuanzhi Li, et al. 2024. O1 replication journey: A strategic progress report, part 1. arXiv preprint arXiv:2410.18982.

QwenTeam. 2024. QVQ: To see the world with wisdom. Technical report, Alibaba.

Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. 2021. Learning transferable visual models from natural language supervision. In ICML.

Machel Reid, Nikolay Savinov, Denis Teplyashin, Dmitry Lepikhin, Timothy Lillicrap, Jean-Baptiste Alayrac, Radu Soricut, Angeliki Lazaridou, Orhan Firat, Julian Schrittwieser, et al. 2024. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context. arXiv:2403.05530.

Wenhao Shi, Zhiqiang Hu, Yi Bin, Junhua Liu, Yang Yang, See-Kiong Ng, Lidong Bing, and Roy Ka-Wei Lee. 2024. Math-LLaVA: Bootstrapping mathematical reasoning for multimodal large language models. arXiv preprint arXiv:2406.17294.

Hai-Long Sun, Da-Wei Zhou, Yang Li, Shiyin Lu, Chao Yi, Qing-Guo Chen, Zhao Xu, Weihua Luo, Kaifu Zhang, De-Chuan Zhan, et al. 2025a. Parrot: Multilingual visual instruction tuning. In ICML.

Hai-Long Sun, Da-Wei Zhou, De-Chuan Zhan, and Han-Jia Ye. 2025b. PILOT: A pre-trained model-based continual learning toolbox. SCIENCE CHINA Information Sciences, 68(4):147101.

Hai-Long Sun, Da-Wei Zhou, Hanbin Zhao, Le Gan, De-Chuan Zhan, and Han-Jia Ye. 2025c. MOS: Model surgery for pre-trained model-based class-incremental learning. In AAAI, volume 39, pages 20699-20707.

Gemini Team, Rohan Anil, Sebastian Borgeaud, Yonghui Wu, Jean-Baptiste Alayrac, Jiahui Yu, Radu Soricut, Johan Schalkwyk, Andrew M. Dai, Anja Hauth, et al. 2023. Gemini: A family of highly capable multimodal models. arXiv preprint arXiv:2312.11805.

Shengbang Tong, Ellis Brown, Penghao Wu, Sanghyun Woo, Manoj Middepogu, Sai Charitha Akula, Jihan Yang, Shusheng Yang, Adithya Iyer, Xichen Pan, et al. 2024. Cambrian-1: A fully open, vision-centric exploration of multimodal LLMs. arXiv preprint arXiv:2406.16860.

Ke Wang, Junting Pan, Weikang Shi, Zimu Lu, Mingjie Zhan, and Hongsheng Li. 2024a. Measuring multimodal mathematical reasoning with the MATH-Vision dataset. Preprint, arXiv:2402.14804.

Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. 2024b. Qwen2-VL: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191.

Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Ed Chi, Quoc Le, and Denny Zhou. 2022. Chain-of-thought prompting elicits reasoning in large language models. arXiv:2201.11903.

Guowei Xu, Peng Jin, Li Hao, Yibing Song, Lichao Sun, and Li Yuan. 2024. LLaVA-o1: Let vision language models reason step-by-step. arXiv preprint arXiv:2411.10440.

Ankit Yadav, Lingqiao Liu, and Yuankai Qi. 2025. Exploring primitive visual measurement understanding and the role of output format in learning in vision-language models. arXiv preprint arXiv:2501.15144.

Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. 2023. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11975-11986.

Renrui Zhang, Dongzhi Jiang, Yichi Zhang, Haokun Lin, Ziyu Guo, Pengshuo Qiu, Aojun Zhou, Pan Lu, Kai-Wei Chang, Peng Gao, et al. 2024a. MathVerse: Does your multi-modal LLM truly see the diagrams in visual math problems? arXiv preprint arXiv:2403.14624.

Renrui Zhang, Xinyu Wei, Dongzhi Jiang, Ziyu Guo, Shicheng Li, Yichi Zhang, Chengzhuo Tong, Jiaming Liu, Aojun Zhou, Bin Wei, et al. 2024b. MAVIS: Mathematical visual instruction tuning with an automatic data engine. arXiv preprint arXiv:2407.08739.

Yaowei Zheng, Richong Zhang, Junhao Zhang, Yanhan Ye, Zheyan Luo, Zhangchi Feng, and Yongqiang Ma. 2024. LlamaFactory: Unified efficient fine-tuning of 100+ language models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 3: System Demonstrations), Bangkok, Thailand. Association for Computational Linguistics.

Wenwen Zhuang, Xin Huang, Xiantao Zhang, and Jin Zeng. 2024. Math-PUMA: Progressive upward multimodal alignment to enhance mathematical reasoning. arXiv preprint arXiv:2408.08640.

Chengke Zou, Xingang Guo, Rui Yang, Junyu Zhang, Bin Hu, and Huan Zhang. 2024. DynaMath: A dynamic visual benchmark for evaluating mathematical reasoning robustness of vision language models. arXiv preprint arXiv:2411.00836.
# A Related Work

Multimodal Large Language Models. Multimodal Large Language Models (MLLMs) (Li et al., 2023; Liu et al., 2024a; Sun et al., 2025a; Wang et al., 2024b; Lu et al., 2024b; McKinzie et al., 2024; Sun et al., 2025c,b; Fu et al., 2025b; Dong et al., 2024) integrate vision encoders (Radford et al., 2021; Zhai et al., 2023) with LLMs (AI, 2024; Bai et al., 2023), endowing them with robust capabilities across a wide range of domains. These include general visual understanding (OpenAI, 2024a; Li et al., 2024), mathematical reasoning (Shi et al., 2024; Gao et al., 2023), and answering college-level questions (Chen et al., 2023), demonstrating their versatility in real-world tasks. The rapid advancements of open-source models have also spurred the development of proprietary models, such as GPT-4o (OpenAI, 2024a), Gemini (Team et al., 2023; Reid et al., 2024), Qwen2-VL-MAX (Wang et al., 2024b), and Claude 3 (Anthropic, 2024). These models have demonstrated remarkable performance in both evaluation benchmarks and practical applications, solidifying their position at the forefront of AI research and deployment.

Reasoning with MLLMs. Recent advancements in MLLMs have significantly enhanced performance in reasoning tasks across both text and multimodal scenarios (OpenAI, 2024b; DeepSeek-AI, 2024; QwenTeam, 2024; Peng et al., 2025). Current methods primarily rely on CoT (Wei et al., 2022) to train MLLMs for step-by-step reasoning. Data-driven approaches include Math-LLaVA (Shi et al., 2024), which introduced the MathV360K dataset, and MAmmoTH-VL (Guo et al., 2024), which curates a large-scale multimodal CoT dataset in a scalable manner. Another line of research explores vision-text alignment. MAVIS (Zhang et al., 2024b) fine-tunes a math-specific vision encoder with curated caption data, while Math-PUMA (Zhuang et al., 2024) leverages the Kullback-Leibler (KL) divergence of next-token prediction distributions for modality alignment. In a different paradigm, MLLMs act as coordinators, utilizing external tools such as LLMs, web search engines, and computer programs for complex reasoning. Chameleon (Lu et al., 2023) orchestrates tool-call sequences, and Visual Sketchpad (Hu et al., 2024) enables models to generate visual sketches to aid reasoning.
# B More Details of Reasoning Dataset

In this section, we provide a detailed description of the dynamic token truncation and reflection word pruning used in constructing the reasoning dataset. We also provide detailed information about the training data in Table 5.

# B.1 Dynamic Token Truncation

To further improve dataset quality, we analyze the distribution of token lengths after the answer-centric rejection sampling. We find that many samples are close to the maximum token limit, and manual checks show that these long reasoning chains often have problems, such as logical errors, mistakes in multi-step calculations, and reliance on shortcuts that do not hold in general cases (e.g., substituting specific values). Motivated by the correlation between extreme token lengths and compromised solution quality, we implement adaptive truncation thresholds to keep the answers within the 200-8000 token range. This dynamic filtering not only eliminates invalid cases (overly verbose or terse responses) but also enhances the overall quality of the data. The final length distribution matches how human experts solve problems and keeps the important reasoning steps intact.
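The filter itself is simple; a minimal sketch follows, assuming a Hugging Face-style `tokenizer`.

```python
# Sketch of the adaptive length filter: keep only responses whose token
# count lies within the 200-8000 range stated above.
def length_ok(tokenizer, response, lo=200, hi=8000):
    n_tokens = len(tokenizer(response)["input_ids"])
    return lo <= n_tokens <= hi

# Usage: dataset = [ex for ex in dataset if length_ok(tok, ex["response"])]
```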
# B.2 Reflection Word Pruning

Our analysis reveals a critical flaw in distilled reasoning chains: excessive metacognitive loops caused by uncontrolled reflection markers (e.g., 'Alternatively,' 'Wait'), which lead to performance degradation through infinite loops or ungrounded speculation. Term-frequency analysis of reflection density shows a heavy-tailed distribution: $95\%$ of samples contain fewer than 10 reflection markers per chain, while $1\%$ exhibit over 50 markers, and this extreme group correlates strongly with hallucination rates. To address this, we introduce a reflection token quota that automatically prunes samples exceeding 25 reflection markers while retaining the core reasoning logic via semantic-aware span detection. As shown in Figure 4, this approach significantly reduces infinite-loop instances in validation tasks while improving answer accuracy. The refined reflection pattern mirrors expert human problem-solving strategies, wherein targeted self-correction enhances, rather than disrupts, the continuity of the reasoning process.
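The quota check reduces to counting markers; the sketch below is ours, and the marker list is illustrative since the text names only 'Alternatively' and 'Wait' as examples.

```python
import re

# Sketch of the reflection-token quota: prune chains with more than 25
# reflection markers (the threshold stated above).
REFLECTION_MARKERS = re.compile(r"\b(?:Alternatively|Wait)\b")

def within_reflection_quota(chain, quota=25):
    return len(REFLECTION_MARKERS.findall(chain)) <= quota
```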
Figure 7: The token and reflection word distribution of the long-chain reasoning dataset.

Table 4: The detailed training hyperparameters.

<table><tr><td>Config</td><td>SFT</td></tr><tr><td>Deepspeed</td><td>Zero3</td></tr><tr><td>Epoch</td><td>5</td></tr><tr><td>Warmup Ratio</td><td>0.1</td></tr><tr><td>Max Grad Norm</td><td>1.0</td></tr><tr><td>Optimizer</td><td>AdamW</td></tr><tr><td>Learning rate</td><td>2e-5</td></tr><tr><td>Learning rate scheduler</td><td>Cosine</td></tr><tr><td>Text max length</td><td>8192</td></tr><tr><td>Batch size per GPU</td><td>1</td></tr><tr><td>Gradient Accumulation Steps</td><td>4</td></tr><tr><td>GPU</td><td>64×H20-96G</td></tr><tr><td>Precision</td><td>Bf16</td></tr></table>
# C Discussion

In this work, we investigate the phenomenon of visual information forgetting in MLLMs during long-chain reasoning. Through comprehensive analysis and experiments, we show that as reasoning chains lengthen, models exhibit a gradual deterioration in retaining visual inputs, ultimately undermining their multimodal reasoning capabilities and exacerbating hallucination issues. To address this challenge, we propose a take-along visual conditioning mechanism that enables models to dynamically revisit visual inputs during reasoning steps, thereby enhancing content fidelity throughout the inference process.

However, as illustrated in Figure 8, we acknowledge several limitations. First, for highly complex reasoning tasks requiring sophisticated analytical capabilities, simply increasing visual revisits proves insufficient; it is also crucial to enhance the model's inherent reasoning capacity. Second, our method assumes the availability of delayed visual processing, making it potentially unsuitable for real-time applications requiring instantaneous visual feedback, such as robotic navigation or time-sensitive decision-making scenarios.

Table 5: Details on TVC's training data, which is derived from publicly available datasets.

<table><tr><td>Datasets</td><td>Samples</td></tr><tr><td>MathV360K (Shi et al., 2024)</td><td>221K</td></tr><tr><td>Geo170K (Gao et al., 2023)</td><td>22K</td></tr><tr><td>LLaVA-OneVision (Li et al., 2024)</td><td>97K</td></tr><tr><td>Cambrian-1 (Tong et al., 2024)</td><td>1K</td></tr></table>

Our work represents an initial exploration into mitigating visual forgetting in extended multimodal reasoning chains. We envision future research directions including: (1) developing hybrid architectures that synergistically enhance both visual retention and intrinsic reasoning capabilities; (2) investigating adaptive attention mechanisms for real-time multimodal applications; and (3) exploring curriculum learning strategies to progressively strengthen long-chain reasoning capacities. We hope this foundational study will inspire further advances in understanding and improving multimodal reasoning systems for complex real-world applications.
Figure 8: Qualitative Results of TVC.

Which of the cubes is not identical to the unfolded net?

Choices: (A) A (B) B (C) C (D) D (E) E

Let's see here. I've got this problem where there's an unfolded net of a cube, and then there are several cubes shown below with letters A through E. The question is asking which one of these cubes is not identical to the unfolded net. Okay, so I need to compare each cube to the net and see if they match or not.
Let me label the squares for clarity:

1. Front face (F)
2. Back face (B)
3. Top face (T)
4. Bottom face (D)
5. Left face (L)
6. Right face (R)

Let's look at each cube option:
A) This is a cube with letters on its faces. Wait, no, it's a cube with colors, similar to the others. Wait, actually, all cubes have combinations of teal and yellow faces.

I need to carefully observe the color placement on each cube.

Option A:

- Front face: teal
- Top face: yellow
- Right face: teal
- Left face: yellow
- Back face: teal
- Bottom face: yellow

Option E:

- Front: teal
- Top: yellow
- Right: teal
- Left: yellow
- Back: teal
- Bottom: yellow

Comparing to the net: F(t), T(y), R(t), L(y), B(y), D(t)

So, according to the net, B should be yellow, but in Option E, it's teal.

Therefore, Option E differs from the net in the back face color.

Hence, Option E is not identical to the unfolded net.

Therefore, the answer is E.
data/2025/2503_13xxx/2503.13360/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:63e0e9e97292e1d1dd24b0c5678eb16c5663af837eb3eab8d5085e071fb1eb6b
size 535793

data/2025/2503_13xxx/2503.13360/layout.json
ADDED
The diff for this file is too large to render. See raw diff.

data/2025/2503_13xxx/2503.13377/48689f30-a9be-48a0-a682-282cc19b9dc2_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.

data/2025/2503_13xxx/2503.13377/48689f30-a9be-48a0-a682-282cc19b9dc2_model.json
ADDED
The diff for this file is too large to render. See raw diff.

data/2025/2503_13xxx/2503.13377/48689f30-a9be-48a0-a682-282cc19b9dc2_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:530716976b130bbdd34ea7da590f9663a1e77405bbcc8a6eeccaf58d2de08fea
size 4081235

data/2025/2503_13xxx/2503.13377/full.md
ADDED
@@ -0,0 +1,716 @@
# Time-R1: Post-Training Large Vision Language Model for Temporal Video Grounding
Ye Wang$^{1*}$ Ziheng Wang$^{1*}$ Boshen Xu$^{1*‡}$ Yang Du$^{1}$ Kejun Lin$^{1}$ Zihan Xiao$^{3}$ Zihao Yue$^{1}$ Jianzhong Ju$^{2}$ Liang Zhang$^{1}$ Dingyi Yang$^{1}$ Xiangnan Fang$^{1}$ Zewen He$^{2}$ Zhenbo Luo$^{2}$ Wenxuan Wang$^{1}$ Junqi Lin$^{2}$ Jian Luan$^{2}$ Qin Jin$^{1†}$

$^{1}$ AIM3 Lab, Renmin University of China $^{2}$ MiLM Plus, Xiaomi Inc. $^{3}$ Independent Researcher. Project Page: https://xuboshen.github.io/Time-R1/

# Abstract

Temporal Video Grounding (TVG), the task of locating specific video segments based on language queries, is a core challenge in long-form video understanding. While recent Large Vision-Language Models (LVLMs) have shown early promise in tackling TVG through supervised fine-tuning (SFT), their abilities to generalize remain limited. To address this, we propose a novel post-training framework that enhances the generalization capabilities of LVLMs via reinforcement learning (RL). Specifically, our contributions span three key directions: (1) Time-R1: we introduce a reasoning-guided post-training framework via RL with verifiable reward to enhance the capabilities of LVLMs on the TVG task. (2) TimeRFT: we explore data-efficient post-training strategies on our curated RL-friendly dataset, which trains the model to progressively comprehend difficult samples, leading to better generalization. (3) TVGBench: we carefully construct a small yet comprehensive benchmark for LVLM evaluation, assessing 11 types of queries and featuring balanced distributions across both videos and queries. Extensive experiments demonstrate that Time-R1 achieves state-of-the-art performance across multiple downstream datasets using only 2.5K training data, while improving its general video understanding capabilities.

# 1 Introduction

Understanding long-form videos has long been a core ambition in computer vision [14, 24, 9]. A critical step toward this goal is Temporal Video Grounding (TVG) [15, 61], which enables models to localize video segments corresponding to natural language queries (e.g., "Find the segment where a person walks into the living room"). Such capability is fundamental for real-world applications, including smart home assistants [55, 16, 46] and video retrieval systems on online platforms [5, 3].

Traditional TVG approaches adopt a feature-based paradigm, where pretrained models (e.g., CLIP [42], I3D [6]) extract text and video features, followed by task-specific grounding models [30, 22, 29]. However, these methods suffer from error accumulation due to imperfect pretrained features. To overcome these limitations, recent efforts have shifted toward end-to-end Large Vision-Language Models (LVLMs) [44, 60], which directly process long-form videos and text queries. Despite being pretrained on datasets $100\times$ larger than domain-specific benchmarks [46], LVLMs (with $7\mathrm{B}+$ parameters) often underperform compared to much smaller feature-based models (e.g., 9M parameters [22]). This raises a critical question: Why do LVLMs, despite their vast pretrained knowledge, fail to excel on TVG?
[Figure 1 panels: an overview of the three contributions (the Time-R1 post-training framework; the TimeRFT training strategy and dataset; the TVGBench evaluation benchmark with 2.5K data, 11 query types, and balanced distributions), alongside qualitative examples of improved temporal video grounding (148s video), short video QA (25s video), and long video QA (59m video), each contrasting base-model and Time-R1 reasoning traces.]

Figure 1: Our contributions include a novel post-training framework for LVLMs via reinforcement learning, Time-R1; an RL fine-tuning strategy along with its associated training dataset, TimeRFT; and a new benchmark, TVGBench, for evaluating LVLMs on the TVG task. Our Time-R1 model not only achieves SoTA TVG performance, but also enhances performance on both short- and long-form multi-choice video question answering tasks.

We attribute the suboptimal performance of LVLMs to over-penalization of false negatives during supervised fine-tuning (SFT). For instance, when the ground-truth timestamp is [2s, 4s], even a reasonable prediction of [1.9s, 3.9s] incurs an undesirably high autoregressive loss. Such disproportionate penalties on reasonable predictions result in overfitting and poor generalization. While previous solutions have attempted to address this by introducing new timestamp tokens into the vocabulary [18, 17, 53] or by appending a regression head to predict timestamps [65], they often sacrifice the pretrained language capabilities of LLMs.
Inspired by recent success in reinforcement learning (RL) for post-training LLMs [39, 1] with chain-of-thought (CoT) prompting, especially in domains with deterministic answers, such as code generation and mathematical reasoning, we explore whether RL can serve as an effective alternative for TVG. Unlike SFT, RL allows direct optimization of task-specific metrics (e.g., IoU), thereby reducing rigid penalties of autoregressive losses and encouraging plausible timestamp predictions. In this work, we present an RL-based framework, Time-R1, that effectively post-trains LVLMs for TVG and pushes the performance frontier. Our contributions include:


(a) VLP

(b) SFT

(c) RL (ours)

Figure 2: Comparison of different approaches for the TVG task, including feature-based video-language pretraining (VLP) [37, 22], supervised fine-tuning (SFT) [41, 60], and RL (ours).
- RL-based framework for temporal video grounding. We introduce Time-R1, a reasoning-enhanced post-training framework via RL with verifiable rewards, where the LVLM first generates chain-of-thought descriptions and then predicts timestamps. The post-training process is optimized using Group Relative Policy Optimization (GRPO) with a novel reward function, incorporating both a structured template reward and a timestamp-aware tIoU reward.
- Time-aware reinforcement fine-tuning. We propose TimeRFT, a reinforcement fine-tuning strategy with dynamic hard sampling, which mines hard samples on a curated dataset and progressively selects low-IoU samples for multi-epoch training. To ensure stable reasoning and reduce hallucinations, we adopt a cold-start approach to generate CoT with video captions. To support RL-friendly training, we curate an RFT dataset with difficulty annotations on the TVG task.
- Comprehensive benchmark for LVLMs on TVG. Existing TVG benchmarks are designed for the large-scale evaluation of small models. Considering the inference speed bottlenecks and general-purpose role of LVLMs, we construct TVGBench, a compact yet comprehensive benchmark for TVG. We carefully balance the video distribution, query distribution, and design specific query semantics to ensure that the benchmark is well-suited for evaluating LVLMs.
- State-of-the-Art results and generalization. Compared with 7B LVLMs on the temporal video grounding task, our method outperforms all prior SFT-based methods with only 2.5K training data. After fine-tuning on downstream benchmarks like Charades [46] and ActivityNet [5], it surpasses many previous feature-based approaches. Furthermore, Time-R1 also improves the general video understanding on video QA benchmarks like MVBench [26] and VideoMME [13].
# 2 Related Works
Temporal video grounding. The TVG task [15, 3] aims to localize temporal segments in untrimmed long videos given natural language queries. Previous works can be broadly categorized into feature-based video-language pretraining and frame-based LVLM methods, as shown in Figure 2. Feature-based methods first extract video and language features using pre-trained encoders (e.g., I3D [6], EgoVLP [29], CLIP [42], BERT [12], etc.), then build timestamp prediction modules based on multimodal fused features. Existing methods differ mainly in their design of the multimodal fusion module and timestamp prediction module. For example, SnAG [37] adopts a late fusion strategy and regresses timestamps directly in a single forward pass without proposal generation. While effective, these approaches are fundamentally limited by the quality of the pretrained features. Recent efforts have shifted toward end-to-end frame-based methods by fine-tuning LVLMs using SFT with autoregressive losses [27, 44, 60, 19, 52, 18, 25]. For instance, TRACE [18] treats each event as a combination of timestamp, saliency score, and caption, and fine-tunes the LVLM to generate event sequences autoregressively. However, such methods still underperform on even simple TVG benchmarks like Charades [46], often lagging behind feature-based approaches. In this work, we propose a novel RL-based post-training framework that establishes new state-of-the-art performance for LVLMs on TVG tasks, even surpassing many feature-based methods.
RL in LLMs and LVLMs. RL is a foundational machine learning paradigm applied in diverse domains such as game playing [47], robotics [34], and increasingly, language and vision-language models [39]. RL updates models by interacting with environments and maximizing reward signals. In recent years, RL has profoundly affected the field of LLM and LVLM post-training, which falls into two main categories: Reinforcement Learning with Human Feedback (RLHF) [40, 57] and Reinforcement Learning with Verifiable Reward (RLVR) [1, 39, 7]. Early works found that RLHF can align LLMs with human preference data, which primarily reduces safety risks in LLMs and hallucination problems in LVLMs. For example, RLHF-V [57] collects fine-grained pairs of incorrect and corrected captions and leverages direct preference optimization to optimize the model to generate correct captions, thus mitigating hallucinations. More recent works have explored RLVR in tasks with deterministic answers, which not only benefits mathematical problem solving and code generation in LLMs (e.g., DeepSeek-R1 [1]), but also enhances the generalization of LVLMs across a range of applications, such as visual grounding [32] and visual reasoning [48]. As a pioneer among open-source LLMs, DeepSeek-R1 [1] adopts GRPO to enhance reasoning capabilities by designing rule-based rewards that incorporate both reasoning templates and final answers. In the context of LVLMs, MM-Eureka [35] investigates multimodal image reasoning using GRPO, introducing an online filtering mechanism and a two-stage training strategy to stabilize the optimization process. However, existing approaches remain confined to language [1, 56], image understanding [7, 48, 32, 35], and short video understanding [64, 28]. It remains unclear whether and how reinforcement learning impacts long-form video understanding. To bridge this gap, we take a first step by introducing RLVR into LVLMs for the temporal video grounding task.
# 3 Method
The TVG task aims to temporally localize video segments within long-form videos based on natural language queries. Given a video of duration $t$ seconds, which is represented as a sequence of $T$ frames $\{x_{1},\ldots ,x_{T}\}$ , and a language query $q$ , the goal is to identify the temporal boundaries $[t_s,t_e]$ of the segment that best corresponds to $q$ , where $t_s,t_e\in \mathbb{R}^+$ . In this work, we introduce Time-R1, a framework designed to unleash the potential of LVLMs for the TVG task using RL. We first provide background on RL-based training for LLMs in Section 3.1, then detail the training procedure of Time-R1 in Section 3.2. Next, we describe specific training techniques used in TimeRFT in Section 3.3, and finally, we present the construction of our evaluation benchmark TVGBench in Section 3.4.
# 3.1 Background of GRPO: RL for LLM
As a pioneer among open-sourced R1-style LLMs, DeepSeek-R1 [1] leverages GRPO to train the policy model $\pi_{\theta}$ (i.e., the LLM) to think before answering, making it particularly well-suited for tasks with well-defined answers, such as mathematical reasoning. In the GRPO framework, given an input question $p$ , the LLM samples $G$ candidate responses $o = \{o_1,\dots,o_G\}$ , and a reward function $r(\cdot)$ assigns a reward score to each response, yielding $\{r(o_1),\ldots ,r(o_G)\}$ . GRPO encourages the LLM to generate responses that maximize a weighted sum reward $R(o)$ , defined by:

$$
R(o) = \sum_{i=1}^{G} \frac{\pi_{\theta}(o_i)}{\pi_{\theta_{\mathrm{old}}}(o_i)} \cdot \frac{r(o_i) - \operatorname{mean}\left(\{r(o_i)\}_{i=1}^{G}\right)}{\operatorname{std}\left(\{r(o_i)\}_{i=1}^{G}\right)} \tag{1}
$$

where $\pi_{\theta}(o)$ denotes the probability of LLM generating the response $o$ , and $\pi_{\theta_{\mathrm{old}}}$ represents the LLM parameters from a recently optimized state. To ensure training stability and avoid large deviations from the original language model behavior, the final training objective incorporates a KL-divergence regularization term [1], penalizing divergence between $\pi_{\theta}$ and $\pi_{\mathrm{ref}}$ :

$$
\max_{\pi_{\theta}} \; \mathbb{E}_{o \sim \pi_{\theta_{\mathrm{old}}}(p)} \left[ R(o) - \beta \, \mathrm{D}_{\mathrm{KL}}\left(\pi_{\theta} \,\|\, \pi_{\mathrm{ref}}\right) \right] \tag{2}
$$

where $\beta$ is a scaling coefficient. We omit the clipping operation for simplicity.
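For concreteness, the following is a minimal sketch of Equations 1 and 2 for a single group of $G$ responses; it is not the authors' released code, and how the log-probabilities and the KL estimate are obtained is assumed.

```python
import torch

def grpo_weighted_reward(logp_new, logp_old, rewards, eps=1e-6):
    """Eq. 1: standardize rewards within the group of G responses, then
    weight each by the importance ratio pi_theta / pi_theta_old."""
    rewards = torch.as_tensor(rewards, dtype=torch.float32)
    advantage = (rewards - rewards.mean()) / (rewards.std() + eps)
    ratio = torch.exp(logp_new - logp_old)  # pi_theta(o_i) / pi_theta_old(o_i)
    return (ratio * advantage).sum()

def grpo_loss(logp_new, logp_old, rewards, kl_to_ref, beta=0.04):
    """Eq. 2, negated so a standard minimizer can be used: weighted reward
    minus a KL penalty toward the frozen reference policy (clipping omitted,
    as in the text)."""
    return -(grpo_weighted_reward(logp_new, logp_old, rewards) - beta * kl_to_ref)
```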
# 3.2 Time-R1: RL for Temporal Video Grounding
Since the TVG task has defined answers and well-established evaluation metrics, RL can optimize LVLMs for task-specific performance through tailored reward design. To enhance interpretability and align with human-like reasoning, we additionally incorporate an explicit "thinking process" prior to timestamp prediction. This process encourages the model to produce contextualized video descriptions that support its final decision. We detail our reward modeling and training process below.
Reward modeling. The reward $r_i$ plays a crucial role in guiding the model's learning objective. To encourage effective temporal grounding with an explicit reasoning process, we design a composite reward function comprising two components: the timestamp-aware Intersection over Union (IoU) $r_{\mathrm{tIoU}}$ and the reasoning template reward $r_{\mathrm{form}}$ .
- Timestamp-aware IoU reward $r_{\mathrm{tIoU}}(\cdot)$ . The TVG task primarily uses IoU [58] to evaluate the quality of predicted segments against the ground-truth $[t_s', t_e']$ , computed as:

$$
\mathrm{IoU} = \frac{\left|\left[t_s, t_e\right] \cap \left[t_s', t_e'\right]\right|}{\left|\left[t_s, t_e\right] \cup \left[t_s', t_e'\right]\right|} \tag{3}
$$

where $|A \cap B|$ and $|A \cup B|$ denote the lengths of the intersection and union of intervals $A$ and $B$, respectively. Optimizing for the IoU inherently encourages the LVLM to produce predictions that fall within a permissible range of variation $\epsilon$ , such that $t_{s \text{ or } e}^{\prime} - \epsilon \leq t_{s \text{ or } e} \leq t_{s \text{ or } e}^{\prime} + \epsilon$ still yields high IoUs. This encourages the LVLM to focus more on the semantic understanding of the event within possible temporal boundaries, rather than rigidly requiring exact temporal alignment like SFT. However, standard IoU may fail to accurately reflect the quality of temporal alignment in certain scenarios. For example, when the ground truth span is [0, 30] (i.e., the full video), any prediction covering more than $30\%$ of the video would result in an IoU greater than 0.3. A prediction like [10, 25] would yield an IoU of 0.5, which overestimates its quality despite incorrect timestamps. To address this issue, we introduce the timestamp-aware IoU (tIoU) as a corrective measure. tIoU augments the standard IoU with penalties on timestamp deviations, defined as:

$$
r_{\mathrm{tIoU}}(o) = \mathrm{IoU} \cdot \left(1 - \frac{\left|t_s - t_s'\right|}{t}\right) \cdot \left(1 - \frac{\left|t_e - t_e'\right|}{t}\right) \tag{4}
$$

This modification penalizes predictions that deviate from the reference timestamps relative to the video duration $t$ . In the earlier example, the reward value changes from 0.5 (IoU) to 0.28 (tIoU), providing a more realistic signal for learning. Overall, tIoU acts as a stricter and more informative reward signal, encouraging the LVLM to develop a deeper temporal understanding of events, rather than relying on superficial shortcuts.
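A small sketch of Equations 3 and 4 that reproduces the worked example above (prediction [10, 25] against ground truth [0, 30] on a 30-second video); the helper names are ours, not the authors'.

```python
def iou(pred, gt):
    """1-D IoU between intervals pred = [ts, te] and gt = [ts', te'] (Eq. 3)."""
    inter = max(0.0, min(pred[1], gt[1]) - max(pred[0], gt[0]))
    union = (pred[1] - pred[0]) + (gt[1] - gt[0]) - inter
    return inter / union if union > 0 else 0.0

def tiou_reward(pred, gt, duration):
    """Eq. 4: IoU discounted by start/end deviations relative to duration t."""
    return (iou(pred, gt)
            * (1.0 - abs(pred[0] - gt[0]) / duration)
            * (1.0 - abs(pred[1] - gt[1]) / duration))

print(iou([10, 25], [0, 30]))                          # 0.5
print(round(tiou_reward([10, 25], [0, 30], 30.0), 2))  # 0.28
```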
- Reasoning template reward $r_{\mathrm{form}}(\cdot)$ . In TVG, the video segments relevant to a textual query typically comprise only a small portion of the entire long video. For LVLMs, it is therefore suboptimal to directly predict timestamps without first engaging in a reasoning process to identify the relevant content. Instead, the model should allocate its computational capacity toward reasoning over visual and linguistic cues to better understand the temporal context before making predictions. For instance, given the query "the man washes dishes", reasoning that the person is likely in a kitchen can improve temporal localization. Such context-aware inference supports more accurate and semantically aligned predictions. To encourage this behavior, we introduce a template-based reasoning reward, which incentivizes the model to generate intermediate reasoning steps (structured in a predefined format) prior to timestamp localization. The reasoning template reward requires the LVLM to structure its response as "<think>...</think><answer> $t_s$ to $t_e$ </answer>", formulated as:

$$
r_{\mathrm{form}}(o) = \begin{cases} 0, & \text{if } o \text{ has the wrong format} \\ 1, & \text{if } o \text{ has the correct format} \end{cases} \tag{5}
$$

The overall reward is the sum of the two:

$$
r(o) = r_{\mathrm{tIoU}}(o) + r_{\mathrm{form}}(o) \tag{6}
$$

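Putting the two terms together, a sketch of Equations 5 and 6 follows; the regular expression is illustrative only, since the exact parsing rule is not specified, and `tiou_reward` is reused from the sketch above.

```python
import re

# Assumed answer pattern: "<think>...</think><answer>ts to te</answer>".
ANSWER_RE = re.compile(
    r"<think>.*</think>\s*<answer>\s*([\d.]+)\s*to\s*([\d.]+)\s*</answer>\s*$",
    re.DOTALL,
)

def format_reward(response):
    """Eq. 5: 1 if the response matches the reasoning template, else 0."""
    return 1.0 if ANSWER_RE.search(response) else 0.0

def total_reward(response, gt, duration):
    """Eq. 6: r(o) = r_tIoU(o) + r_form(o); malformed outputs earn nothing."""
    m = ANSWER_RE.search(response)
    if m is None:
        return 0.0
    pred = [float(m.group(1)), float(m.group(2))]
    return tiou_reward(pred, gt, duration) + 1.0
```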
GRPO training. The LVLM $\mathcal{F}(\cdot)$ takes the video frames $x_{1},\ldots ,x_{T}$ and the language query $q$ as input and generates $G$ candidate responses $o_1,\dots,o_G$ , where each response is computed as $o_i = \mathcal{F}(x_1,\ldots ,x_T;q)$ . The reward for each response is calculated using Equation 6, and the model is optimized with the GRPO objective in Equations 1 and 2. To focus learning on the reasoning and localization capabilities, we freeze the visual encoder and update only the parameters of the LLM during training.
# 3.3 TimeRFT: Time-Aware RL-Friendly Fine-Tuning
Due to the high computational cost associated with RL training, we explore data-efficient strategies to reduce sample requirements. To this end, we propose TimeRFT, which incorporates time-aware, RL-friendly dataset curation and fine-tuning techniques aimed at enhancing generalization while minimizing training overhead.
RL-friendly dataset curation. We construct the TimeRFT dataset by leveraging only TVG samples and assign a difficulty score to each sample based on the base model's performance. A small subset is then selected for subsequent RL training.
- Source data collection. Our training videos are sourced from Internet video datasets including YT-Temporal [54], DiDeMo [3], QuerYD [38], InternVid [49], and HowTo100M [36]. We obtain grounding data with annotations from VTG-IT [17], TimeIT [44], TimePro [60], HTStep [2], and LongVid [27]. This process yields 339K temporal grounding samples.
- RFT data filtering. We propose a data selection strategy based on training difficulty to significantly reduce training costs while preserving strong generalization performance. Models trained only on easy samples (e.g., IoU $\geq 0.7$ ) tend to overfit, whereas training on overly difficult samples (e.g., IoU = 0) often suffers from sparse reward signals, making it hard for the model to receive positive feedback. To strike a balance, we select samples of moderate difficulty that are more conducive to generalization during reinforcement fine-tuning. We first estimate a difficulty score for each sample based on the performance of the base model. For grounding tasks, difficulty is quantified using the IoU between the predicted and ground-truth temporal regions. We then filter out samples that are either too easy or too hard. Specifically, we sample a subset of data from a Gaussian distribution over the IoU axis centered at 0.3, resulting in a set of 2.5K moderately difficult samples for RL training, as sketched below.
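As referenced above, a sketch of the Gaussian difficulty filtering, assuming each sample carries a precomputed base-model `iou` field (a hypothetical schema) and using the variance of 0.2 reported in Appendix B; the weighted sampling-without-replacement routine is our choice, not necessarily the authors'.

```python
import math
import random

def gaussian_filter(samples, k=2500, mu=0.3, var=0.2, seed=0):
    """Keep k samples, favoring moderate difficulty: weight each sample by a
    Gaussian over its base-model IoU centered at mu, then draw k without
    replacement (Efraimidis-Spirakis keys: larger u ** (1 / w) wins)."""
    rng = random.Random(seed)
    weights = [math.exp(-((s["iou"] - mu) ** 2) / (2 * var)) for s in samples]
    keyed = sorted(zip(samples, weights),
                   key=lambda sw: rng.random() ** (1.0 / sw[1]),
                   reverse=True)
    return [s for s, _ in keyed[:k]]
```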
RFT training strategy. For selected difficult samples, the model may struggle to learn them in a single pass. However, we argue that effectively mastering these challenging cases is essential for improving overall model performance. To this end, we employ a multi-epoch training approach combined with per-epoch sample filtering, allowing the model to repeatedly focus on harder samples and gradually improve its understanding.
- Dynamic hard sampling. We adopt a multi-epoch training strategy coupled with per-epoch sample filtering to enhance learning from difficult examples (see the sketch after this list). The model is trained over multiple epochs, and after each epoch, we exclude samples that have become easy, i.e., those with an IoU greater than 0.7. This dynamic curriculum discourages overfitting on easy instances while ensuring consistent exposure to harder samples, ultimately promoting stronger generalization.
- Cold start fine-tuning with few CoT data. For smaller models (e.g., 3B parameters), we observe that directly training with RL to generate CoT responses often results in reasoning steps that are either unintelligible or hallucinated, which impairs answer quality. Additionally, the length of generated reasoning during early training stages is difficult to control, leading to an unstable training process. To address these issues, we introduce a cold-start fine-tuning strategy using a small set of CoT-formatted examples that encourage grounded reasoning aligned with video content. Specifically, we guide the model to produce structured sequential captions with associated timestamps, using the following template:
$$
\text{<think>}\; t_{s_1} \text{ to } t_{e_1}{:}\, C_1;\; t_{s_2} \text{ to } t_{e_2}{:}\, C_2 \;\text{</think>}\; \text{<answer>}\; t_s \text{ to } t_e \;\text{</answer>} \tag{7}
$$

where $C_i$ denotes the caption corresponding to the video segment $[t_{s_i}, t_{e_i}]$ .
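A sketch of the dynamic hard sampling loop from the first bullet above; `train_one_epoch` (one pass of GRPO updates) and `predict_iou` (a probe of the current policy's IoU on a sample) are hypothetical callables supplied by the caller.

```python
def rl_finetune(model, dataset, train_one_epoch, predict_iou,
                epochs=5, easy_iou=0.7):
    """Multi-epoch RFT with per-epoch filtering: after every epoch, samples
    the current policy already solves (IoU > 0.7) are dropped, so later
    epochs concentrate on the remaining hard cases."""
    pool = list(dataset)
    for _ in range(epochs):
        train_one_epoch(model, pool)  # GRPO updates with r = r_tIoU + r_form
        pool = [s for s in pool if predict_iou(model, s) <= easy_iou]
        if not pool:  # everything mastered early; stop
            break
    return model
```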
# 3.4 TVGBench: Evaluation Benchmark for LVLM on Temporal Video Grounding
Existing benchmarks for temporal video grounding either focus on large-scale datasets tailored for smaller models within specific domains (e.g., human activities in ActivityNet) or consist of small, limited test sets (e.g., the 2K home activity samples in Charades) typically used for LVLM evaluation due to their slower inference speed. However, these benchmarks fall short in capturing the evaluation needs of LVLMs, which, despite slower inference, exhibit strong generalization capabilities. To bridge this gap, we introduce TVGBench, a lightweight yet comprehensive evaluation benchmark specifically designed for assessing the performance of LVLMs on temporal video grounding tasks.
Data sources. To ensure a comprehensive evaluation, we construct our TVGBench by curating samples from five public benchmarks with a balanced distribution of data sources: Charades-STA [46], ActivityNet-Captions [5], HiREST [59], EgoNLQ [16], and TACoS [43].
Benchmark statistics. We carefully balance video duration, video domain, and query-center distributions, and construct query semantics, yielding 800 instances in TVGBench, as shown in Figure 3.
- Distribution statistics. Video durations in the dataset have a balanced range from short clips up to 3 minutes long. To ensure temporal diversity, the center timestamps of queries are designed to be approximately uniformly distributed across the entire span of each video.
- Query semantics. Since the original datasets do not provide semantic labels for queries, we manually define 11 semantic categories grouped under three major types: human, object, and environment. We leverage DeepSeek-V3 [11] to annotate the semantic type of each query and ensure approximate balance across these categories. While most categories are evenly represented, the Human Action Simple (HAS) category is slightly overrepresented due to inherent dataset bias (simple indoor actions are more common). In such cases, we prioritize achieving a balance across datasets from different domains while maintaining semantic diversity, accepting a skew in HAS.







Figure 3: Statistics of TVGBench. TVGBench comprises 11 types of queries covering aspects related to humans, objects, and environments. As illustrated in the figure on the right, the distributions of query center, video length, and dataset source are designed to be as balanced as possible. This balanced construction allows for a comprehensive evaluation of model performance across different dimensions, enabling fine-grained analysis along each axis during benchmarking.
# 4 Experiments
We first present our experimental setup in Section 4.1. Then, we evaluate our model from two key perspectives: (1) Comparison with state-of-the-art methods in Section 4.2: We evaluate our model across multiple TVG benchmarks to assess its performance against existing approaches; (2) Ablation studies and analyses in Section 4.3: We examine the individual contributions of each component in our framework to better understand their roles in overall performance. We also compare RL and SFT strategies across TVG, short video QA, and long video QA tasks.
# 4.1 Experimental Setup
Benchmarks. We evaluate our model on a wide range of benchmarks covering both temporal video grounding and general video understanding tasks, including: (1) Charades-STA [46] contains 6,672 long videos capturing indoor human activities. The official split for the TVG task includes 12,408 clip-query pairs for training and 3,720 for testing. (2) ActivityNet [5] comprises 20K long videos with an average of 3.65 clip-query pairs per video. Following previous work in the fine-tuning setting [62, 22] for the TVG task, we use the standard dataset splits with 37,421 training, 17,505 validation, and 17,031 test samples. (3) MVBench [26] is a short video QA benchmark focused on temporal reasoning. It includes 4K QA pairs for 20 types of tasks. (4) TempCompass [31] assesses fine-grained temporal understanding with 410 short videos. We use all multi-choice QA tasks except for the video captioning task. (5) EgoSchema [33] features 5K egocentric video clips, each approximately 3 minutes long, with temporally demanding QA pairs. (6) VideoMME [13] is a general video QA benchmark covering diverse domains. It contains 2.7K QA samples over videos of varied lengths, ranging from 11 seconds to 1 hour. We use the long video split for evaluation.
Table 1: Performance of temporal video grounding on Charades-STA, ActivityNet, and TVGBench. The methods marked in gray* represent fine-tuning on corresponding benchmarks, while those in black indicate zero-shot settings. We compare our Time-R1 against existing 7B open-source LVLMs, as well as state-of-the-art VLP models.

<table><tr><td rowspan="2">Type</td><td rowspan="2">Method</td><td colspan="3">Charades-STA</td><td colspan="3">ActivityNet</td><td colspan="3">TVGBench</td></tr><tr><td>R1@0.3</td><td>R1@0.5</td><td>R1@0.7</td><td>R1@0.3</td><td>R1@0.5</td><td>R1@0.7</td><td>R1@0.3</td><td>R1@0.5</td><td>R1@0.7</td></tr><tr><td rowspan="5">VLP</td><td>2D-TAN* [63]</td><td>57.3</td><td>45.8</td><td>27.9</td><td>60.4</td><td>43.4</td><td>25.0</td><td>-</td><td>-</td><td>-</td></tr><tr><td>UniVTG* [30]</td><td>72.6</td><td>60.2</td><td>38.6</td><td>56.1</td><td>43.4</td><td>24.3</td><td>-</td><td>-</td><td>-</td></tr><tr><td>SSRN* [66]</td><td>-</td><td>65.5</td><td>42.6</td><td>-</td><td>54.5</td><td>33.2</td><td>-</td><td>-</td><td>-</td></tr><tr><td>SnAG* [37]</td><td>-</td><td>64.6</td><td>46.2</td><td>-</td><td>48.6</td><td>30.6</td><td>-</td><td>-</td><td>-</td></tr><tr><td>EaTR* [22]</td><td>-</td><td>68.4</td><td>44.9</td><td>-</td><td>58.2</td><td>37.6</td><td>-</td><td>-</td><td>-</td></tr><tr><td></td><td>Gemini-2.5-Pro [10]</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>39.1</td><td>24.4</td><td>12.8</td></tr><tr><td rowspan="9">SFT</td><td>ChatVTG [41]</td><td>52.7</td><td>33.0</td><td>15.9</td><td>40.7</td><td>22.5</td><td>9.4</td><td>-</td><td>-</td><td>-</td></tr><tr><td>TimeChat [44]</td><td>-</td><td>32.2</td><td>13.4</td><td>36.2</td><td>20.2</td><td>9.5</td><td>22.4</td><td>11.9</td><td>5.3</td></tr><tr><td>HawkEye [50]</td><td>50.6</td><td>31.4</td><td>14.5</td><td>49.1</td><td>29.3</td><td>10.7</td><td>-</td><td>-</td><td>-</td></tr><tr><td>VTimeLLM [21]</td><td>51.0</td><td>27.5</td><td>11.4</td><td>44.0</td><td>27.8</td><td>14.3</td><td>-</td><td>-</td><td>-</td></tr><tr><td>TimeSuite [60]</td><td>69.9</td><td>48.7</td><td>24.0</td><td>-</td><td>-</td><td>-</td><td>31.1</td><td>18.0</td><td>8.9</td></tr><tr><td>VideoChat-Flash [27]</td><td>74.5</td><td>53.1</td><td>27.6</td><td>-</td><td>-</td><td>-</td><td>32.8</td><td>19.8</td><td>10.4</td></tr><tr><td>TRACE [18]</td><td>-</td><td>40.3</td><td>19.4</td><td>-</td><td>37.7</td><td>24.0</td><td>37.0</td><td>25.5</td><td>14.6</td></tr><tr><td>HawkEye* [50]</td><td>72.5</td><td>58.3</td><td>28.8</td><td>55.9</td><td>34.7</td><td>17.9</td><td>-</td><td>-</td><td>-</td></tr><tr><td>TimeSuite* [60]</td><td>79.4</td><td>67.1</td><td>43.0</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td rowspan="2">RL</td><td>Time-R1 (ours)</td><td>78.1</td><td>60.8</td><td>35.3</td><td>58.6</td><td>39.0</td><td>21.4</td><td>41.8</td><td>29.4</td><td>16.4</td></tr><tr><td>Time-R1 (ours)*</td><td>82.8</td><td>72.2</td><td>50.1</td><td>73.3</td><td>55.6</td><td>34.0</td><td>-</td><td>-</td><td>-</td></tr></table>



Figure 4: Comparison between post-training paradigms across various tasks, including temporal video grounding, short video QA, and long video QA. Both "SFT" and "RL" full-finetune the LLM, while "SFT-LoRA" denotes finetuning the LLM with LoRA [20]. The "Base" is Qwen2.5-VL-7B.

Implementation details. Unless otherwise specified, we use Qwen2.5-VL-7B [4] as the base model. To strike a balance between training efficiency and memory consumption, we sample video frames at 2 FPS and adaptively resize each video input to contain approximately 2.8 million pixels. For instance, a 50-second video yields 100 frames, each with a resolution of roughly $96 \times 96 \times 3$ . During the reinforcement fine-tuning phase, we train for 5 epochs using a batch size of 8 and select the final checkpoint for evaluation. For fine-tuning on downstream benchmarks, we train for 2 epochs. All experiments are conducted on a cluster with $8 \times$ NVIDIA A100 GPUs.
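The frame-budget arithmetic above can be made explicit; the sketch below assumes the 2.8M budget counts RGB channel values across all sampled frames, which reproduces the 100-frame, roughly $96 \times 96$ example.

```python
def frame_plan(duration_s, fps=2, value_budget=2.8e6, channels=3):
    """Sample at `fps`, then size square frames so the whole clip stays
    within the per-video budget of channel values."""
    n_frames = int(duration_s * fps)
    per_frame = value_budget / n_frames        # channel values per frame
    side = int((per_frame / channels) ** 0.5)  # square frame side length
    return n_frames, side

print(frame_plan(50))  # (100, 96): 100 frames at roughly 96 x 96 x 3
```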
Evaluation metrics. For TVG, following [44, 60], we adopt the “R1@m” evaluation protocol to compare with state-of-the-art models, which computes the percentage of samples where the top-1 predicted segment has an IoU greater than a threshold $m$ , with $m \in \{0.3, 0.5, 0.7\}$ . For brevity, we also adopt mIoU, which calculates the average IoU on all testing data as an alternative metric. For video QA, we report accuracy as the evaluation metric.
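Both metrics reduce to simple statistics over per-sample IoUs; a minimal sketch with toy numbers:

```python
def r1_at_m(ious, m):
    """R1@m: percentage of samples whose top-1 prediction has IoU > m."""
    return 100.0 * sum(i > m for i in ious) / len(ious)

def miou(ious):
    """mIoU: average IoU over all test samples, in percent."""
    return 100.0 * sum(ious) / len(ious)

ious = [0.82, 0.41, 0.66, 0.12]                     # toy per-sample IoUs
print([r1_at_m(ious, m) for m in (0.3, 0.5, 0.7)])  # [75.0, 50.0, 25.0]
print(miou(ious))                                   # 50.25
```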
# 4.2 Comparison with State-of-the-Art
We compare Time-R1 with state-of-the-art TVG methods, including both traditional video-language pre-training models (VLP) and recent large video-language models fine-tuned via SFT.
Time-R1 surpasses SFT-based models in zero-shot settings. As shown in Table 1, in the zero-shot setting, Time-R1 demonstrates strong performance, outperforming SFT-based models that rely on large-scale supervision. Despite using only $2.5\mathrm{K}$ samples for RL, Time-R1 achieves leading results across multiple benchmarks. For example, on Charades-STA, Time-R1 attains an R1@0.7 score of 35.3, outperforming VideoChat-Flash (27.6) and TimeSuite (24.0). On ActivityNet, it achieves an R1@0.7 score of 21.4, surpassing VTimeLLM (14.3) and TimeChat (10.7). On TVGBench, it scores 16.4, outperforming TRACE (14.6) and Gemini-2.5-Pro (12.8).
Time-R1* outperforms all SFT-based LVLMs and many traditional VLP-based models. Time-R1* consistently outperforms both traditional VLP-based and SFT-based models on the TVG task. On Charades-STA, it exceeds EaTR and fine-tuned TimeSuite by 3.4-7.1 percentage points across R1@0.3 to R1@0.7. Notably, Time-R1* surpasses TimeSuite despite using far fewer RL samples compared to TimeSuite's 349K SFT examples. On the more challenging ActivityNet dataset, Time-R1* also outperforms SSRN [66] and TRACE [18], achieving significant gains across key metrics.
Table 2: Ablation of Time-R1-7B training. GF, ME, and SF refer to Gaussian Filtering, Multi-Epoch training, and per-epoch Sample Filtering, respectively.
<table><tr><td rowspan="2"></td><td rowspan="2">tIoU</td><td rowspan="2">GF</td><td rowspan="2">ME</td><td rowspan="2">SF</td><td colspan="3">TVGBench</td></tr><tr><td>R1@0.3</td><td>R1@0.5</td><td>R1@0.7</td></tr><tr><td>1</td><td>X</td><td>X</td><td>X</td><td>X</td><td>38.0</td><td>24.8</td><td>13.2</td></tr><tr><td>2</td><td>✓</td><td>X</td><td>X</td><td>X</td><td>36.0</td><td>23.6</td><td>12.9</td></tr><tr><td>3</td><td>X</td><td>✓</td><td>X</td><td>X</td><td>37.2</td><td>25.0</td><td>13.4</td></tr><tr><td>4</td><td>X</td><td>X</td><td>✓</td><td>X</td><td>39.9</td><td>26.0</td><td>14.2</td></tr><tr><td>5</td><td>✓</td><td>✓</td><td>X</td><td>X</td><td>38.4</td><td>25.6</td><td>14.1</td></tr><tr><td>6</td><td>✓</td><td>X</td><td>✓</td><td>X</td><td>39.4</td><td>26.5</td><td>16.4</td></tr><tr><td>7</td><td>✓</td><td>✓</td><td>✓</td><td>X</td><td>41.6</td><td>28.5</td><td>15.6</td></tr><tr><td>8</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>41.8</td><td>29.4</td><td>16.4</td></tr></table>
# 4.3 Ablation Study
We conduct a detailed ablation on the Time-R1-7B model to investigate the contribution of various training strategies.
Utility of TimeRFT and Time-R1 components. As shown in Table 2, both Gaussian Filtering (GF) and Multi-Epoch training (ME) individually improve performance, with ME yielding a more substantial gain, improving from R1@0.7 of 13.2 in row 1 to 14.2 in row 4. Notably, the combination of tIoU supervision and ME (Row 6) leads to a significant boost across all metrics. As more components are added, GF and ME (Row 7), followed by Sample Filtering (SF) in Row 8, the performance continues to improve, ultimately reaching R1@0.5 of 29.4 and R1@0.7 of 16.4.
Generalization of RL vs. SFT. As shown in Figure 4, when both RL and SFT perform full fine-tuning of the LLM using a small amount of data, SFT significantly degrades the model's generalization ability across all tasks, whereas RL consistently preserves generalization. While SFT can alleviate this issue by adopting LoRA-based fine-tuning (SFT-LoRA), RL still demonstrates stronger generalization and additionally improves performance on video QA tasks, whereas SFT-LoRA leads to a decline. For example, on ActivityNet, RL improves the mIoU from 16.3 to 29.2, while SFT degrades it to 15.4, and SFT-LoRA improves it to 25.9. On the VideoMME video QA benchmark, RL increases the performance from 53.0 to 54.2, whereas SFT-LoRA decreases it to 51.7.
Impact of cold start. As shown in Figure 5, cold start boosts the performance of the 3B model and reduces the number of thinking tokens for both models. We attribute this to the cold start suppressing hallucinations, which tend to be more prevalent in weaker models.
Figure 5: Impact of SFT-based cold start on IoU performance and thinking token count, with token counts marked by $\star$ on dashed lines.
# 5 Conclusion
In this work, we present Time-R1, a reinforcement-learning-based post-training framework that significantly improves the generalization of Large Vision-Language Models for Temporal Video Grounding. Unlike prior methods that rely on large-scale supervised fine-tuning, Time-R1 leverages a verifiable reward signal to unlock strong temporal reasoning from pretrained models using limited data. Our contributions include: (1) Time-R1, a reasoning-guided post-training framework that enhances TVG via RL; (2) TimeRFT, a curated dataset and training strategy that foster temporal grounding; (3) TVGBench, a small yet comprehensive benchmark for evaluating LVLMs on TVG. Extensive experiments show that Time-R1 achieves SoTA performance across TVG benchmarks in both zero-shot and fine-tuned settings, surpassing prior LVLMs and traditional VLP-based models, while also improving general video understanding. We hope this work inspires future directions in data-efficient and generalizable video-language understanding via reinforcement learning.
# References
[1] Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 2, 4
[2] Triantafyllos Afouras, Effrosyni Mavroudi, Tushar Nagarajan, Huiyu Wang, and Lorenzo Torresani. Ht-step: Aligning instructional articles with how-to videos. Advances in Neural Information Processing Systems, 36:50310-50326, 2023. 6
[3] Lisa Anne Hendricks, Oliver Wang, Eli Shechtman, Josef Sivic, Trevor Darrell, and Bryan Russell. Localizing moments in video with natural language. In Proceedings of the IEEE international conference on computer vision, pages 5803-5812, 2017. 1, 3, 6
[4] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, Humen Zhong, Yuanzhi Zhu, Mingkun Yang, Zhaohai Li, Jianqiang Wan, Pengfei Wang, Wei Ding, Zheren Fu, Yiheng Xu, Jiabo Ye, Xi Zhang, Tianbao Xie, Zesen Cheng, Hang Zhang, Zhibo Yang, Haiyang Xu, and Junyang Lin. Qwen2.5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. 7
[5] Fabian Caba Heilbron, Victor Escorcia, Bernard Ghanem, and Juan Carlos Niebles. Activitynet: A large-scale video benchmark for human activity understanding. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 961-970, 2015. 1, 3, 6, 7
[6] Joao Carreira and Andrew Zisserman. Quo vadis, action recognition? a new model and the kinetics dataset. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6299-6308, 2017. 1, 3
[7] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025. 4
[8] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. Instructblip: Towards general-purpose vision-language models with instruction tuning, 2023. 14
[9] Trevor Darrell and Alex Pentland. Space-time gestures. In Proceedings of IEEE Conference on Computer Vision and Pattern Recognition, pages 335-340. IEEE, 1993. 1
[10] Google DeepMind. Gemini 2.5: Our most intelligent ai model. Google DeepMind, 2025. Model ID: gemini-2.5-pro-preview-03-25. 8, 14, 15
[11] DeepSeek-AI. Deepseek llm: Scaling open-source language models with longtermism. arXiv preprint arXiv:2401.02954, 2024. 7
[12] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics, pages 4171-4186, 2019. 3
[13] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2025. 3, 7
[14] Adrien Gaidon, Zaid Harchaoui, and Cordelia Schmid. Temporal localization of actions with actoms. IEEE transactions on pattern analysis and machine intelligence, 35(11):2782-2795, 2013. 1
[15] Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. Tall: Temporal activity localization via language query. In Proceedings of the IEEE international conference on computer vision, pages 5267-5275, 2017. 1, 3
[16] Kristen Grauman, Andrew Westbury, Eugene Byrne, Zachary Chavis, Antonino Furnari, Rohit Girdhar, Jackson Hamburger, Hao Jiang, Miao Liu, Xingyu Liu, et al. Ego4d: Around the world in 3,000 hours of egocentric video. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18995-19012, 2022. 1, 6
[17] Yongxin Guo, Jingyu Liu, Mingda Li, Dingxin Cheng, Xiaoying Tang, Dianbo Sui, Qingbin Liu, Xi Chen, and Kevin Zhao. Vtg-llm: Integrating timestamp knowledge into video llms for enhanced video temporal grounding. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 39, pages 3302-3310, 2025. 2, 6
[18] Yongxin Guo, Jingyu Liu, Mingda Li, Qingbin Liu, Xi Chen, and Xiaoying Tang. Trace: Temporal grounding video llm via causal event modeling. arXiv preprint arXiv:2410.05643, 2024. 2, 3, 8, 9, 14, 15
[19] Tanveer Hannan, Md Mohaiminul Islam, Jindong Gu, Thomas Seidl, and Gedas Bertasius. Revisionllm: Recursive vision-language model for temporal grounding in hour-long videos. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2025. 3
[20] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen, et al. Lora: Low-rank adaptation of large language models. ICLR, 1(2):3, 2022. 8, 14
[21] Bin Huang, Xin Wang, Hong Chen, Zihan Song, and Wenwu Zhu. Vtimellm: Empower llm to grasp video moments. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14271-14280, 2024. 8
[22] Jinhyun Jang, Jungin Park, Jin Kim, Hyeongjun Kwon, and Kwanghoon Sohn. Knowing where to focus: Event-aware transformer for video grounding. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 13846-13856, 2023. 1, 2, 3, 7, 8
[23] Woosuk Kwon, Zhuohan Li, Siyuan Zhuang, Ying Sheng, Lianmin Zheng, Cody Hao Yu, Joseph E. Gonzalez, Hao Zhang, and Ion Stoica. Efficient memory management for large language model serving with pagedattention. In Proceedings of the ACM SIGOPS 29th Symposium on Operating Systems Principles, 2023. 15
[24] Ivan Laptev and Patrick Pérez. Retrieving actions in movies. In 2007 IEEE 11th International Conference on Computer Vision, pages 1-8. IEEE, 2007. 1
[25] Jiaze Li, Yaya Shi, Zongyang Ma, Haoran Xu, Feng Cheng, Huihui Xiao, Ruiwen Kang, Fan Yang, Tingting Gao, and Di Zhang. imove: Instance-motion-aware video understanding. arXiv preprint arXiv:2502.11594, 2025. 3
[26] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024. 3, 7
[27] Xinhao Li, Yi Wang, Jiashuo Yu, Xiangyu Zeng, Yuhan Zhu, Haian Huang, Jianfei Gao, Kunchang Li, Yinan He, Chenting Wang, Yu Qiao, Yali Wang, and Limin Wang. Videochat-flash: Hierarchical compression for long-context video modeling. arXiv preprint arXiv:2501.00574, 2024. 3, 6, 8, 14, 15
[28] Zhenyi Liao, Qingsong Xie, Yanhao Zhang, Zijian Kong, Haonan Lu, Zhenyu Yang, and Zhijie Deng. Improved visual-spatial reasoning via r1-zero-like training. arXiv preprint arXiv:2504.00883, 2025. 4
[29] Kevin Qinghong Lin, Jinpeng Wang, Mattia Soldan, Michael Wray, Rui Yan, Eric Z Xu, Difei Gao, Rong-Cheng Tu, Wenzhe Zhao, Weijie Kong, et al. Egocentric video-language pretraining. Advances in Neural Information Processing Systems, 35:7575-7586, 2022. 1, 3
[30] Kevin Qinghong Lin, Pengchuan Zhang, Joya Chen, Shraman Pramanick, Difei Gao, Alex Jinpeng Wang, Rui Yan, and Mike Zheng Shou. Univtg: Towards unified video-language temporal grounding. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2794-2804, 2023. 1, 8
[31] Yuanxin Liu, Shicheng Li, Yi Liu, Yuxiang Wang, Shuhuai Ren, Lei Li, Sishuo Chen, Xu Sun, and Lu Hou. Tempcompass: Do video llms really understand videos? arXiv preprint arXiv:2403.00476, 2024. 7
[32] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025. 4
[33] Karttikeya Mangalam, Raiymbek Akshulakov, and Jitendra Malik. Egoschema: A diagnostic benchmark for very long-form video language understanding. Advances in Neural Information Processing Systems, 36:46212-46244, 2023. 7
[34] Gabriel B Margolis and Pulkit Agrawal. Walk these ways: Tuning robot control for generalization with multiplicity of behavior. In Conference on Robot Learning, pages 22-31. PMLR, 2023. 3
[35] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025. 4
[36] Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. Howto100m: Learning a text-video embedding by watching hundred million narrated video clips. In Proceedings of the IEEE/CVF international conference on computer vision, pages 2630-2640, 2019. 6
[37] Fangzhou Mu, Sicheng Mo, and Yin Li. Snag: Scalable and accurate video grounding. 2024 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 18930-18940, 2024. 3, 8
[38] Andreea-Maria Oncescu, Joao F Henriques, Yang Liu, Andrew Zisserman, and Samuel Albanie. Queryd: A video dataset with high-quality text and audio narrations. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pages 2265-2269. IEEE, 2021. 6
[39] OpenAI. Openai o1, 2024. 2, 4
[40] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35:27730-27744, 2022. 4
[41] Mengxue Qu, Xiaodong Chen, Wu Liu, Alicia Li, and Yao Zhao. Chatvtg: Video temporal grounding via chat with video dialogue large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1847-1856, 2024. 3, 8
[42] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 1, 3
[43] Michaela Regneri, Marcus Rohrbach, Dominikus Wetzel, Stefan Thater, Bernt Schiele, and Manfred Pinkal. Grounding action descriptions in videos. Transactions of the Association for Computational Linguistics, 1:25-36, 2013. 6
[44] Shuhuai Ren, Linli Yao, Shicheng Li, Xu Sun, and Lu Hou. Timechat: A time-sensitive multimodal large language model for long video understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14313-14323, 2024. 1, 3, 6, 8, 14, 15
[45] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, Y. K. Li, Y. Wu, and Daya Guo. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 16
[46] Gunnar A Sigurdsson, Gül Varol, Xiaolong Wang, Ali Farhadi, Ivan Laptev, and Abhinav Gupta. Hollywood in homes: Crowdsourcing data collection for activity understanding. In Proceedings of the European Conference on Computer Vision (ECCV), 2016. 1, 3, 6, 7
[47] David Silver, Thomas Hubert, Julian Schrittwieser, Ioannis Antonoglou, Matthew Lai, Arthur Guez, Marc Lanctot, Laurent Sifre, Dharshan Kumaran, Thore Graepel, et al. Mastering chess and shogi by self-play with a general reinforcement learning algorithm. arXiv preprint arXiv:1712.01815, 2017. 3
[48] Huajie Tan, Yuheng Ji, Xiaoshuai Hao, Minglan Lin, Pengwei Wang, Zhongyuan Wang, and Shanghang Zhang. Reason-rft: Reinforcement fine-tuning for visual reasoning. arXiv preprint arXiv:2503.20752, 2025. 4
[49] Yi Wang, Yinan He, Yizhuo Li, Kunchang Li, Jiashuo Yu, Xin Ma, Xinhao Li, Guo Chen, Xinyuan Chen, Yaohui Wang, et al. Internvid: A large-scale video-text dataset for multimodal understanding and generation. arXiv preprint arXiv:2307.06942, 2023. 6
[50] Yueqian Wang, Xiaojun Meng, Jianxin Liang, Yuxuan Wang, Qun Liu, and Dongyan Zhao. Hawkeye: Training video-text llms for grounding text in videos, 2024. 8
[51] Thomas Wolf, Lysandre Debut, Victor Sanh, Julien Chaumond, Clement Delangue, Anthony Moi, Pierric Cistac, Tim Rault, Rémi Louf, Morgan Funtowicz, Joe Davison, Sam Shleifer, Patrick von Platen, Clara Ma, Yacine Jernite, Julien Plu, Canwen Xu, Teven Le Scao, Sylvain Gugger, Mariama Drame, Quentin Lhoest, and Alexander M. Rush. Transformers: State-of-the-art natural language processing. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, pages 38-45, Online, October 2020. Association for Computational Linguistics. 15
[52] Yongliang Wu, Xinting Hu, Yuyang Sun, Yizhou Zhou, Wenbo Zhu, Fengyun Rao, Bernt Schiele, and Xu Yang. Number it: Temporal grounding videos like flipping manga. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2025. 3
[53] Antoine Yang, Arsha Nagrani, Paul Hongsuck Seo, Antoine Miech, Jordi Pont-Tuset, Ivan Laptev, Josef Sivic, and Cordelia Schmid. Vid2seq: Large-scale pretraining of a visual language model for dense video captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10714-10726, 2023. 2
[54] Antoine Yang, Arsha Nagrani, Paul Hongsuck Seo, Antoine Miech, Jordi Pont-Tuset, Ivan Laptev, Josef Sivic, and Cordelia Schmid. Vid2seq: Large-scale pretraining of a visual language model for dense video captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10714-10726, 2023. 6, 14
[55] Jingkang Yang, Shuai Liu, Hongming Guo, Yuhao Dong, Xiamengwei Zhang, Sicheng Zhang, Pengyun Wang, Zitang Zhou, Binzhu Xie, Ziyue Wang, Bei Ouyang, Zhengyu Lin, Marco Cominelli, Zhongang Cai, Yuanhan Zhang, Peiyuan Zhang, Fangzhou Hong, Joerg Widmer, Francesco Gringoli, Lei Yang, Bo Li, and Ziwei Liu. Egolife: Towards egocentric life assistant. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2025. 1
[56] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025. 4, 14, 16
[57] Tianyu Yu, Yuan Yao, Haoye Zhang, Taiwen He, Yifeng Han, Ganqu Cui, Jinyi Hu, Zhiyuan Liu, Hai-Tao Zheng, Maosong Sun, et al. Rlhf-v: Towards trustworthy mllms via behavior alignment from fine-grained correctional human feedback. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13807-13816, 2024. 4
[58] Yitian Yuan, Xiaohan Lan, Xin Wang, Long Chen, Zhi Wang, and Wenwu Zhu. A closer look at temporal sentence grounding in videos: Dataset and metric. In Proceedings of the 2nd international workshop on human-centric multimedia analysis, pages 13-21, 2021. 5
[59] Abhay Zala, Jaemin Cho, Satwik Kottur, Xilun Chen, Barlas Oguz, Yashar Mehdad, and Mohit Bansal. Hierarchical video-moment retrieval and step-captioning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23056-23065, 2023. 6
[60] Xiangyu Zeng, Kunchang Li, Chenting Wang, Xinhao Li, Tianxiang Jiang, Ziang Yan, Songze Li, Yansong Shi, Zhengrong Yue, Yi Wang, Yali Wang, Yu Qiao, and Limin Wang. Timesuite: Improving MLLMs for long video understanding via grounded tuning. In The Thirteenth International Conference on Learning Representations, 2025. 1, 3, 6, 8, 14, 15
[61] Hao Zhang, Aixin Sun, Wei Jing, and Joey Tianyi Zhou. Temporal sentence grounding in videos: A survey and future directions. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(8):10443-10465, 2023. 1
[62] Songyang Zhang, Houwen Peng, Jianlong Fu, Yijuan Lu, and Jiebo Luo. Multi-scale 2d temporal adjacency networks for moment localization with natural language. IEEE Transactions on Pattern Analysis and Machine Intelligence, 2021. 7
[63] Songyang Zhang, Houwen Peng, Jianlong Fu, and Jiebo Luo. Learning 2d temporal adjacent networks for moment localization with natural language. In Proceedings of the AAAI Conference on Artificial Intelligence, 2020. 8
[64] Xingjian Zhang, Siwei Wen, Wenjun Wu, and Lei Huang. Tinyllava-video-r1: Towards smaller lmms for video reasoning. arXiv preprint arXiv:2504.09641, 2025. 4
[65] Henghao Zhao, Ge-Peng Ji, Rui Yan, Huan Xiong, and Zechao Li. Videoexpert: Augmented llm for temporal-sensitive video understanding. arXiv preprint arXiv:2504.07519, 2025. 2
[66] Jiahao Zhu, Daizong Liu, Pan Zhou, Xing Di, Yu Cheng, Song Yang, Wenzheng Xu, Zichuan Xu, Yao Wan, Lichao Sun, and Zeyu Xiong. Rethinking the video sampling and reasoning strategies for temporal sentence grounding. In Findings of the Association for Computational Linguistics: EMNLP 2022, 2022. 8, 9
A Limitations

B Implementation Details

C Additional Analyses

D Ablation Studies

E Qualitative Result
# A Limitations
Despite achieving notable improvements on the TVG task, our approach still has several limitations. First, Time-R1 suffers from slower training and inference speeds, primarily due to its large model size and reliance on autoregressive text generation. Second, to manage GPU memory consumption, we use a relatively low frame sampling rate, which may result in the loss of fine-grained motion information across frames. Finally, Time-R1 currently cannot handle ultra-long videos, limiting its applicability in scenarios such as full-length movie understanding.
# B Implementation Details
Details of Time-R1 framework. Inspired by DAPO [56], we adopt its token-level loss for training, rather than the sample-level loss used in GRPO. Apart from this minor change to the loss, all settings are identical to GRPO. We also find that the other techniques introduced in DAPO do not benefit the TVG task, so we omit them. Because we full-finetune the LLM parameters at every step, the importance ratio $\frac{\pi_{\theta}(o_i)}{\pi_{\theta_{\mathrm{old}}}(o_i)} = 1$ . The sample number $G$ is set to 8. The coefficient $\beta$ is set to 0.04.
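To illustrate the distinction, a hedged sketch contrasting the two aggregations; tensor shapes and names are assumptions, with `logps` and `mask` of shape [G, L] and per-response advantages of shape [G].

```python
import torch

def token_level_loss(logps, advantages, mask):
    """DAPO-style (used here): average over all valid tokens in the group,
    so long responses contribute proportionally more tokens."""
    per_token = -logps * advantages.unsqueeze(1) * mask
    return per_token.sum() / mask.sum()

def sample_level_loss(logps, advantages, mask):
    """GRPO-style: average within each sequence first, then over the group,
    which weights every response equally regardless of length."""
    per_seq = (logps * mask).sum(dim=1) / mask.sum(dim=1)
    return (-per_seq * advantages).mean()
```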
Details of TimeRFT training. For RFT data filtering, we use a Gaussian distribution with a fixed variance of 0.2 while varying the mean to control sample selection. In our cold start phase, we construct 150 samples from our training data sources (e.g., YT-Temporal [54]) to fine-tune the LLM using LoRA [20], with a LoRA rank of 64 and a LoRA alpha of 128. All of our results are reported based on the final training epoch. For RL, we use a learning rate of 1e-6 with the AdamW optimizer with $\beta_{1} = 0.9$ , $\beta_{2} = 0.999$ , and a linear scheduler to decay the learning rate from 1e-6 to 0. We use a batch size of 8 with gradient accumulation set to 2. It requires 15 hours of training on 8 A100 GPUs.
Details of our evaluation prompts. As shown in Figure 11, for temporal video grounding, the prompts used for training and testing are designed to encourage the model to reason before responding, following a template-based answer format. For VideoQA, we use two versions of the prompt: one with CoT and one without CoT.
Details of TVG baseline methods and implementations. We evaluate the baselines on TVGBench using their original best-performing settings, focusing primarily on video input and prompt design.
- TimeChat [44] is built upon the InstructBLIP [8] architecture and introduces a video Q-Former to encode video tokens. It operates at a resolution of 224 and samples 96 frames.
- TRACE [18] treats each combination of timestamp, saliency score, and caption as a discrete event and enables the LVLM to autoregressively generate event sequences. It operates at a higher resolution of 336 and samples 128 frames.
- TimeSuite [60] introduces a token shuffling strategy to compress long video token sequences and incorporates positional encoding to enhance visual understanding. It adopts a resolution of 224 and samples 128 frames.
- VideoChat-Flash [27] proposes a progressive visual token dropping mechanism within intermediate LLM layers to compress video inputs and extend the effective context length. It uses a resolution of 448 and samples video at 1 fps, with a maximum of 512 frames.
- Gemini-2.5-Pro [10] is a state-of-the-art video understanding model capable of reasoning over videos exceeding one hour in length. It supports video question answering and temporal localization tasks.
Table 3: Comparison of different approaches on TVGBench for all types. We use mIoU as the metric.

<table><tr><td>Method</td><td>EC</td><td>ES</td><td>HAC</td><td>HAP</td><td>HAS</td><td>HP</td><td>OA</td><td>OC</td><td>OEC</td><td>OES</td><td>OT</td></tr><tr><td>TimeChat [44]</td><td>22.3</td><td>32.8</td><td>16.6</td><td>9.8</td><td>14.6</td><td>35.1</td><td>15.0</td><td>9.2</td><td>2.4</td><td>18.0</td><td>10.2</td></tr><tr><td>TimeSuite [60]</td><td>27.3</td><td>39.6</td><td>14.2</td><td>12.8</td><td>24.9</td><td>39.6</td><td>14.6</td><td>13.9</td><td>6.7</td><td>32.6</td><td>14.3</td></tr><tr><td>TRACE [18]</td><td>57.1</td><td>66.8</td><td>25.9</td><td>17.5</td><td>26.5</td><td>45.1</td><td>17.8</td><td>22.1</td><td>12.5</td><td>36.8</td><td>24.9</td></tr><tr><td>VideoChat-Flash [27]</td><td>38.3</td><td>47.2</td><td>12.9</td><td>13.9</td><td>27.1</td><td>39.4</td><td>14.9</td><td>12.7</td><td>6.5</td><td>24.3</td><td>12.9</td></tr><tr><td>Gemini-2.5-Pro [10]</td><td>46.7</td><td>45.3</td><td>21.1</td><td>27.6</td><td>30.9</td><td>39.9</td><td>23.0</td><td>31.1</td><td>14.1</td><td>35.9</td><td>17.8</td></tr><tr><td>Time-R1 (ours)</td><td>49.3</td><td>65.3</td><td>28.3</td><td>24.3</td><td>39.3</td><td>56.2</td><td>26.3</td><td>21.8</td><td>9.0</td><td>32.7</td><td>21.8</td></tr></table>
Details of our implemented SFT baselines. We implement two versions of SFT fine-tuning: full-parameter fine-tuning of the LLM (SFT) and LoRA-based fine-tuning of the LLM (SFT-LoRA). For SFT-LoRA, the LoRA rank is set to 64 and the LoRA alpha to 128. Both configurations use a learning rate of 2e-5, the AdamW optimizer with $\beta_{1} = 0.9$, $\beta_{2} = 0.999$, a weight decay of 0, a batch size of 8, and gradient accumulation of 2. We fine-tune for 5 epochs on our 2.5K data, using a linear scheduler to gradually decay the learning rate to 0.
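For reference, this SFT-LoRA configuration maps directly onto a standard PEFT setup; the sketch below is illustrative only, and the target modules and dropout are our assumptions rather than values stated in the paper.

```python
from peft import LoraConfig, get_peft_model

# LoRA rank 64 and alpha 128, matching the SFT-LoRA setting described above.
# target_modules and lora_dropout are illustrative assumptions.
lora_config = LoraConfig(
    r=64,
    lora_alpha=128,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],
    lora_dropout=0.05,
    task_type="CAUSAL_LM",
)

def wrap_with_lora(base_model):
    """Attach LoRA adapters to a HuggingFace causal LM before fine-tuning."""
    return get_peft_model(base_model, lora_config)
```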
# C Additional Analyses

In-depth comparisons of different approaches on TVGBench by semantic type. Table 3 provides a detailed performance comparison of various methods on TVGBench across different semantic categories. Specifically, the abbreviations represent: EC (Environment Change), ES (Environment State), HAC (Human Action - Complex), HAP (Human Action - Procedural), HAS (Human Action - Simple), HP (Human Pose), OA (Object Attribute), OC (Object Counting), OEC (Object Existence - Complex), OES (Object Existence - Simple), and OT (Object Transition). Detailed definitions and the construction process can be found in Figure 15.
Time-R1 demonstrates strong competitiveness across multiple semantic categories. First, on the four tasks HAC, HAS, HP, and OA, Time-R1 achieves the highest scores among all compared methods, showcasing its ability to understand the details of human actions and to identify object features. For example, Time-R1 achieves an mIoU of 56.2 on HP, 11.1 points higher than the second-best method, TRACE, at 45.1. On HAS, Time-R1 reaches 39.3, outperforming Gemini-2.5-Pro's 30.9 by 8.4 points. Second, on the three tasks ES, EC, and OT, Time-R1 performs comparably to the top model TRACE, trailing it only narrowly. On HAP, Time-R1 also performs well, landing in the same tier as Gemini-2.5-Pro. Lastly, all models still show a noticeable gap to Gemini in understanding complex instructions, such as HAP, OC, and OEC. For example, on HAP, which involves procedural activity localization, Gemini achieves 27.6, while our model ranks second with 24.3. On object counting, Gemini attains 31.1, substantially outperforming our model's 21.8. In summary, Time-R1 performs well on both non-human simple instructions and human-related instructions, but there is still room for improvement in complex instruction grounding and object-related grounding.
Comparison of speed and accuracy between the transformers and vLLM inference libraries. We observe that inference with the transformers [51] library is very slow. To address this, we implemented accelerated inference using vLLM [23] for all 7 related downstream benchmarks. For example, on TVGBench, the vLLM-based implementation requires only 502 seconds to infer 800 samples using 8 GPUs, whereas the transformers implementation takes 2520 seconds, an overall speedup of $5\times$.
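For context, the text-only skeleton of such a vLLM evaluation loop looks like the sketch below; the model path is a placeholder, and the real evaluation additionally feeds sampled video frames through the model's multimodal interface, which we omit here.

```python
from vllm import LLM, SamplingParams

# Placeholder model path; tensor_parallel_size=8 mirrors the 8-GPU setup.
llm = LLM(model="path/to/time-r1-7b", tensor_parallel_size=8)
params = SamplingParams(temperature=0.0, max_tokens=512)

def batch_infer(prompts):
    """Run one batched generation pass and return the generated strings."""
    outputs = llm.generate(prompts, params)
    return [o.outputs[0].text for o in outputs]
```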
# D Ablation Studies

Ablation of different RFT data filtering strategies. As shown in Table 4, the data filtering strategy used in the initial round affects the model's performance. First, appropriate Gaussian filtering outperforms both uniform and random filtering. Among the Gaussian settings, a standard deviation of 0.3 yields the best results, followed by 0.5 and then 0.7. These findings suggest that incorporating moderately challenging samples during RFT improves the model's generalization capability more effectively than using either overly easy or extremely difficult examples.
Table 4: Ablation of data filtering strategies.

<table><tr><td>Method</td><td>R1@0.3</td><td>R1@0.5</td><td>R1@0.7</td><td>mIoU</td></tr><tr><td>random</td><td>39.4</td><td>26.5</td><td>16.4</td><td>27.4</td></tr><tr><td>gaussian (0.3)</td><td>41.6</td><td>28.5</td><td>15.6</td><td>28.6</td></tr><tr><td>gaussian (0.5)</td><td>40.6</td><td>28.2</td><td>16.0</td><td>28.3</td></tr><tr><td>gaussian (0.7)</td><td>37.2</td><td>26.9</td><td>15.5</td><td>26.5</td></tr><tr><td>uniform</td><td>40.4</td><td>28.5</td><td>15.9</td><td>28.3</td></tr></table>
Table 5: Ablation of KL and CoT in GRPO.

<table><tr><td>KL</td><td>CoT</td><td>R1@0.3</td><td>R1@0.5</td><td>R1@0.7</td><td>mIoU</td></tr><tr><td>✗</td><td>✗</td><td>40.4</td><td>29.1</td><td>14.9</td><td>28.1</td></tr><tr><td>✓</td><td>✗</td><td>40.8</td><td>27.4</td><td>15.0</td><td>27.7</td></tr><tr><td>✗</td><td>✓</td><td>42.9</td><td>29.5</td><td>15.0</td><td>29.1</td></tr><tr><td>✓</td><td>✓</td><td>41.6</td><td>28.5</td><td>15.6</td><td>28.6</td></tr></table>
Table 6: Comparison of the token-level loss design used by DAPO [56] and the sample-level loss design used by GRPO [45].

<table><tr><td rowspan="2">Loss</td><td colspan="4">Charades-STA</td><td colspan="4">ActivityNet</td><td colspan="4">TVGBench</td></tr><tr><td>R1@0.3</td><td>R1@0.5</td><td>R1@0.7</td><td>mIoU</td><td>R1@0.3</td><td>R1@0.5</td><td>R1@0.7</td><td>mIoU</td><td>R1@0.3</td><td>R1@0.5</td><td>R1@0.7</td><td>mIoU</td></tr><tr><td>GRPO</td><td>76.7</td><td>59.8</td><td>34.4</td><td>57.0</td><td>55.9</td><td>37.1</td><td>20.3</td><td>37.8</td><td>40.8</td><td>28.0</td><td>16.5</td><td>28.4</td></tr><tr><td>DAPO</td><td>77.4</td><td>60.0</td><td>34.1</td><td>57.2</td><td>56.2</td><td>37.4</td><td>20.4</td><td>38.0</td><td>41.6</td><td>28.5</td><td>15.6</td><td>28.6</td></tr></table>
Ablation of KL and CoT during GRPO training. As shown in Table 5, incorporating CoT reasoning during training leads to improved performance compared to the No-CoT setting, suggesting that CoT enhances the model's temporal video grounding capabilities. When KL divergence is omitted (No-KL), performance slightly decreases under the No-CoT setting but unexpectedly improves when CoT is present. However, we find that in the No-KL+CoT setting, the model often fails to produce a thinking process, directly jumping to answers. In contrast, using KL divergence helps maintain more logical reasoning that is easier to follow. To balance performance and interpretability, we adopt a training setup that includes both KL and CoT.
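The form of the KL term is not spelled out here; GRPO-style objectives commonly estimate it per token with the low-variance k3 estimator sketched below, and we assume (our assumption, not a statement from the paper) the same form applies with the coefficient $\beta = 0.04$ given above.

```python
import torch

def kl_penalty(logp, ref_logp):
    """Per-token k3 KL estimate common in GRPO-style training:
    exp(ref - cur) - (ref - cur) - 1. Non-negative, and zero exactly when
    the current policy matches the frozen reference policy."""
    delta = ref_logp - logp
    return torch.exp(delta) - delta - 1.0
```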
Comparison of tIoU and IoU during multi-epoch training. As shown in Figure 6, tIoU consistently outperforms standard IoU during both the early and late stages of training over the first 5 epochs. Notably, while tIoU steadily improves as training progresses, IoU shows a decline in performance by the fifth epoch. This highlights the advantage of using tIoU as a more stable and reliable reward for temporal video grounding.
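Both rewards build on plain temporal IoU between the predicted and ground-truth segments, sketched below; the paper's tIoU additionally modifies this quantity as described in the main text, which we do not reproduce here.

```python
def temporal_iou(pred, gt):
    """Plain temporal IoU between two (start, end) segments in seconds."""
    (ps, pe), (gs, ge) = pred, gt
    inter = max(0.0, min(pe, ge) - max(ps, gs))
    union = max(pe, ge) - min(ps, gs)
    return inter / union if union > 0 else 0.0
```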
Ablation of sample filtering in multi-epoch training. As shown in Figure 7, applying sample filtering (SF) to remove simpler training samples yields consistent performance improvements across epochs. This suggests that easy samples with high IoU may introduce noise or reduce the effectiveness of learning, and filtering them helps focus the model on more informative and challenging instances.
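A minimal sketch of this epoch-wise filtering, assuming the previous epoch's per-sample IoU is available; the threshold value is our illustrative assumption.

```python
def filter_easy_samples(dataset, ious, iou_threshold=0.7):
    """Drop samples the model already localizes well (IoU above threshold)
    before the next epoch, keeping training focused on informative cases."""
    return [ex for ex, iou in zip(dataset, ious) if iou < iou_threshold]
```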
Ablation of DAPO & GRPO. The sample-level loss used by GRPO computes the loss by averaging within each individual sample. This leads to unequal loss contributions per token when dealing with CoTs of varying lengths. DAPO addresses this issue by employing a token-level loss. The underlying principle is that the token-level loss can effectively guide the model in the process of CoT generation, allowing it to learn useful patterns from CoTs of different lengths sampled during training. In Table 6, we compare these two loss designs. We empirically find that DAPO outperforms GRPO on the majority of metrics; thus, we adopt DAPO's loss design.
Different model sizes. Table 7 presents a performance comparison across model sizes. The results indicate that larger models achieve better zero-shot performance and continue to outperform smaller models after fine-tuning. These findings support the notion that scaling up model capacity enhances generalization and leads to superior results on the TVG task.
# E Qualitative Results

Case study of temporal video grounding on Charades and ActivityNet. As shown in Figure 8, in the example above, given a relatively complex language instruction, Time-R1 demonstrates more accurate localization than all baselines, successfully capturing the initial event "goes back to the pink bucket" within the timestamp, while other SoTA models such as Gemini-2.5-Pro fail. In the example below, the model accurately localizes the event, excluding "a person is lying on the couch," and correctly distinguishes between sitting and lying, unlike the other models, which localize either only a small segment (TimeSuite and VideoChat-Flash) or the entire segment (TRACE and Gemini-2.5-Pro).
Case study of short video QA on VideoMME and TempCompass. As shown in Figures 9 and 10, Time-R1 demonstrates improved performance over the base model in tasks requiring positional judgment, scene storyline reasoning, and visual reasoning. For example, in Figure 9, Time-R1 correctly identifies that a car in the video is missing its right-front wheel, a detail that the base model fails to recognize. This reflects that Time-R1 likely possesses stronger video localization capabilities, which in turn enhance its visual reasoning ability. In Figure 12, we output a CoT when answering the QA task, providing some interpretability. This example shows that Time-R1's reasoning process is more concise, whereas the base model often reasons correctly but arrives at the wrong answer. This suggests that Time-R1's reasoning may be more effective in guiding the final answer, possibly benefiting from the outcome-driven RL of GRPO.

Table 7: Performance comparison of different model sizes.

<table><tr><td rowspan="2">Method</td><td colspan="4">Charades-STA</td><td colspan="4">ActivityNet</td><td colspan="4">TVGBench</td></tr><tr><td>R1@0.3</td><td>R1@0.5</td><td>R1@0.7</td><td>mIoU</td><td>R1@0.3</td><td>R1@0.5</td><td>R1@0.7</td><td>mIoU</td><td>R1@0.3</td><td>R1@0.5</td><td>R1@0.7</td><td>mIoU</td></tr><tr><td>Time-R1-3B</td><td>74.6</td><td>53.1</td><td>26.0</td><td>51.2</td><td>40.0</td><td>21.0</td><td>8.7</td><td>23.2</td><td>33.5</td><td>21.0</td><td>10.5</td><td>21.7</td></tr><tr><td>Time-R1-3B*</td><td>78.7</td><td>64.1</td><td>36.9</td><td>59.9</td><td>66.8</td><td>46.8</td><td>24.7</td><td>46.1</td><td>-</td><td>-</td><td>-</td><td>-</td></tr><tr><td>Time-R1-7B</td><td>78.1</td><td>60.8</td><td>35.5</td><td>58.1</td><td>58.1</td><td>39.0</td><td>21.4</td><td>40.5</td><td>41.8</td><td>29.4</td><td>16.4</td><td>29.2</td></tr><tr><td>Time-R1-7B*</td><td>82.8</td><td>72.2</td><td>50.1</td><td>60.9</td><td>73.3</td><td>55.6</td><td>34.0</td><td>52.1</td><td>-</td><td>-</td><td>-</td><td>-</td></tr></table>

![](images/82a76dab8c01378e3b1e4fcd0d4d4f1dc25fcbcbbbe2565c4ce1b2f37bcbc8ff.jpg)

Figure 6: Performance comparison of tIoU and IoU in multi-epoch training.

![](images/2850b8a4dee4de4da7b615ac7fd69541c847c1c0d6e6dacbea4f9a3cba7786ea.jpg)

Figure 7: Ablation of sample filtering in multi-epoch training.
Case study of long video QA on EgoSchema and VideoMME. Figure 13 presents a long egocentric video QA example focused on summarizing task steps. In the "Hanging the Dress" case, the base model fails to identify all key steps, while our Time-R1 model correctly selects the answer by generating a more accurate chain-of-thought (CoT). In Figure 14, the task involves identifying the primary tools and ingredients used in the video and understanding their respective contributions. An additional example in Figure 9 involves animated scene reasoning, where Time-R1 correctly infers, based on elements like the presence of a guitar, that the heroes defeated the enemies using the power of music, whereas the base model misinterprets the cause. Across these complex reasoning tasks involving long videos, Time-R1 consistently demonstrates superior performance compared to the base model.

Illustration of our prompt at training and inference time. Figure 11 presents the prompts used for the temporal video grounding and video QA tasks at both training and inference time.

Illustration of our prompt to annotate query semantics on TVGBench. Figure 15 presents the prompt used to annotate query semantics in the TVGBench dataset. The prompt is designed to guide the DeepSeek LLM in classifying each input query into one of 11 predefined semantic categories. To improve annotation quality, we refer to the construction of existing benchmarks and carefully select both positive and negative examples for each semantic type. These examples are chosen to include queries that are either easy or difficult for the model to answer, helping to refine the model's understanding and improve labeling accuracy.
Question: Where is the event "The man goes back to the pink bucket to wash another dark item clothing, he wrings it out in the pink bucket, throws it in the yellow bucket, then wrings his hands together a few times while staring at the camera"?

(video length: 74s)

<table><tr><td>Ground Truth</td><td>33.1s</td><td>→ 73.5s</td></tr><tr><td>TimeSuite</td><td>20.0s</td><td>→ 30.0s</td></tr><tr><td>Gemini-2.5-Pro</td><td></td><td>57.0s</td></tr><tr><td>VideoChat-Flash</td><td>0.0s</td><td>→ 17.2s</td></tr><tr><td>Trace</td><td></td><td>54.9s</td></tr><tr><td>Time-R1 (ours)</td><td>35.0s</td><td>→ 75.0s</td></tr></table>

Time-R1 thinking (ours): The event "The man goes back to the pink bucket to wash another dark item clothing, he wrings it out in the pink bucket, throws it in the yellow bucket, then wrings his hands together a few times while staring at the camera." occurs between 35.00 and 75.00 seconds in the video.

![](images/f3720994a4e2bcd3e0b6710ca9a9980c9de4e56b8036725f4e18398e224ce1fe.jpg)

Figure 8: Temporal video grounding cases from the Charades and ActivityNet benchmarks. Time-R1 outperforms other SoTA models.

![](images/6b59dac2effad00bd2b2403c40b620e275c9eb6e1b1d2f1ca4831a76d8f2db9b.jpg)

Question: Where is the event "a person is seen sitting on a couch"?

(video length: 31s)

<table><tr><td>Ground Truth</td><td>0.0s←</td><td>12.8s</td></tr><tr><td>TimeSuite</td><td>0.0s←5.0s</td><td></td></tr><tr><td>Gemini-2.5-Pro</td><td>0.0s←</td><td>30.0s</td></tr><tr><td>VideoChat-Flash</td><td>0.0s←5.0s</td><td></td></tr><tr><td>Trace</td><td>0.0s←</td><td>31.6s</td></tr><tr><td>Time-R1 (ours)</td><td>0.0s←10.0s</td><td></td></tr></table>

Time-R1 thinking (ours): The event "a person is seen sitting on a couch" occurs at the beginning of the video. The person is seated on a couch, holding a phone, and appears to be engaged with it. This aligns with the initial frames of the video.
(video length: 38m) Question: What do heroes of legend use to defeat the enemy based on the video?

- Their wisdom
- Their superpower
- (B) A big robot
- 4 Power of music

Figure 9: Case study on VideoMME (w/o CoT), demonstrating that Time-R1 achieves better performance than the base model.

(video length: 17s) Question: What's wrong with this car?

- It doesn't have a left rear wheel.
- Its headlamp is broken.
- (B) It doesn't have a right front wheel.
- (D) Its right door is broken.

Figure 10: Case study on TempCompass (w/o CoT), demonstrating that Time-R1 achieves better performance than the base model.

|
| 470 |
+
|
| 471 |
+

|
| 472 |
+
|
| 473 |
+
(video length: 10s)Question: Which sentence better captures the essence of the video?
|
| 474 |
+
|
| 475 |
+
(A) First, two hand are holding the clay pot and then three hands are holding it.
|
| 476 |
+
In the video, three hands are holding the clay pot, then two hands are holding the clay pot.
|
| 477 |
+
|
| 478 |
+

|
| 479 |
+
Figure 11: Illustration of prompts at both training and inference time.
|
| 480 |
+
|
| 481 |
+

|
| 482 |
+
|
| 483 |
+

|
| 484 |
+
|
| 485 |
+
(video length: 14s)Question: What is the sequence of actions performed by the girl?
|
| 486 |
+
|
| 487 |
+
(A) Putting on clothes, jumping into water, taking off clothes
|
| 488 |
+
(B) Jumping into water, putting on clothes, taking off clothes
|
| 489 |
+
(C) Taking off clothes, putting on clothes, jumping into water
|
| 490 |
+
|
| 491 |
+
# Temporal Video Grounding

# System Prompt

You are a video analysis expert.

# Input Prompt

To accurately pinpoint the event "[EVENT]" in the video, determine the precise time period of the event. Output your thought process within the <think> </think> tags, including analysis with either specific time ranges (xx.xx to xx.xx) in <timestep> </timestep> tags. Then, provide the start and end times (in seconds, precise to two decimal places) in the format "start time to end time" within the <answer> </answer> tags. For example: "12.54 to 17.83".

# Video QA

# System Prompt

You are a video analysis expert.

# Input Prompt (w/o CoT)

Carefully watch the video and pay attention to the cause and sequence of events, the detail and movement of objects, and the action and pose of persons. Based on your observations, select the best option that accurately addresses the question. Provide the letter of the best option wrapped in parentheses within the <answer> </answer> tags. Question: {} {}

# Input Prompt (w/ CoT)

Carefully watch the video and pay attention to the cause and sequence of events, the detail and movement of objects, and the action and pose of persons. Based on your observations, select the best option that accurately addresses the question. Output your thought process within the <think> </think> tags, including analysis with either specific timestamps (xx.xx) or time ranges (xx.xx to xx.xx) in <timestep> </timestep> tags. Then, provide the letter of the best option wrapped in parentheses within the <answer> </answer> tags. Question: {} {}
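Responses in this template can be validated and parsed mechanically; the sketch below is an illustrative parser for the `<answer>` span (the function name is ours, not from the released code).

```python
import re

def parse_tvg_answer(text):
    """Extract the predicted (start, end) segment from a response following
    the <answer>start time to end time</answer> template; returns None if the
    template is violated (e.g., to assign a format reward of zero)."""
    m = re.search(r"<answer>\s*([\d.]+)\s*to\s*([\d.]+)\s*</answer>", text)
    if not m:
        return None
    start, end = float(m.group(1)), float(m.group(2))
    return (start, end) if end >= start else None
```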
(video length: 8s) Question: Is the pineapple being pushed forward by a person?

# Base model thinking:

To determine if the pineapple is being pushed forward by a person, let's analyze the video:

1. The video shows a pineapple on a pink surface.
2. A hand appears in the frame and interacts with the pineapple.
3. The hand lifts the pineapple, indicating that it is being moved by the person.

Given this analysis, the pineapple is indeed being pushed forward by a person.

# Time-R1 thinking (ours):

The video shows a hand reaching into the frame and grasping the pineapple. The hand then lifts the pineapple, indicating that the pineapple is not being pushed forward by a person but rather being moved by the hand.
(video length: 9s) Question: In which sequence does the man perform the actions?

(A) Gliding on the skateboard and then jumping
(B) Jumping and then tying his shoelaces
(C) Tying his shoelaces and then gliding on the skateboard
(D) Gliding on the skateboard and then tying his shoelaces

# Base model thinking:

Let's analyze the sequence of actions performed by the man in the video:

1. The man is initially seen standing on a skateboard.
2. He then proceeds to tie his shoelaces.
3. After tying his shoelaces, he glides on the skateboard.

Given this sequence, the correct answer is: (D) gliding on the skateboard and then tying his shoelaces

# Time-R1 thinking (ours):

The sequence of actions performed by the man in the video is as follows: first, he ties his shoelaces, and then he glides on the skateboard. This can be observed from the video where the man is seen tying his shoelaces before he starts gliding on the skateboard.

Figure 12: Case study on TempCompass (w/ CoT), demonstrating that Time-R1 achieves better performance than the base model.

|
| 571 |
+
|
| 572 |
+

|
| 573 |
+
|
| 574 |
+

|
| 575 |
+
|
| 576 |
+
(video length: 3m) Question: Summarize the steps taken by c to complete the tasks in this video. don't list every action narrated, but rather focus on major components and how they're executed.
|
| 577 |
+
|
| 578 |
+
(A) C folds the dress, places it on the ironing board, and then hangs it up.
|
| 579 |
+
(B) C folds the dress, places it on the ironing board, and then packs it.
|
| 580 |
+
(C) C folds the dress, places it on the ironing board, and then washes it.
|
| 581 |
+
(D) C folds the dress, places it on the ironing board, and then irons it and hangs it up.
|
| 582 |
+
(E) C folds the dress, places it on the ironing board, irons it, and then folds it again.
|
| 583 |
+
|
| 584 |
+
# Base model thinking:
|
| 585 |
+
|
| 586 |
+
Let's analyze the video step by step to determine the correct sequence of actions:
|
| 587 |
+
|
| 588 |
+
1. Folding the Dress: The video starts with C folding a blue dress on the ironing board. This is the first major action.
|
| 589 |
+
2. Placing the Dress on the Ironing Board: After folding the dress, C places it on the ironing board.
|
| 590 |
+
3. Ironing the Dress: C then irons the dress, ensuring it is smooth and free of wrinkles.
|
| 591 |
+
|
| 592 |
+
4. Hanging the Dress: Finally, C hangs the dress up, completing the task.
|
| 593 |
+
|
| 594 |
+
Now, let's evaluate the options:
|
| 595 |
+
|
| 596 |
+
(A) C folds the dress, places it on the ironing board, and then hangs it up.
|
| 597 |
+
(B) C folds the dress, places it on the ironing board, and then packs it.
|
| 598 |
+
(C) C folds the dress, places it on the ironing board, and then washes it.
|
| 599 |
+
(D) C folds the dress, places it on the ironing board, and then irons it and hangs it up.
|
| 600 |
+
(E) C folds the dress, places it on the ironing board, irons it, and then folds it again. The correct sequence of actions matches option (D).
|
| 601 |
+
|
| 602 |
+
# Time-R1 thinking (ours):
|
| 603 |
+
|
| 604 |
+
The video shows C folding a dress, placing it on the ironing board, and then ironing it. The sequence of actions is clear and follows a logical order. The dress is folded, placed on the ironing board, and then ironed, which is a common sequence for completing a task like ironing a dress.
|
| 605 |
+
|
| 606 |
+
Figure 13: Case study on EgoSchema (w/ CoT), demonstrating that Time-R1 achieves better performance than the base model.
|
| 607 |
+
|
| 608 |
+

|
| 609 |
+
Figure 14: Case study on EgoSchema (w/o CoT), demonstrating that Time-R1 achieves better performance than the base model.
|
| 610 |
+
|
| 611 |
+

|
| 612 |
+
|
| 613 |
+

|
| 614 |
+
|
| 615 |
+
(video length: 3m) Question: Identify the primary tools and ingredients c used throughout the video, and discuss how they contributed to the overall dessert preparation process.
|
| 616 |
+
|
| 617 |
+
(A) The primary tools and ingredients c used throughout the video were a spatula, a strainer, a dessert bowl, cocoa powder, and tissue paper. the spatula was used to spread the batter in the bowl. the strainer was used to sieve the cocoa powder into the bowl. the dessert bowl was used to hold the batter. the cocoa powder was used to add flavor and color to the batter. the tissue paper was used to wipe the edges of the bowl.
|
| 618 |
+
(B) The primary tools and ingredients c used throughout the video were a mixing bowl, a whisk, X a baking dish, flour, sugar, eggs, and butter. the mixing bowl was used to mix the batter. the whisk was used to beat the eggs. the baking dish was used to bake the cake. the flour was used to add structure to the cake. the sugar was used to add sweetness to the cake. the eggs were used to add moisture to the cake. the butter was used to add flavor and richness to the cake.
|
| 619 |
+
(C) The primary tools and ingredients c used throughout the video were a muffin tin, a spoon, chocolate chips, flour, sugar, eggs, and butter. the muffin tin was used to bake the muffins. the spoon was used to scoop the batter into the muffin tin. the chocolate chips were used to add flavor and color to the muffins. the flour was used to add structure to the muffins. the sugar was used to add sweetness to the muffins. the eggs were used to add moisture to the muffins. the butter was used to add flavor and richness to the muffins.
|
| 620 |
+
(D) The primary tools and ingredients c used throughout the video were a cake pan, a spatula, chocolate frosting, flour, sugar, eggs, and butter. The cake pan was used to bake the cake. The spatula was used to spread the frosting on the cake. The chocolate frosting was used to add flavor and color to the cake. The flour was used to add structure to the cake. The sugar was used to add sweetness to the cake. The eggs were used to add moisture to the cake. The butter was used to add flavor and richness to the cake.
|
| 621 |
+
(E) The primary tools and ingredients c used throughout the video were a mixing bowl, a whisk, a baking sheet, flour, sugar, eggs, and butter. the mixing bowl was used to mix the batter. the whisk was used to beat the eggs. the baking sheet was used to bake the cookies. the flour was used to add structure to the cookies. the sugar was used to add sweetness to the cookies. the eggs were used to add moisture to the cookies. the butter was used to add flavor and richness to the cookies.
|
| 622 |
+
|
| 623 |
+
# Task

Classify input queries into exactly one of the following categories based on their semantic content:

# 1. Human Action (Simple)

- Definition: Singular physical movements or basic interactions.
- Examples:
  - person opens a book over their head.
  - The person gets out some ginger.
  - who did I talk to in the shopping mall?

# 2. Human Action (Complex)

- Definition: Single continuous event with intricate components or concurrent elements.
- Examples:
  - He is talking while several people are using rowing machines.
  - One man wearing blue shirt wearing a jumping leg extension and another man wearing red pants play on a field.
  - who did I interact with when I did activity of fixing camping tent?

# 3. Human Action (Procedural)

- Definition: Contains multiple sequential events with explicit temporal boundaries; contains multiple actions, each with a clear start and end.
- Examples:
  - The person procures a condiment from the pantry, takes a spoon from the drawer which he uses to scoop it into the pan, then returns the condiment to the pantry, places the spoon in the sink and again stirs the pan.
  - The person takes out a spoon from the drawer, scoops some sugar into the glass, stirs it with the juice, and returns the package to the pantry.
- Negative Examples:
  - Then the man juices some lemons in a juicer: only one action
  - She gets out a cutting board and knife: only one action
  - He then finishes by doing tricks: only one action
  - She removes bits of shell until there is a small hole: only one action
# 4. Human Pose

- Definition: Static body positions or group configurations. Posture descriptors, positional prepositions
- Examples:
  - Several other people are in the background working out on the equipment.
  - A young child is seen standing before a set of monkey bars.

# 5. Object Existence (Simple)

- Definition: Current location/status queries. Simple location prepositions.
- Examples:
  - Where is the tap?
  - where is the chopsticks?
  - In what location did i see the blue tent?

# 6. Object Existence (Complex)

- Definition: Queries about historical object positions changed by human actions, requiring temporal-action context (e.g., "after/before [action]").
- Examples:
  - Where was the spatula after I first used it?
  - Where was the sieve before I picked it?
  - what bolt did I pick?
  - What mushroom did i chop
# 7. Object Attribute

- Definition: Physical/abstract property inquiries. Property descriptors (color/size/material)
- Examples:
  - what material did I pick from the shelf?
  - what color is the toilet bin?

# 8. Object Counting

- Definition: Quantitative object presence queries. Numeric quantifiers, plural objects
- Examples:
  - how many tissue paper were on the floor?
  - how many rolls are in the tray

# 9. Object Transition

- Definition: State/position change confirmation. Transformation verbs, completion checks
- Examples:
  - The bulb is broken apart.
  - Did I close fridge?
# 10. Environment Change

- Definition: Dynamic scene modifications. Transient elements, overlay content
- Examples:
  - video ends with clothes/captions scrolling down

# 11. Environment State

- Definition: Persistent scene elements. Static overlays, permanent fixtures
- Examples:
  - Intro states 'Progression: Lisa's First Season'
  - 'Trend Routing Technology' logo appears

# Output Format

Return ONLY the exact category name from: [Human Action (Procedural), Human Action (Complex), Human Action (Simple), Human Pose, Object Existence (Simple), Object Existence (Complex), Object Attribute, Object Counting, Object Transition, Environment Change, Environment State]

INPUT_prompt = "Given the query below, classify it into one of the categories mentioned above. Query: {query} Your response:"

Figure 15: Prompts for the LLM used to annotate the semantics of each query on TVGBench.
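For completeness, a hypothetical harness around this classification prompt might look like the sketch below; `call_llm` is a stand-in for whichever chat-completion client is used (the paper uses a DeepSeek LLM), and the fallback behavior is our assumption.

```python
# Closed label set matching the Output Format list in the Figure 15 prompt.
CATEGORIES = [
    "Human Action (Procedural)", "Human Action (Complex)", "Human Action (Simple)",
    "Human Pose", "Object Existence (Simple)", "Object Existence (Complex)",
    "Object Attribute", "Object Counting", "Object Transition",
    "Environment Change", "Environment State",
]

INPUT_PROMPT = ("Given the query below, classify it into one of the categories "
                "mentioned above. Query: {query} Your response:")

def annotate_semantics(query, call_llm):
    """Classify one TVGBench query; return None if the model strays from
    the closed label set, so the query can be re-annotated."""
    response = call_llm(INPUT_PROMPT.format(query=query)).strip()
    return response if response in CATEGORIES else None
```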
data/2025/2503_13xxx/2503.13377/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:2cab935b45a76b83197c91cb35b8824c90c8f0083ce9de30f8515a98e4f2e15f
size 908273

data/2025/2503_13xxx/2503.13377/layout.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_13xxx/2503.13399/3216c4a9-6622-4f8f-9268-b62c8b700c4e_content_list.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_13xxx/2503.13399/3216c4a9-6622-4f8f-9268-b62c8b700c4e_model.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_13xxx/2503.13399/3216c4a9-6622-4f8f-9268-b62c8b700c4e_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6914af7a48f591a220b0a452973c5cfbf0866e5a1e32891b26e86a71fc8971b1
size 28043625

data/2025/2503_13xxx/2503.13399/full.md
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_13xxx/2503.13399/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fd50438fe70c13178e19e8e85c4e2f4a4a50c79e98d15cf3c1d62698d2361ba1
size 2506727

data/2025/2503_13xxx/2503.13399/layout.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_13xxx/2503.13415/d6bcf737-8470-4ed5-949a-6d04e53f6bf7_content_list.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_13xxx/2503.13415/d6bcf737-8470-4ed5-949a-6d04e53f6bf7_model.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_13xxx/2503.13415/d6bcf737-8470-4ed5-949a-6d04e53f6bf7_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:20523402a8d58ab93c1cb436ab38e43f3a276bad100ce4e592c8dc586bad7d46
size 11544534

data/2025/2503_13xxx/2503.13415/full.md
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_13xxx/2503.13415/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a68dbd1f052bc70ece874719173b58a55de03a208f6d10e4966d985040b6ca04
size 1641056

data/2025/2503_13xxx/2503.13415/layout.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_13xxx/2503.13423/ee02b8fd-a1b4-4f31-b152-93f5d36bdbe3_content_list.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_13xxx/2503.13423/ee02b8fd-a1b4-4f31-b152-93f5d36bdbe3_model.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_13xxx/2503.13423/ee02b8fd-a1b4-4f31-b152-93f5d36bdbe3_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:93be3cf8411166f6183cfcad9112748ba68ba79433e4f0a5a89be39c59c7d6cf
size 1715365

data/2025/2503_13xxx/2503.13423/full.md
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_13xxx/2503.13423/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7dbb94913a22c6ac9cca71011ad6dd70655cffc30cbe5dd83d4aa7d6a0f3ae8d
size 896437

data/2025/2503_13xxx/2503.13423/layout.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_13xxx/2503.13436/53224727-d9f9-435a-ad74-c91800bf8f34_content_list.json
ADDED
@@ -0,0 +1,1985 @@
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Unified Autoregressive Visual Generation and Understanding with Continuous Tokens",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
222,
|
| 8 |
+
122,
|
| 9 |
+
774,
|
| 10 |
+
172
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Lijie Fan $^{1,*}$ Luming Tang $^{1,*}$ Siyang Qin $^{1,*}$ Tianhong Li $^{2}$ Xuan Yang $^{1}$ Siyuan Qiao $^{1}$ Andreas Steiner $^{1}$ Chen Sun $^{1}$ Yuanzhen Li $^{1}$ Tao Zhu $^{1}$ Michael Rubinstein $^{1}$ Michalis Raptis $^{1}$ Deqing Sun $^{1,\\dagger}$ Radu Soricut $^{1,\\dagger}$ Google DeepMind ${}^{2}\\mathrm{MIT}\\quad {*},$ equal contribution",
|
| 17 |
+
"bbox": [
|
| 18 |
+
186,
|
| 19 |
+
223,
|
| 20 |
+
821,
|
| 21 |
+
290
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "image",
|
| 27 |
+
"img_path": "images/fe57fbb071286d72cde1ae0a4dc870c142112e69c0e374902ceb8453aa9cf6c0.jpg",
|
| 28 |
+
"image_caption": [
|
| 29 |
+
"Figure 1: Generated images from our UniFluid autoregressive model after aesthetic finetuning."
|
| 30 |
+
],
|
| 31 |
+
"image_footnote": [],
|
| 32 |
+
"bbox": [
|
| 33 |
+
178,
|
| 34 |
+
304,
|
| 35 |
+
815,
|
| 36 |
+
665
|
| 37 |
+
],
|
| 38 |
+
"page_idx": 0
|
| 39 |
+
},
|
| 40 |
+
{
|
| 41 |
+
"type": "text",
|
| 42 |
+
"text": "Abstract",
|
| 43 |
+
"text_level": 1,
|
| 44 |
+
"bbox": [
|
| 45 |
+
459,
|
| 46 |
+
686,
|
| 47 |
+
537,
|
| 48 |
+
702
|
| 49 |
+
],
|
| 50 |
+
"page_idx": 0
|
| 51 |
+
},
|
| 52 |
+
{
|
| 53 |
+
"type": "text",
|
| 54 |
+
"text": "We present UniFluid, a unified autoregressive framework for joint visual generation and understanding leveraging continuous visual tokens. Our unified autoregressive architecture processes multimodal image and text inputs, generating discrete tokens for text and continuous tokens for image. We find though there is an inherent trade-off between the image generation and understanding task, a carefully tuned training recipe enables them to improve each other. By selecting an appropriate loss balance weight, the unified model achieves results comparable to or exceeding those of single-task baselines on both tasks. Furthermore, we demonstrate that employing stronger pre-trained LLMs and random-order generation during training is important to achieve high-fidelity image generation within this unified framework. Built upon the Gemma model series, UniFluid exhibits competitive performance across both image generation and understanding, demonstrating strong transferability to various downstream tasks, including image editing for generation, as well as visual captioning and question answering for understanding.",
|
| 55 |
+
"bbox": [
|
| 56 |
+
228,
|
| 57 |
+
710,
|
| 58 |
+
767,
|
| 59 |
+
904
|
| 60 |
+
],
|
| 61 |
+
"page_idx": 0
|
| 62 |
+
},
|
| 63 |
+
{
|
| 64 |
+
"type": "aside_text",
|
| 65 |
+
"text": "arXiv:2503.13436v1 [cs.CV] 17 Mar 2025",
|
| 66 |
+
"bbox": [
|
| 67 |
+
22,
|
| 68 |
+
260,
|
| 69 |
+
57,
|
| 70 |
+
705
|
| 71 |
+
],
|
| 72 |
+
"page_idx": 0
|
| 73 |
+
},
|
| 74 |
+
{
|
| 75 |
+
"type": "footer",
|
| 76 |
+
"text": "Tech Report.",
|
| 77 |
+
"bbox": [
|
| 78 |
+
171,
|
| 79 |
+
922,
|
| 80 |
+
250,
|
| 81 |
+
936
|
| 82 |
+
],
|
| 83 |
+
"page_idx": 0
|
| 84 |
+
},
|
| 85 |
+
{
|
| 86 |
+
"type": "text",
|
| 87 |
+
"text": "1 Introduction",
|
| 88 |
+
"text_level": 1,
|
| 89 |
+
"bbox": [
|
| 90 |
+
174,
|
| 91 |
+
89,
|
| 92 |
+
310,
|
| 93 |
+
106
|
| 94 |
+
],
|
| 95 |
+
"page_idx": 1
|
| 96 |
+
},
|
| 97 |
+
{ "type": "text", "text": "Large Language Models (LLMs) have recently advanced from text-centric architectures, such as BERT [11] and GPT [35], toward multimodal systems capable of understanding and generating content across different modalities. GPT-3 [5] and PaLM [9] show that scaling language models leads to emergent capabilities, while Flamingo [2] further demonstrates that incorporating visual inputs facilitates unified multimodal reasoning. This trend toward unified vision-language models, using a single model for diverse visual understanding and generation tasks, has promising potential for leveraging knowledge and reasoning abilities that transfer across different vision and language tasks, ultimately facilitating more robust and generalizable multimodal representation and modeling capabilities.", "bbox": [174, 122, 823, 247], "page_idx": 1 },
{ "type": "text", "text": "Motivated by the advantages and strong scaling properties of autoregressive models, coupled with their simplicity, we investigate a pure autoregressive framework for unified visual generation and understanding, without the limitations introduced by vector quantization (VQ). In this paper, we introduce UniFluid, a unified framework that leverages continuous visual tokens within an autoregressive architecture to jointly handle vision-language generation and understanding tasks. Building upon Gemma [48], pre-trained on a large-scale text corpus, UniFluid unlocks powerful visual generation and understanding capabilities through training with paired image-text data, and further allows these two tasks to mutually benefit each other within a single architecture.", "bbox": [174, 253, 823, 364], "page_idx": 1 },
{ "type": "text", "text": "Specifically, UniFluid adopts a unified autoregressive framework where both text and continuous visual inputs are embedded as tokens in the same space, enabling seamless joint training of image generation and understanding tasks. UniFluid integrates a continuous tokenizer [12, 21] for image generation and a pre-trained SigLIP [59] image encoder for understanding tasks, while textual inputs are processed using a standard SentencePiece tokenizer [20]. The resulting multimodal sequences are modeled autoregressively using Gemma [48] as the underlying transformer backbone. Task-specific prediction heads, a diffusion-based head for image generation and a cross-entropy head for text generation, ensure effective modality-specific training and inference, enabling UniFluid to efficiently learn shared representations that mutually enhance its generation and understanding capabilities.", "bbox": [174, 369, 823, 494], "page_idx": 1 },
{ "type": "text", "text": "Our experiments demonstrate several key advantages of the proposed unified training strategy. We find that, although there is a trade-off between the two tasks, a carefully tuned training recipe allows the tasks to support each other and outperform the single-task baselines. Effectively balancing the loss between the tasks yields a single model that performs both with results superior to or on par with single-task models. Moreover, the choice of pre-trained LLM backbone significantly impacts visual generation performance. We also find that while employing random generation order is essential for high-quality image synthesis, it is less critical for understanding tasks. Finally, our unified pre-trained models show strong generalization and transferability, achieving compelling results in downstream applications, including image editing and various vision-language understanding benchmarks.", "bbox": [174, 501, 823, 626], "page_idx": 1 },
{ "type": "text", "text": "2 Related Works", "text_level": 1, "bbox": [174, 648, 328, 666], "page_idx": 1 },
{ "type": "text", "text": "Multimodal Large Language Models. Multimodal Large Language Models [2, 10, 23, 26, 25, 3, 42] have shown strong performance on visual understanding tasks. Flamingo [2] adopted a frozen LLM and vision encoder, utilizing a perceiver with cross-attention to bridge the modalities. LLaVA [26, 25] proposed instruction tuning over pre-trained LLMs with multimodal inputs to align a pre-trained image encoder into the LLM's embedding space, thereby endowing it with visual understanding and instruction-following capabilities. MiniGPT-4 [62] and mPLUG-Owl [56] have shown that vision encoders can be connected to LLMs through projection layers, demonstrating sophisticated visual reasoning capabilities. The PaliGemma [3, 42] series built upon the Gemma [47, 48] model family to develop versatile vision-language models capable of strong transfer to diverse downstream visual understanding tasks.", "bbox": [174, 681, 823, 821], "page_idx": 1 },
{ "type": "text", "text": "Autoregressive Image Generation. While diffusion models [41, 36] have achieved impressive success in image generation, autoregressive image generation methods have also shown significant development, driven by their simplicity and closeness to LLM training paradigms. A large body of research centers on tokenizing images into discrete tokens and applying autoregressive objectives to these discrete representations. Notable examples include Parti [57] and Muse [6]. [49] proposes an approach that operates on image scales, progressively refining resolutions from coarse to fine through", "bbox": [174, 827, 823, 911], "page_idx": 1 },
{ "type": "page_number", "text": "2", "bbox": [493, 936, 503, 946], "page_idx": 1 },
{ "type": "image", "img_path": "images/21fcf027d2bdd513ed513d39f97fd528b50bf3f4df10285dbb085df06e72a1cd.jpg", "image_caption": ["Figure 2: High-level illustration of UniFluid. UniFluid performs joint training of image generation and understanding tasks through next token prediction. For image embeddings, we use a VAE image tokenizer for generation, and a SigLIP image encoder for understanding. BOI/BOS stands for Beginning of Image/Sentence."], "image_footnote": [], "bbox": [174, 95, 823, 366], "page_idx": 2 },
{ "type": "text", "text": "next-scale prediction. Alternatively, works such as MAR [21], Fluid [12], and techniques employing per-token diffusion heads on top of LLM-predicted embeddings have explored autoregressive image generation with continuous visual tokens.", "bbox": [169, 439, 823, 481], "page_idx": 2 },
{ "type": "text", "text": "Unified Multimodal Models. There is growing research interest in unifying visual generation and understanding within a single model. VQ-based models, such as Chameleon [46], Emu [44], and Janus [54, 8], propose encoding visual inputs into discrete tokens and unifying tasks into next-token prediction within this discrete token space. Models with hybrid training targets, such as Transfusion [61, 38] and Show-O [55], aim to unify next-token prediction objectives with diffusion objectives within a single framework. MetaMorph [50] maintains the autoregressive objective by regressing visual SigLIP [59] features using an LLM, but necessitates a separate diffusion model to decode the predicted latent features into images. Our approach distinguishes itself by performing per-token autoregressive generation using continuous visual tokens. This maintains the next-token prediction objective, while not being limited by vector-quantized tokens. [45] also explores using continuous tokens to generate multimodal outputs.", "bbox": [169, 487, 826, 640], "page_idx": 2 },
{ "type": "text", "text": "3 Method", "text_level": 1, "bbox": [171, 659, 272, 674], "page_idx": 2 },
{ "type": "text", "text": "In this section, we illustrate the architecture of our UniFluid model. The model takes both image and text sequences as input and is jointly trained on generation and understanding tasks, using next-token prediction as its training objective.", "bbox": [169, 689, 823, 733], "page_idx": 2 },
{ "type": "text", "text": "3.1 Unified Autoregressive Modeling with Continuous Visual Tokens", "text_level": 1, "bbox": [169, 747, 660, 762], "page_idx": 2 },
{ "type": "text", "text": "Our approach leverages the autoregressive paradigm to unify visual understanding and generation within a single framework. Given an ordered sequence of tokens $X = \\{x^{1},\\dots,x^{n}\\}$, the autoregressive model [34, 14, 51, 52] factorizes the joint probability distribution as a product of conditional probabilities, effectively framing the generation task as a sequential \"next token prediction\" problem: $p(X) = \\prod_{i = 1}^{n}p(x^{i}\\mid x^{1},\\dots,x^{i - 1})$. As shown in MAR [21] and Fluid [12], this autoregressive formulation is applicable to both discrete tokens and continuous tokens. In UniFluid, we exploit this property to enable the generation of continuous visual tokens under the unified decoder-only framework. Our model is modality-agnostic; both text and image tokens are treated as elements within a long unified sequence, and their respective logits are predicted iteratively in an autoregressive manner by the backbone transformer.", "bbox": [169, 772, 826, 912], "page_idx": 2 },
{ "type": "page_number", "text": "3", "bbox": [493, 935, 503, 946], "page_idx": 2 },
{ "type": "text", "text": "To accommodate the distinct nature of text and image modalities, we employ modality-specific prediction heads to compute the appropriate loss function and sampling procedure for each modality. This unified approach allows the model to learn a shared representation space through the unified training procedure, facilitating synergistic learning and enabling seamless transitions between visual generation and understanding.", "bbox": [169, 90, 823, 161], "page_idx": 3 },
{ "type": "text", "text": "3.2 Architecture", "text_level": 1, "bbox": [171, 181, 302, 196], "page_idx": 3 },
{ "type": "text", "text": "As depicted in Figure 2, UniFluid employs a unified framework where both text and image inputs are tokenized and projected into a shared embedding space. This allows it to leverage a decoder-only transformer as the core backbone for the unified autoregressive task. Text inputs are tokenized using the SentencePiece tokenizer, resulting in discrete tokens with a vocabulary size of $V$. For image generation, images are encoded into continuous visual tokens using a continuous variational autoencoder. To facilitate image understanding, following PaliGemma, we use SigLIP as a separate image encoder to extract high-level information from the visual inputs. UniFluid consists of a classification head to convert the transformer's text logits into a categorical distribution, and a diffusion head to convert image logits into a per-token probability distribution.", "bbox": [169, 209, 823, 335], "page_idx": 3 },
{ "type": "text", "text": "The inherent structure of text as a linear sequence aligns well with the standard 1D positional embeddings of the LLM, which are sufficient for text modeling and image understanding tasks. However, image tokens possess a 2D spatial structure. To capture this inherent 2D nature, we incorporate learnable 2D positional embeddings, which are added to the image token embeddings. Meanwhile, inspired by [58], to achieve random-order generation, a position embedding for the next predicted token is also added to each image token. To enhance the model's ability to initiate and guide image generation, we prepend a \"Beginning of Image\" (BOI) token to the sequence of continuous image tokens. This BOI token serves as a distinct signal, indicating the start of the visual generation process. Given that the sequence length for generated image tokens is predefined (256 tokens for $256 \\times 256$ images), an explicit \"End of Image\" token is unnecessary in our case.", "bbox": [169, 340, 826, 481], "page_idx": 3 },
{ "type": "text", "text": "4 Implementation", "text_level": 1, "bbox": [171, 505, 339, 522], "page_idx": 3 },
{ "type": "text", "text": "4.1 Training", "text_level": 1, "bbox": [171, 537, 274, 553], "page_idx": 3 },
{ "type": "text", "text": "Per-token Classification Head for Discrete Text Tokens. We employ the same SentencePiece tokenizer as Gemma for text tokenization. The transformer's output logits for text are transformed into categorical probability distributions over the vocabulary, and we apply the standard cross-entropy loss, denoted as $\\mathcal{L}_{\\text{Text}}$, to optimize the prediction of these discrete text tokens.", "bbox": [169, 566, 823, 622], "page_idx": 3 },
{ "type": "text", "text": "Per-token Diffusion Head for Continuous Visual Tokens. We adopt the same continuous tokenizer as Fluid to embed $256 \\times 256$ images into $32 \\times 32 \\times 4$ continuous tokens, and use a patch size of 2 to merge 4 tokens into one. To model the per-token distribution of these predicted continuous visual tokens, we employ a lightweight MLP as a diffusion head. We adopt the same diffusion process and loss function, denoted as $\\mathcal{L}_{\\text{Visual}}$, as in [21, 12], which is specifically tailored for continuous visual token prediction. For the understanding task, the input image resolution is $224 \\times 224$, and we use SigLIP as the image encoder. Note that the SigLIP features are only used as a prefix for the understanding task during training, and no losses are added on top of them.", "bbox": [169, 628, 823, 739], "page_idx": 3 },
{ "type": "text", "text": "Task-Specific Training Configurations.", "text_level": 1, "bbox": [171, 744, 444, 760], "page_idx": 3 },
{ "type": "text", "text": "Image Understanding: For image understanding tasks, the model is provided with image embeddings and question tokens as an input prefix. Following PaliGemma, we apply a bidirectional attention mask to both image and question tokens. A causal attention mask is applied to the answer tokens, ensuring that the model only attends to previous answer tokens during autoregressive generation. The text token loss, $\\mathcal{L}_{\\text{Text}}$, is calculated specifically on the answer text tokens.", "bbox": [169, 766, 823, 835], "page_idx": 3 },
{ "type": "text", "text": "Image Generation: Conversely, for image generation tasks, text prompts are provided as conditioning inputs. To maintain the appropriate information flow, we employ a bidirectional attention mask for the text prompt tokens, enabling them to attend to all other text tokens. A causal attention mask is applied to the image tokens, ensuring that each image token only attends to preceding image tokens. The visual token loss, $\\mathcal{L}_{\\text{Visual}}$, is calculated on the generated image tokens.", "bbox": [169, 842, 826, 912], "page_idx": 3 },
{ "type": "page_number", "text": "4", "bbox": [493, 935, 504, 946], "page_idx": 3 },
{ "type": "text", "text": "Unified Loss Function. The total training loss for UniFluid is a weighted sum of the text token prediction loss and the visual token prediction loss, defined as $\\mathcal{L} = \\mathcal{L}_{\\text{Visual}} + \\lambda \\cdot \\mathcal{L}_{\\text{Text}}$, where $\\lambda$ is a hyperparameter that represents the weight assigned to the text token prediction loss, allowing us to balance the contributions of the two modalities during training.", "bbox": [169, 90, 823, 148], "page_idx": 4 },
{ "type": "text", "text": "Training Details. We train the model with a batch size of 2048 using the AdamW optimizer with a learning rate of 1e-4. The training process consists of 1 million steps with a constant learning rate schedule and a warm-up period of 65k steps. Following [58], for image generation, the image token order is randomly permuted during the initial 300k training iterations, then linearly anneals to raster order between 300k and 600k iterations, and finally remains in raster order for the subsequent 400k steps. Except for the comparison with Gemma-1, we use the Gemma-2 model series as the backbone transformer for all experiments in this paper.", "bbox": [169, 152, 826, 252], "page_idx": 4 },
{ "type": "text", "text": "4.2 Inference", "text_level": 1, "bbox": [171, 267, 279, 282], "page_idx": 4 },
{ "type": "text", "text": "For text decoding, we employ categorical sampling for each generated text prediction: the predicted token is selected from the vocabulary $V$ based on the sampled probability distribution. We use the same decoding strategy as PaliGemma. Greedy decoding is used for all tasks except for downstream COCOcap (beam search $n = 2$) and TextCaps (beam search $n = 3$). For image decoding, we use a diffusion sampling process to generate continuous visual tokens, with the number of diffusion sampling steps set to 100 in our implementation.", "bbox": [169, 292, 823, 377], "page_idx": 4 },
{ "type": "text", "text": "As both text and image generation are performed at the token level, with predictions occurring one token at a time under a causal attention mechanism, we can efficiently utilize Key-Value (KV) caching. This optimization is applicable to both discrete text tokens and continuous visual tokens, significantly accelerating the inference process.", "bbox": [169, 383, 826, 439], "page_idx": 4 },
{ "type": "text", "text": "5 Experiment", "text_level": 1, "bbox": [171, 458, 305, 474], "page_idx": 4 },
{ "type": "text", "text": "5.1 Setup", "text_level": 1, "bbox": [171, 489, 254, 506], "page_idx": 4 },
{ "type": "text", "text": "Datasets. We train our models using the WebLI dataset [7], a collection of high-quality image-text pairs. For visual generation, following Fluid, we employ a WebLI subset of images and text descriptions specifically for the generation task. For visual understanding, consistent with PaliGemma, we leverage the image-text description pairs and image question-answer pairs that are also available within WebLI.", "bbox": [169, 516, 826, 571], "page_idx": 4 },
{ "type": "text", "text": "Evaluation Metrics. We assess image generation quality using the FID [16] score on 30K images of the MS-COCO [24] training set and evaluate performance on the GenEval [13] benchmark, where we use the original text prompts without any rewrites. For evaluating visual understanding performance, we use the caption CIDEr score on MS-COCO. Given our similar training dataset and setup to PaliGemma, we also evaluate the finetuning performance on a variety of captioning and question answering tasks. We report the average score on 4 captioning tasks, including COCOcap [24], Screen2Words [53], TextCaps [39], WidgetCap [22], and 20 QA tasks, including OKVQA [29], AOKVQA-MC [37], AOKVQA-DA [37], GQA [17], NLVR2 [43], AI2D [19], ScienceQA [28], RSVQA-lr [27], RSVQA-hr (test/test2) [27], ChartQA (human/aug) [30], VizWizVQA [15], TallyQA (simple/complex) [1], CountBenchQA [3], TextVQA [40], DocVQA [32], InfoVQA [31], ST-VQA [4].", "bbox": [169, 578, 826, 731], "page_idx": 4 },
{ "type": "text", "text": "In the following sections, we present the experimental results obtained under different configurations of UniFluid, providing insights into the relationship between the two tasks and highlighting key design choices for UniFluid training.", "bbox": [169, 736, 823, 779], "page_idx": 4 },
{ "type": "text", "text": "5.2 Main Results", "text_level": 1, "bbox": [171, 795, 305, 809], "page_idx": 4 },
{ "type": "text", "text": "Unified Training Improves Generation Performance. To evaluate the effectiveness of the unified training framework and determine whether unified training offers advantages compared to training separate models for different tasks, we perform controlled experiments to analyze the performance of models trained on a single task.", "bbox": [169, 821, 823, 878], "page_idx": 4 },
{ "type": "text", "text": "We first compare the visual generation performance of the model trained under the unified training objective with the performance of a text-to-image model (T2I only), trained solely with the visual", "bbox": [169, 883, 823, 912], "page_idx": 4 },
{ "type": "page_number", "text": "5", "bbox": [493, 935, 504, 946], "page_idx": 4 },
{ "type": "table", "img_path": "images/0c74970ac44a50ef2f5559ce53b6e18bbed02e637bc1ab1b59f7a8e7900b29bb.jpg", "table_caption": ["Table 1: Unified training achieves better generation performance than text-to-image only training. We evaluate the performance using MS-COCO zero-shot FID and GenEval score."], "table_footnote": [], "table_body": "<table><tr><td>Training Target</td><td>Size</td><td>FID ↓</td><td>GenEval ↑</td></tr><tr><td>T2I only</td><td>0.7B</td><td>9.71</td><td>0.50</td></tr><tr><td>Unified</td><td>0.7B</td><td>8.39</td><td>0.52</td></tr><tr><td>T2I only</td><td>2B</td><td>7.88</td><td>0.59</td></tr><tr><td>Unified</td><td>2B</td><td>7.20</td><td>0.59</td></tr></table>", "bbox": [348, 130, 645, 212], "page_idx": 5 },
{ "type": "text", "text": "autoregressive objective for the generation task. We ensure that the total number of visual tokens for training is the same for the visual generation loss in both the unified model training and text-to-image only training scenarios. The generation performance comparison is presented in Table 1. The unified model achieves better performance compared to the T2I only model, despite both models having observed the same number of tokens for the visual generation task. This suggests that unified model training can be beneficial for visual generation tasks, and that visual understanding ability has the potential to unlock enhanced visual generation quality.", "bbox": [169, 239, 823, 338], "page_idx": 5 },
{ "type": "text", "text": "Trade-off Between Generation and Understanding. We also investigate whether the visual generation task can contribute to improved visual understanding performance. In the UniFluid unified training setup, the hyperparameter $\\lambda$ controls the balance between the losses applied to image tokens and text tokens.", "bbox": [169, 344, 823, 398], "page_idx": 5 },
{ "type": "text", "text": "In Table 3 and Figure 3, we present the understanding and generation results of the 0.7B model with varying $\\lambda$. We compare the transfer performance on downstream understanding tasks between the unified model trained with different $\\lambda$ and an image-to-text model (I2T only), trained solely with the image understanding objective. Within the unified training setup, a trade-off exists between visual generation and understanding tasks, which can be effectively controlled by adjusting the loss mixing weight, $\\lambda$. While increasing $\\lambda$ can improve image understanding performance, ultimately exceeding the I2T-only baseline for downstream captioning, it concurrently diminishes the image generation capabilities.", "bbox": [169, 406, 823, 517], "page_idx": 5 },
{ "type": "text", "text": "In most scenarios, a smaller $\\lambda$ value (e.g., 0.005) is advisable, maintaining a significant proportion (over $90\\%$) of image understanding performance while supporting the generation of high-fidelity images. Larger $\\lambda$ values, in contrast, strongly favor image understanding but result in a rapid drop in image generation ability, as indicated by a sharp rise in FID score when $\\lambda$ exceeds 0.1. Qualitative results for image captioning and question answering, demonstrating the understanding capabilities of the fine-tuned model based on the unified model with Gemma-2 2B as the backbone LLM and $\\lambda = 0.005$, are presented in Figure 6.", "bbox": [169, 523, 823, 619], "page_idx": 5 },
{ "type": "text", "text": "Better Pre-trained LLM Backbone Leads to Better Visual Generation and Understanding. We investigate the effect of pre-trained LLMs within the unified model training setup, specifically examining whether more powerful LLMs contribute to enhanced image understanding performance and superior visual generation quality. To this end, we conducted experiments using Gemma-1 2B [47] and Gemma-2 2B [48] as backbone LLMs. Gemma-2 is a stronger LLM than Gemma-1, with a $10\\%$ average improvement across different text benchmarks.", "bbox": [169, 626, 826, 709], "page_idx": 5 },
{ "type": "table", "img_path": "images/7b4d738402e13bf19379cf0a30676469c8f6afa22957a77ca92c17d04ce946cb.jpg", "table_caption": ["Table 2: Performance comparison of image generation and understanding of UniFluid trained with different LLM backbones. FID and CIDEr are measured on MS-COCO. Gemma-2 gets much better performance compared to Gemma-1, for both image understanding and generation tasks."], "table_footnote": [], "table_body": "<table><tr><td rowspan=\"2\">Pretrained Model</td><td colspan=\"2\">Generation</td><td colspan=\"3\">Understanding</td></tr><tr><td>COCO FID ↓</td><td>GenEval ↑</td><td>COCO CIDEr ↑</td><td>Cap Avg ↑</td><td>QA Avg ↑</td></tr><tr><td>Gemma-1</td><td>9.73</td><td>0.52</td><td>38.02</td><td>113.40</td><td>60.21</td></tr><tr><td>Gemma-2</td><td>7.20</td><td>0.59</td><td>40.91</td><td>116.13</td><td>62.10</td></tr></table>", "bbox": [233, 773, 763, 839], "page_idx": 5 },
{ "type": "text", "text": "The experimental results are presented in Table 2. Here we used $\\lambda = 0.005$ for all models. The results demonstrate that employing a stronger LLM is crucial for generating images with higher fidelity and quality. Gemma-2 achieves significantly lower FID scores compared to Gemma-1, highlighting that even though LLM pre-training is unimodal, without exposure to visual data, using a better LLM", "bbox": [169, 854, 823, 912], "page_idx": 5 },
{ "type": "page_number", "text": "6", "bbox": [493, 936, 504, 946], "page_idx": 5 },
{ "type": "image", "img_path": "images/27d4b879d1e6ed0c16ebbf7e8c1a05a9de1f4ec816c64e73bb7cd204bec2ff52.jpg", "image_caption": ["Figure 3: Plot of image understanding and generation performance with varying $\\lambda$ during training. We show the FID on MS-COCO, the average score for downstream captioning tasks (Cap Avg), and the average score for downstream QA tasks (QA Avg) with different $\\lambda$ on the three figures. Green dashed lines show the FID of the T2I-only model; blue and red dashed lines show the downstream captioning average and QA average for the I2T-only model, respectively. In practice, a smaller $\\lambda$ gives a better trade-off between the two tasks."], "image_footnote": [], "bbox": [91, 89, 344, 270], "page_idx": 6 },
{ "type": "image", "img_path": "images/c28e0b1d3d4b55152d911ee6364845722e0f928715ebd8b2ba2f2245e04ae38f.jpg", "image_caption": [], "image_footnote": [], "bbox": [366, 90, 620, 270], "page_idx": 6 },
{ "type": "image", "img_path": "images/f01bbba059cb1c487d2b7bad0c5ebf483e7592f5c196f7ec35b2335a3abc2546.jpg", "image_caption": [], "image_footnote": [], "bbox": [650, 90, 897, 268], "page_idx": 6 },
{ "type": "table", "img_path": "images/ed7e8a9c62dec847919f2f338a44a638eacc3f82ec9e8af56e81b4b02edc4e68.jpg", "table_caption": ["Table 3: Image generation and understanding results with different $\\lambda$. $\\lambda = 0.1$ roughly puts the generation and understanding losses on the same scale. We present MS-COCO FID and the average captioning and QA results for downstream understanding tasks, compared to the image-to-text (I2T) only baseline. A smaller $\\lambda$ like 0.005 is recommended in most cases, as it preserves most of the image understanding capability while enabling the generation of high-quality images, outperforming the text-to-image (T2I) only baseline."], "table_footnote": [], "table_body": "<table><tr><td rowspan=\"2\">Task</td><td rowspan=\"2\">T2I only</td><td rowspan=\"2\">I2T only</td><td colspan=\"7\">Unified, λ</td></tr><tr><td>0.005</td><td>0.01</td><td>0.05</td><td>0.1</td><td>0.2</td><td>1.0</td><td>2.0</td></tr><tr><td>Generation (FID) ↓</td><td>9.71</td><td>-</td><td>8.39</td><td>9.65</td><td>12.48</td><td>17.90</td><td>28.60</td><td>47.89</td><td>54.02</td></tr><tr><td>Understanding (Cap Avg) ↑</td><td>-</td><td>125.05</td><td>120.25</td><td>120.25</td><td>121.18</td><td>121.68</td><td>123.65</td><td>126.25</td><td>125.80</td></tr><tr><td>Understanding (QA Avg) ↑</td><td>-</td><td>65.95</td><td>60.29</td><td>61.37</td><td>61.48</td><td>61.87</td><td>65.17</td><td>65.64</td><td>65.88</td></tr></table>", "bbox": [173, 426, 821, 494], "page_idx": 6 },
{ "type": "image", "img_path": "images/f5b44fb22df8ee423bbb5c991ec67c67c311aac003801a68e36de4d1a80aa028.jpg", "image_caption": [], "image_footnote": [], "bbox": [114, 529, 241, 609], "page_idx": 6 },
{ "type": "image", "img_path": "images/e604f122f83cb527d28352cf1e5e4b880d31afe49ece6c4f36cdeba5b992241f.jpg", "image_caption": [], "image_footnote": [], "bbox": [245, 529, 370, 609], "page_idx": 6 },
{ "type": "image", "img_path": "images/c46e9fbda409dd9b0d6fe25cf1f0c7088f846324cf25ae0edc1adb8a3f609780.jpg", "image_caption": [], "image_footnote": [], "bbox": [375, 529, 500, 609], "page_idx": 6 },
{ "type": "image", "img_path": "images/f47624129a101646b71e64eed6aa1212c7690071847fef6726e962f91d51a79a.jpg", "image_caption": [], "image_footnote": [], "bbox": [504, 529, 629, 609], "page_idx": 6 },
{ "type": "image", "img_path": "images/0f43ac3efb9385ab77e2cb077a6165efd276ed993423b635fcddef5a6344bb93.jpg", "image_caption": [], "image_footnote": [], "bbox": [635, 529, 758, 609], "page_idx": 6 },
{ "type": "image", "img_path": "images/0fa9c60d7e9f9b58905e06c1a86f5e7d8e6d6f7ac03fd1e9ce36f146966739e7.jpg", "image_caption": [], "image_footnote": [], "bbox": [766, 529, 888, 609], "page_idx": 6 },
{ "type": "image", "img_path": "images/b7e9b91e62274b99585c58f8e8402e7149ff8e358ebfe68f4b2866fc814b6e9f.jpg", "image_caption": [], "image_footnote": [], "bbox": [116, 612, 241, 691], "page_idx": 6 },
{ "type": "image", "img_path": "images/4af61b319ebef54c220450bb22d985e5d8a8ac0d51b79951a276a4bd1185093d.jpg", "image_caption": [], "image_footnote": [], "bbox": [245, 612, 370, 691], "page_idx": 6 },
{ "type": "image", "img_path": "images/1268d8942fcae33e355da233b87b1ba2aa1da48b738edce9e834235d40b7d2ef.jpg", "image_caption": [], "image_footnote": [], "bbox": [375, 612, 500, 691], "page_idx": 6 },
{ "type": "image", "img_path": "images/055827d4421ec7ad88afd48e896d72caedb2e029a4b9e0133d779db345ac66b4.jpg", "image_caption": [], "image_footnote": [], "bbox": [504, 612, 629, 691], "page_idx": 6 },
{ "type": "image", "img_path": "images/3360fd5ca9e05220182a0d5bb8c2c82ab50272d09ff5a52bbccf97b5f84a9be0.jpg", "image_caption": [], "image_footnote": [], "bbox": [635, 612, 758, 691], "page_idx": 6 },
{ "type": "image", "img_path": "images/69f5c722e1cfdf648c8c320239579694d5e00c5f385fd8359ec77dc239059df8.jpg", "image_caption": [], "image_footnote": [], "bbox": [766, 612, 888, 691], "page_idx": 6 },
{ "type": "image", "img_path": "images/65d7e2652966061b99c7248f64aefbfe77768d356615fa25906dab470109ad38.jpg", "image_caption": [], "image_footnote": [], "bbox": [116, 695, 241, 773], "page_idx": 6 },
{ "type": "image", "img_path": "images/2fd077dbbd0d8c49b3d104308ffd8f662dbabfe5228d171df188dcd388f68d7e.jpg", "image_caption": [], "image_footnote": [], "bbox": [245, 695, 370, 773], "page_idx": 6 },
{ "type": "image", "img_path": "images/6142c847daaa0b2e444e2eaee75b2301277eeb6f084c630d7a11e3e3d7715b7e.jpg", "image_caption": [], "image_footnote": [], "bbox": [375, 695, 500, 773], "page_idx": 6 },
{ "type": "image", "img_path": "images/db70eb466e41c1079e55240a87d754c1630717a9f61f3af9f006a8036540d3c6.jpg", "image_caption": [], "image_footnote": [], "bbox": [504, 695, 629, 773], "page_idx": 6 },
{ "type": "image", "img_path": "images/ec817598f9c016649943bcaf3e7f9eea8c6d6ea635c3dbd66d86aac41e302c90.jpg", "image_caption": [], "image_footnote": [], "bbox": [635, 695, 758, 773], "page_idx": 6 },
{ "type": "image", "img_path": "images/4c71302f59a6e2c12cdc6bab1d67480bad20db33ec424562bec26d4d367ff76b.jpg", "image_caption": [], "image_footnote": [], "bbox": [766, 695, 888, 773], "page_idx": 6 },
{ "type": "image", "img_path": "images/cba4f32c6197716dd8754f4789b7a8866571df39c69c4d735d0ac593280aa660.jpg", "image_caption": ["Figure 4: Downstream image understanding performance (after finetuning) on each benchmark (higher is better). Blue bars show performance of the model backbone initialized with Gemma-1 2B and green bars show the model backbone initialized with Gemma-2 2B. Initializing with a stronger LLM helps improve UniFluid visual understanding performance on most datasets."], "image_footnote": [], "bbox": [116, 777, 241, 858], "page_idx": 6 },
{ "type": "image", "img_path": "images/f181a225d21891e5163a357e3e714abad9344f5552bb4dc59b2bd38c830fa9da.jpg", "image_caption": [], "image_footnote": [], "bbox": [245, 777, 370, 858], "page_idx": 6 },
{ "type": "image", "img_path": "images/cb7ec8bfd66ea26fb70e6ee0daa7cf9e7b4a7c654ee225e88386ca6f754b128b.jpg", "image_caption": [], "image_footnote": [], "bbox": [375, 777, 500, 872], "page_idx": 6 },
{ "type": "image", "img_path": "images/7fab743345e3073b4153a8b710ead7da07fac7174f879e5a3a8ffd62deef737a.jpg", "image_caption": [], "image_footnote": [], "bbox": [504, 777, 629, 872], "page_idx": 6 },
{ "type": "image", "img_path": "images/11601a09dc1fd36d9290ed9af67eaa56e8cbb16f8407e81c13812bc1a5c2c8b7.jpg", "image_caption": [], "image_footnote": [], "bbox": [635, 777, 758, 858], "page_idx": 6 },
{ "type": "image", "img_path": "images/f0ce9ba61697d3680f8cc472b3daf1280274538d8e0d6a2eed7026b12c75840b.jpg", "image_caption": [], "image_footnote": [], "bbox": [766, 777, 888, 858], "page_idx": 6 },
{ "type": "page_number", "text": "7", "bbox": [493, 936, 504, 946], "page_idx": 6 },
{ "type": "image", "img_path": "images/c956187abea4dd8794f3600eded98ff56e89b52dfb2e2578cc297cfc9c1e2c4b.jpg", "image_caption": ["A black basketball shoe with a lightning bolt on it."], "image_footnote": [], "bbox": [75, 18, 267, 167], "page_idx": 7 },
{ "type": "image", "img_path": "images/1dda4edef0f15fb07c736b4c19bc3834934ff9cadc02e9f0f49659946ff6d814.jpg", "image_caption": ["A melting apple."], "image_footnote": [], "bbox": [292, 18, 486, 167], "page_idx": 7 },
{ "type": "image", "img_path": "images/466d2fe43bf8d14bf7059dbedc9088860424dd3ee80301f589abee1a3c733d74.jpg", "image_caption": ["A space elevator in the universe."], "image_footnote": [], "bbox": [509, 18, 702, 167], "page_idx": 7 },
{ "type": "image", "img_path": "images/9bd5a18be00334727765b9a18254bc1ed3b8dc9e3dc00bc5cec75969e5d733d0.jpg", "image_caption": ["human life depicted entirely out of fractals."], "image_footnote": [], "bbox": [728, 18, 919, 167], "page_idx": 7 },
{ "type": "image", "img_path": "images/2efd5160ecbb6255aa5aa53f9e7c7990aed82e7424c5e14d7ec64fd6295f8a8b.jpg", "image_caption": ["Graffiti of a funny dog on a street wall."], "image_footnote": [], "bbox": [75, 217, 266, 367], "page_idx": 7 },
{ "type": "image", "img_path": "images/eedd97bda7977dc3c2b8ddcaa4797ae048a9f58b38c7e304952ee35bdeb0e617.jpg", "image_caption": ["Turtle swimming underwater, aesthetic, fantasy."], "image_footnote": [], "bbox": [292, 215, 483, 364], "page_idx": 7 },
{ "type": "image", "img_path": "images/2d819e1f841d2622c73d61555687a7195917f493c4944afb10b5dba54f3e5ca1.jpg", "image_caption": ["A transparent sculpture of a duck made out of glass."], "image_footnote": [], "bbox": [511, 217, 702, 364], "page_idx": 7 },
{ "type": "image", "img_path": "images/7ff46f16950151c5e77b47b936d2c67bdc5ecdccfbe3f2e8131383c933a526f9.jpg", "image_caption": ["An armchair in the shape of an avocado."], "image_footnote": [], "bbox": [730, 217, 919, 366], "page_idx": 7 },
{ "type": "image", "img_path": "images/db9b12aa29a17e233f26cae458623aad7335736add2998309d2ecc99fc278a1c.jpg", "image_caption": ["A hyper-detailed rendering of a geyser erupting in a colorful, geothermal landscape."], "image_footnote": [], "bbox": [73, 431, 267, 580], "page_idx": 7 },
{ "type": "image", "img_path": "images/f54f46e66e32e2eec00ae7b8d752e50c4814067b201f685dac85b03a07a273d6.jpg", "image_caption": ["An astronaut riding a horse on the moon, oil painting by Van Gogh."], "image_footnote": [], "bbox": [292, 431, 483, 580], "page_idx": 7 },
{ "type": "image", "img_path": "images/8d1ac576da2e319c5322e81c14503269b1d8b27ac23b92ebc04c6a1e63258500.jpg", "image_caption": ["photo of an eagle with a golden crown resting upon its head."], "image_footnote": [], "bbox": [511, 431, 702, 579], "page_idx": 7 },
{ "type": "image", "img_path": "images/a15ecf2565252d6785ad118897c8a9997898e511a7971087b628b411aee66098.jpg", "image_caption": ["A window with raindrops trickling down, overlooking a blurry city."], "image_footnote": [], "bbox": [730, 431, 919, 579], "page_idx": 7 },
{ "type": "image", "img_path": "images/1268a3fc86a3637bab1e17dd7e19f49d8912d1bec2f9c972de4d7c3bbe98d92a.jpg", "image_caption": ["An image of a modern architectural building with large glass windows, situated on a cliff overlooking a serene ocean at sunset."], "image_footnote": [], "bbox": [73, 662, 267, 811], "page_idx": 7 },
{ "type": "image", "img_path": "images/0b1b1b92592474bf268039868377a95c9b10fbd9c272acae0077e0faa47382d0.jpg", "image_caption": ["A cozy cabin in the middle of a snowy forest, surrounded by tall trees with lights glowing through the windows, a northern lights display visible in the sky."], "image_footnote": [], "bbox": [292, 662, 483, 811], "page_idx": 7 },
{ "type": "image", "img_path": "images/a05f6050aea65fd8c472eca0b0aa84d22ae31d07fc18623d18f8abcf22e27deb.jpg", "image_caption": ["A still life of a vase overflowing with vibrant flowers, painted in bold colors and textured brushstrokes, reminiscent of van Gogh's iconic style."], "image_footnote": [], "bbox": [511, 662, 702, 811], "page_idx": 7 },
{ "type": "image", "img_path": "images/66c56563088f32892912db5825e6709d4e50a71e4d13352801783949bdaace75.jpg", "image_caption": ["An otherworldly forest of giant glowing mushrooms under a vibrant night sky filled with distant planets and stars, creating a dreamlike, cosmic landscape.", "Figure 5: Images generated from UniFluid autoregressive model after aesthetic fine-tuning."], "image_footnote": [], "bbox": [730, 662, 919, 811], "page_idx": 7 },
{ "type": "page_number", "text": "8", "bbox": [493, 936, 504, 946], "page_idx": 7 },
{ "type": "image", "img_path": "images/36f4a8eceafcdb00d110ca517d7b8a9cf4704c36dd59aee4f5c7d4974d7e9486.jpg", "image_caption": [], "image_footnote": ["A close up view of a wooden pole with the word \"sushi\" in silver letters on it. The pole is in front of a parking lot. The pole is in the foreground, with a concrete sidewalk in the foreground. There are plants and trees in the background. There is a brown brick building with a black awning on the left side of it. There are trees in the background. The sky is clear and blue."], "bbox": [107, 36, 277, 209], "page_idx": 8 },
{ "type": "image", "img_path": "images/3b5e953dc74551e08a0fd1c416c0915653361ba3cff1e61815598e1a8cd421bb.jpg", "image_caption": [], "image_footnote": ["A close up view of a wet white and brown labradoodle dog walking on a gray cement surface. The dog has a red leash attached to its collar. The dog has a silver metal hook attached to its collar. The dog is walking towards the camera. The dog is casting a shadow on the cement surface. Behind the dog is a building with a large window on the side. A tree line is visible in the background. The sky is blue with a few white clouds."], "bbox": [496, 37, 661, 205], "page_idx": 8 },
{ "type": "image", "img_path": "images/04fe165de52995499a22c711c69eafb3a57240f4b865c524c8ee2c812a8e402b.jpg", "image_caption": [], "image_footnote": ["Question: In what year did Panama's population reach 4.28 million?", "Answer: 2020"], "bbox": [109, 227, 341, 354], "page_idx": 8 },
{ "type": "image", "img_path": "images/3affb1716f45e3f8a8e892a46ed5c32d321d2f28697ab2b0d93d6ccf76afcff1.jpg", "image_caption": [], "image_footnote": ["Question: What device's average duration of in-app engagement sessions was 6.6 minutes?", "Answer: android"], "bbox": [493, 227, 727, 352], "page_idx": 8 },
{ "type": "image", "img_path": "images/d247a1dbb553478afd47f826cff4a46e58101f933b13bce059a3897dcaba7b76.jpg", "image_caption": ["Figure 6: Finetuned UniFluid model demonstrates strong image-to-text capability on image captioning and question answering."], "image_footnote": ["Question: The bus is likely driving through which American city?", "Answer: new york"], "bbox": [107, 369, 334, 503], "page_idx": 8 },
{ "type": "image", "img_path": "images/56f7755653a55050558209e33a525944678ce07f18db276e8244c8c56aa35766.jpg", "image_caption": [], "image_footnote": ["Question: Which number birthday is probably being celebrated?", "Answer: thirty"], "bbox": [493, 371, 720, 503], "page_idx": 8 },
{ "type": "image", "img_path": "images/79f93e931fd94d63593c874d6ac1fb094c20d25d037a24a190c7c96be9620be3.jpg", "image_caption": ["Change the black suitcase to a white one"], "image_footnote": [], "bbox": [104, 545, 232, 645], "page_idx": 8 },
{ "type": "image", "img_path": "images/d19bf5d7e7f56008e163ebc8e441f8a1112671cf8a8ef094221c9f5680a30ced.jpg", "image_caption": [], "image_footnote": [], "bbox": [233, 546, 362, 645], "page_idx": 8 },
{ "type": "image", "img_path": "images/1d019c5e0c764eb1e8b95793df132237f61dc32dfaf1373ae7c746f9613670e2.jpg", "image_caption": ["Remove the fence from the photo"], "image_footnote": [], "bbox": [367, 546, 496, 645], "page_idx": 8 },
{ "type": "image", "img_path": "images/7f3981b5455644ddc8ab004d938034d1d81756f69c154a9d395fc70887dc2689.jpg", "image_caption": [], "image_footnote": [], "bbox": [496, 546, 625, 645], "page_idx": 8 },
{ "type": "image", "img_path": "images/857c3d2b4d90a930f09ebe7c2eaecd6e41843ff5f2eb1e4dfd4d5f0fbb8c4eae.jpg", "image_caption": ["Change this into a 1950s Flintstone cartoon art style"], "image_footnote": [], "bbox": [632, 546, 761, 645], "page_idx": 8 },
{ "type": "image", "img_path": "images/5d06f1f7f1bc3e93747fda98c60b99db1e7b3bfc38ea08d5f99f70ff408d4703.jpg", "image_caption": [], "image_footnote": [], "bbox": [761, 546, 890, 645], "page_idx": 8 },
{ "type": "image", "img_path": "images/480023cdc110c691db2b63895df6e29d8f62b6c948074069eb234e06d6a1643d.jpg", "image_caption": ["Change the trees from flowers to icicles"], "image_footnote": [], "bbox": [104, 667, 232, 767], "page_idx": 8 },
{ "type": "image", "img_path": "images/3cf06912c8adfa112e4cec26c1ef46e0c4e30e821c79f1b2de0b87c712b9bc5c.jpg", "image_caption": [], "image_footnote": [], "bbox": [233, 667, 362, 767], "page_idx": 8 },
{ "type": "image", "img_path": "images/eb0786feda26f92aea48692bb7ee90a2bd953e0f5f3bb17a29cbf991e6540cde.jpg", "image_caption": ["Change the elephant into a giant rabbit"], "image_footnote": [], "bbox": [367, 667, 496, 767], "page_idx": 8 },
{ "type": "image", "img_path": "images/ab435942c6a892c5e005c6f3e596ec777b7002aee1398e2d94d8acf12d940801.jpg", "image_caption": [], "image_footnote": [], "bbox": [496, 667, 625, 767], "page_idx": 8 },
{ "type": "image", "img_path": "images/9c446d91fda981918537212203a547eb8e4fb4c4bbdb313a91d2a0e202c7d290.jpg", "image_caption": [], "image_footnote": [], "bbox": [633, 667, 761, 767], "page_idx": 8 },
{ "type": "image", "img_path": "images/7f9899abfaf1d95a495617d9c2b59fbcd4e91d93fcf676ed5bac64c54a57d59d.jpg", "image_caption": [], "image_footnote": [], "bbox": [761, 667, 890, 767], "page_idx": 8 },
{ "type": "image", "img_path": "images/37e1926ec2131e720adf5a00ec75683674a2fdf5d3477f9049de45486d0cd9f1.jpg", "image_caption": ["Change the bird to all white"], "image_footnote": [], "bbox": [104, 789, 232, 888], "page_idx": 8 },
{ "type": "image", "img_path": "images/00ace96392ae997da28586fe1293583f6d92d6429a3fcfed2d0751233fbb1bb4.jpg", "image_caption": ["Figure 7: Image editing results on the evaluation benchmark from the UniFluid autoregressive model after fine-tuning. It handles multiple editing tasks effectively, including object removal, insertion, and style and color changes."], "image_footnote": [], "bbox": [233, 789, 362, 888], "page_idx": 8 },
{ "type": "image", "img_path": "images/83cafdcca03ca33853f930748ce61120aaab1ee886c0c13f8fcc11862d110f05.jpg", "image_caption": ["Turn it into a Paul Cezanne still life painting"], "image_footnote": [], "bbox": [393, 790, 488, 887], "page_idx": 8 },
{
"type": "image",
"img_path": "images/5117cd2da0dfc63a8e7e5811316065d98885423bf2410418a9ecb417bb9c410b.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [500, 790, 629, 888],
"page_idx": 8
|
| 1636 |
+
},
|
| 1637 |
+
{
|
| 1638 |
+
"type": "image",
|
| 1639 |
+
"img_path": "images/2f04b59d14ad478ebcffecd91ec88aa335419c135b386a9254939ca44fbb078b.jpg",
|
| 1640 |
+
"image_caption": [
|
| 1641 |
+
"Change the meat into carrots"
|
| 1642 |
+
],
|
| 1643 |
+
"image_footnote": [],
|
| 1644 |
+
"bbox": [
|
| 1645 |
+
632,
|
| 1646 |
+
789,
|
| 1647 |
+
761,
|
| 1648 |
+
887
|
| 1649 |
+
],
|
| 1650 |
+
"page_idx": 8
|
| 1651 |
+
},
|
| 1652 |
+
{
|
| 1653 |
+
"type": "image",
|
| 1654 |
+
"img_path": "images/46973e49a359e902ed1f7d9f02c2def2ed4f8fe3f1d72a0e4d3fdeb7fc3d6011.jpg",
|
| 1655 |
+
"image_caption": [],
|
| 1656 |
+
"image_footnote": [],
|
| 1657 |
+
"bbox": [
|
| 1658 |
+
761,
|
| 1659 |
+
789,
|
| 1660 |
+
890,
|
| 1661 |
+
887
|
| 1662 |
+
],
|
| 1663 |
+
"page_idx": 8
|
| 1664 |
+
},
|
| 1665 |
+
{
|
| 1666 |
+
"type": "page_number",
|
| 1667 |
+
"text": "9",
|
| 1668 |
+
"bbox": [
|
| 1669 |
+
493,
|
| 1670 |
+
935,
|
| 1671 |
+
504,
|
| 1672 |
+
946
|
| 1673 |
+
],
|
| 1674 |
+
"page_idx": 8
|
| 1675 |
+
},
|
| 1676 |
+
{ "type": "table", "img_path": "images/8027d54f52a4ad71cfa0c7bea294a4fb80ff464f7ea42d394d811564e25d0f95.jpg", "table_caption": ["Table 4: Performance comparison of image generation and understanding of UniFluid trained with different image generation orders. FID and CIDEr are measured on MS-COCO."], "table_footnote": [], "table_body": "<table><tr><td rowspan=\"2\">Generation Order</td><td colspan=\"2\">Generation</td><td colspan=\"3\">Understanding</td></tr><tr><td>COCO FID ↓</td><td>GenEval ↑</td><td>COCO CIDEr ↑</td><td>Cap Avg ↑</td><td>QA Avg ↑</td></tr><tr><td>Raster</td><td>8.28</td><td>0.59</td><td>45.57</td><td>116.55</td><td>61.81</td></tr><tr><td>Random</td><td>7.20</td><td>0.59</td><td>40.91</td><td>116.13</td><td>62.10</td></tr></table>", "bbox": [233, 123, 763, 189], "page_idx": 9 },
{ "type": "text", "text": "as a backbone is important for unlocking improved visual quality in a unified model training setup. The image understanding performance also improves slightly when using a stronger LLM, a trend also noted in PaliGemma 2. We show the detailed comparison on each downstream visual understanding benchmark in Figure 4.", "bbox": [169, 214, 826, 272], "page_idx": 9 },
{ "type": "text", "text": "Training with Random Order Helps Generation But Not Understanding. Images inherently possess 2D patterns. As demonstrated in Fluid, raster-order training can be problematic, potentially leading to collapse and artifacts characterized by disappearing patterns. Approaches such as RAR [58] and RandAR [33] propose training image generation AR models with random-order training, which can improve ImageNet FID and result in better visual quality.", "bbox": [169, 276, 823, 347], "page_idx": 9 },
{ "type": "text", "text": "Here we study the effect of different visual generation orders during training within our unified framework setup. We compare the performance of random-order and raster-order training, both with Gemma-2 2B as the backbone LLM. The results presented in Table 4 indicate that for per-token image generation within a unified framework, raster-order training continues to underperform random-order generation. Incorporating random order during training helps ensure the generated images are of high quality. However, it does not necessarily improve visual understanding performance: raster order achieves better results on MS-COCO CIDEr and the downstream captioning task average (Cap Avg).", "bbox": [169, 352, 826, 465], "page_idx": 9 },
{ "type": "text", "text": "5.3 More Generation Capabilities", "text_level": 1, "bbox": [171, 479, 421, 494], "page_idx": 9 },
{ "type": "text", "text": "We also verify the transferability of the trained model to various downstream generation tasks.", "bbox": [169, 505, 790, 520], "page_idx": 9 },
{ "type": "text", "text": "Aesthetic Fine-Tuning. To enhance the visual quality and aesthetic appeal of the generated images, we perform aesthetic fine-tuning on a publicly available dataset. The results are shown in Figure 5.", "bbox": [169, 525, 826, 555], "page_idx": 9 },
{ "type": "text", "text": "Image Editing Task. Since our unified framework is trained with multimodal inputs, it can naturally extend to image editing tasks that involve both image and text prompt inputs. We fine-tune the 2B UniFluid model with 4M image editing pairs from HQEdit [18] and UltraEdit [60]. In Figure 7, we apply the fine-tuned model to the input images and editing prompts from a publicly available benchmark. Although preliminary, the experiments show that UniFluid is able to adapt and generalize to tasks that involve interleaved data modalities.", "bbox": [169, 560, 826, 643], "page_idx": 9 },
{ "type": "text", "text": "6 Conclusion", "text_level": 1, "bbox": [171, 662, 302, 679], "page_idx": 9 },
{ "type": "text", "text": "In this paper, we presented UniFluid, a pure autoregressive framework for joint visual generation and understanding, utilizing continuous visual tokens. We identified an inherent trade-off between the visual generation and understanding tasks, but the two tasks can benefit each other with tuned training recipes. Careful choice of the loss balance between the two tasks allows a single unified model to achieve performance comparable to or exceeding single-task baselines. We conducted an investigation of key design choices for UniFluid training, revealing the critical importance of employing a strong backbone LLM and random-order generation to unlock high-quality visual generation capabilities. We believe that this work encourages future research into the exploration of continuous visual tokens for unified vision-language model training, paving the way for more efficient and powerful autoregressive multimodal systems.", "bbox": [169, 693, 826, 833], "page_idx": 9 },
{ "type": "text", "text": "Acknowledgements. We are grateful to Alex Rizkowsky and Amy Shen for their support in securing computational resources. We also wish to thank Charles Herrmann, Junhwa Hur, Shangbang Long, André Susano Pinto, Srinivas Kaza, David Salesin, and the VisCam team for their insightful discussions and constructive feedback, which greatly improved this work.", "bbox": [169, 839, 826, 895], "page_idx": 9 },
{ "type": "page_number", "text": "10", "bbox": [490, 935, 509, 946], "page_idx": 9 },
{ "type": "text", "text": "References", "text_level": 1, "bbox": [173, 89, 269, 106], "page_idx": 10 },
{ "type": "list", "sub_type": "ref_text", "list_items": [
"[1] Manoj Acharya, Kushal Kafle, and Christopher Kanan. Tallyqa: Answering complex counting questions. In Proceedings of the AAAI conference on artificial intelligence, volume 33, pages 8076-8084, 2019.",
"[2] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022.",
"[3] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, et al. Paligemma: A versatile 3b vlm for transfer. arXiv preprint arXiv:2407.07726, 2024.",
"[4] Ali Furkan Biten, Ruben Tito, Andres Mafla, Lluis Gomez, Marçal Rusinol, Ernest Valveny, CV Jawahar, and Dimosthenis Karatzas. Scene text visual question answering. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4291-4301, 2019.",
"[5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020.",
"[6] Huiwen Chang, Han Zhang, Jarred Barber, AJ Maschinot, Jose Lezama, Lu Jiang, Ming-Hsuan Yang, Kevin Murphy, William T Freeman, Michael Rubinstein, et al. Muse: Text-to-image generation via masked generative transformers. arXiv preprint arXiv:2301.00704, 2023.",
"[7] Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, et al. Pali: A jointly-scaled multilingual language-image model. arXiv preprint arXiv:2209.06794, 2022.",
"[8] Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Janus-pro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025.",
"[9] Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. Palm: Scaling language modeling with pathways. Journal of Machine Learning Research, 24(240):1-113, 2023.",
"[10] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. Instructblip: Towards general-purpose vision-language models with instruction tuning, 2023.",
"[11] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies, volume 1 (long and short papers), pages 4171-4186, 2019.",
"[12] Lijie Fan, Tianhong Li, Siyang Qin, Yuanzhen Li, Chen Sun, Michael Rubinstein, Deqing Sun, Kaiming He, and Yonglong Tian. Fluid: Scaling autoregressive text-to-image generative models with continuous tokens. arXiv preprint arXiv:2410.13863, 2024.",
"[13] Dhruba Ghosh, Hannaneh Hajishirzi, and Ludwig Schmidt. Geneval: An object-focused framework for evaluating text-to-image alignment. Advances in Neural Information Processing Systems, 36:52132-52152, 2023.",
"[14] Karol Gregor, Ivo Danihelka, Andriy Mnih, Charles Blundell, and Daan Wierstra. Deep autoregressive networks. In International Conference on Machine Learning, pages 1242-1250. PMLR, 2014.",
"[15] Danna Gurari, Qing Li, Abigale J Stangl, Anhong Guo, Chi Lin, Kristen Grauman, Jiebo Luo, and Jeffrey P Bigham. Vizwiz grand challenge: Answering visual questions from blind people. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3608-3617, 2018."
], "bbox": [171, 112, 828, 910], "page_idx": 10 },
{ "type": "page_number", "text": "11", "bbox": [490, 935, 506, 946], "page_idx": 10 },
{ "type": "list", "sub_type": "ref_text", "list_items": [
"[16] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017.",
"[17] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6700-6709, 2019.",
"[18] Mude Hui, Siwei Yang, Bingchen Zhao, Yichun Shi, Heng Wang, Peng Wang, Yuyin Zhou, and Cihang Xie. Hq-edit: A high-quality dataset for instruction-based image editing. arXiv preprint arXiv:2404.09990, 2024.",
"[19] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part IV 14, pages 235-251. Springer, 2016.",
"[20] Taku Kudo and John Richardson. Sentencepiece: A simple and language independent subword tokenizer and detokenizer for neural text processing. arXiv preprint arXiv:1808.06226, 2018.",
"[21] Tianhong Li, Yonglong Tian, He Li, Mingyang Deng, and Kaiming He. Autoregressive image generation without vector quantization. Advances in Neural Information Processing Systems, 37:56424-56445, 2024.",
"[22] Yang Li, Gang Li, Luheng He, Jingjie Zheng, Hong Li, and Zhiwei Guan. Widget captioning: Generating natural language description for mobile user interface elements. arXiv preprint arXiv:2010.04295, 2020.",
"[23] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pre-training for visual language models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 26689-26699, 2024.",
"[24] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer vision-ECCV 2014: 13th European conference, Zurich, Switzerland, September 6-12, 2014, proceedings, part v 13, pages 740-755. Springer, 2014.",
"[25] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024.",
"[26] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36:34892-34916, 2023.",
"[27] Sylvain Lobry, Diego Marcos, Jesse Murray, and Devis Tuia. Rsvqa: Visual question answering for remote sensing data. IEEE Transactions on Geoscience and Remote Sensing, 58(12):8555-8566, 2020.",
"[28] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. Advances in Neural Information Processing Systems, 35:2507-2521, 2022.",
"[29] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3195-3204, 2019.",
"[30] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022.",
"[31] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and CV Jawahar. Infographicvqa. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1697-1706, 2022."
], "bbox": [173, 90, 826, 912], "page_idx": 11 },
{ "type": "page_number", "text": "12", "bbox": [490, 935, 508, 946], "page_idx": 11 },
{ "type": "list", "sub_type": "ref_text", "list_items": [
"[32] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. In Proceedings of the IEEE/CVF winter conference on applications of computer vision, pages 2200-2209, 2021.",
"[33] Ziqi Pang, Tianyuan Zhang, Fujun Luan, Yunze Man, Hao Tan, Kai Zhang, William T Freeman, and Yu-Xiong Wang. Randar: Decoder-only autoregressive visual generation in random orders. arXiv preprint arXiv:2412.01827, 2024.",
"[34] Niki Parmar, Ashish Vaswani, Jakob Uszkoreit, Lukasz Kaiser, Noam Shazeer, Alexander Ku, and Dustin Tran. Image transformer. In International conference on machine learning, pages 4055-4064. PMLR, 2018.",
"[35] Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training. Technical Report, 2018.",
"[36] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models, 2021.",
"[37] Dustin Schwenk, Apoorv Khandelwal, Christopher Clark, Kenneth Marino, and Roozbeh Mottaghi. A-okvqa: A benchmark for visual question answering using world knowledge. In European conference on computer vision, pages 146-162. Springer, 2022.",
"[38] Weijia Shi, Xiaochuang Han, Chunting Zhou, Weixin Liang, Xi Victoria Lin, Luke Zettlemoyer, and Lili Yu. Llamafusion: Adapting pretrained language models for multimodal generation. arXiv preprint arXiv:2412.15188, 2024.",
"[39] Oleksii Sidorov, Ronghang Hu, Marcus Rohrbach, and Amanpreet Singh. Textcaps: a dataset for image captioning with reading comprehension. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part II 16, pages 742-758. Springer, 2020.",
"[40] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8317-8326, 2019.",
"[41] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020.",
"[42] Andreas Steiner, André Susano Pinto, Michael Tschannen, Daniel Keysers, Xiao Wang, Yonatan Bitton, Alexey Gritsenko, Matthias Minderer, Anthony Sherbondy, Shangbang Long, et al. Paligemma 2: A family of versatile vlms for transfer. arXiv preprint arXiv:2412.03555, 2024.",
"[43] Alane Suhr, Stephanie Zhou, Ally Zhang, Iris Zhang, Huajun Bai, and Yoav Artzi. A corpus for reasoning about natural language grounded in photographs. arXiv preprint arXiv:1811.00491, 2018.",
"[44] Quan Sun, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, Yueze Wang, Hongcheng Gao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Emu: Generative pretraining in multimodality. arXiv preprint arXiv:2307.05222, 2023.",
"[45] Yutao Sun, Hangbo Bao, Wenhui Wang, Zhiliang Peng, Li Dong, Shaohan Huang, Jianyong Wang, and Furu Wei. Multimodal latent language modeling with next-token diffusion. arXiv preprint arXiv:2412.08635, 2024.",
"[46] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024.",
"[47] Gemma Team, Thomas Mesnard, Cassidy Hardin, Robert Dadashi, Surya Bhupatiraju, Shreya Pathak, Laurent Sifre, Morgane Riviere, Mihir Sanjay Kale, Juliette Love, et al. Gemma: Open models based on gemini research and technology. arXiv preprint arXiv:2403.08295, 2024."
], "bbox": [173, 90, 826, 912], "page_idx": 12 },
{ "type": "page_number", "text": "13", "bbox": [490, 935, 508, 946], "page_idx": 12 },
{ "type": "list", "sub_type": "ref_text", "list_items": [
"[48] Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, et al. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118, 2024.",
"[49] Keyu Tian, Yi Jiang, Zehuan Yuan, Bingyue Peng, and Liwei Wang. Visual autoregressive modeling: Scalable image generation via next-scale prediction. Advances in neural information processing systems, 37:84839-84865, 2024.",
"[50] Shengbang Tong, David Fan, Jiachen Zhu, Yunyang Xiong, Xinlei Chen, Koustuv Sinha, Michael Rabbat, Yann LeCun, Saining Xie, and Zhuang Liu. Metamorph: Multimodal understanding and generation via instruction tuning. arXiv preprint arXiv:2412.14164, 2024.",
"[51] Aaron Van den Oord, Nal Kalchbrenner, Lasse Espeholt, Oriol Vinyals, Alex Graves, et al. Conditional image generation with pixelCNN decoders. Advances in neural information processing systems, 29, 2016.",
"[52] Aäron Van Den Oord, Nal Kalchbrenner, and Koray Kavukcuoglu. Pixel recurrent neural networks. In International conference on machine learning, pages 1747-1756. PMLR, 2016.",
"[53] Bryan Wang, Gang Li, Xin Zhou, Zhourong Chen, Tovi Grossman, and Yang Li. Screen2words: Automatic mobile ui summarization with multimodal learning. In The 34th Annual ACM Symposium on User Interface Software and Technology, pages 498-510, 2021.",
"[54] Chengyue Wu, Xiaokang Chen, Zhiyu Wu, Yiyang Ma, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, Chong Ruan, et al. Janus: Decoupling visual encoding for unified multimodal understanding and generation. arXiv preprint arXiv:2410.13848, 2024.",
"[55] Jinheng Xie, Weijia Mao, Zechen Bai, David Junhao Zhang, Weihao Wang, Kevin Qinghong Lin, Yuchao Gu, Zhijie Chen, Zhenheng Yang, and Mike Zheng Shou. Show-o: One single transformer to unify multimodal understanding and generation. arXiv preprint arXiv:2408.12528, 2024.",
"[56] Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023.",
"[57] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2(3):5, 2022.",
"[58] Qihang Yu, Ju He, Xueqing Deng, Xiaohui Shen, and Liang-Chieh Chen. Randomized autoregressive visual generation. arXiv preprint arXiv:2411.00776, 2024.",
"[59] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF international conference on computer vision, pages 11975-11986, 2023.",
"[60] Haozhe Zhao, Xiaojian Shawn Ma, Liang Chen, Shuzheng Si, Rujie Wu, Kaikai An, Peiyu Yu, Minjia Zhang, Qing Li, and Baobao Chang. Ultraedit: Instruction-based fine-grained image editing at scale. Advances in Neural Information Processing Systems, 37:3058-3093, 2024.",
"[61] Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. arXiv preprint arXiv:2408.11039, 2024.",
"[62] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023."
], "bbox": [173, 90, 826, 866], "page_idx": 13 },
{ "type": "page_number", "text": "14", "bbox": [490, 935, 508, 946], "page_idx": 13 }
]
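Every block in the `content_list.json` above follows the same small schema (`type`, an optional `img_path`, caption/footnote lists, `bbox`, `page_idx`). As a minimal sketch of how such a file can be consumed, assuming only the schema visible in the entries above (the grouping helper is ours, not part of MinerU):

```python
import json

def load_blocks(path: str) -> dict[int, list[dict]]:
    """Read a MinerU-style content list and group its blocks by page index."""
    with open(path, "r", encoding="utf-8") as f:
        blocks = json.load(f)  # the top-level value is a JSON array of blocks
    pages: dict[int, list[dict]] = {}
    for block in blocks:
        pages.setdefault(block["page_idx"], []).append(block)
    return pages

# Example: list every image on page 8 together with its caption, if any.
pages = load_blocks(
    "data/2025/2503_13xxx/2503.13436/53224727-d9f9-435a-ad74-c91800bf8f34_content_list.json"
)
for block in pages.get(8, []):
    if block["type"] == "image":
        caption = " ".join(block.get("image_caption", []))
        print(block["img_path"], "-", caption or "(no caption)")
```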
data/2025/2503_13xxx/2503.13436/53224727-d9f9-435a-ad74-c91800bf8f34_model.json
ADDED
The diff for this file is too large to render. See raw diff

data/2025/2503_13xxx/2503.13436/53224727-d9f9-435a-ad74-c91800bf8f34_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:085b1288cd16795c3f6ef8bf1df020a900b5b77e36e4f82a3eb1a3273476e707
size 2673740
data/2025/2503_13xxx/2503.13436/full.md
ADDED
@@ -0,0 +1,380 @@
# Unified Autoregressive Visual Generation and Understanding with Continuous Tokens

Lijie Fan$^{1,*}$ Luming Tang$^{1,*}$ Siyang Qin$^{1,*}$ Tianhong Li$^{2}$ Xuan Yang$^{1}$ Siyuan Qiao$^{1}$ Andreas Steiner$^{1}$ Chen Sun$^{1}$ Yuanzhen Li$^{1}$ Tao Zhu$^{1}$ Michael Rubinstein$^{1}$ Michalis Raptis$^{1}$ Deqing Sun$^{1,\dagger}$ Radu Soricut$^{1,\dagger}$
$^{1}$Google DeepMind $^{2}$MIT $^{*}$equal contribution



Figure 1: Generated images from our UniFluid autoregressive model after aesthetic fine-tuning.

# Abstract

We present UniFluid, a unified autoregressive framework for joint visual generation and understanding that leverages continuous visual tokens. Our unified autoregressive architecture processes multimodal image and text inputs, generating discrete tokens for text and continuous tokens for images. We find that, although there is an inherent trade-off between the image generation and understanding tasks, a carefully tuned training recipe enables them to improve each other. By selecting an appropriate loss-balance weight, the unified model achieves results comparable to or exceeding those of single-task baselines on both tasks. Furthermore, we demonstrate that employing stronger pre-trained LLMs and random-order generation during training is important for achieving high-fidelity image generation within this unified framework. Built upon the Gemma model series, UniFluid exhibits competitive performance across both image generation and understanding, demonstrating strong transferability to various downstream tasks, including image editing for generation, as well as visual captioning and question answering for understanding.
# 1 Introduction

Large Language Models (LLMs) have recently advanced from text-centric architectures, such as BERT [11] and GPT [35], toward multimodal systems capable of understanding and generating content across different modalities. GPT-3 [5] and PaLM [9] show that scaling language models leads to emergent capabilities, while Flamingo [2] further demonstrates that incorporating visual inputs facilitates unified multimodal reasoning. This trend toward unified vision-language models—using a single model for diverse visual understanding and generation tasks—has promising potential for leveraging knowledge and reasoning abilities that transfer across different vision and language tasks, ultimately facilitating more robust and generalizable multimodal representation and modeling capabilities.

Motivated by the advantages and strong scaling properties of autoregressive models, coupled with their simplicity, we investigate a pure autoregressive framework for unified visual generation and understanding, without the limitations introduced by vector quantization (VQ). In this paper, we introduce UniFluid, a unified framework that leverages continuous visual tokens within an autoregressive architecture to jointly handle vision-language generation and understanding tasks. Building upon Gemma [48], pre-trained on a large-scale text corpus, UniFluid unlocks powerful visual generation and understanding capabilities through training with paired image-text data, and further allows these two tasks to mutually benefit each other within a single architecture.

Specifically, UniFluid adopts a unified autoregressive framework where both text and continuous visual inputs are embedded as tokens in the same space, enabling seamless joint training of image generation and understanding tasks. UniFluid integrates a continuous tokenizer [12, 21] for image generation and a pre-trained SigLIP [59] image encoder for understanding tasks, while textual inputs are processed using a standard SentencePiece tokenizer [20]. The resulting multimodal sequences are modeled autoregressively using Gemma [48] as the underlying transformer backbone. Task-specific prediction heads—a diffusion-based head for image generation and a cross-entropy head for text generation—ensure effective modality-specific training and inference, enabling UniFluid to efficiently learn shared representations that mutually enhance its generation and understanding capabilities.

Our experiments demonstrate several key advantages of the proposed unified training strategy. We find that, although there is a trade-off between the two tasks, a carefully tuned training recipe allows them to support each other and outperform the single-task baselines. Effectively balancing the loss between the tasks yields a single model that performs both with results superior to or on par with single-task models. Moreover, the choice of pre-trained LLM backbone significantly impacts visual generation performance. We also find that while a random generation order is essential for high-quality image synthesis, it is less critical for understanding tasks. Finally, our unified pre-trained models show strong generalization and transferability, achieving compelling results in downstream applications, including image editing and various vision-language understanding benchmarks.

# 2 Related Works

Multimodal Large Language Models. Multimodal Large Language Models [2, 10, 23, 26, 25, 3, 42] have shown strong performance on visual understanding tasks. Flamingo [2] adopted a frozen LLM and vision encoder, utilizing a perceiver with cross-attention to bridge the modalities. LLaVA [26, 25] proposed instruction tuning over pre-trained LLMs with multimodal inputs to align a pre-trained image encoder into the LLM's embedding space, thereby enabling visual understanding and instruction-following capabilities. MiniGPT-4 [62] and mPLUG-Owl [56] have shown that vision encoders can be connected to LLMs through projection layers, demonstrating sophisticated visual reasoning capabilities. The PaliGemma [3, 42] series built upon the Gemma [47, 48] model family to develop versatile vision-language models capable of strong transfer to diverse downstream visual understanding tasks.

Autoregressive Image Generation. While diffusion models [41, 36] have achieved impressive success in image generation, autoregressive image generation methods have also seen significant development, driven by their simplicity and closeness to LLM training paradigms. A large body of research centers on tokenizing images into discrete tokens and applying autoregressive objectives to these discrete representations; notable examples include Parti [57] and Muse [6]. [49] proposes an approach that operates on image scales, progressively refining resolutions from coarse to fine through next-scale prediction. Alternatively, works such as MAR [21] and Fluid [12], employing per-token diffusion heads on top of LLM-predicted embeddings, have explored autoregressive image generation with continuous visual tokens.



Figure 2: High-level illustration of UniFluid. UniFluid performs joint training of image generation and understanding tasks through next-token prediction. For image embeddings, we use a VAE image tokenizer for generation and a SigLIP image encoder for understanding. BOI/BOS stands for Beginning of Image/Sentence.

Unified Multimodal Models. There is growing research interest in unifying visual generation and understanding within a single model. VQ-based models, such as Chameleon [46], Emu [44], and Janus [54, 8], propose encoding visual inputs into discrete tokens and unifying tasks into next-token prediction within this discrete token space. Models with hybrid training targets, such as Transfusion [61, 38] and Show-O [55], aim to unify next-token prediction objectives with diffusion objectives within a single framework. MetaMorph [50] maintains the autoregressive objective by regressing visual SigLIP [59] features using an LLM, but necessitates a separate diffusion model to decode the predicted latent features into images. Our approach distinguishes itself by performing per-token autoregressive generation using continuous visual tokens. This maintains the next-token prediction objective while not being limited by vector-quantized tokens. [45] also explores using continuous tokens to generate multimodal outputs.

# 3 Method

In this section, we describe the architecture of our UniFluid model. The model takes both image and text sequences as input and is trained jointly on generation and understanding tasks, using next-token prediction as its training objective.
# 3.1 Unified Autoregressive Modeling with Continuous Visual Tokens

Our approach leverages the autoregressive paradigm to unify visual understanding and generation within a single framework. Given an ordered sequence of tokens $X = \{x^{1},\dots,x^{n}\}$, the autoregressive model [34, 14, 51, 52] factorizes the joint probability distribution as a product of conditional probabilities, effectively framing the generation task as a sequential "next token prediction" problem: $p(X) = \prod_{i = 1}^{n}p(x^{i}\mid x^{1},\dots,x^{i - 1})$. As shown in MAR [21] and Fluid [12], this autoregressive formulation applies to both discrete and continuous tokens. In UniFluid, we exploit this property to enable the generation of continuous visual tokens under a unified decoder-only framework. Our model is modality-agnostic; both text and image tokens are treated as elements within one long unified sequence, and their respective logits are predicted iteratively in an autoregressive manner by the backbone transformer.
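To make the factorization concrete, here is a minimal sketch of how one unified sequence could be scored under this objective. `backbone`, `text_head`, and `image_head` are hypothetical stand-ins for the Gemma transformer, the classification head, and the diffusion head described below, not the paper's actual code:

```python
import torch

def sequence_nll(backbone, text_head, image_head, tokens, is_image):
    """Score one unified sequence under p(X) = prod_i p(x^i | x^{<i}).

    tokens:   [T, D] embedded text and image tokens in sequence order
    is_image: [T] bool, marking which positions hold continuous visual tokens
    """
    hidden = backbone(tokens)  # causal transformer: hidden[i] sees tokens[: i + 1]
    nll = 0.0
    for i in range(1, tokens.shape[0]):
        h = hidden[i - 1]  # conditions on x^1 .. x^{i-1} only
        if is_image[i]:
            nll = nll + image_head.nll(h, tokens[i])  # per-token diffusion loss
        else:
            nll = nll + text_head.nll(h, tokens[i])   # cross-entropy over the vocabulary
    return nll
```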
To accommodate the distinct nature of the text and image modalities, we employ modality-specific prediction heads to compute the appropriate loss function and perform sampling for each modality. This unified approach allows the model to learn a shared representation space through the unified training procedure, facilitating synergistic learning and enabling seamless transitions between visual generation and understanding.

# 3.2 Architecture

As depicted in Figure 2, UniFluid employs a unified framework where both text and image inputs are tokenized and projected into a shared embedding space. This allows it to leverage a decoder-only transformer as the core backbone for the unified autoregressive task. Text inputs are tokenized using the SentencePiece tokenizer, resulting in discrete tokens with a vocabulary size of $V$. For image generation, images are encoded into continuous visual tokens using a continuous variational autoencoder. To facilitate image understanding, following PaliGemma, we use SigLIP as a separate image encoder to extract high-level information from the visual inputs. UniFluid includes a classification head to convert the transformer's text logits into a categorical distribution, and a diffusion head to convert image logits into a per-token probability distribution.

The inherent structure of text as a linear sequence aligns well with the standard 1D positional embeddings of the LLM, which are sufficient for text modeling and image understanding tasks. However, image tokens possess a 2D spatial structure. To capture this inherent 2D nature, we incorporate learnable 2D positional embeddings, which are added to the image token embeddings. Meanwhile, inspired by [58], to enable random-order generation, a positional embedding for the next predicted token is also added to each image token. To enhance the model's ability to initiate and guide image generation, we prepend a "Beginning of Image" (BOI) token to the sequence of continuous image tokens. This BOI token serves as a distinct signal indicating the start of the visual generation process. Given that the sequence length for generated image tokens is predefined (256 tokens for 256x256 images), an explicit "End of Image" token is unnecessary in our case.
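The image-side input embedding described above can be sketched as follows. The grid size (16), token dimension (16), and model width are taken or derived from Section 4.1; the module itself, including the wrap-around handling of the final token's "next" position, is our simplified reading rather than the released implementation:

```python
import torch
import torch.nn as nn

class ImageTokenEmbedder(nn.Module):
    """Sketch: continuous VAE tokens on a 16x16 grid, plus a learnable 2D
    positional embedding, plus the positional embedding of the *next* token
    to be predicted (for random-order generation), with a BOI token prepended."""

    def __init__(self, grid: int = 16, dim: int = 2048, token_dim: int = 16):
        super().__init__()
        self.proj = nn.Linear(token_dim, dim)        # continuous token -> model width
        self.row = nn.Embedding(grid, dim)           # learnable 2D position, row part
        self.col = nn.Embedding(grid, dim)           # learnable 2D position, column part
        self.boi = nn.Parameter(torch.zeros(1, dim)) # "Beginning of Image" token
        self.grid = grid

    def forward(self, tokens: torch.Tensor, order: torch.Tensor) -> torch.Tensor:
        # tokens: [N, token_dim] continuous visual tokens, N = grid * grid
        # order:  [N] generation order, a permutation of 0 .. N-1
        pos = self.row(order // self.grid) + self.col(order % self.grid)
        nxt = torch.roll(pos, shifts=-1, dims=0)     # position of the next predicted token
        x = self.proj(tokens[order]) + pos + nxt     # (last token wraps; sketch simplification)
        return torch.cat([self.boi, x], dim=0)       # [N + 1, dim]
```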
# 4 Implementation

# 4.1 Training

Per-token Classification Head for Discrete Text Tokens. We employ the same SentencePiece tokenizer as Gemma for text tokenization. The transformer's output logits for text are transformed into categorical probability distributions over the vocabulary, and we apply the standard cross-entropy loss, denoted as $\mathcal{L}_{\text{Text}}$, to optimize the prediction of these discrete text tokens.

Per-token Diffusion Head for Continuous Visual Tokens. We adopt the same continuous tokenizer as Fluid to embed 256x256 images into 32x32x4 continuous tokens, and use a patch size of 2 to merge every 4 tokens into one. To model the per-token distribution of the predicted continuous visual tokens, we employ a lightweight MLP as a diffusion head. We adopt the same diffusion process and loss function, denoted as $\mathcal{L}_{\text{Visual}}$, as in [21, 12], which is specifically tailored for continuous visual token prediction. For the understanding task, the input image resolution is $224 \times 224$, and we use SigLIP as the image encoder. Note that the SigLIP features are only used as a prefix for the understanding task during training, and no losses are added on top of them.
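For concreteness, the token bookkeeping implied above works out as follows; the 8x downsampling factor is inferred from the stated 256-to-32 resolution change:

```python
# A 256x256 image -> 32x32 latent grid with 4 channels (VAE downsamples 8x),
# then a patch size of 2 merges each 2x2 block of latents into one token.
latent_h = latent_w = 256 // 8
channels, patch = 4, 2
tokens_per_image = (latent_h // patch) * (latent_w // patch)
token_dim = channels * patch * patch
assert tokens_per_image == 256  # matches the 256-token sequence in Section 3.2
assert token_dim == 16          # per-token dimensionality fed to the diffusion head
```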
# Task-Specific Training Configurations.

Image Understanding: For image understanding tasks, the model is provided with image embeddings and question tokens as an input prefix. Following PaliGemma, we apply a bidirectional attention mask to both image and question tokens. A causal attention mask is applied to the answer tokens, ensuring that the model only attends to previous answer tokens during autoregressive generation. The text token loss, $\mathcal{L}_{\text{Text}}$, is calculated only on the answer text tokens.

Image Generation: Conversely, for image generation tasks, text prompts are provided as conditioning inputs. To maintain the appropriate information flow, we employ a bidirectional attention mask for the text prompt tokens, enabling them to attend to all other text tokens. A causal attention mask is applied to the image tokens, ensuring that each image token only attends to preceding image tokens. The visual token loss, $\mathcal{L}_{\text{Visual}}$, is calculated on the generated image tokens.
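Both configurations are instances of the same prefix-LM attention pattern: bidirectional attention over the conditioning prefix and causal attention over the generated suffix. A small self-contained sketch:

```python
import torch

def prefix_lm_mask(prefix_len: int, total_len: int) -> torch.Tensor:
    """Bidirectional attention over the prefix (image + question tokens, or
    the text prompt) and causal attention over the suffix (answer tokens, or
    image tokens). Returns a [total_len, total_len] bool matrix; True = may attend."""
    mask = torch.tril(torch.ones(total_len, total_len, dtype=torch.bool))
    mask[:prefix_len, :prefix_len] = True  # prefix tokens see each other fully
    return mask

# Example: a 5-token prompt followed by 3 generated tokens.
print(prefix_lm_mask(5, 8).int())
```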
Unified Loss Function. The total training loss for UniFluid is a weighted sum of the text token prediction loss and the visual token prediction loss, defined as $\mathcal{L} = \mathcal{L}_{\text{Visual}} + \lambda \cdot \mathcal{L}_{\text{Text}}$, where $\lambda$ is a hyper-parameter weighting the text token prediction loss, allowing us to balance the contributions of the two modalities during training.
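In code this is a one-line combination; `visual_nll` and `text_nll` are assumed helpers standing in for the two per-token losses above, and 0.005 is the $\lambda$ the paper later recommends:

```python
def unified_loss(batch, model, lam: float = 0.005):
    """Weighted sum of the two per-token losses; lam trades understanding
    (text loss) against generation (visual loss)."""
    loss_visual = model.visual_nll(batch)  # diffusion loss on image tokens
    loss_text = model.text_nll(batch)      # cross-entropy on answer text tokens
    return loss_visual + lam * loss_text
```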
Training Details. We train the model with a batch size of 2048 using the AdamW optimizer with a learning rate of 1e-4. The training process consists of 1 million steps with a constant learning rate schedule and a warm-up period of 65k steps. Following [58], for image generation, the image token order is randomly permuted during the initial 300k training iterations, then linearly annealed toward raster order between 300k and 600k iterations, and kept at raster order for the final 400k steps. Except for the comparison with Gemma-1, we use the Gemma-2 model series as the backbone transformer for all experiments in this paper.
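One way to realize that schedule is to anneal the probability of drawing a random permutation; this is our reading of "linearly anneals to raster" following [58], not a confirmed detail of the released recipe:

```python
import random

def image_token_order(step: int, n_tokens: int = 256) -> list[int]:
    """Random for the first 300k steps, a linearly decaying chance of a random
    permutation between 300k and 600k, pure raster order afterwards."""
    raster = list(range(n_tokens))
    if step < 300_000:
        p_random = 1.0
    elif step < 600_000:
        p_random = 1.0 - (step - 300_000) / 300_000
    else:
        p_random = 0.0
    if random.random() < p_random:
        order = raster.copy()
        random.shuffle(order)
        return order
    return raster
```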
# 4.2 Inference

For text decoding, we employ categorical sampling for each generated text prediction; the predicted token is selected from the vocabulary $V$ based on the sampled probability distribution. We use the same decoding strategy as PaliGemma: greedy decoding for all tasks except the downstream COCOcap (beam search, $n = 2$) and TextCaps (beam search, $n = 3$). For image decoding, we use a diffusion sampling process to generate continuous visual tokens, with the number of diffusion sampling steps set to 100 in our implementation.

As both text and image generation are performed at the token level, with predictions occurring one token at a time under a causal attention mechanism, we can efficiently utilize Key-Value (KV) caching. This optimization is applicable to both discrete text tokens and continuous visual tokens, significantly accelerating the inference process.
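The image-decoding loop with a KV cache can be sketched as below; `prefill`, `step`, `boi_embedding`, and `diffusion_head.sample` are assumed helper methods illustrating the control flow, not the paper's API:

```python
import torch

@torch.no_grad()
def generate_image_tokens(model, prompt_ids, n_tokens: int = 256, sampling_steps: int = 100):
    """Token-by-token image decoding with a KV cache: the text prompt is
    encoded once, then each transformer step reuses the cached keys/values
    and feeds back the freshly sampled continuous token."""
    cache = model.prefill(prompt_ids)             # build the KV cache for the prompt
    token = model.boi_embedding()                 # start from the BOI token
    image = []
    for _ in range(n_tokens):
        hidden, cache = model.step(token, cache)  # one transformer step, cache reused
        token = model.diffusion_head.sample(hidden, steps=sampling_steps)
        image.append(token)
    return torch.stack(image)                     # 256 continuous tokens -> VAE decoder
```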
# 5 Experiment
|
| 76 |
+
|
| 77 |
+
# 5.1 Setup
|
| 78 |
+
|
| 79 |
+
Datasets. We train our models using the WebLI dataset [7], a collection of high-quality image-text pairs. For visual generation, we follow Fluid to employ a WebLI subset of image and text descriptions specifically for the generation task. For visual understanding, consistent with PaliGemma, we leverage the image-text description pairs and image question-answer pairs that are also available within WebLI.
|
| 80 |
+
|
| 81 |
+
Evaluation Metrics. We assess the image generation quality using the FID [16] score on 30K images of the MS-COCO [24] training set and evaluate performance on the GenEval [13] benchmark, where we use the original text prompt without any rewrites. For evaluating visual understanding performance, we use the caption CIDEr score on MS-COCO. Given our similar training dataset and setup to PaliGemma, we also evaluate the finetuning performance on a variety of captioning and question answering tasks. We report the average score on 4 Captioning tasks, including COCOcap [24], Screen2Words [53], TextCaps [39], WidgetCap [22], and 20 QA tasks, including OKVQA [29], AOKVQA-MC [37], AOKVQA-DA [37], GQA [17], NLVR2 [43], AI2D [19], ScienceQA [28], RSVQA-lr [27], RSVQA-hr (test/test2) [27], ChartQA (human/aug) [30], VizWizVQA [15], TallyQA (simple/complex) [1], CountBenchQA [3], TextVQA [40], DocVQA [32], InfoVQA [31], ST-VQA [4].
|
| 82 |
+
|
| 83 |
+
In the following sections, we present the experimental results obtained under different configurations of UniFluid, providing insights into the relationship between the two tasks and highlighting key design choices for UniFluid training.
|
| 84 |
+
|
| 85 |
+
# 5.2 Main Results
|
| 86 |
+
|
| 87 |
+
Unified Training Improves Generation Performance. To evaluate the effectiveness of the unified training framework and determine whether unified training offers advantages compared to training separate models for different tasks, we perform controlled experiments to analyze the performance of models trained with a single task.
|
| 88 |
+
|
| 89 |
+
We first compare the visual generation performance of the model trained under the unified training objective with the performance of a text-to-image model (T2I only), trained solely with the visual
|
| 90 |
+
|
| 91 |
+
Table 1: Unified training achieves better generation performance than text-to-image only training. We evaluate the performance using MS-COCO zero-shot FID and GenEval score.
|
| 92 |
+
|
| 93 |
+
<table><tr><td>Training Target</td><td>Size</td><td>FID ↓</td><td>GenEval ↑</td></tr><tr><td>T2I only</td><td>0.7B</td><td>9.71</td><td>0.50</td></tr><tr><td>Unified</td><td>0.7B</td><td>8.39</td><td>0.52</td></tr><tr><td>T2I only</td><td>2B</td><td>7.88</td><td>0.59</td></tr><tr><td>Unified</td><td>2B</td><td>7.20</td><td>0.59</td></tr></table>
|
| 94 |
+
|
| 95 |
+
autoregressive objective for the generation task. We ensure that the total number of visual tokens for training is the same for the visual generation loss in both the unified model training and text-to-image only training scenarios. The generation performance comparison is presented in Table 1. The unified model achieves better performance compared to the T2I only model, despite both models having observed the same number of tokens for the visual generation task. This suggests that unified model training can be beneficial for visual generation tasks, and that visual understanding ability has the potential to unlock enhanced visual generation quality.
|
| 96 |
+
|
| 97 |
+
Trade-off Between Generation and Understanding. We also investigate whether the visual generation task can contribute to improved visual understanding performance. In the UniFluid unified training setup, the hyperparameter $\lambda$ controls the balance between the losses applied to image tokens and text tokens.
|
| 98 |
+
|
| 99 |
+
In Table 3 and Figure 3, we present the understanding and generation results with varying $\lambda$ of the 0.7B model. We compare the transfer performance to downstream understanding tasks between the unified model with different $\lambda$ and a image-to-text model (I2T only), trained solely with the image understanding objective. Within the unified training setup, a trade-off exists between visual generation and understanding tasks, which can be effectively controlled by adjusting the loss mixing weight, $\lambda$ . While increasing $\lambda$ can improve image understanding performance, ultimately exceeding the I2T-only baseline for downstream captioning, it concurrently diminishes the image generation capabilities.
|
| 100 |
+
|
| 101 |
+
In most scenarios, a smaller $\lambda$ value (e.g., 0.005) is advisable, maintaining a significant proportion (over $90\%$ ) of image understanding while supporting the generation of high-fidelity images. Larger $\lambda$ values, in contrast, strongly favor image understanding but result in a rapid drop of image generation ability, as indicated by a sharp rise in FID score when $\lambda$ exceeds 0.1. Qualitative results for image captioning and question answering, demonstrating the understanding capabilities of the fine-tuned model based on the unified model with Gemma-2 2B as backbone LLM and $\lambda = 0.005$ are presented in Figure 6.
|
| 102 |
+
|
| 103 |
+
Better Pre-trained LLM Backbone Leads to Better Visual Generation and Understanding. We investigate the effect of pre-trained LLMs within the unified model training setup, specifically examining whether more powerful LLMs contribute to enhanced image understanding performance and superior visual generation quality. To this end, we conducted experiments using Gemma-1 2B [47] and Gemma-2 2B [47] as backbone LLMs. Gemma-2 is a stronger LLM than Gemma-1 with $10\%$ average improvements across different text benchmarks.
|
| 104 |
+
|
| 105 |
+
Table 2: Performance comparison of image generation and understanding of UniFluid trained with different LLM backbone. FID and CIDEr are measured on MS-COCO. Gemma-2 gets much better performance compared to Gemma-1, for both image understanding and generation tasks.
|
| 106 |
+
|
| 107 |
+
<table><tr><td rowspan="2">Pretrained Model</td><td colspan="2">Generation</td><td colspan="3">Understanding</td></tr><tr><td>COCO FID ↓</td><td>GenEval ↑</td><td>COCO CIDEr ↑</td><td>Cap Avg ↑</td><td>QA Avg ↑</td></tr><tr><td>Gemma-1</td><td>9.73</td><td>0.52</td><td>38.02</td><td>113.40</td><td>60.21</td></tr><tr><td>Gemma-2</td><td>7.20</td><td>0.59</td><td>40.91</td><td>116.13</td><td>62.10</td></tr></table>
|
| 108 |
+
|
| 109 |
+
The experimental results are presented in Table 2. Here we used $\lambda = 0.005$ for all models. The results demonstrate that employing a stronger LLM is crucial for generating images with higher fidelity and quality. Gemma-2 achieves significantly lower FID scores compared to Gemma-1, highlighting that even though LLM pre-training is unimodal, without exposure to visual data, using a better LLM
|
| 110 |
+
|
| 111 |
+

|
| 112 |
+
Figure 3: Plot of image understanding and generation performance with varying $\lambda$ during training. We show the FID on MS-COCO, the average score for downstream captioning tasks (Cap Avg), and the average score for downstream QA tasks (QA Avg) with different $\lambda$ on the three figures. Green dashed lines show the FID of T2I-only model, blue and red dashed lines show the downstream captioning average and QA average for I2T-only model, respectively. In practice, smaller $\lambda$ has better trade-off between two tasks.




Table 3: Image generation and understanding results with different $\lambda$. $\lambda = 0.1$ roughly brings the generation and understanding losses to the same scale. We report MS-COCO FID and the average captioning and QA results on downstream understanding tasks, compared to the image-to-text (I2T) only baseline. A smaller $\lambda$ such as 0.005 is recommended in most cases: it preserves most of the image understanding capability while enabling the generation of high-quality images, outperforming the text-to-image (T2I) only baseline.

<table><tr><td rowspan="2">Task</td><td rowspan="2">T2I only</td><td rowspan="2">I2T only</td><td colspan="7">Unified, λ</td></tr><tr><td>0.005</td><td>0.01</td><td>0.05</td><td>0.1</td><td>0.2</td><td>1.0</td><td>2.0</td></tr><tr><td>Generation (FID) ↓</td><td>9.71</td><td>-</td><td>8.39</td><td>9.65</td><td>12.48</td><td>17.90</td><td>28.60</td><td>47.89</td><td>54.02</td></tr><tr><td>Understanding (Cap Avg) ↑</td><td>-</td><td>125.05</td><td>120.25</td><td>120.25</td><td>121.18</td><td>121.68</td><td>123.65</td><td>126.25</td><td>125.80</td></tr><tr><td>Understanding (QA Avg) ↑</td><td>-</td><td>65.95</td><td>60.29</td><td>61.37</td><td>61.48</td><td>61.87</td><td>65.17</td><td>65.64</td><td>65.88</td></tr></table>




































Figure 4: Downstream image understanding performance (after fine-tuning) on each benchmark (higher is better). Blue bars show the model backbone initialized with Gemma-1 2B, and green bars the backbone initialized with Gemma-2 2B. Initializing with a stronger LLM improves UniFluid's visual understanding performance on most datasets.











A black basketball shoe with a lightning bolt on it.



A melting apple.



A space elevator in the universe.



human life depicted entirely out of fractals.



Graffiti of a funny dog on a street wall.



Turtle swimming underwater, aesthetic, fantasy.



A transparent sculpture of a duck made out of glass.



An armchair in the shape of an avocado.



A hyper-detailed rendering of a geyser erupting in a colorful, geothermal landscape.



An astronaut riding a horse on the moon, oil painting by Van Gogh.



photo of an eagle with a golden crown resting upon its head.



A window with raindrops trickling down, overlooking a blurry city.



An image of a modern architectural building with large glass windows, situated on a cliff overlooking a serene ocean at sunset.



A cozy cabin in the middle of a snowy forest, surrounded by tall trees with lights glowing through the windows, a northern lights display visible in the sky.



A still life of a vase overflowing with vibrant flowers, painted in bold colors and textured brushstrokes, reminiscent of van Gogh's iconic style.



An otherworldly forest of giant glowing mushrooms under a vibrant night sky filled with distant planets and stars, creating a dreamlike, cosmic landscape.
Figure 5: Images generated by the UniFluid autoregressive model after aesthetic fine-tuning.


A close up view of a wooden pole with the word "sushi" in silver letters on it. The pole is in front of a parking lot. The pole is in the foreground, with a concrete sidewalk in the foreground. There are plants and trees in the background. There is a brown brick building with a black awning on the left side of it. There are trees in the background. The sky is clear and blue.



A close up view of a wet white and brown labradoodle dog walking on a gray cement surface. The dog has a red leash attached to its collar. The dog has a silver metal hook attached to its collar. The dog is walking towards the camera. The dog is casting a shadow on the cement surface. Behind the dog is a building with a large window on the side. A tree line is visible in the background. The sky is blue with a few white clouds.



Question: In what year did Panama's population reach 4.28 million?
Answer: 2020



Question: What device's average duration of in-app engagement sessions was 6.6 minutes?
Answer: android

Figure 6: The fine-tuned UniFluid model demonstrates strong image-to-text capability on image captioning and question answering.


Question: The bus is likely driving through which American city?
Answer: new york



Question: Which number birthday is probably being celebrated?
Answer: thirty


Change the black suitcase to a white one





Remove the fence from the photo





Change this into a 1950s Flintstone cartoon art style





Change the trees from flowers to icicles





Change the elephant into a giant rabbit









Change the bird to all white



Figure 7: Image editing results on the evaluation benchmark from the fine-tuned UniFluid autoregressive model. It handles multiple editing tasks effectively, including object removal, insertion, and style and color changes.


Turn it into a Paul Cezanne still life painting





Change the meat into carrots



Table 4: Performance comparison of image generation and understanding for UniFluid trained with different image generation orders. FID and CIDEr are measured on MS-COCO.
<table><tr><td rowspan="2">Generation Order</td><td colspan="2">Generation</td><td colspan="3">Understanding</td></tr><tr><td>COCO FID ↓</td><td>GenEval ↑</td><td>COCO CIDEr ↑</td><td>Cap Avg ↑</td><td>QA Avg ↑</td></tr><tr><td>Raster</td><td>8.28</td><td>0.59</td><td>45.57</td><td>116.55</td><td>61.81</td></tr><tr><td>Random</td><td>7.20</td><td>0.59</td><td>40.91</td><td>116.13</td><td>62.10</td></tr></table>
Training with Random Order Helps Generation But Not Understanding. Images inherently possess 2D structure. As demonstrated in Fluid [12], raster-order training can be problematic, potentially leading to collapse and artifacts characterized by disappearing patterns. Approaches such as RAR [58] and RandAR [33] propose training image-generation AR models in random order, which can improve ImageNet FID and yield better visual quality.
Here we study the effect of the visual generation order used during training within our unified framework. We compare random-order and raster-order training, both with Gemma-2 2B as the backbone LLM. The results in Table 4 indicate that for per-token image generation within a unified framework, raster-order training continues to underperform random-order generation. Incorporating random order during training thus helps ensure that the generated images are of high quality. However, it does not necessarily improve visual understanding: raster order achieves better performance on MS-COCO CIDEr and on the downstream captioning average (Cap Avg).
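To make the raster-versus-random comparison concrete, the sketch below shows one common way to set up random-order AR training, in the spirit of RAR/RandAR: each sample draws its own permutation of token positions, and the model must additionally be told which position it is asked to predict next (e.g., via a target positional embedding). This is an illustrative assumption, not the authors' implementation.

```python
import torch

# Sketch: build a random-order training batch from raster-order continuous
# image tokens. image_tokens is (B, N, D); the returned permutation lets the
# training loop add positional embeddings for visible and next-target slots.
def random_order_batch(image_tokens: torch.Tensor):
    B, N, D = image_tokens.shape
    perm = torch.stack([torch.randperm(N) for _ in range(B)])  # (B, N) per-sample order
    gather_idx = perm.unsqueeze(-1).expand(B, N, D)
    shuffled = torch.gather(image_tokens, 1, gather_idx)       # tokens in generation order
    return shuffled, perm

# Raster order is the special case perm = torch.arange(N) for every sample,
# so switching between the two regimes (Table 4) changes only this data path.
```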
# 5.3 More Generation Capabilities
We also verify the transferability of the trained model to various downstream generation tasks.
Aesthetic Fine-Tuning. To enhance the visual quality and aesthetic appeal of the generated images, we perform aesthetic fine-tuning on a publicly available dataset. The results are shown in Figure 5.
Image Editing Task. Since our unified framework is trained with multimodal inputs, it naturally extends to image editing tasks that involve both image and text-prompt inputs. We fine-tune the 2B UniFluid model on 4M image editing pairs from HQ-Edit [18] and UltraEdit [60]. In Figure 7, we apply the fine-tuned model to input images and editing prompts from a publicly available benchmark. Although preliminary, these experiments show that UniFluid can adapt and generalize to tasks involving interleaved data modalities.
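A natural way to support editing is to reuse the interleaved sequence format from pre-training: source image, then instruction, then target image, with the loss restricted to target-image positions. The layout below is a hypothetical sketch; the special tokens and the helper are illustrative assumptions, not the authors' code.

```python
# Hypothetical editing-example layout (assumption): the model conditions on
# the source image and the instruction, and only target-image positions
# receive the (continuous-token) generation loss.
def build_edit_example(src_img_tokens, instruction_ids, tgt_img_tokens,
                       boi="<boi>", eoi="<eoi>"):
    # sequence: <boi> src image <eoi> instruction <boi> target image <eoi>
    inputs = [boi, *src_img_tokens, eoi, *instruction_ids, boi, *tgt_img_tokens, eoi]
    # 1 marks supervised positions (target-image tokens only)
    n_prefix = len(src_img_tokens) + len(instruction_ids) + 3  # <boi>, <eoi>, <boi>
    loss_mask = [0] * n_prefix + [1] * len(tgt_img_tokens) + [0]
    return inputs, loss_mask
```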
# 6 Conclusion
In this paper, we presented UniFluid, a pure autoregressive framework for joint visual generation and understanding using continuous visual tokens. We identified an inherent trade-off between the visual generation and understanding tasks, yet showed that the two tasks can benefit each other under appropriately tuned training recipes. A careful choice of the loss balance between the two tasks allows a single unified model to achieve performance comparable to or exceeding single-task baselines. We investigated key design choices for UniFluid training, revealing the critical importance of a strong backbone LLM and random-order generation for unlocking high-quality visual generation. We believe this work encourages future research into continuous visual tokens for unified vision-language model training, paving the way for more efficient and powerful autoregressive multimodal systems.
Acknowledgements. We are grateful to Alex Rizkowsky and Amy Shen for their support in securing computational resources. We also wish to thank Charles Herrmann, Junhwa Hur, Shangbang Long, André Susano Pinto, Srinivas Kaza, David Salesin, and the VisCam team for their insightful discussions and constructive feedback, which greatly improved this work.
# References
[1] Manoj Acharya, Kushal Kafle, and Christopher Kanan. Tallyqa: Answering complex counting questions. In Proceedings of the AAAI conference on artificial intelligence, volume 33, pages 8076-8084, 2019.
[2] Jean-Baptiste Alayrac, Jeff Donahue, Pauline Luc, Antoine Miech, Iain Barr, Yana Hasson, Karel Lenc, Arthur Mensch, Katherine Millican, Malcolm Reynolds, et al. Flamingo: a visual language model for few-shot learning. Advances in neural information processing systems, 35:23716-23736, 2022.
[3] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, et al. Paligemma: A versatile 3b vlm for transfer. arXiv preprint arXiv:2407.07726, 2024.
[4] Ali Furkan Biten, Ruben Tito, Andres Mafla, Lluis Gomez, Marçal Rusinol, Ernest Valveny, CV Jawahar, and Dimosthenis Karatzas. Scene text visual question answering. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4291-4301, 2019.
[5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020.
[6] Huiwen Chang, Han Zhang, Jarred Barber, AJ Maschinot, Jose Lezama, Lu Jiang, Ming-Hsuan Yang, Kevin Murphy, William T Freeman, Michael Rubinstein, et al. Muse: Text-to-image generation via masked generative transformers. arXiv preprint arXiv:2301.00704, 2023.
[7] Xi Chen, Xiao Wang, Soravit Changpinyo, AJ Piergiovanni, Piotr Padlewski, Daniel Salz, Sebastian Goodman, Adam Grycner, Basil Mustafa, Lucas Beyer, et al. Pali: A jointly-scaled multilingual language-image model. arXiv preprint arXiv:2209.06794, 2022.
[8] Xiaokang Chen, Zhiyu Wu, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, and Chong Ruan. Janus-pro: Unified multimodal understanding and generation with data and model scaling. arXiv preprint arXiv:2501.17811, 2025.
[9] Aakanksha Chowdhery, Sharan Narang, Jacob Devlin, Maarten Bosma, Gaurav Mishra, Adam Roberts, Paul Barham, Hyung Won Chung, Charles Sutton, Sebastian Gehrmann, et al. Palm: Scaling language modeling with pathways. Journal of Machine Learning Research, 24(240):1-113, 2023.
[10] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. Instructblip: Towards general-purpose vision-language models with instruction tuning, 2023.
[11] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 conference of the North American chapter of the association for computational linguistics: human language technologies, volume 1 (long and short papers), pages 4171-4186, 2019.
[12] Lijie Fan, Tianhong Li, Siyang Qin, Yuanzhen Li, Chen Sun, Michael Rubinstein, Deqing Sun, Kaiming He, and Yonglong Tian. Fluid: Scaling autoregressive text-to-image generative models with continuous tokens. arXiv preprint arXiv:2410.13863, 2024.
[13] Dhruba Ghosh, Hannaneh Hajishirzi, and Ludwig Schmidt. Geneval: An object-focused framework for evaluating text-to-image alignment. Advances in Neural Information Processing Systems, 36:52132-52152, 2023.
[14] Karol Gregor, Ivo Danihelka, Andriy Mnih, Charles Blundell, and Daan Wierstra. Deep autoregressive networks. In International Conference on Machine Learning, pages 1242-1250. PMLR, 2014.
[15] Danna Gurari, Qing Li, Abigale J Stangl, Anhong Guo, Chi Lin, Kristen Grauman, Jiebo Luo, and Jeffrey P Bigham. Vizwiz grand challenge: Answering visual questions from blind people. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 3608-3617, 2018.
[16] Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler, and Sepp Hochreiter. Gans trained by a two time-scale update rule converge to a local nash equilibrium. Advances in neural information processing systems, 30, 2017.
[17] Drew A Hudson and Christopher D Manning. Gqa: A new dataset for real-world visual reasoning and compositional question answering. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 6700-6709, 2019.
[18] Mude Hui, Siwei Yang, Bingchen Zhao, Yichun Shi, Heng Wang, Peng Wang, Yuyin Zhou, and Cihang Xie. Hq-edit: A high-quality dataset for instruction-based image editing. arXiv preprint arXiv:2404.09990, 2024.
[19] Aniruddha Kembhavi, Mike Salvato, Eric Kolve, Minjoon Seo, Hannaneh Hajishirzi, and Ali Farhadi. A diagram is worth a dozen images. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part IV 14, pages 235-251. Springer, 2016.
[20] Taku Kudo and John Richardson. Sentencepiece: A simple and language independent subword tokenizer and detokenizer for neural text processing. arXiv preprint arXiv:1808.06226, 2018.
[21] Tianhong Li, Yonglong Tian, He Li, Mingyang Deng, and Kaiming He. Autoregressive image generation without vector quantization. Advances in Neural Information Processing Systems, 37:56424-56445, 2024.
[22] Yang Li, Gang Li, Luheng He, Jingjie Zheng, Hong Li, and Zhiwei Guan. Widget captioning: Generating natural language description for mobile user interface elements. arXiv preprint arXiv:2010.04295, 2020.
[23] Ji Lin, Hongxu Yin, Wei Ping, Pavlo Molchanov, Mohammad Shoeybi, and Song Han. Vila: On pre-training for visual language models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 26689-26699, 2024.
[24] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft coco: Common objects in context. In Computer Vision-ECCV 2014: 13th European Conference, Zurich, Switzerland, September 6-12, 2014, Proceedings, Part V 13, pages 740-755. Springer, 2014.
[25] Haotian Liu, Chunyuan Li, Yuheng Li, and Yong Jae Lee. Improved baselines with visual instruction tuning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26296-26306, 2024.
[26] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36:34892-34916, 2023.
[27] Sylvain Lobry, Diego Marcos, Jesse Murray, and Devis Tuia. Rsvqa: Visual question answering for remote sensing data. IEEE Transactions on Geoscience and Remote Sensing, 58(12):8555-8566, 2020.
[28] Pan Lu, Swaroop Mishra, Tanglin Xia, Liang Qiu, Kai-Wei Chang, Song-Chun Zhu, Oyvind Tafjord, Peter Clark, and Ashwin Kalyan. Learn to explain: Multimodal reasoning via thought chains for science question answering. Advances in Neural Information Processing Systems, 35:2507-2521, 2022.
[29] Kenneth Marino, Mohammad Rastegari, Ali Farhadi, and Roozbeh Mottaghi. Ok-vqa: A visual question answering benchmark requiring external knowledge. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 3195-3204, 2019.
[30] Ahmed Masry, Do Xuan Long, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. Chartqa: A benchmark for question answering about charts with visual and logical reasoning. arXiv preprint arXiv:2203.10244, 2022.
[31] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and CV Jawahar. Infographicvqa. In Proceedings of the IEEE/CVF Winter Conference on Applications of Computer Vision, pages 1697-1706, 2022.
[32] Minesh Mathew, Dimosthenis Karatzas, and CV Jawahar. Docvqa: A dataset for vqa on document images. In Proceedings of the IEEE/CVF winter conference on applications of computer vision, pages 2200-2209, 2021.
[33] Ziqi Pang, Tianyuan Zhang, Fujun Luan, Yunze Man, Hao Tan, Kai Zhang, William T Freeman, and Yu-Xiong Wang. Randar: Decoder-only autoregressive visual generation in random orders. arXiv preprint arXiv:2412.01827, 2024.
[34] Niki Parmar, Ashish Vaswani, Jakob Uszkoreit, Lukasz Kaiser, Noam Shazeer, Alexander Ku, and Dustin Tran. Image transformer. In International conference on machine learning, pages 4055-4064. PMLR, 2018.
[35] Alec Radford, Karthik Narasimhan, Tim Salimans, and Ilya Sutskever. Improving language understanding by generative pre-training. Technical Report, 2018.
[36] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models, 2021.
[37] Dustin Schwenk, Apoorv Khandelwal, Christopher Clark, Kenneth Marino, and Roozbeh Mottaghi. A-okvqa: A benchmark for visual question answering using world knowledge. In European conference on computer vision, pages 146-162. Springer, 2022.
[38] Weijia Shi, Xiaochuang Han, Chunting Zhou, Weixin Liang, Xi Victoria Lin, Luke Zettlemoyer, and Lili Yu. Llamafusion: Adapting pretrained language models for multimodal generation. arXiv preprint arXiv:2412.15188, 2024.
[39] Oleksii Sidorov, Ronghang Hu, Marcus Rohrbach, and Amanpreet Singh. Textcaps: a dataset for image captioning with reading comprehension. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part II 16, pages 742-758. Springer, 2020.
[40] Amanpreet Singh, Vivek Natarajan, Meet Shah, Yu Jiang, Xinlei Chen, Dhruv Batra, Devi Parikh, and Marcus Rohrbach. Towards vqa models that can read. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 8317-8326, 2019.
[41] Yang Song, Jascha Sohl-Dickstein, Diederik P Kingma, Abhishek Kumar, Stefano Ermon, and Ben Poole. Score-based generative modeling through stochastic differential equations. arXiv preprint arXiv:2011.13456, 2020.
[42] Andreas Steiner, André Susano Pinto, Michael Tschannen, Daniel Keysers, Xiao Wang, Yonatan Bitton, Alexey Gritsenko, Matthias Minderer, Anthony Sherbondy, Shangbang Long, et al. Paligemma 2: A family of versatile vlms for transfer. arXiv preprint arXiv:2412.03555, 2024.
[43] Alane Suhr, Stephanie Zhou, Ally Zhang, Iris Zhang, Huajun Bai, and Yoav Artzi. A corpus for reasoning about natural language grounded in photographs. arXiv preprint arXiv:1811.00491, 2018.
[44] Quan Sun, Qiying Yu, Yufeng Cui, Fan Zhang, Xiaosong Zhang, Yueze Wang, Hongcheng Gao, Jingjing Liu, Tiejun Huang, and Xinlong Wang. Emu: Generative pretraining in multimodality. arXiv preprint arXiv:2307.05222, 2023.
[45] Yutao Sun, Hangbo Bao, Wenhui Wang, Zhiliang Peng, Li Dong, Shaohan Huang, Jianyong Wang, and Furu Wei. Multimodal latent language modeling with next-token diffusion. arXiv preprint arXiv:2412.08635, 2024.
[46] Chameleon Team. Chameleon: Mixed-modal early-fusion foundation models. arXiv preprint arXiv:2405.09818, 2024.
[47] Gemma Team, Thomas Mesnard, Cassidy Hardin, Robert Dadashi, Surya Bhupatiraju, Shreya Pathak, Laurent Sifre, Morgane Riviere, Mihir Sanjay Kale, Juliette Love, et al. Gemma: Open models based on gemini research and technology. arXiv preprint arXiv:2403.08295, 2024.
[48] Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, Léonard Hussenot, Thomas Mesnard, Bobak Shahriari, Alexandre Ramé, et al. Gemma 2: Improving open language models at a practical size. arXiv preprint arXiv:2408.00118, 2024.
[49] Keyu Tian, Yi Jiang, Zehuan Yuan, Bingyue Peng, and Liwei Wang. Visual autoregressive modeling: Scalable image generation via next-scale prediction. Advances in neural information processing systems, 37:84839-84865, 2024.
[50] Shengbang Tong, David Fan, Jiachen Zhu, Yunyang Xiong, Xinlei Chen, Koustuv Sinha, Michael Rabbat, Yann LeCun, Saining Xie, and Zhuang Liu. Metamorph: Multimodal understanding and generation via instruction tuning. arXiv preprint arXiv:2412.14164, 2024.
[51] Aaron Van den Oord, Nal Kalchbrenner, Lasse Espeholt, Oriol Vinyals, Alex Graves, et al. Conditional image generation with pixelcnn decoders. Advances in neural information processing systems, 29, 2016.
[52] Aäron Van Den Oord, Nal Kalchbrenner, and Koray Kavukcuoglu. Pixel recurrent neural networks. In International conference on machine learning, pages 1747-1756. PMLR, 2016.
[53] Bryan Wang, Gang Li, Xin Zhou, Zhourong Chen, Tovi Grossman, and Yang Li. Screen2words: Automatic mobile ui summarization with multimodal learning. In The 34th Annual ACM Symposium on User Interface Software and Technology, pages 498-510, 2021.
[54] Chengyue Wu, Xiaokang Chen, Zhiyu Wu, Yiyang Ma, Xingchao Liu, Zizheng Pan, Wen Liu, Zhenda Xie, Xingkai Yu, Chong Ruan, et al. Janus: Decoupling visual encoding for unified multimodal understanding and generation. arXiv preprint arXiv:2410.13848, 2024.
[55] Jinheng Xie, Weijia Mao, Zechen Bai, David Junhao Zhang, Weihao Wang, Kevin Qinghong Lin, Yuchao Gu, Zhijie Chen, Zhenheng Yang, and Mike Zheng Shou. Show-o: One single transformer to unify multimodal understanding and generation. arXiv preprint arXiv:2408.12528, 2024.
[56] Qinghao Ye, Haiyang Xu, Guohai Xu, Jiabo Ye, Ming Yan, Yiyang Zhou, Junyang Wang, Anwen Hu, Pengcheng Shi, Yaya Shi, et al. mplug-owl: Modularization empowers large language models with multimodality. arXiv preprint arXiv:2304.14178, 2023.
[57] Jiahui Yu, Yuanzhong Xu, Jing Yu Koh, Thang Luong, Gunjan Baid, Zirui Wang, Vijay Vasudevan, Alexander Ku, Yinfei Yang, Burcu Karagol Ayan, et al. Scaling autoregressive models for content-rich text-to-image generation. arXiv preprint arXiv:2206.10789, 2(3):5, 2022.
[58] Qihang Yu, Ju He, Xueqing Deng, Xiaohui Shen, and Liang-Chieh Chen. Randomized autoregressive visual generation. arXiv preprint arXiv:2411.00776, 2024.
[59] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF international conference on computer vision, pages 11975-11986, 2023.
[60] Haozhe Zhao, Xiaojian Shawn Ma, Liang Chen, Shuzheng Si, Rujie Wu, Kaikai An, Peiyu Yu, Minjia Zhang, Qing Li, and Baobao Chang. Ultraedit: Instruction-based fine-grained image editing at scale. Advances in Neural Information Processing Systems, 37:3058-3093, 2024.
[61] Chunting Zhou, Lili Yu, Arun Babu, Kushal Tirumala, Michihiro Yasunaga, Leonid Shamis, Jacob Kahn, Xuezhe Ma, Luke Zettlemoyer, and Omer Levy. Transfusion: Predict the next token and diffuse images with one multi-modal model. arXiv preprint arXiv:2408.11039, 2024.
[62] Deyao Zhu, Jun Chen, Xiaoqian Shen, Xiang Li, and Mohamed Elhoseiny. Minigpt-4: Enhancing vision-language understanding with advanced large language models. arXiv preprint arXiv:2304.10592, 2023.
data/2025/2503_13xxx/2503.13436/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:579336f9a285ffbe14ab19b8bbf987d33954350d83967b2aca26c1f6c1241d26
size 965784
data/2025/2503_13xxx/2503.13436/layout.json
ADDED
The diff for this file is too large to render.
See raw diff
data/2025/2503_13xxx/2503.13439/696dfadf-883e-4109-9010-e095246a341a_content_list.json
ADDED
The diff for this file is too large to render.
See raw diff