Add Batch 70c7d269-92a7-4854-8541-cddb989fdb92
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +64 -0
- 2303.14xxx/2303.14524/e5470f39-aaa6-4768-8235-0e7feb89df40_content_list.json +1736 -0
- 2303.14xxx/2303.14524/e5470f39-aaa6-4768-8235-0e7feb89df40_model.json +2192 -0
- 2303.14xxx/2303.14524/e5470f39-aaa6-4768-8235-0e7feb89df40_origin.pdf +3 -0
- 2303.14xxx/2303.14524/full.md +301 -0
- 2303.14xxx/2303.14524/images.zip +3 -0
- 2303.14xxx/2303.14524/layout.json +0 -0
- 2303.14xxx/2303.14526/78d21730-a964-44d8-b842-a279fb0ebf53_content_list.json +1648 -0
- 2303.14xxx/2303.14526/78d21730-a964-44d8-b842-a279fb0ebf53_model.json +0 -0
- 2303.14xxx/2303.14526/78d21730-a964-44d8-b842-a279fb0ebf53_origin.pdf +3 -0
- 2303.14xxx/2303.14526/full.md +350 -0
- 2303.14xxx/2303.14526/images.zip +3 -0
- 2303.14xxx/2303.14526/layout.json +0 -0
- 2303.14xxx/2303.14535/67d61c55-778e-4915-b385-6adbd8fac2a2_content_list.json +0 -0
- 2303.14xxx/2303.14535/67d61c55-778e-4915-b385-6adbd8fac2a2_model.json +0 -0
- 2303.14xxx/2303.14535/67d61c55-778e-4915-b385-6adbd8fac2a2_origin.pdf +3 -0
- 2303.14xxx/2303.14535/full.md +0 -0
- 2303.14xxx/2303.14535/images.zip +3 -0
- 2303.14xxx/2303.14535/layout.json +0 -0
- 2303.14xxx/2303.14536/ee265f86-4bbe-47c9-bde5-4a431a4b61da_content_list.json +0 -0
- 2303.14xxx/2303.14536/ee265f86-4bbe-47c9-bde5-4a431a4b61da_model.json +0 -0
- 2303.14xxx/2303.14536/ee265f86-4bbe-47c9-bde5-4a431a4b61da_origin.pdf +3 -0
- 2303.14xxx/2303.14536/full.md +656 -0
- 2303.14xxx/2303.14536/images.zip +3 -0
- 2303.14xxx/2303.14536/layout.json +0 -0
- 2303.14xxx/2303.14541/e7df9d3e-9a6b-46f5-8469-0690470eefdd_content_list.json +1606 -0
- 2303.14xxx/2303.14541/e7df9d3e-9a6b-46f5-8469-0690470eefdd_model.json +0 -0
- 2303.14xxx/2303.14541/e7df9d3e-9a6b-46f5-8469-0690470eefdd_origin.pdf +3 -0
- 2303.14xxx/2303.14541/full.md +355 -0
- 2303.14xxx/2303.14541/images.zip +3 -0
- 2303.14xxx/2303.14541/layout.json +0 -0
- 2303.14xxx/2303.14605/77700755-45b8-4f62-9943-a56edcf0bf45_content_list.json +0 -0
- 2303.14xxx/2303.14605/77700755-45b8-4f62-9943-a56edcf0bf45_model.json +0 -0
- 2303.14xxx/2303.14605/77700755-45b8-4f62-9943-a56edcf0bf45_origin.pdf +3 -0
- 2303.14xxx/2303.14605/full.md +857 -0
- 2303.14xxx/2303.14605/images.zip +3 -0
- 2303.14xxx/2303.14605/layout.json +0 -0
- 2303.14xxx/2303.14612/e1a8feb8-e3e0-42e1-846f-d4b91a7fc621_content_list.json +604 -0
- 2303.14xxx/2303.14612/e1a8feb8-e3e0-42e1-846f-d4b91a7fc621_model.json +890 -0
- 2303.14xxx/2303.14612/e1a8feb8-e3e0-42e1-846f-d4b91a7fc621_origin.pdf +3 -0
- 2303.14xxx/2303.14612/full.md +124 -0
- 2303.14xxx/2303.14612/images.zip +3 -0
- 2303.14xxx/2303.14612/layout.json +0 -0
- 2303.14xxx/2303.14613/fa86a159-6fcd-438c-a2b4-6ff1e438b175_content_list.json +0 -0
- 2303.14xxx/2303.14613/fa86a159-6fcd-438c-a2b4-6ff1e438b175_model.json +0 -0
- 2303.14xxx/2303.14613/fa86a159-6fcd-438c-a2b4-6ff1e438b175_origin.pdf +3 -0
- 2303.14xxx/2303.14613/full.md +0 -0
- 2303.14xxx/2303.14613/images.zip +3 -0
- 2303.14xxx/2303.14613/layout.json +0 -0
- 2303.14xxx/2303.14617/202514b2-218d-4a82-968c-224e4f16dc55_content_list.json +0 -0
.gitattributes
CHANGED
|
@@ -9513,3 +9513,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 9513 |
2303.17xxx/2303.17486/ae388ec5-badc-478c-8838-addfcb73ffd2_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9514 |
2304.00xxx/2304.00008/d44f4e5a-3791-4d7b-abf7-f27cb92589cc_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9515 |
2304.02xxx/2304.02017/d0843d31-ddd0-4fd8-8cf7-d47e9313508a_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 9513 |
2303.17xxx/2303.17486/ae388ec5-badc-478c-8838-addfcb73ffd2_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9514 |
2304.00xxx/2304.00008/d44f4e5a-3791-4d7b-abf7-f27cb92589cc_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9515 |
2304.02xxx/2304.02017/d0843d31-ddd0-4fd8-8cf7-d47e9313508a_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9516 |
+
2303.14xxx/2303.14524/e5470f39-aaa6-4768-8235-0e7feb89df40_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9517 |
+
2303.14xxx/2303.14526/78d21730-a964-44d8-b842-a279fb0ebf53_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9518 |
+
2303.14xxx/2303.14535/67d61c55-778e-4915-b385-6adbd8fac2a2_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9519 |
+
2303.14xxx/2303.14536/ee265f86-4bbe-47c9-bde5-4a431a4b61da_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9520 |
+
2303.14xxx/2303.14541/e7df9d3e-9a6b-46f5-8469-0690470eefdd_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9521 |
+
2303.14xxx/2303.14605/77700755-45b8-4f62-9943-a56edcf0bf45_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9522 |
+
2303.14xxx/2303.14612/e1a8feb8-e3e0-42e1-846f-d4b91a7fc621_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9523 |
+
2303.14xxx/2303.14613/fa86a159-6fcd-438c-a2b4-6ff1e438b175_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9524 |
+
2303.14xxx/2303.14617/202514b2-218d-4a82-968c-224e4f16dc55_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9525 |
+
2303.14xxx/2303.14622/0a7443f0-deb4-4bf8-a0f1-2b53d56050a0_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9526 |
+
2303.14xxx/2303.14623/ba1a4db9-7ef0-463a-90cf-6f7146c0a5ee_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9527 |
+
2303.14xxx/2303.14626/b02eff1a-87ac-4d17-a4bd-5277fcfd9bdc_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9528 |
+
2303.14xxx/2303.14644/32308b69-ba36-451b-9868-767c937e4a8b_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9529 |
+
2303.14xxx/2303.14651/2b39d1c5-6a8f-46f4-a387-7b10258e3e7a_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9530 |
+
2303.14xxx/2303.14652/98618334-9cf9-44cb-abc8-3aa38a8aa31f_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9531 |
+
2303.14xxx/2303.14662/0204391d-e38e-4de1-9c0e-752998b42e19_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9532 |
+
2303.14xxx/2303.14717/832175f6-5617-4de1-8f9f-8a3072c265ab_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9533 |
+
2303.14xxx/2303.14725/bea43ce8-9e9a-4c3f-9e4e-5efbc73166ea_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9534 |
+
2303.14xxx/2303.14736/123b57e0-f64b-4652-b16b-8ace8d3b5f37_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9535 |
+
2303.14xxx/2303.14742/1ab6d28b-7f57-4379-b8ea-a2f219736a4a_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9536 |
+
2303.14xxx/2303.14747/3fdb42a8-8565-42dc-925e-727f678f0b0b_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9537 |
+
2303.14xxx/2303.14771/3b9af192-057f-4f92-93c6-cab18101d102_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9538 |
+
2303.14xxx/2303.14773/996d80cd-de9d-499d-b163-df2797fcb78b_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9539 |
+
2303.14xxx/2303.14814/bd2267d6-c1c0-476a-86de-d4dba2e7f1a2_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9540 |
+
2303.14xxx/2303.14816/72411b18-873c-4201-a6f2-d9409c4cd4d1_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9541 |
+
2303.14xxx/2303.14822/93cda5a3-f204-4325-a056-a4624f4be942_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9542 |
+
2303.14xxx/2303.14859/b94ed85f-3c16-45ad-961d-454a57c56327_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9543 |
+
2303.14xxx/2303.14865/827d6501-753b-4c00-b74c-45034566f63c_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9544 |
+
2303.14xxx/2303.14869/128064d1-d289-4c5f-bd1b-78016750bdc7_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9545 |
+
2303.14xxx/2303.14877/e79e3a3b-2408-4b1e-a832-e3035b7e60d0_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9546 |
+
2303.14xxx/2303.14878/288d3617-4810-4171-bb45-3ec251e3c6dc_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9547 |
+
2303.14xxx/2303.14880/60564df1-0df5-44c8-a16f-4157da914125_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9548 |
+
2303.14xxx/2303.14897/db9923b6-b5ab-4f55-8db4-d9efb7e32ec1_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9549 |
+
2303.14xxx/2303.14933/4ac15f96-8f78-4e17-99cd-4d4bb148134a_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9550 |
+
2303.14xxx/2303.14934/d1761717-84fb-4c14-8325-5811bd47111a_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9551 |
+
2303.14xxx/2303.14953/36a8960a-033c-42b8-844f-2fb00640e1c4_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9552 |
+
2303.14xxx/2303.14960/ba4a8dcb-e27b-4ab7-bb25-34832e8e6ea6_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9553 |
+
2303.14xxx/2303.14968/d41cd924-507a-4891-bbef-bc20bcb2fd51_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9554 |
+
2303.14xxx/2303.14978/28d4652c-5fe9-4b79-8535-ea2f5500e3cf_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9555 |
+
2303.15xxx/2303.15014/c50c5a9b-6c00-440d-a08e-96d1c387201c_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9556 |
+
2303.15xxx/2303.15027/674fa650-1c15-46d6-9e0e-efa04ad5f55d_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9557 |
+
2303.15xxx/2303.15046/c8da6a27-8689-4921-bb35-053ddad04bba_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9558 |
+
2303.15xxx/2303.15056/7c42115c-d364-4d89-b0c7-1011d71d0b06_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9559 |
+
2303.15xxx/2303.15078/e42cb6b2-6cc1-4a6a-851c-8f332b09d2ec_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9560 |
+
2303.15xxx/2303.15083/d82ebc93-8037-4d02-b0fe-0bbe7269e8d6_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9561 |
+
2303.15xxx/2303.15105/a2affe46-9075-4dc2-9c47-272496af3d3d_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9562 |
+
2303.15xxx/2303.15108/b3ab1766-5599-4fd9-855c-e401483eb944_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9563 |
+
2303.15xxx/2303.15111/24852e8b-f7a1-4eee-a050-5b9649ccfc96_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9564 |
+
2303.15xxx/2303.15140/95d05bca-d301-4cbd-9a3a-923c7c1406de_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9565 |
+
2303.15xxx/2303.15149/face1940-23c9-4d22-8987-8419ddb39d59_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9566 |
+
2303.15xxx/2303.15166/f1605bf4-9aeb-49f8-b246-04655d11c0a6_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9567 |
+
2303.15xxx/2303.15167/db329b8c-fa0c-483c-aa0b-1a81fca2ae74_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9568 |
+
2303.15xxx/2303.15231/92888c8e-041e-44e7-ac50-d6a6e5f138b7_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9569 |
+
2303.15xxx/2303.15233/1208b7d4-8117-4591-9ab1-f2aa3a55eb23_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9570 |
+
2303.15xxx/2303.15247/b48866c8-5e8d-4a0d-baea-176696fe03e2_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9571 |
+
2303.15xxx/2303.15269/ad53eb38-8bce-4a7d-bef6-fc1a64a1893d_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9572 |
+
2303.15xxx/2303.15270/58f648b4-78ee-4dbc-b19f-e6f46e23374f_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9573 |
+
2303.15xxx/2303.15274/7006b412-25b0-45b3-9979-74115fd3d017_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9574 |
+
2303.15xxx/2303.15288/87a4385f-709f-446a-8280-d48a1703ad47_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9575 |
+
2303.15xxx/2303.15322/c2cf7f75-b59d-4805-a18f-10b697e8caa6_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9576 |
+
2303.15xxx/2303.15991/3ae1a82d-64d1-40b3-a129-5e6b7c0d2d3c_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9577 |
+
2303.18xxx/2303.18191/23a80b84-3ce4-4569-9129-b69f7debf283_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9578 |
+
2304.06xxx/2304.06632/07c5d6e8-685f-496a-a043-a4515270f4b3_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
| 9579 |
+
2304.06xxx/2304.06678/906a5b18-59ee-4efe-a7ab-36b916a11077_origin.pdf filter=lfs diff=lfs merge=lfs -text
|
2303.14xxx/2303.14524/e5470f39-aaa6-4768-8235-0e7feb89df40_content_list.json
ADDED
|
@@ -0,0 +1,1736 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Chat-REC: Towards Interactive and Explainable LLMs-Augmented Recommender System",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
218,
|
| 8 |
+
140,
|
| 9 |
+
782,
|
| 10 |
+
186
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Yunfan Gao $^{1}$ , Tao Sheng $^{1}$ , Youlin Xiang $^{1}$ , Yun Xiong $^{1}$ , Haofen Wang $^{2}$ , and Jiawei Zhang $^{3}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
228,
|
| 19 |
+
210,
|
| 20 |
+
772,
|
| 21 |
+
243
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "<sup>1</sup> Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University, Shanghai, China",
|
| 28 |
+
"bbox": [
|
| 29 |
+
230,
|
| 30 |
+
253,
|
| 31 |
+
771,
|
| 32 |
+
282
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "yufan1602@163.com",
|
| 39 |
+
"bbox": [
|
| 40 |
+
433,
|
| 41 |
+
284,
|
| 42 |
+
568,
|
| 43 |
+
297
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "tsheng16@fudan.edu.cn",
|
| 50 |
+
"bbox": [
|
| 51 |
+
419,
|
| 52 |
+
297,
|
| 53 |
+
583,
|
| 54 |
+
310
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "21210240365@m.fudan.edu.cn",
|
| 61 |
+
"bbox": [
|
| 62 |
+
400,
|
| 63 |
+
311,
|
| 64 |
+
602,
|
| 65 |
+
323
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "yunx@fudan.edu.cn",
|
| 72 |
+
"bbox": [
|
| 73 |
+
434,
|
| 74 |
+
325,
|
| 75 |
+
568,
|
| 76 |
+
337
|
| 77 |
+
],
|
| 78 |
+
"page_idx": 0
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"text": "2 College of Design and Innovation, Tongji University, Shanghai, China",
|
| 83 |
+
"bbox": [
|
| 84 |
+
263,
|
| 85 |
+
338,
|
| 86 |
+
738,
|
| 87 |
+
352
|
| 88 |
+
],
|
| 89 |
+
"page_idx": 0
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"text": "carter.whfcarter@gmail.com",
|
| 94 |
+
"bbox": [
|
| 95 |
+
398,
|
| 96 |
+
354,
|
| 97 |
+
602,
|
| 98 |
+
364
|
| 99 |
+
],
|
| 100 |
+
"page_idx": 0
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"text": "<sup>3</sup> IFM Lab, Department of Computer Science, University of California, Davis, CA, USA",
|
| 105 |
+
"bbox": [
|
| 106 |
+
225,
|
| 107 |
+
366,
|
| 108 |
+
776,
|
| 109 |
+
392
|
| 110 |
+
],
|
| 111 |
+
"page_idx": 0
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"text": "jiawei@ifmlab.org",
|
| 116 |
+
"bbox": [
|
| 117 |
+
434,
|
| 118 |
+
395,
|
| 119 |
+
568,
|
| 120 |
+
407
|
| 121 |
+
],
|
| 122 |
+
"page_idx": 0
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"text": "Abstract. Large language models (LLMs) have demonstrated their significant potential to be applied for addressing various application tasks. However, traditional recommender systems continue to face great challenges such as poor interactivity and explainability, which actually also hinder their broad deployment in real-world systems. To address these limitations, this paper proposes a novel paradigm called CHAT-REC (ChatGPT Augmented Recommender System) that innovatively augments LLMs for building conversational recommender systems by converting user profiles and historical interactions into prompts. CHAT-REC is demonstrated to be effective in learning user preferences and establishing connections between users and products through in-context learning, which also makes the recommendation process more interactive and explainable. What's more, within the CHAT-REC framework, user's preferences can transfer to different products for cross-domain recommendations, and prompt-based injection of information into LLMs can also handle the cold-start scenarios with new items. In our experiments, CHAT-REC effectively improve the results of top-k recommendations and performs better in zero-shot rating prediction task. CHAT-REC offers a novel approach to improving recommender systems and presents new practical scenarios for the implementation of AIGC (AI generated content) in recommender system studies.",
|
| 127 |
+
"bbox": [
|
| 128 |
+
259,
|
| 129 |
+
438,
|
| 130 |
+
746,
|
| 131 |
+
729
|
| 132 |
+
],
|
| 133 |
+
"page_idx": 0
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"text": "Keywords: LLMs $\\cdot$ Recommender System $\\cdot$ Prompt Engineering",
|
| 138 |
+
"bbox": [
|
| 139 |
+
261,
|
| 140 |
+
742,
|
| 141 |
+
700,
|
| 142 |
+
757
|
| 143 |
+
],
|
| 144 |
+
"page_idx": 0
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "text",
|
| 148 |
+
"text": "1 Introduction",
|
| 149 |
+
"text_level": 1,
|
| 150 |
+
"bbox": [
|
| 151 |
+
215,
|
| 152 |
+
780,
|
| 153 |
+
375,
|
| 154 |
+
796
|
| 155 |
+
],
|
| 156 |
+
"page_idx": 0
|
| 157 |
+
},
|
| 158 |
+
{
|
| 159 |
+
"type": "text",
|
| 160 |
+
"text": "With the scaling of model and corpus size, LLMs (Large Language Models) have shown remarkable capabilities, such as complex inference, knowledge infer-",
|
| 161 |
+
"bbox": [
|
| 162 |
+
212,
|
| 163 |
+
809,
|
| 164 |
+
785,
|
| 165 |
+
839
|
| 166 |
+
],
|
| 167 |
+
"page_idx": 0
|
| 168 |
+
},
|
| 169 |
+
{
|
| 170 |
+
"type": "aside_text",
|
| 171 |
+
"text": "arXiv:2303.14524v2 [cs.IR] 4 Apr 2023",
|
| 172 |
+
"bbox": [
|
| 173 |
+
22,
|
| 174 |
+
279,
|
| 175 |
+
60,
|
| 176 |
+
700
|
| 177 |
+
],
|
| 178 |
+
"page_idx": 0
|
| 179 |
+
},
|
| 180 |
+
{
|
| 181 |
+
"type": "text",
|
| 182 |
+
"text": "ence, and external robustness [4,6]. These capabilities, referred to as Emergent Abilities, only become apparent after reaching a specific threshold of model parameters [20]. The emergence of LLMs has brought about a paradigm shift in research. Previously, applying models to downstream tasks typically involved adjusting model parameters through backpropagation. However, the latest development of LLMs [18] has enabled both researchers and practitioners to facilitate learning during the forward process by constructing prompts, namely In-Context Learning (ICL) [1]. In addition, the adoption of techniques such as Chain-of-Thought [21] and Instruct Learning [19] has further harnessed the reasoning capabilities and task generalization abilities of LLMs, thereby promoting their application across various domains.",
|
| 183 |
+
"bbox": [
|
| 184 |
+
212,
|
| 185 |
+
146,
|
| 186 |
+
787,
|
| 187 |
+
313
|
| 188 |
+
],
|
| 189 |
+
"page_idx": 1
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"type": "text",
|
| 193 |
+
"text": "In the era of big data, manual information searching has become infeasible and recommender systems have been widely deployed for automatically inferring people's preference and providing high-quality recommendation services. However, due to the great limitations and drawbacks in both model design and data distribution biases, most existing recommender systems still have great performance in their real-world deployment. One of the primary constraints is their poor interactivity, explainability, and lack of feedback mechanisms. Another limitation is the cold start problem, which makes it difficult to provide accurate recommendations for both new items and new users. Lastly, current recommender systems face challenges in making recommendations across multiple domains [26]. In many recommendation tasks, in order to obtain the required background or general knowledge, an external library or knowledge graph needs to be set up for retrieval [22] or multi-task learning needs to be trained on augmented data [8]. LLMs offer a promising solution to these challenges. They can generate more natural and explainable recommendations, solve the cold start problem, and make cross-domain recommendations. Additionally, LLMs have stronger interactivity and feedback mechanisms, which enhance the overall user experience. By leveraging internal knowledge, LLMs can improve the performance of recommender systems without relying on external retrievers [23].",
|
| 194 |
+
"bbox": [
|
| 195 |
+
212,
|
| 196 |
+
316,
|
| 197 |
+
787,
|
| 198 |
+
604
|
| 199 |
+
],
|
| 200 |
+
"page_idx": 1
|
| 201 |
+
},
|
| 202 |
+
{
|
| 203 |
+
"type": "text",
|
| 204 |
+
"text": "Applying LLMs for addressing the recommendation tasks has received several preliminary research experimental trials already [12,7,25]. Recommender system tasks are formulated as prompt-based natural language tasks, where user-item information and corresponding features are integrated with personalized prompt templates as model inputs. However, in the current research, LLMs are still involved in training as part of the model.",
|
| 205 |
+
"bbox": [
|
| 206 |
+
212,
|
| 207 |
+
608,
|
| 208 |
+
787,
|
| 209 |
+
699
|
| 210 |
+
],
|
| 211 |
+
"page_idx": 1
|
| 212 |
+
},
|
| 213 |
+
{
|
| 214 |
+
"type": "text",
|
| 215 |
+
"text": "In this paper, we introduce a novel approach to learning conversational recommender systems augmented by LLMs, which possess both interactive and explainable capabilities. We present a paradigm called CHAT-REC (ChatGPT Augmented Recommender System) that does not require training and instead relies solely on in-context learning, resulting in more efficient and effective outcomes. With LLM-enhanced recommender system, it is beneficial to learn users' preferences during the conversation. After each step of the conversation, the user's preferences can be further drilled down to update the candidate recommendation results. In addition, users' preferences between products are linked,",
|
| 216 |
+
"bbox": [
|
| 217 |
+
212,
|
| 218 |
+
704,
|
| 219 |
+
787,
|
| 220 |
+
840
|
| 221 |
+
],
|
| 222 |
+
"page_idx": 1
|
| 223 |
+
},
|
| 224 |
+
{
|
| 225 |
+
"type": "page_number",
|
| 226 |
+
"text": "2",
|
| 227 |
+
"bbox": [
|
| 228 |
+
217,
|
| 229 |
+
114,
|
| 230 |
+
228,
|
| 231 |
+
126
|
| 232 |
+
],
|
| 233 |
+
"page_idx": 1
|
| 234 |
+
},
|
| 235 |
+
{
|
| 236 |
+
"type": "header",
|
| 237 |
+
"text": "Gao et al.",
|
| 238 |
+
"bbox": [
|
| 239 |
+
271,
|
| 240 |
+
114,
|
| 241 |
+
339,
|
| 242 |
+
127
|
| 243 |
+
],
|
| 244 |
+
"page_idx": 1
|
| 245 |
+
},
|
| 246 |
+
{
|
| 247 |
+
"type": "text",
|
| 248 |
+
"text": "allowing for better cross-domain product recommendations. We conducted recommendation and rating tests on real-world datasets and experimental results show that Chat-REC achieves significant improvements. Chat-REC sheds light on a promising technical route for the application of conversation AI such as ChatGPT in multiple recommendation scenarios.",
|
| 249 |
+
"bbox": [
|
| 250 |
+
212,
|
| 251 |
+
146,
|
| 252 |
+
787,
|
| 253 |
+
222
|
| 254 |
+
],
|
| 255 |
+
"page_idx": 2
|
| 256 |
+
},
|
| 257 |
+
{
|
| 258 |
+
"type": "text",
|
| 259 |
+
"text": "Our contributions are summarized as follows:",
|
| 260 |
+
"bbox": [
|
| 261 |
+
238,
|
| 262 |
+
222,
|
| 263 |
+
568,
|
| 264 |
+
237
|
| 265 |
+
],
|
| 266 |
+
"page_idx": 2
|
| 267 |
+
},
|
| 268 |
+
{
|
| 269 |
+
"type": "list",
|
| 270 |
+
"sub_type": "text",
|
| 271 |
+
"list_items": [
|
| 272 |
+
"- We introduce a novel and effective paradigm called CHAT-REC, which combines traditional recommender systems with LLMs through prompts, leveraging LLMs' ability to learn from context.",
|
| 273 |
+
"- CHAT-REC employs LLMs as a recommender system interface, enabling multi-round recommendations, enhancing interactivity and explainability.",
|
| 274 |
+
"- We evaluate our method on real-world datasets for top-k recommendation and rating prediction tasks and experiments demonstrate the effectiveness of CHAT-REC."
|
| 275 |
+
],
|
| 276 |
+
"bbox": [
|
| 277 |
+
223,
|
| 278 |
+
247,
|
| 279 |
+
784,
|
| 280 |
+
364
|
| 281 |
+
],
|
| 282 |
+
"page_idx": 2
|
| 283 |
+
},
|
| 284 |
+
{
|
| 285 |
+
"type": "text",
|
| 286 |
+
"text": "2 Related Work",
|
| 287 |
+
"text_level": 1,
|
| 288 |
+
"bbox": [
|
| 289 |
+
215,
|
| 290 |
+
392,
|
| 291 |
+
387,
|
| 292 |
+
407
|
| 293 |
+
],
|
| 294 |
+
"page_idx": 2
|
| 295 |
+
},
|
| 296 |
+
{
|
| 297 |
+
"type": "text",
|
| 298 |
+
"text": "2.1 Augmented Language Models",
|
| 299 |
+
"text_level": 1,
|
| 300 |
+
"bbox": [
|
| 301 |
+
215,
|
| 302 |
+
425,
|
| 303 |
+
504,
|
| 304 |
+
440
|
| 305 |
+
],
|
| 306 |
+
"page_idx": 2
|
| 307 |
+
},
|
| 308 |
+
{
|
| 309 |
+
"type": "text",
|
| 310 |
+
"text": "Augmented Language Models (ALMs) are a new research direction that aims to overcome the limitations of traditional Language Models (LMs) [5,1,4] by equipping them with reasoning skills and the ability to use external tools, which has served millions of users, such as the coding assistant Copilot [2], or more recently ChatGPT based on GPT3.5 and $\\mathrm{GPT4^4}$ . Reasoning is defined as breaking down complex tasks into simpler subtasks that the LM can solve more easily by itself or with the help of tools [9,15,13], while tools are external modules that the LM can call to augment its context. ALMs can use these augmentations separately or in combination to expand their context processing ability and outperform most regular LMs on several benchmarks. ALMs can learn to reason, use tools, and even act, while still performing standard natural language tasks. This new research direction has the potential to address common limitations of traditional LMs such as interpretability, consistency, and scalability issues. By jointly discussing reasoning and tools, and tools and actions, ALMs can solve a broad range of complex tasks without heuristics, thus offering better generalization capabilities.",
|
| 311 |
+
"bbox": [
|
| 312 |
+
212,
|
| 313 |
+
450,
|
| 314 |
+
787,
|
| 315 |
+
691
|
| 316 |
+
],
|
| 317 |
+
"page_idx": 2
|
| 318 |
+
},
|
| 319 |
+
{
|
| 320 |
+
"type": "text",
|
| 321 |
+
"text": "2.2 NLP for Recommendation",
|
| 322 |
+
"text_level": 1,
|
| 323 |
+
"bbox": [
|
| 324 |
+
215,
|
| 325 |
+
714,
|
| 326 |
+
478,
|
| 327 |
+
729
|
| 328 |
+
],
|
| 329 |
+
"page_idx": 2
|
| 330 |
+
},
|
| 331 |
+
{
|
| 332 |
+
"type": "text",
|
| 333 |
+
"text": "The field of recommender systems has had a long-standing relationship with natural language processing (NLP) techniques, especially when pre-trained language models (PLMs) comes out, which improve the performance of recommender systems and explainability [3,10,11]. PLMs are language models that have learned universal representations on large corpora in a self-supervised manner, and the",
|
| 334 |
+
"bbox": [
|
| 335 |
+
212,
|
| 336 |
+
739,
|
| 337 |
+
785,
|
| 338 |
+
816
|
| 339 |
+
],
|
| 340 |
+
"page_idx": 2
|
| 341 |
+
},
|
| 342 |
+
{
|
| 343 |
+
"type": "header",
|
| 344 |
+
"text": "Chat-REC: LLMs-Augmented Recommender System",
|
| 345 |
+
"bbox": [
|
| 346 |
+
377,
|
| 347 |
+
114,
|
| 348 |
+
730,
|
| 349 |
+
130
|
| 350 |
+
],
|
| 351 |
+
"page_idx": 2
|
| 352 |
+
},
|
| 353 |
+
{
|
| 354 |
+
"type": "page_number",
|
| 355 |
+
"text": "3",
|
| 356 |
+
"bbox": [
|
| 357 |
+
774,
|
| 358 |
+
116,
|
| 359 |
+
784,
|
| 360 |
+
126
|
| 361 |
+
],
|
| 362 |
+
"page_idx": 2
|
| 363 |
+
},
|
| 364 |
+
{
|
| 365 |
+
"type": "page_footnote",
|
| 366 |
+
"text": "4 https://openai.com/blog/chatgpt/",
|
| 367 |
+
"bbox": [
|
| 368 |
+
217,
|
| 369 |
+
824,
|
| 370 |
+
464,
|
| 371 |
+
840
|
| 372 |
+
],
|
| 373 |
+
"page_idx": 2
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
"type": "text",
|
| 377 |
+
"text": "learned representations can be beneficial to a series of downstream NLP tasks. In the recommendation domain, PLMs can help alleviate the data sparsity issue, which is a major performance bottleneck of current deep recommendation models. By extracting and transferring knowledge from pre-trained models learned by different PLM-related training paradigms, researchers aim to improve recommendation performance from various perspectives, such as generality, sparsity, efficiency, and effectiveness. In this vibrant field, there are open issues and future research directions that need to be explored, including the connection between PLM-based training paradigms and different input data types for recommender systems. Overall, adapting language modelling paradigms for recommendation is seen as a promising direction in both academia and industry.",
|
| 378 |
+
"bbox": [
|
| 379 |
+
212,
|
| 380 |
+
146,
|
| 381 |
+
787,
|
| 382 |
+
313
|
| 383 |
+
],
|
| 384 |
+
"page_idx": 3
|
| 385 |
+
},
|
| 386 |
+
{
|
| 387 |
+
"type": "text",
|
| 388 |
+
"text": "2.3 Cold-start Recommendation",
|
| 389 |
+
"text_level": 1,
|
| 390 |
+
"bbox": [
|
| 391 |
+
214,
|
| 392 |
+
333,
|
| 393 |
+
495,
|
| 394 |
+
347
|
| 395 |
+
],
|
| 396 |
+
"page_idx": 3
|
| 397 |
+
},
|
| 398 |
+
{
|
| 399 |
+
"type": "text",
|
| 400 |
+
"text": "Cold start recommendation is a problem that arises in recommender systems when users or items have no prior interaction records with the system. This means that there is no data available for the system to make personalized recommendations. To address this issue, solutions have been proposed that either learn to model content features [16] or transfer representations from auxiliary domains [24,26]. The former approach focuses on learning about the characteristics of the items or users based on their content, such as text, images, or metadata. The latter approach involves leveraging information from other domains, such as social networks or product descriptions, to infer user preferences. Additionally, there are approaches that aim to quickly adapt to new domains instead of only providing recommendations for cold-start cases. A good generalization ability of recommendation models on startup cases is essential to ensure a better user experience and increased engagement. In our work, we use the reasoning and background knowledge of LLMs to enhance the performance of recommender systems for cold start scenarios.",
|
| 401 |
+
"bbox": [
|
| 402 |
+
212,
|
| 403 |
+
356,
|
| 404 |
+
787,
|
| 405 |
+
583
|
| 406 |
+
],
|
| 407 |
+
"page_idx": 3
|
| 408 |
+
},
|
| 409 |
+
{
|
| 410 |
+
"type": "text",
|
| 411 |
+
"text": "3 Method",
|
| 412 |
+
"text_level": 1,
|
| 413 |
+
"bbox": [
|
| 414 |
+
214,
|
| 415 |
+
604,
|
| 416 |
+
330,
|
| 417 |
+
618
|
| 418 |
+
],
|
| 419 |
+
"page_idx": 3
|
| 420 |
+
},
|
| 421 |
+
{
|
| 422 |
+
"type": "text",
|
| 423 |
+
"text": "3.1 Bridge Recommender Systems and LLMs",
|
| 424 |
+
"text_level": 1,
|
| 425 |
+
"bbox": [
|
| 426 |
+
214,
|
| 427 |
+
619,
|
| 428 |
+
604,
|
| 429 |
+
636
|
| 430 |
+
],
|
| 431 |
+
"page_idx": 3
|
| 432 |
+
},
|
| 433 |
+
{
|
| 434 |
+
"type": "text",
|
| 435 |
+
"text": "Recommender systems are designed to suggest items to users based on their preferences and behavior. Traditionally, these systems have relied on user data such as clickstream and purchase history to make recommendations. However, NLP techniques have proven to be valuable in expanding the scope of recommender systems beyond traditional user data.",
|
| 436 |
+
"bbox": [
|
| 437 |
+
212,
|
| 438 |
+
643,
|
| 439 |
+
785,
|
| 440 |
+
718
|
| 441 |
+
],
|
| 442 |
+
"page_idx": 3
|
| 443 |
+
},
|
| 444 |
+
{
|
| 445 |
+
"type": "text",
|
| 446 |
+
"text": "NLP techniques can be used to analyze user-generated content such as reviews and social media posts to gain insights into user preferences and interests. LLMs can also be used to generate natural language responses to user queries, improving the overall user experience and engagement.",
|
| 447 |
+
"bbox": [
|
| 448 |
+
212,
|
| 449 |
+
719,
|
| 450 |
+
785,
|
| 451 |
+
779
|
| 452 |
+
],
|
| 453 |
+
"page_idx": 3
|
| 454 |
+
},
|
| 455 |
+
{
|
| 456 |
+
"type": "text",
|
| 457 |
+
"text": "To bridge recommender systems and LLMs, we propose an enhanced recommender system module based on ChatGPT, a large language model trained by OpenAI. As the Fig. 1 shows, the module takes as input user-item history interactions, user profile, user query $Q_{i}$ , and history of dialogue $H_{< i}$ (if available,",
|
| 458 |
+
"bbox": [
|
| 459 |
+
212,
|
| 460 |
+
779,
|
| 461 |
+
785,
|
| 462 |
+
840
|
| 463 |
+
],
|
| 464 |
+
"page_idx": 3
|
| 465 |
+
},
|
| 466 |
+
{
|
| 467 |
+
"type": "page_number",
|
| 468 |
+
"text": "4",
|
| 469 |
+
"bbox": [
|
| 470 |
+
217,
|
| 471 |
+
114,
|
| 472 |
+
228,
|
| 473 |
+
126
|
| 474 |
+
],
|
| 475 |
+
"page_idx": 3
|
| 476 |
+
},
|
| 477 |
+
{
|
| 478 |
+
"type": "header",
|
| 479 |
+
"text": "Gao et al.",
|
| 480 |
+
"bbox": [
|
| 481 |
+
271,
|
| 482 |
+
114,
|
| 483 |
+
339,
|
| 484 |
+
127
|
| 485 |
+
],
|
| 486 |
+
"page_idx": 3
|
| 487 |
+
},
|
| 488 |
+
{
|
| 489 |
+
"type": "image",
|
| 490 |
+
"img_path": "images/a3b57aed53432b5457f0729c818f0ba2e2a0458f3a0b6aa596727d519825e006.jpg",
|
| 491 |
+
"image_caption": [
|
| 492 |
+
"Fig.1: Overview of CHAT-REC. The left side shows a dialogue between a user and ChatGPT. The middle side shows the flowchart to how CHAT-REC links traditional recommender systems with conversational AI such as ChatGPT. The right side describes the specific judgment in the process."
|
| 493 |
+
],
|
| 494 |
+
"image_footnote": [],
|
| 495 |
+
"bbox": [
|
| 496 |
+
220,
|
| 497 |
+
146,
|
| 498 |
+
781,
|
| 499 |
+
338
|
| 500 |
+
],
|
| 501 |
+
"page_idx": 4
|
| 502 |
+
},
|
| 503 |
+
{
|
| 504 |
+
"type": "text",
|
| 505 |
+
"text": "and the notation $< i$ denotes the dialogue history prior to the current query), and interfaces with any recommender system R. If the task is determined to be a recommendation task, the module uses R to generate a candidate set of items. Otherwise, it directly outputs a response to the user, such as an explanation of a generation task or a request for item details.",
|
| 506 |
+
"bbox": [
|
| 507 |
+
212,
|
| 508 |
+
446,
|
| 509 |
+
787,
|
| 510 |
+
521
|
| 511 |
+
],
|
| 512 |
+
"page_idx": 4
|
| 513 |
+
},
|
| 514 |
+
{
|
| 515 |
+
"type": "text",
|
| 516 |
+
"text": "The prompt constructor module in the enhanced recommender system takes multiple inputs to generate a natural language paragraph that captures the user's query and recommendation information. The inputs are as follows:",
|
| 517 |
+
"bbox": [
|
| 518 |
+
212,
|
| 519 |
+
522,
|
| 520 |
+
785,
|
| 521 |
+
566
|
| 522 |
+
],
|
| 523 |
+
"page_idx": 4
|
| 524 |
+
},
|
| 525 |
+
{
|
| 526 |
+
"type": "list",
|
| 527 |
+
"sub_type": "text",
|
| 528 |
+
"list_items": [
|
| 529 |
+
"- User-item history interactions, which refers to the user's past interactions with items, such as items they have clicked, purchased, or rated. This information is used to understand the user's preferences and to personalize the recommendation.",
|
| 530 |
+
"- User profile, which contains demographic and preference information about the user. This may include age, gender, location, and interests. The user profile helps the system understand the user's characteristics and preferences.",
|
| 531 |
+
"- User query $Q_{i}$ , which is the user's specific request for information or recommendation. This may include a specific item or genre they are interested in, or a more general request for recommendations in a particular category.",
|
| 532 |
+
"- History of dialogue $H_{<i}$ , which contains the previous conversation between the user and the system. This information is used to understand the context of the user's query and to provide a more personalized and relevant response."
|
| 533 |
+
],
|
| 534 |
+
"bbox": [
|
| 535 |
+
225,
|
| 536 |
+
582,
|
| 537 |
+
784,
|
| 538 |
+
780
|
| 539 |
+
],
|
| 540 |
+
"page_idx": 4
|
| 541 |
+
},
|
| 542 |
+
{
|
| 543 |
+
"type": "text",
|
| 544 |
+
"text": "As shown in Fig. 2, the CHAT-REC framework proposed in this paper empower recommender systems with the conversational interface, which makes the interactive and explainable recommendation possible. Formally, based on the",
|
| 545 |
+
"bbox": [
|
| 546 |
+
212,
|
| 547 |
+
794,
|
| 548 |
+
785,
|
| 549 |
+
839
|
| 550 |
+
],
|
| 551 |
+
"page_idx": 4
|
| 552 |
+
},
|
| 553 |
+
{
|
| 554 |
+
"type": "header",
|
| 555 |
+
"text": "Chat-REC: LLMs-Augmented Recommender System",
|
| 556 |
+
"bbox": [
|
| 557 |
+
377,
|
| 558 |
+
114,
|
| 559 |
+
730,
|
| 560 |
+
128
|
| 561 |
+
],
|
| 562 |
+
"page_idx": 4
|
| 563 |
+
},
|
| 564 |
+
{
|
| 565 |
+
"type": "page_number",
|
| 566 |
+
"text": "5",
|
| 567 |
+
"bbox": [
|
| 568 |
+
774,
|
| 569 |
+
116,
|
| 570 |
+
784,
|
| 571 |
+
126
|
| 572 |
+
],
|
| 573 |
+
"page_idx": 4
|
| 574 |
+
},
|
| 575 |
+
{
|
| 576 |
+
"type": "text",
|
| 577 |
+
"text": "aforementioned inputs, the prompt constructor module generates a natural language paragraph that summarizes the user's query and recommendation information, and provides a more personalized and relevant response to the user's request. The intermediate answer generated by the recommender system is then used to refine the prompt constructor and generate an optimized prompt to further compress and refine the candidate set. The resulting recommendation and a brief explanation are output to the user.",
|
| 578 |
+
"bbox": [
|
| 579 |
+
212,
|
| 580 |
+
146,
|
| 581 |
+
787,
|
| 582 |
+
252
|
| 583 |
+
],
|
| 584 |
+
"page_idx": 5
|
| 585 |
+
},
|
| 586 |
+
{
|
| 587 |
+
"type": "text",
|
| 588 |
+
"text": "For example, in the first round of Q&A, the user requests action movies. The system determines that a recommendation task is needed, and executes the Recommend Action Movies module using the input information. The intermediate answer $A_{1}$ contains the top-20 results, which are then reranked and adjusted in the second module using the input information to generate the final output of the top-5 results.",
|
| 589 |
+
"bbox": [
|
| 590 |
+
212,
|
| 591 |
+
252,
|
| 592 |
+
787,
|
| 593 |
+
343
|
| 594 |
+
],
|
| 595 |
+
"page_idx": 5
|
| 596 |
+
},
|
| 597 |
+
{
|
| 598 |
+
"type": "text",
|
| 599 |
+
"text": "In the second round of Q&A, the user asks why the movie \"Cargo\" was recommended. The system determines that no recommendation task is needed and instead executes the explanation for the recommendation module, using the movie title, history interaction, and user profile as inputs. The answer $A_{2}$ is then generated, which provides a brief explanation of the recommendation, including information about the user's general interests and the specific characteristics of the movie that may be appealing to the user.",
|
| 600 |
+
"bbox": [
|
| 601 |
+
212,
|
| 602 |
+
343,
|
| 603 |
+
789,
|
| 604 |
+
449
|
| 605 |
+
],
|
| 606 |
+
"page_idx": 5
|
| 607 |
+
},
|
| 608 |
+
{
|
| 609 |
+
"type": "text",
|
| 610 |
+
"text": "3.2 Recommendation Based on Candidate Set Compression",
|
| 611 |
+
"text_level": 1,
|
| 612 |
+
"bbox": [
|
| 613 |
+
214,
|
| 614 |
+
469,
|
| 615 |
+
720,
|
| 616 |
+
484
|
| 617 |
+
],
|
| 618 |
+
"page_idx": 5
|
| 619 |
+
},
|
| 620 |
+
{
|
| 621 |
+
"type": "text",
|
| 622 |
+
"text": "Traditional recommender systems typically generate a small number of sorted candidate products, each with a score that reflects the system's recommendation confidence or result quality. However, considering the huge size of the product set, the performance obtained by most existing recommender systems are all way far from satisfactory, which still have a very large room for improvement.",
|
| 623 |
+
"bbox": [
|
| 624 |
+
212,
|
| 625 |
+
492,
|
| 626 |
+
787,
|
| 627 |
+
568
|
| 628 |
+
],
|
| 629 |
+
"page_idx": 5
|
| 630 |
+
},
|
| 631 |
+
{
|
| 632 |
+
"type": "text",
|
| 633 |
+
"text": "This article proposes a method of using LLMs to improve the performance of recommender systems by narrowing down the candidate set. The recommender system generates a large set of candidate items, which can be overwhelming for the user. LLMs play several different critical roles in narrowing down the product candidate set within the system. Firstly, we convert users' profiles and historical interactions into prompts, including the item description and user rating. Secondly, LLMs are asked to summarize user preferences for items in a domain based on the above information. LLMs can learn from context and effectively capture users' background information and preferences. With this information, they can establish the relationship between product attributes and user preferences, enabling them to make better product recommendations. By utilizing in-context learning, LLMs can enhance their recommendation reasoning ability, resulting in more accurate and personalized product recommendations.",
|
| 634 |
+
"bbox": [
|
| 635 |
+
212,
|
| 636 |
+
568,
|
| 637 |
+
787,
|
| 638 |
+
763
|
| 639 |
+
],
|
| 640 |
+
"page_idx": 5
|
| 641 |
+
},
|
| 642 |
+
{
|
| 643 |
+
"type": "text",
|
| 644 |
+
"text": "Once the LLMs have learned the user's preferences, the candidate set generated by the recommender system is provided to the LLMs. The LLMs can further filter and sort the candidate set based on the user's preferences. This approach ensures that the user is presented with a smaller, more relevant set of items, increasing the likelihood that they will find something they like.",
|
| 645 |
+
"bbox": [
|
| 646 |
+
212,
|
| 647 |
+
765,
|
| 648 |
+
787,
|
| 649 |
+
839
|
| 650 |
+
],
|
| 651 |
+
"page_idx": 5
|
| 652 |
+
},
|
| 653 |
+
{
|
| 654 |
+
"type": "page_number",
|
| 655 |
+
"text": "6",
|
| 656 |
+
"bbox": [
|
| 657 |
+
217,
|
| 658 |
+
114,
|
| 659 |
+
228,
|
| 660 |
+
126
|
| 661 |
+
],
|
| 662 |
+
"page_idx": 5
|
| 663 |
+
},
|
| 664 |
+
{
|
| 665 |
+
"type": "header",
|
| 666 |
+
"text": "Gao et al.",
|
| 667 |
+
"bbox": [
|
| 668 |
+
271,
|
| 669 |
+
114,
|
| 670 |
+
339,
|
| 671 |
+
127
|
| 672 |
+
],
|
| 673 |
+
"page_idx": 5
|
| 674 |
+
},
|
| 675 |
+
{
|
| 676 |
+
"type": "image",
|
| 677 |
+
"img_path": "images/cc90a74bfae5c93b7a1a871f4e105b27e4b07a3aa7604b35c327967c661227ff.jpg",
|
| 678 |
+
"image_caption": [
|
| 679 |
+
"Fig.2: Case study of interactive recommendation. It shows two conversations between different users and LLM. Where the user profile and historical users are converted into corresponding prompts for personalized recommendations, but the input of this part of the prompts is not visible to the user. The dialogue on the left shows that when a user asks why the movie was recommended, LLM can give an explanation based on the user's preferences and specific information about the recommended movie. The dialog on the right shows that CHAT-REC can make multiple rounds of recommendations based on user feedback. Questions about the details of the movie can also be answered in a specific way. LLM also takes into account ethical and moral issues when recommending movies."
|
| 680 |
+
],
|
| 681 |
+
"image_footnote": [],
|
| 682 |
+
"bbox": [
|
| 683 |
+
173,
|
| 684 |
+
152,
|
| 685 |
+
826,
|
| 686 |
+
757
|
| 687 |
+
],
|
| 688 |
+
"page_idx": 6
|
| 689 |
+
},
|
| 690 |
+
{
|
| 691 |
+
"type": "header",
|
| 692 |
+
"text": "Chat-REC: LLMs-Augmented Recommender System",
|
| 693 |
+
"bbox": [
|
| 694 |
+
377,
|
| 695 |
+
114,
|
| 696 |
+
730,
|
| 697 |
+
128
|
| 698 |
+
],
|
| 699 |
+
"page_idx": 6
|
| 700 |
+
},
|
| 701 |
+
{
|
| 702 |
+
"type": "page_number",
|
| 703 |
+
"text": "7",
|
| 704 |
+
"bbox": [
|
| 705 |
+
774,
|
| 706 |
+
116,
|
| 707 |
+
784,
|
| 708 |
+
126
|
| 709 |
+
],
|
| 710 |
+
"page_idx": 6
|
| 711 |
+
},
|
| 712 |
+
{
|
| 713 |
+
"type": "text",
|
| 714 |
+
"text": "3.3 Cold-start Recommendations",
|
| 715 |
+
"text_level": 1,
|
| 716 |
+
"bbox": [
|
| 717 |
+
215,
|
| 718 |
+
146,
|
| 719 |
+
503,
|
| 720 |
+
161
|
| 721 |
+
],
|
| 722 |
+
"page_idx": 7
|
| 723 |
+
},
|
| 724 |
+
{
|
| 725 |
+
"type": "text",
|
| 726 |
+
"text": "With the textual description and profile information about the products, regardless the new products or the old ones, LLMs can effectively relate such products with each other, which provides us with the opportunity for solving the persistent cold-start recommendation problem once and for all.",
|
| 727 |
+
"bbox": [
|
| 728 |
+
212,
|
| 729 |
+
176,
|
| 730 |
+
784,
|
| 731 |
+
236
|
| 732 |
+
],
|
| 733 |
+
"page_idx": 7
|
| 734 |
+
},
|
| 735 |
+
{
|
| 736 |
+
"type": "text",
|
| 737 |
+
"text": "For example, if a user asks for recommendations for a new movie that was released in 2021, the recommender system could use text data about the movie to generate an embedding and then calculate similarities to other movies in the system to make recommendations. This capability allows recommender systems to make relevant and accurate recommendations for new items, improving the overall user experience.",
|
| 738 |
+
"bbox": [
|
| 739 |
+
212,
|
| 740 |
+
237,
|
| 741 |
+
785,
|
| 742 |
+
327
|
| 743 |
+
],
|
| 744 |
+
"page_idx": 7
|
| 745 |
+
},
|
| 746 |
+
{
|
| 747 |
+
"type": "text",
|
| 748 |
+
"text": "Large language models can use the vast amount of knowledge they contain to help recommender systems alleviate the cold-start problem of new items, i.e., recommending items that lack a large number of user interactions. However, since the knowledge held by ChatGPT is limited to September 2021, ChatGPT does not cope well when encountering unknown items, such as a user requesting to recommend some new movies released in 2023 or content related to a movie that ChatGPT is not aware of, as shown in the top part of Fig. 3. To address this issue, we introduce external information about new items, utilizing large language models to generate corresponding embedding representations and cache them. When encountering new item recommendations, we calculate the similarity between item embeddings and embeddings of user requests and preferences, then retrieve the most relevant item information based on the similarity and construct a prompt to input to ChatGPT for recommendation, as illustrated in the lower half of Fig. 3. This approach allows the recommender system to work in conjunction with ChatGPT to better recommend new items, thus enhancing the user experience.",
|
| 749 |
+
"bbox": [
|
| 750 |
+
212,
|
| 751 |
+
329,
|
| 752 |
+
787,
|
| 753 |
+
570
|
| 754 |
+
],
|
| 755 |
+
"page_idx": 7
|
| 756 |
+
},
|
| 757 |
+
{
|
| 758 |
+
"type": "text",
|
| 759 |
+
"text": "3.4 Cross-Domain Recommendations",
|
| 760 |
+
"text_level": 1,
|
| 761 |
+
"bbox": [
|
| 762 |
+
215,
|
| 763 |
+
597,
|
| 764 |
+
535,
|
| 765 |
+
612
|
| 766 |
+
],
|
| 767 |
+
"page_idx": 7
|
| 768 |
+
},
|
| 769 |
+
{
|
| 770 |
+
"type": "text",
|
| 771 |
+
"text": "The LLMs-augmented recommender system introduced above can be used to address several challenging tasks, that are hard or even impossible to be addressed with conventional recommender systems, such as cross-domain recommendation [26] and cold-start recommendation [17]. In this part, we will first talk about how to use the LLMs-augmented recommender system for the cross-domain recommendation.",
|
| 772 |
+
"bbox": [
|
| 773 |
+
212,
|
| 774 |
+
626,
|
| 775 |
+
785,
|
| 776 |
+
715
|
| 777 |
+
],
|
| 778 |
+
"page_idx": 7
|
| 779 |
+
},
|
| 780 |
+
{
|
| 781 |
+
"type": "text",
|
| 782 |
+
"text": "LLMs pre-trained with information across the Internet actually can serve as the multi-perspective knowledge base [14]. Besides the target product in one domain, such as movies, the LLMs not only has a broad knowledge about products many other domains, like music and books, but also understands the relations among the products across the domains mentioned above.",
|
| 783 |
+
"bbox": [
|
| 784 |
+
212,
|
| 785 |
+
718,
|
| 786 |
+
785,
|
| 787 |
+
792
|
| 788 |
+
],
|
| 789 |
+
"page_idx": 7
|
| 790 |
+
},
|
| 791 |
+
{
|
| 792 |
+
"type": "text",
|
| 793 |
+
"text": "For example, as illustrated in Fig. 4, once the conversation regarding movie recommendations is finished, the user inquires LLM for suggestions on other types of works. LLM then proceeds to recommend a variety of options, such as",
|
| 794 |
+
"bbox": [
|
| 795 |
+
212,
|
| 796 |
+
795,
|
| 797 |
+
785,
|
| 798 |
+
840
|
| 799 |
+
],
|
| 800 |
+
"page_idx": 7
|
| 801 |
+
},
|
| 802 |
+
{
|
| 803 |
+
"type": "page_number",
|
| 804 |
+
"text": "8",
|
| 805 |
+
"bbox": [
|
| 806 |
+
217,
|
| 807 |
+
114,
|
| 808 |
+
228,
|
| 809 |
+
126
|
| 810 |
+
],
|
| 811 |
+
"page_idx": 7
|
| 812 |
+
},
|
| 813 |
+
{
|
| 814 |
+
"type": "header",
|
| 815 |
+
"text": "Gao et al.",
|
| 816 |
+
"bbox": [
|
| 817 |
+
271,
|
| 818 |
+
114,
|
| 819 |
+
339,
|
| 820 |
+
127
|
| 821 |
+
],
|
| 822 |
+
"page_idx": 7
|
| 823 |
+
},
|
| 824 |
+
{
|
| 825 |
+
"type": "image",
|
| 826 |
+
"img_path": "images/583dde7df8cb3598cec499dc49d2de8242c17b377f13ad9b0a8b866377bfc407.jpg",
|
| 827 |
+
"image_caption": [
|
| 828 |
+
"Fig. 3: Case Study of New Item Recommendation. The top shows that ChatGPT is unable to recommend new items beyond the timeframe of its training data. The middle part demonstrates the process of how to utilize external information about new items to enable ChatGPT to handle recommendations for new items. The bottom shows that ChatGPT can effectively handle recommendations for new items after incorporating external information."
|
| 829 |
+
],
|
| 830 |
+
"image_footnote": [],
|
| 831 |
+
"bbox": [
|
| 832 |
+
222,
|
| 833 |
+
147,
|
| 834 |
+
779,
|
| 835 |
+
753
|
| 836 |
+
],
|
| 837 |
+
"page_idx": 8
|
| 838 |
+
},
|
| 839 |
+
{
|
| 840 |
+
"type": "header",
|
| 841 |
+
"text": "Chat-REC: LLMs-Augmented Recommender System",
|
| 842 |
+
"bbox": [
|
| 843 |
+
377,
|
| 844 |
+
114,
|
| 845 |
+
730,
|
| 846 |
+
128
|
| 847 |
+
],
|
| 848 |
+
"page_idx": 8
|
| 849 |
+
},
|
| 850 |
+
{
|
| 851 |
+
"type": "page_number",
|
| 852 |
+
"text": "9",
|
| 853 |
+
"bbox": [
|
| 854 |
+
774,
|
| 855 |
+
116,
|
| 856 |
+
784,
|
| 857 |
+
126
|
| 858 |
+
],
|
| 859 |
+
"page_idx": 8
|
| 860 |
+
},
|
| 861 |
+
{
|
| 862 |
+
"type": "image",
|
| 863 |
+
"img_path": "images/f9b9b0d198c6c05fc6cfc3f5e8297a5c1f217c535e4948bd2f4356cebedc62ec.jpg",
|
| 864 |
+
"image_caption": [
|
| 865 |
+
"Fig. 4: Case study of cross-domain recommendation. After the conversation about the movie's recommendation is completed. The user asks LLM to recommend works other than movies. It can be seen that LLM recommends different types of works, including books, TV series Podcasts and video games, according to the user's movie preferences. This shows that LLM can migrate the user's movie preferences to items and thus achieve cross-domain recommendations."
|
| 866 |
+
],
|
| 867 |
+
"image_footnote": [],
|
| 868 |
+
"bbox": [
|
| 869 |
+
218,
|
| 870 |
+
143,
|
| 871 |
+
781,
|
| 872 |
+
392
|
| 873 |
+
],
|
| 874 |
+
"page_idx": 9
|
| 875 |
+
},
|
| 876 |
+
{
|
| 877 |
+
"type": "text",
|
| 878 |
+
"text": "books, TV series, podcasts, and video games, based on the user's movie preferences. This demonstrates LLM's ability to transfer the user's preferences from movies to other items, resulting in cross-domain recommendations. This cross-domain recommendation capability has the potential to significantly expand the scope and relevance of recommender systems.",
|
| 879 |
+
"bbox": [
|
| 880 |
+
212,
|
| 881 |
+
527,
|
| 882 |
+
787,
|
| 883 |
+
603
|
| 884 |
+
],
|
| 885 |
+
"page_idx": 9
|
| 886 |
+
},
|
| 887 |
+
{
|
| 888 |
+
"type": "text",
|
| 889 |
+
"text": "4 Experiment",
|
| 890 |
+
"text_level": 1,
|
| 891 |
+
"bbox": [
|
| 892 |
+
215,
|
| 893 |
+
628,
|
| 894 |
+
366,
|
| 895 |
+
645
|
| 896 |
+
],
|
| 897 |
+
"page_idx": 9
|
| 898 |
+
},
|
| 899 |
+
{
|
| 900 |
+
"type": "text",
|
| 901 |
+
"text": "4.1 Dataset and Experimental Settings",
|
| 902 |
+
"text_level": 1,
|
| 903 |
+
"bbox": [
|
| 904 |
+
215,
|
| 905 |
+
661,
|
| 906 |
+
552,
|
| 907 |
+
678
|
| 908 |
+
],
|
| 909 |
+
"page_idx": 9
|
| 910 |
+
},
|
| 911 |
+
{
|
| 912 |
+
"type": "text",
|
| 913 |
+
"text": "The dataset used in our experiment is MovieLens 100K, which is a benchmark dataset of a real-world recommender system. It comprises 100,000 movie ratings provided by 943 users on a scale of 1 to 5 across 1,682 movies. Additionally, the dataset contains demographic information about the users, such as age, gender, occupation, and zip code, as well as movie information, such as title, release year, and genres. To create our experimental dataset, we randomly selected 200 users. Table 1 provides detailed statistical information about the dataset used in the experiment.",
|
| 914 |
+
"bbox": [
|
| 915 |
+
212,
|
| 916 |
+
688,
|
| 917 |
+
787,
|
| 918 |
+
808
|
| 919 |
+
],
|
| 920 |
+
"page_idx": 9
|
| 921 |
+
},
|
| 922 |
+
{
|
| 923 |
+
"type": "text",
|
| 924 |
+
"text": "When evaluating the performance of top-k recommendations, Precision, Recall, and Normalized Discounted Cumulative Gain (NDCG) are used. For rating",
|
| 925 |
+
"bbox": [
|
| 926 |
+
212,
|
| 927 |
+
810,
|
| 928 |
+
787,
|
| 929 |
+
840
|
| 930 |
+
],
|
| 931 |
+
"page_idx": 9
|
| 932 |
+
},
|
| 933 |
+
{
|
| 934 |
+
"type": "page_number",
|
| 935 |
+
"text": "10",
|
| 936 |
+
"bbox": [
|
| 937 |
+
217,
|
| 938 |
+
114,
|
| 939 |
+
235,
|
| 940 |
+
126
|
| 941 |
+
],
|
| 942 |
+
"page_idx": 9
|
| 943 |
+
},
|
| 944 |
+
{
|
| 945 |
+
"type": "header",
|
| 946 |
+
"text": "Gao et al.",
|
| 947 |
+
"bbox": [
|
| 948 |
+
271,
|
| 949 |
+
114,
|
| 950 |
+
339,
|
| 951 |
+
126
|
| 952 |
+
],
|
| 953 |
+
"page_idx": 9
|
| 954 |
+
},
|
| 955 |
+
{
|
| 956 |
+
"type": "table",
|
| 957 |
+
"img_path": "images/da4845af0d08aba0721be960f122d866b49838da507d82ef9a5074cf113d0839.jpg",
|
| 958 |
+
"table_caption": [
|
| 959 |
+
"Table 1: Details of the dataset used for evaluation."
|
| 960 |
+
],
|
| 961 |
+
"table_footnote": [],
|
| 962 |
+
"table_body": "<table><tr><td>Dataset</td><td>Users</td><td>Items</td><td>Ratings</td><td>Rating Scale</td><td>Density</td></tr><tr><td>MovieLens 100K</td><td>943</td><td>1,682</td><td>100,000</td><td>[1-5]</td><td>6.304%</td></tr></table>",
|
| 963 |
+
"bbox": [
|
| 964 |
+
258,
|
| 965 |
+
171,
|
| 966 |
+
738,
|
| 967 |
+
203
|
| 968 |
+
],
|
| 969 |
+
"page_idx": 10
|
| 970 |
+
},
|
| 971 |
+
{
|
| 972 |
+
"type": "text",
|
| 973 |
+
"text": "prediction task, the Root Mean Squared Error (RMSE) and Mean Absolute Error (MAE) are employed as evaluation metrics.",
|
| 974 |
+
"bbox": [
|
| 975 |
+
212,
|
| 976 |
+
228,
|
| 977 |
+
782,
|
| 978 |
+
258
|
| 979 |
+
],
|
| 980 |
+
"page_idx": 10
|
| 981 |
+
},
|
| 982 |
+
{
|
| 983 |
+
"type": "text",
|
| 984 |
+
"text": "4.2 Baselines",
|
| 985 |
+
"text_level": 1,
|
| 986 |
+
"bbox": [
|
| 987 |
+
214,
|
| 988 |
+
277,
|
| 989 |
+
339,
|
| 990 |
+
292
|
| 991 |
+
],
|
| 992 |
+
"page_idx": 10
|
| 993 |
+
},
|
| 994 |
+
{
|
| 995 |
+
"type": "text",
|
| 996 |
+
"text": "The baseline methods studied in the experiment include both classic recommender system models and the LLMs-augmented recommender systems proposed in this paper. Detailed information about the comparison methods studied in our experiments are provided as follows:",
|
| 997 |
+
"bbox": [
|
| 998 |
+
212,
|
| 999 |
+
300,
|
| 1000 |
+
784,
|
| 1001 |
+
359
|
| 1002 |
+
],
|
| 1003 |
+
"page_idx": 10
|
| 1004 |
+
},
|
| 1005 |
+
{
|
| 1006 |
+
"type": "list",
|
| 1007 |
+
"sub_type": "text",
|
| 1008 |
+
"list_items": [
|
| 1009 |
+
"- LightFM is a recommendation algorithm that combines collaborative filtering and content-based methods to recommend items to users.",
|
| 1010 |
+
"- LightGCN is a graph-based collaborative filtering algorithm that uses a simplified graph convolutional network (GCN) to model the user-item interactions in a recommender system.",
|
| 1011 |
+
"- Item-KNN is a neighborhood-based collaborative filtering algorithm that uses the similarity between items to make recommendations to users.",
|
| 1012 |
+
"- Matrix Factorization (MF) is a widely used collaborative filtering algorithm that represents users and items as latent factors in a low-dimensional space."
|
| 1013 |
+
],
|
| 1014 |
+
"bbox": [
|
| 1015 |
+
223,
|
| 1016 |
+
368,
|
| 1017 |
+
784,
|
| 1018 |
+
515
|
| 1019 |
+
],
|
| 1020 |
+
"page_idx": 10
|
| 1021 |
+
},
|
| 1022 |
+
{
|
| 1023 |
+
"type": "text",
|
| 1024 |
+
"text": "We select three representative models from the GPT-3 and GPT-3.5 series as LLMs in CHAT-REC:",
|
| 1025 |
+
"bbox": [
|
| 1026 |
+
212,
|
| 1027 |
+
525,
|
| 1028 |
+
782,
|
| 1029 |
+
554
|
| 1030 |
+
],
|
| 1031 |
+
"page_idx": 10
|
| 1032 |
+
},
|
| 1033 |
+
{
|
| 1034 |
+
"type": "list",
|
| 1035 |
+
"sub_type": "text",
|
| 1036 |
+
"list_items": [
|
| 1037 |
+
"- gpt-3.5-turbo is the most capable GPT-3.5 model and optimized for chat.",
|
| 1038 |
+
"- text-davinci-003 can do any language task with better quality, longer output, and consistent instruction-following.",
|
| 1039 |
+
"- text-davinci-002 is similar to text-davinci-003 but is trained with supervised fine-tuning instead of reinforcement learning."
|
| 1040 |
+
],
|
| 1041 |
+
"bbox": [
|
| 1042 |
+
223,
|
| 1043 |
+
561,
|
| 1044 |
+
782,
|
| 1045 |
+
633
|
| 1046 |
+
],
|
| 1047 |
+
"page_idx": 10
|
| 1048 |
+
},
|
| 1049 |
+
{
|
| 1050 |
+
"type": "text",
|
| 1051 |
+
"text": "The model notations, like CHAT-REC (gpt-3.5-turbo), denote the CHAT-REC framework built by adopting \"gpt-3.5-turbo\" as the backbone model.",
|
| 1052 |
+
"bbox": [
|
| 1053 |
+
212,
|
| 1054 |
+
642,
|
| 1055 |
+
782,
|
| 1056 |
+
672
|
| 1057 |
+
],
|
| 1058 |
+
"page_idx": 10
|
| 1059 |
+
},
|
| 1060 |
+
{
|
| 1061 |
+
"type": "text",
|
| 1062 |
+
"text": "4.3 Result and Analysis",
|
| 1063 |
+
"text_level": 1,
|
| 1064 |
+
"bbox": [
|
| 1065 |
+
214,
|
| 1066 |
+
691,
|
| 1067 |
+
426,
|
| 1068 |
+
707
|
| 1069 |
+
],
|
| 1070 |
+
"page_idx": 10
|
| 1071 |
+
},
|
| 1072 |
+
{
|
| 1073 |
+
"type": "text",
|
| 1074 |
+
"text": "Top-5 Recommendation. As presented in Table 2, our proposed CHAT-REC framework has demonstrated effective improvement of traditional recommender systems in the top-k recommendation task. The NDCG scores of all three GPT-3.5 models surpassed that of LightGCN, with text-davinci-003 delivering the best result and demonstrating strong contextual learning abilities. Specifically, the precision score of 0.3240 is $6.93\\%$ higher than that of LightGCN, while NDCG score of 0.3802 is $11.01\\%$ higher. However, the recall rate of 0.1404 is slightly lower than that of LightGCN by $3.51\\%$ . It is noteworthy that the performance of gpt-3.5-turbo was slightly weaker than that of text-davinci-002.",
|
| 1075 |
+
"bbox": [
|
| 1076 |
+
212,
|
| 1077 |
+
713,
|
| 1078 |
+
784,
|
| 1079 |
+
849
|
| 1080 |
+
],
|
| 1081 |
+
"page_idx": 10
|
| 1082 |
+
},
|
| 1083 |
+
{
|
| 1084 |
+
"type": "header",
|
| 1085 |
+
"text": "Chat-REC: LLMs-Augmented Recommender System",
|
| 1086 |
+
"bbox": [
|
| 1087 |
+
377,
|
| 1088 |
+
114,
|
| 1089 |
+
730,
|
| 1090 |
+
128
|
| 1091 |
+
],
|
| 1092 |
+
"page_idx": 10
|
| 1093 |
+
},
|
| 1094 |
+
{
|
| 1095 |
+
"type": "page_number",
|
| 1096 |
+
"text": "11",
|
| 1097 |
+
"bbox": [
|
| 1098 |
+
767,
|
| 1099 |
+
114,
|
| 1100 |
+
782,
|
| 1101 |
+
126
|
| 1102 |
+
],
|
| 1103 |
+
"page_idx": 10
|
| 1104 |
+
},
|
| 1105 |
+
{
|
| 1106 |
+
"type": "table",
|
| 1107 |
+
"img_path": "images/62a2b38879d80c2c098f6fabbcd82a783de3f3d7d6b75292edc4d2ac4fc2f971.jpg",
|
| 1108 |
+
"table_caption": [
|
| 1109 |
+
"Table 2: Results of top-5 recommendation."
|
| 1110 |
+
],
|
| 1111 |
+
"table_footnote": [],
|
| 1112 |
+
"table_body": "<table><tr><td>Models</td><td>Precision</td><td>Recall</td><td>NDCG</td></tr><tr><td>LightFM</td><td>0.2830</td><td>0.1410</td><td>0.2846</td></tr><tr><td>LightGCN</td><td>0.3030</td><td>0.1455</td><td>0.3425</td></tr><tr><td>CHAT-REC (gpt-3.5-turbo)</td><td>0.3103</td><td>0.1279</td><td>0.3696</td></tr><tr><td>CHAT-REC (text-davinci-003)</td><td>0.3240 (+6.93%)</td><td>0.1404 (-3.51%)</td><td>0.3802 (+11.01%)</td></tr><tr><td>CHAT-REC (text-davinci-002)</td><td>0.3031</td><td>0.1240</td><td>0.3629</td></tr></table>",
|
| 1113 |
+
"bbox": [
|
| 1114 |
+
217,
|
| 1115 |
+
172,
|
| 1116 |
+
797,
|
| 1117 |
+
258
|
| 1118 |
+
],
|
| 1119 |
+
"page_idx": 11
|
| 1120 |
+
},
|
| 1121 |
+
{
|
| 1122 |
+
"type": "text",
|
| 1123 |
+
"text": "Rating Prediction As illustrated in the Table3, CHAT-REC outperforms traditional recommender systems in predicting movie ratings. The experimental results demonstrate that LLMs can effectively learn user preferences from user portraits and historical interactions through in-context learning, without any explicit training, and accurately predict user ratings for candidate movies. Since LightGCN is not well-suited for rating prediction tasks, it was excluded from our experimental range. Among the three GPT-3.5 models tested, text-davinci-003 achieved the best result, with an RMSE of 0.785, which is $15.86\\%$ higher than that of Item-KNN, and an MAE of 0.593, which is $19.21\\%$ higher. Text-davinci-002 came in second place. However, the performance of gpt-3.5-turbo was slightly weaker than that of Item-KNN. The experimental results reveal that even without relying on recommender systems, LLMs can achieve better results in predicting user preferences for specific movies. The weaker performance of gpt-3.5-turbo is due to the model's emphasis on the ability of human-computer dialogue and its trade-off of the in-context learning abilities, which is consistent with other research conclusions. Additionally, it also can be concluded that the performance of gpt-3.5-turbo in numerical prediction tasks is weaker than that of text-davinci-003 and text-davinci-002.",
|
| 1124 |
+
"bbox": [
|
| 1125 |
+
212,
|
| 1126 |
+
284,
|
| 1127 |
+
787,
|
| 1128 |
+
556
|
| 1129 |
+
],
|
| 1130 |
+
"page_idx": 11
|
| 1131 |
+
},
|
| 1132 |
+
{
|
| 1133 |
+
"type": "table",
|
| 1134 |
+
"img_path": "images/f813199f2f8e20095b9f39a6531a3bf9c2d0e408a497ec917b0357dbf986b2a8.jpg",
|
| 1135 |
+
"table_caption": [
|
| 1136 |
+
"Table 3: Results of movie rating prediction."
|
| 1137 |
+
],
|
| 1138 |
+
"table_footnote": [],
|
| 1139 |
+
"table_body": "<table><tr><td>Models</td><td>RMSE</td><td>MAE</td></tr><tr><td>MF</td><td>0.988</td><td>0.771</td></tr><tr><td>Item-KNN</td><td>0.933</td><td>0.734</td></tr><tr><td>CHAT-REC (gpt-3.5-turbo)</td><td>0.969</td><td>0.756</td></tr><tr><td>CHAT-REC (text-davinci-003)</td><td>0.785</td><td>0.593</td></tr><tr><td>CHAT-REC (text-davinci-002)</td><td>0.8309</td><td>0.6215</td></tr></table>",
|
| 1140 |
+
"bbox": [
|
| 1141 |
+
289,
|
| 1142 |
+
582,
|
| 1143 |
+
712,
|
| 1144 |
+
667
|
| 1145 |
+
],
|
| 1146 |
+
"page_idx": 11
|
| 1147 |
+
},
|
| 1148 |
+
{
|
| 1149 |
+
"type": "text",
|
| 1150 |
+
"text": "During experiment, we discovered that CHAT-REC's most important ability is to optimize the refined candidate set of the recommender system, meaning to resort the movies that the user may like but were placed further down in the recommender system's candidate set. This requires the application of LLMs' knowledge of movies, understanding of user preferences, and the ability to reason about the matching relationship between the two. To confirm this finding, we conducted separate empirical studies and asked LLMs again, in the same conversation, about movies that appeared in the recommender system's top 5 but did not appear in LLMs' top 5. LLMs' feedback revealed that it is unlikely that the user would like the movie or it is difficult to determine whether the user would like it, with clear reasons given. The inconsistent shows that CHAT-REC's",
|
| 1151 |
+
"bbox": [
|
| 1152 |
+
212,
|
| 1153 |
+
672,
|
| 1154 |
+
787,
|
| 1155 |
+
840
|
| 1156 |
+
],
|
| 1157 |
+
"page_idx": 11
|
| 1158 |
+
},
|
| 1159 |
+
{
|
| 1160 |
+
"type": "page_number",
|
| 1161 |
+
"text": "12",
|
| 1162 |
+
"bbox": [
|
| 1163 |
+
217,
|
| 1164 |
+
114,
|
| 1165 |
+
235,
|
| 1166 |
+
126
|
| 1167 |
+
],
|
| 1168 |
+
"page_idx": 11
|
| 1169 |
+
},
|
| 1170 |
+
{
|
| 1171 |
+
"type": "header",
|
| 1172 |
+
"text": "Gao et al.",
|
| 1173 |
+
"bbox": [
|
| 1174 |
+
271,
|
| 1175 |
+
114,
|
| 1176 |
+
339,
|
| 1177 |
+
126
|
| 1178 |
+
],
|
| 1179 |
+
"page_idx": 11
|
| 1180 |
+
},
|
| 1181 |
+
{
|
| 1182 |
+
"type": "text",
|
| 1183 |
+
"text": "recommendations are entirely based on an understanding of user preferences and movie information.",
|
| 1184 |
+
"bbox": [
|
| 1185 |
+
212,
|
| 1186 |
+
146,
|
| 1187 |
+
782,
|
| 1188 |
+
176
|
| 1189 |
+
],
|
| 1190 |
+
"page_idx": 12
|
| 1191 |
+
},
|
| 1192 |
+
{
|
| 1193 |
+
"type": "text",
|
| 1194 |
+
"text": "4.4 Ablation Study",
|
| 1195 |
+
"text_level": 1,
|
| 1196 |
+
"bbox": [
|
| 1197 |
+
214,
|
| 1198 |
+
200,
|
| 1199 |
+
390,
|
| 1200 |
+
217
|
| 1201 |
+
],
|
| 1202 |
+
"page_idx": 12
|
| 1203 |
+
},
|
| 1204 |
+
{
|
| 1205 |
+
"type": "text",
|
| 1206 |
+
"text": "In this study, we select the text-davinci-003 model, which achieved the best results in both top-k recommendation and rating prediction, to investigate the impact of different prompts and temperatures on the model's performance. The result is shown in Fig. 5.",
|
| 1207 |
+
"bbox": [
|
| 1208 |
+
212,
|
| 1209 |
+
228,
|
| 1210 |
+
787,
|
| 1211 |
+
289
|
| 1212 |
+
],
|
| 1213 |
+
"page_idx": 12
|
| 1214 |
+
},
|
| 1215 |
+
{
|
| 1216 |
+
"type": "image",
|
| 1217 |
+
"img_path": "images/e6f37b3a96a466d8f6e4ee1927a6acc701d3779720387e3a29e76a1a9d96602c.jpg",
|
| 1218 |
+
"image_caption": [
|
| 1219 |
+
"Fig. 5: Performance on different prompt and temperature."
|
| 1220 |
+
],
|
| 1221 |
+
"image_footnote": [],
|
| 1222 |
+
"bbox": [
|
| 1223 |
+
163,
|
| 1224 |
+
328,
|
| 1225 |
+
370,
|
| 1226 |
+
433
|
| 1227 |
+
],
|
| 1228 |
+
"page_idx": 12
|
| 1229 |
+
},
|
| 1230 |
+
{
|
| 1231 |
+
"type": "image",
|
| 1232 |
+
"img_path": "images/50e5ba3c88df0f4357b4c782f2d5e73be0291c0cbe8db625dba659d199b1018c.jpg",
|
| 1233 |
+
"image_caption": [],
|
| 1234 |
+
"image_footnote": [],
|
| 1235 |
+
"bbox": [
|
| 1236 |
+
385,
|
| 1237 |
+
327,
|
| 1238 |
+
594,
|
| 1239 |
+
431
|
| 1240 |
+
],
|
| 1241 |
+
"page_idx": 12
|
| 1242 |
+
},
|
| 1243 |
+
{
|
| 1244 |
+
"type": "image",
|
| 1245 |
+
"img_path": "images/33077c20540f466a91df89ac22545cc1c9dbf8044918cdce15172e0a7ace5acc.jpg",
|
| 1246 |
+
"image_caption": [],
|
| 1247 |
+
"image_footnote": [],
|
| 1248 |
+
"bbox": [
|
| 1249 |
+
616,
|
| 1250 |
+
325,
|
| 1251 |
+
821,
|
| 1252 |
+
431
|
| 1253 |
+
],
|
| 1254 |
+
"page_idx": 12
|
| 1255 |
+
},
|
| 1256 |
+
{
|
| 1257 |
+
"type": "text",
|
| 1258 |
+
"text": "In the context of this study, \"w/random\" refers to the random shuffling of the 20 candidate sets generated by the recommender system before being provided to LLM as the candidate set prompt input, while \"w/top1\" indicates that the top 1 recommendation is not given as the initial background knowledge when constructing the prompt, but instead directly asks LLM to select 5 movies from the candidate set. The temperature parameter affects the answer generated by LLM, with lower temperatures indicating more certain answers, and higher for more random answers. All experiments, except for the experiment with a temperature of 0, used the average of 5 tests.",
|
| 1259 |
+
"bbox": [
|
| 1260 |
+
212,
|
| 1261 |
+
491,
|
| 1262 |
+
787,
|
| 1263 |
+
627
|
| 1264 |
+
],
|
| 1265 |
+
"page_idx": 12
|
| 1266 |
+
},
|
| 1267 |
+
{
|
| 1268 |
+
"type": "text",
|
| 1269 |
+
"text": "The results demonstrate that the effect slightly decreased after the order of the candidate set was shuffled. For example, when the temperature is 0.9, the NDCG of text-davinci-003 decreased from 0.3802 to 0.3653, representing a decrease of $3.92\\%$ . The effect of CHAT-REC decreased significantly when the recommender system's top 1 was missing in the prompt. For instance, when the temperature is 0.9, the NDCG of text-davinci-003 decreased from 0.3802 to 0.3055, which is a decrease of $19.65\\%$ . This trend was observed at different temperatures, and the experiment showed that the best results could be achieved when the temperature was 0.9.",
|
| 1270 |
+
"bbox": [
|
| 1271 |
+
212,
|
| 1272 |
+
628,
|
| 1273 |
+
787,
|
| 1274 |
+
763
|
| 1275 |
+
],
|
| 1276 |
+
"page_idx": 12
|
| 1277 |
+
},
|
| 1278 |
+
{
|
| 1279 |
+
"type": "text",
|
| 1280 |
+
"text": "It is worth noting that the existence of the recommender system was not explicitly mentioned in CHAT-REC's prompt, and the function of the recommender system was merely to provide a candidate set. However, the design of the candidate set can significantly impact CHAT-REC's performance. Our experiment revealed that CHAT-REC's prompt design can effectively inject the recommender",
|
| 1281 |
+
"bbox": [
|
| 1282 |
+
212,
|
| 1283 |
+
765,
|
| 1284 |
+
787,
|
| 1285 |
+
839
|
| 1286 |
+
],
|
| 1287 |
+
"page_idx": 12
|
| 1288 |
+
},
|
| 1289 |
+
{
|
| 1290 |
+
"type": "header",
|
| 1291 |
+
"text": "Chat-REC: LLMs-Augmented Recommender System",
|
| 1292 |
+
"bbox": [
|
| 1293 |
+
377,
|
| 1294 |
+
114,
|
| 1295 |
+
730,
|
| 1296 |
+
128
|
| 1297 |
+
],
|
| 1298 |
+
"page_idx": 12
|
| 1299 |
+
},
|
| 1300 |
+
{
|
| 1301 |
+
"type": "page_number",
|
| 1302 |
+
"text": "13",
|
| 1303 |
+
"bbox": [
|
| 1304 |
+
767,
|
| 1305 |
+
114,
|
| 1306 |
+
785,
|
| 1307 |
+
126
|
| 1308 |
+
],
|
| 1309 |
+
"page_idx": 12
|
| 1310 |
+
},
|
| 1311 |
+
{
|
| 1312 |
+
"type": "text",
|
| 1313 |
+
"text": "system's knowledge implicitly into LLMs. This implicit knowledge is reflected in the ranking of movies in the candidate set, and the use of Top1 as the background can further strengthen this information. This implicit knowledge can be captured by LLMs in in-context learning and can enhance the recommendation performance.",
|
| 1314 |
+
"bbox": [
|
| 1315 |
+
212,
|
| 1316 |
+
146,
|
| 1317 |
+
787,
|
| 1318 |
+
223
|
| 1319 |
+
],
|
| 1320 |
+
"page_idx": 13
|
| 1321 |
+
},
|
| 1322 |
+
{
|
| 1323 |
+
"type": "text",
|
| 1324 |
+
"text": "5 Conclusion",
|
| 1325 |
+
"text_level": 1,
|
| 1326 |
+
"bbox": [
|
| 1327 |
+
215,
|
| 1328 |
+
247,
|
| 1329 |
+
359,
|
| 1330 |
+
263
|
| 1331 |
+
],
|
| 1332 |
+
"page_idx": 13
|
| 1333 |
+
},
|
| 1334 |
+
{
|
| 1335 |
+
"type": "text",
|
| 1336 |
+
"text": "In this paper, we present CHAT-REC which bridges recommender system and LLMs by converting user information and user-item interactions to prompt. We evaluated our approach in the task of top-k recommendation and zero-shot movie rating prediction. In conclusion, LLMs offer significant potential for enhancing recommender systems by improving interactivity explainability and cross-domain recommendation. In addition, prompt plays an important role, and experiments prove that implicitly expressing the knowledge in the recommender system in prompt can effectively improve the recommendation effect.",
|
| 1337 |
+
"bbox": [
|
| 1338 |
+
212,
|
| 1339 |
+
282,
|
| 1340 |
+
787,
|
| 1341 |
+
402
|
| 1342 |
+
],
|
| 1343 |
+
"page_idx": 13
|
| 1344 |
+
},
|
| 1345 |
+
{
|
| 1346 |
+
"type": "text",
|
| 1347 |
+
"text": "References",
|
| 1348 |
+
"text_level": 1,
|
| 1349 |
+
"bbox": [
|
| 1350 |
+
215,
|
| 1351 |
+
428,
|
| 1352 |
+
323,
|
| 1353 |
+
444
|
| 1354 |
+
],
|
| 1355 |
+
"page_idx": 13
|
| 1356 |
+
},
|
| 1357 |
+
{
|
| 1358 |
+
"type": "list",
|
| 1359 |
+
"sub_type": "ref_text",
|
| 1360 |
+
"list_items": [
|
| 1361 |
+
"1. Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J.D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al.: Language models are few-shot learners. Advances in neural information processing systems 33, 1877-1901 (2020)",
|
| 1362 |
+
"2. Chen, M., Tworek, J., Jun, H., Yuan, Q., Pinto, H.P.d.O., Kaplan, J., Edwards, H., Burda, Y., Joseph, N., Brockman, G., et al.: Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374 (2021)",
|
| 1363 |
+
"3. Chen, X., Chen, H., Xu, H., Zhang, Y., Cao, Y., Qin, Z., Zha, H.: Personalized fashion recommendation with visual explanations based on multimodal attention network: Towards visually explainable recommendation. In: Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval. pp. 765-774 (2019)",
|
| 1364 |
+
"4. Chowdhery, A., Narang, S., Devlin, J., Bosma, M., Mishra, G., Roberts, A., Barham, P., Chung, H.W., Sutton, C., Gehrmann, S., et al.: Palm: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311 (2022)",
|
| 1365 |
+
"5. Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)",
|
| 1366 |
+
"6. Fu, Yao; Peng, H., Khot, T.: How does gpt obtain its ability? tracing emergent abilities of language models to their sources. Yao Fu's Notion (Dec 2022), https://yaofu.notion.site/ How-does-GPT-Obtain-its-Ability-Tracing-Emergent-Abilities-of-Language-Models-to-their-Sources-b?",
|
| 1367 |
+
"7. Geng, S., Liu, S., Fu, Z., Ge, Y., Zhang, Y.: Recommendation as language processing (rlp): A unified pretrain, personalized prompt & predict paradigm (p5). In: Proceedings of the 16th ACM Conference on Recommender Systems. pp. 299-315 (2022)",
|
| 1368 |
+
"8. Khashabi, D., Min, S., Khot, T., Sabharwal, A., Tafjord, O., Clark, P., Hajishirzi, H.: Unifiedqa: Crossing format boundaries with a single qa system (2020)"
|
| 1369 |
+
],
|
| 1370 |
+
"bbox": [
|
| 1371 |
+
222,
|
| 1372 |
+
462,
|
| 1373 |
+
1000,
|
| 1374 |
+
839
|
| 1375 |
+
],
|
| 1376 |
+
"page_idx": 13
|
| 1377 |
+
},
|
| 1378 |
+
{
|
| 1379 |
+
"type": "page_number",
|
| 1380 |
+
"text": "14",
|
| 1381 |
+
"bbox": [
|
| 1382 |
+
217,
|
| 1383 |
+
114,
|
| 1384 |
+
235,
|
| 1385 |
+
126
|
| 1386 |
+
],
|
| 1387 |
+
"page_idx": 13
|
| 1388 |
+
},
|
| 1389 |
+
{
|
| 1390 |
+
"type": "header",
|
| 1391 |
+
"text": "Gao et al.",
|
| 1392 |
+
"bbox": [
|
| 1393 |
+
271,
|
| 1394 |
+
114,
|
| 1395 |
+
339,
|
| 1396 |
+
127
|
| 1397 |
+
],
|
| 1398 |
+
"page_idx": 13
|
| 1399 |
+
},
|
| 1400 |
+
{
|
| 1401 |
+
"type": "list",
|
| 1402 |
+
"sub_type": "ref_text",
|
| 1403 |
+
"list_items": [
|
| 1404 |
+
"9. LeCun, Y.: A path towards autonomous machine intelligence version 0.9.2, 2022-06-27. Open Review 62 (2022)",
|
| 1405 |
+
"10. Li, L., Zhang, Y., Chen, L.: Generate neural template explanations for recommendation. In: Proceedings of the 29th ACM International Conference on Information & Knowledge Management. pp. 755-764 (2020)",
|
| 1406 |
+
"11. Li, L., Zhang, Y., Chen, L.: Personalized transformer for explainable recommendation. arXiv preprint arXiv:2105.11601 (2021)",
|
| 1407 |
+
"12. Liu, P., Zhang, L., Gulla, J.A.: Pre-train, prompt and recommendation: A comprehensive survey of language modelling paradigm adaptations in recommender systems. arXiv preprint arXiv:2302.03735 (2023)",
|
| 1408 |
+
"13. Parisi, A., Zhao, Y., Fiedel, N.: Talm: Tool augmented language models (2022)",
|
| 1409 |
+
"14. Petroni, F., Rocktäschel, T., Lewis, P., Bakhtin, A., Wu, Y., Miller, A.H., Riedel, S.: Language models as knowledge bases? arXiv preprint arXiv:1909.01066 (2019)",
|
| 1410 |
+
"15. Schick, T., Dwivedi-Yu, J., Dessi, R., Raileanu, R., Lomeli, M., Zettlemoyer, L., Cancedda, N., Scialom, T.: Toolformer: Language models can teach themselves to use tools (2023)",
|
| 1411 |
+
"16. Shi, S., Zhang, M., Yu, X., Zhang, Y., Hao, B., Liu, Y., Ma, S.: Adaptive feature sampling for recommendation with missing content feature values. In: Proceedings of the 28th ACM International Conference on Information and Knowledge Management. pp. 1451-1460 (2019)",
|
| 1412 |
+
"17. Sun, C., Liu, H., Liu, M., Ren, Z., Gan, T., Nie, L.: Lara: Attribute-to-feature adversarial learning for new-item recommendation. In: Proceedings of the 13th international conference on web search and data mining. pp. 582-590 (2020)",
|
| 1413 |
+
"18. Touvron, H., Lavril, T., Izacard, G., Martinet, X., Lachaux, M.A., Lacroix, T., Rozière, B., Goyal, N., Hambro, E., Azhar, F., et al.: Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)",
|
| 1414 |
+
"19. Wei, J., Bosma, M., Zhao, V., Guu, K., Yu, A.W., Lester, B., Du, N., Dai, A.M., Le, Q.V.: Finetuned language models are zero-shot learners. ArXiv abs/2109.01652 (2021)",
|
| 1415 |
+
"20. Wei, J., Tay, Y., Bommasani, R., Raffel, C., Zoph, B., Borgeaud, S., Yogatama, D., Bosma, M., Zhou, D., Metzler, D., hsin Chi, E.H., Hashimoto, T., Vinyals, O., Liang, P., Dean, J., Fedus, W.: Emergent abilities of large language models. ArXiv abs/2206.07682 (2022)",
|
| 1416 |
+
"21. Wei, J., Wang, X., Schuurmans, D., Bosma, M., hsin Chi, E.H., Le, Q., Zhou, D.: Chain of thought prompting elicits reasoning in large language models. ArXiv abs/2201.11903 (2022)",
|
| 1417 |
+
"22. Xu, Y., Zhu, C., Xu, R., Liu, Y., Zeng, M., Huang, X.: Fusing context into knowledge graph for commonsense question answering (2021)",
|
| 1418 |
+
"23. Yu, W., Iter, D., Wang, S., Xu, Y., Ju, M., Sanyal, S., Zhu, C., Zeng, M., Jiang, M.: Generate rather than retrieve: Large language models are strong context generators (2023)",
|
| 1419 |
+
"24. Yuan, F., Zhang, G., Karatzoglou, A., Jose, J., Kong, B., Li, Y.: One person, one model, one world: Learning continual user representation without forgetting. In: Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval. pp. 696-705 (2021)",
|
| 1420 |
+
"25. Zhang, Y., Ding, H., Shui, Z., Ma, Y., Zou, J., Deoras, A., Wang, H.: Language models as recommender systems: Evaluations and limitations (2021)",
|
| 1421 |
+
"26. Zhu, F., Wang, Y., Chen, C., Zhou, J., Li, L., Liu, G.: Cross-domain recommendation: challenges, progress, and prospects. arXiv preprint arXiv:2103.01696 (2021)"
|
| 1422 |
+
],
|
| 1423 |
+
"bbox": [
|
| 1424 |
+
217,
|
| 1425 |
+
147,
|
| 1426 |
+
784,
|
| 1427 |
+
839
|
| 1428 |
+
],
|
| 1429 |
+
"page_idx": 14
|
| 1430 |
+
},
|
| 1431 |
+
{
|
| 1432 |
+
"type": "header",
|
| 1433 |
+
"text": "Chat-REC: LLMs-Augmented Recommender System",
|
| 1434 |
+
"bbox": [
|
| 1435 |
+
377,
|
| 1436 |
+
114,
|
| 1437 |
+
730,
|
| 1438 |
+
128
|
| 1439 |
+
],
|
| 1440 |
+
"page_idx": 14
|
| 1441 |
+
},
|
| 1442 |
+
{
|
| 1443 |
+
"type": "page_number",
|
| 1444 |
+
"text": "15",
|
| 1445 |
+
"bbox": [
|
| 1446 |
+
767,
|
| 1447 |
+
116,
|
| 1448 |
+
784,
|
| 1449 |
+
126
|
| 1450 |
+
],
|
| 1451 |
+
"page_idx": 14
|
| 1452 |
+
},
|
| 1453 |
+
{
|
| 1454 |
+
"type": "text",
|
| 1455 |
+
"text": "A Implementation Details",
|
| 1456 |
+
"text_level": 1,
|
| 1457 |
+
"bbox": [
|
| 1458 |
+
215,
|
| 1459 |
+
143,
|
| 1460 |
+
485,
|
| 1461 |
+
162
|
| 1462 |
+
],
|
| 1463 |
+
"page_idx": 15
|
| 1464 |
+
},
|
| 1465 |
+
{
|
| 1466 |
+
"type": "text",
|
| 1467 |
+
"text": "A.1 Prompts",
|
| 1468 |
+
"text_level": 1,
|
| 1469 |
+
"bbox": [
|
| 1470 |
+
215,
|
| 1471 |
+
172,
|
| 1472 |
+
339,
|
| 1473 |
+
188
|
| 1474 |
+
],
|
| 1475 |
+
"page_idx": 15
|
| 1476 |
+
},
|
| 1477 |
+
{
|
| 1478 |
+
"type": "text",
|
| 1479 |
+
"text": "Below, we list the prompts used in top-k recommendation and zero-shot movie rating tasks.",
|
| 1480 |
+
"bbox": [
|
| 1481 |
+
215,
|
| 1482 |
+
194,
|
| 1483 |
+
785,
|
| 1484 |
+
224
|
| 1485 |
+
],
|
| 1486 |
+
"page_idx": 15
|
| 1487 |
+
},
|
| 1488 |
+
{
|
| 1489 |
+
"type": "text",
|
| 1490 |
+
"text": "I want you to recommend movie to a user based on some personal information and historical records of film watching.",
|
| 1491 |
+
"bbox": [
|
| 1492 |
+
232,
|
| 1493 |
+
268,
|
| 1494 |
+
735,
|
| 1495 |
+
292
|
| 1496 |
+
],
|
| 1497 |
+
"page_idx": 15
|
| 1498 |
+
},
|
| 1499 |
+
{
|
| 1500 |
+
"type": "text",
|
| 1501 |
+
"text": "user profile:{user profile} (e.g.He is 24 years old, and work as technician.)",
|
| 1502 |
+
"bbox": [
|
| 1503 |
+
232,
|
| 1504 |
+
301,
|
| 1505 |
+
630,
|
| 1506 |
+
315
|
| 1507 |
+
],
|
| 1508 |
+
"page_idx": 15
|
| 1509 |
+
},
|
| 1510 |
+
{
|
| 1511 |
+
"type": "text",
|
| 1512 |
+
"text": "The historical records include the movie name,type and how many points he/she scored out of 5. The higher the score, the more he likes the movie. You are encouraged to learn his movie preferencen from the movies he have watched. Here are some examples:",
|
| 1513 |
+
"bbox": [
|
| 1514 |
+
232,
|
| 1515 |
+
323,
|
| 1516 |
+
759,
|
| 1517 |
+
359
|
| 1518 |
+
],
|
| 1519 |
+
"page_idx": 15
|
| 1520 |
+
},
|
| 1521 |
+
{
|
| 1522 |
+
"type": "text",
|
| 1523 |
+
"text": "{history/movie} (e.g. a Sci-Fi Thriller movie called Net, The (1995), and scored it a 3)",
|
| 1524 |
+
"bbox": [
|
| 1525 |
+
233,
|
| 1526 |
+
368,
|
| 1527 |
+
694,
|
| 1528 |
+
382
|
| 1529 |
+
],
|
| 1530 |
+
"page_idx": 15
|
| 1531 |
+
},
|
| 1532 |
+
{
|
| 1533 |
+
"type": "text",
|
| 1534 |
+
"text": "Here's a list of movies that he is likely to like: {candidate_list}",
|
| 1535 |
+
"bbox": [
|
| 1536 |
+
233,
|
| 1537 |
+
388,
|
| 1538 |
+
565,
|
| 1539 |
+
402
|
| 1540 |
+
],
|
| 1541 |
+
"page_idx": 15
|
| 1542 |
+
},
|
| 1543 |
+
{
|
| 1544 |
+
"type": "text",
|
| 1545 |
+
"text": "Please select top 5 movies in the list that is most likely to be liked. The first film to be selected is {top1/movie}. Please select the remaining 4 movies. Only Output the movie name.",
|
| 1546 |
+
"bbox": [
|
| 1547 |
+
232,
|
| 1548 |
+
412,
|
| 1549 |
+
751,
|
| 1550 |
+
436
|
| 1551 |
+
],
|
| 1552 |
+
"page_idx": 15
|
| 1553 |
+
},
|
| 1554 |
+
{
|
| 1555 |
+
"type": "text",
|
| 1556 |
+
"text": "Fig. 6: Prompt for top-k recommendation task.",
|
| 1557 |
+
"bbox": [
|
| 1558 |
+
330,
|
| 1559 |
+
464,
|
| 1560 |
+
669,
|
| 1561 |
+
479
|
| 1562 |
+
],
|
| 1563 |
+
"page_idx": 15
|
| 1564 |
+
},
|
| 1565 |
+
{
|
| 1566 |
+
"type": "text",
|
| 1567 |
+
"text": "I want you to act as a movie recommender. You task is to predict the user's rating of some movies out of 5 based on his profile and historical records of film watching.Clear scores must be given.",
|
| 1568 |
+
"bbox": [
|
| 1569 |
+
232,
|
| 1570 |
+
521,
|
| 1571 |
+
766,
|
| 1572 |
+
545
|
| 1573 |
+
],
|
| 1574 |
+
"page_idx": 15
|
| 1575 |
+
},
|
| 1576 |
+
{
|
| 1577 |
+
"type": "text",
|
| 1578 |
+
"text": "user profile:{user profile}",
|
| 1579 |
+
"bbox": [
|
| 1580 |
+
233,
|
| 1581 |
+
554,
|
| 1582 |
+
372,
|
| 1583 |
+
568
|
| 1584 |
+
],
|
| 1585 |
+
"page_idx": 15
|
| 1586 |
+
},
|
| 1587 |
+
{
|
| 1588 |
+
"type": "text",
|
| 1589 |
+
"text": "The historical records include the movie name and how many points he/she scored out of 5. The higher the score, the more he likes the movie. You are encouraged to learn his movie preferences from the movies he have watched.",
|
| 1590 |
+
"bbox": [
|
| 1591 |
+
232,
|
| 1592 |
+
577,
|
| 1593 |
+
761,
|
| 1594 |
+
611
|
| 1595 |
+
],
|
| 1596 |
+
"page_idx": 15
|
| 1597 |
+
},
|
| 1598 |
+
{
|
| 1599 |
+
"type": "text",
|
| 1600 |
+
"text": "{history/movie}",
|
| 1601 |
+
"bbox": [
|
| 1602 |
+
233,
|
| 1603 |
+
622,
|
| 1604 |
+
320,
|
| 1605 |
+
633
|
| 1606 |
+
],
|
| 1607 |
+
"page_idx": 15
|
| 1608 |
+
},
|
| 1609 |
+
{
|
| 1610 |
+
"type": "text",
|
| 1611 |
+
"text": "Here's a list of movies.:You are going to predict his ratings for these movies.The range of the score is 0-5. A definite value must be given. Seperate movie and rating by \"-\". Output should be formatted as :[movie]-[rating]",
|
| 1612 |
+
"bbox": [
|
| 1613 |
+
232,
|
| 1614 |
+
643,
|
| 1615 |
+
736,
|
| 1616 |
+
678
|
| 1617 |
+
],
|
| 1618 |
+
"page_idx": 15
|
| 1619 |
+
},
|
| 1620 |
+
{
|
| 1621 |
+
"type": "text",
|
| 1622 |
+
"text": "movie_list:{movie_list}",
|
| 1623 |
+
"bbox": [
|
| 1624 |
+
233,
|
| 1625 |
+
689,
|
| 1626 |
+
356,
|
| 1627 |
+
700
|
| 1628 |
+
],
|
| 1629 |
+
"page_idx": 15
|
| 1630 |
+
},
|
| 1631 |
+
{
|
| 1632 |
+
"type": "text",
|
| 1633 |
+
"text": "Fig. 7: Prompt for moving rating task.",
|
| 1634 |
+
"bbox": [
|
| 1635 |
+
361,
|
| 1636 |
+
724,
|
| 1637 |
+
638,
|
| 1638 |
+
739
|
| 1639 |
+
],
|
| 1640 |
+
"page_idx": 15
|
| 1641 |
+
},
|
| 1642 |
+
{
|
| 1643 |
+
"type": "text",
|
| 1644 |
+
"text": "A.2 Example Answers",
|
| 1645 |
+
"text_level": 1,
|
| 1646 |
+
"bbox": [
|
| 1647 |
+
215,
|
| 1648 |
+
787,
|
| 1649 |
+
415,
|
| 1650 |
+
803
|
| 1651 |
+
],
|
| 1652 |
+
"page_idx": 15
|
| 1653 |
+
},
|
| 1654 |
+
{
|
| 1655 |
+
"type": "text",
|
| 1656 |
+
"text": "In fact, the LLMs do not always output answers in the format we expect every time, especially at higher temperatures. In table 4, we give some failed cases",
|
| 1657 |
+
"bbox": [
|
| 1658 |
+
214,
|
| 1659 |
+
809,
|
| 1660 |
+
785,
|
| 1661 |
+
839
|
| 1662 |
+
],
|
| 1663 |
+
"page_idx": 15
|
| 1664 |
+
},
|
| 1665 |
+
{
|
| 1666 |
+
"type": "page_number",
|
| 1667 |
+
"text": "16",
|
| 1668 |
+
"bbox": [
|
| 1669 |
+
217,
|
| 1670 |
+
114,
|
| 1671 |
+
236,
|
| 1672 |
+
126
|
| 1673 |
+
],
|
| 1674 |
+
"page_idx": 15
|
| 1675 |
+
},
|
| 1676 |
+
{
|
| 1677 |
+
"type": "header",
|
| 1678 |
+
"text": "Gao et al.",
|
| 1679 |
+
"bbox": [
|
| 1680 |
+
271,
|
| 1681 |
+
114,
|
| 1682 |
+
339,
|
| 1683 |
+
127
|
| 1684 |
+
],
|
| 1685 |
+
"page_idx": 15
|
| 1686 |
+
},
|
| 1687 |
+
{
|
| 1688 |
+
"type": "text",
|
| 1689 |
+
"text": "while invoking LLMs' API to generate answers. During the experiment, output that does not match the format is automatically retried.",
|
| 1690 |
+
"bbox": [
|
| 1691 |
+
215,
|
| 1692 |
+
146,
|
| 1693 |
+
784,
|
| 1694 |
+
176
|
| 1695 |
+
],
|
| 1696 |
+
"page_idx": 16
|
| 1697 |
+
},
|
| 1698 |
+
{
|
| 1699 |
+
"type": "table",
|
| 1700 |
+
"img_path": "images/852e96518ea0768aec8571f9cd79fc992f3c9a91c4b8333b62f435440d300782.jpg",
|
| 1701 |
+
"table_caption": [
|
| 1702 |
+
"Table 4: Some cases and explanations that failed to generate canonical answers"
|
| 1703 |
+
],
|
| 1704 |
+
"table_footnote": [],
|
| 1705 |
+
"table_body": "<table><tr><td>Example</td><td>Explanation</td><td>Correct</td></tr><tr><td>... \nThe current list is: 1.Toy Story (1995) \n2.Fargo (1996) 3.Die Hard (1988) 4.Fish \nCalled Wanda, A (1988) 5. Wrong \nTrousers, The (1993)</td><td>The output conforms to the \nformatting requirements</td><td>✓</td></tr><tr><td>... \nThe current list is: 1.The Shawshank \nRedemption (1994) (It should be \n“Shawshank \nRedemption, The(1994)”) 2.A Fish Called \nWanda (1988) (It should be \n“Fish Called \nWanda, A (1988)”) ...</td><td>Failure to output film names \nin accordance with film in- \ndustry norms. such as “A” \nand “The” are not in the \nright place.</td><td>X</td></tr><tr><td>... \nThe current list is: 1.Toy Story (1995) \n2.Groundhog Day (1993) 3.Star Trek: The \nWrath of Khan (1982) 4.Fargo (1996)</td><td>Sometimes it can’t output a \nsufficient number of movies. \nIn this case, it only output 4 \nmovies while sometimes may \noutput 19 movies.</td><td>X</td></tr><tr><td>... The current list is: a:Star Wars (1977) \na:Raiders of the Lost Ark (1981) n:Back to \nthe Future (1985) m:Fargo (1996) ...</td><td>Sometimes the id infor- \nmation is lost when LLM \nis asked to output movies \nin the following format \n[id].[name].</td><td>X</td></tr></table>",
|
| 1706 |
+
"bbox": [
|
| 1707 |
+
215,
|
| 1708 |
+
229,
|
| 1709 |
+
789,
|
| 1710 |
+
559
|
| 1711 |
+
],
|
| 1712 |
+
"page_idx": 16
|
| 1713 |
+
},
|
| 1714 |
+
{
|
| 1715 |
+
"type": "header",
|
| 1716 |
+
"text": "Chat-REC: LLMs-Augmented Recommender System",
|
| 1717 |
+
"bbox": [
|
| 1718 |
+
377,
|
| 1719 |
+
114,
|
| 1720 |
+
730,
|
| 1721 |
+
128
|
| 1722 |
+
],
|
| 1723 |
+
"page_idx": 16
|
| 1724 |
+
},
|
| 1725 |
+
{
|
| 1726 |
+
"type": "page_number",
|
| 1727 |
+
"text": "17",
|
| 1728 |
+
"bbox": [
|
| 1729 |
+
767,
|
| 1730 |
+
116,
|
| 1731 |
+
784,
|
| 1732 |
+
126
|
| 1733 |
+
],
|
| 1734 |
+
"page_idx": 16
|
| 1735 |
+
}
|
| 1736 |
+
]
|
2303.14xxx/2303.14524/e5470f39-aaa6-4768-8235-0e7feb89df40_model.json
ADDED
|
@@ -0,0 +1,2192 @@
[
  [
    { "type": "aside_text", "bbox": [0.023, 0.28, 0.061, 0.701], "angle": 270, "content": "arXiv:2303.14524v2 [cs.IR] 4 Apr 2023" },
    { "type": "title", "bbox": [0.22, 0.141, 0.784, 0.187], "angle": 0, "content": "Chat-REC: Towards Interactive and Explainable LLMs-Augmented Recommender System" },
    { "type": "text", "bbox": [0.23, 0.212, 0.773, 0.244], "angle": 0, "content": "Yunfan Gao\\(^{1}\\), Tao Sheng\\(^{1}\\), Youlin Xiang\\(^{1}\\), Yun Xiong\\(^{1}\\), Haofen Wang\\(^{2}\\), and Jiawei Zhang\\(^{3}\\)" },
    { "type": "text", "bbox": [0.231, 0.254, 0.772, 0.283], "angle": 0, "content": "<sup>1</sup> Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University, Shanghai, China" },
    { "type": "text", "bbox": [0.434, 0.285, 0.569, 0.298], "angle": 0, "content": "yufan1602@163.com" },
    { "type": "text", "bbox": [0.42, 0.299, 0.584, 0.311], "angle": 0, "content": "tsheng16@fudan.edu.cn" },
    { "type": "text", "bbox": [0.401, 0.313, 0.603, 0.324], "angle": 0, "content": "21210240365@m.fudan.edu.cn" },
    { "type": "text", "bbox": [0.435, 0.327, 0.569, 0.338], "angle": 0, "content": "yunx@fudan.edu.cn" },
    { "type": "text", "bbox": [0.264, 0.339, 0.74, 0.353], "angle": 0, "content": "2 College of Design and Innovation, Tongji University, Shanghai, China" },
    { "type": "text", "bbox": [0.4, 0.355, 0.604, 0.366], "angle": 0, "content": "carter.whfcarter@gmail.com" },
    { "type": "text", "bbox": [0.227, 0.367, 0.777, 0.393], "angle": 0, "content": "<sup>3</sup> IFM Lab, Department of Computer Science, University of California, Davis, CA, USA" },
    { "type": "text", "bbox": [0.435, 0.396, 0.569, 0.408], "angle": 0, "content": "jiawei@ifmlab.org" },
    { "type": "text", "bbox": [0.261, 0.439, 0.747, 0.73], "angle": 0, "content": "Abstract. Large language models (LLMs) have demonstrated their significant potential to be applied for addressing various application tasks. However, traditional recommender systems continue to face great challenges such as poor interactivity and explainability, which actually also hinder their broad deployment in real-world systems. To address these limitations, this paper proposes a novel paradigm called CHAT-REC (ChatGPT Augmented Recommender System) that innovatively augments LLMs for building conversational recommender systems by converting user profiles and historical interactions into prompts. CHAT-REC is demonstrated to be effective in learning user preferences and establishing connections between users and products through in-context learning, which also makes the recommendation process more interactive and explainable. What's more, within the CHAT-REC framework, user's preferences can transfer to different products for cross-domain recommendations, and prompt-based injection of information into LLMs can also handle the cold-start scenarios with new items. In our experiments, CHAT-REC effectively improve the results of top-k recommendations and performs better in zero-shot rating prediction task. CHAT-REC offers a novel approach to improving recommender systems and presents new practical scenarios for the implementation of AIGC (AI generated content) in recommender system studies." },
    { "type": "text", "bbox": [0.262, 0.743, 0.702, 0.758], "angle": 0, "content": "Keywords: LLMs \\(\\cdot\\) Recommender System \\(\\cdot\\) Prompt Engineering" },
    { "type": "title", "bbox": [0.216, 0.781, 0.377, 0.797], "angle": 0, "content": "1 Introduction" },
    { "type": "text", "bbox": [0.214, 0.81, 0.787, 0.84], "angle": 0, "content": "With the scaling of model and corpus size, LLMs (Large Language Models) have shown remarkable capabilities, such as complex inference, knowledge infer-" }
  ],
  [
    { "type": "page_number", "bbox": [0.218, 0.116, 0.23, 0.127], "angle": 0, "content": "2" },
    { "type": "header", "bbox": [0.272, 0.115, 0.34, 0.128], "angle": 0, "content": "Gao et al." },
    { "type": "text", "bbox": [0.214, 0.147, 0.788, 0.314], "angle": 0, "content": "ence, and external robustness [4,6]. These capabilities, referred to as Emergent Abilities, only become apparent after reaching a specific threshold of model parameters [20]. The emergence of LLMs has brought about a paradigm shift in research. Previously, applying models to downstream tasks typically involved adjusting model parameters through backpropagation. However, the latest development of LLMs [18] has enabled both researchers and practitioners to facilitate learning during the forward process by constructing prompts, namely In-Context Learning (ICL) [1]. In addition, the adoption of techniques such as Chain-of-Thought [21] and Instruct Learning [19] has further harnessed the reasoning capabilities and task generalization abilities of LLMs, thereby promoting their application across various domains." },
    { "type": "text", "bbox": [0.214, 0.317, 0.788, 0.606], "angle": 0, "content": "In the era of big data, manual information searching has become infeasible and recommender systems have been widely deployed for automatically inferring people's preference and providing high-quality recommendation services. However, due to the great limitations and drawbacks in both model design and data distribution biases, most existing recommender systems still have great performance in their real-world deployment. One of the primary constraints is their poor interactivity, explainability, and lack of feedback mechanisms. Another limitation is the cold start problem, which makes it difficult to provide accurate recommendations for both new items and new users. Lastly, current recommender systems face challenges in making recommendations across multiple domains [26]. In many recommendation tasks, in order to obtain the required background or general knowledge, an external library or knowledge graph needs to be set up for retrieval [22] or multi-task learning needs to be trained on augmented data [8]. LLMs offer a promising solution to these challenges. They can generate more natural and explainable recommendations, solve the cold start problem, and make cross-domain recommendations. Additionally, LLMs have stronger interactivity and feedback mechanisms, which enhance the overall user experience. By leveraging internal knowledge, LLMs can improve the performance of recommender systems without relying on external retrievers [23]." },
    { "type": "text", "bbox": [0.214, 0.609, 0.788, 0.7], "angle": 0, "content": "Applying LLMs for addressing the recommendation tasks has received several preliminary research experimental trials already [12,7,25]. Recommender system tasks are formulated as prompt-based natural language tasks, where user-item information and corresponding features are integrated with personalized prompt templates as model inputs. However, in the current research, LLMs are still involved in training as part of the model." },
    { "type": "text", "bbox": [0.214, 0.705, 0.788, 0.842], "angle": 0, "content": "In this paper, we introduce a novel approach to learning conversational recommender systems augmented by LLMs, which possess both interactive and explainable capabilities. We present a paradigm called CHAT-REC (ChatGPT Augmented Recommender System) that does not require training and instead relies solely on in-context learning, resulting in more efficient and effective outcomes. With LLM-enhanced recommender system, it is beneficial to learn users' preferences during the conversation. After each step of the conversation, the user's preferences can be further drilled down to update the candidate recommendation results. In addition, users' preferences between products are linked," }
  ],
  [
    { "type": "header", "bbox": [0.379, 0.115, 0.732, 0.131], "angle": 0, "content": "Chat-REC: LLMs-Augmented Recommender System" },
    { "type": "page_number", "bbox": [0.775, 0.117, 0.785, 0.127], "angle": 0, "content": "3" },
    { "type": "text", "bbox": [0.214, 0.147, 0.788, 0.223], "angle": 0, "content": "allowing for better cross-domain product recommendations. We conducted recommendation and rating tests on real-world datasets and experimental results show that Chat-REC achieves significant improvements. Chat-REC sheds light on a promising technical route for the application of conversation AI such as ChatGPT in multiple recommendation scenarios." },
    { "type": "text", "bbox": [0.239, 0.223, 0.569, 0.238], "angle": 0, "content": "Our contributions are summarized as follows:" },
    { "type": "text", "bbox": [0.226, 0.248, 0.784, 0.291], "angle": 0, "content": "- We introduce a novel and effective paradigm called CHAT-REC, which combines traditional recommender systems with LLMs through prompts, leveraging LLMs' ability to learn from context." },
    { "type": "text", "bbox": [0.225, 0.294, 0.785, 0.322], "angle": 0, "content": "- CHAT-REC employs LLMs as a recommender system interface, enabling multi-round recommendations, enhancing interactivity and explainability." },
    { "type": "text", "bbox": [0.225, 0.324, 0.784, 0.366], "angle": 0, "content": "- We evaluate our method on real-world datasets for top-k recommendation and rating prediction tasks and experiments demonstrate the effectiveness of CHAT-REC." },
    { "type": "list", "bbox": [0.225, 0.248, 0.785, 0.366], "angle": 0, "content": null },
    { "type": "title", "bbox": [0.216, 0.393, 0.388, 0.409], "angle": 0, "content": "2 Related Work" },
    { "type": "title", "bbox": [0.216, 0.426, 0.506, 0.441], "angle": 0, "content": "2.1 Augmented Language Models" },
    { "type": "text", "bbox": [0.214, 0.451, 0.788, 0.693], "angle": 0, "content": "Augmented Language Models (ALMs) are a new research direction that aims to overcome the limitations of traditional Language Models (LMs) [5,1,4] by equipping them with reasoning skills and the ability to use external tools, which has served millions of users, such as the coding assistant Copilot [2], or more recently ChatGPT based on GPT3.5 and \\(\\mathrm{GPT4^4}\\). Reasoning is defined as breaking down complex tasks into simpler subtasks that the LM can solve more easily by itself or with the help of tools [9,15,13], while tools are external modules that the LM can call to augment its context. ALMs can use these augmentations separately or in combination to expand their context processing ability and outperform most regular LMs on several benchmarks. ALMs can learn to reason, use tools, and even act, while still performing standard natural language tasks. This new research direction has the potential to address common limitations of traditional LMs such as interpretability, consistency, and scalability issues. By jointly discussing reasoning and tools, and tools and actions, ALMs can solve a broad range of complex tasks without heuristics, thus offering better generalization capabilities." },
    { "type": "title", "bbox": [0.216, 0.715, 0.48, 0.73], "angle": 0, "content": "2.2 NLP for Recommendation" },
    { "type": "text", "bbox": [0.214, 0.741, 0.787, 0.817], "angle": 0, "content": "The field of recommender systems has had a long-standing relationship with natural language processing (NLP) techniques, especially when pre-trained language models (PLMs) comes out, which improve the performance of recommender systems and explainability [3,10,11]. PLMs are language models that have learned universal representations on large corpora in a self-supervised manner, and the" },
    { "type": "page_footnote", "bbox": [0.218, 0.825, 0.465, 0.841], "angle": 0, "content": "4 https://openai.com/blog/chatgpt/" }
  ],
  [
    { "type": "page_number", "bbox": [0.218, 0.116, 0.23, 0.127], "angle": 0, "content": "4" },
    { "type": "header", "bbox": [0.272, 0.115, 0.34, 0.128], "angle": 0, "content": "Gao et al." },
    { "type": "text", "bbox": [0.214, 0.147, 0.788, 0.314], "angle": 0, "content": "learned representations can be beneficial to a series of downstream NLP tasks. In the recommendation domain, PLMs can help alleviate the data sparsity issue, which is a major performance bottleneck of current deep recommendation models. By extracting and transferring knowledge from pre-trained models learned by different PLM-related training paradigms, researchers aim to improve recommendation performance from various perspectives, such as generality, sparsity, efficiency, and effectiveness. In this vibrant field, there are open issues and future research directions that need to be explored, including the connection between PLM-based training paradigms and different input data types for recommender systems. Overall, adapting language modelling paradigms for recommendation is seen as a promising direction in both academia and industry." },
    { "type": "title", "bbox": [0.215, 0.334, 0.496, 0.348], "angle": 0, "content": "2.3 Cold-start Recommendation" },
    { "type": "text", "bbox": [0.214, 0.357, 0.789, 0.584], "angle": 0, "content": "Cold start recommendation is a problem that arises in recommender systems when users or items have no prior interaction records with the system. This means that there is no data available for the system to make personalized recommendations. To address this issue, solutions have been proposed that either learn to model content features [16] or transfer representations from auxiliary domains [24,26]. The former approach focuses on learning about the characteristics of the items or users based on their content, such as text, images, or metadata. The latter approach involves leveraging information from other domains, such as social networks or product descriptions, to infer user preferences. Additionally, there are approaches that aim to quickly adapt to new domains instead of only providing recommendations for cold-start cases. A good generalization ability of recommendation models on startup cases is essential to ensure a better user experience and increased engagement. In our work, we use the reasoning and background knowledge of LLMs to enhance the performance of recommender systems for cold start scenarios." },
    { "type": "title", "bbox": [0.215, 0.605, 0.331, 0.619], "angle": 0, "content": "3 Method" },
    { "type": "title", "bbox": [0.215, 0.621, 0.605, 0.637], "angle": 0, "content": "3.1 Bridge Recommender Systems and LLMs" },
    { "type": "text", "bbox": [0.214, 0.644, 0.787, 0.719], "angle": 0, "content": "Recommender systems are designed to suggest items to users based on their preferences and behavior. Traditionally, these systems have relied on user data such as clickstream and purchase history to make recommendations. However, NLP techniques have proven to be valuable in expanding the scope of recommender systems beyond traditional user data." },
    { "type": "text", "bbox": [0.214, 0.72, 0.787, 0.78], "angle": 0, "content": "NLP techniques can be used to analyze user-generated content such as reviews and social media posts to gain insights into user preferences and interests. LLMs can also be used to generate natural language responses to user queries, improving the overall user experience and engagement." },
    { "type": "text", "bbox": [0.214, 0.78, 0.787, 0.842], "angle": 0, "content": "To bridge recommender systems and LLMs, we propose an enhanced recommender system module based on ChatGPT, a large language model trained by OpenAI. As the Fig. 1 shows, the module takes as input user-item history interactions, user profile, user query \\( Q_{i} \\), and history of dialogue \\( H_{< i} \\) (if available," }
  ],
  [
    { "type": "header", "bbox": [0.379, 0.115, 0.732, 0.129], "angle": 0, "content": "Chat-REC: LLMs-Augmented Recommender System" },
    { "type": "page_number", "bbox": [0.775, 0.117, 0.785, 0.127], "angle": 0, "content": "5" },
    { "type": "image", "bbox": [0.222, 0.147, 0.782, 0.339], "angle": 0, "content": null },
    { "type": "image_caption", "bbox": [0.214, 0.354, 0.788, 0.415], "angle": 0, "content": "Fig.1: Overview of CHAT-REC. The left side shows a dialogue between a user and ChatGPT. The middle side shows the flowchart to how CHAT-REC links traditional recommender systems with conversational AI such as ChatGPT. The right side describes the specific judgment in the process." },
    { "type": "text", "bbox": [0.214, 0.447, 0.788, 0.522], "angle": 0, "content": "and the notation \\( < i \\) denotes the dialogue history prior to the current query), and interfaces with any recommender system R. If the task is determined to be a recommendation task, the module uses R to generate a candidate set of items. Otherwise, it directly outputs a response to the user, such as an explanation of a generation task or a request for item details." },
    { "type": "text", "bbox": [0.214, 0.523, 0.787, 0.568], "angle": 0, "content": "The prompt constructor module in the enhanced recommender system takes multiple inputs to generate a natural language paragraph that captures the user's query and recommendation information. The inputs are as follows:" },
    { "type": "text", "bbox": [0.226, 0.583, 0.785, 0.641], "angle": 0, "content": "- User-item history interactions, which refers to the user's past interactions with items, such as items they have clicked, purchased, or rated. This information is used to understand the user's preferences and to personalize the recommendation." },
    { "type": "text", "bbox": [0.226, 0.644, 0.785, 0.688], "angle": 0, "content": "- User profile, which contains demographic and preference information about the user. This may include age, gender, location, and interests. The user profile helps the system understand the user's characteristics and preferences." },
    { "type": "text", "bbox": [0.226, 0.69, 0.785, 0.735], "angle": 0, "content": "- User query \\( Q_{i} \\), which is the user's specific request for information or recommendation. This may include a specific item or genre they are interested in, or a more general request for recommendations in a particular category." },
    { "type": "text", "bbox": [0.226, 0.736, 0.785, 0.781], "angle": 0, "content": "- History of dialogue \\( H_{<i} \\), which contains the previous conversation between the user and the system. This information is used to understand the context of the user's query and to provide a more personalized and relevant response." },
    { "type": "list", "bbox": [0.226, 0.583, 0.785, 0.781], "angle": 0, "content": null },
    { "type": "text", "bbox": [0.214, 0.795, 0.787, 0.84], "angle": 0, "content": "As shown in Fig. 2, the CHAT-REC framework proposed in this paper empower recommender systems with the conversational interface, which makes the interactive and explainable recommendation possible. Formally, based on the" }
  ],
  [
    { "type": "page_number", "bbox": [0.218, 0.116, 0.23, 0.127], "angle": 0, "content": "6" },
    { "type": "header", "bbox": [0.272, 0.115, 0.341, 0.128], "angle": 0, "content": "Gao et al." },
    { "type": "text", "bbox": [0.214, 0.147, 0.788, 0.253], "angle": 0, "content": "aforementioned inputs, the prompt constructor module generates a natural language paragraph that summarizes the user's query and recommendation information, and provides a more personalized and relevant response to the user's request. The intermediate answer generated by the recommender system is then used to refine the prompt constructor and generate an optimized prompt to further compress and refine the candidate set. The resulting recommendation and a brief explanation are output to the user." },
    { "type": "text", "bbox": [0.214, 0.253, 0.788, 0.344], "angle": 0, "content": "For example, in the first round of Q&A, the user requests action movies. The system determines that a recommendation task is needed, and executes the Recommend Action Movies module using the input information. The intermediate answer \\( A_{1} \\) contains the top-20 results, which are then reranked and adjusted in the second module using the input information to generate the final output of the top-5 results." },
    { "type": "text", "bbox": [0.214, 0.344, 0.79, 0.45], "angle": 0, "content": "In the second round of Q&A, the user asks why the movie \"Cargo\" was recommended. The system determines that no recommendation task is needed and instead executes the explanation for the recommendation module, using the movie title, history interaction, and user profile as inputs. The answer \\( A_{2} \\) is then generated, which provides a brief explanation of the recommendation, including information about the user's general interests and the specific characteristics of the movie that may be appealing to the user." },
    { "type": "title", "bbox": [0.215, 0.47, 0.722, 0.485], "angle": 0, "content": "3.2 Recommendation Based on Candidate Set Compression" },
    { "type": "text", "bbox": [0.214, 0.493, 0.788, 0.569], "angle": 0, "content": "Traditional recommender systems typically generate a small number of sorted candidate products, each with a score that reflects the system's recommendation confidence or result quality. However, considering the huge size of the product set, the performance obtained by most existing recommender systems are all way far from satisfactory, which still have a very large room for improvement." },
    { "type": "text", "bbox": [0.214, 0.569, 0.788, 0.765], "angle": 0, "content": "This article proposes a method of using LLMs to improve the performance of recommender systems by narrowing down the candidate set. The recommender system generates a large set of candidate items, which can be overwhelming for the user. LLMs play several different critical roles in narrowing down the product candidate set within the system. Firstly, we convert users' profiles and historical interactions into prompts, including the item description and user rating. Secondly, LLMs are asked to summarize user preferences for items in a domain based on the above information. LLMs can learn from context and effectively capture users' background information and preferences. With this information, they can establish the relationship between product attributes and user preferences, enabling them to make better product recommendations. By utilizing in-context learning, LLMs can enhance their recommendation reasoning ability, resulting in more accurate and personalized product recommendations." },
    { "type": "text", "bbox": [0.214, 0.766, 0.788, 0.84], "angle": 0, "content": "Once the LLMs have learned the user's preferences, the candidate set generated by the recommender system is provided to the LLMs. The LLMs can further filter and sort the candidate set based on the user's preferences. This approach ensures that the user is presented with a smaller, more relevant set of items, increasing the likelihood that they will find something they like." }
  ],
  [
    { "type": "header", "bbox": [0.379, 0.115, 0.732, 0.13], "angle": 0, "content": "Chat-REC: LLMs-Augmented Recommender System" },
    { "type": "page_number", "bbox": [0.775, 0.117, 0.785, 0.127], "angle": 0, "content": "7" },
    { "type": "image", "bbox": [0.174, 0.154, 0.827, 0.758], "angle": 0, "content": null },
    { "type": "image_caption", "bbox": [0.214, 0.776, 0.788, 0.928], "angle": 0, "content": "Fig.2: Case study of interactive recommendation. It shows two conversations between different users and LLM. Where the user profile and historical users are converted into corresponding prompts for personalized recommendations, but the input of this part of the prompts is not visible to the user. The dialogue on the left shows that when a user asks why the movie was recommended, LLM can give an explanation based on the user's preferences and specific information about the recommended movie. The dialog on the right shows that CHAT-REC can make multiple rounds of recommendations based on user feedback. Questions about the details of the movie can also be answered in a specific way. LLM also takes into account ethical and moral issues when recommending movies." }
  ],
  [
    { "type": "page_number", "bbox": [0.218, 0.116, 0.23, 0.127], "angle": 0, "content": "8" },
    { "type": "header", "bbox": [0.272, 0.115, 0.341, 0.128], "angle": 0, "content": "Gao et al." },
    { "type": "title", "bbox": [0.216, 0.147, 0.504, 0.162], "angle": 0, "content": "3.3 Cold-start Recommendations" },
    { "type": "text", "bbox": [0.214, 0.177, 0.785, 0.237], "angle": 0, "content": "With the textual description and profile information about the products, regardless the new products or the old ones, LLMs can effectively relate such products with each other, which provides us with the opportunity for solving the persistent cold-start recommendation problem once and for all." },
    { "type": "text", "bbox": [0.214, 0.238, 0.787, 0.328], "angle": 0, "content": "For example, if a user asks for recommendations for a new movie that was released in 2021, the recommender system could use text data about the movie to generate an embedding and then calculate similarities to other movies in the system to make recommendations. This capability allows recommender systems to make relevant and accurate recommendations for new items, improving the overall user experience." },
    { "type": "text", "bbox": [0.214, 0.33, 0.788, 0.571], "angle": 0, "content": "Large language models can use the vast amount of knowledge they contain to help recommender systems alleviate the cold-start problem of new items, i.e., recommending items that lack a large number of user interactions. However, since the knowledge held by ChatGPT is limited to September 2021, ChatGPT does not cope well when encountering unknown items, such as a user requesting to recommend some new movies released in 2023 or content related to a movie that ChatGPT is not aware of, as shown in the top part of Fig. 3. To address this issue, we introduce external information about new items, utilizing large language models to generate corresponding embedding representations and cache them. When encountering new item recommendations, we calculate the similarity between item embeddings and embeddings of user requests and preferences, then retrieve the most relevant item information based on the similarity and construct a prompt to input to ChatGPT for recommendation, as illustrated in the lower half of Fig. 3. This approach allows the recommender system to work in conjunction with ChatGPT to better recommend new items, thus enhancing the user experience." },
    { "type": "title", "bbox": [0.216, 0.598, 0.536, 0.613], "angle": 0, "content": "3.4 Cross-Domain Recommendations" },
    { "type": "text", "bbox": [0.214, 0.627, 0.787, 0.717], "angle": 0, "content": "The LLMs-augmented recommender system introduced above can be used to address several challenging tasks, that are hard or even impossible to be addressed with conventional recommender systems, such as cross-domain recommendation [26] and cold-start recommendation [17]. In this part, we will first talk about how to use the LLMs-augmented recommender system for the cross-domain recommendation." },
    { "type": "text", "bbox": [0.214, 0.719, 0.787, 0.794], "angle": 0, "content": "LLMs pre-trained with information across the Internet actually can serve as the multi-perspective knowledge base [14]. Besides the target product in one domain, such as movies, the LLMs not only has a broad knowledge about products many other domains, like music and books, but also understands the relations among the products across the domains mentioned above." },
    { "type": "text", "bbox": [0.214, 0.796, 0.787, 0.842], "angle": 0, "content": "For example, as illustrated in Fig. 4, once the conversation regarding movie recommendations is finished, the user inquires LLM for suggestions on other types of works. LLM then proceeds to recommend a variety of options, such as" }
  ],
  [
    { "type": "header", "bbox": [0.379, 0.115, 0.732, 0.129], "angle": 0, "content": "Chat-REC: LLMs-Augmented Recommender System" },
    { "type": "page_number", "bbox": [0.775, 0.117, 0.785, 0.127], "angle": 0, "content": "9" },
    { "type": "image", "bbox": [0.223, 0.148, 0.78, 0.755], "angle": 0, "content": null },
    { "type": "image_caption", "bbox": [0.214, 0.765, 0.788, 0.856], "angle": 0, "content": "Fig. 3: Case Study of New Item Recommendation. The top shows that ChatGPT is unable to recommend new items beyond the timeframe of its training data. The middle part demonstrates the process of how to utilize external information about new items to enable ChatGPT to handle recommendations for new items. The bottom shows that ChatGPT can effectively handle recommendations for new items after incorporating external information." }
  ],
  [
    { "type": "page_number", "bbox": [0.218, 0.116, 0.236, 0.127], "angle": 0, "content": "10" },
    { "type": "header", "bbox": [0.272, 0.115, 0.34, 0.127], "angle": 0, "content": "Gao et al." },
    { "type": "image", "bbox": [0.219, 0.145, 0.782, 0.393], "angle": 0, "content": null },
    { "type": "image_caption", "bbox": [0.214, 0.407, 0.789, 0.498], "angle": 0, "content": "Fig. 4: Case study of cross-domain recommendation. After the conversation about the movie's recommendation is completed. The user asks LLM to recommend works other than movies. It can be seen that LLM recommends different types of works, including books, TV series Podcasts and video games, according to the user's movie preferences. This shows that LLM can migrate the user's movie preferences to items and thus achieve cross-domain recommendations." },
    { "type": "text", "bbox": [0.214, 0.528, 0.789, 0.604], "angle": 0, "content": "books, TV series, podcasts, and video games, based on the user's movie preferences. This demonstrates LLM's ability to transfer the user's preferences from movies to other items, resulting in cross-domain recommendations. This cross-domain recommendation capability has the potential to significantly expand the scope and relevance of recommender systems." },
    { "type": "title", "bbox": [0.216, 0.629, 0.367, 0.646], "angle": 0, "content": "4 Experiment" },
    { "type": "title", "bbox": [0.216, 0.662, 0.553, 0.679], "angle": 0, "content": "4.1 Dataset and Experimental Settings" },
    { "type": "text", "bbox": [0.214, 0.689, 0.789, 0.809], "angle": 0, "content": "The dataset used in our experiment is MovieLens 100K, which is a benchmark dataset of a real-world recommender system. It comprises 100,000 movie ratings provided by 943 users on a scale of 1 to 5 across 1,682 movies. Additionally, the dataset contains demographic information about the users, such as age, gender, occupation, and zip code, as well as movie information, such as title, release year, and genres. To create our experimental dataset, we randomly selected 200 users. Table 1 provides detailed statistical information about the dataset used in the experiment." },
    { "type": "text", "bbox": [0.214, 0.811, 0.789, 0.842], "angle": 0, "content": "When evaluating the performance of top-k recommendations, Precision, Recall, and Normalized Discounted Cumulative Gain (NDCG) are used. For rating" }
  ],
  [
    { "type": "header", "bbox": [0.379, 0.115, 0.732, 0.129], "angle": 0, "content": "Chat-REC: LLMs-Augmented Recommender System" },
    { "type": "page_number", "bbox": [0.768, 0.116, 0.784, 0.127], "angle": 0, "content": "11" },
    { "type": "table_caption", "bbox": [0.318, 0.158, 0.682, 0.171], "angle": 0, "content": "Table 1: Details of the dataset used for evaluation." },
    { "type": "table", "bbox": [0.259, 0.172, 0.74, 0.204], "angle": 0, "content": "<table><tr><td>Dataset</td><td>Users</td><td>Items</td><td>Ratings</td><td>Rating Scale</td><td>Density</td></tr><tr><td>MovieLens 100K</td><td>943</td><td>1,682</td><td>100,000</td><td>[1-5]</td><td>6.304%</td></tr></table>" },
    { "type": "text", "bbox": [0.214, 0.229, 0.784, 0.26], "angle": 0, "content": "prediction task, the Root Mean Squared Error (RMSE) and Mean Absolute Error (MAE) are employed as evaluation metrics." },
    { "type": "title", "bbox": [0.215, 0.279, 0.341, 0.293], "angle": 0, "content": "4.2 Baselines" },
    { "type": "text", "bbox": [0.214, 0.301, 0.785, 0.36], "angle": 0, "content": "The baseline methods studied in the experiment include both classic recommender system models and the LLMs-augmented recommender systems proposed in this paper. Detailed information about the comparison methods studied in our experiments are provided as follows:" },
    { "type": "text", "bbox": [0.226, 0.369, 0.784, 0.397], "angle": 0, "content": "- LightFM is a recommendation algorithm that combines collaborative filtering and content-based methods to recommend items to users." },
    { "type": "text", "bbox": [0.226, 0.399, 0.785, 0.442], "angle": 0, "content": "- LightGCN is a graph-based collaborative filtering algorithm that uses a simplified graph convolutional network (GCN) to model the user-item interactions in a recommender system." },
    { "type": "text", "bbox": [0.225, 0.443, 0.784, 0.471], "angle": 0, "content": "- Item-KNN is a neighborhood-based collaborative filtering algorithm that uses the similarity between items to make recommendations to users." },
    { "type": "text", "bbox": [0.225, 0.473, 0.784, 0.516], "angle": 0, "content": "- Matrix Factorization (MF) is a widely used collaborative filtering algorithm that represents users and items as latent factors in a low-dimensional space." },
    { "type": "list", "bbox": [0.225, 0.369, 0.785, 0.516], "angle": 0, "content": null },
    { "type": "text", "bbox": [0.214, 0.526, 0.784, 0.555], "angle": 0, "content": "We select three representative models from the GPT-3 and GPT-3.5 series as LLMs in CHAT-REC:" },
    { "type": "text", "bbox": [0.225, 0.562, 0.784, 0.577], "angle": 0, "content": "- gpt-3.5-turbo is the most capable GPT-3.5 model and optimized for chat." },
    { "type": "text", "bbox": [0.225, 0.578, 0.784, 0.605], "angle": 0, "content": "- text-davinci-003 can do any language task with better quality, longer output, and consistent instruction-following." },
    { "type": "text", "bbox": [0.225, 0.607, 0.784, 0.635], "angle": 0, "content": "- text-davinci-002 is similar to text-davinci-003 but is trained with supervised fine-tuning instead of reinforcement learning." },
    { "type": "list", "bbox": [0.225, 0.562, 0.784, 0.635], "angle": 0, "content": null },
    { "type": "text", "bbox": [0.214, 0.643, 0.784, 0.673], "angle": 0, "content": "The model notations, like CHAT-REC (gpt-3.5-turbo), denote the CHAT-REC framework built by adopting \"gpt-3.5-turbo\" as the backbone model." },
    { "type": "title", "bbox": [0.215, 0.693, 0.428, 0.708], "angle": 0, "content": "4.3 Result and Analysis" },
    { "type": "text", "bbox": [0.214, 0.714, 0.785, 0.851], "angle": 0, "content": "Top-5 Recommendation. As presented in Table 2, our proposed CHAT-REC framework has demonstrated effective improvement of traditional recommender systems in the top-k recommendation task. The NDCG scores of all three GPT-3.5 models surpassed that of LightGCN, with text-davinci-003 delivering the best result and demonstrating strong contextual learning abilities. Specifically, the precision score of 0.3240 is \\(6.93\\%\\) higher than that of LightGCN, while NDCG score of 0.3802 is \\(11.01\\%\\) higher. However, the recall rate of 0.1404 is slightly lower than that of LightGCN by \\(3.51\\%\\). It is noteworthy that the performance of gpt-3.5-turbo was slightly weaker than that of text-davinci-002." }
  ],
  [
    { "type": "page_number", "bbox": [0.218, 0.116, 0.236, 0.127], "angle": 0, "content": "12" },
    { "type": "header", "bbox": [0.272, 0.115, 0.34, 0.127], "angle": 0, "content": "Gao et al." },
    { "type": "table_caption", "bbox": [0.348, 0.158, 0.654, 0.171], "angle": 0, "content": "Table 2: Results of top-5 recommendation." },
    { "type": "table", "bbox": [0.218, 0.173, 0.798, 0.26], "angle": 0, "content": "<table><tr><td>Models</td><td>Precision</td><td>Recall</td><td>NDCG</td></tr><tr><td>LightFM</td><td>0.2830</td><td>0.1410</td><td>0.2846</td></tr><tr><td>LightGCN</td><td>0.3030</td><td>0.1455</td><td>0.3425</td></tr><tr><td>CHAT-REC (gpt-3.5-turbo)</td><td>0.3103</td><td>0.1279</td><td>0.3696</td></tr><tr><td>CHAT-REC (text-davinci-003)</td><td>0.3240 (+6.93%)</td><td>0.1404 (-3.51%)</td><td>0.3802 (+11.01%)</td></tr><tr><td>CHAT-REC (text-davinci-002)</td><td>0.3031</td><td>0.1240</td><td>0.3629</td></tr></table>" },
    { "type": "text", "bbox": [0.214, 0.285, 0.789, 0.557], "angle": 0, "content": "Rating Prediction As illustrated in the Table3, CHAT-REC outperforms traditional recommender systems in predicting movie ratings. The experimental results demonstrate that LLMs can effectively learn user preferences from user portraits and historical interactions through in-context learning, without any explicit training, and accurately predict user ratings for candidate movies. Since LightGCN is not well-suited for rating prediction tasks, it was excluded from our experimental range. Among the three GPT-3.5 models tested, text-davinci-003 achieved the best result, with an RMSE of 0.785, which is \\(15.86\\%\\) higher than that of Item-KNN, and an MAE of 0.593, which is \\(19.21\\%\\) higher. Text-davinci-002 came in second place. However, the performance of gpt-3.5-turbo was slightly weaker than that of Item-KNN. The experimental results reveal that even without relying on recommender systems, LLMs can achieve better results in predicting user preferences for specific movies. The weaker performance of gpt-3.5-turbo is due to the model's emphasis on the ability of human-computer dialogue and its trade-off of the in-context learning abilities, which is consistent with other research conclusions. Additionally, it also can be concluded that the performance of gpt-3.5-turbo in numerical prediction tasks is weaker than that of text-davinci-003 and text-davinci-002." },
    { "type": "table_caption", "bbox": [0.344, 0.568, 0.658, 0.582], "angle": 0, "content": "Table 3: Results of movie rating prediction." },
    { "type": "table", "bbox": [0.29, 0.583, 0.714, 0.669], "angle": 0, "content": "<table><tr><td>Models</td><td>RMSE</td><td>MAE</td></tr><tr><td>MF</td><td>0.988</td><td>0.771</td></tr><tr><td>Item-KNN</td><td>0.933</td><td>0.734</td></tr><tr><td>CHAT-REC (gpt-3.5-turbo)</td><td>0.969</td><td>0.756</td></tr><tr><td>CHAT-REC (text-davinci-003)</td><td>0.785</td><td>0.593</td></tr><tr><td>CHAT-REC (text-davinci-002)</td><td>0.8309</td><td>0.6215</td></tr></table>" },
    { "type": "text", "bbox": [0.214, 0.674, 0.789, 0.841], "angle": 0, "content": "During experiment, we discovered that CHAT-REC's most important ability is to optimize the refined candidate set of the recommender system, meaning to resort the movies that the user may like but were placed further down in the recommender system's candidate set. This requires the application of LLMs' knowledge of movies, understanding of user preferences, and the ability to reason about the matching relationship between the two. To confirm this finding, we conducted separate empirical studies and asked LLMs again, in the same conversation, about movies that appeared in the recommender system's top 5 but did not appear in LLMs' top 5. LLMs' feedback revealed that it is unlikely that the user would like the movie or it is difficult to determine whether the user would like it, with clear reasons given. The inconsistent shows that CHAT-REC's" }
  ],
  [
    { "type": "header", "bbox": [0.379, 0.115, 0.732, 0.13], "angle": 0, "content": "Chat-REC: LLMs-Augmented Recommender System" },
    { "type": "page_number", "bbox": [0.769, 0.116, 0.786, 0.127], "angle": 0, "content": "13" },
    { "type": "text", "bbox": [0.214, 0.147, 0.784, 0.177], "angle": 0, "content": "recommendations are entirely based on an understanding of user preferences and movie information." },
    { "type": "title", "bbox": [0.215, 0.202, 0.391, 0.218], "angle": 0, "content": "4.4 Ablation Study" },
    { "type": "text", "bbox": [0.214, 0.229, 0.788, 0.29], "angle": 0, "content": "In this study, we select the text-davinci-003 model, which achieved the best results in both top-k recommendation and rating prediction, to investigate the impact of different prompts and temperatures on the model's performance. The result is shown in Fig. 5." },
    { "type": "image", "bbox": [0.165, 0.329, 0.371, 0.434], "angle": 0, "content": null },
    { "type": "image", "bbox": [0.387, 0.328, 0.596, 0.433], "angle": 0, "content": null },
    { "type": "image", "bbox": [0.617, 0.327, 0.822, 0.433], "angle": 0, "content": null },
    { "type": "image_caption", "bbox": [0.292, 0.446, 0.71, 0.461], "angle": 0, "content": "Fig. 5: Performance on different prompt and temperature." },
    { "type": "text", "bbox": [0.214, 0.492, 0.788, 0.628], "angle": 0, "content": "In the context of this study, \"w/random\" refers to the random shuffling of the 20 candidate sets generated by the recommender system before being provided to LLM as the candidate set prompt input, while \"w/top1\" indicates that the top 1 recommendation is not given as the initial background knowledge when constructing the prompt, but instead directly asks LLM to select 5 movies from the candidate set. The temperature parameter affects the answer generated by LLM, with lower temperatures indicating more certain answers, and higher for more random answers. All experiments, except for the experiment with a temperature of 0, used the average of 5 tests." },
    { "type": "text", "bbox": [0.214, 0.629, 0.788, 0.765], "angle": 0, "content": "The results demonstrate that the effect slightly decreased after the order of the candidate set was shuffled. For example, when the temperature is 0.9, the NDCG of text-davinci-003 decreased from 0.3802 to 0.3653, representing a decrease of \\(3.92\\%\\). The effect of CHAT-REC decreased significantly when the recommender system's top 1 was missing in the prompt. For instance, when the temperature is 0.9, the NDCG of text-davinci-003 decreased from 0.3802 to 0.3055, which is a decrease of \\(19.65\\%\\). This trend was observed at different temperatures, and the experiment showed that the best results could be achieved when the temperature was 0.9." },
    { "type": "text", "bbox": [0.214, 0.766, 0.788, 0.84], "angle": 0, "content": "It is worth noting that the existence of the recommender system was not explicitly mentioned in CHAT-REC's prompt, and the function of the recommender system was merely to provide a candidate set. However, the design of the candidate set can significantly impact CHAT-REC's performance. Our experiment revealed that CHAT-REC's prompt design can effectively inject the recommender" }
  ],
  [
    { "type": "page_number", "bbox": [0.218, 0.116, 0.236, 0.127], "angle": 0, "content": "14" },
    { "type": "header", "bbox": [0.272, 0.115, 0.34, 0.128], "angle": 0, "content": "Gao et al." },
    { "type": "text", "bbox": [0.214, 0.147, 0.788, 0.224], "angle": 0, "content": "system's knowledge implicitly into LLMs. This implicit knowledge is reflected in the ranking of movies in the candidate set, and the use of Top1 as the background can further strengthen this information. This implicit knowledge can be captured by LLMs in in-context learning and can enhance the recommendation performance." },
    { "type": "title", "bbox": [0.216, 0.248, 0.36, 0.265], "angle": 0, "content": "5 Conclusion" },
    { "type": "text", "bbox": [0.214, 0.283, 0.789, 0.404], "angle": 0, "content": "In this paper, we present CHAT-REC which bridges recommender system and LLMs by converting user information and user-item interactions to prompt. We evaluated our approach in the task of top-k recommendation and zero-shot movie rating prediction. In conclusion, LLMs offer significant potential for enhancing recommender systems by improving interactivity explainability and cross-domain recommendation. In addition, prompt plays an important role, and experiments prove that implicitly expressing the knowledge in the recommender system in prompt can effectively improve the recommendation effect." },
    { "type": "title",
"bbox": [
|
| 1561 |
+
0.216,
|
| 1562 |
+
0.429,
|
| 1563 |
+
0.325,
|
| 1564 |
+
0.445
|
| 1565 |
+
],
|
| 1566 |
+
"angle": 0,
|
| 1567 |
+
"content": "References"
|
| 1568 |
+
},
|
| 1569 |
+
{
|
| 1570 |
+
"type": "ref_text",
|
| 1571 |
+
"bbox": [
|
| 1572 |
+
0.223,
|
| 1573 |
+
0.463,
|
| 1574 |
+
0.787,
|
| 1575 |
+
0.504
|
| 1576 |
+
],
|
| 1577 |
+
"angle": 0,
|
| 1578 |
+
"content": "1. Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J.D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al.: Language models are few-shot learners. Advances in neural information processing systems 33, 1877-1901 (2020)"
|
| 1579 |
+
},
|
| 1580 |
+
{
|
| 1581 |
+
"type": "ref_text",
|
| 1582 |
+
"bbox": [
|
| 1583 |
+
0.223,
|
| 1584 |
+
0.506,
|
| 1585 |
+
0.787,
|
| 1586 |
+
0.546
|
| 1587 |
+
],
|
| 1588 |
+
"angle": 0,
|
| 1589 |
+
"content": "2. Chen, M., Tworek, J., Jun, H., Yuan, Q., Pinto, H.P.d.O., Kaplan, J., Edwards, H., Burda, Y., Joseph, N., Brockman, G., et al.: Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374 (2021)"
|
| 1590 |
+
},
|
| 1591 |
+
{
|
| 1592 |
+
"type": "ref_text",
|
| 1593 |
+
"bbox": [
|
| 1594 |
+
0.223,
|
| 1595 |
+
0.548,
|
| 1596 |
+
0.787,
|
| 1597 |
+
0.616
|
| 1598 |
+
],
|
| 1599 |
+
"angle": 0,
|
| 1600 |
+
"content": "3. Chen, X., Chen, H., Xu, H., Zhang, Y., Cao, Y., Qin, Z., Zha, H.: Personalized fashion recommendation with visual explanations based on multimodal attention network: Towards visually explainable recommendation. In: Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval. pp. 765-774 (2019)"
|
| 1601 |
+
},
|
| 1602 |
+
{
|
| 1603 |
+
"type": "ref_text",
|
| 1604 |
+
"bbox": [
|
| 1605 |
+
0.223,
|
| 1606 |
+
0.617,
|
| 1607 |
+
0.787,
|
| 1608 |
+
0.658
|
| 1609 |
+
],
|
| 1610 |
+
"angle": 0,
|
| 1611 |
+
"content": "4. Chowdhery, A., Narang, S., Devlin, J., Bosma, M., Mishra, G., Roberts, A., Barham, P., Chung, H.W., Sutton, C., Gehrmann, S., et al.: Palm: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311 (2022)"
|
| 1612 |
+
},
|
| 1613 |
+
{
|
| 1614 |
+
"type": "ref_text",
|
| 1615 |
+
"bbox": [
|
| 1616 |
+
0.223,
|
| 1617 |
+
0.66,
|
| 1618 |
+
0.787,
|
| 1619 |
+
0.699
|
| 1620 |
+
],
|
| 1621 |
+
"angle": 0,
|
| 1622 |
+
"content": "5. Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)"
|
| 1623 |
+
},
|
| 1624 |
+
{
|
| 1625 |
+
"type": "ref_text",
|
| 1626 |
+
"bbox": [
|
| 1627 |
+
0.223,
|
| 1628 |
+
0.701,
|
| 1629 |
+
1.0,
|
| 1630 |
+
0.756
|
| 1631 |
+
],
|
| 1632 |
+
"angle": 0,
|
| 1633 |
+
"content": "6. Fu, Yao; Peng, H., Khot, T.: How does gpt obtain its ability? tracing emergent abilities of language models to their sources. Yao Fu's Notion (Dec 2022), https://yaofu.notion.site/ How-does-GPT-Obtain-its-Ability-Tracing-Emergent-Abilities-of-Language-Models-to-their-Sources-b?"
|
| 1634 |
+
},
|
| 1635 |
+
{
|
| 1636 |
+
"type": "ref_text",
|
| 1637 |
+
"bbox": [
|
| 1638 |
+
0.223,
|
| 1639 |
+
0.758,
|
| 1640 |
+
0.787,
|
| 1641 |
+
0.811
|
| 1642 |
+
],
|
| 1643 |
+
"angle": 0,
|
| 1644 |
+
"content": "7. Geng, S., Liu, S., Fu, Z., Ge, Y., Zhang, Y.: Recommendation as language processing (rlp): A unified pretrain, personalized prompt & predict paradigm (p5). In: Proceedings of the 16th ACM Conference on Recommender Systems. pp. 299-315 (2022)"
|
| 1645 |
+
},
|
| 1646 |
+
{
|
| 1647 |
+
"type": "ref_text",
|
| 1648 |
+
"bbox": [
|
| 1649 |
+
0.223,
|
| 1650 |
+
0.813,
|
| 1651 |
+
0.787,
|
| 1652 |
+
0.84
|
| 1653 |
+
],
|
| 1654 |
+
"angle": 0,
|
| 1655 |
+
"content": "8. Khashabi, D., Min, S., Khot, T., Sabharwal, A., Tafjord, O., Clark, P., Hajishirzi, H.: Unifiedqa: Crossing format boundaries with a single qa system (2020)"
|
| 1656 |
+
},
|
| 1657 |
+
{
|
| 1658 |
+
"type": "list",
|
| 1659 |
+
"bbox": [
|
| 1660 |
+
0.223,
|
| 1661 |
+
0.463,
|
| 1662 |
+
1.0,
|
| 1663 |
+
0.84
|
| 1664 |
+
],
|
| 1665 |
+
"angle": 0,
|
| 1666 |
+
"content": null
|
| 1667 |
+
}
|
| 1668 |
+
],
|
| 1669 |
+
[
|
| 1670 |
+
{
|
| 1671 |
+
"type": "header",
|
| 1672 |
+
"bbox": [
|
| 1673 |
+
0.379,
|
| 1674 |
+
0.115,
|
| 1675 |
+
0.732,
|
| 1676 |
+
0.129
|
| 1677 |
+
],
|
| 1678 |
+
"angle": 0,
|
| 1679 |
+
"content": "Chat-REC: LLMs-Augmented Recommender System"
|
| 1680 |
+
},
|
| 1681 |
+
{
|
| 1682 |
+
"type": "page_number",
|
| 1683 |
+
"bbox": [
|
| 1684 |
+
0.769,
|
| 1685 |
+
0.117,
|
| 1686 |
+
0.785,
|
| 1687 |
+
0.127
|
| 1688 |
+
],
|
| 1689 |
+
"angle": 0,
|
| 1690 |
+
"content": "15"
|
| 1691 |
+
},
|
| 1692 |
+
{
|
| 1693 |
+
"type": "ref_text",
|
| 1694 |
+
"bbox": [
|
| 1695 |
+
0.223,
|
| 1696 |
+
0.148,
|
| 1697 |
+
0.785,
|
| 1698 |
+
0.175
|
| 1699 |
+
],
|
| 1700 |
+
"angle": 0,
|
| 1701 |
+
"content": "9. LeCun, Y.: A path towards autonomous machine intelligence version 0.9.2, 2022-06-27. Open Review 62 (2022)"
|
| 1702 |
+
},
|
| 1703 |
+
{
|
| 1704 |
+
"type": "ref_text",
|
| 1705 |
+
"bbox": [
|
| 1706 |
+
0.218,
|
| 1707 |
+
0.177,
|
| 1708 |
+
0.785,
|
| 1709 |
+
0.218
|
| 1710 |
+
],
|
| 1711 |
+
"angle": 0,
|
| 1712 |
+
"content": "10. Li, L., Zhang, Y., Chen, L.: Generate neural template explanations for recommendation. In: Proceedings of the 29th ACM International Conference on Information & Knowledge Management. pp. 755-764 (2020)"
|
| 1713 |
+
},
|
| 1714 |
+
{
|
| 1715 |
+
"type": "ref_text",
|
| 1716 |
+
"bbox": [
|
| 1717 |
+
0.218,
|
| 1718 |
+
0.219,
|
| 1719 |
+
0.785,
|
| 1720 |
+
0.247
|
| 1721 |
+
],
|
| 1722 |
+
"angle": 0,
|
| 1723 |
+
"content": "11. Li, L., Zhang, Y., Chen, L.: Personalized transformer for explainable recommendation. arXiv preprint arXiv:2105.11601 (2021)"
|
| 1724 |
+
},
|
| 1725 |
+
{
|
| 1726 |
+
"type": "ref_text",
|
| 1727 |
+
"bbox": [
|
| 1728 |
+
0.219,
|
| 1729 |
+
0.248,
|
| 1730 |
+
0.785,
|
| 1731 |
+
0.289
|
| 1732 |
+
],
|
| 1733 |
+
"angle": 0,
|
| 1734 |
+
"content": "12. Liu, P., Zhang, L., Gulla, J.A.: Pre-train, prompt and recommendation: A comprehensive survey of language modelling paradigm adaptations in recommender systems. arXiv preprint arXiv:2302.03735 (2023)"
|
| 1735 |
+
},
|
| 1736 |
+
{
|
| 1737 |
+
"type": "ref_text",
|
| 1738 |
+
"bbox": [
|
| 1739 |
+
0.219,
|
| 1740 |
+
0.29,
|
| 1741 |
+
0.769,
|
| 1742 |
+
0.304
|
| 1743 |
+
],
|
| 1744 |
+
"angle": 0,
|
| 1745 |
+
"content": "13. Parisi, A., Zhao, Y., Fiedel, N.: Talm: Tool augmented language models (2022)"
|
| 1746 |
+
},
|
| 1747 |
+
{
|
| 1748 |
+
"type": "ref_text",
|
| 1749 |
+
"bbox": [
|
| 1750 |
+
0.219,
|
| 1751 |
+
0.305,
|
| 1752 |
+
0.785,
|
| 1753 |
+
0.332
|
| 1754 |
+
],
|
| 1755 |
+
"angle": 0,
|
| 1756 |
+
"content": "14. Petroni, F., Rocktäschel, T., Lewis, P., Bakhtin, A., Wu, Y., Miller, A.H., Riedel, S.: Language models as knowledge bases? arXiv preprint arXiv:1909.01066 (2019)"
|
| 1757 |
+
},
|
| 1758 |
+
{
|
| 1759 |
+
"type": "ref_text",
|
| 1760 |
+
"bbox": [
|
| 1761 |
+
0.219,
|
| 1762 |
+
0.334,
|
| 1763 |
+
0.785,
|
| 1764 |
+
0.374
|
| 1765 |
+
],
|
| 1766 |
+
"angle": 0,
|
| 1767 |
+
"content": "15. Schick, T., Dwivedi-Yu, J., Dessi, R., Raileanu, R., Lomeli, M., Zettlemoyer, L., Cancedda, N., Scialom, T.: Toolformer: Language models can teach themselves to use tools (2023)"
|
| 1768 |
+
},
|
| 1769 |
+
{
|
| 1770 |
+
"type": "ref_text",
|
| 1771 |
+
"bbox": [
|
| 1772 |
+
0.219,
|
| 1773 |
+
0.375,
|
| 1774 |
+
0.785,
|
| 1775 |
+
0.431
|
| 1776 |
+
],
|
| 1777 |
+
"angle": 0,
|
| 1778 |
+
"content": "16. Shi, S., Zhang, M., Yu, X., Zhang, Y., Hao, B., Liu, Y., Ma, S.: Adaptive feature sampling for recommendation with missing content feature values. In: Proceedings of the 28th ACM International Conference on Information and Knowledge Management. pp. 1451-1460 (2019)"
|
| 1779 |
+
},
|
| 1780 |
+
{
|
| 1781 |
+
"type": "ref_text",
|
| 1782 |
+
"bbox": [
|
| 1783 |
+
0.219,
|
| 1784 |
+
0.432,
|
| 1785 |
+
0.785,
|
| 1786 |
+
0.473
|
| 1787 |
+
],
|
| 1788 |
+
"angle": 0,
|
| 1789 |
+
"content": "17. Sun, C., Liu, H., Liu, M., Ren, Z., Gan, T., Nie, L.: Lara: Attribute-to-feature adversarial learning for new-item recommendation. In: Proceedings of the 13th international conference on web search and data mining. pp. 582-590 (2020)"
|
| 1790 |
+
},
|
| 1791 |
+
{
|
| 1792 |
+
"type": "ref_text",
|
| 1793 |
+
"bbox": [
|
| 1794 |
+
0.219,
|
| 1795 |
+
0.475,
|
| 1796 |
+
0.785,
|
| 1797 |
+
0.515
|
| 1798 |
+
],
|
| 1799 |
+
"angle": 0,
|
| 1800 |
+
"content": "18. Touvron, H., Lavril, T., Izacard, G., Martinet, X., Lachaux, M.A., Lacroix, T., Rozière, B., Goyal, N., Hambro, E., Azhar, F., et al.: Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)"
|
| 1801 |
+
},
|
| 1802 |
+
{
|
| 1803 |
+
"type": "ref_text",
|
| 1804 |
+
"bbox": [
|
| 1805 |
+
0.219,
|
| 1806 |
+
0.517,
|
| 1807 |
+
0.785,
|
| 1808 |
+
0.557
|
| 1809 |
+
],
|
| 1810 |
+
"angle": 0,
|
| 1811 |
+
"content": "19. Wei, J., Bosma, M., Zhao, V., Guu, K., Yu, A.W., Lester, B., Du, N., Dai, A.M., Le, Q.V.: Finetuned language models are zero-shot learners. ArXiv abs/2109.01652 (2021)"
|
| 1812 |
+
},
|
| 1813 |
+
{
|
| 1814 |
+
"type": "ref_text",
|
| 1815 |
+
"bbox": [
|
| 1816 |
+
0.219,
|
| 1817 |
+
0.559,
|
| 1818 |
+
0.785,
|
| 1819 |
+
0.613
|
| 1820 |
+
],
|
| 1821 |
+
"angle": 0,
|
| 1822 |
+
"content": "20. Wei, J., Tay, Y., Bommasani, R., Raffel, C., Zoph, B., Borgeaud, S., Yogatama, D., Bosma, M., Zhou, D., Metzler, D., hsin Chi, E.H., Hashimoto, T., Vinyals, O., Liang, P., Dean, J., Fedus, W.: Emergent abilities of large language models. ArXiv abs/2206.07682 (2022)"
|
| 1823 |
+
},
|
| 1824 |
+
{
|
| 1825 |
+
"type": "ref_text",
|
| 1826 |
+
"bbox": [
|
| 1827 |
+
0.219,
|
| 1828 |
+
0.615,
|
| 1829 |
+
0.785,
|
| 1830 |
+
0.655
|
| 1831 |
+
],
|
| 1832 |
+
"angle": 0,
|
| 1833 |
+
"content": "21. Wei, J., Wang, X., Schuurmans, D., Bosma, M., hsin Chi, E.H., Le, Q., Zhou, D.: Chain of thought prompting elicits reasoning in large language models. ArXiv abs/2201.11903 (2022)"
|
| 1834 |
+
},
|
| 1835 |
+
{
|
| 1836 |
+
"type": "ref_text",
|
| 1837 |
+
"bbox": [
|
| 1838 |
+
0.219,
|
| 1839 |
+
0.657,
|
| 1840 |
+
0.785,
|
| 1841 |
+
0.685
|
| 1842 |
+
],
|
| 1843 |
+
"angle": 0,
|
| 1844 |
+
"content": "22. Xu, Y., Zhu, C., Xu, R., Liu, Y., Zeng, M., Huang, X.: Fusing context into knowledge graph for commonsense question answering (2021)"
|
| 1845 |
+
},
|
| 1846 |
+
{
|
| 1847 |
+
"type": "ref_text",
|
| 1848 |
+
"bbox": [
|
| 1849 |
+
0.219,
|
| 1850 |
+
0.686,
|
| 1851 |
+
0.785,
|
| 1852 |
+
0.727
|
| 1853 |
+
],
|
| 1854 |
+
"angle": 0,
|
| 1855 |
+
"content": "23. Yu, W., Iter, D., Wang, S., Xu, Y., Ju, M., Sanyal, S., Zhu, C., Zeng, M., Jiang, M.: Generate rather than retrieve: Large language models are strong context generators (2023)"
|
| 1856 |
+
},
|
| 1857 |
+
{
|
| 1858 |
+
"type": "ref_text",
|
| 1859 |
+
"bbox": [
|
| 1860 |
+
0.219,
|
| 1861 |
+
0.728,
|
| 1862 |
+
0.785,
|
| 1863 |
+
0.783
|
| 1864 |
+
],
|
| 1865 |
+
"angle": 0,
|
| 1866 |
+
"content": "24. Yuan, F., Zhang, G., Karatzoglou, A., Jose, J., Kong, B., Li, Y.: One person, one model, one world: Learning continual user representation without forgetting. In: Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval. pp. 696-705 (2021)"
|
| 1867 |
+
},
|
| 1868 |
+
{
|
| 1869 |
+
"type": "ref_text",
|
| 1870 |
+
"bbox": [
|
| 1871 |
+
0.219,
|
| 1872 |
+
0.785,
|
| 1873 |
+
0.785,
|
| 1874 |
+
0.812
|
| 1875 |
+
],
|
| 1876 |
+
"angle": 0,
|
| 1877 |
+
"content": "25. Zhang, Y., Ding, H., Shui, Z., Ma, Y., Zou, J., Deoras, A., Wang, H.: Language models as recommender systems: Evaluations and limitations (2021)"
|
| 1878 |
+
},
|
| 1879 |
+
{
|
| 1880 |
+
"type": "ref_text",
|
| 1881 |
+
"bbox": [
|
| 1882 |
+
0.219,
|
| 1883 |
+
0.813,
|
| 1884 |
+
0.785,
|
| 1885 |
+
0.84
|
| 1886 |
+
],
|
| 1887 |
+
"angle": 0,
|
| 1888 |
+
"content": "26. Zhu, F., Wang, Y., Chen, C., Zhou, J., Li, L., Liu, G.: Cross-domain recommendation: challenges, progress, and prospects. arXiv preprint arXiv:2103.01696 (2021)"
|
| 1889 |
+
},
|
| 1890 |
+
{
|
| 1891 |
+
"type": "list",
|
| 1892 |
+
"bbox": [
|
| 1893 |
+
0.218,
|
| 1894 |
+
0.148,
|
| 1895 |
+
0.785,
|
| 1896 |
+
0.84
|
| 1897 |
+
],
|
| 1898 |
+
"angle": 0,
|
| 1899 |
+
"content": null
|
| 1900 |
+
}
|
| 1901 |
+
],
|
| 1902 |
+
[
|
| 1903 |
+
{
|
| 1904 |
+
"type": "page_number",
|
| 1905 |
+
"bbox": [
|
| 1906 |
+
0.218,
|
| 1907 |
+
0.116,
|
| 1908 |
+
0.237,
|
| 1909 |
+
0.127
|
| 1910 |
+
],
|
| 1911 |
+
"angle": 0,
|
| 1912 |
+
"content": "16"
|
| 1913 |
+
},
|
| 1914 |
+
{
|
| 1915 |
+
"type": "header",
|
| 1916 |
+
"bbox": [
|
| 1917 |
+
0.272,
|
| 1918 |
+
0.115,
|
| 1919 |
+
0.34,
|
| 1920 |
+
0.128
|
| 1921 |
+
],
|
| 1922 |
+
"angle": 0,
|
| 1923 |
+
"content": "Gao et al."
|
| 1924 |
+
},
|
| 1925 |
+
{
|
| 1926 |
+
"type": "title",
|
| 1927 |
+
"bbox": [
|
| 1928 |
+
0.217,
|
| 1929 |
+
0.145,
|
| 1930 |
+
0.486,
|
| 1931 |
+
0.163
|
| 1932 |
+
],
|
| 1933 |
+
"angle": 0,
|
| 1934 |
+
"content": "A Implementation Details"
|
| 1935 |
+
},
|
| 1936 |
+
{
|
| 1937 |
+
"type": "title",
|
| 1938 |
+
"bbox": [
|
| 1939 |
+
0.217,
|
| 1940 |
+
0.174,
|
| 1941 |
+
0.341,
|
| 1942 |
+
0.189
|
| 1943 |
+
],
|
| 1944 |
+
"angle": 0,
|
| 1945 |
+
"content": "A.1 Prompts"
|
| 1946 |
+
},
|
| 1947 |
+
{
|
| 1948 |
+
"type": "text",
|
| 1949 |
+
"bbox": [
|
| 1950 |
+
0.216,
|
| 1951 |
+
0.195,
|
| 1952 |
+
0.787,
|
| 1953 |
+
0.226
|
| 1954 |
+
],
|
| 1955 |
+
"angle": 0,
|
| 1956 |
+
"content": "Below, we list the prompts used in top-k recommendation and zero-shot movie rating tasks."
|
| 1957 |
+
},
|
| 1958 |
+
{
|
| 1959 |
+
"type": "text",
|
| 1960 |
+
"bbox": [
|
| 1961 |
+
0.233,
|
| 1962 |
+
0.269,
|
| 1963 |
+
0.736,
|
| 1964 |
+
0.293
|
| 1965 |
+
],
|
| 1966 |
+
"angle": 0,
|
| 1967 |
+
"content": "I want you to recommend movie to a user based on some personal information and historical records of film watching."
|
| 1968 |
+
},
|
| 1969 |
+
{
|
| 1970 |
+
"type": "text",
|
| 1971 |
+
"bbox": [
|
| 1972 |
+
0.233,
|
| 1973 |
+
0.302,
|
| 1974 |
+
0.632,
|
| 1975 |
+
0.316
|
| 1976 |
+
],
|
| 1977 |
+
"angle": 0,
|
| 1978 |
+
"content": "user profile:{user profile} (e.g.He is 24 years old, and work as technician.)"
|
| 1979 |
+
},
|
| 1980 |
+
{
|
| 1981 |
+
"type": "text",
|
| 1982 |
+
"bbox": [
|
| 1983 |
+
0.233,
|
| 1984 |
+
0.324,
|
| 1985 |
+
0.761,
|
| 1986 |
+
0.36
|
| 1987 |
+
],
|
| 1988 |
+
"angle": 0,
|
| 1989 |
+
"content": "The historical records include the movie name,type and how many points he/she scored out of 5. The higher the score, the more he likes the movie. You are encouraged to learn his movie preferencen from the movies he have watched. Here are some examples:"
|
| 1990 |
+
},
|
| 1991 |
+
{
|
| 1992 |
+
"type": "text",
|
| 1993 |
+
"bbox": [
|
| 1994 |
+
0.234,
|
| 1995 |
+
0.369,
|
| 1996 |
+
0.696,
|
| 1997 |
+
0.383
|
| 1998 |
+
],
|
| 1999 |
+
"angle": 0,
|
| 2000 |
+
"content": "{history/movie} (e.g. a Sci-Fi Thriller movie called Net, The (1995), and scored it a 3)"
|
| 2001 |
+
},
|
| 2002 |
+
{
|
| 2003 |
+
"type": "text",
|
| 2004 |
+
"bbox": [
|
| 2005 |
+
0.235,
|
| 2006 |
+
0.39,
|
| 2007 |
+
0.566,
|
| 2008 |
+
0.404
|
| 2009 |
+
],
|
| 2010 |
+
"angle": 0,
|
| 2011 |
+
"content": "Here's a list of movies that he is likely to like: {candidate_list}"
|
| 2012 |
+
},
|
| 2013 |
+
{
|
| 2014 |
+
"type": "text",
|
| 2015 |
+
"bbox": [
|
| 2016 |
+
0.233,
|
| 2017 |
+
0.413,
|
| 2018 |
+
0.752,
|
| 2019 |
+
0.438
|
| 2020 |
+
],
|
| 2021 |
+
"angle": 0,
|
| 2022 |
+
"content": "Please select top 5 movies in the list that is most likely to be liked. The first film to be selected is {top1/movie}. Please select the remaining 4 movies. Only Output the movie name."
|
| 2023 |
+
},
|
| 2024 |
+
{
|
| 2025 |
+
"type": "image_caption",
|
| 2026 |
+
"bbox": [
|
| 2027 |
+
0.331,
|
| 2028 |
+
0.465,
|
| 2029 |
+
0.67,
|
| 2030 |
+
0.481
|
| 2031 |
+
],
|
| 2032 |
+
"angle": 0,
|
| 2033 |
+
"content": "Fig. 6: Prompt for top-k recommendation task."
|
| 2034 |
+
},
|
| 2035 |
+
{
|
| 2036 |
+
"type": "text",
|
| 2037 |
+
"bbox": [
|
| 2038 |
+
0.233,
|
| 2039 |
+
0.522,
|
| 2040 |
+
0.767,
|
| 2041 |
+
0.546
|
| 2042 |
+
],
|
| 2043 |
+
"angle": 0,
|
| 2044 |
+
"content": "I want you to act as a movie recommender. You task is to predict the user's rating of some movies out of 5 based on his profile and historical records of film watching.Clear scores must be given."
|
| 2045 |
+
},
|
| 2046 |
+
{
|
| 2047 |
+
"type": "text",
|
| 2048 |
+
"bbox": [
|
| 2049 |
+
0.234,
|
| 2050 |
+
0.555,
|
| 2051 |
+
0.373,
|
| 2052 |
+
0.569
|
| 2053 |
+
],
|
| 2054 |
+
"angle": 0,
|
| 2055 |
+
"content": "user profile:{user profile}"
|
| 2056 |
+
},
|
| 2057 |
+
{
|
| 2058 |
+
"type": "text",
|
| 2059 |
+
"bbox": [
|
| 2060 |
+
0.233,
|
| 2061 |
+
0.578,
|
| 2062 |
+
0.763,
|
| 2063 |
+
0.612
|
| 2064 |
+
],
|
| 2065 |
+
"angle": 0,
|
| 2066 |
+
"content": "The historical records include the movie name and how many points he/she scored out of 5. The higher the score, the more he likes the movie. You are encouraged to learn his movie preferences from the movies he have watched."
|
| 2067 |
+
},
|
| 2068 |
+
{
|
| 2069 |
+
"type": "text",
|
| 2070 |
+
"bbox": [
|
| 2071 |
+
0.235,
|
| 2072 |
+
0.623,
|
| 2073 |
+
0.321,
|
| 2074 |
+
0.635
|
| 2075 |
+
],
|
| 2076 |
+
"angle": 0,
|
| 2077 |
+
"content": "{history/movie}"
|
| 2078 |
+
},
|
| 2079 |
+
{
|
| 2080 |
+
"type": "text",
|
| 2081 |
+
"bbox": [
|
| 2082 |
+
0.233,
|
| 2083 |
+
0.645,
|
| 2084 |
+
0.737,
|
| 2085 |
+
0.679
|
| 2086 |
+
],
|
| 2087 |
+
"angle": 0,
|
| 2088 |
+
"content": "Here's a list of movies.:You are going to predict his ratings for these movies.The range of the score is 0-5. A definite value must be given. Seperate movie and rating by \"-\". Output should be formatted as :[movie]-[rating]"
|
| 2089 |
+
},
|
| 2090 |
+
{
|
| 2091 |
+
"type": "text",
|
| 2092 |
+
"bbox": [
|
| 2093 |
+
0.234,
|
| 2094 |
+
0.69,
|
| 2095 |
+
0.357,
|
| 2096 |
+
0.702
|
| 2097 |
+
],
|
| 2098 |
+
"angle": 0,
|
| 2099 |
+
"content": "movie_list:{movie_list}"
|
| 2100 |
+
},
|
| 2101 |
+
{
|
| 2102 |
+
"type": "image_caption",
|
| 2103 |
+
"bbox": [
|
| 2104 |
+
0.362,
|
| 2105 |
+
0.725,
|
| 2106 |
+
0.639,
|
| 2107 |
+
0.741
|
| 2108 |
+
],
|
| 2109 |
+
"angle": 0,
|
| 2110 |
+
"content": "Fig. 7: Prompt for moving rating task."
|
| 2111 |
+
},
|
| 2112 |
+
{
|
| 2113 |
+
"type": "title",
|
| 2114 |
+
"bbox": [
|
| 2115 |
+
0.216,
|
| 2116 |
+
0.788,
|
| 2117 |
+
0.416,
|
| 2118 |
+
0.804
|
| 2119 |
+
],
|
| 2120 |
+
"angle": 0,
|
| 2121 |
+
"content": "A.2 Example Answers"
|
| 2122 |
+
},
|
| 2123 |
+
{
|
| 2124 |
+
"type": "text",
|
| 2125 |
+
"bbox": [
|
| 2126 |
+
0.215,
|
| 2127 |
+
0.81,
|
| 2128 |
+
0.787,
|
| 2129 |
+
0.84
|
| 2130 |
+
],
|
| 2131 |
+
"angle": 0,
|
| 2132 |
+
"content": "In fact, the LLMs do not always output answers in the format we expect every time, especially at higher temperatures. In table 4, we give some failed cases"
|
| 2133 |
+
}
|
| 2134 |
+
],
|
| 2135 |
+
[
|
| 2136 |
+
{
|
| 2137 |
+
"type": "header",
|
| 2138 |
+
"bbox": [
|
| 2139 |
+
0.379,
|
| 2140 |
+
0.115,
|
| 2141 |
+
0.732,
|
| 2142 |
+
0.13
|
| 2143 |
+
],
|
| 2144 |
+
"angle": 0,
|
| 2145 |
+
"content": "Chat-REC: LLMs-Augmented Recommender System"
|
| 2146 |
+
},
|
| 2147 |
+
{
|
| 2148 |
+
"type": "page_number",
|
| 2149 |
+
"bbox": [
|
| 2150 |
+
0.769,
|
| 2151 |
+
0.117,
|
| 2152 |
+
0.785,
|
| 2153 |
+
0.127
|
| 2154 |
+
],
|
| 2155 |
+
"angle": 0,
|
| 2156 |
+
"content": "17"
|
| 2157 |
+
},
|
| 2158 |
+
{
|
| 2159 |
+
"type": "text",
|
| 2160 |
+
"bbox": [
|
| 2161 |
+
0.217,
|
| 2162 |
+
0.147,
|
| 2163 |
+
0.785,
|
| 2164 |
+
0.178
|
| 2165 |
+
],
|
| 2166 |
+
"angle": 0,
|
| 2167 |
+
"content": "while invoking LLMs' API to generate answers. During the experiment, output that does not match the format is automatically retried."
|
| 2168 |
+
},
|
| 2169 |
+
{
|
| 2170 |
+
"type": "table_caption",
|
| 2171 |
+
"bbox": [
|
| 2172 |
+
0.217,
|
| 2173 |
+
0.217,
|
| 2174 |
+
0.787,
|
| 2175 |
+
0.231
|
| 2176 |
+
],
|
| 2177 |
+
"angle": 0,
|
| 2178 |
+
"content": "Table 4: Some cases and explanations that failed to generate canonical answers"
|
| 2179 |
+
},
|
| 2180 |
+
{
|
| 2181 |
+
"type": "table",
|
| 2182 |
+
"bbox": [
|
| 2183 |
+
0.217,
|
| 2184 |
+
0.23,
|
| 2185 |
+
0.79,
|
| 2186 |
+
0.56
|
| 2187 |
+
],
|
| 2188 |
+
"angle": 0,
|
| 2189 |
+
"content": "<table><tr><td>Example</td><td>Explanation</td><td>Correct</td></tr><tr><td>... \nThe current list is: 1.Toy Story (1995) \n2.Fargo (1996) 3.Die Hard (1988) 4.Fish \nCalled Wanda, A (1988) 5. Wrong \nTrousers, The (1993)</td><td>The output conforms to the \nformatting requirements</td><td>✓</td></tr><tr><td>... \nThe current list is: 1.The Shawshank \nRedemption (1994) (It should be \n“Shawshank \nRedemption, The(1994)”) 2.A Fish Called \nWanda (1988) (It should be \n“Fish Called \nWanda, A (1988)”) ...</td><td>Failure to output film names \nin accordance with film in- \ndustry norms. such as “A” \nand “The” are not in the \nright place.</td><td>X</td></tr><tr><td>... \nThe current list is: 1.Toy Story (1995) \n2.Groundhog Day (1993) 3.Star Trek: The \nWrath of Khan (1982) 4.Fargo (1996)</td><td>Sometimes it can’t output a \nsufficient number of movies. \nIn this case, it only output 4 \nmovies while sometimes may \noutput 19 movies.</td><td>X</td></tr><tr><td>... The current list is: a:Star Wars (1977) \na:Raiders of the Lost Ark (1981) n:Back to \nthe Future (1985) m:Fargo (1996) ...</td><td>Sometimes the id infor- \nmation is lost when LLM \nis asked to output movies \nin the following format \n[id].[name].</td><td>X</td></tr></table>"
|
| 2190 |
+
}
|
| 2191 |
+
]
|
| 2192 |
+
]
|
2303.14xxx/2303.14524/e5470f39-aaa6-4768-8235-0e7feb89df40_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:246da7358a531ba3b6b641756f0ef0f02e5414ac2275a8477e5058b116cf5b69
|
| 3 |
+
size 1430487
|
2303.14xxx/2303.14524/full.md
ADDED
|
@@ -0,0 +1,301 @@
|
| 1 |
+
# Chat-REC: Towards Interactive and Explainable LLMs-Augmented Recommender System
|
| 2 |
+
|
| 3 |
+
Yunfan Gao $^{1}$ , Tao Sheng $^{1}$ , Youlin Xiang $^{1}$ , Yun Xiong $^{1}$ , Haofen Wang $^{2}$ , and Jiawei Zhang $^{3}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup> Shanghai Key Laboratory of Data Science, School of Computer Science, Fudan University, Shanghai, China
|
| 6 |
+
|
| 7 |
+
yufan1602@163.com
|
| 8 |
+
|
| 9 |
+
tsheng16@fudan.edu.cn
|
| 10 |
+
|
| 11 |
+
21210240365@m.fudan.edu.cn
|
| 12 |
+
|
| 13 |
+
yunx@fudan.edu.cn
|
| 14 |
+
|
| 15 |
+
2 College of Design and Innovation, Tongji University, Shanghai, China
|
| 16 |
+
|
| 17 |
+
carter.whfcarter@gmail.com
|
| 18 |
+
|
| 19 |
+
<sup>3</sup> IFM Lab, Department of Computer Science, University of California, Davis, CA, USA
|
| 20 |
+
|
| 21 |
+
jiawei@ifmlab.org
|
| 22 |
+
|
| 23 |
+
Abstract. Large language models (LLMs) have demonstrated their significant potential to be applied for addressing various application tasks. However, traditional recommender systems continue to face great challenges such as poor interactivity and explainability, which actually also hinder their broad deployment in real-world systems. To address these limitations, this paper proposes a novel paradigm called CHAT-REC (ChatGPT Augmented Recommender System) that innovatively augments LLMs for building conversational recommender systems by converting user profiles and historical interactions into prompts. CHAT-REC is demonstrated to be effective in learning user preferences and establishing connections between users and products through in-context learning, which also makes the recommendation process more interactive and explainable. What's more, within the CHAT-REC framework, users' preferences can transfer to different products for cross-domain recommendations, and prompt-based injection of information into LLMs can also handle the cold-start scenarios with new items. In our experiments, CHAT-REC effectively improves the results of top-k recommendations and performs better in the zero-shot rating prediction task. CHAT-REC offers a novel approach to improving recommender systems and presents new practical scenarios for the implementation of AIGC (AI generated content) in recommender system studies.
|
| 24 |
+
|
| 25 |
+
Keywords: LLMs $\cdot$ Recommender System $\cdot$ Prompt Engineering
|
| 26 |
+
|
| 27 |
+
# 1 Introduction
|
| 28 |
+
|
| 29 |
+
With the scaling of model and corpus size, LLMs (Large Language Models) have shown remarkable capabilities, such as complex inference, knowledge infer-
|
| 30 |
+
|
| 31 |
+
ence, and external robustness [4,6]. These capabilities, referred to as Emergent Abilities, only become apparent after reaching a specific threshold of model parameters [20]. The emergence of LLMs has brought about a paradigm shift in research. Previously, applying models to downstream tasks typically involved adjusting model parameters through backpropagation. However, the latest development of LLMs [18] has enabled both researchers and practitioners to facilitate learning during the forward process by constructing prompts, namely In-Context Learning (ICL) [1]. In addition, the adoption of techniques such as Chain-of-Thought [21] and Instruct Learning [19] has further harnessed the reasoning capabilities and task generalization abilities of LLMs, thereby promoting their application across various domains.
|
| 32 |
+
|
| 33 |
+
In the era of big data, manual information searching has become infeasible and recommender systems have been widely deployed for automatically inferring people's preferences and providing high-quality recommendation services. However, due to the great limitations and drawbacks in both model design and data distribution biases, most existing recommender systems still fall short of great performance in their real-world deployment. One of the primary constraints is their poor interactivity, explainability, and lack of feedback mechanisms. Another limitation is the cold start problem, which makes it difficult to provide accurate recommendations for both new items and new users. Lastly, current recommender systems face challenges in making recommendations across multiple domains [26]. In many recommendation tasks, in order to obtain the required background or general knowledge, an external library or knowledge graph needs to be set up for retrieval [22] or multi-task learning needs to be trained on augmented data [8]. LLMs offer a promising solution to these challenges. They can generate more natural and explainable recommendations, solve the cold start problem, and make cross-domain recommendations. Additionally, LLMs have stronger interactivity and feedback mechanisms, which enhance the overall user experience. By leveraging internal knowledge, LLMs can improve the performance of recommender systems without relying on external retrievers [23].
|
| 34 |
+
|
| 35 |
+
Applying LLMs to recommendation tasks has already received several preliminary experimental trials [12,7,25]. Recommender system tasks are formulated as prompt-based natural language tasks, where user-item information and corresponding features are integrated with personalized prompt templates as model inputs. However, in the current research, LLMs are still involved in training as part of the model.
|
| 36 |
+
|
| 37 |
+
In this paper, we introduce a novel approach to learning conversational recommender systems augmented by LLMs, which possess both interactive and explainable capabilities. We present a paradigm called CHAT-REC (ChatGPT Augmented Recommender System) that does not require training and instead relies solely on in-context learning, resulting in more efficient and effective outcomes. With LLM-enhanced recommender system, it is beneficial to learn users' preferences during the conversation. After each step of the conversation, the user's preferences can be further drilled down to update the candidate recommendation results. In addition, users' preferences between products are linked,
|
| 38 |
+
|
| 39 |
+
allowing for better cross-domain product recommendations. We conducted recommendation and rating tests on real-world datasets and experimental results show that Chat-REC achieves significant improvements. Chat-REC sheds light on a promising technical route for the application of conversation AI such as ChatGPT in multiple recommendation scenarios.
|
| 40 |
+
|
| 41 |
+
Our contributions are summarized as follows:
|
| 42 |
+
|
| 43 |
+
- We introduce a novel and effective paradigm called CHAT-REC, which combines traditional recommender systems with LLMs through prompts, leveraging LLMs' ability to learn from context.
|
| 44 |
+
- CHAT-REC employs LLMs as a recommender system interface, enabling multi-round recommendations, enhancing interactivity and explainability.
|
| 45 |
+
- We evaluate our method on real-world datasets for top-k recommendation and rating prediction tasks and experiments demonstrate the effectiveness of CHAT-REC.
|
| 46 |
+
|
| 47 |
+
# 2 Related Work
|
| 48 |
+
|
| 49 |
+
# 2.1 Augmented Language Models
|
| 50 |
+
|
| 51 |
+
Augmented Language Models (ALMs) are a new research direction that aims to overcome the limitations of traditional Language Models (LMs) [5,1,4] by equipping them with reasoning skills and the ability to use external tools, which has served millions of users, such as the coding assistant Copilot [2], or more recently ChatGPT based on GPT3.5 and $\mathrm{GPT4^4}$ . Reasoning is defined as breaking down complex tasks into simpler subtasks that the LM can solve more easily by itself or with the help of tools [9,15,13], while tools are external modules that the LM can call to augment its context. ALMs can use these augmentations separately or in combination to expand their context processing ability and outperform most regular LMs on several benchmarks. ALMs can learn to reason, use tools, and even act, while still performing standard natural language tasks. This new research direction has the potential to address common limitations of traditional LMs such as interpretability, consistency, and scalability issues. By jointly discussing reasoning and tools, and tools and actions, ALMs can solve a broad range of complex tasks without heuristics, thus offering better generalization capabilities.
|
| 52 |
+
|
| 53 |
+
# 2.2 NLP for Recommendation
|
| 54 |
+
|
| 55 |
+
The field of recommender systems has had a long-standing relationship with natural language processing (NLP) techniques, especially when pre-trained language models (PLMs) comes out, which improve the performance of recommender systems and explainability [3,10,11]. PLMs are language models that have learned universal representations on large corpora in a self-supervised manner, and the
|
| 56 |
+
|
| 57 |
+
learned representations can be beneficial to a series of downstream NLP tasks. In the recommendation domain, PLMs can help alleviate the data sparsity issue, which is a major performance bottleneck of current deep recommendation models. By extracting and transferring knowledge from pre-trained models learned by different PLM-related training paradigms, researchers aim to improve recommendation performance from various perspectives, such as generality, sparsity, efficiency, and effectiveness. In this vibrant field, there are open issues and future research directions that need to be explored, including the connection between PLM-based training paradigms and different input data types for recommender systems. Overall, adapting language modelling paradigms for recommendation is seen as a promising direction in both academia and industry.
|
| 58 |
+
|
| 59 |
+
# 2.3 Cold-start Recommendation
|
| 60 |
+
|
| 61 |
+
Cold start recommendation is a problem that arises in recommender systems when users or items have no prior interaction records with the system. This means that there is no data available for the system to make personalized recommendations. To address this issue, solutions have been proposed that either learn to model content features [16] or transfer representations from auxiliary domains [24,26]. The former approach focuses on learning about the characteristics of the items or users based on their content, such as text, images, or metadata. The latter approach involves leveraging information from other domains, such as social networks or product descriptions, to infer user preferences. Additionally, there are approaches that aim to quickly adapt to new domains instead of only providing recommendations for cold-start cases. A good generalization ability of recommendation models on startup cases is essential to ensure a better user experience and increased engagement. In our work, we use the reasoning and background knowledge of LLMs to enhance the performance of recommender systems for cold start scenarios.
|
| 62 |
+
|
| 63 |
+
# 3 Method
|
| 64 |
+
|
| 65 |
+
# 3.1 Bridge Recommender Systems and LLMs
|
| 66 |
+
|
| 67 |
+
Recommender systems are designed to suggest items to users based on their preferences and behavior. Traditionally, these systems have relied on user data such as clickstream and purchase history to make recommendations. However, NLP techniques have proven to be valuable in expanding the scope of recommender systems beyond traditional user data.
|
| 68 |
+
|
| 69 |
+
NLP techniques can be used to analyze user-generated content such as reviews and social media posts to gain insights into user preferences and interests. LLMs can also be used to generate natural language responses to user queries, improving the overall user experience and engagement.
|
| 70 |
+
|
| 71 |
+
To bridge recommender systems and LLMs, we propose an enhanced recommender system module based on ChatGPT, a large language model trained by OpenAI. As the Fig. 1 shows, the module takes as input user-item history interactions, user profile, user query $Q_{i}$ , and history of dialogue $H_{< i}$ (if available,
|
| 72 |
+
|
| 73 |
+

|
| 74 |
+
Fig.1: Overview of CHAT-REC. The left side shows a dialogue between a user and ChatGPT. The middle side shows the flowchart to how CHAT-REC links traditional recommender systems with conversational AI such as ChatGPT. The right side describes the specific judgment in the process.
|
| 75 |
+
|
| 76 |
+
and the notation $< i$ denotes the dialogue history prior to the current query), and interfaces with any recommender system R. If the task is determined to be a recommendation task, the module uses R to generate a candidate set of items. Otherwise, it directly outputs a response to the user, such as an explanation of a generation task or a request for item details.
|
| 77 |
+
|
| 78 |
+
The prompt constructor module in the enhanced recommender system takes multiple inputs to generate a natural language paragraph that captures the user's query and recommendation information. The inputs are as follows:
|
| 79 |
+
|
| 80 |
+
- User-item history interactions, which refers to the user's past interactions with items, such as items they have clicked, purchased, or rated. This information is used to understand the user's preferences and to personalize the recommendation.
|
| 81 |
+
- User profile, which contains demographic and preference information about the user. This may include age, gender, location, and interests. The user profile helps the system understand the user's characteristics and preferences.
|
| 82 |
+
- User query $Q_{i}$ , which is the user's specific request for information or recommendation. This may include a specific item or genre they are interested in, or a more general request for recommendations in a particular category.
|
| 83 |
+
- History of dialogue $H_{<i}$ , which contains the previous conversation between the user and the system. This information is used to understand the context of the user's query and to provide a more personalized and relevant response.
|
| 84 |
+
|
| 85 |
+
As shown in Fig. 2, the CHAT-REC framework proposed in this paper empowers recommender systems with a conversational interface, which makes interactive and explainable recommendation possible. Formally, based on the
|
| 86 |
+
|
| 87 |
+
aforementioned inputs, the prompt constructor module generates a natural language paragraph that summarizes the user's query and recommendation information, and provides a more personalized and relevant response to the user's request. The intermediate answer generated by the recommender system is then used to refine the prompt constructor and generate an optimized prompt to further compress and refine the candidate set. The resulting recommendation and a brief explanation are output to the user.
|
| 88 |
+
|
| 89 |
+
For example, in the first round of Q&A, the user requests action movies. The system determines that a recommendation task is needed, and executes the Recommend Action Movies module using the input information. The intermediate answer $A_{1}$ contains the top-20 results, which are then reranked and adjusted in the second module using the input information to generate the final output of the top-5 results.
|
| 90 |
+
|
| 91 |
+
In the second round of Q&A, the user asks why the movie "Cargo" was recommended. The system determines that no recommendation task is needed and instead executes the explanation for the recommendation module, using the movie title, history interaction, and user profile as inputs. The answer $A_{2}$ is then generated, which provides a brief explanation of the recommendation, including information about the user's general interests and the specific characteristics of the movie that may be appealing to the user.
|
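For concreteness, this two-branch control flow can be outlined as a minimal Python sketch. It is only an illustration under stated assumptions: `recommender.top_k(user_id, k)` is a hypothetical helper returning a ranked list of item titles, and `llm_complete(prompt)` stands in for a completion API wrapper; neither is the authors' released implementation.

```python
# Minimal sketch of the Chat-REC control flow described above.
# Assumptions (the paper's own code is not shown here): `recommender.top_k(user_id, k)`
# returns a ranked list of item titles, and `llm_complete(prompt)` wraps a completion API.

from dataclasses import dataclass, field
from typing import List


@dataclass
class DialogueState:
    user_profile: str                     # e.g. "He is 24 years old, and works as a technician."
    history_interactions: List[str]       # e.g. ["Net, The (1995) - Sci-Fi Thriller - rated 3", ...]
    dialogue_history: List[str] = field(default_factory=list)  # H_{<i}


def needs_recommendation(query: str) -> bool:
    """Rough task router: decide whether the query asks for new items."""
    keywords = ("recommend", "suggest", "what should i watch", "any movies")
    return any(k in query.lower() for k in keywords)


def build_prompt(state: DialogueState, query: str, candidates: List[str]) -> str:
    """Prompt constructor: fuse profile, history, dialogue and candidates into one paragraph."""
    return (
        f"User profile: {state.user_profile}\n"
        f"Watching history with ratings: {'; '.join(state.history_interactions)}\n"
        f"Previous dialogue: {' | '.join(state.dialogue_history) or 'none'}\n"
        f"Candidate movies from the recommender system: {', '.join(candidates)}\n"
        f"User query: {query}\n"
        "Select the 5 movies from the candidate list that best match the user's preferences. "
        "Only output the movie names."
    )


def chat_rec_turn(state: DialogueState, query: str, user_id: int, recommender, llm_complete) -> str:
    """One Q&A round: route the task, optionally call the recommender R, then ask the LLM."""
    if needs_recommendation(query):
        candidates = recommender.top_k(user_id, k=20)   # intermediate answer, e.g. top-20 items
        answer = llm_complete(build_prompt(state, query, candidates))
    else:
        # Non-recommendation turns (e.g. "why was this movie recommended?") are answered directly.
        answer = llm_complete(
            f"User profile: {state.user_profile}\n"
            f"Watching history: {'; '.join(state.history_interactions)}\n"
            f"Question: {query}\n"
            "Answer briefly, explaining in terms of the user's preferences."
        )
    state.dialogue_history.extend([f"Q: {query}", f"A: {answer}"])
    return answer
```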
| 92 |
+
|
| 93 |
+
# 3.2 Recommendation Based on Candidate Set Compression
|
| 94 |
+
|
| 95 |
+
Traditional recommender systems typically generate a small number of sorted candidate products, each with a score that reflects the system's recommendation confidence or result quality. However, considering the huge size of the product set, the performance obtained by most existing recommender systems is still far from satisfactory, leaving a very large room for improvement.
|
| 96 |
+
|
| 97 |
+
This article proposes a method of using LLMs to improve the performance of recommender systems by narrowing down the candidate set. The recommender system generates a large set of candidate items, which can be overwhelming for the user. LLMs play several different critical roles in narrowing down the product candidate set within the system. Firstly, we convert users' profiles and historical interactions into prompts, including the item description and user rating. Secondly, LLMs are asked to summarize user preferences for items in a domain based on the above information. LLMs can learn from context and effectively capture users' background information and preferences. With this information, they can establish the relationship between product attributes and user preferences, enabling them to make better product recommendations. By utilizing in-context learning, LLMs can enhance their recommendation reasoning ability, resulting in more accurate and personalized product recommendations.
|
| 98 |
+
|
| 99 |
+
Once the LLMs have learned the user's preferences, the candidate set generated by the recommender system is provided to the LLMs. The LLMs can further filter and sort the candidate set based on the user's preferences. This approach ensures that the user is presented with a smaller, more relevant set of items, increasing the likelihood that they will find something they like.
|
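The two prompting steps above (preference summarization in context, then candidate-set compression) can be sketched as follows. The wording loosely follows the top-k template listed in Appendix A.1; the resulting strings would be fed to whatever completion wrapper is in use, and the output parser is illustrative rather than the paper's exact post-processing.

```python
# Sketch of the two prompting steps: (1) expose profile and rated history so the LLM can
# summarize the user's preferences in context, (2) hand over the 20 candidates and ask for
# the best 5. Wording loosely follows the top-k template in Appendix A.1.

from typing import List


def preference_prompt(user_profile: str, rated_history: List[str]) -> str:
    """Step 1: user profile + historical ratings as in-context evidence of preferences."""
    return (
        "I want you to recommend movies to a user based on some personal information "
        "and historical records of film watching.\n"
        f"User profile: {user_profile}\n"
        "Watched movies (name, genre, rating out of 5):\n- " + "\n- ".join(rated_history)
    )


def compression_prompt(base: str, candidates: List[str], top1: str) -> str:
    """Step 2: ask the LLM to shrink the candidate set, keeping the recommender's top-1 as background."""
    return (
        base
        + "\nHere's a list of movies that he is likely to like: " + ", ".join(candidates)
        + "\nPlease select the top 5 movies in the list that he is most likely to like. "
        + f"The first film to be selected is {top1}. Please select the remaining 4 movies. "
        + "Only output the movie names, one per line."
    )


def parse_top5(answer: str, candidates: List[str]) -> List[str]:
    """Keep only lines that name a known candidate, preserving the LLM's ordering."""
    picked = []
    for line in answer.splitlines():
        title = line.strip().lstrip("0123456789.- ").strip()
        if title in candidates and title not in picked:
            picked.append(title)
    return picked[:5]
```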
| 100 |
+
|
| 101 |
+

|
| 102 |
+
Fig.2: Case study of interactive recommendation. It shows two conversations between different users and LLM. Where the user profile and historical users are converted into corresponding prompts for personalized recommendations, but the input of this part of the prompts is not visible to the user. The dialogue on the left shows that when a user asks why the movie was recommended, LLM can give an explanation based on the user's preferences and specific information about the recommended movie. The dialog on the right shows that CHAT-REC can make multiple rounds of recommendations based on user feedback. Questions about the details of the movie can also be answered in a specific way. LLM also takes into account ethical and moral issues when recommending movies.
|
| 103 |
+
|
| 104 |
+
# 3.3 Cold-start Recommendations
|
| 105 |
+
|
| 106 |
+
With the textual description and profile information about the products, regardless the new products or the old ones, LLMs can effectively relate such products with each other, which provides us with the opportunity for solving the persistent cold-start recommendation problem once and for all.
|
| 107 |
+
|
| 108 |
+
For example, if a user asks for recommendations for a new movie that was released in 2021, the recommender system could use text data about the movie to generate an embedding and then calculate similarities to other movies in the system to make recommendations. This capability allows recommender systems to make relevant and accurate recommendations for new items, improving the overall user experience.
|
| 109 |
+
|
| 110 |
+
Large language models can use the vast amount of knowledge they contain to help recommender systems alleviate the cold-start problem of new items, i.e., recommending items that lack a large number of user interactions. However, since the knowledge held by ChatGPT is limited to September 2021, ChatGPT does not cope well when encountering unknown items, such as a user requesting to recommend some new movies released in 2023 or content related to a movie that ChatGPT is not aware of, as shown in the top part of Fig. 3. To address this issue, we introduce external information about new items, utilizing large language models to generate corresponding embedding representations and cache them. When encountering new item recommendations, we calculate the similarity between item embeddings and embeddings of user requests and preferences, then retrieve the most relevant item information based on the similarity and construct a prompt to input to ChatGPT for recommendation, as illustrated in the lower half of Fig. 3. This approach allows the recommender system to work in conjunction with ChatGPT to better recommend new items, thus enhancing the user experience.
|
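A minimal sketch of this retrieval step is given below, assuming a generic text-embedding function `embed` (any text or sentence embedding model) and illustrative field names; it is not the exact pipeline used in the paper.

```python
# Sketch of the new-item pipeline: embed external item descriptions once, cache the
# vectors, and at query time retrieve the items most similar to the user's request and
# preferences before building the prompt. `embed` is an assumed text-embedding function.

from typing import Callable, Dict, List

import numpy as np

Embed = Callable[[str], np.ndarray]


def build_item_cache(new_items: Dict[str, str], embed: Embed) -> Dict[str, np.ndarray]:
    """Embed and cache descriptions of items the LLM cannot know about (e.g. post-cutoff releases)."""
    return {title: embed(description) for title, description in new_items.items()}


def cosine(a: np.ndarray, b: np.ndarray) -> float:
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-12))


def retrieve_new_items(user_request: str, cache: Dict[str, np.ndarray],
                       embed: Embed, top_n: int = 5) -> List[str]:
    """Rank cached new items by similarity to the user's request and preferences."""
    query_vec = embed(user_request)
    ranked = sorted(cache.items(), key=lambda kv: cosine(query_vec, kv[1]), reverse=True)
    return [title for title, _ in ranked[:top_n]]


def new_item_prompt(user_request: str, retrieved: Dict[str, str]) -> str:
    """Inject the retrieved external descriptions so the LLM can recommend beyond its cutoff."""
    context = "\n".join(f"- {title}: {desc}" for title, desc in retrieved.items())
    return (
        "The following recently released movies are not in your training data:\n"
        f"{context}\n"
        f"User request: {user_request}\n"
        "Recommend the most suitable of these movies and briefly explain why."
    )
```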
| 111 |
+
|
| 112 |
+
# 3.4 Cross-Domain Recommendations
|
| 113 |
+
|
| 114 |
+
The LLMs-augmented recommender system introduced above can be used to address several challenging tasks, that are hard or even impossible to be addressed with conventional recommender systems, such as cross-domain recommendation [26] and cold-start recommendation [17]. In this part, we will first talk about how to use the LLMs-augmented recommender system for the cross-domain recommendation.
|
| 115 |
+
|
| 116 |
+
LLMs pre-trained with information across the Internet can actually serve as a multi-perspective knowledge base [14]. Besides the target products in one domain, such as movies, the LLMs not only have broad knowledge about products in many other domains, like music and books, but also understand the relations among the products across the domains mentioned above.
|
| 117 |
+
|
| 118 |
+
For example, as illustrated in Fig. 4, once the conversation regarding movie recommendations is finished, the user inquires LLM for suggestions on other types of works. LLM then proceeds to recommend a variety of options, such as
|
| 119 |
+
|
| 120 |
+

|
| 121 |
+
Fig. 3: Case Study of New Item Recommendation. The top shows that ChatGPT is unable to recommend new items beyond the timeframe of its training data. The middle part demonstrates the process of how to utilize external information about new items to enable ChatGPT to handle recommendations for new items. The bottom shows that ChatGPT can effectively handle recommendations for new items after incorporating external information.
|
| 122 |
+
|
| 123 |
+

|
| 124 |
+
Fig. 4: Case study of cross-domain recommendation. After the conversation about the movie recommendation is completed, the user asks LLM to recommend works other than movies. It can be seen that LLM recommends different types of works, including books, TV series, podcasts, and video games, according to the user's movie preferences. This shows that LLM can migrate the user's movie preferences to other items and thus achieve cross-domain recommendations.
|
| 125 |
+
|
| 126 |
+
books, TV series, podcasts, and video games, based on the user's movie preferences. This demonstrates LLM's ability to transfer the user's preferences from movies to other items, resulting in cross-domain recommendations. This cross-domain recommendation capability has the potential to significantly expand the scope and relevance of recommender systems.
|
| 127 |
+
|
| 128 |
+
# 4 Experiment
|
| 129 |
+
|
| 130 |
+
# 4.1 Dataset and Experimental Settings
|
| 131 |
+
|
| 132 |
+
The dataset used in our experiment is MovieLens 100K, which is a benchmark dataset of a real-world recommender system. It comprises 100,000 movie ratings provided by 943 users on a scale of 1 to 5 across 1,682 movies. Additionally, the dataset contains demographic information about the users, such as age, gender, occupation, and zip code, as well as movie information, such as title, release year, and genres. To create our experimental dataset, we randomly selected 200 users. Table 1 provides detailed statistical information about the dataset used in the experiment.
|
| 133 |
+
|
| 134 |
+
When evaluating the performance of top-k recommendations, Precision, Recall, and Normalized Discounted Cumulative Gain (NDCG) are used. For rating
|
| 135 |
+
|
| 136 |
+
Table 1: Details of the dataset used for evaluation.
|
| 137 |
+
|
| 138 |
+
<table><tr><td>Dataset</td><td>Users</td><td>Items</td><td>Ratings</td><td>Rating Scale</td><td>Density</td></tr><tr><td>MovieLens 100K</td><td>943</td><td>1,682</td><td>100,000</td><td>[1-5]</td><td>6.304%</td></tr></table>
|
| 139 |
+
|
| 140 |
+
prediction task, the Root Mean Squared Error (RMSE) and Mean Absolute Error (MAE) are employed as evaluation metrics.
|
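For reference, these metrics can be implemented as follows, using the standard definitions (binary relevance for NDCG@k); the paper does not spell out its exact variants, so this is a sketch rather than the authors' evaluation code.

```python
# Reference implementations of the metrics named above, following the standard
# definitions (binary relevance for NDCG@k).

import math
from typing import List, Sequence


def precision_at_k(recommended: Sequence[str], relevant: set, k: int) -> float:
    return sum(1 for item in recommended[:k] if item in relevant) / k


def recall_at_k(recommended: Sequence[str], relevant: set, k: int) -> float:
    if not relevant:
        return 0.0
    return sum(1 for item in recommended[:k] if item in relevant) / len(relevant)


def ndcg_at_k(recommended: Sequence[str], relevant: set, k: int) -> float:
    dcg = sum(1.0 / math.log2(i + 2) for i, item in enumerate(recommended[:k]) if item in relevant)
    idcg = sum(1.0 / math.log2(i + 2) for i in range(min(len(relevant), k)))
    return dcg / idcg if idcg > 0 else 0.0


def rmse(predictions: List[float], truths: List[float]) -> float:
    return math.sqrt(sum((p - t) ** 2 for p, t in zip(predictions, truths)) / len(predictions))


def mae(predictions: List[float], truths: List[float]) -> float:
    return sum(abs(p - t) for p, t in zip(predictions, truths)) / len(predictions)
```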
| 141 |
+
|
| 142 |
+
# 4.2 Baselines
|
| 143 |
+
|
| 144 |
+
The baseline methods studied in the experiment include both classic recommender system models and the LLMs-augmented recommender systems proposed in this paper. Detailed information about the comparison methods studied in our experiments is provided as follows:
|
| 145 |
+
|
| 146 |
+
- LightFM is a recommendation algorithm that combines collaborative filtering and content-based methods to recommend items to users.
|
| 147 |
+
- LightGCN is a graph-based collaborative filtering algorithm that uses a simplified graph convolutional network (GCN) to model the user-item interactions in a recommender system.
|
| 148 |
+
- Item-KNN is a neighborhood-based collaborative filtering algorithm that uses the similarity between items to make recommendations to users.
|
| 149 |
+
- Matrix Factorization (MF) is a widely used collaborative filtering algorithm that represents users and items as latent factors in a low-dimensional space.
|
| 150 |
+
|
| 151 |
+
We select three representative models from the GPT-3 and GPT-3.5 series as LLMs in CHAT-REC:
|
| 152 |
+
|
| 153 |
+
- gpt-3.5-turbo is the most capable GPT-3.5 model and optimized for chat.
|
| 154 |
+
- text-davinci-003 can do any language task with better quality, longer output, and consistent instruction-following.
|
| 155 |
+
- text-davinci-002 is similar to text-davinci-003 but is trained with supervised fine-tuning instead of reinforcement learning.
|
| 156 |
+
|
| 157 |
+
The model notations, like CHAT-REC (gpt-3.5-turbo), denote the CHAT-REC framework built by adopting "gpt-3.5-turbo" as the backbone model.
|
| 158 |
+
|
| 159 |
+
# 4.3 Result and Analysis
|
| 160 |
+
|
| 161 |
+
Top-5 Recommendation. As presented in Table 2, our proposed CHAT-REC framework has demonstrated effective improvement of traditional recommender systems in the top-k recommendation task. The NDCG scores of all three GPT-3.5 models surpassed that of LightGCN, with text-davinci-003 delivering the best result and demonstrating strong contextual learning abilities. Specifically, the precision score of 0.3240 is $6.93\%$ higher than that of LightGCN, while the NDCG score of 0.3802 is $11.01\%$ higher. However, the recall rate of 0.1404 is slightly lower than that of LightGCN by $3.51\%$. It is noteworthy that the performance of gpt-3.5-turbo was slightly weaker than that of text-davinci-003.
|
| 162 |
+
|
| 163 |
+
Table 2: Results of top-5 recommendation.
|
| 164 |
+
|
| 165 |
+
<table><tr><td>Models</td><td>Precision</td><td>Recall</td><td>NDCG</td></tr><tr><td>LightFM</td><td>0.2830</td><td>0.1410</td><td>0.2846</td></tr><tr><td>LightGCN</td><td>0.3030</td><td>0.1455</td><td>0.3425</td></tr><tr><td>CHAT-REC (gpt-3.5-turbo)</td><td>0.3103</td><td>0.1279</td><td>0.3696</td></tr><tr><td>CHAT-REC (text-davinci-003)</td><td>0.3240 (+6.93%)</td><td>0.1404 (-3.51%)</td><td>0.3802 (+11.01%)</td></tr><tr><td>CHAT-REC (text-davinci-002)</td><td>0.3031</td><td>0.1240</td><td>0.3629</td></tr></table>
|
| 166 |
+
|
| 167 |
+
Rating Prediction. As illustrated in Table 3, CHAT-REC outperforms traditional recommender systems in predicting movie ratings. The experimental results demonstrate that LLMs can effectively learn user preferences from user portraits and historical interactions through in-context learning, without any explicit training, and accurately predict user ratings for candidate movies. Since LightGCN is not well-suited for rating prediction tasks, it was excluded from our experimental range. Among the three GPT-3.5 models tested, text-davinci-003 achieved the best result, with an RMSE of 0.785, which is $15.86\%$ lower than that of Item-KNN, and an MAE of 0.593, which is $19.21\%$ lower. Text-davinci-002 came in second place. However, the performance of gpt-3.5-turbo was slightly weaker than that of Item-KNN. The experimental results reveal that even without relying on recommender systems, LLMs can achieve better results in predicting user preferences for specific movies. The weaker performance of gpt-3.5-turbo is due to the model's emphasis on human-computer dialogue ability at the cost of some in-context learning ability, which is consistent with other research conclusions. Additionally, it can also be concluded that the performance of gpt-3.5-turbo in numerical prediction tasks is weaker than that of text-davinci-003 and text-davinci-002.
|
| 168 |
+
|
| 169 |
+
Table 3: Results of movie rating prediction.
|
| 170 |
+
|
| 171 |
+
<table><tr><td>Models</td><td>RMSE</td><td>MAE</td></tr><tr><td>MF</td><td>0.988</td><td>0.771</td></tr><tr><td>Item-KNN</td><td>0.933</td><td>0.734</td></tr><tr><td>CHAT-REC (gpt-3.5-turbo)</td><td>0.969</td><td>0.756</td></tr><tr><td>CHAT-REC (text-davinci-003)</td><td>0.785</td><td>0.593</td></tr><tr><td>CHAT-REC (text-davinci-002)</td><td>0.8309</td><td>0.6215</td></tr></table>
|
| 172 |
+
|
| 173 |
+
During the experiments, we discovered that CHAT-REC's most important ability is to optimize the refined candidate set of the recommender system, meaning to re-sort the movies that the user may like but were placed further down in the recommender system's candidate set. This requires the application of LLMs' knowledge of movies, understanding of user preferences, and the ability to reason about the matching relationship between the two. To confirm this finding, we conducted separate empirical studies and asked LLMs again, in the same conversation, about movies that appeared in the recommender system's top 5 but did not appear in LLMs' top 5. LLMs' feedback revealed that it is unlikely that the user would like the movie or it is difficult to determine whether the user would like it, with clear reasons given. This inconsistency shows that CHAT-REC's
|
| 174 |
+
|
| 175 |
+
recommendations are entirely based on an understanding of user preferences and movie information.
|
| 176 |
+
|
| 177 |
+
# 4.4 Ablation Study
|
| 178 |
+
|
| 179 |
+
In this study, we select the text-davinci-003 model, which achieved the best results in both top-k recommendation and rating prediction, to investigate the impact of different prompts and temperatures on the model's performance. The results are shown in Fig. 5.
|
| 180 |
+
|
| 181 |
+

|
| 182 |
+
Fig. 5: Performance with different prompts and temperatures.
|
| 183 |
+
|
| 184 |
+

|
| 185 |
+
|
| 186 |
+

|
| 187 |
+
|
| 188 |
+
In the context of this study, "w/random" means that the 20 candidates generated by the recommender system are randomly shuffled before being provided to the LLM as the candidate-set prompt input, while "w/top1" means that the top-1 recommendation is not given as initial background knowledge when constructing the prompt; instead, the LLM is directly asked to select 5 movies from the candidate set. The temperature parameter affects the answers generated by the LLM: lower temperatures yield more deterministic answers, and higher temperatures more random ones. All experiments, except the one with a temperature of 0, report the average of 5 runs.
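To make the two ablation variants concrete, the sketch below shows one plausible way to assemble the "w/random" and "w/top1" prompt inputs and to query a completion model at a given temperature. The template text, the legacy `openai.Completion.create` call, and all variable names are assumptions for illustration, not the authors' implementation.

```python
import random
import openai  # legacy (pre-1.0) OpenAI SDK assumed

def build_candidate_prompt(candidates, top1=None, shuffle=False, seed=0):
    """Build the candidate-set part of the top-5 prompt.

    shuffle=True -> the "w/random" variant (candidate order is shuffled).
    top1=None    -> the "w/top1" variant (no top-1 background knowledge).
    """
    items = list(candidates)
    if shuffle:
        random.Random(seed).shuffle(items)
    prompt = "Here's a list of movies that he is likely to like: " + ", ".join(items) + "\n"
    if top1 is not None:
        prompt += (f"Please select the top 5 movies in the list. The first film to be "
                   f"selected is {top1}. Please select the remaining 4 movies. "
                   "Only output the movie name.")
    else:
        prompt += ("Please select the top 5 movies in the list that are most likely to "
                   "be liked. Only output the movie name.")
    return prompt

def ask_llm(prompt, temperature=0.9):
    # Hypothetical call; exact parameters depend on the SDK version in use.
    resp = openai.Completion.create(model="text-davinci-003", prompt=prompt,
                                    temperature=temperature, max_tokens=128)
    return resp["choices"][0]["text"]
```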
|
| 189 |
+
|
| 190 |
+
The results demonstrate that performance decreases slightly after the order of the candidate set is shuffled. For example, at a temperature of 0.9, the NDCG of text-davinci-003 drops from 0.3802 to 0.3653, a decrease of $3.92\%$. The performance of CHAT-REC decreases significantly when the recommender system's top-1 is missing from the prompt: at a temperature of 0.9, the NDCG of text-davinci-003 drops from 0.3802 to 0.3055, a decrease of $19.65\%$. This trend was observed across temperatures, and the experiments show that the best results are achieved at a temperature of 0.9.
|
| 191 |
+
|
| 192 |
+
It is worth noting that the existence of the recommender system was not explicitly mentioned in CHAT-REC's prompt, and the function of the recommender system was merely to provide a candidate set. However, the design of the candidate set can significantly impact CHAT-REC's performance. Our experiment revealed that CHAT-REC's prompt design can effectively inject the recommender
|
| 193 |
+
|
| 194 |
+
system's knowledge implicitly into LLMs. This implicit knowledge is reflected in the ranking of movies in the candidate set, and the use of Top1 as the background can further strengthen this information. This implicit knowledge can be captured by LLMs in in-context learning and can enhance the recommendation performance.
|
| 195 |
+
|
| 196 |
+
# 5 Conclusion
|
| 197 |
+
|
| 198 |
+
In this paper, we present CHAT-REC, which bridges recommender systems and LLMs by converting user information and user-item interactions into prompts. We evaluated our approach on the tasks of top-k recommendation and zero-shot movie rating prediction. In conclusion, LLMs offer significant potential for enhancing recommender systems by improving interactivity, explainability, and cross-domain recommendation. In addition, prompt design plays an important role: the experiments show that implicitly expressing the recommender system's knowledge in the prompt can effectively improve recommendation performance.
|
| 199 |
+
|
| 200 |
+
# References
|
| 201 |
+
|
| 202 |
+
1. Brown, T., Mann, B., Ryder, N., Subbiah, M., Kaplan, J.D., Dhariwal, P., Neelakantan, A., Shyam, P., Sastry, G., Askell, A., et al.: Language models are few-shot learners. Advances in neural information processing systems 33, 1877-1901 (2020)
|
| 203 |
+
2. Chen, M., Tworek, J., Jun, H., Yuan, Q., Pinto, H.P.d.O., Kaplan, J., Edwards, H., Burda, Y., Joseph, N., Brockman, G., et al.: Evaluating large language models trained on code. arXiv preprint arXiv:2107.03374 (2021)
|
| 204 |
+
3. Chen, X., Chen, H., Xu, H., Zhang, Y., Cao, Y., Qin, Z., Zha, H.: Personalized fashion recommendation with visual explanations based on multimodal attention network: Towards visually explainable recommendation. In: Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval. pp. 765-774 (2019)
|
| 205 |
+
4. Chowdhery, A., Narang, S., Devlin, J., Bosma, M., Mishra, G., Roberts, A., Barham, P., Chung, H.W., Sutton, C., Gehrmann, S., et al.: Palm: Scaling language modeling with pathways. arXiv preprint arXiv:2204.02311 (2022)
|
| 206 |
+
5. Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)
|
| 207 |
+
6. Fu, Y., Peng, H., Khot, T.: How does GPT obtain its ability? Tracing emergent abilities of language models to their sources. Yao Fu's Notion (Dec 2022), https://yaofu.notion.site/How-does-GPT-Obtain-its-Ability-Tracing-Emergent-Abilities-of-Language-Models-to-their-Sources-b
|
| 208 |
+
7. Geng, S., Liu, S., Fu, Z., Ge, Y., Zhang, Y.: Recommendation as language processing (rlp): A unified pretrain, personalized prompt & predict paradigm (p5). In: Proceedings of the 16th ACM Conference on Recommender Systems. pp. 299-315 (2022)
|
| 209 |
+
8. Khashabi, D., Min, S., Khot, T., Sabharwal, A., Tafjord, O., Clark, P., Hajishirzi, H.: Unifiedqa: Crossing format boundaries with a single qa system (2020)
|
| 210 |
+
|
| 211 |
+
9. LeCun, Y.: A path towards autonomous machine intelligence version 0.9.2, 2022-06-27. Open Review 62 (2022)
|
| 212 |
+
10. Li, L., Zhang, Y., Chen, L.: Generate neural template explanations for recommendation. In: Proceedings of the 29th ACM International Conference on Information & Knowledge Management. pp. 755-764 (2020)
|
| 213 |
+
11. Li, L., Zhang, Y., Chen, L.: Personalized transformer for explainable recommendation. arXiv preprint arXiv:2105.11601 (2021)
|
| 214 |
+
12. Liu, P., Zhang, L., Gulla, J.A.: Pre-train, prompt and recommendation: A comprehensive survey of language modelling paradigm adaptations in recommender systems. arXiv preprint arXiv:2302.03735 (2023)
|
| 215 |
+
13. Parisi, A., Zhao, Y., Fiedel, N.: Talm: Tool augmented language models (2022)
|
| 216 |
+
14. Petroni, F., Rocktäschel, T., Lewis, P., Bakhtin, A., Wu, Y., Miller, A.H., Riedel, S.: Language models as knowledge bases? arXiv preprint arXiv:1909.01066 (2019)
|
| 217 |
+
15. Schick, T., Dwivedi-Yu, J., Dessi, R., Raileanu, R., Lomeli, M., Zettlemoyer, L., Cancedda, N., Scialom, T.: Toolformer: Language models can teach themselves to use tools (2023)
|
| 218 |
+
16. Shi, S., Zhang, M., Yu, X., Zhang, Y., Hao, B., Liu, Y., Ma, S.: Adaptive feature sampling for recommendation with missing content feature values. In: Proceedings of the 28th ACM International Conference on Information and Knowledge Management. pp. 1451-1460 (2019)
|
| 219 |
+
17. Sun, C., Liu, H., Liu, M., Ren, Z., Gan, T., Nie, L.: Lara: Attribute-to-feature adversarial learning for new-item recommendation. In: Proceedings of the 13th international conference on web search and data mining. pp. 582-590 (2020)
|
| 220 |
+
18. Touvron, H., Lavril, T., Izacard, G., Martinet, X., Lachaux, M.A., Lacroix, T., Rozière, B., Goyal, N., Hambro, E., Azhar, F., et al.: Llama: Open and efficient foundation language models. arXiv preprint arXiv:2302.13971 (2023)
|
| 221 |
+
19. Wei, J., Bosma, M., Zhao, V., Guu, K., Yu, A.W., Lester, B., Du, N., Dai, A.M., Le, Q.V.: Finetuned language models are zero-shot learners. ArXiv abs/2109.01652 (2021)
|
| 222 |
+
20. Wei, J., Tay, Y., Bommasani, R., Raffel, C., Zoph, B., Borgeaud, S., Yogatama, D., Bosma, M., Zhou, D., Metzler, D., hsin Chi, E.H., Hashimoto, T., Vinyals, O., Liang, P., Dean, J., Fedus, W.: Emergent abilities of large language models. ArXiv abs/2206.07682 (2022)
|
| 223 |
+
21. Wei, J., Wang, X., Schuurmans, D., Bosma, M., hsin Chi, E.H., Le, Q., Zhou, D.: Chain of thought prompting elicits reasoning in large language models. ArXiv abs/2201.11903 (2022)
|
| 224 |
+
22. Xu, Y., Zhu, C., Xu, R., Liu, Y., Zeng, M., Huang, X.: Fusing context into knowledge graph for commonsense question answering (2021)
|
| 225 |
+
23. Yu, W., Iter, D., Wang, S., Xu, Y., Ju, M., Sanyal, S., Zhu, C., Zeng, M., Jiang, M.: Generate rather than retrieve: Large language models are strong context generators (2023)
|
| 226 |
+
24. Yuan, F., Zhang, G., Karatzoglou, A., Jose, J., Kong, B., Li, Y.: One person, one model, one world: Learning continual user representation without forgetting. In: Proceedings of the 44th International ACM SIGIR Conference on Research and Development in Information Retrieval. pp. 696-705 (2021)
|
| 227 |
+
25. Zhang, Y., Ding, H., Shui, Z., Ma, Y., Zou, J., Deoras, A., Wang, H.: Language models as recommender systems: Evaluations and limitations (2021)
|
| 228 |
+
26. Zhu, F., Wang, Y., Chen, C., Zhou, J., Li, L., Liu, G.: Cross-domain recommendation: challenges, progress, and prospects. arXiv preprint arXiv:2103.01696 (2021)
|
| 229 |
+
|
| 230 |
+
# A Implementation Details
|
| 231 |
+
|
| 232 |
+
# A.1 Prompts
|
| 233 |
+
|
| 234 |
+
Below, we list the prompts used in top-k recommendation and zero-shot movie rating tasks.
|
| 235 |
+
|
| 236 |
+
I want you to recommend movies to a user based on some personal information and historical records of film watching.
|
| 237 |
+
|
| 238 |
+
user profile: {user profile} (e.g. He is 24 years old, and works as a technician.)
|
| 239 |
+
|
| 240 |
+
The historical records include the movie name, type and how many points he/she scored out of 5. The higher the score, the more he likes the movie. You are encouraged to learn his movie preferences from the movies he has watched. Here are some examples:
|
| 241 |
+
|
| 242 |
+
{history/movie} (e.g. a Sci-Fi Thriller movie called Net, The (1995), and scored it a 3)
|
| 243 |
+
|
| 244 |
+
Here's a list of movies that he is likely to like: {candidate_list}
|
| 245 |
+
|
| 246 |
+
Please select the top 5 movies in the list that are most likely to be liked. The first film to be selected is {top1/movie}. Please select the remaining 4 movies. Only output the movie name.
|
| 247 |
+
|
| 248 |
+
Fig. 6: Prompt for top-k recommendation task.
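As a hypothetical illustration of how the Fig. 6 template can be instantiated, the snippet below fills the placeholders with an example user profile, watching history, candidate list and top-1 movie; the field values are invented for demonstration and are not taken from the experiments.

```python
TOP_K_TEMPLATE = (
    "I want you to recommend movies to a user based on some personal information "
    "and historical records of film watching.\n\n"
    "user profile: {user_profile}\n\n"
    "Here are some examples:\n{history}\n\n"
    "Here's a list of movies that he is likely to like: {candidate_list}\n\n"
    "Please select the top 5 movies in the list that are most likely to be liked. "
    "The first film to be selected is {top1}. Please select the remaining 4 movies. "
    "Only output the movie name."
)

prompt = TOP_K_TEMPLATE.format(
    user_profile="He is 24 years old, and works as a technician.",
    history="a Sci-Fi Thriller movie called Net, The (1995), and scored it a 3",
    candidate_list="Toy Story (1995), Fargo (1996), Die Hard (1988), Wrong Trousers, The (1993)",
    top1="Toy Story (1995)",
)
print(prompt)
```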
|
| 249 |
+
|
| 250 |
+
I want you to act as a movie recommender. Your task is to predict the user's rating of some movies out of 5 based on his profile and historical records of film watching. Clear scores must be given.
|
| 251 |
+
|
| 252 |
+
user profile:{user profile}
|
| 253 |
+
|
| 254 |
+
The historical records include the movie name and how many points he/she scored out of 5. The higher the score, the more he likes the movie. You are encouraged to learn his movie preferences from the movies he has watched.
|
| 255 |
+
|
| 256 |
+
{history/movie}
|
| 257 |
+
|
| 258 |
+
Here's a list of movies. You are going to predict his ratings for these movies. The range of the score is 0-5. A definite value must be given. Separate movie and rating by "-". Output should be formatted as: [movie]-[rating]
|
| 259 |
+
|
| 260 |
+
movie_list:{movie_list}
|
| 261 |
+
|
| 262 |
+
Fig. 7: Prompt for movie rating task.
|
| 263 |
+
|
| 264 |
+
# A.2 Example Answers
|
| 265 |
+
|
| 266 |
+
In fact, the LLMs do not always output answers in the expected format, especially at higher temperatures. In Table 4, we give some failed cases
|
| 267 |
+
|
| 268 |
+
while invoking the LLMs' API to generate answers. During the experiment, outputs that do not match the format are automatically retried.
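The automatic retry described above can be implemented with a simple format check on the LLM output. The sketch below is a minimal, hypothetical version for the rating task, in which `query_llm` stands in for the actual API call and the expected output format follows Fig. 7 ([movie]-[rating]).

```python
import re

RATING_LINE = re.compile(r"^(?P<movie>.+?)-(?P<rating>[0-5](?:\.\d+)?)$")

def parse_ratings(text, expected_count):
    """Return {movie: rating} if every non-empty line matches '[movie]-[rating]', else None."""
    ratings = {}
    for line in filter(None, (l.strip() for l in text.splitlines())):
        match = RATING_LINE.match(line)
        if match is None:
            return None  # output does not conform to the required format
        ratings[match.group("movie").strip()] = float(match.group("rating"))
    return ratings if len(ratings) == expected_count else None

def ask_with_retries(query_llm, prompt, expected_count, max_retries=3):
    """Re-query the LLM until its answer matches the required format."""
    for _ in range(max_retries):
        ratings = parse_ratings(query_llm(prompt), expected_count)
        if ratings is not None:
            return ratings
    raise ValueError("LLM did not produce a well-formatted answer")
```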
|
| 269 |
+
|
| 270 |
+
Table 4: Example cases that failed to produce canonical answers, with explanations
|
| 271 |
+
|
| 272 |
+
<table><tr><td>Example</td><td>Explanation</td><td>Correct</td></tr><tr><td>... The current list is: 1.Toy Story (1995) 2.Fargo (1996) 3.Die Hard (1988) 4.Fish Called Wanda, A (1988) 5.Wrong Trousers, The (1993)</td><td>The output conforms to the formatting requirements.</td><td>✓</td></tr><tr><td>... The current list is: 1.The Shawshank Redemption (1994) (It should be "Shawshank Redemption, The (1994)") 2.A Fish Called Wanda (1988) (It should be "Fish Called Wanda, A (1988)") ...</td><td>Failure to output film names in accordance with film industry norms, e.g., "A" and "The" are not in the right place.</td><td>X</td></tr><tr><td>... The current list is: 1.Toy Story (1995) 2.Groundhog Day (1993) 3.Star Trek: The Wrath of Khan (1982) 4.Fargo (1996)</td><td>Sometimes it cannot output a sufficient number of movies; in this case it output only 4 movies, while in other cases it may output 19.</td><td>X</td></tr><tr><td>... The current list is: a:Star Wars (1977) a:Raiders of the Lost Ark (1981) n:Back to the Future (1985) m:Fargo (1996) ...</td><td>Sometimes the id information is lost when the LLM is asked to output movies in the format [id].[name].</td><td>X</td></tr></table>
|
2303.14xxx/2303.14524/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:79faaa55ca6341559d2306945bfe9a4048487e0bbd6848abc9978d054afef425
|
| 3 |
+
size 766665
|
2303.14xxx/2303.14524/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14526/78d21730-a964-44d8-b842-a279fb0ebf53_content_list.json
ADDED
|
@@ -0,0 +1,1648 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Selective Structured State-Spaces for Long-Form Video Understanding",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
125,
|
| 8 |
+
130,
|
| 9 |
+
841,
|
| 10 |
+
152
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Jue Wang Wentao Zhu Pichao Wang Xiang Yu Linda Liu Mohamed Omar Raffay Hamid Amazon Prime Video",
|
| 17 |
+
"bbox": [
|
| 18 |
+
89,
|
| 19 |
+
180,
|
| 20 |
+
879,
|
| 21 |
+
215
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "{juewangn, zhuwent, wpichao, xiangnyu, lindliu, omarmk, raffay}@amazon.com",
|
| 28 |
+
"bbox": [
|
| 29 |
+
155,
|
| 30 |
+
219,
|
| 31 |
+
810,
|
| 32 |
+
234
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Abstract",
|
| 39 |
+
"text_level": 1,
|
| 40 |
+
"bbox": [
|
| 41 |
+
233,
|
| 42 |
+
268,
|
| 43 |
+
313,
|
| 44 |
+
284
|
| 45 |
+
],
|
| 46 |
+
"page_idx": 0
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"type": "text",
|
| 50 |
+
"text": "Effective modeling of complex spatiotemporal dependencies in long-form videos remains an open problem. The recently proposed Structured State-Space Sequence (S4) model with its linear complexity offers a promising direction in this space. However, we demonstrate that treating all imagedokens equally as done by S4 model can adversely affect its efficiency and accuracy. To address this limitation, we present a novel Selective S4 (i.e., S5) model that employs a lightweight mask generator to adaptively select informative image tokens resulting in more efficient and accurate modeling of long-term spatiotemporal dependencies in videos. Unlike previous mask-based token reduction methods used in transformers, our S5 model avoids the dense self-attention calculation by making use of the guidance of the momentum-updated S4 model. This enables our model to efficiently discard less informative tokens and adapt to various long-form video understanding tasks more effectively. However, as is the case for most token reduction methods, the informative image tokens could be dropped incorrectly. To improve the robustness and the temporal horizon of our model, we propose a novel long-short masked contrastive learning (LSMCL) approach that enables our model to predict longer temporal context using shorter input videos. We present extensive comparative results using three challenging long-form video understanding datasets (LVU, COIN and Breakfast), demonstrating that our approach consistently outperforms the previous state-of-the-art S4 model by up to $9.6\\%$ accuracy while reducing its memory footprint by $23\\%$ .",
|
| 51 |
+
"bbox": [
|
| 52 |
+
75,
|
| 53 |
+
300,
|
| 54 |
+
473,
|
| 55 |
+
739
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "1. Introduction",
|
| 62 |
+
"text_level": 1,
|
| 63 |
+
"bbox": [
|
| 64 |
+
76,
|
| 65 |
+
768,
|
| 66 |
+
209,
|
| 67 |
+
784
|
| 68 |
+
],
|
| 69 |
+
"page_idx": 0
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"type": "text",
|
| 73 |
+
"text": "Video understanding is an active research area where a variety of different models have been explored including e.g., two-stream networks [19, 20, 57], recurrent neural networks [3, 68, 77] and 3-D convolutional networks [64-66]. However, most of these methods have primarily focused on short-form videos that are typically with a few seconds in length, and are not designed to model the complex long-",
|
| 74 |
+
"bbox": [
|
| 75 |
+
75,
|
| 76 |
+
794,
|
| 77 |
+
468,
|
| 78 |
+
902
|
| 79 |
+
],
|
| 80 |
+
"page_idx": 0
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"type": "image",
|
| 84 |
+
"img_path": "images/67a13663c6b83fdabc46b870dc70d6cc648732dde97350d845cbd501a9890f83.jpg",
|
| 85 |
+
"image_caption": [
|
| 86 |
+
"Figure 1. Illustration of long-form videos - Evenly sampled frames from two long-form videos, that have long duration (more than 1 minute) and distinct categories in the Breakfast [39] dataset (grayscale frames are shown for better visualization). The video on top shows the activity of making scrambled eggs, while the one on the bottom shows the activity of making cereal. These two videos heavily overlap in terms of objects (e.g., eggs, saucepan and stove), and actions (e.g., picking, whisking and pouring). To effectively distinguish these two videos, it is important to model long-term spatiotemporal dependencies, which is also the key in long-form video understanding."
|
| 87 |
+
],
|
| 88 |
+
"image_footnote": [],
|
| 89 |
+
"bbox": [
|
| 90 |
+
501,
|
| 91 |
+
267,
|
| 92 |
+
893,
|
| 93 |
+
404
|
| 94 |
+
],
|
| 95 |
+
"page_idx": 0
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"type": "text",
|
| 99 |
+
"text": "term spatiotemporal dependencies often found in long-form videos (see Figure 1 for an illustrative example). The recent vision transformer (ViT) [14] has shown promising capability in modeling long-range dependencies, and several variants [1,4,15,44,49,54,70] have successfully adopted the transformer architecture for video modeling. However, for a video with T frames and S spatial tokens, the complexity of standard video transformer architecture is $\\mathcal{O}(\\mathrm{S}^2\\mathrm{T}^2)$ , which poses prohibitively high computation and memory costs when modeling long-form videos. Various attempts [59,73] have been proposed to improve this efficiency, but the ViT pyramid architecture prevents them from developing long-term dependencies on low-level features.",
|
| 100 |
+
"bbox": [
|
| 101 |
+
496,
|
| 102 |
+
582,
|
| 103 |
+
893,
|
| 104 |
+
777
|
| 105 |
+
],
|
| 106 |
+
"page_idx": 0
|
| 107 |
+
},
|
| 108 |
+
{
|
| 109 |
+
"type": "text",
|
| 110 |
+
"text": "In addition to ViT, a recent ViS4mer [32] method has tried to apply the Structured State-Spaces Sequence (S4) model [24] as an effective way to model the long-term video dependencies. However, by introducing simple masking techniques we empirically reveal that the S4 model can have different temporal reasoning preferences for different downstream tasks. This makes applying the same image token selection method as done by ViS4mer [32] for all long-",
|
| 111 |
+
"bbox": [
|
| 112 |
+
496,
|
| 113 |
+
779,
|
| 114 |
+
893,
|
| 115 |
+
902
|
| 116 |
+
],
|
| 117 |
+
"page_idx": 0
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"type": "aside_text",
|
| 121 |
+
"text": "arXiv:2303.14526v1 [cs.CV] 25 Mar 2023",
|
| 122 |
+
"bbox": [
|
| 123 |
+
22,
|
| 124 |
+
258,
|
| 125 |
+
58,
|
| 126 |
+
705
|
| 127 |
+
],
|
| 128 |
+
"page_idx": 0
|
| 129 |
+
},
|
| 130 |
+
{
|
| 131 |
+
"type": "page_number",
|
| 132 |
+
"text": "1",
|
| 133 |
+
"bbox": [
|
| 134 |
+
480,
|
| 135 |
+
924,
|
| 136 |
+
488,
|
| 137 |
+
936
|
| 138 |
+
],
|
| 139 |
+
"page_idx": 0
|
| 140 |
+
},
|
| 141 |
+
{
|
| 142 |
+
"type": "text",
|
| 143 |
+
"text": "form video understanding tasks suboptimal.",
|
| 144 |
+
"bbox": [
|
| 145 |
+
76,
|
| 146 |
+
90,
|
| 147 |
+
367,
|
| 148 |
+
104
|
| 149 |
+
],
|
| 150 |
+
"page_idx": 1
|
| 151 |
+
},
|
| 152 |
+
{
|
| 153 |
+
"type": "text",
|
| 154 |
+
"text": "To address this challenge, we propose a cost-efficient adaptive token selection module, termed S5 (i.e., selective S4) model, which adaptively selects informative image tokens for the S4 model, thereby learning discriminative long-form video representations. Previous token reduction methods for efficient image transformers [40, 46, 55, 71, 75, 76] heavily rely on a dense self-attention calculation, which makes them less effective in practice despite their theoretical guarantees about efficiency gains. In contrast, our S5 model avoids the dense self-attention calculation by leveraging S4 features in a gumble-softmax sampling [33] based mask generator to adaptively select more informative image tokens. Our mask generator leverages S4 feature for its global sequence-context information and is further guided by the momentum distillation from the S4 model.",
|
| 155 |
+
"bbox": [
|
| 156 |
+
75,
|
| 157 |
+
107,
|
| 158 |
+
468,
|
| 159 |
+
332
|
| 160 |
+
],
|
| 161 |
+
"page_idx": 1
|
| 162 |
+
},
|
| 163 |
+
{
|
| 164 |
+
"type": "text",
|
| 165 |
+
"text": "To further improve the robustness and the temporal predictability of our S5 model, we introduce a novel long-short mask contrastive learning (LSMCL) to pre-train our model. In LSMCL, randomly selected image tokens from long and short clips include the scenario that the less informative image tokens are chosen, and the representation of them are learned to match each other. As a result, the LSMCL not only significantly boosts the efficiency compared to the previous video contrastive learning methods [17, 56, 69], but also increases the robustness of our S5 model when dealing with the mis-predicted image tokens. We empirically demonstrate that the S5 model with LSMCL pre-training can employ shorter-length clips to achieve on-par performance with using longer-range clips without incorporating LSMCL pre-training.",
|
| 166 |
+
"bbox": [
|
| 167 |
+
75,
|
| 168 |
+
334,
|
| 169 |
+
470,
|
| 170 |
+
561
|
| 171 |
+
],
|
| 172 |
+
"page_idx": 1
|
| 173 |
+
},
|
| 174 |
+
{
|
| 175 |
+
"type": "text",
|
| 176 |
+
"text": "We summarize our key contributions as the following:",
|
| 177 |
+
"bbox": [
|
| 178 |
+
76,
|
| 179 |
+
566,
|
| 180 |
+
442,
|
| 181 |
+
580
|
| 182 |
+
],
|
| 183 |
+
"page_idx": 1
|
| 184 |
+
},
|
| 185 |
+
{
|
| 186 |
+
"type": "list",
|
| 187 |
+
"sub_type": "text",
|
| 188 |
+
"list_items": [
|
| 189 |
+
"- We propose a Selective S4 (S5) model that leverages the global sequence-context information from S4 features to adaptively choose informative image tokens in a task-specific way.",
|
| 190 |
+
"- We introduce a novel long-short masked contrastive learning approach (LSMCL) that enables our model to be tolerant to the mis-predicted tokens and exploit longer duration spatiotemporal context by using shorter duration input videos, leading to improved robustness in the S5 model.",
|
| 191 |
+
"- We demonstrate that two proposed novel techniques (S5 model and LSMCL) are seamlessly suitable and effective for long-form video understanding, achieving the state-of-the-art performance on three challenging benchmarks. Notably, our method achieves up to $9.6\\%$ improvement on LVU dataset compared to the previous state-of-the-art S4 method, while reducing the memory footprint by $23\\%$ ."
|
| 192 |
+
],
|
| 193 |
+
"bbox": [
|
| 194 |
+
76,
|
| 195 |
+
582,
|
| 196 |
+
468,
|
| 197 |
+
825
|
| 198 |
+
],
|
| 199 |
+
"page_idx": 1
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"type": "text",
|
| 203 |
+
"text": "2. Related Work",
|
| 204 |
+
"text_level": 1,
|
| 205 |
+
"bbox": [
|
| 206 |
+
76,
|
| 207 |
+
844,
|
| 208 |
+
218,
|
| 209 |
+
859
|
| 210 |
+
],
|
| 211 |
+
"page_idx": 1
|
| 212 |
+
},
|
| 213 |
+
{
|
| 214 |
+
"type": "text",
|
| 215 |
+
"text": "We discuss the literature with respect to the three most relevant fields: video understanding with long-form format,",
|
| 216 |
+
"bbox": [
|
| 217 |
+
76,
|
| 218 |
+
869,
|
| 219 |
+
468,
|
| 220 |
+
900
|
| 221 |
+
],
|
| 222 |
+
"page_idx": 1
|
| 223 |
+
},
|
| 224 |
+
{
|
| 225 |
+
"type": "text",
|
| 226 |
+
"text": "efficient token selection for vision transformer training, and self-supervised learning with videos.",
|
| 227 |
+
"bbox": [
|
| 228 |
+
500,
|
| 229 |
+
90,
|
| 230 |
+
890,
|
| 231 |
+
121
|
| 232 |
+
],
|
| 233 |
+
"page_idx": 1
|
| 234 |
+
},
|
| 235 |
+
{
|
| 236 |
+
"type": "text",
|
| 237 |
+
"text": "a. Long-Form Video Modeling: Transformers have shown excellent performance in modeling long-term dependencies, e.g., in natural language processing (NLP) [5, 12, 13]. But the high computational cost caused by dense self-attention calculation becomes a bottleneck to apply in not only NLP but also computer vision. Much subsequent work [11, 34, 36, 43, 44, 52, 70] focuses on improving the transformer efficiency. However, they are not designed for dealing with plethora of spatial and temporal image tokens that are common in long-form video scenarios. LF-VILA [59] develops a hierarchical feeding architecture to include more frames in the model, thus capturing longer temporal information. Similarly, MeMViT [73] better utilizes temporal information by emerging the previously cached \"memory\" from the past. The pyramid structure leveraged by LF-VILA and MeMViT shows efficiency improvements, but may lose low-level spatial-temporal contextual information. Gu et al. [24] proposed a Structured State-Space Sequence (S4) model, a novel alternative to CNNs or transformers, to model the long-range dependencies by simulating a linear time invariant (LTI) system. Subsequently, S4ND [50] and ViS4mer [32] extend S4 model to the video classification task. ViS4mer [32] stacks multiple S4 layers with different scales in modeling long-form videos, and S4ND [50] substitutes the traditional convolutional layer with the proposed S4ND layer in image and short-form video classification tasks. The equal importance assumption to all the image tokens by ViS4mer and S4ND can be further improved by introducing suitable token selection mechanisms, especially when dealing with the long-form input sequences. Consequently, we propose a token Selection S4 (S5) model to further enhance the efficiency while maintaining the long-form representation power.",
|
| 238 |
+
"bbox": [
|
| 239 |
+
496,
|
| 240 |
+
133,
|
| 241 |
+
890,
|
| 242 |
+
633
|
| 243 |
+
],
|
| 244 |
+
"page_idx": 1
|
| 245 |
+
},
|
| 246 |
+
{
|
| 247 |
+
"type": "text",
|
| 248 |
+
"text": "b. Adaptive Token Selection: Adaptive token selection is widely used to improve model efficiency. Traditional CNN methods such as SCsampler [37] filter informative clips by using motion and audio embeddings. Adaframe [74] utilizes memory-augmented LSTMs as agents, which predict where to look in the next time step. AR-NET [47] uses LSTM as decision maker to select useful frames and their resolutions. [40, 46, 55, 71, 75] apply this selection idea to transformers to adaptively select tokens for increased efficiency. For instance, STTS [71] leverages a token selection module, the named scorer network, to provide the importance score for each token and select the top-K frames with the highest scores. AdaViT [46] extends this idea to develop instance-specific policies, guiding the activation of patches, self-attention heads and transformer blocks. All of the above methods demonstrate how a light-weight token selection module can improve inference efficiency. However,",
|
| 249 |
+
"bbox": [
|
| 250 |
+
496,
|
| 251 |
+
643,
|
| 252 |
+
890,
|
| 253 |
+
900
|
| 254 |
+
],
|
| 255 |
+
"page_idx": 1
|
| 256 |
+
},
|
| 257 |
+
{
|
| 258 |
+
"type": "page_number",
|
| 259 |
+
"text": "2",
|
| 260 |
+
"bbox": [
|
| 261 |
+
478,
|
| 262 |
+
924,
|
| 263 |
+
491,
|
| 264 |
+
936
|
| 265 |
+
],
|
| 266 |
+
"page_idx": 1
|
| 267 |
+
},
|
| 268 |
+
{
|
| 269 |
+
"type": "text",
|
| 270 |
+
"text": "these methods are essentially designed for images, and may require non-trivial adaptation to the long-form video scenarios, i.e., the video-level long-range reasoning and computationally expensive self-attention calculation. To avoid this dense self-attention calculation, our proposed S5 model leverages S4 features to model the long-term dependencies and adaptively pick informative tokens.",
|
| 271 |
+
"bbox": [
|
| 272 |
+
75,
|
| 273 |
+
90,
|
| 274 |
+
472,
|
| 275 |
+
196
|
| 276 |
+
],
|
| 277 |
+
"page_idx": 2
|
| 278 |
+
},
|
| 279 |
+
{
|
| 280 |
+
"type": "text",
|
| 281 |
+
"text": "c. Video Self-Supervised Learning (SSL): Previous work on token reduction rarely considers the negative impact of mis-dropped tokens. EViT [40] simply fuses the unattended tokens and concatenates with the remaining ones. From the recent successful image SSL works [8, 9, 22, 26, 27], many follow-up works [16, 18, 56, 63, 69] learn discriminative video features with great generalization ability in downstream tasks. Specifically, LSTCL [69] and BraVe [56] utilize long and short clips in the concept of SSL, which enables the model to learn an effective representation by predicting temporal context captured from a longer temporal extent. This essentially broadens the temporal horizon of the model for predicting longer temporal context with fewer from shorter input frames. In this paper, we adopt this idea with an additional random masking strategy to increase the efficiency of contrastive learning in long-form videos, and to further improve the robustness and the temporal predictability of our S5 model in downstream tasks.",
|
| 282 |
+
"bbox": [
|
| 283 |
+
75,
|
| 284 |
+
203,
|
| 285 |
+
472,
|
| 286 |
+
476
|
| 287 |
+
],
|
| 288 |
+
"page_idx": 2
|
| 289 |
+
},
|
| 290 |
+
{
|
| 291 |
+
"type": "text",
|
| 292 |
+
"text": "3. Approach",
|
| 293 |
+
"text_level": 1,
|
| 294 |
+
"bbox": [
|
| 295 |
+
76,
|
| 296 |
+
489,
|
| 297 |
+
187,
|
| 298 |
+
507
|
| 299 |
+
],
|
| 300 |
+
"page_idx": 2
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "text",
|
| 304 |
+
"text": "We start by summarizing Structured State-Space Sequence (S4) [24] model and ViS4mer [32] ( $\\S 3.1$ ), followed by empirical analysis of S4 model in various long-form video understanding tasks ( $\\S 3.2$ ), and then providing the details of our proposed approach to address these limitations ( $\\S 3.3$ and $\\S 3.4$ ).",
|
| 305 |
+
"bbox": [
|
| 306 |
+
75,
|
| 307 |
+
513,
|
| 308 |
+
472,
|
| 309 |
+
604
|
| 310 |
+
],
|
| 311 |
+
"page_idx": 2
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"text": "3.1. Preliminaries",
|
| 316 |
+
"text_level": 1,
|
| 317 |
+
"bbox": [
|
| 318 |
+
76,
|
| 319 |
+
614,
|
| 320 |
+
218,
|
| 321 |
+
628
|
| 322 |
+
],
|
| 323 |
+
"page_idx": 2
|
| 324 |
+
},
|
| 325 |
+
{
|
| 326 |
+
"type": "text",
|
| 327 |
+
"text": "3.1.1 S4 Model",
|
| 328 |
+
"text_level": 1,
|
| 329 |
+
"bbox": [
|
| 330 |
+
76,
|
| 331 |
+
637,
|
| 332 |
+
197,
|
| 333 |
+
652
|
| 334 |
+
],
|
| 335 |
+
"page_idx": 2
|
| 336 |
+
},
|
| 337 |
+
{
|
| 338 |
+
"type": "text",
|
| 339 |
+
"text": "Recall that a simple State-Space Model i.e., a linear time invariant (LTI) system can be written as:",
|
| 340 |
+
"bbox": [
|
| 341 |
+
75,
|
| 342 |
+
660,
|
| 343 |
+
468,
|
| 344 |
+
691
|
| 345 |
+
],
|
| 346 |
+
"page_idx": 2
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"type": "equation",
|
| 350 |
+
"text": "\n$$\n\\mathbf {x} ^ {\\prime} (t) = \\mathbf {A x} (t) + \\mathbf {B u} (t) \\tag {1}\n$$\n",
|
| 351 |
+
"text_format": "latex",
|
| 352 |
+
"bbox": [
|
| 353 |
+
184,
|
| 354 |
+
702,
|
| 355 |
+
468,
|
| 356 |
+
726
|
| 357 |
+
],
|
| 358 |
+
"page_idx": 2
|
| 359 |
+
},
|
| 360 |
+
{
|
| 361 |
+
"type": "equation",
|
| 362 |
+
"text": "\n$$\n\\mathbf {y} (t) = \\mathbf {C x} (t) + \\mathbf {D u} (t).\n$$\n",
|
| 363 |
+
"text_format": "latex",
|
| 364 |
+
"bbox": [
|
| 365 |
+
192,
|
| 366 |
+
723,
|
| 367 |
+
357,
|
| 368 |
+
738
|
| 369 |
+
],
|
| 370 |
+
"page_idx": 2
|
| 371 |
+
},
|
| 372 |
+
{
|
| 373 |
+
"type": "text",
|
| 374 |
+
"text": "Under deep learning setting, $\\mathbf{A}$ , $\\mathbf{B}$ and $\\mathbf{C}$ are learned via gradient descent while $+D\\mathbf{u}(t)$ is replaced by a residual connection. This formulation projects an input signal $\\mathbf{u}(t)$ from one-dimensional space to an N-dimensional latent space $\\mathbf{x}(t)$ , which is then mapped back to a one-dimensional output signal $\\mathbf{y}(t)$ . Similar to RNNs, it has been found in previous work that Equation 1 also suffers from gradient vanish or exploding issues when modeling longer sequences. To tackle this issue, the work in [24] leveraged HiPPO theory [23] to initialize the $\\mathbf{A}$ matrix.",
|
| 375 |
+
"bbox": [
|
| 376 |
+
75,
|
| 377 |
+
750,
|
| 378 |
+
472,
|
| 379 |
+
901
|
| 380 |
+
],
|
| 381 |
+
"page_idx": 2
|
| 382 |
+
},
|
| 383 |
+
{
|
| 384 |
+
"type": "text",
|
| 385 |
+
"text": "HiPPO specifies a certain expression of $\\mathbf{A} \\in \\mathbb{R}^{N \\times N}$ (see Equation 2), which allows the hidden state to memorize the input $\\mathbf{u}(t)^1$ .",
|
| 386 |
+
"bbox": [
|
| 387 |
+
498,
|
| 388 |
+
90,
|
| 389 |
+
893,
|
| 390 |
+
137
|
| 391 |
+
],
|
| 392 |
+
"page_idx": 2
|
| 393 |
+
},
|
| 394 |
+
{
|
| 395 |
+
"type": "equation",
|
| 396 |
+
"text": "\n$$\n\\operatorname {H i P P O}: \\mathbf {A} _ {n, k} = - \\left\\{ \\begin{array}{l l} (2 n + 1) ^ {0. 5} (2 k + 1) ^ {0. 5} & \\text {i f} n > k \\\\ n + 1 & \\text {i f} n = k \\\\ 0 & \\text {i f} n < k, \\end{array} \\right. \\tag {2}\n$$\n",
|
| 397 |
+
"text_format": "latex",
|
| 398 |
+
"bbox": [
|
| 399 |
+
506,
|
| 400 |
+
159,
|
| 401 |
+
890,
|
| 402 |
+
227
|
| 403 |
+
],
|
| 404 |
+
"page_idx": 2
|
| 405 |
+
},
|
| 406 |
+
{
|
| 407 |
+
"type": "text",
|
| 408 |
+
"text": "where $n$ and $k$ indicate the row and column indices of $\\mathbf{A}$ . To implement Equation 1 using discrete inputs such as word or image tokens, the work in [24] leverages the bi-linear discretization method [67] and a discretized version of Equation 1 using a step size $\\Delta$ is rewritten as:",
|
| 409 |
+
"bbox": [
|
| 410 |
+
496,
|
| 411 |
+
228,
|
| 412 |
+
893,
|
| 413 |
+
304
|
| 414 |
+
],
|
| 415 |
+
"page_idx": 2
|
| 416 |
+
},
|
| 417 |
+
{
|
| 418 |
+
"type": "equation",
|
| 419 |
+
"text": "\n$$\n\\mathbf {x} _ {k} = \\bar {\\mathbf {A}} \\mathbf {x} _ {k - 1} + \\bar {\\mathbf {B}} \\mathbf {u} _ {k} \\tag {3}\n$$\n",
|
| 420 |
+
"text_format": "latex",
|
| 421 |
+
"bbox": [
|
| 422 |
+
620,
|
| 423 |
+
313,
|
| 424 |
+
890,
|
| 425 |
+
338
|
| 426 |
+
],
|
| 427 |
+
"page_idx": 2
|
| 428 |
+
},
|
| 429 |
+
{
|
| 430 |
+
"type": "equation",
|
| 431 |
+
"text": "\n$$\n\\mathbf {y} _ {k} = \\bar {\\mathbf {C}} \\mathbf {x} _ {k},\n$$\n",
|
| 432 |
+
"text_format": "latex",
|
| 433 |
+
"bbox": [
|
| 434 |
+
622,
|
| 435 |
+
334,
|
| 436 |
+
702,
|
| 437 |
+
349
|
| 438 |
+
],
|
| 439 |
+
"page_idx": 2
|
| 440 |
+
},
|
| 441 |
+
{
|
| 442 |
+
"type": "text",
|
| 443 |
+
"text": "where $\\bar{\\mathbf{A}} = (\\mathbf{I} + \\frac{\\Delta\\cdot\\mathbf{A}}{2}) / (\\mathbf{I} - \\frac{\\Delta\\cdot\\mathbf{A}}{2}),\\bar{\\mathbf{B}} = \\Delta \\cdot \\mathbf{B} / (I - \\frac{\\Delta\\cdot\\mathbf{A}}{2})$ and $\\bar{\\mathbf{C}} = \\mathbf{C}$ Equation 3 can be solved using a discrete convolution [24]:",
|
| 444 |
+
"bbox": [
|
| 445 |
+
496,
|
| 446 |
+
359,
|
| 447 |
+
890,
|
| 448 |
+
406
|
| 449 |
+
],
|
| 450 |
+
"page_idx": 2
|
| 451 |
+
},
|
| 452 |
+
{
|
| 453 |
+
"type": "equation",
|
| 454 |
+
"text": "\n$$\n\\mathbf {y} = \\bar {\\mathbf {K}} * \\mathbf {u}, \\tag {4}\n$$\n",
|
| 455 |
+
"text_format": "latex",
|
| 456 |
+
"bbox": [
|
| 457 |
+
651,
|
| 458 |
+
406,
|
| 459 |
+
890,
|
| 460 |
+
422
|
| 461 |
+
],
|
| 462 |
+
"page_idx": 2
|
| 463 |
+
},
|
| 464 |
+
{
|
| 465 |
+
"type": "text",
|
| 466 |
+
"text": "where $\\mathbf{u} = \\{u_0, u_1, \\dots, u_{k-1}, u_k\\}$ and $\\bar{\\mathbf{K}} \\in \\mathbb{R}^L := \\{\\bar{\\mathbf{C}}\\bar{\\mathbf{B}}, \\bar{\\mathbf{C}}\\bar{\\mathbf{A}}\\bar{\\mathbf{B}}, \\dots, \\bar{\\mathbf{C}}\\bar{\\mathbf{A}}^{L-1}\\bar{\\mathbf{B}}\\}$ is a structured convolutional kernel and $\\mathbf{L}$ is the sequence length. Equation 4 is the core formulation of S4 model whose computational cost is linear to the input length and can be efficiently computed using fast Fourier transform (FFT) and inverse FFT. Moreover, to control the convolution kernel width, the work in [25] set $\\Delta$ as a learnable parameter.",
|
| 467 |
+
"bbox": [
|
| 468 |
+
496,
|
| 469 |
+
428,
|
| 470 |
+
893,
|
| 471 |
+
550
|
| 472 |
+
],
|
| 473 |
+
"page_idx": 2
|
| 474 |
+
},
|
| 475 |
+
{
|
| 476 |
+
"type": "text",
|
| 477 |
+
"text": "3.1.2 ViS4mer Model",
|
| 478 |
+
"text_level": 1,
|
| 479 |
+
"bbox": [
|
| 480 |
+
500,
|
| 481 |
+
566,
|
| 482 |
+
663,
|
| 483 |
+
580
|
| 484 |
+
],
|
| 485 |
+
"page_idx": 2
|
| 486 |
+
},
|
| 487 |
+
{
|
| 488 |
+
"type": "text",
|
| 489 |
+
"text": "By utilizing the S4 model, the ViS4mer [32] achieves promising results in the long-form video understanding tasks. We start with defining some notations to help summarize the adaptation of S4 model in computer vision. Given a video clip $\\mathbf{X} \\in \\mathbb{R}^{\\mathrm{H} \\times \\mathrm{W} \\times 3 \\times \\mathrm{T}}$ consisting of T RGB frames sampled from the video, we convert it into a sequence of S·T image tokens $\\mathbf{x}_s^t \\in \\mathbb{R}^{\\mathrm{D}}$ for $s = 1, \\ldots, \\mathrm{S}$ and $t = 1, \\ldots, \\mathrm{T}$ . The tokens $\\mathbf{z}_s^t$ are obtained by decomposing each frame into S patches which are then projected to a D-dimensional space through a learnable linear transformation. This tokenization can be implemented by linearly mapping the RGB patches of each frame [4, 49]. Separate learnable positional encodings $\\mathbf{e}_s$ and $\\mathbf{e}^t$ are then applied to the patch embeddings $\\mathbf{z}_s^t$ for the spatial and the temporal dimensions: $\\mathbf{x}_s^t = \\mathbf{z}_s^t + \\mathbf{e}_s + \\mathbf{e}^t$ , formulating $\\mathbf{x}_{\\mathrm{input}} = \\{x_0^0, x_1^0, x_S^0, x_0^1, \\ldots, x_S^T\\}$ .",
|
| 490 |
+
"bbox": [
|
| 491 |
+
496,
|
| 492 |
+
590,
|
| 493 |
+
890,
|
| 494 |
+
833
|
| 495 |
+
],
|
| 496 |
+
"page_idx": 2
|
| 497 |
+
},
|
| 498 |
+
{
|
| 499 |
+
"type": "text",
|
| 500 |
+
"text": "In ViS4mer [32], a multi-scale S4 decoder is introduced for learning the long-term temporal reasoning. As is mentioned in § 3.1.1, S4 model has a linear computation and",
|
| 501 |
+
"bbox": [
|
| 502 |
+
496,
|
| 503 |
+
833,
|
| 504 |
+
893,
|
| 505 |
+
878
|
| 506 |
+
],
|
| 507 |
+
"page_idx": 2
|
| 508 |
+
},
|
| 509 |
+
{
|
| 510 |
+
"type": "page_footnote",
|
| 511 |
+
"text": "1Please refer to [23] for more details and relevant proofs.",
|
| 512 |
+
"bbox": [
|
| 513 |
+
517,
|
| 514 |
+
886,
|
| 515 |
+
821,
|
| 516 |
+
900
|
| 517 |
+
],
|
| 518 |
+
"page_idx": 2
|
| 519 |
+
},
|
| 520 |
+
{
|
| 521 |
+
"type": "page_number",
|
| 522 |
+
"text": "3",
|
| 523 |
+
"bbox": [
|
| 524 |
+
478,
|
| 525 |
+
924,
|
| 526 |
+
491,
|
| 527 |
+
936
|
| 528 |
+
],
|
| 529 |
+
"page_idx": 2
|
| 530 |
+
},
|
| 531 |
+
{
|
| 532 |
+
"type": "image",
|
| 533 |
+
"img_path": "images/c8257e832e659be3a30686bfdc5f070145e62a210dde34cd77ba7b24d5bcb3e4.jpg",
|
| 534 |
+
"image_caption": [
|
| 535 |
+
"(a). Performance Impact with increasing number of input frames",
|
| 536 |
+
"(b). Performance Impact with increasing masking ratio"
|
| 537 |
+
],
|
| 538 |
+
"image_footnote": [],
|
| 539 |
+
"bbox": [
|
| 540 |
+
80,
|
| 541 |
+
98,
|
| 542 |
+
468,
|
| 543 |
+
170
|
| 544 |
+
],
|
| 545 |
+
"page_idx": 3
|
| 546 |
+
},
|
| 547 |
+
{
|
| 548 |
+
"type": "image",
|
| 549 |
+
"img_path": "images/23cc9916ee7f7fe853c293f93b3ab4e115f0bfd03cbc67c1eb4b32f8cc4bd144.jpg",
|
| 550 |
+
"image_caption": [
|
| 551 |
+
"Figure 2. Performance gain/loss of ViS4mer on LVU dataset [72] with different settings of input frames and random masking ratio, where we conclude: (a). The performance is not substantially improved with increasing number of input frames. (b). Random masking strategy cannot effectively reduce redundant tokens."
|
| 552 |
+
],
|
| 553 |
+
"image_footnote": [],
|
| 554 |
+
"bbox": [
|
| 555 |
+
78,
|
| 556 |
+
181,
|
| 557 |
+
468,
|
| 558 |
+
255
|
| 559 |
+
],
|
| 560 |
+
"page_idx": 3
|
| 561 |
+
},
|
| 562 |
+
{
|
| 563 |
+
"type": "text",
|
| 564 |
+
"text": "memory dependency with respect to the input length, which has significantly lower computational cost than the self-attention in transformers. The formulation of S4 decoder can be written as:",
|
| 565 |
+
"bbox": [
|
| 566 |
+
75,
|
| 567 |
+
353,
|
| 568 |
+
468,
|
| 569 |
+
412
|
| 570 |
+
],
|
| 571 |
+
"page_idx": 3
|
| 572 |
+
},
|
| 573 |
+
{
|
| 574 |
+
"type": "equation",
|
| 575 |
+
"text": "\n$$\n\\begin{array}{l} \\mathbf {x} _ {s _ {4}} = \\mathrm {S} _ {4} (\\mathrm {L N} (\\mathbf {x} _ {\\text {i n p u t}})) \\\\ \\mathbf {x} _ {m l p} = \\operatorname {M L P} \\left(\\mathrm {P} \\left(\\mathbf {x} _ {s _ {4}}\\right)\\right) \\tag {5} \\\\ \\mathbf {x} _ {\\text {s k i p}} = \\operatorname {L i n e a r} \\left(\\mathrm {P} \\left(\\mathbf {x} _ {\\text {i n p u t}}\\right)\\right) \\\\ \\mathbf {x} _ {\\text {o u t}} = \\mathbf {x} _ {\\text {s k i p}} + \\mathbf {x} _ {\\text {m l p}}, \\\\ \\end{array}\n$$\n",
|
| 576 |
+
"text_format": "latex",
|
| 577 |
+
"bbox": [
|
| 578 |
+
179,
|
| 579 |
+
422,
|
| 580 |
+
468,
|
| 581 |
+
494
|
| 582 |
+
],
|
| 583 |
+
"page_idx": 3
|
| 584 |
+
},
|
| 585 |
+
{
|
| 586 |
+
"type": "text",
|
| 587 |
+
"text": "Where $\\mathrm{LN}(\\cdot),\\mathrm{MLP}(\\cdot),\\mathrm{Linear}(\\cdot)$ and $\\mathrm{P}(\\cdot)$ represent the layer normalization [2], the multi-layer perception, linear layer and pooling layer, and $\\mathbf{x}_{s_4}$ is the $\\mathbf{y}$ in Equation 4.",
|
| 588 |
+
"bbox": [
|
| 589 |
+
75,
|
| 590 |
+
505,
|
| 591 |
+
468,
|
| 592 |
+
551
|
| 593 |
+
],
|
| 594 |
+
"page_idx": 3
|
| 595 |
+
},
|
| 596 |
+
{
|
| 597 |
+
"type": "text",
|
| 598 |
+
"text": "3.2. S4 Model in Long-form Video Understanding",
|
| 599 |
+
"text_level": 1,
|
| 600 |
+
"bbox": [
|
| 601 |
+
76,
|
| 602 |
+
560,
|
| 603 |
+
460,
|
| 604 |
+
575
|
| 605 |
+
],
|
| 606 |
+
"page_idx": 3
|
| 607 |
+
},
|
| 608 |
+
{
|
| 609 |
+
"type": "text",
|
| 610 |
+
"text": "To better understand the S4 model and long-form video understanding tasks, we re-implement ViS4mer [32] with different settings on LVU dataset [72] and demonstrate the result in Figure 2. From the observation that short-form video understanding tasks often benefit from longer input clips [4, 15, 44, 69], we wonder if the performance of S4 model on different long-form video tasks would also be substantially improved with the increasing number of input frames. In Figure 2 (a), we gradually increase the temporal extent from 60 seconds to 120 seconds. Compared to the performance of using 60 second input, we report the impact ratio of using 80, 100, 120 second inputs in each task. From this Figure, we realize that not all long-form video tasks benefit from longer input context, and for those improved tasks, the performance is not necessarily improved with the longer input content. As a result, we raise the hypothesis that capturing long-term relationships is task- and data-dependent, and that additional performance improvements for those temporally-intensive tasks would also be hindered by the redundant spatiotemporal tokens produced by longer input content. Recalling Equation 3 and 4, each",
|
| 611 |
+
"bbox": [
|
| 612 |
+
75,
|
| 613 |
+
583,
|
| 614 |
+
468,
|
| 615 |
+
900
|
| 616 |
+
],
|
| 617 |
+
"page_idx": 3
|
| 618 |
+
},
|
| 619 |
+
{
|
| 620 |
+
"type": "text",
|
| 621 |
+
"text": "output token from S4 model is the result of structured discrete convolution for all previous inputs. Thus, we argue that treating all input token equally as ViS4mer [32] does not appealing for S4 model to capture effective long-term dependencies, as not all tokens have the temporal relations and each task may also favor tokens in different space-time locations. To naively reduce the redundant tokens, we generate random masks on the 60 second input clips to drop tokens and increase the masking ratio from $20\\%$ to $80\\%$ . Compared to the performance of un-masked input, we report the impact ratio of using random mask with masking ratio of $20\\%$ , $50\\%$ and $80\\%$ in Figure 2 (b). Despite the minor improvement in some tasks, random masking degenerates the performance of most tasks, so it is not an effective method for reducing the redundancies. To this end, we are motivated to propose a selective S4 model which adaptively pick discriminative image tokens for the S4 model in different long-form video understanding tasks.",
|
| 622 |
+
"bbox": [
|
| 623 |
+
496,
|
| 624 |
+
90,
|
| 625 |
+
890,
|
| 626 |
+
363
|
| 627 |
+
],
|
| 628 |
+
"page_idx": 3
|
| 629 |
+
},
|
| 630 |
+
{
|
| 631 |
+
"type": "text",
|
| 632 |
+
"text": "3.3. Adaptive Token in Long-form Videos",
|
| 633 |
+
"text_level": 1,
|
| 634 |
+
"bbox": [
|
| 635 |
+
498,
|
| 636 |
+
369,
|
| 637 |
+
818,
|
| 638 |
+
386
|
| 639 |
+
],
|
| 640 |
+
"page_idx": 3
|
| 641 |
+
},
|
| 642 |
+
{
|
| 643 |
+
"type": "text",
|
| 644 |
+
"text": "To pick out discriminative image tokens from the long-form videos among various tasks, we extend the concept of adaptive token learning, formulating our Selective S5 (i.e., selective S4) model. Unlike previous image-based adaptive token learning works [40, 46, 55, 75] that rely on dense self-attention for capturing token-wise relationships, our S5 model avoids the self-attention computation in long-form videos by leveraging S4 features generated from the simulated linear time-invariant (LTI) system. Inherited from the linear complexity of the S4 model, our S5 model can receive long-form video token dependencies with low cost, thus making the adaptive token learning possible in long-form videos. In addition, we propose a momentum updated S4 model to dynamically produce S4 features from the long-form video data in different tasks. Figure 3 (a) demonstrates the pipeline of our S5 model, where the momentum updated S4 model is the moving average of the S4 model.",
|
| 645 |
+
"bbox": [
|
| 646 |
+
496,
|
| 647 |
+
393,
|
| 648 |
+
890,
|
| 649 |
+
651
|
| 650 |
+
],
|
| 651 |
+
"page_idx": 3
|
| 652 |
+
},
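The momentum-updated S4 model described in the block above is a moving average of the online S4 model. A minimal sketch of such an update, assuming both modules share the same parameter layout and the same moving-average form $\theta_k = m\theta_k + (1-m)\theta_q$ used for the momentum encoders in Section 3.4 (module names are illustrative):

```python
import torch

@torch.no_grad()
def momentum_update(s4_model: torch.nn.Module, momentum_s4: torch.nn.Module, m: float) -> None:
    # theta_momentum <- m * theta_momentum + (1 - m) * theta_online
    for p_online, p_momentum in zip(s4_model.parameters(), momentum_s4.parameters()):
        p_momentum.mul_(m).add_(p_online, alpha=1.0 - m)
```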
|
| 653 |
+
{
|
| 654 |
+
"type": "text",
|
| 655 |
+
"text": "Specifically, we cast our selective module in the S5 model as an adaptive mask learning problem. Given a mask generator $\\mathrm{MG}(\\cdot)$ and its input $\\mathbf{x}_{s_4}$ , the mask generator is a lightweight architecture, which will be ablated in the Section 4. It will be trained for a classification task on predefined category space $\\mathbb{C} = \\{C_1,\\dots ,C_{\\mathrm{ST}}\\}$ , where $\\mathrm{S}\\cdot \\mathrm{T}$ is the total number of image tokens in the video. Let's denote $p(c|\\mathbf{x}_{s_4})\\in [0,1]$ be the normalized probabilistic output of $MG(\\mathbf{x}_{s_4})$ , so that $\\sum_{c = C_1}^{c = C_{\\mathrm{ST}}}p(c|\\mathbf{x}_{s_4}) = 1$ . Then, we sample $K$ categories without replacement from the probabilistic outputs of the mask generator. Finally, the $k^{th}$ selected image tokens can be written as:",
|
| 656 |
+
"bbox": [
|
| 657 |
+
496,
|
| 658 |
+
651,
|
| 659 |
+
890,
|
| 660 |
+
833
|
| 661 |
+
],
|
| 662 |
+
"page_idx": 3
|
| 663 |
+
},
|
| 664 |
+
{
|
| 665 |
+
"type": "equation",
|
| 666 |
+
"text": "\n$$\nx _ {\\text {i n}} ^ {k} = \\mathbf {X} ^ {\\mathbf {T}} c ^ {k} \\tag {6}\n$$\n",
|
| 667 |
+
"text_format": "latex",
|
| 668 |
+
"bbox": [
|
| 669 |
+
651,
|
| 670 |
+
840,
|
| 671 |
+
890,
|
| 672 |
+
859
|
| 673 |
+
],
|
| 674 |
+
"page_idx": 3
|
| 675 |
+
},
|
| 676 |
+
{
|
| 677 |
+
"type": "text",
|
| 678 |
+
"text": "Where $\\mathbf{X} \\in \\mathbb{R}^{ST \\times D}$ represents S·T D-dimensional image tokens and $c^k$ is a one-hot vector that selects $k^{th}$ token from",
|
| 679 |
+
"bbox": [
|
| 680 |
+
498,
|
| 681 |
+
869,
|
| 682 |
+
890,
|
| 683 |
+
898
|
| 684 |
+
],
|
| 685 |
+
"page_idx": 3
|
| 686 |
+
},
|
| 687 |
+
{
|
| 688 |
+
"type": "page_number",
|
| 689 |
+
"text": "4",
|
| 690 |
+
"bbox": [
|
| 691 |
+
478,
|
| 692 |
+
924,
|
| 693 |
+
490,
|
| 694 |
+
935
|
| 695 |
+
],
|
| 696 |
+
"page_idx": 3
|
| 697 |
+
},
|
| 698 |
+
{
|
| 699 |
+
"type": "image",
|
| 700 |
+
"img_path": "images/2577833ca283d9f8942f241f809fe0c505d38534ff3ee42ae6f4cd66b578d6ce.jpg",
|
| 701 |
+
"image_caption": [
|
| 702 |
+
"a. Illustration of our proposed S5 model"
|
| 703 |
+
],
|
| 704 |
+
"image_footnote": [],
|
| 705 |
+
"bbox": [
|
| 706 |
+
119,
|
| 707 |
+
99,
|
| 708 |
+
501,
|
| 709 |
+
238
|
| 710 |
+
],
|
| 711 |
+
"page_idx": 4
|
| 712 |
+
},
|
| 713 |
+
{
|
| 714 |
+
"type": "image",
|
| 715 |
+
"img_path": "images/b22a2225072dfc30d08836264065498d1e5b483e1ed31f27830ba92ad91c7084.jpg",
|
| 716 |
+
"image_caption": [
|
| 717 |
+
"b. Illustration of our proposed LSMCL algorithm",
|
| 718 |
+
"Figure 3. (a) A visualization of our proposed S5 model. Compared to the S4 model, we introduce a selective token picking strategy \"mask generator\", leveraging the S4 feature from the momentum S4 model. The momentum S4 model is updated by the S4 model in the moving average manner. Both S4 model and momentum S4 model are consisted of a S4 layer [24, 32] and a LN layer [2]. (b) An illustration of the proposed LSMCL pretraining framework, that initializes our S5 model to enrich the robustness."
|
| 719 |
+
],
|
| 720 |
+
"image_footnote": [],
|
| 721 |
+
"bbox": [
|
| 722 |
+
506,
|
| 723 |
+
102,
|
| 724 |
+
851,
|
| 725 |
+
242
|
| 726 |
+
],
|
| 727 |
+
"page_idx": 4
|
| 728 |
+
},
|
| 729 |
+
{
|
| 730 |
+
"type": "text",
|
| 731 |
+
"text": "the $\\mathbf{X}$ . The sampling process is important as it prevents the bias in the training that is potentially caused by the top-K selection. To make this sampling differentiable, we adopt the Gumbel-Softmax with Straight-Through tricks [33], which is widely used in [41, 46]. Specifically, we introduce an additional gumbel noise $g\\in \\mathbb{R}^{1\\times \\mathrm{ST}}$ into the predicted probability distribution $p\\in \\mathbb{R}^{1\\times \\mathrm{ST}}$ , where $g = -\\log (-\\log (u + \\epsilon) + \\epsilon)$ ( $u\\sim \\mathrm{Uniform}(0,1)$ , and $\\epsilon$ is a small value for arithmetic robustness consideration). Then, we sample the top-K tokens from the re-parameterized distribution $p + g$ . During the back-propagation, we estimate the gradient for each selected token $c$ as:",
|
| 732 |
+
"bbox": [
|
| 733 |
+
75,
|
| 734 |
+
325,
|
| 735 |
+
472,
|
| 736 |
+
505
|
| 737 |
+
],
|
| 738 |
+
"page_idx": 4
|
| 739 |
+
},
|
| 740 |
+
{
|
| 741 |
+
"type": "equation",
|
| 742 |
+
"text": "\n$$\nG \\approx \\bigtriangledown_ {\\mathrm {M G}} \\frac {\\exp \\left(\\left(\\log p \\left(c | \\mathbf {x} _ {s _ {4}}\\right) + g (c)\\right) / \\rho\\right)}{\\sum_ {c ^ {\\prime} = C _ {1}} ^ {c ^ {\\prime} = C _ {\\mathrm {S T}}} \\exp \\left(\\left(\\log p \\left(c ^ {\\prime} | \\mathbf {x} _ {s _ {4}}\\right) + g \\left(c ^ {\\prime}\\right)\\right) / \\rho\\right)} \\tag {7}\n$$\n",
|
| 743 |
+
"text_format": "latex",
|
| 744 |
+
"bbox": [
|
| 745 |
+
91,
|
| 746 |
+
510,
|
| 747 |
+
468,
|
| 748 |
+
547
|
| 749 |
+
],
|
| 750 |
+
"page_idx": 4
|
| 751 |
+
},
|
| 752 |
+
{
|
| 753 |
+
"type": "text",
|
| 754 |
+
"text": "where $\\rho$ is the temperature factor controlling the sharpness.",
|
| 755 |
+
"bbox": [
|
| 756 |
+
76,
|
| 757 |
+
553,
|
| 758 |
+
467,
|
| 759 |
+
569
|
| 760 |
+
],
|
| 761 |
+
"page_idx": 4
|
| 762 |
+
},
|
| 763 |
+
{
|
| 764 |
+
"type": "text",
|
| 765 |
+
"text": "3.4. Long-Short Mask Contrastive Learning",
|
| 766 |
+
"text_level": 1,
|
| 767 |
+
"bbox": [
|
| 768 |
+
76,
|
| 769 |
+
575,
|
| 770 |
+
419,
|
| 771 |
+
590
|
| 772 |
+
],
|
| 773 |
+
"page_idx": 4
|
| 774 |
+
},
|
| 775 |
+
{
|
| 776 |
+
"type": "text",
|
| 777 |
+
"text": "Previous token reduction/adaptive learning works rarely take model robustness into consideration. Informative tokens might be incorrectly dropped during training, which could hurt the performance of the model. In this paper, in addition to our proposed S5 model that explicitly picks informative tokens for various long-form video understanding tasks, we also propose Long-Short Mask Contrastive Learning (LSMCL) pretraining, which implicitly learns long-form video representations with better generalizability. Specifically, we equip the recent video contrastive learning framework LSTCL [69] with a random masking strategy on both long and short input clips, which mimics all possible scenarios that the selective module could produce in the S5 model. As a result, our S5 model with LSMCL pretraining would be more robust to and tolerant of errors from the selective module. Moreover, the long-short contrastive set-up will further improve the temporal predictability of our S5 model.",
|
| 778 |
+
"bbox": [
|
| 779 |
+
75,
|
| 780 |
+
598,
|
| 781 |
+
468,
|
| 782 |
+
869
|
| 783 |
+
],
|
| 784 |
+
"page_idx": 4
|
| 785 |
+
},
|
| 786 |
+
{
|
| 787 |
+
"type": "text",
|
| 788 |
+
"text": "Formally, we sample a long clip $(x_{L})$ and a short clip $(x_{S})$ from each video sequence with largely differ",
|
| 789 |
+
"bbox": [
|
| 790 |
+
75,
|
| 791 |
+
869,
|
| 792 |
+
468,
|
| 793 |
+
901
|
| 794 |
+
],
|
| 795 |
+
"page_idx": 4
|
| 796 |
+
},
|
| 797 |
+
{
|
| 798 |
+
"type": "text",
|
| 799 |
+
"text": "ent sampling strides $\\tau_{L}$ and $\\tau_{S}$ , where $\\tau_{S} < \\tau_{L}$ . Unlike LSTCL [69] and BraVe [56] that apply independent random sampling, in our paper the temporal span of long clips includes the one of short clips, which prevents dissimilar semantics from two clips in long-form videos. Then, we independently generate binary random masks with a masking ratio of $\\eta$ for each clip, which can be written as: $\\mathcal{R}_{\\mathrm{mask}}(x,\\eta), x \\in \\{x_L, x_S\\}$ . We set S4 model as the backbone of the query encoder $(f_q)$ and also adopt a momentum key encoder $(f_k)$ in the pipeline, which is widely accepted in MoCo [27], BYOL [22] and LSTCL [69]. Our query encoder and key encoder follow the same design with [22, 27, 69], that consist of the backbone, projection and prediction heads. Denoting the parameter of $f_{q}$ as $\\theta_{q}$ and the one of $f_{k}$ as $\\theta_{k}$ , we have: $\\theta_{k} = m\\theta_{k} + (1 - m)\\theta_{q}$ where $m \\in [0,1]$ is a momentum coefficient. Similarly, the LSMCL adopts similar objective as the InfoNCE [51]:",
|
| 800 |
+
"bbox": [
|
| 801 |
+
496,
|
| 802 |
+
325,
|
| 803 |
+
893,
|
| 804 |
+
583
|
| 805 |
+
],
|
| 806 |
+
"page_idx": 4
|
| 807 |
+
},
|
| 808 |
+
{
|
| 809 |
+
"type": "equation",
|
| 810 |
+
"text": "\n$$\n\\text {G i v e n :} q = f _ {q} \\left(\\mathcal {R} _ {\\text {m a s k}} \\left(x _ {S}, \\eta\\right)\\right), k = f _ {k} \\left(\\mathcal {R} _ {\\text {m a s k}} \\left(x _ {L}, \\eta\\right)\\right)\n$$\n",
|
| 811 |
+
"text_format": "latex",
|
| 812 |
+
"bbox": [
|
| 813 |
+
500,
|
| 814 |
+
618,
|
| 815 |
+
849,
|
| 816 |
+
636
|
| 817 |
+
],
|
| 818 |
+
"page_idx": 4
|
| 819 |
+
},
|
| 820 |
+
{
|
| 821 |
+
"type": "equation",
|
| 822 |
+
"text": "\n$$\n\\mathcal {L} _ {\\mathrm {L S M C L}} = \\sum_ {i} - \\log \\frac {\\exp \\left(q ^ {i} {} ^ {\\top} k ^ {i} / \\rho\\right)}{\\exp \\left(q ^ {i} {} ^ {\\top} k ^ {i} / \\rho\\right) + \\sum_ {j \\neq i} \\exp \\left(q ^ {i} {} ^ {\\top} k ^ {j} / \\rho\\right)} \\tag {8}\n$$\n",
|
| 823 |
+
"text_format": "latex",
|
| 824 |
+
"bbox": [
|
| 825 |
+
500,
|
| 826 |
+
638,
|
| 827 |
+
893,
|
| 828 |
+
694
|
| 829 |
+
],
|
| 830 |
+
"page_idx": 4
|
| 831 |
+
},
|
| 832 |
+
{
|
| 833 |
+
"type": "text",
|
| 834 |
+
"text": "where $\\rho$ is the temperature hyperparameter. As is commonly done in [6,9,10,22], we symmetrize the loss function by switching $x_{S}$ and $x_{L}$ in $f_{q}$ and $f_{k}$ . In our LSMCL, the S4 model is learned to find the correct step size $\\Delta$ and SSM parameters to match the representation of random masked long and short clips. Given our S5 model takes adaptively learned image tokens in the downstream task, we believe the LSMCL could improve the robustness as well as the temporal modeling ability of S5 model when dealing with partially sampled image tokens. In Section 4, our S5 model with LSMCL empirically shows significantly improved results in long-form video understanding.",
|
| 835 |
+
"bbox": [
|
| 836 |
+
496,
|
| 837 |
+
719,
|
| 838 |
+
893,
|
| 839 |
+
900
|
| 840 |
+
],
|
| 841 |
+
"page_idx": 4
|
| 842 |
+
},
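A compact sketch of the symmetrized LSMCL objective in Equation 8 is given below. It assumes the query/key encoders return clip-level embeddings and, as an additional assumption not stated in the text, L2-normalizes them before the dot products; all function names are illustrative:

```python
import torch
import torch.nn.functional as F

def info_nce(q: torch.Tensor, k: torch.Tensor, rho: float = 0.1) -> torch.Tensor:
    """q, k: [B, D] embeddings of matched short/long clips from the same B videos."""
    q = F.normalize(q, dim=-1)
    k = F.normalize(k, dim=-1)
    logits = q @ k.t() / rho                        # [B, B]; the diagonal holds the positives
    targets = torch.arange(q.shape[0], device=q.device)
    return F.cross_entropy(logits, targets)         # -log softmax over positive vs. negatives (Eq. 8)

def lsmcl_loss(f_q, f_k, x_short, x_long, rho: float = 0.1) -> torch.Tensor:
    # symmetrized loss: swap which clip goes through the query / key encoder
    loss_a = info_nce(f_q(x_short), f_k(x_long).detach(), rho)
    loss_b = info_nce(f_q(x_long), f_k(x_short).detach(), rho)
    return 0.5 * (loss_a + loss_b)
```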
|
| 843 |
+
{
|
| 844 |
+
"type": "page_number",
|
| 845 |
+
"text": "5",
|
| 846 |
+
"bbox": [
|
| 847 |
+
478,
|
| 848 |
+
924,
|
| 849 |
+
490,
|
| 850 |
+
936
|
| 851 |
+
],
|
| 852 |
+
"page_idx": 4
|
| 853 |
+
},
|
| 854 |
+
{
|
| 855 |
+
"type": "table",
|
| 856 |
+
"img_path": "images/de1dcb6457e0e44625be7a0c5a48c9d2f545c11e64e94d1a909fb49198e72f60.jpg",
|
| 857 |
+
"table_caption": [],
|
| 858 |
+
"table_footnote": [],
|
| 859 |
+
"table_body": "<table><tr><td rowspan=\"2\">Mask Generator</td><td colspan=\"3\">Content (↑)</td><td colspan=\"4\">Metadata (↑)</td><td colspan=\"2\">User (↓)</td></tr><tr><td>Relation</td><td>Speak</td><td>Scene</td><td>Director</td><td>Genre</td><td>Writer</td><td>Year</td><td>Like</td><td>View</td></tr><tr><td>No Mask (ViS4mer [32])</td><td>57.14</td><td>40.79</td><td>67.44</td><td>62.61</td><td>54.71</td><td>48.80</td><td>44.75</td><td>0.26</td><td>3.63</td></tr><tr><td>Random</td><td>54.81</td><td>38.22</td><td>67.44</td><td>63.60</td><td>54.97</td><td>47.00</td><td>42.70</td><td>0.25</td><td>4.00</td></tr><tr><td>Single TX</td><td>57.85</td><td>40.79</td><td>68.66</td><td>63.98</td><td>55.12</td><td>48.85</td><td>43.46</td><td>0.26</td><td>3.82</td></tr><tr><td>Single TXs4</td><td>60.54</td><td>41.21</td><td>69.83</td><td>66.43</td><td>57.55</td><td>49.47</td><td>44.15</td><td>0.25</td><td>3.51</td></tr><tr><td>Stacked TXs</td><td>59.51</td><td>41.21</td><td>69.83</td><td>64.91</td><td>55.12</td><td>51.83</td><td>47.55</td><td>0.25</td><td>3.42</td></tr><tr><td>Stacked TXs4</td><td>61.98</td><td>41.75</td><td>70.94</td><td>67.34</td><td>59.16</td><td>51.83</td><td>47.55</td><td>0.24</td><td>3.42</td></tr><tr><td>Linear</td><td>54.81</td><td>40.28</td><td>67.44</td><td>63.90</td><td>54.97</td><td>48.17</td><td>42.77</td><td>0.26</td><td>3.95</td></tr><tr><td>Linears4</td><td>61.98</td><td>41.75</td><td>69.88</td><td>66.40</td><td>58.80</td><td>50.60</td><td>47.70</td><td>0.25</td><td>3.51</td></tr></table>",
|
| 860 |
+
"bbox": [
|
| 861 |
+
106,
|
| 862 |
+
88,
|
| 863 |
+
851,
|
| 864 |
+
229
|
| 865 |
+
],
|
| 866 |
+
"page_idx": 5
|
| 867 |
+
},
|
| 868 |
+
{
|
| 869 |
+
"type": "text",
|
| 870 |
+
"text": "Table 1. Performance of various mask generators in LVU [72] dataset, where we adopt 60 frames per clip and $50\\%$ masking ratio. The bold results demonstrate the performance of using S4 feature $(x_{S_4}$ in Equation 5). We also provide the average improvement ratio (in green) of nine jobs using S4 features compared to ViT features at the conclusion of each bold row.",
|
| 871 |
+
"bbox": [
|
| 872 |
+
75,
|
| 873 |
+
239,
|
| 874 |
+
893,
|
| 875 |
+
282
|
| 876 |
+
],
|
| 877 |
+
"page_idx": 5
|
| 878 |
+
},
|
| 879 |
+
{
|
| 880 |
+
"type": "text",
|
| 881 |
+
"text": "4. Experiments",
|
| 882 |
+
"text_level": 1,
|
| 883 |
+
"bbox": [
|
| 884 |
+
76,
|
| 885 |
+
306,
|
| 886 |
+
209,
|
| 887 |
+
324
|
| 888 |
+
],
|
| 889 |
+
"page_idx": 5
|
| 890 |
+
},
|
| 891 |
+
{
|
| 892 |
+
"type": "text",
|
| 893 |
+
"text": "4.1. Dataset",
|
| 894 |
+
"text_level": 1,
|
| 895 |
+
"bbox": [
|
| 896 |
+
76,
|
| 897 |
+
330,
|
| 898 |
+
171,
|
| 899 |
+
345
|
| 900 |
+
],
|
| 901 |
+
"page_idx": 5
|
| 902 |
+
},
|
| 903 |
+
{
|
| 904 |
+
"type": "text",
|
| 905 |
+
"text": "LVU dataset [72]: is constructed from Movie Clip dataset [60]. It contains $\\sim 30K$ videos from $\\sim 3K$ movies. Each video lasts one to three minutes. The benchmark contains nine tasks covering a wide range of long-form video understanding tasks, which are further folded into three main categories: (i) content understanding, consisting of ('relationship', 'speaking style', 'scene/place') prediction, (ii) metadata prediction, including ('director', 'genre', 'writer', and 'movie release year') classification, and (iii) user engagement, predicting ('YouTube like ratio', and 'YouTube popularity'). For classification and regression tasks, we report accuracy (for content understanding and metadata prediction) and mean-squared error (MSE) (for user engagement) as the evaluation metrics.",
|
| 906 |
+
"bbox": [
|
| 907 |
+
75,
|
| 908 |
+
354,
|
| 909 |
+
470,
|
| 910 |
+
566
|
| 911 |
+
],
|
| 912 |
+
"page_idx": 5
|
| 913 |
+
},
|
| 914 |
+
{
|
| 915 |
+
"type": "text",
|
| 916 |
+
"text": "COIN [61, 62]: consists of 11,827 videos with 180 distinct procedural tasks, which are all collected from YouTube. These videos cover 12 domains, such as nursing & caring, vehicles, leisure & performance, gadgets, electric appliances, household items, science & craft, plants & fruits, snacks & drinks dishes, sports, and housework. The average length of a video is 2.36 minutes.",
|
| 917 |
+
"bbox": [
|
| 918 |
+
75,
|
| 919 |
+
573,
|
| 920 |
+
468,
|
| 921 |
+
679
|
| 922 |
+
],
|
| 923 |
+
"page_idx": 5
|
| 924 |
+
},
|
| 925 |
+
{
|
| 926 |
+
"type": "text",
|
| 927 |
+
"text": "Breakfast [39]: contains 1,712 videos of 10 complex cooking activities, which are performed by 52 different individuals in 18 different kitchens, resulting in over 77 hours of video footage. The averaged length of video in this dataset is around 2.7 minutes. Ten cooking activities include: making coffee, chocolate milk, juice, tea, cereals, fried egg, pancakes, fruit salad, sandwich and scrambled egg.",
|
| 928 |
+
"bbox": [
|
| 929 |
+
75,
|
| 930 |
+
686,
|
| 931 |
+
468,
|
| 932 |
+
792
|
| 933 |
+
],
|
| 934 |
+
"page_idx": 5
|
| 935 |
+
},
|
| 936 |
+
{
|
| 937 |
+
"type": "text",
|
| 938 |
+
"text": "4.2. Implementation Details",
|
| 939 |
+
"text_level": 1,
|
| 940 |
+
"bbox": [
|
| 941 |
+
76,
|
| 942 |
+
801,
|
| 943 |
+
294,
|
| 944 |
+
816
|
| 945 |
+
],
|
| 946 |
+
"page_idx": 5
|
| 947 |
+
},
|
| 948 |
+
{
|
| 949 |
+
"type": "text",
|
| 950 |
+
"text": "Following [32, 72], we stack three structure blocks, which share similar structure to that described in Equation 5, and sample video frames at 1 fps. Unlike previous work, we include an adaptive mask generator to effectively pick image tokens before feeding the input into S4 model. As",
|
| 951 |
+
"bbox": [
|
| 952 |
+
75,
|
| 953 |
+
824,
|
| 954 |
+
468,
|
| 955 |
+
902
|
| 956 |
+
],
|
| 957 |
+
"page_idx": 5
|
| 958 |
+
},
|
| 959 |
+
{
|
| 960 |
+
"type": "text",
|
| 961 |
+
"text": "the advantages of our S5 model will naturally be diminished on less redundant sequences, we follow the same architecture of ViS4mer [32] but adopt the S5 model as the first block. For data argumentation, we resize each video frame to the spatial resolution of $224 \\times 224$ and use a patch size of $16 \\times 16$ . In addition, we use ViT-L [14] pretrained on ImageNet-21K [38] as the feature extractor in the LVU dataset; Swin-B [43] pretrained on Kinetics-600 [35] as the feature extractor in COIN and Breakfast datasets. The size of the input in each dataset is also the same as [32]: we adopt 60-second input for the LVU dataset and 64-second input for the COIN and Breakfast datasets. In the LSMCL, we adopt the setting from LSTCL [69] and apply independent global random masking on long and short clips, which share the same masking ratio with the adaptive mask generator. Unless otherwise noted, we conduct our ablation studies on the LVU dataset due to its diverse tasks in the long-form video understanding. Finally, we report the best performance of our model on all three datasets and compare with the previous state-of-the-art works.",
|
| 962 |
+
"bbox": [
|
| 963 |
+
496,
|
| 964 |
+
308,
|
| 965 |
+
893,
|
| 966 |
+
609
|
| 967 |
+
],
|
| 968 |
+
"page_idx": 5
|
| 969 |
+
},
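For reference, the token counts implied by the configuration above (224×224 frames, 16×16 patches, 1 fps sampling) can be worked out directly; the 100-frame setting appears later in Table 2, and the 50% masking ratio is the one selected in Section 4.3. This is illustrative arithmetic, not part of the released code:

```python
patches_per_frame = (224 // 16) ** 2            # 14 x 14 = 196 spatial tokens per frame
for seconds in (60, 64, 100):                   # LVU uses 60 s; COIN/Breakfast use 64 s
    frames = seconds * 1                        # frames are sampled at 1 fps
    total_tokens = frames * patches_per_frame
    kept_after_mask = int(total_tokens * 0.5)   # with the 50% masking ratio of Sec. 4.3
    print(seconds, total_tokens, kept_after_mask)
# 60 -> 11760 tokens (5880 kept); 64 -> 12544 (6272); 100 -> 19600 (9800)
```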
|
| 970 |
+
{
|
| 971 |
+
"type": "text",
|
| 972 |
+
"text": "4.3. Ablation Study",
|
| 973 |
+
"text_level": 1,
|
| 974 |
+
"bbox": [
|
| 975 |
+
498,
|
| 976 |
+
619,
|
| 977 |
+
653,
|
| 978 |
+
636
|
| 979 |
+
],
|
| 980 |
+
"page_idx": 5
|
| 981 |
+
},
|
| 982 |
+
{
|
| 983 |
+
"type": "text",
|
| 984 |
+
"text": "a. Our S5 is better than S4 and random masking: To demonstrate the effectiveness of our proposed S5 model, we compare the performance of S4 models with no mask, random mask, and mask generators of different architectures. Specifically, we utilize one Transformer (TX), two stacked Transformers (TXs), and one linear layer as the mask generator and evaluate on 9 tasks on the LVU dataset (Table 1). In addition, we also evaluate the effectiveness of using S4 features from the momentum-updated S4 model. For each architecture, we compare the result of using ViT features and S4 features as the mask generator input. As can be seen from the Table 1, the performance of each task substantially increases with the computational complexity of the mask generator. Results show our design significantly outperforms ViS4mer [32] and the random masking strategy, and the performance of each task is further improved by using S4 features. Notably, the mask generator with one",
|
| 985 |
+
"bbox": [
|
| 986 |
+
496,
|
| 987 |
+
643,
|
| 988 |
+
893,
|
| 989 |
+
900
|
| 990 |
+
],
|
| 991 |
+
"page_idx": 5
|
| 992 |
+
},
|
| 993 |
+
{
|
| 994 |
+
"type": "page_number",
|
| 995 |
+
"text": "6",
|
| 996 |
+
"bbox": [
|
| 997 |
+
478,
|
| 998 |
+
925,
|
| 999 |
+
491,
|
| 1000 |
+
936
|
| 1001 |
+
],
|
| 1002 |
+
"page_idx": 5
|
| 1003 |
+
},
|
| 1004 |
+
{
|
| 1005 |
+
"type": "image",
|
| 1006 |
+
"img_path": "images/aab0ed33a14bff65b714093460247da8e99c5ad022496916cd35a5a24886ad09.jpg",
|
| 1007 |
+
"image_caption": [
|
| 1008 |
+
"Figure 4. Efficiency evaluation of each method in Table 1, which demonstrates the GPU memory usage as well as throughput. Our proposed S5 model with linear mask generator saves $25\\%$ memory cost and achieves on par throughput with ViS4mer [32]."
|
| 1009 |
+
],
|
| 1010 |
+
"image_footnote": [],
|
| 1011 |
+
"bbox": [
|
| 1012 |
+
135,
|
| 1013 |
+
87,
|
| 1014 |
+
410,
|
| 1015 |
+
195
|
| 1016 |
+
],
|
| 1017 |
+
"page_idx": 6
|
| 1018 |
+
},
|
| 1019 |
+
{
|
| 1020 |
+
"type": "text",
|
| 1021 |
+
"text": "linear layer achieves on par performance to one of the more complex transformer architectures.",
|
| 1022 |
+
"bbox": [
|
| 1023 |
+
75,
|
| 1024 |
+
287,
|
| 1025 |
+
467,
|
| 1026 |
+
318
|
| 1027 |
+
],
|
| 1028 |
+
"page_idx": 6
|
| 1029 |
+
},
|
| 1030 |
+
{
|
| 1031 |
+
"type": "text",
|
| 1032 |
+
"text": "b. Our S5 reduces up to $25\\%$ memory usage: In Figure 4, we also demonstrate the efficiency of our S5 model with the different masking architectures mentioned previously. Compared to ViS4mer (the one without masking strategies) using same number of input frames, our S5 model with linear mask generator reduces the memory footprint by $25\\%$ while maintaining the same level of throughput. Memory consumption and throughput are not improved by the intricate transformer mask generators. Since the linear mask generator has a smaller memory footprint and performs tasks more effectively overall, we use it in our S5 model in the following experiments.",
|
| 1033 |
+
"bbox": [
|
| 1034 |
+
75,
|
| 1035 |
+
325,
|
| 1036 |
+
467,
|
| 1037 |
+
507
|
| 1038 |
+
],
|
| 1039 |
+
"page_idx": 6
|
| 1040 |
+
},
|
| 1041 |
+
{
|
| 1042 |
+
"type": "text",
|
| 1043 |
+
"text": "c. Impact of Masking Ratio and Sequence Length: In Figure 5a and 5b, we study the effect of masking ratio and sequence length with our S5 model. We set ViS4mer [32] (60 frames without mask generator) as baseline and report the average improvement percentage of 9 tasks on LVU dataset by using S5 model with variant masking ratio/sequence length. To demonstrate the effectiveness of our S5 model, we also compare the performance of ViS4mer [32] with different settings in these two figures. Figure 5a clearly shows that the performance of our S5 model increases initially as the masking ratio increases, which indicates that our selective model effectively picks informative image tokens for the S4 model. However, the performance starts to drop dramatically when the masking ratio is over $50\\%$ . This is because when the masking ratio increases to be above certain level, the informative tokens are forced to be dropped. As a result, we adopt $50\\%$ masking ratio in our following experiments. In Figure 5b, we observe substantial improvement of S5 model with increasing number of input frames. In contrast to the performance of ViS4mer [32], our proposed S5 model is indeed able to capture longer term dependencies while reducing the spatial-temporal redundancy in the input.",
|
| 1044 |
+
"bbox": [
|
| 1045 |
+
75,
|
| 1046 |
+
515,
|
| 1047 |
+
467,
|
| 1048 |
+
863
|
| 1049 |
+
],
|
| 1050 |
+
"page_idx": 6
|
| 1051 |
+
},
|
| 1052 |
+
{
|
| 1053 |
+
"type": "text",
|
| 1054 |
+
"text": "d. Effect of Multiple S5 models: As shown in Figure 3, multiple S5 models can be stacked in the pipeline, similar",
|
| 1055 |
+
"bbox": [
|
| 1056 |
+
75,
|
| 1057 |
+
869,
|
| 1058 |
+
467,
|
| 1059 |
+
900
|
| 1060 |
+
],
|
| 1061 |
+
"page_idx": 6
|
| 1062 |
+
},
|
| 1063 |
+
{
|
| 1064 |
+
"type": "text",
|
| 1065 |
+
"text": "to what is commonly done in Transformer [4, 14, 73] and ViS4mer [32]. In the previous setup, we only adopt one S5 model, leaving the remaining blocks as S4 models. By stacking multiple S5 models, we find a further $0.5\\%$ average improvement on the LVU dataset. Less redundant sequences will inevitably reduce the performance gain from our S5 model, decreasing the benefit from stacking additional S5 blocks. As a result, we utilize only one S5 model after the video encoder for maximum memory efficiency gain and throughput.",
|
| 1066 |
+
"bbox": [
|
| 1067 |
+
496,
|
| 1068 |
+
90,
|
| 1069 |
+
890,
|
| 1070 |
+
242
|
| 1071 |
+
],
|
| 1072 |
+
"page_idx": 6
|
| 1073 |
+
},
|
| 1074 |
+
{
|
| 1075 |
+
"type": "text",
|
| 1076 |
+
"text": "e. Ablation on LSMCL: In Figure 5c and 5d, we evaluate the effectiveness of our proposed LSMCL with different sampling strides and random masking ratios. For both figures, we set the performance of ViS4mer [32] as the baseline and report the average improvement ratio (in percentage) of 9 tasks from LVU with different settings. From Figure 5c, our S5 model with LSMCL can achieve better performance even when $\\tau_{L} = \\tau_{S}$ , which suggests that LSMCL can increase the robustness of our S5 model and help it handle incorrectly picked tokens. When we gradually increase the $\\frac{\\tau_L}{\\tau_S}$ , the performance of S5 model is further improved as the model is able to capture longer temporal context via the proposed LSMCL. Indeed, the performance using LSMCL approaches the performance without LSMCL with $66\\%$ more input frames (shown in Figure 5b both around $6\\%$ boost). In Figure 5d, we further ablate the random masking ratio used in LSMCL. When the masking ratio of LSMCL is over $50\\%$ , the benefit from LSMCL is insignificant as the input does not provide sufficient information. Thus, we consider $50\\%$ masking ratio in LSMCL for better efficiency in the long-form video contrastive learning.",
|
| 1077 |
+
"bbox": [
|
| 1078 |
+
496,
|
| 1079 |
+
250,
|
| 1080 |
+
890,
|
| 1081 |
+
566
|
| 1082 |
+
],
|
| 1083 |
+
"page_idx": 6
|
| 1084 |
+
},
|
| 1085 |
+
{
|
| 1086 |
+
"type": "text",
|
| 1087 |
+
"text": "4.4. Comparison with the State-Of-The-Arts",
|
| 1088 |
+
"text_level": 1,
|
| 1089 |
+
"bbox": [
|
| 1090 |
+
498,
|
| 1091 |
+
575,
|
| 1092 |
+
841,
|
| 1093 |
+
590
|
| 1094 |
+
],
|
| 1095 |
+
"page_idx": 6
|
| 1096 |
+
},
|
| 1097 |
+
{
|
| 1098 |
+
"type": "text",
|
| 1099 |
+
"text": "In Table 2, we compare our method on LVU dataset with previous state-of-the-art methods. Specifically, the LST [32] adopt the same architecture with ours, but substitutes the S5/S4 model to the transformer architecture. Whereas the Performer [11] and Orthoformer [54] apply the efficient attention in the transformer architecture, that do not require quadratic complexity w.r.t. the input length. When compared to baseline ViS4mer [32], we achieve up to $9.6\\%$ improvement. When compared to other methods, ours outperforms by an even more significant margin. This shows that our method is consistently more effective in understanding the long-form videos.",
|
| 1100 |
+
"bbox": [
|
| 1101 |
+
496,
|
| 1102 |
+
598,
|
| 1103 |
+
890,
|
| 1104 |
+
779
|
| 1105 |
+
],
|
| 1106 |
+
"page_idx": 6
|
| 1107 |
+
},
|
| 1108 |
+
{
|
| 1109 |
+
"type": "text",
|
| 1110 |
+
"text": "To demonstrate the generalizability of our method, we evaluate our S5 model on COIN [61, 62] and Breakfast [39] datasets, which are challenging long-range procedural activity classification datasets. Our proposed method achieves $2.4\\%$ and $5.5\\%$ over the ViS4mer [32] and outperforms the other state-of-the-arts by $0.81\\%$ and $0.80\\%$ respectively. Notice that D-Sprv. [42] leverages HowTo100M dataset [48] for pretraining, which volume is much larger",
|
| 1111 |
+
"bbox": [
|
| 1112 |
+
496,
|
| 1113 |
+
780,
|
| 1114 |
+
890,
|
| 1115 |
+
900
|
| 1116 |
+
],
|
| 1117 |
+
"page_idx": 6
|
| 1118 |
+
},
|
| 1119 |
+
{
|
| 1120 |
+
"type": "page_number",
|
| 1121 |
+
"text": "7",
|
| 1122 |
+
"bbox": [
|
| 1123 |
+
478,
|
| 1124 |
+
924,
|
| 1125 |
+
488,
|
| 1126 |
+
935
|
| 1127 |
+
],
|
| 1128 |
+
"page_idx": 6
|
| 1129 |
+
},
|
| 1130 |
+
{
|
| 1131 |
+
"type": "image",
|
| 1132 |
+
"img_path": "images/b3fc889fb246fe73689280c6c29bd0fc1fe9f04512765bcd1e4552753faaca26.jpg",
|
| 1133 |
+
"image_caption": [
|
| 1134 |
+
"(a)"
|
| 1135 |
+
],
|
| 1136 |
+
"image_footnote": [],
|
| 1137 |
+
"bbox": [
|
| 1138 |
+
80,
|
| 1139 |
+
88,
|
| 1140 |
+
279,
|
| 1141 |
+
188
|
| 1142 |
+
],
|
| 1143 |
+
"page_idx": 7
|
| 1144 |
+
},
|
| 1145 |
+
{
|
| 1146 |
+
"type": "image",
|
| 1147 |
+
"img_path": "images/e5a6d315ce6f32bf1a5207c5067cd01d9da0f0c08c26260c99d766ee105c8267.jpg",
|
| 1148 |
+
"image_caption": [
|
| 1149 |
+
"(b)",
|
| 1150 |
+
"Figure 5. Compared to the baseline performance, average improvement performance of our method on LVU dataset. Unless otherwise noted, the default number of input frame and masking ratio is 60 and $50\\%$ . (a). We compared our S5 model and S4 model with random masking with increasing masking ratio; (b). We compare our S5 model and S4 model with increasing number of input frames; (c). We show the effect of LSMCL pretraining with different long-short sampling stride ratio. In addition, we provide the performance of S5 model without LSMCL and S5 model with 100 input frames; (d). We show the impact of the increasing masking ratio in the LSMCL pretraining."
|
| 1151 |
+
],
|
| 1152 |
+
"image_footnote": [],
|
| 1153 |
+
"bbox": [
|
| 1154 |
+
282,
|
| 1155 |
+
88,
|
| 1156 |
+
480,
|
| 1157 |
+
188
|
| 1158 |
+
],
|
| 1159 |
+
"page_idx": 7
|
| 1160 |
+
},
|
| 1161 |
+
{
|
| 1162 |
+
"type": "image",
|
| 1163 |
+
"img_path": "images/48fdf624d87e58516086f4347ee0b95f44788c9b9d625301bd19cb9e213434c3.jpg",
|
| 1164 |
+
"image_caption": [
|
| 1165 |
+
"(c)"
|
| 1166 |
+
],
|
| 1167 |
+
"image_footnote": [],
|
| 1168 |
+
"bbox": [
|
| 1169 |
+
483,
|
| 1170 |
+
88,
|
| 1171 |
+
678,
|
| 1172 |
+
188
|
| 1173 |
+
],
|
| 1174 |
+
"page_idx": 7
|
| 1175 |
+
},
|
| 1176 |
+
{
|
| 1177 |
+
"type": "image",
|
| 1178 |
+
"img_path": "images/6b4e79b91c4edcd73555857733a12d7cb863c9e1ee21d5cb0b6f80f103aabdbb.jpg",
|
| 1179 |
+
"image_caption": [
|
| 1180 |
+
"(d)"
|
| 1181 |
+
],
|
| 1182 |
+
"image_footnote": [],
|
| 1183 |
+
"bbox": [
|
| 1184 |
+
686,
|
| 1185 |
+
88,
|
| 1186 |
+
885,
|
| 1187 |
+
188
|
| 1188 |
+
],
|
| 1189 |
+
"page_idx": 7
|
| 1190 |
+
},
|
| 1191 |
+
{
|
| 1192 |
+
"type": "table",
|
| 1193 |
+
"img_path": "images/4dac03718e73fb9727794d9b93c536aa65e702bffc3695da4bc8eed73d8d35e5.jpg",
|
| 1194 |
+
"table_caption": [],
|
| 1195 |
+
"table_footnote": [],
|
| 1196 |
+
"table_body": "<table><tr><td rowspan=\"2\">Model</td><td colspan=\"3\">Content (↑)</td><td colspan=\"4\">Metadata (↑)</td><td colspan=\"2\">User (↓)</td><td rowspan=\"2\">GPU Usage (GB) (↓)</td></tr><tr><td>Relation</td><td>Speak</td><td>Scene</td><td>Director</td><td>Genre</td><td>Writer</td><td>Year</td><td>Like</td><td>View</td></tr><tr><td>Obj. T4mer [72]</td><td>54.76</td><td>33.17</td><td>52.94</td><td>47.66</td><td>52.74</td><td>36.30</td><td>37.76</td><td>0.30</td><td>3.68</td><td>N/A</td></tr><tr><td>Performer [11]</td><td>50.00</td><td>38.80</td><td>60.46</td><td>58.87</td><td>49.45</td><td>48.21</td><td>41.25</td><td>0.31</td><td>3.93</td><td>5.93</td></tr><tr><td>Orthoformer [54]</td><td>50.00</td><td>38.30</td><td>66.27</td><td>55.14</td><td>55.79</td><td>47.02</td><td>43.35</td><td>0.29</td><td>3.86</td><td>5.56</td></tr><tr><td>VideoBERT [58]</td><td>52.80</td><td>37.90</td><td>54.90</td><td>47.30</td><td>51.90</td><td>38.50</td><td>36.10</td><td>0.32</td><td>4.46</td><td>N/A</td></tr><tr><td>LST [32]</td><td>52.38</td><td>37.31</td><td>62.79</td><td>56.07</td><td>52.70</td><td>42.26</td><td>39.16</td><td>0.31</td><td>3.83</td><td>41.38</td></tr><tr><td>ViS4mer [32]</td><td>57.14</td><td>40.79</td><td>67.44</td><td>62.61</td><td>54.71</td><td>48.80</td><td>44.75</td><td>0.26</td><td>3.63</td><td>5.15</td></tr><tr><td>Ours60 frames</td><td>61.98</td><td>41.75</td><td>69.88</td><td>66.40</td><td>58.80</td><td>50.60</td><td>47.70</td><td>0.25</td><td>3.51</td><td>3.85</td></tr><tr><td>Ours60 frames+LSMCL</td><td>61.98</td><td>41.75</td><td>72.53</td><td>66.40</td><td>61.34</td><td>50.60</td><td>47.70</td><td>0.24</td><td>3.51</td><td>3.85</td></tr><tr><td>Ours100 frames</td><td>66.71</td><td>41.78</td><td>73.28</td><td>66.64</td><td>63.65</td><td>50.60</td><td>47.85</td><td>0.25</td><td>3.51</td><td>3.95</td></tr><tr><td>Ours100 frames+LSMCL</td><td>67.11</td><td>42.12</td><td>73.49</td><td>67.32</td><td>65.41</td><td>51.27</td><td>47.95</td><td>0.24</td><td>3.51</td><td>3.95</td></tr></table>",
|
| 1197 |
+
"bbox": [
|
| 1198 |
+
117,
|
| 1199 |
+
297,
|
| 1200 |
+
854,
|
| 1201 |
+
476
|
| 1202 |
+
],
|
| 1203 |
+
"page_idx": 7
|
| 1204 |
+
},
|
| 1205 |
+
{
|
| 1206 |
+
"type": "table",
|
| 1207 |
+
"img_path": "images/65df3f70426fd9ea45afca471bb7a2af66face8c035714783f828db5b99e0a2b.jpg",
|
| 1208 |
+
"table_caption": [
|
| 1209 |
+
"Table 2. Comparison to the state-of-the-art methods on LVU dataset testing set."
|
| 1210 |
+
],
|
| 1211 |
+
"table_footnote": [],
|
| 1212 |
+
"table_body": "<table><tr><td>Method</td><td>P.T. Dataset</td><td>P.T. Samples</td><td>Accuracy</td></tr><tr><td>TSN [62]</td><td>Kinetics-400</td><td>306K</td><td>73.40</td></tr><tr><td>D-Sprv. [42]</td><td>HowTo100M</td><td>136M</td><td>90.00</td></tr><tr><td>ViS4mer [32]</td><td>Kinetics-600</td><td>495K</td><td>88.41</td></tr><tr><td>Ours</td><td>Kinetics-600</td><td>495K</td><td>90.42</td></tr><tr><td>Ours+LSMCL</td><td>Kinetics-600</td><td>495K</td><td>90.81</td></tr></table>",
|
| 1213 |
+
"bbox": [
|
| 1214 |
+
86,
|
| 1215 |
+
523,
|
| 1216 |
+
459,
|
| 1217 |
+
616
|
| 1218 |
+
],
|
| 1219 |
+
"page_idx": 7
|
| 1220 |
+
},
|
| 1221 |
+
{
|
| 1222 |
+
"type": "table",
|
| 1223 |
+
"img_path": "images/d580dfa072f696d3ee715a0f29218dbe9eff037f9d6bba8504982f1115f08738.jpg",
|
| 1224 |
+
"table_caption": [
|
| 1225 |
+
"Table 3. Comparison to the state-of-the-art methods on COIN dataset. P.T. stands for pretraining."
|
| 1226 |
+
],
|
| 1227 |
+
"table_footnote": [],
|
| 1228 |
+
"table_body": "<table><tr><td>Method</td><td>P.T. Dataset</td><td>P.T. Samples</td><td>Accuracy</td></tr><tr><td>VideoGraph [30]</td><td>Kinetics-400</td><td>306K</td><td>69.50</td></tr><tr><td>Timeception [29]</td><td>Kinetics-400</td><td>306K</td><td>71.30</td></tr><tr><td>GHRM [78]</td><td>Kinetics-400</td><td>306K</td><td>75.50</td></tr><tr><td>D-Sprv. [42]</td><td>HowTo100M</td><td>136M</td><td>89.90</td></tr><tr><td>ViS4mer [32]</td><td>Kinetics-600</td><td>495K</td><td>85.10*</td></tr><tr><td>Ours</td><td>Kinetics-600</td><td>495K</td><td>90.14</td></tr><tr><td>Ours+LSMCL</td><td>Kinetics-600</td><td>495K</td><td>90.70</td></tr></table>",
|
| 1229 |
+
"bbox": [
|
| 1230 |
+
78,
|
| 1231 |
+
656,
|
| 1232 |
+
470,
|
| 1233 |
+
776
|
| 1234 |
+
],
|
| 1235 |
+
"page_idx": 7
|
| 1236 |
+
},
|
| 1237 |
+
{
|
| 1238 |
+
"type": "text",
|
| 1239 |
+
"text": "Table 4. Comparison to the state-of-the-art methods on Breakfast dataset. P.T. stands for pretraining. *We were not able to reproduce the $88.17\\%$ baseline result reported in [32], but our proposed S5 model still largely improves from $85.10\\%$ , and achieves the new state-of-the-art result.",
|
| 1240 |
+
"bbox": [
|
| 1241 |
+
75,
|
| 1242 |
+
787,
|
| 1243 |
+
468,
|
| 1244 |
+
856
|
| 1245 |
+
],
|
| 1246 |
+
"page_idx": 7
|
| 1247 |
+
},
|
| 1248 |
+
{
|
| 1249 |
+
"type": "text",
|
| 1250 |
+
"text": "than our pre-training dataset (Kinetics-600 [7]). Putting together the aforementioned performance gain and mem",
|
| 1251 |
+
"bbox": [
|
| 1252 |
+
75,
|
| 1253 |
+
869,
|
| 1254 |
+
468,
|
| 1255 |
+
902
|
| 1256 |
+
],
|
| 1257 |
+
"page_idx": 7
|
| 1258 |
+
},
|
| 1259 |
+
{
|
| 1260 |
+
"type": "text",
|
| 1261 |
+
"text": "ory efficiency gain, our S5 model successfully demonstrates its efficiency and effectiveness in learning discriminative representation via selecting informative image tokens from long-form video sequences.",
|
| 1262 |
+
"bbox": [
|
| 1263 |
+
498,
|
| 1264 |
+
527,
|
| 1265 |
+
890,
|
| 1266 |
+
589
|
| 1267 |
+
],
|
| 1268 |
+
"page_idx": 7
|
| 1269 |
+
},
|
| 1270 |
+
{
|
| 1271 |
+
"type": "text",
|
| 1272 |
+
"text": "5. Conclusion",
|
| 1273 |
+
"text_level": 1,
|
| 1274 |
+
"bbox": [
|
| 1275 |
+
500,
|
| 1276 |
+
614,
|
| 1277 |
+
617,
|
| 1278 |
+
630
|
| 1279 |
+
],
|
| 1280 |
+
"page_idx": 7
|
| 1281 |
+
},
|
| 1282 |
+
{
|
| 1283 |
+
"type": "text",
|
| 1284 |
+
"text": "In this paper, we proposed a selective structured state-space sequence (S5) model for long-form video understanding, where we adopt a lightweight mask generator to adaptively pick informative tokens from long-form videos. Our mask generator avoids dense self-attention computation as what is applied in previous works. It leverages the sequential output of the simulated linear time invariant (LTI) system, and benefits from the momentum distillation of S4 model, enabling our S5 model to dynamically learn from informative tokens for different long-form video tasks. To mitigate the negative impact of picking less informative tokens, we also propose a LSMCL pretraining to improve the robustness and further broaden the temporal horizon of our model. Through extensive experiments, we demonstrate the effectiveness of each proposed component in our S5 model, achieving the new state-of-the-art performance in three challenging long-form video understanding benchmarks.",
|
| 1285 |
+
"bbox": [
|
| 1286 |
+
496,
|
| 1287 |
+
643,
|
| 1288 |
+
890,
|
| 1289 |
+
900
|
| 1290 |
+
],
|
| 1291 |
+
"page_idx": 7
|
| 1292 |
+
},
|
| 1293 |
+
{
|
| 1294 |
+
"type": "page_number",
|
| 1295 |
+
"text": "8",
|
| 1296 |
+
"bbox": [
|
| 1297 |
+
478,
|
| 1298 |
+
924,
|
| 1299 |
+
490,
|
| 1300 |
+
936
|
| 1301 |
+
],
|
| 1302 |
+
"page_idx": 7
|
| 1303 |
+
},
|
| 1304 |
+
{
|
| 1305 |
+
"type": "text",
|
| 1306 |
+
"text": "References",
|
| 1307 |
+
"text_level": 1,
|
| 1308 |
+
"bbox": [
|
| 1309 |
+
78,
|
| 1310 |
+
89,
|
| 1311 |
+
173,
|
| 1312 |
+
104
|
| 1313 |
+
],
|
| 1314 |
+
"page_idx": 8
|
| 1315 |
+
},
|
| 1316 |
+
{
|
| 1317 |
+
"type": "list",
|
| 1318 |
+
"sub_type": "ref_text",
|
| 1319 |
+
"list_items": [
|
| 1320 |
+
"[1] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lučić, and Cordelia Schmid. Vivit: A video vision transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 6836-6846, October 2021. 1",
|
| 1321 |
+
"[2] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. 4, 5",
|
| 1322 |
+
"[3] Moez Baccouche, Franck Mamalet, Christian Wolf, Christophe Garcia, and Atilla Baskurt. Sequential deep learning for human action recognition. In International workshop on human behavior understanding, pages 29-39. Springer, 2011. 1",
|
| 1323 |
+
"[4] Gedas Bertasius, Heng Wang, and Lorenzo Torresani. Is space-time attention all you need for video understanding? In Marina Meila and Tong Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pages 813-824. PMLR, 2021. 1, 3, 4, 7",
|
| 1324 |
+
"[5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 2",
|
| 1325 |
+
"[6] Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. arXiv preprint arXiv:2006.09882, 2020. 5",
|
| 1326 |
+
"[7] Joao Carreira, Eric Noland, Andras Banki-Horvath, Chloe Hillier, and Andrew Zisserman. A short note about kinetics-600. arXiv preprint arXiv:1808.01340, 2018. 8",
|
| 1327 |
+
"[8] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International conference on machine learning, pages 1597-1607. PMLR, 2020. 3, 12",
|
| 1328 |
+
"[9] Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. arXiv preprint arXiv:2011.10566, 2020.3,5",
|
| 1329 |
+
"[10] Xinlei Chen, Saining Xie, and Kaiming He. An empirical study of training self-supervised visual transformers. arXiv preprint arXiv:2104.02057, 2021. 5, 12",
|
| 1330 |
+
"[11] Krzysztof Choromanski, Valerii Likhosherstov, David Dohan, Xingyou Song, Andreea Gane, Tamas Sarlos, Peter Hawkins, Jared Davis, Afroz Mohiuddin, Lukasz Kaiser, et al. Rethinking attention with performers. arXiv preprint arXiv:2009.14794, 2020. 2, 7, 8",
|
| 1331 |
+
"[12] Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc V Le, and Ruslan Salakhutdinov. Transformer-xl: Attentive language models beyond a fixed-length context. arXiv preprint arXiv:1901.02860, 2019. 2",
|
| 1332 |
+
"[13] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. 2"
|
| 1333 |
+
],
|
| 1334 |
+
"bbox": [
|
| 1335 |
+
78,
|
| 1336 |
+
114,
|
| 1337 |
+
470,
|
| 1338 |
+
898
|
| 1339 |
+
],
|
| 1340 |
+
"page_idx": 8
|
| 1341 |
+
},
|
| 1342 |
+
{
|
| 1343 |
+
"type": "list",
|
| 1344 |
+
"sub_type": "ref_text",
|
| 1345 |
+
"list_items": [
|
| 1346 |
+
"[14] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 1, 6, 7",
|
| 1347 |
+
"[15] Haoqi Fan, Bo Xiong, Karttikeya Mangalam, Yanghao Li, Zhicheng Yan, Jitendra Malik, and Christoph Feichtenhofer. Multiscale vision transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 6824-6835, October 2021. 1, 4",
|
| 1348 |
+
"[16] Christoph Feichtenhofer, Haoqi Fan, Yanghao Li, and Kaiming He. Masked autoencoders as spatiotemporal learners. arXiv preprint arXiv:2205.09113, 2022. 3",
|
| 1349 |
+
"[17] Christoph Feichtenhofer, Haoqi Fan, Bo Xiong, Ross Girshick, and Kaiming He. A large-scale study on unsupervised spatiotemporal representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3299-3309, 2021. 2",
|
| 1350 |
+
"[18] Christoph Feichtenhofer, Haoqi Fan, Bo Xiong, Ross B. Girshick, and Kaiming He. A large-scale study on unsupervised spatiotemporal representation learning. In IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2021, virtual, June 19-25, 2021, pages 3299-3309. Computer Vision Foundation / IEEE, 2021. 3",
|
| 1351 |
+
"[19] Christoph Feichtenhofer, Axel Pinz, and Richard P Wildes. Spatiotemporal multiplier networks for video action recognition. In CVPR, 2017. 1",
|
| 1352 |
+
"[20] Christoph Feichtenhofer, Axel Pinz, and Richard P Wildes. Temporal residual networks for dynamic scene recognition. In CVPR, 2017. 1",
|
| 1353 |
+
"[21] Priya Goyal, Piotr Dólar, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch sgd: Training imagenet in 1 hour. arXiv preprint arXiv:1706.02677, 2017. 12",
|
| 1354 |
+
"[22] Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre H Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Daniel Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent: A new approach to self-supervised learning. arXiv preprint arXiv:2006.07733, 2020. 3, 5",
|
| 1355 |
+
"[23] Albert Gu, Tri Dao, Stefano Ermon, Atri Rudra, and Christopher Ré. Hippo: Recurrent memory with optimal polynomial projections. Advances in Neural Information Processing Systems, 33:1474-1487, 2020. 3",
|
| 1356 |
+
"[24] Albert Gu, Karan Goel, and Christopher Ré. Efficiently modeling long sequences with structured state spaces. arXiv preprint arXiv:2111.00396, 2021. 1, 2, 3, 5",
|
| 1357 |
+
"[25] Albert Gu, Isys Johnson, Karan Goel, Khaled Saab, Tri Dao, Atri Rudra, and Christopher Ré. Combining recurrent, convolutional, and continuous-time models with linear state space layers. Advances in neural information processing systems, 34:572-585, 2021. 3",
|
| 1358 |
+
"[26] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference"
|
| 1359 |
+
],
|
| 1360 |
+
"bbox": [
|
| 1361 |
+
501,
|
| 1362 |
+
92,
|
| 1363 |
+
893,
|
| 1364 |
+
900
|
| 1365 |
+
],
|
| 1366 |
+
"page_idx": 8
|
| 1367 |
+
},
|
| 1368 |
+
{
|
| 1369 |
+
"type": "page_number",
|
| 1370 |
+
"text": "9",
|
| 1371 |
+
"bbox": [
|
| 1372 |
+
478,
|
| 1373 |
+
924,
|
| 1374 |
+
491,
|
| 1375 |
+
936
|
| 1376 |
+
],
|
| 1377 |
+
"page_idx": 8
|
| 1378 |
+
},
|
| 1379 |
+
{
|
| 1380 |
+
"type": "list",
|
| 1381 |
+
"sub_type": "ref_text",
|
| 1382 |
+
"list_items": [
|
| 1383 |
+
"on Computer Vision and Pattern Recognition, pages 16000-16009, 2022. 3",
|
| 1384 |
+
"[27] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9729-9738, 2020. 3, 5",
|
| 1385 |
+
"[28] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 12",
|
| 1386 |
+
"[29] Noureldien Hussein, Efstratios Gavves, and Arnold WM Smeulders. Timeception for complex action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 254-263, 2019. 8",
|
| 1387 |
+
"[30] Noureldien Hussein, Efstratios Gavves, and Arnold WM Smeulders. Videograph: Recognizing minutes-long human activities in videos. arXiv preprint arXiv:1905.05143, 2019. 8",
|
| 1388 |
+
"[31] Sergey Ioffe. Batch renormalization: Towards reducing minibatch dependence in batch-normalized models. Advances in neural information processing systems, 30, 2017. 12",
|
| 1389 |
+
"[32] Md Mohaiminul Islam and Gedas Bertasius. Long movie clip classification with state-space video models. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 1, 2, 3, 4, 5, 6, 7, 8, 12, 13",
|
| 1390 |
+
"[33] Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. arXiv preprint arXiv:1611.01144, 2016. 2, 5",
|
| 1391 |
+
"[34] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are rnns: Fast autoregressive transformers with linear attention. In International Conference on Machine Learning, pages 5156-5165. PMLR, 2020. 2",
|
| 1392 |
+
"[35] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017. The Kinetics-400 dataset is licensed under the Creative Commons Attribution-NonCommercial 4.0 International License. 6",
|
| 1393 |
+
"[36] Nikita Kitaev, Łukasz Kaiser, and Anselm Levskaya. Reformer: The efficient transformer. arXiv preprint arXiv:2001.04451, 2020. 2",
|
| 1394 |
+
"[37] Bruno Korbar, Du Tran, and Lorenzo Torresani. Scsampler: Sampling salient clips from video for efficient action recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6232-6242, 2019. 2",
|
| 1395 |
+
"[38] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Advances in neural information processing systems, 25:1097-1105, 2012. 6",
|
| 1396 |
+
"[39] Hilde Kuehne, Ali Arslan, and Thomas Serre. The language of actions: Recovering the syntax and semantics of goal-directed human activities. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 780-787, 2014. 1, 6, 7, 12",
|
| 1397 |
+
"[40] Youwei Liang, Chongjian Ge, Zhan Tong, Yibing Song, Jue Wang, and Pengtao Xie. Not all patches are what you"
|
| 1398 |
+
],
|
| 1399 |
+
"bbox": [
|
| 1400 |
+
78,
|
| 1401 |
+
92,
|
| 1402 |
+
468,
|
| 1403 |
+
900
|
| 1404 |
+
],
|
| 1405 |
+
"page_idx": 9
|
| 1406 |
+
},
|
| 1407 |
+
{
|
| 1408 |
+
"type": "list",
|
| 1409 |
+
"sub_type": "ref_text",
|
| 1410 |
+
"list_items": [
|
| 1411 |
+
"need: Expediting vision transformers via token reorganizations. arXiv preprint arXiv:2202.07800, 2022. 2, 3, 4",
|
| 1412 |
+
"[41] Xudong Lin, Gedas Bertasius, Jue Wang, Shih-Fu Chang, Devi Parikh, and Lorenzo Torresani. Vx2text: End-to-end learning of video-based text generation from multimodal inputs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7005-7015, 2021. 5",
|
| 1413 |
+
"[42] Xudong Lin, Fabio Petroni, Gedas Bertasius, Marcus Rohrbach, Shih-Fu Chang, and Lorenzo Torresani. Learning to recognize procedural activities with distant supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13853-13863, 2022. 7, 8",
|
| 1414 |
+
"[43] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. arXiv preprint arXiv:2103.14030, 2021. 2, 6",
|
| 1415 |
+
"[44] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swim transformer. arXiv preprint arXiv:2106.13230, 2021. 1, 2, 4",
|
| 1416 |
+
"[45] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 12",
|
| 1417 |
+
"[46] Lingchen Meng, Hengduo Li, Bor-Chun Chen, Shiyi Lan, Zuxuan Wu, Yu-Gang Jiang, and Ser-Nam Lim. Adavit: Adaptive vision transformers for efficient image recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12309-12318, 2022. 2, 4, 5",
|
| 1418 |
+
"[47] Yue Meng, Chung-Ching Lin, Rameswar Panda, Prasanna Sattigeri, Leonid Karlinsky, Aude Oliva, Kate Saenko, and Rogerio Feris. Ar-net: Adaptive frame resolution for efficient action recognition. In European Conference on Computer Vision, pages 86-104. Springer, 2020. 2",
|
| 1419 |
+
"[48] Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. Howto100m: Learning a text-video embedding by watching hundred million narrated video clips. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2630-2640, 2019. 7",
|
| 1420 |
+
"[49] Daniel Neimark, Omri Bar, Maya Zohar, and Dotan Asselmann. Video transformer network. arXiv preprint arXiv:2102.00719, 2021. 1, 3",
|
| 1421 |
+
"[50] Eric Nguyen, Karan Goel, Albert Gu, Gordon W Downs, Preey Shah, Tri Dao, Stephen A Baccus, and Christopher Ré. S4nd: Modeling images and videos as multidimensional signals using state spaces. Advances in neural information processing systems, 2022. 2",
|
| 1422 |
+
"[51] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018. 5",
|
| 1423 |
+
"[52] Zizheng Pan, Bohan Zhuang, Jing Liu, Haoyu He, and Jianfei Cai. Scalable vision transformers with hierarchical pooling. In Proceedings of the IEEE/cvf international conference on computer vision, pages 377-386, 2021. 2",
|
| 1424 |
+
"[53] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming"
|
| 1425 |
+
],
|
| 1426 |
+
"bbox": [
|
| 1427 |
+
501,
|
| 1428 |
+
92,
|
| 1429 |
+
890,
|
| 1430 |
+
900
|
| 1431 |
+
],
|
| 1432 |
+
"page_idx": 9
|
| 1433 |
+
},
|
| 1434 |
+
{
|
| 1435 |
+
"type": "page_number",
|
| 1436 |
+
"text": "10",
|
| 1437 |
+
"bbox": [
|
| 1438 |
+
477,
|
| 1439 |
+
924,
|
| 1440 |
+
495,
|
| 1441 |
+
936
|
| 1442 |
+
],
|
| 1443 |
+
"page_idx": 9
|
| 1444 |
+
},
|
| 1445 |
+
{
|
| 1446 |
+
"type": "list",
|
| 1447 |
+
"sub_type": "ref_text",
|
| 1448 |
+
"list_items": [
|
| 1449 |
+
"Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. arXiv preprint arXiv:1912.01703, 2019. 12",
|
| 1450 |
+
"[54] Mandela Patrick, Dylan Campbell, Yuki Asano, Ishan Misra, Florian Metze, Christoph Feichtenhofer, Andrea Vedaldi, and João F Henriques. Keeping your eye on the ball: Trajectory attention in video transformers. Advances in neural information processing systems, 34:12493-12506, 2021. 1, 7, 8",
|
| 1451 |
+
"[55] Yongming Rao, Wenliang Zhao, Benlin Liu, Jiwen Lu, Jie Zhou, and Cho-Jui Hsieh. Dynamicvit: Efficient vision transformers with dynamic token sparsification. Advances in neural information processing systems, 34:13937-13949, 2021. 2, 4",
|
| 1452 |
+
"[56] Adrià Recasens, Pauline Luc, Jean-Baptiste Alayrac, Luyu Wang, Florian Strub, Corentin Tallec, Mateusz Malinowski, Viorica Patraucean, Florent Altché, Michal Valko, et al. Broaden your views for self-supervised video learning. arXiv preprint arXiv:2103.16559, 2021. 2, 3, 5",
|
| 1453 |
+
"[57] Karen Simonyan and Andrew Zisserman. Two-stream convolutional networks for action recognition in videos. arXiv preprint arXiv:1406.2199, 2014. 1",
|
| 1454 |
+
"[58] Chen Sun, Austin Myers, Carl Vondrick, Kevin Murphy, and Cordelia Schmid. Videobert: A joint model for video and language representation learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7464-7473, 2019. 8",
|
| 1455 |
+
"[59] Yuchong Sun, Bei Liu, Hongwei Xue, Ruihua Sone, Huan Yang, and Jianlong Fu. Long-form video-language pretraining with multimodal temporal contrastive learning. Advances in neural information processing systems, 2022. 1, 2",
|
| 1456 |
+
"[60] Yansong Tang, Dajun Ding, Yongming Rao, Yu Zheng, Danyang Zhang, Lili Zhao, Jiwen Lu, and Jie Zhou. Coin: A large-scale dataset for comprehensive instructional video analysis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 6",
|
| 1457 |
+
"[61] Yansong Tang, Dajun Ding, Yongming Rao, Yu Zheng, Danyang Zhang, Lili Zhao, Jiwen Lu, and Jie Zhou. Coin: A large-scale dataset for comprehensive instructional video analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1207-1216, 2019. 6, 7, 12",
|
| 1458 |
+
"[62] Yansong Tang, Jiwen Lu, and Jie Zhou. Comprehensive instructional video analysis: The coin dataset and performance evaluation. IEEE transactions on pattern analysis and machine intelligence, 43(9):3138-3153, 2020. 6, 7, 8, 12",
|
| 1459 |
+
"[63] Zhan Tong, Yibing Song, Jue Wang, and Limin Wang. Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training. arXiv preprint arXiv:2203.12602, 2022. 3",
|
| 1460 |
+
"[64] Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. Learning spatiotemporal features with 3d convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 4489-4497, 2015. 1",
|
| 1461 |
+
"[65] Du Tran, Heng Wang, Lorenzo Torresani, and Matt Feiszli. Video classification with channel-separated convolu"
|
| 1462 |
+
],
|
| 1463 |
+
"bbox": [
|
| 1464 |
+
78,
|
| 1465 |
+
90,
|
| 1466 |
+
468,
|
| 1467 |
+
900
|
| 1468 |
+
],
|
| 1469 |
+
"page_idx": 10
|
| 1470 |
+
},
|
| 1471 |
+
{
|
| 1472 |
+
"type": "list",
|
| 1473 |
+
"sub_type": "ref_text",
|
| 1474 |
+
"list_items": [
|
| 1475 |
+
"tional networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5552-5561, 2019. 1",
|
| 1476 |
+
"[66] Du Tran, Heng Wang, Lorenzo Torresani, Jamie Ray, Yann LeCun, and Manohar Paluri. A closer look at spatiotemporal convolutions for action recognition. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 6450-6459, 2018. 1",
|
| 1477 |
+
"[67] Arnold Tustin. A method of analysing the behaviour of linear systems in terms of time series. Journal of the Institution of Electrical Engineers-Part IIA: Automatic Regulators and Servo Mechanisms, 94(1):130–142, 1947. 3",
|
| 1478 |
+
"[68] Vivek Veeriah, Naifan Zhuang, and Guo-Jun Qi. Differential recurrent neural networks for action recognition. In Proceedings of the IEEE international conference on computer vision, pages 4041-4049, 2015. 1",
|
| 1479 |
+
"[69] Jue Wang, Gedas Bertasius, Du Tran, and Lorenzo Torresani. Long-short temporal contrastive learning of video transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14010-14020, 2022. 2, 3, 4, 5, 6, 12",
|
| 1480 |
+
"[70] Jue Wang and Lorenzo Torresani. Deformable video transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14053-14062, 2022. 1, 2",
|
| 1481 |
+
"[71] Junke Wang, Xitong Yang, Hengduo Li, Zuxuan Wu, and Yu-Gang Jiang. Efficient video transformers with spatial-temporal token selection. arXiv preprint arXiv:2111.11591, 2021. 2",
|
| 1482 |
+
"[72] Chao-Yuan Wu and Philipp Kähenbuhl. Towards Long-Form Video Understanding. In CVPR, 2021. 4, 6, 8, 12, 13",
|
| 1483 |
+
"[73] Chao-Yuan Wu, Yanghao Li, Karttikeya Mangalam, Haoqi Fan, Bo Xiong, Jitendra Malik, and Christoph Feichtenhofer. Memvit: Memory-augmented multiscale vision transformer for efficient long-term video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13587-13597, 2022. 1, 2, 7",
|
| 1484 |
+
"[74] Zuxuan Wu, Caiming Xiong, Chih-Yao Ma, Richard Socher, and Larry S Davis. Adaframe: Adaptive frame selection for fast video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1278-1287, 2019. 2",
|
| 1485 |
+
"[75] Hongxu Yin, Arash Vahdat, Jose Alvarez, Arun Mallya, Jan Kautz, and Pavlo Molchanov. Adavit: Adaptive tokens for efficient vision transformer. arXiv preprint arXiv:2112.07658, 2021. 2, 4",
|
| 1486 |
+
"[76] Hongxu Yin, Arash Vahdat, Jose M Alvarez, Arun Mallya, Jan Kautz, and Pavlo Molchanov. A-vit: Adaptive tokens for efficient vision transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10809-10818, 2022. 2",
|
| 1487 |
+
"[77] Pengfei Zhang, Cuiling Lan, Junliang Xing, Wenjun Zeng, Jianru Xue, and Nanning Zheng. View adaptive recurrent neural networks for high performance human action recognition from skeleton data. In Proceedings of the IEEE international conference on computer vision, pages 2117-2126, 2017. 1"
|
| 1488 |
+
],
|
| 1489 |
+
"bbox": [
|
| 1490 |
+
501,
|
| 1491 |
+
92,
|
| 1492 |
+
890,
|
| 1493 |
+
898
|
| 1494 |
+
],
|
| 1495 |
+
"page_idx": 10
|
| 1496 |
+
},
|
| 1497 |
+
{
|
| 1498 |
+
"type": "page_number",
|
| 1499 |
+
"text": "11",
|
| 1500 |
+
"bbox": [
|
| 1501 |
+
475,
|
| 1502 |
+
924,
|
| 1503 |
+
491,
|
| 1504 |
+
935
|
| 1505 |
+
],
|
| 1506 |
+
"page_idx": 10
|
| 1507 |
+
},
|
| 1508 |
+
{
|
| 1509 |
+
"type": "ref_text",
|
| 1510 |
+
"text": "[78] Jiaming Zhou, Kun-Yu Lin, Haoxin Li, and Wei-Shi Zheng. Graph-based high-order relation modeling for long-term action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8984-8993, 2021. 8",
|
| 1511 |
+
"bbox": [
|
| 1512 |
+
78,
|
| 1513 |
+
90,
|
| 1514 |
+
470,
|
| 1515 |
+
161
|
| 1516 |
+
],
|
| 1517 |
+
"page_idx": 11
|
| 1518 |
+
},
|
| 1519 |
+
{
|
| 1520 |
+
"type": "text",
|
| 1521 |
+
"text": "A. Implementation Details",
|
| 1522 |
+
"text_level": 1,
|
| 1523 |
+
"bbox": [
|
| 1524 |
+
500,
|
| 1525 |
+
89,
|
| 1526 |
+
725,
|
| 1527 |
+
107
|
| 1528 |
+
],
|
| 1529 |
+
"page_idx": 11
|
| 1530 |
+
},
|
| 1531 |
+
{
|
| 1532 |
+
"type": "text",
|
| 1533 |
+
"text": "In addition to the implementation details introduced in Section 4.2 of the main paper, we provide more information of training our S5 model and LSMCL below.",
|
| 1534 |
+
"bbox": [
|
| 1535 |
+
500,
|
| 1536 |
+
114,
|
| 1537 |
+
893,
|
| 1538 |
+
161
|
| 1539 |
+
],
|
| 1540 |
+
"page_idx": 11
|
| 1541 |
+
},
|
| 1542 |
+
{
|
| 1543 |
+
"type": "text",
|
| 1544 |
+
"text": "A.1. S5 model",
|
| 1545 |
+
"text_level": 1,
|
| 1546 |
+
"bbox": [
|
| 1547 |
+
500,
|
| 1548 |
+
172,
|
| 1549 |
+
612,
|
| 1550 |
+
189
|
| 1551 |
+
],
|
| 1552 |
+
"page_idx": 11
|
| 1553 |
+
},
|
| 1554 |
+
{
|
| 1555 |
+
"type": "text",
|
| 1556 |
+
"text": "Following ViS4mer [32], we introduce a MLP layer in each block to reduce the feature dimension by a factor of $2 \\times$ . Each MLP layer is consisted of a linear layer, a GELU activation layer [28] and a dropout layer, where the dropout rate is 0.2. For updating the momentum S4 model, we explore different values of momentum coefficient and set it as 0.01 to produce the best performance. For all of our experiments of S5 model, we use AdamW optimizer [45] with a learning rate of $10^{-3} \\times \\frac{\\text{batch size}}{16}$ , and with a weight decay of 0.01. For COIN [61, 62], Breakfast [39] and each task on LVU dataset [72], we train our S5 model for 100 epochs and reduce the learning rate by a factor of 0.2 when the training loss has stopped reducing in the past 1 epoch. We train our S5 model by using $8 \\times$ NVIDIA Tesla V100 16G GPUs with a batch size of 16. All the implementations are coded with PyTorch [53].",
|
| 1557 |
+
"bbox": [
|
| 1558 |
+
500,
|
| 1559 |
+
199,
|
| 1560 |
+
893,
|
| 1561 |
+
441
|
| 1562 |
+
],
|
| 1563 |
+
"page_idx": 11
|
| 1564 |
+
},
|
| 1565 |
+
{
|
| 1566 |
+
"type": "text",
|
| 1567 |
+
"text": "A.2. LSMCL",
|
| 1568 |
+
"text_level": 1,
|
| 1569 |
+
"bbox": [
|
| 1570 |
+
500,
|
| 1571 |
+
453,
|
| 1572 |
+
604,
|
| 1573 |
+
468
|
| 1574 |
+
],
|
| 1575 |
+
"page_idx": 11
|
| 1576 |
+
},
|
| 1577 |
+
{
|
| 1578 |
+
"type": "text",
|
| 1579 |
+
"text": "In LSMCL, we sample two clips with different sampling strides from the video sequence, and the clip shape is consistent with the one in finetuning the S5 model. Specifically, we sample input clips of size $60 \\times 3 \\times 224 \\times 224$ on LVU dataset [72] and $64 \\times 3 \\times 224 \\times 224$ on COIN [61, 62] and Breakfast datasets [39]. The sampling stride ratio is set to be $\\frac{\\tau_L}{\\tau_S} = 1.5$ , which is also ablated in the Figure 5(c) in the main paper. Following LSTCL [69], we adopt a query encoder and a key encoder in the LSMCL. The query encoder consists of a S4 model backbone, a MLP projection head and an additional prediction MLP head. The purpose of the prediction layer is to transform the representation of the query clip to match the key. The key encoder consists of a S4 model backbone and a MLP projection head. The momentum coefficient for updating the key encoder is 0.99. Following [10, 69], the MLP projection head has 3 layers while the MLP prediction head has 2 layers. The hidden layers of both MLPs are 4096-D and are with ReLU; the output layers of both MLPs are 256-D, without ReLU. In LSMCL, all layers in both MLPs have BN [31], which follows [8, 10]. In terms of the optimizer, we adopt AdamW [45] with a learning rate of $10^{-4} \\times \\frac{\\text{batch size}}{256}$ , and with a weight decay of 0.05. We train our LSMCL for 300 epochs in total, and adopt learning rate warm-up [21] for the first 40 epochs. We train LSMCL by using $8 \\times$ NVIDIA Tesla V100 16G GPUs with a batch size of 64 and we optimize the model with the loss in Equation 8, where the $\\rho = 0.2$ .",
|
| 1580 |
+
"bbox": [
|
| 1581 |
+
500,
|
| 1582 |
+
477,
|
| 1583 |
+
893,
|
| 1584 |
+
902
|
| 1585 |
+
],
|
| 1586 |
+
"page_idx": 11
|
| 1587 |
+
},
|
| 1588 |
+
{
|
| 1589 |
+
"type": "page_number",
|
| 1590 |
+
"text": "12",
|
| 1591 |
+
"bbox": [
|
| 1592 |
+
475,
|
| 1593 |
+
924,
|
| 1594 |
+
495,
|
| 1595 |
+
936
|
| 1596 |
+
],
|
| 1597 |
+
"page_idx": 11
|
| 1598 |
+
},
|
| 1599 |
+
{
|
| 1600 |
+
"type": "image",
|
| 1601 |
+
"img_path": "images/f45d8ac5bf4b4db71016d164a6c303daaaf3127882a316f00eb2340961437988.jpg",
|
| 1602 |
+
"image_caption": [
|
| 1603 |
+
"Figure 6. Compared to the baseline performance, average improvement performance of our method with different settings on LVU dataset. Unless otherwise noted, the default number of input frame and masking ratio is 60 and $50\\%$ . We study the effect of leveraging multiple S5 models in our work, where we substitutes more S4 model in original ViS4mer [32] with our S5 model."
|
| 1604 |
+
],
|
| 1605 |
+
"image_footnote": [],
|
| 1606 |
+
"bbox": [
|
| 1607 |
+
143,
|
| 1608 |
+
85,
|
| 1609 |
+
405,
|
| 1610 |
+
220
|
| 1611 |
+
],
|
| 1612 |
+
"page_idx": 12
|
| 1613 |
+
},
|
| 1614 |
+
{
|
| 1615 |
+
"type": "text",
|
| 1616 |
+
"text": "B. Effectof Multiple S5 Models",
|
| 1617 |
+
"text_level": 1,
|
| 1618 |
+
"bbox": [
|
| 1619 |
+
76,
|
| 1620 |
+
338,
|
| 1621 |
+
339,
|
| 1622 |
+
356
|
| 1623 |
+
],
|
| 1624 |
+
"page_idx": 12
|
| 1625 |
+
},
|
| 1626 |
+
{
|
| 1627 |
+
"type": "text",
|
| 1628 |
+
"text": "In this paper, we improve the previous S4 model by introducing a novel selective module, formulating the Selective S4 (S5) model. For fair comparison, we follow the architecture introduced in the ViS4mer [32], which utilizes three S4 models with pooling and MLP layers in between. As the advantages of our S5 model will naturally be diminished on less redundant sequences, our default setting is to substitute the first S4 model in ViS4mer [32] with our proposed S5 model while keep the rest architecture the same with ViS4mer [32]. In this section, we study the impact of using more S5 models in the ViS4mer [32] architecture. In Figure 6, we gradually increase the number of blocks that use S5 model instead of S4 model. We set the performance of ViS4mer as the baseline, and report the averaged improvement percentage over 9 tasks on LVU dataset [72]. Compared to the method of using S4 models, our method achieves substantial improvement by including more S5 models. However, less duplicated sequences will definitely result in a decrease in our S5 model's performance gain, which will lessen the advantage of stacking additional S5 blocks.",
|
| 1629 |
+
"bbox": [
|
| 1630 |
+
75,
|
| 1631 |
+
363,
|
| 1632 |
+
468,
|
| 1633 |
+
680
|
| 1634 |
+
],
|
| 1635 |
+
"page_idx": 12
|
| 1636 |
+
},
|
| 1637 |
+
{
|
| 1638 |
+
"type": "page_number",
|
| 1639 |
+
"text": "13",
|
| 1640 |
+
"bbox": [
|
| 1641 |
+
477,
|
| 1642 |
+
922,
|
| 1643 |
+
493,
|
| 1644 |
+
936
|
| 1645 |
+
],
|
| 1646 |
+
"page_idx": 12
|
| 1647 |
+
}
|
| 1648 |
+
]
|
2303.14xxx/2303.14526/78d21730-a964-44d8-b842-a279fb0ebf53_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14526/78d21730-a964-44d8-b842-a279fb0ebf53_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0893b77ccddd114bed72240538e67646311bc19581d1b5e11e8d8dd5cb431a15
|
| 3 |
+
size 4402724
|
2303.14xxx/2303.14526/full.md
ADDED
|
@@ -0,0 +1,350 @@
| 1 |
+
# Selective Structured State-Spaces for Long-Form Video Understanding
|
| 2 |
+
|
| 3 |
+
Jue Wang Wentao Zhu Pichao Wang Xiang Yu Linda Liu Mohamed Omar Raffay Hamid Amazon Prime Video
|
| 4 |
+
|
| 5 |
+
{juewangn, zhuwent, wpichao, xiangnyu, lindliu, omarmk, raffay}@amazon.com
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Effective modeling of complex spatiotemporal dependencies in long-form videos remains an open problem. The recently proposed Structured State-Space Sequence (S4) model with its linear complexity offers a promising direction in this space. However, we demonstrate that treating all image tokens equally as done by the S4 model can adversely affect its efficiency and accuracy. To address this limitation, we present a novel Selective S4 (i.e., S5) model that employs a lightweight mask generator to adaptively select informative image tokens, resulting in more efficient and accurate modeling of long-term spatiotemporal dependencies in videos. Unlike previous mask-based token reduction methods used in transformers, our S5 model avoids the dense self-attention calculation by making use of the guidance of the momentum-updated S4 model. This enables our model to efficiently discard less informative tokens and adapt to various long-form video understanding tasks more effectively. However, as is the case for most token reduction methods, the informative image tokens could be dropped incorrectly. To improve the robustness and the temporal horizon of our model, we propose a novel long-short masked contrastive learning (LSMCL) approach that enables our model to predict longer temporal context using shorter input videos. We present extensive comparative results using three challenging long-form video understanding datasets (LVU, COIN and Breakfast), demonstrating that our approach consistently outperforms the previous state-of-the-art S4 model by up to $9.6\%$ accuracy while reducing its memory footprint by $23\%$ .
|
| 10 |
+
|
| 11 |
+
# 1. Introduction
|
| 12 |
+
|
| 13 |
+
Video understanding is an active research area where a variety of different models have been explored including, e.g., two-stream networks [19, 20, 57], recurrent neural networks [3, 68, 77] and 3-D convolutional networks [64-66]. However, most of these methods have primarily focused on short-form videos that are typically a few seconds in length, and are not designed to model the complex long-
|
| 14 |
+
|
| 15 |
+

|
| 16 |
+
Figure 1. Illustration of long-form videos - Evenly sampled frames from two long-form videos, that have long duration (more than 1 minute) and distinct categories in the Breakfast [39] dataset (grayscale frames are shown for better visualization). The video on top shows the activity of making scrambled eggs, while the one on the bottom shows the activity of making cereal. These two videos heavily overlap in terms of objects (e.g., eggs, saucepan and stove), and actions (e.g., picking, whisking and pouring). To effectively distinguish these two videos, it is important to model long-term spatiotemporal dependencies, which is also the key in long-form video understanding.
|
| 17 |
+
|
| 18 |
+
term spatiotemporal dependencies often found in long-form videos (see Figure 1 for an illustrative example). The recent vision transformer (ViT) [14] has shown promising capability in modeling long-range dependencies, and several variants [1,4,15,44,49,54,70] have successfully adopted the transformer architecture for video modeling. However, for a video with T frames and S spatial tokens, the complexity of standard video transformer architecture is $\mathcal{O}(\mathrm{S}^2\mathrm{T}^2)$ , which poses prohibitively high computation and memory costs when modeling long-form videos. Various attempts [59,73] have been proposed to improve this efficiency, but the ViT pyramid architecture prevents them from developing long-term dependencies on low-level features.
|
| 19 |
+
|
| 20 |
+
In addition to ViT, a recent ViS4mer [32] method has tried to apply the Structured State-Spaces Sequence (S4) model [24] as an effective way to model the long-term video dependencies. However, by introducing simple masking techniques we empirically reveal that the S4 model can have different temporal reasoning preferences for different downstream tasks. This makes applying the same image token selection method as done by ViS4mer [32] for all long-
|
| 21 |
+
|
| 22 |
+
form video understanding tasks suboptimal.
|
| 23 |
+
|
| 24 |
+
To address this challenge, we propose a cost-efficient adaptive token selection module, termed S5 (i.e., selective S4) model, which adaptively selects informative image tokens for the S4 model, thereby learning discriminative long-form video representations. Previous token reduction methods for efficient image transformers [40, 46, 55, 71, 75, 76] heavily rely on a dense self-attention calculation, which makes them less effective in practice despite their theoretical guarantees about efficiency gains. In contrast, our S5 model avoids the dense self-attention calculation by leveraging S4 features in a Gumbel-Softmax sampling [33] based mask generator to adaptively select more informative image tokens. Our mask generator leverages the S4 feature for its global sequence-context information and is further guided by the momentum distillation from the S4 model.
|
| 25 |
+
|
| 26 |
+
To further improve the robustness and the temporal predictability of our S5 model, we introduce a novel long-short mask contrastive learning (LSMCL) approach to pre-train our model. In LSMCL, randomly selecting image tokens from long and short clips covers the scenario in which less informative image tokens are chosen, and their representations are learned to match each other. As a result, LSMCL not only significantly boosts the efficiency compared to previous video contrastive learning methods [17, 56, 69], but also increases the robustness of our S5 model when dealing with mis-predicted image tokens. We empirically demonstrate that the S5 model with LSMCL pre-training can employ shorter-length clips to achieve on-par performance with using longer-range clips without incorporating LSMCL pre-training.
|
| 27 |
+
|
| 28 |
+
We summarize our key contributions as the following:
|
| 29 |
+
|
| 30 |
+
- We propose a Selective S4 (S5) model that leverages the global sequence-context information from S4 features to adaptively choose informative image tokens in a task-specific way.
|
| 31 |
+
- We introduce a novel long-short masked contrastive learning approach (LSMCL) that enables our model to be tolerant to the mis-predicted tokens and exploit longer duration spatiotemporal context by using shorter duration input videos, leading to improved robustness in the S5 model.
|
| 32 |
+
- We demonstrate that two proposed novel techniques (S5 model and LSMCL) are seamlessly suitable and effective for long-form video understanding, achieving the state-of-the-art performance on three challenging benchmarks. Notably, our method achieves up to $9.6\%$ improvement on LVU dataset compared to the previous state-of-the-art S4 method, while reducing the memory footprint by $23\%$ .
|
| 33 |
+
|
| 34 |
+
# 2. Related Work
|
| 35 |
+
|
| 36 |
+
We discuss the literature with respect to the three most relevant fields: video understanding with long-form format,
|
| 37 |
+
|
| 38 |
+
efficient token selection for vision transformer training, and self-supervised learning with videos.
|
| 39 |
+
|
| 40 |
+
a. Long-Form Video Modeling: Transformers have shown excellent performance in modeling long-term dependencies, e.g., in natural language processing (NLP) [5, 12, 13]. But the high computational cost caused by dense self-attention calculation becomes a bottleneck not only in NLP but also in computer vision. Much subsequent work [11, 34, 36, 43, 44, 52, 70] focuses on improving the transformer efficiency. However, they are not designed for dealing with the plethora of spatial and temporal image tokens that are common in long-form video scenarios. LF-VILA [59] develops a hierarchical feeding architecture to include more frames in the model, thus capturing longer temporal information. Similarly, MeMViT [73] better utilizes temporal information by merging the previously cached "memory" from the past. The pyramid structure leveraged by LF-VILA and MeMViT shows efficiency improvements, but may lose low-level spatial-temporal contextual information. Gu et al. [24] proposed a Structured State-Space Sequence (S4) model, a novel alternative to CNNs or transformers, to model long-range dependencies by simulating a linear time invariant (LTI) system. Subsequently, S4ND [50] and ViS4mer [32] extend the S4 model to the video classification task. ViS4mer [32] stacks multiple S4 layers with different scales in modeling long-form videos, and S4ND [50] substitutes the traditional convolutional layer with the proposed S4ND layer in image and short-form video classification tasks. The equal importance assumption to all the image tokens by ViS4mer and S4ND can be further improved by introducing suitable token selection mechanisms, especially when dealing with long-form input sequences. Consequently, we propose a Selective S4 (S5) model to further enhance the efficiency while maintaining the long-form representation power.
|
| 41 |
+
|
| 42 |
+
b. Adaptive Token Selection: Adaptive token selection is widely used to improve model efficiency. Traditional CNN methods such as SCsampler [37] filter informative clips by using motion and audio embeddings. Adaframe [74] utilizes memory-augmented LSTMs as agents, which predict where to look in the next time step. AR-NET [47] uses LSTM as decision maker to select useful frames and their resolutions. [40, 46, 55, 71, 75] apply this selection idea to transformers to adaptively select tokens for increased efficiency. For instance, STTS [71] leverages a token selection module, the named scorer network, to provide the importance score for each token and select the top-K frames with the highest scores. AdaViT [46] extends this idea to develop instance-specific policies, guiding the activation of patches, self-attention heads and transformer blocks. All of the above methods demonstrate how a light-weight token selection module can improve inference efficiency. However,
|
| 43 |
+
|
| 44 |
+
these methods are essentially designed for images, and may require non-trivial adaptation to the long-form video scenarios, i.e., the video-level long-range reasoning and computationally expensive self-attention calculation. To avoid this dense self-attention calculation, our proposed S5 model leverages S4 features to model the long-term dependencies and adaptively pick informative tokens.
|
| 45 |
+
|
| 46 |
+
c. Video Self-Supervised Learning (SSL): Previous work on token reduction rarely considers the negative impact of mis-dropped tokens. EViT [40] simply fuses the unattended tokens and concatenates them with the remaining ones. From the recent successful image SSL works [8, 9, 22, 26, 27], many follow-up works [16, 18, 56, 63, 69] learn discriminative video features with great generalization ability in downstream tasks. Specifically, LSTCL [69] and BraVe [56] utilize long and short clips in an SSL framework, which enables the model to learn an effective representation by predicting temporal context captured from a longer temporal extent. This essentially broadens the temporal horizon of the model, allowing it to predict longer temporal context from fewer, shorter input frames. In this paper, we adopt this idea with an additional random masking strategy to increase the efficiency of contrastive learning in long-form videos, and to further improve the robustness and the temporal predictability of our S5 model in downstream tasks.
|
| 47 |
+
|
| 48 |
+
# 3. Approach
|
| 49 |
+
|
| 50 |
+
We start by summarizing Structured State-Space Sequence (S4) [24] model and ViS4mer [32] ( $\S 3.1$ ), followed by empirical analysis of S4 model in various long-form video understanding tasks ( $\S 3.2$ ), and then providing the details of our proposed approach to address these limitations ( $\S 3.3$ and $\S 3.4$ ).
|
| 51 |
+
|
| 52 |
+
# 3.1. Preliminaries
|
| 53 |
+
|
| 54 |
+
# 3.1.1 S4 Model
|
| 55 |
+
|
| 56 |
+
Recall that a simple State-Space Model, i.e., a linear time invariant (LTI) system, can be written as:
|
| 57 |
+
|
| 58 |
+
$$
|
| 59 |
+
\mathbf{x}^{\prime}(t) = \mathbf{A}\mathbf{x}(t) + \mathbf{B}\mathbf{u}(t) \tag{1}
|
| 60 |
+
$$
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
\mathbf{y}(t) = \mathbf{C}\mathbf{x}(t) + \mathbf{D}\mathbf{u}(t).
|
| 64 |
+
$$
|
| 65 |
+
|
| 66 |
+
Under the deep learning setting, $\mathbf{A}$ , $\mathbf{B}$ and $\mathbf{C}$ are learned via gradient descent while $+D\mathbf{u}(t)$ is replaced by a residual connection. This formulation projects an input signal $\mathbf{u}(t)$ from a one-dimensional space to an N-dimensional latent space $\mathbf{x}(t)$ , which is then mapped back to a one-dimensional output signal $\mathbf{y}(t)$ . Similar to RNNs, it has been found in previous work that Equation 1 also suffers from gradient vanishing or exploding issues when modeling longer sequences. To tackle this issue, the work in [24] leveraged HiPPO theory [23] to initialize the $\mathbf{A}$ matrix.
|
| 67 |
+
|
| 68 |
+
HiPPO specifies a certain expression of $\mathbf{A} \in \mathbb{R}^{N \times N}$ (see Equation 2), which allows the hidden state to memorize the input $\mathbf{u}(t)^1$ .
|
| 69 |
+
|
| 70 |
+
$$
|
| 71 |
+
\operatorname{HiPPO}: \mathbf{A}_{n,k} = - \left\{ \begin{array}{ll} (2n+1)^{0.5}(2k+1)^{0.5} & \text{if } n > k \\ n+1 & \text{if } n = k \\ 0 & \text{if } n < k, \end{array} \right. \tag{2}
|
| 72 |
+
$$
|
| 73 |
+
|
| 74 |
+
where $n$ and $k$ indicate the row and column indices of $\mathbf{A}$ . To implement Equation 1 using discrete inputs such as word or image tokens, the work in [24] leverages the bi-linear discretization method [67] and a discretized version of Equation 1 using a step size $\Delta$ is rewritten as:
|
| 75 |
+
|
| 76 |
+
$$
|
| 77 |
+
\mathbf{x}_{k} = \bar{\mathbf{A}} \mathbf{x}_{k-1} + \bar{\mathbf{B}} \mathbf{u}_{k} \tag{3}
|
| 78 |
+
$$
|
| 79 |
+
|
| 80 |
+
$$
|
| 81 |
+
\mathbf{y}_{k} = \bar{\mathbf{C}} \mathbf{x}_{k},
|
| 82 |
+
$$
|
| 83 |
+
|
| 84 |
+
where $\bar{\mathbf{A}} = (\mathbf{I} + \frac{\Delta\cdot\mathbf{A}}{2}) / (\mathbf{I} - \frac{\Delta\cdot\mathbf{A}}{2})$ , $\bar{\mathbf{B}} = \Delta \cdot \mathbf{B} / (\mathbf{I} - \frac{\Delta\cdot\mathbf{A}}{2})$ and $\bar{\mathbf{C}} = \mathbf{C}$ . Equation 3 can be solved using a discrete convolution [24]:
|
| 85 |
+
|
| 86 |
+
$$
|
| 87 |
+
\mathbf{y} = \bar{\mathbf{K}} * \mathbf{u}, \tag{4}
|
| 88 |
+
$$
|
| 89 |
+
|
| 90 |
+
where $\mathbf{u} = \{u_0, u_1, \dots, u_{k-1}, u_k\}$ and $\bar{\mathbf{K}} \in \mathbb{R}^L := \{\bar{\mathbf{C}}\bar{\mathbf{B}}, \bar{\mathbf{C}}\bar{\mathbf{A}}\bar{\mathbf{B}}, \dots, \bar{\mathbf{C}}\bar{\mathbf{A}}^{L-1}\bar{\mathbf{B}}\}$ is a structured convolutional kernel and $L$ is the sequence length. Equation 4 is the core formulation of the S4 model, whose computational cost is linear in the input length and which can be efficiently computed using the fast Fourier transform (FFT) and inverse FFT. Moreover, to control the convolution kernel width, the work in [25] sets $\Delta$ as a learnable parameter.
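To make the preceding steps concrete, the following is a minimal NumPy sketch of Equations 2-4 (HiPPO initialization, bilinear discretization, kernel construction and FFT-based convolution). It is only an illustration under assumed shapes, with random $\mathbf{B}$ and $\mathbf{C}$ and a single input/output channel; it is not the authors' released implementation.

```python
import numpy as np

def hippo_matrix(N):
    # HiPPO initialization of A (Equation 2).
    A = np.zeros((N, N))
    for n in range(N):
        for k in range(N):
            if n > k:
                A[n, k] = -np.sqrt((2 * n + 1) * (2 * k + 1))
            elif n == k:
                A[n, k] = -(n + 1)
    return A

def discretize(A, B, step):
    # Bilinear discretization with step size Delta (Equation 3).
    I = np.eye(A.shape[0])
    inv = np.linalg.inv(I - 0.5 * step * A)
    return inv @ (I + 0.5 * step * A), inv @ (step * B)

def ssm_kernel(A_bar, B_bar, C, L):
    # Structured convolution kernel K_bar = (CB, CAB, ..., CA^{L-1}B).
    K, x = [], B_bar
    for _ in range(L):
        K.append((C @ x).item())
        x = A_bar @ x
    return np.array(K)

def causal_conv_fft(K, u):
    # y = K_bar * u via FFT / inverse FFT (Equation 4).
    L = len(u)
    return np.fft.irfft(np.fft.rfft(K, 2 * L) * np.fft.rfft(u, 2 * L), 2 * L)[:L]

N, L = 16, 128
A, B, C = hippo_matrix(N), np.random.randn(N, 1), np.random.randn(1, N)
A_bar, B_bar = discretize(A, B, step=1.0 / L)
y = causal_conv_fft(ssm_kernel(A_bar, B_bar, C, L), np.random.randn(L))  # (L,) outputs
```

In practice the kernel itself is computed with further structure-exploiting tricks [24]; the naive recurrence above is written for readability only.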
|
| 91 |
+
|
| 92 |
+
# 3.1.2 ViS4mer Model
|
| 93 |
+
|
| 94 |
+
By utilizing the S4 model, ViS4mer [32] achieves promising results on long-form video understanding tasks. We start by defining some notation to help summarize the adaptation of the S4 model in computer vision. Given a video clip $\mathbf{X} \in \mathbb{R}^{\mathrm{H} \times \mathrm{W} \times 3 \times \mathrm{T}}$ consisting of T RGB frames sampled from the video, we convert it into a sequence of S·T image tokens $\mathbf{x}_s^t \in \mathbb{R}^{\mathrm{D}}$ for $s = 1, \ldots, \mathrm{S}$ and $t = 1, \ldots, \mathrm{T}$ . The tokens $\mathbf{z}_s^t$ are obtained by decomposing each frame into S patches which are then projected to a D-dimensional space through a learnable linear transformation. This tokenization can be implemented by linearly mapping the RGB patches of each frame [4, 49]. Separate learnable positional encodings $\mathbf{e}_s$ and $\mathbf{e}^t$ are then applied to the patch embeddings $\mathbf{z}_s^t$ for the spatial and the temporal dimensions: $\mathbf{x}_s^t = \mathbf{z}_s^t + \mathbf{e}_s + \mathbf{e}^t$ , formulating $\mathbf{x}_{\mathrm{input}} = \{x_0^0, x_1^0, \ldots, x_S^0, x_0^1, \ldots, x_S^T\}$ .
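As a rough illustration of this tokenization, the PyTorch sketch below builds the token sequence $\mathbf{x}_{\mathrm{input}}$ from raw frames; the frame count, patch size and embedding width are assumed values, and in the actual experiments the features come from pretrained ViT/Swin backbones (Section 4.2).

```python
import torch
import torch.nn as nn

class VideoTokenizer(nn.Module):
    def __init__(self, num_frames=60, img_size=224, patch=16, dim=1024):
        super().__init__()
        self.s = (img_size // patch) ** 2                  # S patches per frame
        # Learnable linear projection of RGB patches, implemented as a strided conv.
        self.proj = nn.Conv2d(3, dim, kernel_size=patch, stride=patch)
        # Separate learnable spatial (e_s) and temporal (e^t) positional encodings.
        self.pos_s = nn.Parameter(torch.zeros(1, 1, self.s, dim))
        self.pos_t = nn.Parameter(torch.zeros(1, num_frames, 1, dim))

    def forward(self, video):                              # video: (B, T, 3, H, W)
        b, t = video.shape[:2]
        z = self.proj(video.flatten(0, 1))                 # (B*T, D, H/p, W/p)
        z = z.flatten(2).transpose(1, 2).reshape(b, t, self.s, -1)  # z_s^t: (B, T, S, D)
        x = z + self.pos_s + self.pos_t                    # x_s^t = z_s^t + e_s + e^t
        return x.flatten(1, 2)                             # (B, T*S, D) token sequence

tokens = VideoTokenizer()(torch.randn(1, 60, 3, 224, 224))  # -> (1, 60*196, 1024)
```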
|
| 95 |
+
|
| 96 |
+
In ViS4mer [32], a multi-scale S4 decoder is introduced for learning the long-term temporal reasoning. As is mentioned in § 3.1.1, S4 model has a linear computation and
|
| 97 |
+
|
| 98 |
+

|
| 99 |
+
(a). Performance Impact with increasing number of input frames
|
| 100 |
+
(b). Performance Impact with increasing masking ratio
|
| 101 |
+
|
| 102 |
+

|
| 103 |
+
Figure 2. Performance gain/loss of ViS4mer on LVU dataset [72] with different settings of input frames and random masking ratio, where we conclude: (a). The performance is not substantially improved with increasing number of input frames. (b). Random masking strategy cannot effectively reduce redundant tokens.
|
| 104 |
+
|
| 105 |
+
memory dependency with respect to the input length, which has significantly lower computational cost than the self-attention in transformers. The formulation of S4 decoder can be written as:
|
| 106 |
+
|
| 107 |
+
$$
|
| 108 |
+
\begin{array}{l} \mathbf{x}_{s_4} = \mathrm{S}_4(\mathrm{LN}(\mathbf{x}_{\text{input}})) \\ \mathbf{x}_{mlp} = \operatorname{MLP}\left(\mathrm{P}\left(\mathbf{x}_{s_4}\right)\right) \tag{5} \\ \mathbf{x}_{\text{skip}} = \operatorname{Linear}\left(\mathrm{P}\left(\mathbf{x}_{\text{input}}\right)\right) \\ \mathbf{x}_{\text{out}} = \mathbf{x}_{\text{skip}} + \mathbf{x}_{mlp}, \end{array}
|
| 109 |
+
$$
|
| 110 |
+
|
| 111 |
+
where $\mathrm{LN}(\cdot)$ , $\mathrm{MLP}(\cdot)$ , $\mathrm{Linear}(\cdot)$ and $\mathrm{P}(\cdot)$ represent the layer normalization [2], the multi-layer perceptron, the linear layer and the pooling layer, respectively, and $\mathbf{x}_{s_4}$ is the $\mathbf{y}$ in Equation 4.
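A possible PyTorch rendering of this decoder block is sketched below; the `S4Layer` is assumed to be provided by [24, 32], and the pooling stride, width reduction and dropout rate are illustrative choices (Appendix A.1 reports an MLP that halves the feature dimension with a dropout rate of 0.2).

```python
import torch
import torch.nn as nn

class S4DecoderBlock(nn.Module):
    """One block of Equation 5: x_out = Linear(P(x_input)) + MLP(P(S4(LN(x_input))))."""
    def __init__(self, s4_layer, dim, pool=2, dropout=0.2):
        super().__init__()
        self.norm = nn.LayerNorm(dim)                      # LN(.)
        self.s4 = s4_layer                                 # S4(.), assumed from [24, 32]
        self.pool = nn.AvgPool1d(kernel_size=pool)         # P(.), pools the sequence length
        self.mlp = nn.Sequential(nn.Linear(dim, dim // 2), # MLP(.), halves the width
                                 nn.GELU(), nn.Dropout(dropout))
        self.skip = nn.Linear(dim, dim // 2)               # Linear(.) on the skip path

    def _pool_seq(self, x):                                # (B, L, D) -> (B, L/pool, D)
        return self.pool(x.transpose(1, 2)).transpose(1, 2)

    def forward(self, x):                                  # x: (B, L, D)
        x_s4 = self.s4(self.norm(x))                       # x_s4 = S4(LN(x_input))
        x_mlp = self.mlp(self._pool_seq(x_s4))             # x_mlp = MLP(P(x_s4))
        x_skip = self.skip(self._pool_seq(x))              # x_skip = Linear(P(x_input))
        return x_skip + x_mlp                              # x_out = x_skip + x_mlp
```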
|
| 112 |
+
|
| 113 |
+
# 3.2. S4 Model in Long-form Video Understanding
|
| 114 |
+
|
| 115 |
+
To better understand the S4 model and long-form video understanding tasks, we re-implement ViS4mer [32] with different settings on LVU dataset [72] and demonstrate the result in Figure 2. From the observation that short-form video understanding tasks often benefit from longer input clips [4, 15, 44, 69], we wonder if the performance of S4 model on different long-form video tasks would also be substantially improved with the increasing number of input frames. In Figure 2 (a), we gradually increase the temporal extent from 60 seconds to 120 seconds. Compared to the performance of using 60 second input, we report the impact ratio of using 80, 100, 120 second inputs in each task. From this Figure, we realize that not all long-form video tasks benefit from longer input context, and for those improved tasks, the performance is not necessarily improved with the longer input content. As a result, we raise the hypothesis that capturing long-term relationships is task- and data-dependent, and that additional performance improvements for those temporally-intensive tasks would also be hindered by the redundant spatiotemporal tokens produced by longer input content. Recalling Equation 3 and 4, each
|
| 116 |
+
|
| 117 |
+
output token from the S4 model is the result of a structured discrete convolution over all previous inputs. Thus, we argue that treating all input tokens equally, as ViS4mer [32] does, is not ideal for the S4 model to capture effective long-term dependencies, as not all tokens have temporal relations and each task may also favor tokens at different space-time locations. To naively reduce the redundant tokens, we generate random masks on the 60-second input clips to drop tokens and increase the masking ratio from $20\%$ to $80\%$ . Compared to the performance of the un-masked input, we report the impact ratio of using random masks with masking ratios of $20\%$ , $50\%$ and $80\%$ in Figure 2 (b). Despite the minor improvement in some tasks, random masking degrades the performance of most tasks, so it is not an effective method for reducing the redundancies. To this end, we are motivated to propose a selective S4 model which adaptively picks discriminative image tokens for the S4 model in different long-form video understanding tasks.
|
| 118 |
+
|
| 119 |
+
# 3.3. Adaptive Token in Long-form Videos
|
| 120 |
+
|
| 121 |
+
To pick out discriminative image tokens from the long-form videos among various tasks, we extend the concept of adaptive token learning, formulating our Selective S4 (i.e., S5) model. Unlike previous image-based adaptive token learning works [40, 46, 55, 75] that rely on dense self-attention for capturing token-wise relationships, our S5 model avoids the self-attention computation in long-form videos by leveraging S4 features generated from the simulated linear time-invariant (LTI) system. Inherited from the linear complexity of the S4 model, our S5 model can capture long-form video token dependencies at low cost, thus making adaptive token learning possible in long-form videos. In addition, we propose a momentum updated S4 model to dynamically produce S4 features from the long-form video data in different tasks. Figure 3 (a) demonstrates the pipeline of our S5 model, where the momentum updated S4 model is the moving average of the S4 model.
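A minimal sketch of the moving-average ("momentum") update from Figure 3 (a) is shown below. The exact update convention is an assumption on our part; Appendix A.1 only reports that a momentum coefficient of 0.01 worked best.

```python
import torch

@torch.no_grad()
def update_momentum_s4(s4_model, momentum_s4_model, m=0.01):
    # One common EMA convention: theta_momentum <- (1 - m) * theta_momentum + m * theta.
    for p, p_m in zip(s4_model.parameters(), momentum_s4_model.parameters()):
        p_m.mul_(1.0 - m).add_(p, alpha=m)
```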
|
| 122 |
+
|
| 123 |
+
Specifically, we cast our selective module in the S5 model as an adaptive mask learning problem. Given a mask generator $\mathrm{MG}(\cdot)$ and its input $\mathbf{x}_{s_4}$ , the mask generator is a lightweight architecture, which will be ablated in Section 4. It will be trained for a classification task on the predefined category space $\mathbb{C} = \{C_1,\dots ,C_{\mathrm{ST}}\}$ , where $\mathrm{S}\cdot \mathrm{T}$ is the total number of image tokens in the video. Let $p(c|\mathbf{x}_{s_4})\in [0,1]$ denote the normalized probabilistic output of $\mathrm{MG}(\mathbf{x}_{s_4})$ , so that $\sum_{c = C_1}^{c = C_{\mathrm{ST}}}p(c|\mathbf{x}_{s_4}) = 1$ . Then, we sample $K$ categories without replacement from the probabilistic outputs of the mask generator. Finally, the $k^{th}$ selected image token can be written as:
|
| 124 |
+
|
| 125 |
+
$$
|
| 126 |
+
x_{\text{in}}^{k} = \mathbf{X}^{\mathbf{T}} c^{k} \tag{6}
|
| 127 |
+
$$
|
| 128 |
+
|
| 129 |
+
where $\mathbf{X} \in \mathbb{R}^{ST \times D}$ represents the S·T D-dimensional image tokens and $c^k$ is a one-hot vector that selects the $k^{th}$ token from
|
| 130 |
+
|
| 131 |
+

|
| 132 |
+
a. Illustration of our proposed S5 model
|
| 133 |
+
|
| 134 |
+

|
| 135 |
+
b. Illustration of our proposed LSMCL algorithm
|
| 136 |
+
Figure 3. (a) A visualization of our proposed S5 model. Compared to the S4 model, we introduce a selective token picking strategy, the "mask generator", leveraging the S4 feature from the momentum S4 model. The momentum S4 model is updated by the S4 model in a moving-average manner. Both the S4 model and the momentum S4 model consist of an S4 layer [24, 32] and an LN layer [2]. (b) An illustration of the proposed LSMCL pretraining framework, which initializes our S5 model to improve its robustness.
|
| 137 |
+
|
| 138 |
+
the $\mathbf{X}$ . The sampling process is important as it prevents the bias in training that is potentially caused by a deterministic top-K selection. To make this sampling differentiable, we adopt the Gumbel-Softmax with Straight-Through tricks [33], which are widely used in [41, 46]. Specifically, we introduce an additional Gumbel noise $g\in \mathbb{R}^{1\times \mathrm{ST}}$ into the predicted probability distribution $p\in \mathbb{R}^{1\times \mathrm{ST}}$ , where $g = -\log (-\log (u + \epsilon) + \epsilon)$ ( $u\sim \mathrm{Uniform}(0,1)$ , and $\epsilon$ is a small value for numerical stability). Then, we sample the top-K tokens from the re-parameterized distribution $p + g$ . During the back-propagation, we estimate the gradient for each selected token $c$ as:
|
| 139 |
+
|
| 140 |
+
$$
|
| 141 |
+
G \approx \nabla_{\mathrm{MG}} \frac{\exp\left(\left(\log p\left(c | \mathbf{x}_{s_4}\right) + g(c)\right) / \rho\right)}{\sum_{c^{\prime} = C_{1}}^{c^{\prime} = C_{\mathrm{ST}}} \exp\left(\left(\log p\left(c^{\prime} | \mathbf{x}_{s_4}\right) + g\left(c^{\prime}\right)\right) / \rho\right)} \tag{7}
|
| 142 |
+
$$
|
| 143 |
+
|
| 144 |
+
where $\rho$ is the temperature factor controlling the sharpness.
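Putting Equations 6 and 7 together, the sketch below shows one way to implement the Gumbel top-K token selection with a straight-through gradient in PyTorch. The linear scorer, the keep ratio and the specific straight-through formulation are illustrative assumptions rather than the authors' exact implementation.

```python
import torch
import torch.nn.functional as F

def select_tokens(x, x_s4, mask_generator, keep_ratio=0.5, rho=1.0):
    # x: (B, S*T, D) image tokens; x_s4: (B, S*T, D) features from the momentum S4 model.
    b, st, d = x.shape
    k = int(st * keep_ratio)
    log_p = F.log_softmax(mask_generator(x_s4).squeeze(-1), dim=-1)       # log p(c | x_s4)
    g = -torch.log(-torch.log(torch.rand_like(log_p).clamp_min(1e-9)))    # Gumbel noise
    idx = (log_p + g).topk(k, dim=-1).indices                             # K tokens, no replacement
    soft = torch.softmax((log_p + g) / rho, dim=-1)                       # relaxed scores (Eq. 7)
    hard = torch.zeros_like(soft).scatter(-1, idx, 1.0)                   # one-hot selections c^k
    weight = torch.gather(hard + soft - soft.detach(), 1, idx)            # 1 forward, soft backward
    picked = torch.gather(x, 1, idx.unsqueeze(-1).expand(-1, -1, d))      # Eq. 6: X^T c^k
    return picked * weight.unsqueeze(-1)

# Example: a single linear layer acting as the lightweight mask generator.
mg = torch.nn.Linear(256, 1)
out = select_tokens(torch.randn(2, 1176, 256), torch.randn(2, 1176, 256), mg)  # (2, 588, 256)
```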
|
| 145 |
+
|
| 146 |
+
# 3.4. Long-Short Mask Contrastive Learning
|
| 147 |
+
|
| 148 |
+
Previous token reduction/adaptive learning works rarely take model robustness into consideration. Informative tokens might be incorrectly dropped during training, which could hurt the performance of the model. In this paper, in addition to our proposed S5 model that explicitly picks informative tokens for various long-form video understanding tasks, we also propose Long-Short Mask Contrastive Learning (LSMCL) pretraining, which implicitly learns long-form video representations with better generalizability. Specifically, we equip the recent video contrastive learning framework LSTCL [69] with a random masking strategy on both long and short input clips, which mimics all possible scenarios that the selective module could produce in the S5 model. As a result, our S5 model with LSMCL pretraining would be more robust to and tolerant of errors from the selective module. Moreover, the long-short contrastive set-up will further improve the temporal predictability of our S5 model.
|
| 149 |
+
|
| 150 |
+
Formally, we sample a long clip $(x_{L})$ and a short clip $(x_{S})$ from each video sequence with largely differ
|
| 151 |
+
|
| 152 |
+
ent sampling strides $\tau_{L}$ and $\tau_{S}$ , where $\tau_{S} < \tau_{L}$ . Unlike LSTCL [69] and BraVe [56] that apply independent random sampling, in our paper the temporal span of long clips includes the one of short clips, which prevents dissimilar semantics from two clips in long-form videos. Then, we independently generate binary random masks with a masking ratio of $\eta$ for each clip, which can be written as: $\mathcal{R}_{\mathrm{mask}}(x,\eta), x \in \{x_L, x_S\}$ . We set S4 model as the backbone of the query encoder $(f_q)$ and also adopt a momentum key encoder $(f_k)$ in the pipeline, which is widely accepted in MoCo [27], BYOL [22] and LSTCL [69]. Our query encoder and key encoder follow the same design with [22, 27, 69], that consist of the backbone, projection and prediction heads. Denoting the parameter of $f_{q}$ as $\theta_{q}$ and the one of $f_{k}$ as $\theta_{k}$ , we have: $\theta_{k} = m\theta_{k} + (1 - m)\theta_{q}$ where $m \in [0,1]$ is a momentum coefficient. Similarly, the LSMCL adopts similar objective as the InfoNCE [51]:
|
| 153 |
+
|
| 154 |
+
$$
|
| 155 |
+
\text{Given: } q = f_{q}\left(\mathcal{R}_{\text{mask}}\left(x_{S}, \eta\right)\right), \quad k = f_{k}\left(\mathcal{R}_{\text{mask}}\left(x_{L}, \eta\right)\right)
|
| 156 |
+
$$
|
| 157 |
+
|
| 158 |
+
$$
|
| 159 |
+
\mathcal{L}_{\mathrm{LSMCL}} = \sum_{i} - \log \frac{\exp\left({q^{i}}^{\top} k^{i} / \rho\right)}{\exp\left({q^{i}}^{\top} k^{i} / \rho\right) + \sum_{j \neq i} \exp\left({q^{i}}^{\top} k^{j} / \rho\right)} \tag{8}
|
| 160 |
+
$$
|
| 161 |
+
|
| 162 |
+
where $\rho$ is the temperature hyperparameter. As is commonly done in [6,9,10,22], we symmetrize the loss function by switching $x_{S}$ and $x_{L}$ in $f_{q}$ and $f_{k}$ . In our LSMCL, the S4 model is learned to find the correct step size $\Delta$ and SSM parameters to match the representation of random masked long and short clips. Given our S5 model takes adaptively learned image tokens in the downstream task, we believe the LSMCL could improve the robustness as well as the temporal modeling ability of S5 model when dealing with partially sampled image tokens. In Section 4, our S5 model with LSMCL empirically shows significantly improved results in long-form video understanding.
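The sketch below illustrates one way to realize the symmetrized LSMCL objective with random masking in PyTorch. Here `f_q` and `f_k` are assumed to already wrap the S4 backbone with its projection (and, for `f_q`, prediction) heads and to return one embedding per clip; the normalization before the dot products is our assumption, and the momentum update of `f_k` is omitted.

```python
import torch
import torch.nn.functional as F

def random_mask(tokens, ratio):
    # R_mask(x, eta): keep a random (1 - ratio) fraction of each clip's S*T tokens.
    b, n, d = tokens.shape
    keep = max(1, int(n * (1.0 - ratio)))
    idx = torch.rand(b, n, device=tokens.device).argsort(dim=1)[:, :keep]
    return torch.gather(tokens, 1, idx.unsqueeze(-1).expand(-1, -1, d))

def info_nce(q, k, rho):
    # Equation 8 on normalized embeddings; positives sit on the diagonal.
    q, k = F.normalize(q, dim=-1), F.normalize(k, dim=-1)
    logits = q @ k.t() / rho
    return F.cross_entropy(logits, torch.arange(q.size(0), device=q.device))

def lsmcl_loss(f_q, f_k, x_short, x_long, eta=0.5, rho=0.2):
    # Symmetrized objective: swap the roles of the short and long clips.
    loss = info_nce(f_q(random_mask(x_short, eta)), f_k(random_mask(x_long, eta)).detach(), rho)
    loss = loss + info_nce(f_q(random_mask(x_long, eta)), f_k(random_mask(x_short, eta)).detach(), rho)
    return 0.5 * loss
```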
|
| 163 |
+
|
| 164 |
+
<table><tr><td rowspan="2">Mask Generator</td><td colspan="3">Content (↑)</td><td colspan="4">Metadata (↑)</td><td colspan="2">User (↓)</td></tr><tr><td>Relation</td><td>Speak</td><td>Scene</td><td>Director</td><td>Genre</td><td>Writer</td><td>Year</td><td>Like</td><td>View</td></tr><tr><td>No Mask (ViS4mer [32])</td><td>57.14</td><td>40.79</td><td>67.44</td><td>62.61</td><td>54.71</td><td>48.80</td><td>44.75</td><td>0.26</td><td>3.63</td></tr><tr><td>Random</td><td>54.81</td><td>38.22</td><td>67.44</td><td>63.60</td><td>54.97</td><td>47.00</td><td>42.70</td><td>0.25</td><td>4.00</td></tr><tr><td>Single TX</td><td>57.85</td><td>40.79</td><td>68.66</td><td>63.98</td><td>55.12</td><td>48.85</td><td>43.46</td><td>0.26</td><td>3.82</td></tr><tr><td>Single TXs4</td><td>60.54</td><td>41.21</td><td>69.83</td><td>66.43</td><td>57.55</td><td>49.47</td><td>44.15</td><td>0.25</td><td>3.51</td></tr><tr><td>Stacked TXs</td><td>59.51</td><td>41.21</td><td>69.83</td><td>64.91</td><td>55.12</td><td>51.83</td><td>47.55</td><td>0.25</td><td>3.42</td></tr><tr><td>Stacked TXs4</td><td>61.98</td><td>41.75</td><td>70.94</td><td>67.34</td><td>59.16</td><td>51.83</td><td>47.55</td><td>0.24</td><td>3.42</td></tr><tr><td>Linear</td><td>54.81</td><td>40.28</td><td>67.44</td><td>63.90</td><td>54.97</td><td>48.17</td><td>42.77</td><td>0.26</td><td>3.95</td></tr><tr><td>Linears4</td><td>61.98</td><td>41.75</td><td>69.88</td><td>66.40</td><td>58.80</td><td>50.60</td><td>47.70</td><td>0.25</td><td>3.51</td></tr></table>
|
| 165 |
+
|
| 166 |
+
Table 1. Performance of various mask generators on the LVU [72] dataset, where we adopt 60 frames per clip and a $50\%$ masking ratio. The bold results demonstrate the performance of using the S4 feature ( $x_{S_4}$ in Equation 5). We also provide the average improvement ratio (in green) across the nine tasks when using S4 features compared to ViT features at the end of each bold row.
|
| 167 |
+
|
| 168 |
+
# 4. Experiments
|
| 169 |
+
|
| 170 |
+
# 4.1. Dataset
|
| 171 |
+
|
| 172 |
+
LVU dataset [72]: is constructed from Movie Clip dataset [60]. It contains $\sim 30K$ videos from $\sim 3K$ movies. Each video lasts one to three minutes. The benchmark contains nine tasks covering a wide range of long-form video understanding tasks, which are further folded into three main categories: (i) content understanding, consisting of ('relationship', 'speaking style', 'scene/place') prediction, (ii) metadata prediction, including ('director', 'genre', 'writer', and 'movie release year') classification, and (iii) user engagement, predicting ('YouTube like ratio', and 'YouTube popularity'). For classification and regression tasks, we report accuracy (for content understanding and metadata prediction) and mean-squared error (MSE) (for user engagement) as the evaluation metrics.
|
| 173 |
+
|
| 174 |
+
COIN [61, 62]: consists of 11,827 videos with 180 distinct procedural tasks, which are all collected from YouTube. These videos cover 12 domains, such as nursing & caring, vehicles, leisure & performance, gadgets, electrical appliances, household items, science & craft, plants & fruits, snacks & drinks, dishes, sports, and housework. The average length of a video is 2.36 minutes.
|
| 175 |
+
|
| 176 |
+
Breakfast [39]: contains 1,712 videos of 10 complex cooking activities, which are performed by 52 different individuals in 18 different kitchens, resulting in over 77 hours of video footage. The average video length in this dataset is around 2.7 minutes. The ten cooking activities are: making coffee, chocolate milk, juice, tea, cereals, fried egg, pancakes, fruit salad, sandwich and scrambled egg.
|
| 177 |
+
|
| 178 |
+
# 4.2. Implementation Details
|
| 179 |
+
|
| 180 |
+
Following [32, 72], we stack three structure blocks, which share similar structure to that described in Equation 5, and sample video frames at 1 fps. Unlike previous work, we include an adaptive mask generator to effectively pick image tokens before feeding the input into S4 model. As
|
| 181 |
+
|
| 182 |
+
the advantages of our S5 model will naturally be diminished on less redundant sequences, we follow the same architecture as ViS4mer [32] but adopt the S5 model as the first block. For data augmentation, we resize each video frame to the spatial resolution of $224 \times 224$ and use a patch size of $16 \times 16$ . In addition, we use ViT-L [14] pretrained on ImageNet-21K [38] as the feature extractor on the LVU dataset, and Swin-B [43] pretrained on Kinetics-600 [35] as the feature extractor on the COIN and Breakfast datasets. The size of the input in each dataset is also the same as [32]: we adopt 60-second input for the LVU dataset and 64-second input for the COIN and Breakfast datasets. In the LSMCL, we adopt the setting from LSTCL [69] and apply independent global random masking on long and short clips, which share the same masking ratio as the adaptive mask generator. Unless otherwise noted, we conduct our ablation studies on the LVU dataset due to its diverse tasks in long-form video understanding. Finally, we report the best performance of our model on all three datasets and compare with previous state-of-the-art works.
|
| 183 |
+
|
| 184 |
+
# 4.3. Ablation Study
|
| 185 |
+
|
| 186 |
+
a. Our S5 is better than S4 and random masking: To demonstrate the effectiveness of our proposed S5 model, we compare the performance of S4 models with no mask, random mask, and mask generators of different architectures. Specifically, we utilize one Transformer (TX), two stacked Transformers (TXs), and one linear layer as the mask generator and evaluate on 9 tasks on the LVU dataset (Table 1). In addition, we also evaluate the effectiveness of using S4 features from the momentum-updated S4 model. For each architecture, we compare the result of using ViT features and S4 features as the mask generator input. As can be seen from Table 1, the performance of each task substantially increases with the computational complexity of the mask generator. Results show our design significantly outperforms ViS4mer [32] and the random masking strategy, and the performance of each task is further improved by using S4 features. Notably, the mask generator with one
|
| 187 |
+
|
| 188 |
+

|
| 189 |
+
Figure 4. Efficiency evaluation of each method in Table 1, which demonstrates the GPU memory usage as well as throughput. Our proposed S5 model with linear mask generator saves $25\%$ memory cost and achieves on par throughput with ViS4mer [32].
|
| 190 |
+
|
| 191 |
+
linear layer achieves on par performance to one of the more complex transformer architectures.
|
| 192 |
+
|
| 193 |
+
b. Our S5 reduces up to $25\%$ memory usage: In Figure 4, we also demonstrate the efficiency of our S5 model with the different masking architectures mentioned previously. Compared to ViS4mer (the one without masking strategies) using the same number of input frames, our S5 model with the linear mask generator reduces the memory footprint by $25\%$ while maintaining the same level of throughput. Memory consumption and throughput are not improved by the intricate transformer mask generators. Since the linear mask generator has a smaller memory footprint and performs tasks more effectively overall, we use it in our S5 model in the following experiments.
|
| 194 |
+
|
| 195 |
+
c. Impact of Masking Ratio and Sequence Length: In Figure 5a and 5b, we study the effect of masking ratio and sequence length with our S5 model. We set ViS4mer [32] (60 frames without mask generator) as the baseline and report the average improvement percentage of 9 tasks on the LVU dataset when using the S5 model with varying masking ratios/sequence lengths. To demonstrate the effectiveness of our S5 model, we also compare the performance of ViS4mer [32] with different settings in these two figures. Figure 5a clearly shows that the performance of our S5 model increases initially as the masking ratio increases, which indicates that our selective model effectively picks informative image tokens for the S4 model. However, the performance starts to drop dramatically when the masking ratio is over $50\%$ . This is because when the masking ratio increases above a certain level, the informative tokens are forced to be dropped. As a result, we adopt a $50\%$ masking ratio in our following experiments. In Figure 5b, we observe substantial improvement of the S5 model with an increasing number of input frames. In contrast to the performance of ViS4mer [32], our proposed S5 model is indeed able to capture longer-term dependencies while reducing the spatial-temporal redundancy in the input.
|
| 196 |
+
|
| 197 |
+
d. Effect of Multiple S5 models: As shown in Figure 3, multiple S5 models can be stacked in the pipeline, similar
|
| 198 |
+
|
| 199 |
+
to what is commonly done in Transformer [4, 14, 73] and ViS4mer [32]. In the previous setup, we only adopt one S5 model, leaving the remaining blocks as S4 models. By stacking multiple S5 models, we find a further $0.5\%$ average improvement on the LVU dataset. Less redundant sequences will inevitably reduce the performance gain from our S5 model, decreasing the benefit from stacking additional S5 blocks. As a result, we utilize only one S5 model after the video encoder for maximum memory efficiency gain and throughput.
|
| 200 |
+
|
| 201 |
+
e. Ablation on LSMCL: In Figure 5c and 5d, we evaluate the effectiveness of our proposed LSMCL with different sampling strides and random masking ratios. For both figures, we set the performance of ViS4mer [32] as the baseline and report the average improvement ratio (in percentage) of 9 tasks from LVU with different settings. From Figure 5c, our S5 model with LSMCL can achieve better performance even when $\tau_{L} = \tau_{S}$ , which suggests that LSMCL can increase the robustness of our S5 model and help it handle incorrectly picked tokens. When we gradually increase the $\frac{\tau_L}{\tau_S}$ , the performance of S5 model is further improved as the model is able to capture longer temporal context via the proposed LSMCL. Indeed, the performance using LSMCL approaches the performance without LSMCL with $66\%$ more input frames (shown in Figure 5b both around $6\%$ boost). In Figure 5d, we further ablate the random masking ratio used in LSMCL. When the masking ratio of LSMCL is over $50\%$ , the benefit from LSMCL is insignificant as the input does not provide sufficient information. Thus, we consider $50\%$ masking ratio in LSMCL for better efficiency in the long-form video contrastive learning.
|
| 202 |
+
|
| 203 |
+
# 4.4. Comparison with the State-Of-The-Arts
|
| 204 |
+
|
| 205 |
+
In Table 2, we compare our method on the LVU dataset with previous state-of-the-art methods. Specifically, LST [32] adopts the same architecture as ours, but replaces the S5/S4 model with a transformer architecture, whereas Performer [11] and Orthoformer [54] apply efficient attention in the transformer architecture, which does not require quadratic complexity w.r.t. the input length. When compared to the baseline ViS4mer [32], we achieve up to $9.6\%$ improvement. When compared to other methods, ours outperforms by an even more significant margin. This shows that our method is consistently more effective in understanding long-form videos.
|
| 206 |
+
|
| 207 |
+
To demonstrate the generalizability of our method, we evaluate our S5 model on the COIN [61, 62] and Breakfast [39] datasets, which are challenging long-range procedural activity classification datasets. Our proposed method achieves $2.4\%$ and $5.5\%$ gains over ViS4mer [32] and outperforms the other state-of-the-arts by $0.81\%$ and $0.80\%$ respectively. Notice that D-Sprv. [42] leverages the HowTo100M dataset [48] for pretraining, whose volume is much larger
|
| 208 |
+
|
| 209 |
+

|
| 210 |
+
(a)
|
| 211 |
+
|
| 212 |
+

|
| 213 |
+
(b)
|
| 214 |
+
Figure 5. Average performance improvement of our method over the baseline on the LVU dataset. Unless otherwise noted, the default number of input frames and masking ratio are 60 and $50\%$ . (a). We compare our S5 model and the S4 model with random masking under increasing masking ratios; (b). We compare our S5 model and the S4 model with an increasing number of input frames; (c). We show the effect of LSMCL pretraining with different long-short sampling stride ratios. In addition, we provide the performance of the S5 model without LSMCL and the S5 model with 100 input frames; (d). We show the impact of the increasing masking ratio in the LSMCL pretraining.
|
| 215 |
+
|
| 216 |
+

|
| 217 |
+
(c)
|
| 218 |
+
|
| 219 |
+

|
| 220 |
+
(d)
|
| 221 |
+
|
| 222 |
+
<table><tr><td rowspan="2">Model</td><td colspan="3">Content (↑)</td><td colspan="4">Metadata (↑)</td><td colspan="2">User (↓)</td><td rowspan="2">GPU Usage (GB) (↓)</td></tr><tr><td>Relation</td><td>Speak</td><td>Scene</td><td>Director</td><td>Genre</td><td>Writer</td><td>Year</td><td>Like</td><td>View</td></tr><tr><td>Obj. T4mer [72]</td><td>54.76</td><td>33.17</td><td>52.94</td><td>47.66</td><td>52.74</td><td>36.30</td><td>37.76</td><td>0.30</td><td>3.68</td><td>N/A</td></tr><tr><td>Performer [11]</td><td>50.00</td><td>38.80</td><td>60.46</td><td>58.87</td><td>49.45</td><td>48.21</td><td>41.25</td><td>0.31</td><td>3.93</td><td>5.93</td></tr><tr><td>Orthoformer [54]</td><td>50.00</td><td>38.30</td><td>66.27</td><td>55.14</td><td>55.79</td><td>47.02</td><td>43.35</td><td>0.29</td><td>3.86</td><td>5.56</td></tr><tr><td>VideoBERT [58]</td><td>52.80</td><td>37.90</td><td>54.90</td><td>47.30</td><td>51.90</td><td>38.50</td><td>36.10</td><td>0.32</td><td>4.46</td><td>N/A</td></tr><tr><td>LST [32]</td><td>52.38</td><td>37.31</td><td>62.79</td><td>56.07</td><td>52.70</td><td>42.26</td><td>39.16</td><td>0.31</td><td>3.83</td><td>41.38</td></tr><tr><td>ViS4mer [32]</td><td>57.14</td><td>40.79</td><td>67.44</td><td>62.61</td><td>54.71</td><td>48.80</td><td>44.75</td><td>0.26</td><td>3.63</td><td>5.15</td></tr><tr><td>Ours60 frames</td><td>61.98</td><td>41.75</td><td>69.88</td><td>66.40</td><td>58.80</td><td>50.60</td><td>47.70</td><td>0.25</td><td>3.51</td><td>3.85</td></tr><tr><td>Ours60 frames+LSMCL</td><td>61.98</td><td>41.75</td><td>72.53</td><td>66.40</td><td>61.34</td><td>50.60</td><td>47.70</td><td>0.24</td><td>3.51</td><td>3.85</td></tr><tr><td>Ours100 frames</td><td>66.71</td><td>41.78</td><td>73.28</td><td>66.64</td><td>63.65</td><td>50.60</td><td>47.85</td><td>0.25</td><td>3.51</td><td>3.95</td></tr><tr><td>Ours100 frames+LSMCL</td><td>67.11</td><td>42.12</td><td>73.49</td><td>67.32</td><td>65.41</td><td>51.27</td><td>47.95</td><td>0.24</td><td>3.51</td><td>3.95</td></tr></table>
|
| 223 |
+
|
| 224 |
+
Table 2. Comparison to the state-of-the-art methods on LVU dataset testing set.
|
| 225 |
+
|
| 226 |
+
<table><tr><td>Method</td><td>P.T. Dataset</td><td>P.T. Samples</td><td>Accuracy</td></tr><tr><td>TSN [62]</td><td>Kinetics-400</td><td>306K</td><td>73.40</td></tr><tr><td>D-Sprv. [42]</td><td>HowTo100M</td><td>136M</td><td>90.00</td></tr><tr><td>ViS4mer [32]</td><td>Kinetics-600</td><td>495K</td><td>88.41</td></tr><tr><td>Ours</td><td>Kinetics-600</td><td>495K</td><td>90.42</td></tr><tr><td>Ours+LSMCL</td><td>Kinetics-600</td><td>495K</td><td>90.81</td></tr></table>
|
| 227 |
+
|
| 228 |
+
Table 3. Comparison to the state-of-the-art methods on COIN dataset. P.T. stands for pretraining.
|
| 229 |
+
|
| 230 |
+
<table><tr><td>Method</td><td>P.T. Dataset</td><td>P.T. Samples</td><td>Accuracy</td></tr><tr><td>VideoGraph [30]</td><td>Kinetics-400</td><td>306K</td><td>69.50</td></tr><tr><td>Timeception [29]</td><td>Kinetics-400</td><td>306K</td><td>71.30</td></tr><tr><td>GHRM [78]</td><td>Kinetics-400</td><td>306K</td><td>75.50</td></tr><tr><td>D-Sprv. [42]</td><td>HowTo100M</td><td>136M</td><td>89.90</td></tr><tr><td>ViS4mer [32]</td><td>Kinetics-600</td><td>495K</td><td>85.10*</td></tr><tr><td>Ours</td><td>Kinetics-600</td><td>495K</td><td>90.14</td></tr><tr><td>Ours+LSMCL</td><td>Kinetics-600</td><td>495K</td><td>90.70</td></tr></table>
|
| 231 |
+
|
| 232 |
+
Table 4. Comparison to the state-of-the-art methods on Breakfast dataset. P.T. stands for pretraining. *We were not able to reproduce the $88.17\%$ baseline result reported in [32], but our proposed S5 model still largely improves from $85.10\%$ , and achieves the new state-of-the-art result.
|
| 233 |
+
|
| 234 |
+
than our pre-training dataset (Kinetics-600 [7]). Putting together the aforementioned performance gain and memory efficiency gain, our S5 model successfully demonstrates its efficiency and effectiveness in learning discriminative representations by selecting informative image tokens from long-form video sequences.
|
| 237 |
+
|
| 238 |
+
# 5. Conclusion
|
| 239 |
+
|
| 240 |
+
In this paper, we proposed a selective structured state-space sequence (S5) model for long-form video understanding, in which a lightweight mask generator adaptively picks informative tokens from long-form videos. Our mask generator avoids the dense self-attention computation used in previous works: it leverages the sequential output of the simulated linear time-invariant (LTI) system and benefits from momentum distillation of the S4 model, enabling our S5 model to dynamically learn from informative tokens for different long-form video tasks. To mitigate the negative impact of picking less informative tokens, we also propose LSMCL pretraining to improve robustness and further broaden the temporal horizon of our model. Through extensive experiments, we demonstrate the effectiveness of each proposed component of our S5 model, achieving new state-of-the-art performance on three challenging long-form video understanding benchmarks.
|
| 241 |
+
|
| 242 |
+
# References
|
| 243 |
+
|
| 244 |
+
[1] Anurag Arnab, Mostafa Dehghani, Georg Heigold, Chen Sun, Mario Lučić, and Cordelia Schmid. Vivit: A video vision transformer. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 6836-6846, October 2021. 1
|
| 245 |
+
[2] Jimmy Lei Ba, Jamie Ryan Kiros, and Geoffrey E Hinton. Layer normalization. arXiv preprint arXiv:1607.06450, 2016. 4, 5
|
| 246 |
+
[3] Moez Baccouche, Franck Mamalet, Christian Wolf, Christophe Garcia, and Atilla Baskurt. Sequential deep learning for human action recognition. In International workshop on human behavior understanding, pages 29-39. Springer, 2011. 1
|
| 247 |
+
[4] Gedas Bertasius, Heng Wang, and Lorenzo Torresani. Is space-time attention all you need for video understanding? In Marina Meila and Tong Zhang, editors, Proceedings of the 38th International Conference on Machine Learning, ICML 2021, 18-24 July 2021, Virtual Event, volume 139 of Proceedings of Machine Learning Research, pages 813-824. PMLR, 2021. 1, 3, 4, 7
|
| 248 |
+
[5] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901, 2020. 2
|
| 249 |
+
[6] Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. arXiv preprint arXiv:2006.09882, 2020. 5
|
| 250 |
+
[7] Joao Carreira, Eric Noland, Andras Banki-Horvath, Chloe Hillier, and Andrew Zisserman. A short note about kinetics-600. arXiv preprint arXiv:1808.01340, 2018. 8
|
| 251 |
+
[8] Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In International conference on machine learning, pages 1597-1607. PMLR, 2020. 3, 12
|
| 252 |
+
[9] Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. arXiv preprint arXiv:2011.10566, 2020. 3, 5
|
| 253 |
+
[10] Xinlei Chen, Saining Xie, and Kaiming He. An empirical study of training self-supervised visual transformers. arXiv preprint arXiv:2104.02057, 2021. 5, 12
|
| 254 |
+
[11] Krzysztof Choromanski, Valerii Likhosherstov, David Dohan, Xingyou Song, Andreea Gane, Tamas Sarlos, Peter Hawkins, Jared Davis, Afroz Mohiuddin, Lukasz Kaiser, et al. Rethinking attention with performers. arXiv preprint arXiv:2009.14794, 2020. 2, 7, 8
|
| 255 |
+
[12] Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc V Le, and Ruslan Salakhutdinov. Transformer-xl: Attentive language models beyond a fixed-length context. arXiv preprint arXiv:1901.02860, 2019. 2
|
| 256 |
+
[13] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. 2
|
| 257 |
+
|
| 258 |
+
[14] Alexey Dosovitskiy, Lucas Beyer, Alexander Kolesnikov, Dirk Weissenborn, Xiaohua Zhai, Thomas Unterthiner, Mostafa Dehghani, Matthias Minderer, Georg Heigold, Sylvain Gelly, et al. An image is worth 16x16 words: Transformers for image recognition at scale. arXiv preprint arXiv:2010.11929, 2020. 1, 6, 7
|
| 259 |
+
[15] Haoqi Fan, Bo Xiong, Karttikeya Mangalam, Yanghao Li, Zhicheng Yan, Jitendra Malik, and Christoph Feichtenhofer. Multiscale vision transformers. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), pages 6824-6835, October 2021. 1, 4
|
| 260 |
+
[16] Christoph Feichtenhofer, Haoqi Fan, Yanghao Li, and Kaiming He. Masked autoencoders as spatiotemporal learners. arXiv preprint arXiv:2205.09113, 2022. 3
|
| 261 |
+
[17] Christoph Feichtenhofer, Haoqi Fan, Bo Xiong, Ross Girshick, and Kaiming He. A large-scale study on unsupervised spatiotemporal representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3299-3309, 2021. 2
|
| 262 |
+
[18] Christoph Feichtenhofer, Haoqi Fan, Bo Xiong, Ross B. Girshick, and Kaiming He. A large-scale study on unsupervised spatiotemporal representation learning. In IEEE Conference on Computer Vision and Pattern Recognition, CVPR 2021, virtual, June 19-25, 2021, pages 3299-3309. Computer Vision Foundation / IEEE, 2021. 3
|
| 263 |
+
[19] Christoph Feichtenhofer, Axel Pinz, and Richard P Wildes. Spatiotemporal multiplier networks for video action recognition. In CVPR, 2017. 1
|
| 264 |
+
[20] Christoph Feichtenhofer, Axel Pinz, and Richard P Wildes. Temporal residual networks for dynamic scene recognition. In CVPR, 2017. 1
|
| 265 |
+
[21] Priya Goyal, Piotr Dollár, Ross Girshick, Pieter Noordhuis, Lukasz Wesolowski, Aapo Kyrola, Andrew Tulloch, Yangqing Jia, and Kaiming He. Accurate, large minibatch sgd: Training imagenet in 1 hour. arXiv preprint arXiv:1706.02677, 2017. 12
|
| 266 |
+
[22] Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre H Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Daniel Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent: A new approach to self-supervised learning. arXiv preprint arXiv:2006.07733, 2020. 3, 5
|
| 267 |
+
[23] Albert Gu, Tri Dao, Stefano Ermon, Atri Rudra, and Christopher Ré. Hippo: Recurrent memory with optimal polynomial projections. Advances in Neural Information Processing Systems, 33:1474-1487, 2020. 3
|
| 268 |
+
[24] Albert Gu, Karan Goel, and Christopher Ré. Efficiently modeling long sequences with structured state spaces. arXiv preprint arXiv:2111.00396, 2021. 1, 2, 3, 5
|
| 269 |
+
[25] Albert Gu, Isys Johnson, Karan Goel, Khaled Saab, Tri Dao, Atri Rudra, and Christopher Ré. Combining recurrent, convolutional, and continuous-time models with linear state space layers. Advances in neural information processing systems, 34:572-585, 2021. 3
|
| 270 |
+
[26] Kaiming He, Xinlei Chen, Saining Xie, Yanghao Li, Piotr Dollár, and Ross Girshick. Masked autoencoders are scalable vision learners. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 16000-16009, 2022. 3
|
| 273 |
+
[27] Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9729-9738, 2020. 3, 5
|
| 274 |
+
[28] Dan Hendrycks and Kevin Gimpel. Gaussian error linear units (gelus). arXiv preprint arXiv:1606.08415, 2016. 12
|
| 275 |
+
[29] Noureldien Hussein, Efstratios Gavves, and Arnold WM Smeulders. Timeception for complex action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 254-263, 2019. 8
|
| 276 |
+
[30] Noureldien Hussein, Efstratios Gavves, and Arnold WM Smeulders. Videograph: Recognizing minutes-long human activities in videos. arXiv preprint arXiv:1905.05143, 2019. 8
|
| 277 |
+
[31] Sergey Ioffe. Batch renormalization: Towards reducing minibatch dependence in batch-normalized models. Advances in neural information processing systems, 30, 2017. 12
|
| 278 |
+
[32] Md Mohaiminul Islam and Gedas Bertasius. Long movie clip classification with state-space video models. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 1, 2, 3, 4, 5, 6, 7, 8, 12, 13
|
| 279 |
+
[33] Eric Jang, Shixiang Gu, and Ben Poole. Categorical reparameterization with gumbel-softmax. arXiv preprint arXiv:1611.01144, 2016. 2, 5
|
| 280 |
+
[34] Angelos Katharopoulos, Apoorv Vyas, Nikolaos Pappas, and François Fleuret. Transformers are rnns: Fast autoregressive transformers with linear attention. In International Conference on Machine Learning, pages 5156-5165. PMLR, 2020. 2
|
| 281 |
+
[35] Will Kay, Joao Carreira, Karen Simonyan, Brian Zhang, Chloe Hillier, Sudheendra Vijayanarasimhan, Fabio Viola, Tim Green, Trevor Back, Paul Natsev, et al. The kinetics human action video dataset. arXiv preprint arXiv:1705.06950, 2017. The Kinetics-400 dataset is licensed under the Creative Commons Attribution-NonCommercial 4.0 International License. 6
|
| 282 |
+
[36] Nikita Kitaev, Łukasz Kaiser, and Anselm Levskaya. Reformer: The efficient transformer. arXiv preprint arXiv:2001.04451, 2020. 2
|
| 283 |
+
[37] Bruno Korbar, Du Tran, and Lorenzo Torresani. Scsampler: Sampling salient clips from video for efficient action recognition. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 6232-6242, 2019. 2
|
| 284 |
+
[38] Alex Krizhevsky, Ilya Sutskever, and Geoffrey E Hinton. Imagenet classification with deep convolutional neural networks. Advances in neural information processing systems, 25:1097-1105, 2012. 6
|
| 285 |
+
[39] Hilde Kuehne, Ali Arslan, and Thomas Serre. The language of actions: Recovering the syntax and semantics of goal-directed human activities. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 780-787, 2014. 1, 6, 7, 12
|
| 286 |
+
[40] Youwei Liang, Chongjian Ge, Zhan Tong, Yibing Song, Jue Wang, and Pengtao Xie. Not all patches are what you need: Expediting vision transformers via token reorganizations. arXiv preprint arXiv:2202.07800, 2022. 2, 3, 4
|
| 289 |
+
[41] Xudong Lin, Gedas Bertasius, Jue Wang, Shih-Fu Chang, Devi Parikh, and Lorenzo Torresani. Vx2text: End-to-end learning of video-based text generation from multimodal inputs. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 7005-7015, 2021. 5
|
| 290 |
+
[42] Xudong Lin, Fabio Petroni, Gedas Bertasius, Marcus Rohrbach, Shih-Fu Chang, and Lorenzo Torresani. Learning to recognize procedural activities with distant supervision. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13853-13863, 2022. 7, 8
|
| 291 |
+
[43] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. arXiv preprint arXiv:2103.14030, 2021. 2, 6
|
| 292 |
+
[44] Ze Liu, Jia Ning, Yue Cao, Yixuan Wei, Zheng Zhang, Stephen Lin, and Han Hu. Video swin transformer. arXiv preprint arXiv:2106.13230, 2021. 1, 2, 4
|
| 293 |
+
[45] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv preprint arXiv:1711.05101, 2017. 12
|
| 294 |
+
[46] Lingchen Meng, Hengduo Li, Bor-Chun Chen, Shiyi Lan, Zuxuan Wu, Yu-Gang Jiang, and Ser-Nam Lim. Adavit: Adaptive vision transformers for efficient image recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 12309-12318, 2022. 2, 4, 5
|
| 295 |
+
[47] Yue Meng, Chung-Ching Lin, Rameswar Panda, Prasanna Sattigeri, Leonid Karlinsky, Aude Oliva, Kate Saenko, and Rogerio Feris. Ar-net: Adaptive frame resolution for efficient action recognition. In European Conference on Computer Vision, pages 86-104. Springer, 2020. 2
|
| 296 |
+
[48] Antoine Miech, Dimitri Zhukov, Jean-Baptiste Alayrac, Makarand Tapaswi, Ivan Laptev, and Josef Sivic. Howto100m: Learning a text-video embedding by watching hundred million narrated video clips. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2630-2640, 2019. 7
|
| 297 |
+
[49] Daniel Neimark, Omri Bar, Maya Zohar, and Dotan Asselmann. Video transformer network. arXiv preprint arXiv:2102.00719, 2021. 1, 3
|
| 298 |
+
[50] Eric Nguyen, Karan Goel, Albert Gu, Gordon W Downs, Preey Shah, Tri Dao, Stephen A Baccus, and Christopher Ré. S4nd: Modeling images and videos as multidimensional signals using state spaces. Advances in neural information processing systems, 2022. 2
|
| 299 |
+
[51] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018. 5
|
| 300 |
+
[52] Zizheng Pan, Bohan Zhuang, Jing Liu, Haoyu He, and Jianfei Cai. Scalable vision transformers with hierarchical pooling. In Proceedings of the IEEE/cvf international conference on computer vision, pages 377-386, 2021. 2
|
| 301 |
+
[53] Adam Paszke, Sam Gross, Francisco Massa, Adam Lerer, James Bradbury, Gregory Chanan, Trevor Killeen, Zeming Lin, Natalia Gimelshein, Luca Antiga, et al. Pytorch: An imperative style, high-performance deep learning library. arXiv preprint arXiv:1912.01703, 2019. 12
|
| 304 |
+
[54] Mandela Patrick, Dylan Campbell, Yuki Asano, Ishan Misra, Florian Metze, Christoph Feichtenhofer, Andrea Vedaldi, and João F Henriques. Keeping your eye on the ball: Trajectory attention in video transformers. Advances in neural information processing systems, 34:12493-12506, 2021. 1, 7, 8
|
| 305 |
+
[55] Yongming Rao, Wenliang Zhao, Benlin Liu, Jiwen Lu, Jie Zhou, and Cho-Jui Hsieh. Dynamicvit: Efficient vision transformers with dynamic token sparsification. Advances in neural information processing systems, 34:13937-13949, 2021. 2, 4
|
| 306 |
+
[56] Adrià Recasens, Pauline Luc, Jean-Baptiste Alayrac, Luyu Wang, Florian Strub, Corentin Tallec, Mateusz Malinowski, Viorica Patraucean, Florent Altché, Michal Valko, et al. Broaden your views for self-supervised video learning. arXiv preprint arXiv:2103.16559, 2021. 2, 3, 5
|
| 307 |
+
[57] Karen Simonyan and Andrew Zisserman. Two-stream convolutional networks for action recognition in videos. arXiv preprint arXiv:1406.2199, 2014. 1
|
| 308 |
+
[58] Chen Sun, Austin Myers, Carl Vondrick, Kevin Murphy, and Cordelia Schmid. Videobert: A joint model for video and language representation learning. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7464-7473, 2019. 8
|
| 309 |
+
[59] Yuchong Sun, Bei Liu, Hongwei Xue, Ruihua Song, Huan Yang, and Jianlong Fu. Long-form video-language pretraining with multimodal temporal contrastive learning. Advances in neural information processing systems, 2022. 1, 2
|
| 310 |
+
[60] Yansong Tang, Dajun Ding, Yongming Rao, Yu Zheng, Danyang Zhang, Lili Zhao, Jiwen Lu, and Jie Zhou. Coin: A large-scale dataset for comprehensive instructional video analysis. In IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2019. 6
|
| 311 |
+
[61] Yansong Tang, Dajun Ding, Yongming Rao, Yu Zheng, Danyang Zhang, Lili Zhao, Jiwen Lu, and Jie Zhou. Coin: A large-scale dataset for comprehensive instructional video analysis. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1207-1216, 2019. 6, 7, 12
|
| 312 |
+
[62] Yansong Tang, Jiwen Lu, and Jie Zhou. Comprehensive instructional video analysis: The coin dataset and performance evaluation. IEEE transactions on pattern analysis and machine intelligence, 43(9):3138-3153, 2020. 6, 7, 8, 12
|
| 313 |
+
[63] Zhan Tong, Yibing Song, Jue Wang, and Limin Wang. Videomae: Masked autoencoders are data-efficient learners for self-supervised video pre-training. arXiv preprint arXiv:2203.12602, 2022. 3
|
| 314 |
+
[64] Du Tran, Lubomir Bourdev, Rob Fergus, Lorenzo Torresani, and Manohar Paluri. Learning spatiotemporal features with 3d convolutional networks. In Proceedings of the IEEE international conference on computer vision, pages 4489-4497, 2015. 1
|
| 315 |
+
[65] Du Tran, Heng Wang, Lorenzo Torresani, and Matt Feiszli. Video classification with channel-separated convolutional networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 5552-5561, 2019. 1
|
| 318 |
+
[66] Du Tran, Heng Wang, Lorenzo Torresani, Jamie Ray, Yann LeCun, and Manohar Paluri. A closer look at spatiotemporal convolutions for action recognition. In Proceedings of the IEEE conference on Computer Vision and Pattern Recognition, pages 6450-6459, 2018. 1
|
| 319 |
+
[67] Arnold Tustin. A method of analysing the behaviour of linear systems in terms of time series. Journal of the Institution of Electrical Engineers-Part IIA: Automatic Regulators and Servo Mechanisms, 94(1):130–142, 1947. 3
|
| 320 |
+
[68] Vivek Veeriah, Naifan Zhuang, and Guo-Jun Qi. Differential recurrent neural networks for action recognition. In Proceedings of the IEEE international conference on computer vision, pages 4041-4049, 2015. 1
|
| 321 |
+
[69] Jue Wang, Gedas Bertasius, Du Tran, and Lorenzo Torresani. Long-short temporal contrastive learning of video transformers. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14010-14020, 2022. 2, 3, 4, 5, 6, 12
|
| 322 |
+
[70] Jue Wang and Lorenzo Torresani. Deformable video transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14053-14062, 2022. 1, 2
|
| 323 |
+
[71] Junke Wang, Xitong Yang, Hengduo Li, Zuxuan Wu, and Yu-Gang Jiang. Efficient video transformers with spatial-temporal token selection. arXiv preprint arXiv:2111.11591, 2021. 2
|
| 324 |
+
[72] Chao-Yuan Wu and Philipp Krähenbühl. Towards Long-Form Video Understanding. In CVPR, 2021. 4, 6, 8, 12, 13
|
| 325 |
+
[73] Chao-Yuan Wu, Yanghao Li, Karttikeya Mangalam, Haoqi Fan, Bo Xiong, Jitendra Malik, and Christoph Feichtenhofer. Memvit: Memory-augmented multiscale vision transformer for efficient long-term video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13587-13597, 2022. 1, 2, 7
|
| 326 |
+
[74] Zuxuan Wu, Caiming Xiong, Chih-Yao Ma, Richard Socher, and Larry S Davis. Adaframe: Adaptive frame selection for fast video recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1278-1287, 2019. 2
|
| 327 |
+
[75] Hongxu Yin, Arash Vahdat, Jose Alvarez, Arun Mallya, Jan Kautz, and Pavlo Molchanov. Adavit: Adaptive tokens for efficient vision transformer. arXiv preprint arXiv:2112.07658, 2021. 2, 4
|
| 328 |
+
[76] Hongxu Yin, Arash Vahdat, Jose M Alvarez, Arun Mallya, Jan Kautz, and Pavlo Molchanov. A-vit: Adaptive tokens for efficient vision transformer. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 10809-10818, 2022. 2
|
| 329 |
+
[77] Pengfei Zhang, Cuiling Lan, Junliang Xing, Wenjun Zeng, Jianru Xue, and Nanning Zheng. View adaptive recurrent neural networks for high performance human action recognition from skeleton data. In Proceedings of the IEEE international conference on computer vision, pages 2117-2126, 2017. 1
|
| 330 |
+
|
| 331 |
+
[78] Jiaming Zhou, Kun-Yu Lin, Haoxin Li, and Wei-Shi Zheng. Graph-based high-order relation modeling for long-term action recognition. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 8984-8993, 2021. 8
|
| 332 |
+
|
| 333 |
+
# A. Implementation Details
|
| 334 |
+
|
| 335 |
+
In addition to the implementation details introduced in Section 4.2 of the main paper, we provide more information of training our S5 model and LSMCL below.
|
| 336 |
+
|
| 337 |
+
# A.1. S5 model
|
| 338 |
+
|
| 339 |
+
Following ViS4mer [32], we introduce an MLP layer in each block to reduce the feature dimension by a factor of 2. Each MLP layer consists of a linear layer, a GELU activation [28] and a dropout layer with a dropout rate of 0.2. For updating the momentum S4 model, we explore different values of the momentum coefficient and set it to 0.01, which produces the best performance. For all experiments with our S5 model, we use the AdamW optimizer [45] with a learning rate of $10^{-3} \times \frac{\text{batch size}}{16}$ and a weight decay of 0.01. For COIN [61, 62], Breakfast [39] and each task on the LVU dataset [72], we train our S5 model for 100 epochs and reduce the learning rate by a factor of 0.2 whenever the training loss has not decreased over the past epoch. We train our S5 model on $8\times$ NVIDIA Tesla V100 16GB GPUs with a batch size of 16. All implementations use PyTorch [53].
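For illustration, the block-level dimension-reduction MLP and the optimizer/scheduler setup described above can be sketched in PyTorch as follows. This is a minimal sketch under the hyperparameters quoted in this section; the module and variable names (e.g. `ReductionMLP`) are ours and the feature dimension is a placeholder, not values from the released code.

```python
import torch
import torch.nn as nn

class ReductionMLP(nn.Module):
    """Per-block MLP that halves the feature dimension (linear -> GELU -> dropout)."""
    def __init__(self, dim: int, dropout: float = 0.2):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, dim // 2),  # reduce the feature dimension by a factor of 2
            nn.GELU(),
            nn.Dropout(dropout),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.net(x)

# Optimizer and scheduler following the settings quoted above.
model = ReductionMLP(dim=1024)            # hypothetical stand-in for the full S5 model
batch_size = 16
optimizer = torch.optim.AdamW(
    model.parameters(),
    lr=1e-3 * batch_size / 16,            # learning rate scales linearly with batch size
    weight_decay=0.01,
)
# Reduce the learning rate by 0.2x when the training loss stops decreasing (patience of 1 epoch).
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode="min", factor=0.2, patience=1
)
```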
|
| 340 |
+
|
| 341 |
+
# A.2. LSMCL
|
| 342 |
+
|
| 343 |
+
In LSMCL, we sample two clips with different sampling strides from the video sequence; the clip shape is consistent with the one used when finetuning the S5 model. Specifically, we sample input clips of size $60 \times 3 \times 224 \times 224$ on the LVU dataset [72] and $64 \times 3 \times 224 \times 224$ on the COIN [61, 62] and Breakfast [39] datasets. The sampling stride ratio is set to $\frac{\tau_L}{\tau_S} = 1.5$, which is also ablated in Figure 5(c) of the main paper. Following LSTCL [69], we adopt a query encoder and a key encoder in LSMCL. The query encoder consists of an S4 model backbone, an MLP projection head and an additional MLP prediction head. The purpose of the prediction head is to transform the representation of the query clip to match the key. The key encoder consists of an S4 model backbone and an MLP projection head. The momentum coefficient for updating the key encoder is 0.99. Following [10, 69], the MLP projection head has 3 layers while the MLP prediction head has 2 layers. The hidden layers of both MLPs are 4096-D with ReLU; the output layers of both MLPs are 256-D without ReLU. All layers in both MLPs use BN [31], following [8, 10]. For the optimizer, we adopt AdamW [45] with a learning rate of $10^{-4} \times \frac{\text{batch size}}{256}$ and a weight decay of 0.05. We train LSMCL for 300 epochs in total and adopt learning-rate warm-up [21] for the first 40 epochs. We train LSMCL on $8\times$ NVIDIA Tesla V100 16GB GPUs with a batch size of 64 and optimize the model with the loss in Equation 8, where $\rho = 0.2$.
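As a rough sketch of the query/key encoder structure and the momentum update described above (class and function names are ours; the backbone is passed in as an opaque module and is not the released implementation):

```python
import copy
import torch
import torch.nn as nn

def mlp_head(in_dim: int, out_dim: int = 256, hidden: int = 4096, layers: int = 3) -> nn.Sequential:
    """Projection/prediction head: 4096-D hidden layers with BN + ReLU, 256-D output without ReLU."""
    blocks, dim = [], in_dim
    for _ in range(layers - 1):
        blocks += [nn.Linear(dim, hidden), nn.BatchNorm1d(hidden), nn.ReLU(inplace=True)]
        dim = hidden
    blocks += [nn.Linear(dim, out_dim), nn.BatchNorm1d(out_dim)]  # no ReLU on the output layer
    return nn.Sequential(*blocks)

class QueryKeyEncoders(nn.Module):
    """Query encoder (backbone + 3-layer projection + 2-layer prediction) and a momentum key encoder."""
    def __init__(self, backbone: nn.Module, feat_dim: int, momentum: float = 0.99):
        super().__init__()
        self.momentum = momentum
        self.query_backbone = backbone
        self.query_proj = mlp_head(feat_dim, layers=3)
        self.query_pred = mlp_head(256, layers=2)
        # The key encoder is a momentum copy of the query backbone + projection head.
        self.key_backbone = copy.deepcopy(backbone)
        self.key_proj = copy.deepcopy(self.query_proj)
        for p in list(self.key_backbone.parameters()) + list(self.key_proj.parameters()):
            p.requires_grad = False

    @torch.no_grad()
    def update_key_encoder(self):
        """k <- m * k + (1 - m) * q with momentum coefficient m = 0.99."""
        for q, k in zip(self.query_backbone.parameters(), self.key_backbone.parameters()):
            k.mul_(self.momentum).add_(q, alpha=1.0 - self.momentum)
        for q, k in zip(self.query_proj.parameters(), self.key_proj.parameters()):
            k.mul_(self.momentum).add_(q, alpha=1.0 - self.momentum)
```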
|
| 344 |
+
|
| 345 |
+

|
| 346 |
+
Figure 6. Average performance improvement of our method over the baseline under different settings on the LVU dataset. Unless otherwise noted, the default number of input frames is 60 and the default masking ratio is $50\%$. We study the effect of leveraging multiple S5 models, where we substitute more of the S4 models in the original ViS4mer [32] with our S5 models.
|
| 347 |
+
|
| 348 |
+
# B. Effect of Multiple S5 Models
|
| 349 |
+
|
| 350 |
+
In this paper, we improve the previous S4 model by introducing a novel selective module, formulating the Selective S4 (S5) model. For a fair comparison, we follow the architecture introduced in ViS4mer [32], which utilizes three S4 models with pooling and MLP layers in between. As the advantages of our S5 model naturally diminish on less redundant sequences, our default setting is to substitute the first S4 model in ViS4mer [32] with our proposed S5 model while keeping the rest of the architecture the same as ViS4mer [32]. In this section, we study the impact of using more S5 models in the ViS4mer [32] architecture. In Figure 6, we gradually increase the number of blocks that use the S5 model instead of the S4 model. We set the performance of ViS4mer as the baseline and report the average improvement percentage over 9 tasks on the LVU dataset [72]. Compared to using S4 models only, our method achieves substantial improvement by including more S5 models. However, less redundant sequences reduce our S5 model's performance gain, which lessens the advantage of stacking additional S5 blocks.
|
2303.14xxx/2303.14526/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c1dfc75423c9ab179a59acd7c171806851ae9e4d3f131a91455aaa28f8102f31
|
| 3 |
+
size 547298
|
2303.14xxx/2303.14526/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14535/67d61c55-778e-4915-b385-6adbd8fac2a2_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14535/67d61c55-778e-4915-b385-6adbd8fac2a2_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14535/67d61c55-778e-4915-b385-6adbd8fac2a2_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:798b9ad6a1a47c8e64f469222b8f9db8d1a8a6dc5e663c3b43956d33422fec14
|
| 3 |
+
size 12923266
|
2303.14xxx/2303.14535/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14535/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9e3d3c728da98c24797c2cde6c7518dec95429aedf411b5a0651f71714c27aa2
|
| 3 |
+
size 1711380
|
2303.14xxx/2303.14535/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14536/ee265f86-4bbe-47c9-bde5-4a431a4b61da_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14536/ee265f86-4bbe-47c9-bde5-4a431a4b61da_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14536/ee265f86-4bbe-47c9-bde5-4a431a4b61da_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:069d4ad543124928a6029c72828a708b324436cb144f5c7fc74207d4e75fab57
|
| 3 |
+
size 3078210
|
2303.14xxx/2303.14536/full.md
ADDED
|
@@ -0,0 +1,656 @@
| 1 |
+
# SUDS: Scalable Urban Dynamic Scenes
|
| 2 |
+
|
| 3 |
+
Haithem Turki$^{1*}$ Jason Y. Zhang$^{1}$ Francesco Ferroni$^{2}$ Deva Ramanan

$^{1}$Carnegie Mellon University $^{2}$Argo AI
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
We extend neural radiance fields (NeRFs) to dynamic large-scale urban scenes. Prior work tends to reconstruct single video clips of short durations (up to 10 seconds). Two reasons are that such methods (a) tend to scale linearly with the number of moving objects and input videos because a separate model is built for each and (b) tend to require supervision via 3D bounding boxes and panoptic labels, obtained manually or via category-specific models. As a step towards truly open-world reconstructions of dynamic cities, we introduce two key innovations: (a) we factorize the scene into three separate hash table data structures to efficiently encode static, dynamic, and far-field radiance fields, and (b) we make use of unlabeled target signals consisting of RGB images, sparse LiDAR, off-the-shelf self-supervised 2D descriptors, and most importantly, 2D optical flow. Operationalizing such inputs via photometric, geometric, and feature-metric reconstruction losses enables SUDS to decompose dynamic scenes into the static background, individual objects, and their motions. When combined with our multi-branch table representation, such reconstructions can be scaled to tens of thousands of objects across 1.2 million frames from 1700 videos spanning geospatial footprints of hundreds of kilometers, (to our knowledge) the largest dynamic NeRF built to date. We present qualitative initial results on a variety of tasks enabled by our representations, including novel-view synthesis of dynamic urban scenes, unsupervised 3D instance segmentation, and unsupervised 3D cuboid detection. To compare to prior work, we also evaluate on KITTI and Virtual KITTI 2, surpassing state-of-the-art methods that rely on ground truth 3D bounding box annotations while being $10x$ quicker to train.
|
| 14 |
+
|
| 15 |
+
# 1. Introduction
|
| 16 |
+
|
| 17 |
+
Scalable geometric reconstructions of cities have transformed our daily lives, with tools such as Google Maps and Streetview [6] becoming fundamental to how we navigate and interact with our environments. A watershed moment
|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
|
| 21 |
+

|
| 22 |
+
RGB
|
| 23 |
+
|
| 24 |
+

|
| 25 |
+
Static
|
| 26 |
+
|
| 27 |
+

|
| 28 |
+
Dynamic
|
| 29 |
+
|
| 30 |
+

|
| 31 |
+
Depth
|
| 32 |
+
Figure 1. SUDS. We scale neural reconstructions to city scale by dividing the area into multiple cells and training hash table representations for each. We show our full city-scale reconstruction above and the derived representations below. Unlike prior methods, our approach handles dynamism across multiple videos, disentangling dynamic objects from static background and modeling shadow effects. We use unlabeled inputs to learn scene flow and semantic predictions, enabling category- and object-level scene manipulation.
|
| 33 |
+
|
| 34 |
+

|
| 35 |
+
Instances
|
| 36 |
+
|
| 37 |
+

|
| 38 |
+
Flow
|
| 39 |
+
|
| 40 |
+
in the development of such technology was the ability to scale structure-from-motion (SfM) algorithms to city-scale footprints [4]. Since then, the advent of Neural Radiance Fields (NeRFs) [33] has transformed this domain by allowing for photorealistic interaction with a reconstructed scene via view synthesis.
|
| 41 |
+
|
| 42 |
+
Recent works have attempted to scale such representations to neighborhood-scale reconstructions for virtual drive-throughs [47] and photorealistic fly-throughs [52]. However, these maps remain static and frozen in time. This makes capturing bustling human environments—complete with moving vehicles, pedestrians, and objects—impossible, limiting the usefulness of the representation.
|
| 45 |
+
|
| 46 |
+
Challenges. One possible solution is a dynamic NeRF that conditions on time or warps a canonical space with a time-dependent deformation [38]. However, reconstructing dynamic scenes is notoriously challenging because the problem is inherently under-constrained, particularly when input data is constrained to limited viewpoints, as is typical from egocentric video capture [20]. One attractive solution is to scale up reconstructions to many videos, perhaps collected at different days (e.g., by an autonomous vehicle fleet). However, this creates additional challenges in jointly modeling fixed geometry that holds for all time (such as buildings), geometry that is locally static but transient across the videos (such as a parked car), and geometry that is truly dynamic (such as a moving person).
|
| 47 |
+
|
| 48 |
+
SUDS. In this paper, we propose SUDS: Scalable Urban Dynamic Scenes, a 4D representation that targets both scale and dynamism. Our key insight is twofold; (1) SUDS makes use of a rich suite of informative but freely available input signals, such as LiDAR depth measurements and optical flow. Other dynamic scene representations [27, 37] require supervised inputs such as panoptic segmentation labels or bounding boxes, which are difficult to acquire with high accuracy for our in-the-wild captures. (2) SUDS decomposes the world into 3 components: a static branch that models stationary topography that is consistent across videos, a dynamic branch that handles both transient (e.g., parked cars) and truly dynamic objects (e.g., pedestrians), and an environment map that handles far-field objects and sky. We model each branch using a multi-resolution hash table with scene partitioning, allowing SUDS to scale to an entire city spanning over $100km^2$ .
|
| 49 |
+
|
| 50 |
+
Contributions. We make the following contributions: (1) to our knowledge, we build the first large-scale dynamic NeRF, (2) we introduce a scalable three-branch hash table representation for 4D reconstruction, (3) we present state-of-the-art reconstruction on 3 different datasets. Finally, (4) we showcase a variety of downstream tasks enabled by our representation, including free-viewpoint synthesis, 3D scene flow estimation, and even unsupervised instance segmentation and 3D cuboid detection.
|
| 51 |
+
|
| 52 |
+
# 2. Related Work
|
| 53 |
+
|
| 54 |
+
The original Neural Radiance Fields (NeRF) paper [33] inspired a wide body of follow-up work based on the original approach. Below, we describe a non-exhaustive list of such approaches along axes relevant to our work.
|
| 55 |
+
|
| 56 |
+
Scale. The original NeRF operated with bounded scenes. $\mathrm{NeRF}++$ [63] and mip-NeRF 360 [7] use non-linear scene parameterization to model unbounded scenes. However, scaling up the size of the scene with a fixed-size MLP leads to blurry details and training instability, while the cost of naively increasing the size of the MLP quickly becomes intractable. BungeeNeRF [58] introduced a coarse-to-fine approach that progressively adds more capacity to the network representation. Block-NeRF [47] and Mega-NeRF [52] partition the scene spatially and train separate NeRFs for each partition. To model appearance variation, they incorporate per-image embeddings like NeRF-W [31]. Our approach similarly partitions the scene into sub-NeRFs, making use of depth to improve partition efficiency and scaling over an area 200x larger than Block-NeRF's Alamo Square Dataset. Both of these methods work only on static scenes.
|
| 59 |
+
|
| 60 |
+
Dynamics. Neural 3D Video Synthesis [28] and Spacetime Neural Irradiance Fields [57] add time as an input to handle dynamic scenes. Similar to our work, NSFF [29], NeRFlow [15], and DyNeRF [19] incorporate 2D optical flow input and warping-based regularization losses to enforce plausible transitions between observed frames. Multiple methods [38-40, 50] instead disentangle scenes into a canonical template and per-frame deformation field. BANMo [60] further incorporates deformable shape models and canonical embeddings to train articulated 3D models from multiple videos. These methods focus on single-object scenes, and all but [28] and [60] use single video sequences.
|
| 61 |
+
|
| 62 |
+
While many of the previous works use segmentation data to factorize dynamic from static objects, $\mathrm{D}^2\mathrm{NeRF}$ [56] does this automatically through regularization and explicitly handling shadows. Neural Groundplans [44] uses synthetic data to do this decomposition from a single image. We borrow some of these ideas and scale beyond synthetic and indoor scenes.
|
| 63 |
+
|
| 64 |
+
Object-centric approaches. Several approaches [24,36, 37, 59, 61, 62] represent scenes as the composition of per-object NeRF models and a background model. NSG [37] is most similar to us as it also targets automotive data but cannot handle ego-motion as our approach can. None of these methods target multi-video representations and are fundamentally constrained by the memory required to represent each object, with NSG needing over 1TB of memory to represent a 30 second video in our experience.
|
| 65 |
+
|
| 66 |
+
Semantics. Follow-up works have explored additional semantic outputs in addition to predicting color. Semantic-NeRF [65] adds an extra head to NeRF that predicts extra semantic category logits for any 3D position. Panoptic-NeRF [16] and Panoptic Neural Fields [27] extend this to produce panoptic segmentations and the latter uses a similar bounding-box based object and background decomposition as NSG. NeSF [53] generalizes the notion of a semantic field to unobserved scenes. As these methods are highly reliant on accurate annotations which are difficult to reliably obtain in the wild at our scale, we instead use a similar approach to recent works [26, 51] that distill the outputs of 2D self-supervised feature descriptors into 3D radiance fields to enable semantic understanding without the use of human
|
| 67 |
+
|
| 68 |
+

|
| 69 |
+
(a) Voxel Lookup
|
| 70 |
+
|
| 71 |
+
(b) Indexing
|
| 72 |
+
Figure 2. Model Architecture. (a) For a given input coordinate, we find the surrounding voxels at $L$ resolution levels for both the static and dynamic branches (far-field branch omitted for clarity). (b) We assign indices to their corners by hashing based on position in the static branch and position, time, and video id in the dynamic branch. We look up the feature vectors corresponding to the corners and interpolate according to the relative position of the input coordinate within the voxel. (c) We concatenate the result of each level, along with auxiliary inputs such as viewing direction, and pass the resulting vector into an MLP to obtain per-branch color, density, and feature logits along with scene flow and the shadow ratio. (d) We blend color, opacity, and feature logits as the weighted sum of the branches.
|
| 73 |
+

|
| 74 |
+
$(\mathbf{c},\sigma ,\phi ,s_{\mathbf{t} - 1},s_{\mathbf{t} + 1})$
|
| 75 |
+
(d) Output Blending
|
| 76 |
+
|
| 77 |
+
labels and extend them to larger dynamic settings.
|
| 78 |
+
|
| 79 |
+
Fast training. The original NeRF took 1-2 days to train. Plenoxels [43] and DVGO [46] directly optimize a voxel representation instead of an MLP to train in minutes or even seconds. TensoRF [11] stores its representation as the outer product of low-rank tensors, reducing memory usage. Instant-NGP [35] takes this further by encoding features in a multi-resolution hash table, allowing training and rendering to happen in real-time. We use these tables as the base block of our three-branch representation and use our own hashing method to support dynamics across multiple videos.
|
| 80 |
+
|
| 81 |
+
Depth. Depth provides a valuable supervisory signal for learning high-quality geometry. DS-NeRF [14] and Dense Depth Priors [42] incorporate noisy point clouds obtained by structure from motion (SfM) in the loss function during optimization. Urban Radiance Fields [41] supervises with collected LiDAR data. We also use LiDAR but demonstrate results on dynamic environments.
|
| 82 |
+
|
| 83 |
+
# 3. Approach
|
| 84 |
+
|
| 85 |
+
# 3.1. Inputs
|
| 86 |
+
|
| 87 |
+
Our goal is to learn a global representation that facilitates free-viewpoint rendering, semantic decomposition, and 3D scene flow at arbitrary poses and time steps. Our method takes as input ordered RGB images from $N$ videos (taken at different days with diverse weather and lighting conditions) and their associated camera poses. Crucially, we make use of additional data as "free" sources of supervision given contemporary sensor rigs and feature descriptors. Specifically, we use (1) aligned sparse LiDAR depth measurements, (2) 2D self-supervised pixel (DINO [10]) descriptors to enable semantic manipulation, and (3) 2D optical flow predictions to model scene dynamics. All model inputs are generated without any human labeling or intervention.
|
| 88 |
+
|
| 89 |
+
# 3.2. Representation
|
| 90 |
+
|
| 91 |
+
Preliminaries. We build upon NeRF [33], which represents a scene within a continuous volumetric radiance field that captures both geometry and view-dependent appearance. It encodes the scene within the weights of a multilayer perceptron (MLP). At render time, NeRF projects a camera ray $\mathbf{r}$ for each image pixel and samples along the ray, querying the MLP at sample position $\mathbf{x}_i$ and ray viewing direction $\mathbf{d}$ to obtain opacity and color values $\sigma_i$ and $\mathbf{c}_i$ . It then composites a color prediction $\hat{C}(\mathbf{r})$ for the ray using numerical quadrature $\sum_{i=0}^{N-1} T_i (1 - \exp(-\sigma_i \delta_i)) \mathbf{c}_i$ , where $T_i = \exp(-\sum_{j=0}^{i-1} \sigma_j \delta_j)$ and $\delta_i$ is the distance between samples. The training process optimizes the model by sampling batches $R$ of image pixels and minimizing the loss function $\sum_{\mathbf{r} \in \mathcal{R}} \|C(\mathbf{r}) - \hat{C}(\mathbf{r})\|^2$ . NeRF samples rays through a two-stage hierarchical sampling process and uses frequency encoding to capture high-frequency details. We refer the reader to [33] for more details.
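For intuition, the quadrature-based compositing described above can be sketched in a few lines of NumPy. This is a generic illustration of standard NeRF rendering weights, not the authors' implementation, and the array names are ours.

```python
import numpy as np

def composite_ray(sigmas: np.ndarray, colors: np.ndarray, deltas: np.ndarray) -> np.ndarray:
    """Numerical quadrature along one ray.

    sigmas: (N,) densities at the sampled points
    colors: (N, 3) radiance at the sampled points
    deltas: (N,) distances between consecutive samples
    """
    alphas = 1.0 - np.exp(-sigmas * deltas)                          # per-sample opacity
    # T_i = exp(-sum_{j<i} sigma_j * delta_j): transmittance up to sample i
    trans = np.exp(-np.concatenate([[0.0], np.cumsum(sigmas * deltas)[:-1]]))
    weights = trans * alphas                                          # contribution of each sample
    return (weights[:, None] * colors).sum(axis=0)                    # composited RGB
```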
|
| 92 |
+
|
| 93 |
+
Scene composition. To model large-scale dynamic environments, SUDS factorizes the scene into three branches: (a) a static branch containing non-moving topography consistent across videos, (b) a dynamic branch to disentangle video-specific objects [19,29,56], moving or otherwise, and (c) a far-field environment map to represent far-away objects and the sky, which we found important to separately model in large-scale urban scenes [41, 52, 63].
|
| 94 |
+
|
| 95 |
+
However, conventional NeRF training with MLPs is computationally prohibitive at our target scales. Inspired by Instant-NGP [35], we implement each branch using multiresolution hash tables of $F$ -dimensional feature vectors followed by a small MLP, along with our own hash functions to index across videos.
|
| 96 |
+
|
| 97 |
+
Hash tables (Fig. 2). For a given input coordinate $(\mathbf{x}, \mathbf{d}, \mathbf{t}, \mathbf{vid})$ denoting the position $\mathbf{x} \in \mathbb{R}^3$, viewing direction $\mathbf{d} \in \mathbb{R}^3$, frame index $\mathbf{t} \in \{1, \dots, T\}$, and video id $\mathbf{vid} \in \{1, \dots, N\}$, we find the surrounding voxels in each table at $l \in L$ resolution levels, doubling the resolution between levels, which we denote as $\mathbf{v}_{l,s}$, $\mathbf{v}_{l,d}$, $\mathbf{v}_{l,e}$ for the static, dynamic, and far-field branches. The static branch makes use of 3D spatial voxels $\mathbf{v}_{l,s}$, while the dynamic branch makes use of 4D spacetime voxels $\mathbf{v}_{l,d}$. Finally, the far-field branch makes use of 3D voxels $\mathbf{v}_{l,e}$ (implemented via normalized 3D direction vectors) that index an environment map. Similar to Instant-NGP [35], rather than storing features at voxel corners, we compute hash indices $\mathbf{i}_{l,s}$ (or $\mathbf{i}_{l,d}$ or $\mathbf{i}_{l,e}$) for each corner with the following hash functions:
|
| 100 |
+
|
| 101 |
+
$$
|
| 102 |
+
\mathbf{i}_{l,s} = \operatorname{static\_hash}\left(\operatorname{space}(\mathbf{v}_{l,s})\right) \tag{1}
|
| 103 |
+
$$
|
| 104 |
+
|
| 105 |
+
$$
|
| 106 |
+
\mathbf{i}_{l,d} = \operatorname{dynamic\_hash}\left(\operatorname{space}(\mathbf{v}_{l,d}), \operatorname{time}(\mathbf{v}_{l,d}), \mathbf{vid}\right) \tag{2}
|
| 107 |
+
$$
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
\mathbf{i}_{l,e} = \operatorname{env\_hash}\left(\operatorname{dir}(\mathbf{v}_{l,e}), \mathbf{vid}\right) \tag{3}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
We linearly interpolate features up to the nearest voxel vertices (but now relying on quadlinear interpolation for the dynamic 4D branch) and rely on gradient averaging to handle hash collisions. Finally, to model the fact that different videos likely contain distinct moving objects and illumination conditions, we add vid as an auxiliary input to the hash, but do not use it for interpolation (since averaging across distinct movers is unnatural). From this perspective, we leverage hashing to effectively index separate interpolating functions for each video, without a linear growth in memory with the number of videos. We concatenate the result of each level into a feature vector $f \in \mathbb{R}^{LF}$ , along with auxiliary inputs such as viewing direction, and pass the resulting vector into an MLP to obtain per-branch outputs.
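A minimal sketch of this kind of spatio-temporal hashing is given below. The hash primes and table layout are assumptions on our part (Instant-NGP-style XOR of prime-multiplied coordinates), not the exact functions used by the authors; the video id is simply appended as an extra hashed coordinate.

```python
import torch

PRIMES = (1, 2_654_435_761, 805_459_861, 3_674_653_429, 2_097_192_037)  # assumed hash primes

def spacetime_hash(corner: torch.Tensor, vid: torch.Tensor, table_size: int) -> torch.Tensor:
    """Hash integer voxel-corner coordinates into a feature-table index.

    corner: (..., D) integer grid coordinates; D=3 for the static branch (x, y, z)
            and D=4 for the dynamic branch (x, y, z, t).
    vid:    (...,) integer video id, used as an extra hashed input so that different
            videos index different feature vectors without growing the table.
    """
    coords = torch.cat([corner, vid[..., None]], dim=-1)   # append video id as a coordinate
    h = torch.zeros_like(coords[..., 0])
    for d in range(coords.shape[-1]):
        h = h ^ (coords[..., d] * PRIMES[d])               # XOR of prime-multiplied coordinates
    return h % table_size                                   # index into the hash table

# Example: hash a dynamic-branch corner (x, y, z, t) for video id 3.
idx = spacetime_hash(torch.tensor([[12, 40, 7, 5]]), torch.tensor([3]), table_size=2**19)
```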
|
| 114 |
+
|
| 115 |
+
Static branch. We generate RGB images by combining the outputs of our three branches. The static branch maps the feature vector obtained from the hash table into a view-dependent color $\mathbf{c}_s$ and a view-independent density $\sigma_s$ . To model lighting variations which could be dramatic across videos but smooth within a video, we condition on a latent embedding computed as a product of a video-specific matrix $A_{vid}$ and a fourier-encoded time index $\mathcal{F}(t)$ (as in [60]):
|
| 116 |
+
|
| 117 |
+
$$
|
| 118 |
+
\sigma_ {s} (\mathbf {x}) \in \mathbb {R} \tag {4}
|
| 119 |
+
$$
|
| 120 |
+
|
| 121 |
+
$$
|
| 122 |
+
\mathbf {c} _ {s} (\mathbf {x}, \mathbf {d}, A _ {v i d} \mathcal {F} (t)) \in \mathbb {R} ^ {3}. \tag {5}
|
| 123 |
+
$$
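The latent appearance code $A_{vid}\mathcal{F}(t)$ used in Eq. (5) can be sketched as follows; the number of Fourier frequencies and the code dimension are illustrative assumptions, not the paper's values.

```python
import torch

def video_time_embedding(A_vid: torch.Tensor, t: float, num_freqs: int = 6) -> torch.Tensor:
    """Latent appearance code: per-video matrix times a Fourier encoding of the frame index.

    A_vid: (D, 2 * num_freqs) learned matrix for one video (dimensions are assumptions)
    t:     normalized frame index in [0, 1]
    """
    freqs = 2.0 ** torch.arange(num_freqs) * torch.pi
    enc = torch.cat([torch.sin(freqs * t), torch.cos(freqs * t)])  # F(t), shape (2 * num_freqs,)
    return A_vid @ enc                                              # code fed to the static color MLP
```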
|
| 124 |
+
|
| 125 |
+
Dynamic branch. While the static branch assumes the density $\sigma_{s}$ is static, the dynamic branch allows both the density $\sigma_{d}$ and color $\mathbf{c}_d$ to depend on time (and video). We therefore omit the latent code when computing the dynamic radiance. Because we find shadows to play a crucial role in the appearance of urban scenes (Fig. 3), we explicitly model a shadow field of scalar values $\rho_{d} \in [0,1]$ , used to scale down the static color $\mathbf{c}_{s}$ (as done in [56]):
|
| 126 |
+
|
| 127 |
+
$$
|
| 128 |
+
\sigma_ {d} (\mathbf {x}, \mathbf {t}, \mathbf {v i d}) \in \mathbb {R} \tag {6}
|
| 129 |
+
$$
|
| 130 |
+
|
| 131 |
+
$$
|
| 132 |
+
\rho_ {d} (\mathbf {x}, \mathbf {t}, \mathbf {v i d}) \in [ 0, 1 ] \tag {7}
|
| 133 |
+
$$
|
| 134 |
+
|
| 135 |
+
$$
|
| 136 |
+
\mathbf {c} _ {d} (\mathbf {x}, \mathbf {t}, \mathbf {v i d}, \mathbf {d}) \in \mathbb {R} ^ {3} \tag {8}
|
| 137 |
+
$$
|
| 138 |
+
|
| 139 |
+

|
| 140 |
+
Full RGB
|
| 141 |
+
|
| 142 |
+

|
| 143 |
+
Depth
|
| 144 |
+
|
| 145 |
+

|
| 146 |
+
|
| 147 |
+

|
| 148 |
+
Shadow Intensity
|
| 149 |
+
|
| 150 |
+

|
| 151 |
+
RGB (Without Shadow)
|
| 152 |
+
Dynamic RGB
|
| 153 |
+
|
| 154 |
+

|
| 155 |
+
Static RGB
|
| 156 |
+
|
| 157 |
+

|
| 158 |
+
(a) Shadow Field
|
| 159 |
+
Full RGB
|
| 160 |
+
|
| 161 |
+

|
| 162 |
+
Depth
|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
Dynamic RGB
|
| 166 |
+
(b) No Shadow Field
|
| 167 |
+
Figure 3. Shadows. We learn an explicit shadow field (a) as a pointwise reduction on static color, enabling better depth reconstruction and static/dynamic factorization than without (b).
|
| 168 |
+
|
| 169 |
+

|
| 170 |
+
Static RGB
|
| 171 |
+
|
| 172 |
+
Far-field branch. Because the sky requires reasoning about far-field radiance and because it can change dramatically across videos, we model far-field radiance with an environment map $\mathbf{c}_e(\mathbf{d},\mathbf{vid})\in \mathbb{R}^3$ that depends on viewing direction d [22,41] and a video id vid.
|
| 173 |
+
|
| 174 |
+
Rendering. We derive a single density and radiance value for any position by computing the weighted sum of the static and dynamic components, combined with the pointwise shadow reduction:
|
| 175 |
+
|
| 176 |
+
$$
|
| 177 |
+
\begin{aligned} \sigma(\mathbf{x}, \mathbf{t}, \mathbf{vid}) &= \sigma_{s}(\mathbf{x}) + \sigma_{d}(\mathbf{x}, \mathbf{t}, \mathbf{vid}) \quad &(9) \\ \mathbf{c}(\mathbf{x}, \mathbf{t}, \mathbf{vid}, \mathbf{d}) &= \frac{\sigma_{s}}{\sigma}\,(1 - \rho_{d})\,\mathbf{c}_{s}(\mathbf{x}, \mathbf{d}, A_{vid}\mathcal{F}(t)) + \frac{\sigma_{d}}{\sigma}\,\mathbf{c}_{d}(\mathbf{x}, \mathbf{t}, \mathbf{vid}, \mathbf{d}) \quad &(10) \end{aligned}
|
| 178 |
+
$$
|
| 179 |
+
|
| 180 |
+
We then calculate the color $\hat{C}$ for a camera ray $\mathbf{r}$ with direction $\mathbf{d}$ at a given frame $\mathbf{t}$ and video vid by accumulating the transmittance along sampled points $\mathbf{r}(t)$ along the ray, forcing the ray to intersect the far-field environment map if it does not hit geometry within the foreground:
|
| 181 |
+
|
| 182 |
+
$$
|
| 183 |
+
\hat{C}(\mathbf{r}, \mathbf{t}, \mathbf{vid}) = \int_{0}^{+\infty} T(t)\,\sigma(\mathbf{r}(t), \mathbf{t}, \mathbf{vid})\,\mathbf{c}(\mathbf{r}(t), \mathbf{t}, \mathbf{vid}, \mathbf{d})\,dt + T(+\infty)\,\mathbf{c}_{e}(\mathbf{d}, \mathbf{vid}), \tag{11}
|
| 184 |
+
$$
|
| 185 |
+
|
| 186 |
+
$$
|
| 187 |
+
\text{where } T(t) = \exp\left(-\int_{0}^{t} \sigma(\mathbf{r}(s), \mathbf{t}, \mathbf{vid})\,ds\right). \tag{12}
|
| 188 |
+
$$
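The per-point blending of the static and dynamic branches in Eqs. (9)-(10), including the shadow reduction, can be sketched as follows; tensor names and shapes are illustrative assumptions rather than the released implementation.

```python
import torch

def blend_branches(sigma_s, color_s, sigma_d, color_d, shadow_d, eps: float = 1e-8):
    """Blend static and dynamic branch outputs at sampled points (Eqs. 9-10).

    sigma_s, sigma_d: (N,) static / dynamic densities
    color_s, color_d: (N, 3) static / dynamic radiance
    shadow_d:         (N,) shadow ratio in [0, 1] that darkens the static color
    """
    sigma = sigma_s + sigma_d                                    # Eq. (9)
    w_s = sigma_s / (sigma + eps)
    w_d = sigma_d / (sigma + eps)
    color = w_s[:, None] * (1.0 - shadow_d[:, None]) * color_s \
          + w_d[:, None] * color_d                               # Eq. (10)
    return sigma, color
```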
|
| 189 |
+
|
| 190 |
+
Feature distillation. We build semantic awareness into SUDS to enable the open-world tasks described in Sec. 4.2. Similar to recent work [26, 51], we distill the outputs of a self-supervised 2D feature extractor, namely DINO [10], as a teacher model into our network. For a feature extractor that transforms an image into a dense $\mathbb{R}^{H\times W\times C}$ feature grid, we add a $C$ -dimensional output head to each of our branches:
|
| 191 |
+
|
| 192 |
+
$$
|
| 193 |
+
\Phi_ {s} (\mathbf {x}) \in \mathbb {R} ^ {C} \tag {13}
|
| 194 |
+
$$
|
| 195 |
+
|
| 196 |
+
$$
|
| 197 |
+
\Phi_ {d} (\mathbf {x}, \mathbf {t}, \mathbf {v i d}) \in \mathbb {R} ^ {C} \tag {14}
|
| 198 |
+
$$
|
| 199 |
+
|
| 200 |
+
$$
|
| 201 |
+
\Phi_ {e} (\mathbf {d}, \mathbf {v i d}) \in \mathbb {R} ^ {C}, \tag {15}
|
| 202 |
+
$$
|
| 203 |
+
|
| 204 |
+
which are combined into a single value $\Phi$ at any 3D location and rendered into $\hat{F} (\mathbf{r})$ per camera ray, following the equations for color (10, 11).
|
| 205 |
+
|
| 206 |
+
Scene flow. We train our model to predict 3D scene flow and model scene dynamics. Inspired by previous work [15, 19, 29], we augment our dynamic branch to predict forward and backward 3D scene flow vectors $s_{t' \in [-1,1]}(\mathbf{x},\mathbf{t},\mathbf{v}\mathbf{i}\mathbf{d}) \in \mathbb{R}^3$ . We make use of these vectors to enforce consistency between observed time steps through multiple loss terms (Sec. 3.3), which we find crucial to generating plausible renderings at novel time steps (Table 4).
|
| 207 |
+
|
| 208 |
+
Spatial partitioning. We scale our representation to arbitrarily large environments by decomposing the scene into individually trained models [47, 52], each with its own static, dynamic, and far-field branch. Intuitively, the reconstruction for neighborhood X can be done largely independently of the reconstruction in neighborhood Y, provided one can assign the relevant input data to each reconstruction. To do so, we follow the approach of Mega-NeRF [52] and split the scene into $K$ spatial cells with centroids $k \in \mathbb{R}^3$ . Crucially, we generate separate training datasets for each spatial cell by making use of visibility reasoning [17]. Mega-NeRF includes only those datapoints whose associated camera rays intersect the spatial cell. However, this may still include datapoints that are not visible due to an intervening occluder (e.g., a particular camera in neighborhood X can be pointed at neighborhood Y, but may not see anything there due to occluding buildings). To remedy this, we make use of depth measurements to prune irrelevant pixel rays that do not terminate within the spatial cell of interest (making use of nearest-neighbor interpolation to impute depth for pixels without a LiDAR depth measurement). This further reduces the size of each trainset by $2x$ relative to Mega-NeRF. Finally, given such separate reconstructions, one can still produce a globally consistent rendering by querying the appropriate spatial cell when sampling points along new-view rays (as in [52]).
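A minimal sketch of the depth-based ray pruning used when building per-cell training sets is shown below; the function name is ours and bounding each cell by a radius around its centroid is our simplifying assumption, not necessarily how cell membership is tested in the paper.

```python
import numpy as np

def assign_rays_to_cell(origins, directions, depths, centroid, radius):
    """Keep only rays whose (LiDAR-derived) termination point lies inside a spatial cell.

    origins, directions: (N, 3) camera ray origins and unit directions
    depths:              (N,) per-ray depth (LiDAR, or nearest-neighbor imputed)
    centroid, radius:    center of the spatial cell and the radius used to bound it
    """
    endpoints = origins + depths[:, None] * directions       # 3D point where each ray terminates
    inside = np.linalg.norm(endpoints - centroid, axis=-1) < radius
    return inside                                             # boolean mask of rays kept for this cell
```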
|
| 209 |
+
|
| 210 |
+
# 3.3. Optimization
|
| 211 |
+
|
| 212 |
+
We jointly optimize all three of our model branches along with the per-video weight matrices $A_{vid}$ by sampling
|
| 213 |
+
|
| 214 |
+

|
| 215 |
+
RGB
|
| 216 |
+
|
| 217 |
+

|
| 218 |
+
Features
|
| 219 |
+
|
| 220 |
+

|
| 221 |
+
Forward Flow (Input)
|
| 222 |
+
|
| 223 |
+

|
| 224 |
+
Backward Flow (Input)
|
| 225 |
+
|
| 226 |
+

|
| 227 |
+
Forward Flow (Predicted)
|
| 228 |
+
Figure 4. Scene Flow. We minimize the photometric and feature-metric loss of warped renderings relative to ground truth inputs (top). We use 2D optical flow from off-the-shelf estimators or sparse correspondences computed directly from 2D DINO features [5] (middle) to supervise our flow predictions (bottom).
|
| 229 |
+
|
| 230 |
+

|
| 231 |
+
Backward Flow (Predicted)
|
| 232 |
+
|
| 233 |
+
random batches of rays across our $N$ input videos and minimizing the following loss:
|
| 234 |
+
|
| 235 |
+
$$
\mathcal{L} = \underbrace{\left(\mathcal{L}_{c} + \lambda_{f} \mathcal{L}_{f} + \lambda_{d} \mathcal{L}_{d} + \lambda_{o} \mathcal{L}_{o}\right)}_{\text{reconstruction losses}} + \underbrace{\left(\mathcal{L}_{c}^{w} + \lambda_{f} \mathcal{L}_{f}^{w}\right)}_{\text{warping losses}} + \lambda_{flo} \underbrace{\left(\mathcal{L}_{cyc} + \mathcal{L}_{sm} + \mathcal{L}_{slo}\right)}_{\text{flow losses}} + \underbrace{\left(\lambda_{e} \mathcal{L}_{e} + \lambda_{d} \mathcal{L}_{d}\right)}_{\text{static-dynamic factorization}} + \lambda_{\rho} \mathcal{L}_{\rho} \tag{16}
$$
|
| 242 |
+
|
| 243 |
+
Reconstruction losses. We minimize the L2 photometric loss $\mathcal{L}_c(\mathbf{r}) = \left\| C(\mathbf{r}) - \hat{C} (\mathbf{r})\right\| ^2$ as in the original NeRF [33]. We similarly minimize the L1 difference $\mathcal{L}_f(\mathbf{r}) = \left\| F(\mathbf{r}) - \hat{F} (\mathbf{r})\right\| _1$ between the feature outputs of the teacher model and those of our network.
|
| 244 |
+
|
| 245 |
+
To make use of our depth measurements, we project the LiDAR sweeps onto the camera plane and compare the expected depth $\hat{D}(\mathbf{r})$ with the measurement $D(\mathbf{r})$ [14, 41]:
|
| 246 |
+
|
| 247 |
+
$$
|
| 248 |
+
\mathcal {L} _ {d} (\mathbf {r}) = \left\| D (\mathbf {r}) - \hat {D} (\mathbf {r}) \right\| ^ {2} \tag {17}
|
| 249 |
+
$$
|
| 250 |
+
|
| 251 |
+
$$
|
| 252 |
+
\text{where } \hat{D}(\mathbf{r}) = \int_{0}^{+\infty} T(s)\, \sigma(\mathbf{r}(s))\, s \, ds \tag{18}
|
| 253 |
+
$$
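For illustration, below is a minimal PyTorch sketch of the reconstruction losses for a single ray, discretizing the volume-rendering integrals (including the expected depth of Eq. 18) by quadrature. Variable names and the quadrature scheme are assumptions for exposition, not the paper's code.

```python
import torch

def render_ray(sigma, rgb, feat, dists, t_vals):
    """Discretize the rendering integrals for one ray.

    sigma: (S,) densities, rgb: (S, 3) colors, feat: (S, C) features,
    dists: (S,) segment lengths, t_vals: (S,) sample distances from the origin.
    """
    alpha = 1.0 - torch.exp(-sigma * dists)              # per-sample opacity
    trans = torch.cumprod(1.0 - alpha + 1e-10, dim=0)
    trans = torch.cat([torch.ones(1), trans[:-1]])       # transmittance T
    w = trans * alpha                                     # rendering weights
    c_hat = (w[:, None] * rgb).sum(0)                     # expected color
    f_hat = (w[:, None] * feat).sum(0)                    # expected feature
    d_hat = (w * t_vals).sum()                            # expected depth (Eq. 18)
    return c_hat, f_hat, d_hat

def reconstruction_losses(c_hat, f_hat, d_hat, c_gt, f_gt, d_gt, lam_f=1.0, lam_d=1.0):
    l_c = ((c_gt - c_hat) ** 2).sum()     # L2 photometric loss
    l_f = (f_gt - f_hat).abs().sum()      # L1 feature loss vs. the DINO teacher
    l_d = (d_gt - d_hat) ** 2             # L2 depth loss vs. LiDAR (Eq. 17)
    return l_c + lam_f * l_f + lam_d * l_d
```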
|
| 254 |
+
|
| 255 |
+
Flow. We supervise our 3D scene flow predictions based on 2D optical flow (Sec. 4.1). We generate a 2D displacement vector for each camera ray by first predicting its position in 3D space at the neighboring time step as the density-weighted sum of flow-advected samples along the ray:
|
| 256 |
+
|
| 257 |
+
$$
|
| 258 |
+
\hat{X}_{t^{\prime}}(\mathbf{r}) = \int_{0}^{+\infty} T(t)\, \sigma(\mathbf{r}(t)) \left(\mathbf{r}(t) + s_{t^{\prime}}(\mathbf{r}(t))\right) dt \tag{19}
|
| 259 |
+
$$
|
| 260 |
+
|
| 261 |
+
which we then "render" into 2D using the camera matrix of the neighboring frame index. We minimize its distance to the position implied by the observed optical flow via $\mathcal{L}_o(\mathbf{r}) = \sum_{t' \in [-1,1]} \left\| X_{t'}(\mathbf{r}) - \hat{X}_{t'}(\mathbf{r}) \right\|_1$, and anneal $\lambda_o$ over time as these estimates are noisy.
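A possible per-ray sketch of this supervision is shown below: the expected advected 3D position of Eq. 19 is projected with the neighboring frame's camera and compared to the pixel displaced by the observed flow. The pinhole projection helper and tensor shapes are assumptions.

```python
import torch

def expected_advected_position(w, pts, scene_flow):
    """Eq. 19: rendering-weighted expected 3D position after advection.

    w: (S,) rendering weights, pts: (S, 3) samples along the ray,
    scene_flow: (S, 3) predicted flow to the neighboring time step.
    """
    return (w[:, None] * (pts + scene_flow)).sum(0)

def optical_flow_loss(x_hat_3d, K, w2c, pixel, flow_2d_obs):
    """Project the advected position (assumed pinhole intrinsics K, 4x4
    world-to-camera w2c of the neighboring frame) and take the L1 distance
    to the pixel position implied by the observed 2D optical flow."""
    x_cam = w2c[:3, :3] @ x_hat_3d + w2c[:3, 3]
    uvw = K @ x_cam
    uv = uvw[:2] / uvw[2]
    return (uv - (pixel + flow_2d_obs)).abs().sum()
```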
|
| 264 |
+
|
| 265 |
+
3D warping. The above loss ensures that rendered 3D flow will be consistent with the observed 2D flow. We also found it useful to enforce 3D color (and feature) constancy; i.e., colors remain constant even when moving. To do so, we use the predicted forward and backward 3D flow $s_{\mathbf{t} + 1}$ and $s_{\mathbf{t} - 1}$ to advect each sample along the ray into the next/previous frame:
|
| 266 |
+
|
| 267 |
+
$$
|
| 268 |
+
\sigma_ {t ^ {\prime}} ^ {w} \left(\mathbf {x} + s _ {t ^ {\prime}}, \mathbf {t} + t ^ {\prime}, \mathbf {v i d}\right) \in \mathbb {R} \tag {20}
|
| 269 |
+
$$
|
| 270 |
+
|
| 271 |
+
$$
|
| 272 |
+
\mathbf {c} _ {t ^ {\prime}} ^ {w} \left(\mathbf {x} + s _ {t ^ {\prime}}, \mathbf {t} + t ^ {\prime}, \mathbf {v i d}, \mathbf {d}\right) \in \mathbb {R} ^ {3} \tag {21}
|
| 273 |
+
$$
|
| 274 |
+
|
| 275 |
+
$$
|
| 276 |
+
\Phi_ {t ^ {\prime}} ^ {w} \left(\mathbf {x} + s _ {t ^ {\prime}}, \mathbf {t} + t ^ {\prime}, \mathbf {v i d}\right) \in \mathbb {R} ^ {C} \tag {22}
|
| 277 |
+
$$
|
| 278 |
+
|
| 279 |
+
The warped radiance $\mathbf{c}^w$ and density $\sigma^w$ are rendered into warped color $\hat{C}^w (\mathbf{r})$ and feature $\hat{F}^{w}(\mathbf{r})$ (10, 11). We add a loss to ensure that the warped color (and feature) match the ground-truth input for the current frame, similar to [19, 29]. As in NSFF [29], we found it important to downweight this loss in ambiguous regions that may contain occlusions. However, instead of learning explicit occlusion weights, we take inspiration from kwea123's implementation [1] and use the difference between the dynamic geometry and the warped dynamic geometry to downweight the loss:
|
| 280 |
+
|
| 281 |
+
$$
|
| 282 |
+
w_{t^{\prime}}(\mathbf{x}, \mathbf{t}, \mathbf{vid}) = \left| \frac{\sigma_{d}}{\sigma} - \frac{\sigma_{t^{\prime}}^{w}}{\sigma} \right| \tag{23}
|
| 283 |
+
$$
|
| 284 |
+
|
| 285 |
+
$$
|
| 286 |
+
\hat {W} _ {t ^ {\prime}} (\mathbf {r}) = \int_ {0} ^ {+ \infty} T (t) \sigma (r (t)) w _ {t ^ {\prime}} (r (t)) d t \tag {24}
|
| 287 |
+
$$
|
| 288 |
+
|
| 289 |
+
resulting in the following warping loss terms:
|
| 290 |
+
|
| 291 |
+
$$
|
| 292 |
+
\mathcal{L}_{c}^{w}(\mathbf{r}) = \sum_{t^{\prime} \in [-1, 1]} \left(1 - \hat{W}_{t^{\prime}}(\mathbf{r})\right) \left\| C(\mathbf{r}) - \hat{C}_{t^{\prime}}^{w}(\mathbf{r}) \right\|^{2} \tag{25}
|
| 293 |
+
$$
|
| 294 |
+
|
| 295 |
+
$$
|
| 296 |
+
\mathcal{L}_{f}^{w}(\mathbf{r}) = \sum_{t^{\prime} \in [-1, 1]} \left(1 - \hat{W}_{t^{\prime}}(\mathbf{r})\right) \left\| F(\mathbf{r}) - \hat{F}_{t^{\prime}}^{w}(\mathbf{r}) \right\|_{1} \tag{26}
|
| 297 |
+
$$
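The following single-ray sketch shows how the occlusion weights of Eqs. 23-24 can gate the warped photometric and feature losses of Eqs. 25-26. It assumes the warped dynamic density and the warped renderings have already been computed; names are illustrative.

```python
import torch

def warping_losses(w, sigma, sigma_d, sigma_w, c_w_hat, f_w_hat, c_gt, f_gt):
    """Occlusion-aware warping losses for one ray and one time offset.

    w: (S,) rendering weights of the current frame, sigma / sigma_d / sigma_w:
    (S,) total, dynamic, and warped dynamic densities, c_w_hat / f_w_hat:
    color and feature rendered from the warped radiance and density.
    """
    # Eq. 23: disagreement between dynamic and warped dynamic geometry.
    w_occ = ((sigma_d / sigma) - (sigma_w / sigma)).abs()
    # Eq. 24: render per-sample weights into a per-ray occlusion score.
    W_hat = (w * w_occ).sum()
    # Eqs. 25-26: down-weight color/feature constancy where occlusion is likely.
    l_c_w = (1.0 - W_hat) * ((c_gt - c_w_hat) ** 2).sum()
    l_f_w = (1.0 - W_hat) * (f_gt - f_w_hat).abs().sum()
    return l_c_w, l_f_w
```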
|
| 298 |
+
|
| 299 |
+
Flow regularization. As in prior work [19, 29] we use a 3D scene flow cycle term to encourage consistency between forward and backward scene flow predictions, downweighting the loss in areas ambiguous due to occlusions:
|
| 300 |
+
|
| 301 |
+
$$
|
| 302 |
+
\mathcal{L}_{\text{cyc}}(\mathbf{r}) = \sum_{t^{\prime} \in [-1, 1]} \sum_{\mathbf{x}} w_{t^{\prime}}(\mathbf{x}, \mathbf{t}) \left\| s_{t^{\prime}}(\mathbf{x}, \mathbf{t}) + s_{-t^{\prime}}(\mathbf{x} + s_{t^{\prime}}, \mathbf{t} + t^{\prime}) \right\|_{1}, \tag{27}
|
| 303 |
+
$$
|
| 304 |
+
|
| 305 |
+
with $\mathbf{vid}$ omitted for brevity. We also encourage spatial and temporal smoothness through $\mathcal{L}_{sm}(\mathbf{r})$ as described in Sec. C. Finally, we regularize the magnitude of the predicted scene flow vectors, encouraging the scene to remain static where possible, through $\mathcal{L}_{slo}(\mathbf{r}) = \sum_{t' \in [-1, 1]} \sum_{\mathbf{x}} \left\| s_{t'}(\mathbf{x}, \mathbf{t}) \right\|_1$.
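A minimal sketch of the cycle and magnitude regularizers, assuming the backward flow can be re-queried at the advected sample locations (the `query_backward_flow` callable is a stand-in for the dynamic branch, not an actual API):

```python
import torch

def flow_regularizers(pts, t, forward_flow, query_backward_flow, w_occ):
    """Cycle consistency (Eq. 27) and slow-flow regularization for one ray.

    pts: (S, 3) sample positions, forward_flow: (S, 3) predicted s_{+1}(x, t),
    query_backward_flow: callable (points, time) -> (S, 3) backward flow,
    w_occ: (S,) occlusion weights that discount ambiguous samples.
    """
    advected = pts + forward_flow                      # positions at time t + 1
    back = query_backward_flow(advected, t + 1)        # s_{-1} at the advected points
    l_cyc = (w_occ[:, None] * (forward_flow + back).abs()).sum()
    l_slo = forward_flow.abs().sum()                   # prefer a (quasi-)static scene
    return l_cyc, l_slo
```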
|
| 306 |
+
|
| 307 |
+
Static-dynamic factorization. As physically plausible solutions should have any point in space occupied by either a static or a dynamic object, we encourage the ratio of dynamic to total density at each point to be either 0 or 1 through a
|
| 308 |
+
|
| 309 |
+
skewed binary entropy loss that favors static explanations of the scene [56]:
|
| 310 |
+
|
| 311 |
+
$$
|
| 312 |
+
\mathcal{L}_{e}(\mathbf{r}) = \int_{0}^{+\infty} H\left(\left(\frac{\sigma_{d}(\mathbf{r}(t))}{\sigma_{s}(\mathbf{r}(t)) + \sigma_{d}(\mathbf{r}(t))}\right)^{k}\right) dt \tag{28}
|
| 313 |
+
$$
|
| 314 |
+
|
| 315 |
+
where $H(x) = -(x\cdot \log (x) + (1 - x)\cdot \log (1 - x))$
|
| 316 |
+
|
| 317 |
+
is the binary entropy, $k$ is set to 1.75, and we further penalize the maximum dynamic ratio $\mathcal{L}_d(\mathbf{r}) = \max_t \left(\frac{\sigma_d(\mathbf{r}(t))}{\sigma_s(\mathbf{r}(t)) + \sigma_d(\mathbf{r}(t))}\right)$ along each ray.
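The factorization terms can be sketched per ray as below, with the integral replaced by a sum over samples; the clamping epsilon is an assumption for numerical stability.

```python
import torch

def factorization_losses(sigma_s, sigma_d, k=1.75, eps=1e-5):
    """Skewed binary entropy (Eq. 28) and max dynamic-ratio penalty for one ray.

    sigma_s, sigma_d: (S,) static and dynamic densities along the ray.
    """
    ratio = sigma_d / (sigma_s + sigma_d + eps)
    x = (ratio ** k).clamp(eps, 1.0 - eps)   # skew toward static explanations
    entropy = -(x * x.log() + (1.0 - x) * (1.0 - x).log())
    l_e = entropy.sum()                      # discrete stand-in for the integral
    l_d = ratio.max()                        # penalize the peak dynamic ratio
    return l_e, l_d
```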
|
| 318 |
+
|
| 319 |
+
**Shadow loss.** We penalize the squared magnitude of the shadow ratio $\mathcal{L}_{\rho}(\mathbf{r}) = \int_0^{+\infty}\rho_d(\mathbf{r}(t))^2 dt$ along each ray to prevent it from over-explaining dark regions [56].
|
| 320 |
+
|
| 321 |
+
# 4. Experiments
|
| 322 |
+
|
| 323 |
+
We demonstrate SUDS's city-scale reconstruction capabilities by presenting quantitative results against baseline methods (Table 1). We also show initial qualitative results for a variety of downstream tasks (Sec. 4.2). Even though we focus on reconstructing dynamic scenes at city scale, to facilitate comparisons with prior work, we also show results on small-scale but highly-benchmarked datasets such as KITTI and Virtual KITTI 2 (Sec. 4.3). We evaluate the various components of our method in Sec. 4.4.
|
| 324 |
+
|
| 325 |
+
# 4.1. Experimental Setup
|
| 326 |
+
|
| 327 |
+
2D feature extraction. We use Amir et al.'s feature extractor implementation [5] based on the dino_vits8 model. We downsample our images to fit into GPU memory and then upsample the resulting feature maps with nearest-neighbor interpolation. We L2-normalize the features at the 11th layer of the model and reduce their dimensionality to 64 through incremental PCA [3].
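As an illustration of the dimensionality reduction step, here is a small sketch using scikit-learn's IncrementalPCA on batches of L2-normalized descriptors; the descriptor width (384 for dino_vits8) and batch sizes are assumptions.

```python
import numpy as np
from sklearn.decomposition import IncrementalPCA

def fit_feature_pca(feature_batches, dim=64):
    """Fit incremental PCA over batches of (N_i, D) L2-normalized descriptors."""
    pca = IncrementalPCA(n_components=dim)
    for batch in feature_batches:
        pca.partial_fit(batch)
    return pca

# Toy usage with random stand-ins for layer-11 ViT descriptors (D = 384).
rng = np.random.default_rng(0)
batches = [rng.normal(size=(256, 384)) for _ in range(4)]
batches = [b / np.linalg.norm(b, axis=1, keepdims=True) for b in batches]
pca = fit_feature_pca(batches)
print(pca.transform(batches[0]).shape)   # (256, 64)
```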
|
| 328 |
+
|
| 329 |
+
Flow supervision. We explored using an estimator trained on synthetic data [49] in addition to directly computing 2D correspondences from DINO itself [5]. Although the correspondences are sparse (less than $5\%$ of pixels) and expensive to compute, we found its estimates more robust and use it for our experiments unless otherwise stated.
|
| 330 |
+
|
| 331 |
+
Training. We train SUDS for 250,000 iterations with 4098 rays per batch and use a proposal sampling strategy similar to Mip-NeRF 360 [7] (Sec. B). We use Adam [25] with a learning rate of $5 \times 10^{-3}$ decaying to $5 \times 10^{-4}$ .
|
| 332 |
+
|
| 333 |
+
Metrics. We report quantitative results based on PSNR, SSIM [54], and the AlexNet implementation of LPIPS [64].
|
| 334 |
+
|
| 335 |
+
# 4.2. City-Scale Reconstruction
|
| 336 |
+
|
| 337 |
+
City-1M dataset. We evaluate SUDS's large-scale reconstruction abilities on our collection of 1.28 million images across 1700 videos gathered across a $105~km^2$ urban area using a vehicle-mounted platform with seven ring cameras and two LiDAR sensors. Due to the scale, we supervise optical flow with an off-the-shelf estimator trained on synthetic data [49] instead of DINO for efficiency.
|
| 338 |
+
|
| 339 |
+

|
| 340 |
+
|
| 341 |
+

|
| 342 |
+
|
| 343 |
+

|
| 344 |
+
|
| 345 |
+

|
| 346 |
+
|
| 347 |
+

|
| 348 |
+
|
| 349 |
+

|
| 350 |
+
|
| 351 |
+

|
| 352 |
+
RGB
|
| 353 |
+
|
| 354 |
+

|
| 355 |
+
Static
|
| 356 |
+
|
| 357 |
+

|
| 358 |
+
Dynamic
|
| 359 |
+
Figure 5. City-1M. We demonstrate SUDS's capabilities on multiple downstream tasks, including instance segmentation and 3D bounding box estimation without any labeled data (by just making use of geometric clustering). In the last column, we show category-level semantic classification by matching 3D (DINO) descriptors to a held-out video annotated with semantic labels. Please see text for more details.
|
| 360 |
+
|
| 361 |
+

|
| 362 |
+
|
| 363 |
+

|
| 364 |
+
|
| 365 |
+

|
| 366 |
+
Instances
|
| 367 |
+
|
| 368 |
+

|
| 369 |
+
|
| 370 |
+

|
| 371 |
+
|
| 372 |
+

|
| 373 |
+
Bounding Boxes
|
| 374 |
+
|
| 375 |
+

|
| 376 |
+
|
| 377 |
+

|
| 378 |
+
|
| 379 |
+

|
| 380 |
+
Categories
|
| 381 |
+
|
| 382 |
+
<table><tr><td></td><td>Mega-NeRF [52]</td><td>Mega-NeRF-T</td><td>Mega-NeRF-A</td><td>SUDS</td></tr><tr><td>PSNR ↑</td><td>16.42</td><td>16.46</td><td>16.70</td><td>21.67</td></tr><tr><td>SSIM ↑</td><td>0.493</td><td>0.493</td><td>0.493</td><td>0.562</td></tr><tr><td>LPIPS ↓</td><td>0.879</td><td>0.877</td><td>0.850</td><td>0.554</td></tr></table>
|
| 383 |
+
|
| 384 |
+
Table 1. City-scale view synthesis on City-1M. SUDS outperforms all baselines by a wide margin.
|
| 385 |
+
|
| 386 |
+
Baselines. We compare SUDS to the official Mega-NeRF [52] implementation alongside two variants: Mega-NeRF-T which directly adds time as an input parameter to compute density and radiance, and Mega-NeRF-A which instead uses the latent embedding $A_{\text{vid}} \mathcal{F}(t)$ used by SUDS.
|
| 387 |
+
|
| 388 |
+
Results. We train both SUDS and the baselines using 48 cells and summarize our results in Table 1. SUDS outperforms all Mega-NeRF variants by a large margin. We provide qualitative results on view synthesis, static/dynamic factorization, unsupervised 3D instance segmentation and unsupervised 3D cuboid detection in Fig. 5. We present additional qualitative tracking results in Fig. 7.
|
| 389 |
+
|
| 390 |
+
Instance segmentation. We derive the instance count as in prior work [44] by sampling dynamic density values $\sigma_{d}$ , projecting those above a given threshold onto a discretized ground plane before applying connected component labeling. We apply k-means to obtain 3D centroids and volume render instance predictions as for semantic segmentation.
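A simplified sketch of this pipeline, assuming the dynamic density has already been sampled at a set of 3D points (threshold, grid resolution, and function names are illustrative):

```python
import numpy as np
from scipy.ndimage import label
from sklearn.cluster import KMeans

def segment_instances(points, sigma_d, thresh=5.0, cell=0.5):
    """Count and cluster dynamic-density samples into instances.

    points: (N, 3) sampled 3D locations, sigma_d: (N,) dynamic densities.
    High-density samples are rasterized onto a ground-plane grid, connected
    components give the instance count, and k-means yields 3D centroids.
    """
    keep = points[sigma_d > thresh]
    ij = np.floor((keep[:, :2] - keep[:, :2].min(0)) / cell).astype(int)
    grid = np.zeros(tuple(ij.max(0) + 1), dtype=bool)
    grid[ij[:, 0], ij[:, 1]] = True
    _, n_instances = label(grid)                  # connected-component count
    km = KMeans(n_clusters=max(n_instances, 1), n_init=10).fit(keep)
    return km.labels_, km.cluster_centers_
```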
|
| 391 |
+
|
| 392 |
+
3D cuboid detection. After computing point-wise instance assignments in 3D, we derive oriented bounding boxes based on the PCA of the convex hull of points belonging to each instance [2].
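For reference, an oriented box can be recovered from an instance's points along these lines (a NumPy/SciPy sketch of the same idea as the Open3D helper [2], not its implementation):

```python
import numpy as np
from scipy.spatial import ConvexHull

def oriented_bbox(points):
    """Fit an oriented 3D box via PCA of an instance's convex-hull vertices.

    points: (N, 3) points of a single instance.
    Returns (center, axes, extents) with axes as the rows of a 3x3 matrix.
    """
    hull_pts = points[ConvexHull(points).vertices]
    mean = hull_pts.mean(0)
    centered = hull_pts - mean
    _, _, axes = np.linalg.svd(centered, full_matrices=False)   # principal axes
    local = centered @ axes.T                                    # box-frame coords
    mins, maxs = local.min(0), local.max(0)
    center = mean + 0.5 * (mins + maxs) @ axes
    return center, axes, maxs - mins
```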
|
| 393 |
+
|
| 394 |
+
Semantic segmentation. Note the above tasks of instance segmentation and 3D cuboid detection do not require any additional labels as they make use of geometric clustering. We now show that the representation learned by SUDS can also enable downstream semantic tasks, by making use
|
| 395 |
+
|
| 396 |
+
of a small number of 2D segmentation labels provided on a held-out video sequence. We compute the average 2D DINO descriptor for each semantic class from the held-out frames and derive 3D semantic labels for all reconstructions by matching each 3D descriptor to the closest class centroid. This allows us to produce 3D semantic label fields that can then be rendered in 2D, as shown in Fig. 5.
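The matching step amounts to nearest-centroid classification of the 3D descriptors; a small sketch (cosine similarity on L2-normalized features is an assumption):

```python
import numpy as np

def classify_descriptors(descriptors, class_centroids):
    """Assign each 3D DINO descriptor to its closest semantic class centroid.

    descriptors: (N, C) rendered/queried feature vectors; class_centroids:
    (K, C) average 2D descriptors from the held-out labeled video.
    Returns (N,) class indices.
    """
    d = descriptors / np.linalg.norm(descriptors, axis=1, keepdims=True)
    c = class_centroids / np.linalg.norm(class_centroids, axis=1, keepdims=True)
    return np.argmax(d @ c.T, axis=1)
```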
|
| 397 |
+
|
| 398 |
+
# 4.3. KITTI Benchmarks
|
| 399 |
+
|
| 400 |
+
Baselines. We compare SUDS to SRN [45], the original NeRF implementation [33], a variant of NeRF taking time as an additional input, NSG [37], and PNF [27]. Both NSG and PNF are trained and evaluated using ground truth object bounding box and category-level annotations.
|
| 401 |
+
|
| 402 |
+
Image reconstruction. We compare SUDS's reconstruction capabilities using the same KITTI [21] subsequences and experimental setup as prior work [27, 37]. We present results in Table 3. As PNF's implementation is not publicly available, we rely on their reported numbers. SUDS surpasses the state-of-the-art in PSNR and SSIM.
|
| 403 |
+
|
| 404 |
+
Novel view synthesis. We demonstrate SUDS's capabilities to generate plausible renderings at time steps unseen during training. As NSG does not handle scenes with ego-motion, we use subsequences of KITTI and Virtual KITTI 2 [18] with little camera movement. We evaluate the methods using different train/test splits, holding out every 4th time step, every other time step, and finally training with only one in every four time steps. We summarize our findings in Table 2 along with qualitative results in Fig. 6. SUDS achieves the best results across all splits and metrics. Both NeRF variants fail to properly represent the scene, especially in dynamic areas. Although we provide NSG with the ground truth object poses at render time, it fails to learn a clean decomposition between objects and the background,
|
| 405 |
+
|
| 406 |
+

|
| 407 |
+
Figure 6. KITTI and VKITTI2 view synthesis. Prior work fails to represent the scene and NSG [37] renders ghosting artifacts near areas of movement. Our method forecasts plausible trajectories and generates higher-quality renderings.
|
| 408 |
+
|
| 409 |
+

|
| 410 |
+
|
| 411 |
+

|
| 412 |
+
|
| 413 |
+

|
| 414 |
+
|
| 415 |
+

|
| 416 |
+
|
| 417 |
+
<table><tr><td rowspan="2"></td><td colspan="3">KITTI - 75%</td><td colspan="3">KITTI - 50%</td><td colspan="3">KITTI - 25%</td></tr><tr><td>↑PSNR</td><td>↑SSIM</td><td>↓LPIPS</td><td>↑PSNR</td><td>↑SSIM</td><td>↓LPIPS</td><td>↑PSNR</td><td>↑SSIM</td><td>↓LPIPS</td></tr><tr><td>NeRF [33]</td><td>18.56</td><td>0.557</td><td>0.554</td><td>19.12</td><td>0.587</td><td>0.497</td><td>18.61</td><td>0.570</td><td>0.510</td></tr><tr><td>NeRF + Time</td><td>21.01</td><td>0.612</td><td>0.492</td><td>21.34</td><td>0.635</td><td>0.448</td><td>19.55</td><td>0.586</td><td>0.505</td></tr><tr><td>NSG [37]</td><td>21.53</td><td>0.673</td><td>0.254</td><td>21.26</td><td>0.659</td><td>0.266</td><td>20.00</td><td>0.632</td><td>0.281</td></tr><tr><td>SUDS</td><td>22.77</td><td>0.797</td><td>0.171</td><td>23.12</td><td>0.821</td><td>0.135</td><td>20.76</td><td>0.747</td><td>0.198</td></tr><tr><td rowspan="2"></td><td colspan="3">VKITTI2 - 75%</td><td colspan="3">VKITTI2 - 50%</td><td colspan="3">VKITTI2 - 25%</td></tr><tr><td>↑PSNR</td><td>↑SSIM</td><td>↓LPIPS</td><td>↑PSNR</td><td>↑SSIM</td><td>↓LPIPS</td><td>↑PSNR</td><td>↑SSIM</td><td>↓LPIPS</td></tr><tr><td>NeRF [33]</td><td>18.67</td><td>0.548</td><td>0.634</td><td>18.58</td><td>0.544</td><td>0.635</td><td>18.17</td><td>0.537</td><td>0.644</td></tr><tr><td>NeRF + Time</td><td>19.03</td><td>0.574</td><td>0.587</td><td>18.90</td><td>0.565</td><td>0.610</td><td>18.04</td><td>0.545</td><td>0.626</td></tr><tr><td>NSG [37]</td><td>23.41</td><td>0.689</td><td>0.317</td><td>23.23</td><td>0.679</td><td>0.325</td><td>21.29</td><td>0.666</td><td>0.317</td></tr><tr><td>SUDS</td><td>23.87</td><td>0.846</td><td>0.150</td><td>23.78</td><td>0.851</td><td>0.142</td><td>22.18</td><td>0.829</td><td>0.160</td></tr></table>
|
| 418 |
+
|
| 419 |
+
Table 2. Novel View Synthesis. As the fraction of training views decreases, accuracy drops for all methods. However, SUDS consistently outperforms prior work, presumably due to more accurate representations learned by our diverse input signals (such as depth and flow).
|
| 420 |
+
|
| 421 |
+
<table><tr><td></td><td>SRN [45]</td><td>NeRF [33]</td><td>NeRF + Time</td><td>NSG [37]</td><td>PNF [27]</td><td>Ours</td></tr><tr><td>PSNR ↑</td><td>18.83</td><td>23.34</td><td>24.18</td><td>26.66</td><td>27.48</td><td>28.31</td></tr><tr><td>SSIM ↑</td><td>0.590</td><td>0.662</td><td>0.677</td><td>0.806</td><td>0.870</td><td>0.876</td></tr></table>
|
| 422 |
+
|
| 423 |
+
Table 3. KITTI image reconstruction. We outperform past work on image reconstruction accuracy, following their experimental protocol and self-reported accuracies [27, 37].
|
| 424 |
+
|
| 425 |
+
<table><tr><td></td><td>↑PSNR</td><td>↑SSIM</td><td>↓LPIPS</td></tr><tr><td>w/o Depth loss</td><td>22.74</td><td>0.715</td><td>0.292</td></tr><tr><td>w/o Optical flow loss</td><td>22.18</td><td>0.708</td><td>0.302</td></tr><tr><td>w/o Warping loss</td><td>17.53</td><td>0.622</td><td>0.478</td></tr><tr><td>w/o Appearance embedding</td><td>22.54</td><td>0.704</td><td>0.296</td></tr><tr><td>w/o Occlusion weights</td><td>22.56</td><td>0.711</td><td>0.297</td></tr><tr><td>w/o Separate branches</td><td>19.73</td><td>0.570</td><td>0.475</td></tr><tr><td>Full Method</td><td>22.95</td><td>0.718</td><td>0.289</td></tr></table>
|
| 426 |
+
|
| 427 |
+
Table 4. Diagnostics. Flow-based warping is the single-most important input, while depth is the least crucial input.
|
| 428 |
+
|
| 429 |
+
especially as the number of training views decreases, and generates ghosting artifacts near areas of movement.
|
| 430 |
+
|
| 431 |
+
# 4.4. Diagnostics
|
| 432 |
+
|
| 433 |
+
We ablate the importance of major SUDS components by removing their respective loss terms along with occlusion weights, the latent embedding $A_{\text{vid}} \mathcal{F}(t)$ used to compute
|
| 434 |
+
|
| 435 |
+
static color $\mathbf{c}_s$ , and separate model branches (Sec. D). We run all approaches for 125,000 iterations across our datasets and summarize the results in Table 4. Although all components help performance, flow-based warping is by far the single most important input. Interestingly, depth is the least crucial input, suggesting that SUDS can generalize to settings where depth measurements are not available.
|
| 436 |
+
|
| 437 |
+
# 5. Conclusion
|
| 438 |
+
|
| 439 |
+
We present a modular approach towards building dynamic neural representations at previously unexplored scale. Our multi-branch hash table structure enables us to disentangle and efficiently encode static geometry and transient objects across thousands of videos. SUDS makes use of unlabeled inputs to learn semantic awareness and scene flow, allowing it to perform several downstream tasks while surpassing state-of-the-art methods that rely on human labeling. Although we present a first attempt at building city-scale dynamic environments, many open challenges remain ahead of building truly photorealistic representations.
|
| 440 |
+
|
| 441 |
+
# Acknowledgments
|
| 442 |
+
|
| 443 |
+
This research was supported by the CMU Argo AI Center for Autonomous Vehicle Research.
|
| 444 |
+
|
| 445 |
+
# References
|
| 446 |
+
|
| 447 |
+
[1] Kwea123's NSFF implementation. https://github.com/kwea123/nsff_pl. Accessed: 2022-10-29. 6
|
| 448 |
+
[2] Open3D oriented bounding box implementation. http://www.open3d.org/docs/latest/python_api/open3d.geometry.OrientedBoundingBox.html#open3d.geometry.OrientedBoundingBox.create_from_axis_aligned_bounding_box. Accessed: 2022-11-06. 7
|
| 449 |
+
[3] Scikit-learn incremental PCA. https://scikit-learn.org/stable/modules/generated/sklearn.decomposition.IncrementalPCA.html. Accessed: 2022-10-29. 6
|
| 450 |
+
[4] Sameer Agarwal, Yasutaka Furukawa, Noah Snavely, Ian Simon, Brian Curless, Steven M Seitz, and Richard Szeliski. Building rome in a day. Communications of the ACM, 54(10):105-112, 2011. 1
|
| 451 |
+
[5] Shir Amir, Yossi Gandelsman, Shai Bagon, and Tali Dekel. Deep vit features as dense visual descriptors. arXiv preprint arXiv:2112.05814, 2021. 5, 6, 13
|
| 452 |
+
[6] Dragomir Anguelov, Carole Dulong, Daniel Filip, Christian Frueh, Stephane Lafon, Richard Lyon, Abhijit Ogale, Luc Vincent, and Josh Weaver. Google street view: Capturing the world at street level. Computer, 43(6):32-38, 2010. 1
|
| 453 |
+
[7] Jonathan T. Barron, Ben Mildenhall, Dor Verbin, Pratul P. Srinivasan, and Peter Hedman. Mip-nerf 360: Unbounded anti-aliased neural radiance fields. In CVPR, 2022. 2, 6, 12
|
| 454 |
+
[8] G. Bradski. The OpenCV Library. Dr. Dobb's Journal of Software Tools, 2000. 13
|
| 455 |
+
[9] Lars Buitinck, Gilles Louppe, Mathieu Blondel, Fabian Pedregosa, Andreas Mueller, Olivier Grisel, Vlad Niculae, Peter Prettenhofer, Alexandre Gramfort, Jaques Grobler, Robert Layton, Jake VanderPlas, Arnaud Joly, Brian Holt, and Gael Varoquaux. API design for machine learning software: experiences from the scikit-learn project. In ECML PKDD Workshop: Languages for Data Mining and Machine Learning, pages 108–122, 2013. 13
|
| 456 |
+
[10] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. 2021. 3, 5
|
| 457 |
+
[11] Anpei Chen, Zexiang Xu, Andreas Geiger, Jingyi Yu, and Hao Su. Tensorf: Tensorial radiance fields. In ECCV, 2022. 3
|
| 458 |
+
[12] Shin-Fang Chng, Sameera Ramasinghe, Jamie Sherrah, and Simon Lucey. Gaussian activated neural radiance fields for high fidelity reconstruction and pose estimation. In ECCV, page 264-280, Berlin, Heidelberg, 2022. Springer-Verlag. 13
|
| 459 |
+
[13] Tali Dekel, Shaul Oron, Michael Rubinstein, Shai Avidan, and William T. Freeman. Best-buddies similarity for robust template matching. In CVPR, pages 2021-2029, 2015. 12
|
| 460 |
+
[14] Kangle Deng, Andrew Liu, Jun-Yan Zhu, and Deva Ramanan. Depth-supervised NeRF: Fewer views and faster training for free. In CVPR, June 2022. 3, 5
|
| 461 |
+
|
| 462 |
+
[15] Yilun Du, Yinan Zhang, Hong-Xing Yu, Joshua B. Tenenbaum, and Jiajun Wu. Neural radiance flow for 4d view synthesis and video processing. In ICCV, 2021. 2, 5
|
| 463 |
+
[16] Xiao Fu, Shangzhan Zhang, Tianrun Chen, Yichong Lu, Lanyun Zhu, Xiaowei Zhou, Andreas Geiger, and Yiyi Liao. Panoptic nerf: 3d-to-2d label transfer for panoptic urban scene segmentation. In International Conference on 3D Vision (3DV), 2022. 2
|
| 464 |
+
[17] Thomas A Funkhouser, Carlo H Sequin, and Seth J Teller. Management of large amounts of data in interactive building walkthroughs. In Proceedings of the 1992 symposium on Interactive 3D graphics, pages 11-20, 1992. 5
|
| 465 |
+
[18] Adrien Gaidon, Qiao Wang, Yohann Cabon, and Eleonora Vig. Virtual worlds as proxy for multi-object tracking analysis. In CVPR, pages 4340-4349, 2016. 7, 13
|
| 466 |
+
[19] Chen Gao, Ayush Saraf, Johannes Kopf, and Jia-Bin Huang. Dynamic view synthesis from dynamic monocular video. In ICCV, 2021. 2, 3, 5, 6
|
| 467 |
+
[20] Hang Gao, Ruilong Li, Shubham Tulsiani, Bryan Russell, and Angjoo Kanazawa. Monocular dynamic view synthesis: A reality check. In NeurIPS, 2022. 2
|
| 468 |
+
[21] Andreas Geiger, Philip Lenz, and Raquel Urtasun. Are we ready for autonomous driving? the kitti vision benchmark suite. In CVPR, 2012. 7, 13
|
| 469 |
+
[22] Zekun Hao, Arun Mallya, Serge Belongie, and Ming-Yu Liu. GANcraft: Unsupervised 3D Neural Rendering of Minecraft Worlds. In ICCV, 2021. 4
|
| 470 |
+
[23] Yoonwoo Jeong, Seokjun Ahn, Christopher Choy, Anima Anandkumar, Minsu Cho, and Jaesik Park. Self-calibrating neural radiance fields. In ICCV, pages 5846-5854, October 2021. 13
|
| 471 |
+
[24] Zhang Jiakai, Liu Xinhang, Ye Xinyi, Zhao Fuqiang, Zhang Yanshun, Wu Minye, Zhang Yingliang, Xu Lan, and Yu Jingyi. Editable free-viewpoint video using a layered neural representation. In ACM SIGGRAPH, 2021. 2
|
| 472 |
+
[25] DP Kingma, J Ba, Y Bengio, and Y LeCun. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, 2015. 6
|
| 473 |
+
[26] Sosuke Kobayashi, Eiichi Matsumoto, and Vincent Sitzmann. Decomposing nerf for editing via feature field distillation. In Advances in Neural Information Processing Systems, volume 35, 2022. 2, 5
|
| 474 |
+
[27] Abhijit Kundu, Kyle Genova, Xiaoqi Yin, Alireza Fathi, Caroline Pantofaru, Leonidas Guibas, Andrea Tagliasacchi, Frank Dellaert, and Thomas Funkhouser. Panoptic Neural Fields: A Semantic Object-Aware Neural Scene Representation. In CVPR, 2022. 2, 7, 8
|
| 475 |
+
[28] Tianye Li, Mira Slavcheva, Michael Zollhoefer, Simon Green, Christoph Lassner, Changil Kim, Tanner Schmidt, Steven Lovegrove, Michael Goesele, and Zhaoyang Lv. Neural 3d video synthesis. In CVPR, 2022. 2
|
| 476 |
+
[29] Zhengqi Li, Simon Niklaus, Noah Snavely, and Oliver Wang. Neural scene flow fields for space-time view synthesis of dynamic scenes. In CVPR, 2021. 2, 3, 5, 6, 12
|
| 477 |
+
[30] Chen-Hsuan Lin, Wei-Chiu Ma, Antonio Torralba, and Simon Lucey. Barf: Bundle-adjusting neural radiance fields. In IEEE International Conference on Computer Vision (ICCV), 2021. 13
|
| 478 |
+
|
| 479 |
+
[31] Ricardo Martin-Brualla, Noha Radwan, Mehdi S. M. Sajjadi, Jonathan T. Barron, Alexey Dosovitskiy, and Daniel Duckworth. NeRF in the Wild: Neural Radiance Fields for Unconstrained Photo Collections. In CVPR, 2021. 2
|
| 480 |
+
[32] Quan Meng, Anpei Chen, Haimin Luo, Minye Wu, Hao Su, Lan Xu, Xuming He, and Jingyi Yu. GNeRF: GAN-based Neural Radiance Field without Posed Camera. In ICCV, 2021. 13
|
| 481 |
+
[33] Ben Mildenhall, Pratul P. Srinivasan, Matthew Tancik, Jonathan T. Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. In ECCV, 2020. 1, 2, 3, 5, 7, 8
|
| 482 |
+
[34] Thomas Müller. tiny-cuda-nn, 4 2021. 13
|
| 483 |
+
[35] Thomas Müller, Alex Evans, Christoph Schied, and Alexander Keller. Instant neural graphics primitives with a multiresolution hash encoding. ACM Trans. Graph., 41(4):102:1-102:15, July 2022. 3, 4
|
| 484 |
+
[36] Michael Niemeyer and Andreas Geiger. Giraffe: Representing scenes as compositional generative neural feature fields. In CVPR, 2021. 2
|
| 485 |
+
[37] Julian Ost, Fahim Mannan, Nils Thuerey, Julian Knodt, and Felix Heide. Neural scene graphs for dynamic scenes. In CVPR, pages 2856-2865, June 2021. 2, 7, 8
|
| 486 |
+
[38] Keunhong Park, Utkarsh Sinha, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Steven M. Seitz, and Ricardo Martin-Brualla. Nerfies: Deformable neural radiance fields. In ICCV, 2021. 2
|
| 487 |
+
[39] Keunhong Park, Utkarsh Sinha, Peter Hedman, Jonathan T. Barron, Sofien Bouaziz, Dan B Goldman, Ricardo Martin-Brualla, and Steven M. Seitz. Hypernerf: A higher-dimensional representation for topologically varying neural radiance fields. ACM Trans. Graph., 40(6), dec 2021. 2
|
| 488 |
+
[40] Albert Pumarola, Enric Corona, Gerard Pons-Moll, and Francesc Moreno-Noguer. D-NeRF: Neural Radiance Fields for Dynamic Scenes. In CVPR, 2020. 2
|
| 489 |
+
[41] Konstantinos Rematas, Andrew Liu, Pratul P. Srinivasan, Jonathan T. Barron, Andrea Tagliasacchi, Tom Funkhouser, and Vittorio Ferrari. Urban radiance fields. CVPR, 2022. 3, 4, 5
|
| 490 |
+
[42] Barbara Roessle, Jonathan T. Barron, Ben Mildenhall, Pratul P. Srinivasan, and Matthias Nießner. Dense depth priors for neural radiance fields from sparse input views. In CVPR, June 2022. 3
|
| 491 |
+
[43] Sara Fridovich-Keil and Alex Yu, Matthew Tancik, Qinhong Chen, Benjamin Recht, and Angjoo Kanazawa. Plenoxels: Radiance fields without neural networks. In CVPR, 2022. 3
|
| 492 |
+
[44] Prafull Sharma, Ayush Tewari, Yilun Du, Sergey Zakharov, Rares Andrei Ambrus, Adrien Gaidon, William T. Freeman, Fredo Durand, Joshua B. Tenenbaum, and Vincent Sitzmann. Neural groundplans: Persistent neural scene representations from a single image, 2023. 2, 7
|
| 493 |
+
[45] Vincent Sitzmann, Michael Zollhöfer, and Gordon Wetzstein. Scene representation networks: Continuous 3d-structure-aware neural scene representations. In Advances in Neural Information Processing Systems, 2019. 7, 8
|
| 494 |
+
[46] Cheng Sun, Min Sun, and Hwann-Tzong Chen. Direct voxel grid optimization: Super-fast convergence for radiance fields reconstruction. In CVPR, 2022. 3
|
| 495 |
+
|
| 496 |
+
[47] Matthew Tancik, Vincent Casser, Xinchen Yan, Sabeek Pradhan, Ben Mildenhall, Pratul P. Srinivasan, Jonathan T. Barron, and Henrik Kretzschmar. Block-nerf: Scalable large scene neural view synthesis. In CVPR, pages 8248–8258, June 2022. 1, 2, 5
|
| 497 |
+
[48] Matthew Tancik, Ethan Weber, Evonne Ng, Ruilong Li, Brent Yi, Justin Kerr, Terrance Wang, Alexander Kristoffersen, Jake Austin, Kamyar Salahi, Abhik Ahuja, David McAllister, and Angjoo Kanazawa. Nerfstudio: A modular framework for neural radiance field development. arXiv preprint arXiv:2302.04264, 2023. 13
|
| 498 |
+
[49] Zachary Teed and Jia Deng. Raft: Recurrent all-pairs field transforms for optical flow (extended abstract). In *IJCAI*, 2021. 6
|
| 499 |
+
[50] Edgar Tretschk, Ayush Tewari, Vladislav Golyanik, Michael Zollhöfer, Christoph Lassner, and Christian Theobalt. Non-rigid neural radiance fields: Reconstruction and novel view synthesis of a dynamic scene from monocular video. In ICCV. IEEE, 2021. 2
|
| 500 |
+
[51] Vadim Tschernezki, Iro Laina, Diane Larlus, and Andrea Vedaldi. Neural Feature Fusion Fields: 3D distillation of self-supervised 2D image representation. In Proceedings of the International Conference on 3D Vision (3DV), 2022. 2, 5
|
| 501 |
+
[52] Haithem Turki, Deva Ramanan, and Mahadev Satyanarayanan. Mega-nerf: Scalable construction of large-scale nerfs for virtual fly-throughs. In CVPR, pages 12922–12931, June 2022. 1, 2, 3, 5, 7
|
| 502 |
+
[53] Suhani Vora*, Noha Radwan*, Klaus Greff, Henning Meyer, Kyle Genova, Mehdi S. M. Sajjadi, Etienne Pot, Andrea Tagliasacchi, and Daniel Duckworth. Nesf: Neural semantic fields for generalizable semantic segmentation of 3d scenes. Transactions on Machine Learning Research, 2022. https://openreview.net/forum?id=ggPhsYCsm9. 2
|
| 503 |
+
[54] Zhou Wang, A.C. Bovik, H.R. Sheikh, and E.P. Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE Transactions on Image Processing, 13(4):600-612, 2004. 6
|
| 504 |
+
[55] Zirui Wang, Shangzhe Wu, Weidi Xie, Min Chen, and Victor Adrian Prisacariu. NeRF---: Neural radiance fields without known camera parameters. arXiv preprint arXiv:2102.07064, 2021. 13
|
| 505 |
+
[56] Tianhao Wu, Fangcheng Zhong, Andrea Tagliasacchi, Forrester Cole, and Cengiz Oztireli. $\mathbf{D}^2$ nerf: Self-supervised decoupling of dynamic and static objects from a monocular video. In Advances in Neural Information Processing Systems, 2022. 2, 3, 4, 6
|
| 506 |
+
[57] Wenqi Xian, Jia-Bin Huang, Johannes Kopf, and Changil Kim. Space-time neural irradiance fields for free-viewpoint video. In CVPR, pages 9421-9431, 2021. 2
|
| 507 |
+
[58] Yuanbo Xiangli, Linning Xu, Xingang Pan, Nanxuan Zhao, Anyi Rao, Christian Theobalt, Bo Dai, and Dahua Lin. Bungeenerf: Progressive neural radiance field for extreme multi-scale scene rendering. In ECCV, 2022. 2
|
| 508 |
+
[59] Bangbang Yang, Yinda Zhang, Yinghao Xu, Yijin Li, Han Zhou, Hujun Bao, Guofeng Zhang, and Zhaopeng Cui. Learning object-compositional neural radiance field for ed-itable scene rendering. In ICCV, October 2021. 2
|
| 509 |
+
|
| 510 |
+
[60] Gengshan Yang, Minh Vo, Neverova Natalia, Deva Ramanan, Vedaldi Andrea, and Joo Hanbyul. Banmo: Building animatable 3d neural models from many casual videos. In CVPR, 2022. 2, 4
|
| 511 |
+
[61] Hong-Xing Yu, Leonidas J. Guibas, and Jiajun Wu. Unsupervised discovery of object radiance fields. In International Conference on Learning Representations, 2022. 2
|
| 512 |
+
[62] Wentao Yuan, Zhaoyang Lv, Tanner Schmidt, and Steven Lovegrove. Star: Self-supervised tracking and reconstruction of rigid objects in motion with neural rendering. In CVPR, pages 13144-13152, 2021. 2
|
| 513 |
+
[63] Kai Zhang, Gernot Riegler, Noah Snavely, and Vladlen Koltun. Nerf++: Analyzing and improving neural radiance fields. arXiv:2010.07492, 2020. 2, 3
|
| 514 |
+
[64] Richard Zhang, Phillip Isola, Alexei A Efros, Eli Shechtman, and Oliver Wang. The unreasonable effectiveness of deep features as a perceptual metric. In CVPR, 2018. 6
|
| 515 |
+
[65] Shuaifeng Zhi, Tristan Laidlow, Stefan Leutenegger, and Andrew Davison. In-place scene labelling and understanding with implicit scene representation. In ICCV, 2021. 2
|
| 516 |
+
|
| 517 |
+
# Supplemental Materials
|
| 518 |
+
|
| 519 |
+
# A. Tracking
|
| 520 |
+
|
| 521 |
+
We can compute mask and keypoint-level correspondences across frames after detecting instances (Sec. 4.2) by using Best-Buddies similarity [13] on features $\Phi$ within or between instances. As a 3D representation, SUDS can track correspondences through 2D occluders. We show an example in Fig. 7.
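A simplified form of the matching step is mutual-nearest-neighbor ("best buddy") pairing of per-point features between two frames; the full Best-Buddies similarity [13] aggregates such pairs, and the sketch below only illustrates the pairing:

```python
import numpy as np

def best_buddy_pairs(feats_a, feats_b):
    """Return index pairs (i, j) whose features are mutual nearest neighbors.

    feats_a: (N, C) features of instance points in frame A,
    feats_b: (M, C) features of instance points in frame B.
    """
    # Pairwise squared distances between the two feature sets.
    d = ((feats_a[:, None, :] - feats_b[None, :, :]) ** 2).sum(-1)
    nn_ab = d.argmin(axis=1)        # best match in B for every point in A
    nn_ba = d.argmin(axis=0)        # best match in A for every point in B
    return [(i, j) for i, j in enumerate(nn_ab) if nn_ba[j] == i]
```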
|
| 522 |
+
|
| 523 |
+
# B. Proposal Sampling
|
| 524 |
+
|
| 525 |
+
We use a proposal sampling strategy similar to Mip-NeRF 360 [7] that first queries a lightweight occupancy proposal network at uniform intervals along each camera ray and then picks additional samples based on the initial samples. We model our proposal network with separate hash table-backed static and dynamic branches as in Sec. 3.2. We train each branch of the proposal network with histogram loss [7] using the weights of the respective branch of our main model and regularize the resulting sample distances and weights using distortion loss [7]. We find that proposal sampling gives a 2-4x speedup.
|
| 526 |
+
|
| 527 |
+
# C. Smoothness Priors
|
| 528 |
+
|
| 529 |
+
We use the same spatial and temporal smoothness priors as NSFF [29] to regularize our scene flow. We specifically denote:
|
| 530 |
+
|
| 531 |
+
$$
|
| 532 |
+
\begin{array}{l} \mathcal {L} _ {s m} (\mathbf {r}) = \sum_ {\mathbf {x}} \sum_ {t ^ {\prime} \in [ - 1, 1 ]} e ^ {- 2 \left\| \mathbf {x} - \mathbf {x} ^ {\prime} \right\| _ {2}} \left\| s _ {t ^ {\prime}} (\mathbf {x}, \mathbf {t}) - s _ {t ^ {\prime}} (\mathbf {x} ^ {\prime}, \mathbf {t}) \right\| _ {1} \\ + \sum_ {\mathbf {x}} \left\| s _ {\mathbf {t} - 1} (\mathbf {x}, \mathbf {t}) + s _ {\mathbf {t} + 1} (\mathbf {x}, \mathbf {t}) \right\| _ {1}, \tag {29} \\ \end{array}
|
| 533 |
+
$$
|
| 534 |
+
|
| 535 |
+
where $\mathbf{x}$ and $\mathbf{x}'$ indicate neighboring points along the camera ray $\mathbf{r}$ .
|
| 536 |
+
|
| 537 |
+
# D. Ablation Details
|
| 538 |
+
|
| 539 |
+
w/o Depth loss. We remove depth from the reconstruction loss term:
|
| 540 |
+
|
| 541 |
+
$$
|
| 542 |
+
\mathcal {L} _ {r e c} = \mathcal {L} _ {c} + \lambda_ {f} \mathcal {L} _ {f} + \lambda_ {o} \mathcal {L} _ {o} \tag {30}
|
| 543 |
+
$$
|
| 544 |
+
|
| 545 |
+
w/o Optical flow loss. We remove optical flow from the reconstruction loss term:
|
| 546 |
+
|
| 547 |
+
$$
|
| 548 |
+
\mathcal {L} _ {r e c} = \mathcal {L} _ {c} + \lambda_ {f} \mathcal {L} _ {f} + \lambda_ {d} \mathcal {L} _ {d} \tag {31}
|
| 549 |
+
$$
|
| 550 |
+
|
| 551 |
+
w/o Warping loss. We remove all warping and flow-related loss terms:
|
| 552 |
+
|
| 553 |
+
$$
|
| 554 |
+
\mathcal{L} = \underbrace{\left(\mathcal{L}_{c} + \lambda_{f} \mathcal{L}_{f} + \lambda_{d} \mathcal{L}_{d}\right)}_{\text{reconstruction losses}} + \underbrace{\left(\lambda_{e} \mathcal{L}_{e} + \lambda_{d} \mathcal{L}_{d}\right)}_{\text{static-dynamic factorization}} + \lambda_{\rho} \mathcal{L}_{\rho}. \tag{32}
|
| 555 |
+
$$
|
| 556 |
+
|
| 557 |
+
w/o Appearance embedding. We compute static color without the latent embedding vector $A_{\text{vid}}\mathcal{F}(t)$:
|
| 558 |
+
|
| 559 |
+
$$
|
| 560 |
+
\mathbf {c} _ {s} (\mathbf {x}, \mathbf {d}) \in \mathbb {R} ^ {3} \tag {33}
|
| 561 |
+
$$
|
| 562 |
+
|
| 563 |
+
w/o Occlusion weights. We do not use occlusion weights (24) to downweight the warping loss terms (25, 26):
|
| 564 |
+
|
| 565 |
+
$$
|
| 566 |
+
\mathcal {L} _ {c} ^ {w} (\mathbf {r}) = \sum_ {t ^ {\prime} \in [ - 1, 1 ]} \left\| C (\mathbf {r}) - \hat {C} _ {t ^ {\prime}} ^ {w} (\mathbf {r}) \right\| ^ {2} \tag {34}
|
| 567 |
+
$$
|
| 568 |
+
|
| 569 |
+
$$
|
| 570 |
+
\mathcal {L} _ {f} ^ {w} (\mathbf {r}) = \sum_ {t ^ {\prime} \in [ - 1, 1 ]} \left\| F (\mathbf {r}) - \hat {F} _ {t ^ {\prime}} ^ {w} (\mathbf {r}) \right\| _ {1} \tag {35}
|
| 571 |
+
$$
|
| 572 |
+
|
| 573 |
+
w/o Separate branches. We generate all model outputs using a single time-dependent branch:
|
| 574 |
+
|
| 575 |
+
$$
|
| 576 |
+
\sigma (\mathbf {x}, \mathbf {t}, \mathbf {v i d}) \in \mathbb {R} \tag {36}
|
| 577 |
+
$$
|
| 578 |
+
|
| 579 |
+
$$
|
| 580 |
+
\mathbf {c} (\mathbf {x}, \mathbf {t}, \mathbf {v i d}, \mathbf {d}) \in \mathbb {R} ^ {3} \tag {37}
|
| 581 |
+
$$
|
| 582 |
+
|
| 583 |
+
$$
|
| 584 |
+
\Phi (\mathbf {x}, \mathbf {t}, \mathbf {v i d}) \in \mathbb {R} ^ {C} \tag {38}
|
| 585 |
+
$$
|
| 586 |
+
|
| 587 |
+
$$
|
| 588 |
+
s _ {t ^ {\prime} \in [ - 1, 1 ]} (\mathbf {x}, \mathbf {t}, \mathbf {v i d}) \in \mathbb {R} ^ {3} \tag {39}
|
| 589 |
+
$$
|
| 590 |
+
|
| 591 |
+
We accordingly remove factorization-related loss terms:
|
| 592 |
+
|
| 593 |
+
$$
\mathcal{L} = \underbrace{\left(\mathcal{L}_{c} + \lambda_{f} \mathcal{L}_{f} + \lambda_{d} \mathcal{L}_{d} + \lambda_{o} \mathcal{L}_{o}\right)}_{\text{reconstruction losses}} + \underbrace{\left(\mathcal{L}_{c}^{w} + \lambda_{f} \mathcal{L}_{f}^{w}\right)}_{\text{warping losses}} + \lambda_{flo} \underbrace{\left(\mathcal{L}_{cyc} + \mathcal{L}_{sm} + \mathcal{L}_{slo}\right)}_{\text{flow losses}} \tag{40}
$$
|
| 600 |
+
|
| 601 |
+
# E. Additional Training Details
|
| 602 |
+
|
| 603 |
+
We divide City-1M into 48 cells using camera-based k-means clustering. Each cell covers $2.9\, \text{km}^2$ and $32\, \text{k}$ frames across 98 videos on average. We evaluate the effect of geographic coverage and number of frames/videos on cell quality in Table 5. We train with 1 A100 (40 GB) GPU per cell for 2 days (same for each KITTI scene). We can fit all cells on a single A100 at inference time.
|
| 604 |
+
|
| 605 |
+
# F. Assets
|
| 606 |
+
|
| 607 |
+
City-1M. Our dataset is constructed from street-level videos collected across a vehicle fleet with seven ring cameras that collect $2048 \times 1550$ resolution images at $20 \mathrm{~Hz}$ with a combined $360^{\circ}$ field of view. Both VLP-32C LiDAR sensors are synchronized with the cameras and produce point clouds with 100,000 points at $10 \mathrm{~Hz}$ on average. We localize camera poses using a combination of GPS-based and sensor-based methods.
|
| 608 |
+
|
| 609 |
+

|
| 610 |
+
|
| 611 |
+

|
| 612 |
+
Figure 7. Tracking. We track keypoints (above) and instance masks (below) across several frames. As a 3D representation, SUDS can track correspondences through 2D occluders.
|
| 613 |
+
|
| 614 |
+

|
| 615 |
+
|
| 616 |
+

|
| 617 |
+
|
| 618 |
+

|
| 619 |
+
|
| 620 |
+

|
| 621 |
+
|
| 622 |
+

|
| 623 |
+
|
| 624 |
+

|
| 625 |
+
|
| 626 |
+

|
| 627 |
+
|
| 628 |
+

|
| 629 |
+
|
| 630 |
+
<table><tr><td>Images</td><td>≤ 15k</td><td>15-30k</td><td>30-45k</td><td>≥ 45k</td></tr><tr><td>↑PSNR</td><td>22.86</td><td>21.99</td><td>21.35</td><td>20.75</td></tr><tr><td>↑SSIM</td><td>0.583</td><td>0.569</td><td>0.557</td><td>0.538</td></tr><tr><td>↓LPIPS</td><td>0.516</td><td>0.545</td><td>0.564</td><td>0.578</td></tr></table>
<table><tr><td>Videos</td><td>≤ 60</td><td>60-90</td><td>90-120</td><td>≥ 120</td></tr><tr><td>↑PSNR</td><td>22.47</td><td>21.72</td><td>21.68</td><td>21.11</td></tr><tr><td>↑SSIM</td><td>0.587</td><td>0.556</td><td>0.559</td><td>0.555</td></tr><tr><td>↓LPIPS</td><td>0.526</td><td>0.557</td><td>0.557</td><td>0.565</td></tr></table>
<table><tr><td>Area</td><td>≤ 2 km²</td><td>2-3 km²</td><td>3-4 km²</td><td>≥ 4 km²</td></tr><tr><td>↑PSNR</td><td>22.73</td><td>21.47</td><td>21.53</td><td>22.18</td></tr><tr><td>↑SSIM</td><td>0.609</td><td>0.556</td><td>0.561</td><td>0.557</td></tr><tr><td>↓LPIPS</td><td>0.512</td><td>0.564</td><td>0.555</td><td>0.536</td></tr></table>
|
| 631 |
+
|
| 632 |
+
Table 5. City-1M scaling. We evaluate the effect of geographic coverage and the number of images and videos on cell quality. Although performance degrades sublinearly across all metrics, image and video counts have the largest impact.
|
| 633 |
+
|
| 634 |
+
Third-party assets. We primarily base the SUDS implementation on Nerfstudio [48] and tiny-cuda-nn [34] along with various utilities from OpenCV [8], scikit-learn [9], and Amir et al.'s feature extractor implementation [5], all of which are freely available for noncommercial use. KITTI [21] is similarly available under an Apache license, whereas VKITTI2 [18] uses the noncommercial CC BY-NC-SA 3.0 license.
|
| 635 |
+
|
| 636 |
+
# G. Limitations
|
| 637 |
+
|
| 638 |
+
Video boundaries. Although our global representation of static geometry is consistent across all videos used for reconstruction, all dynamic objects are video-specific. Put otherwise, our method does not allow us to extrapolate the movement of objects outside of the boundaries of videos from which they were captured, nor does it provide a straightforward way of rendering dynamic visuals at boundaries where camera rays intersect regions with training data originating from disjoint video sequences.
|
| 639 |
+
|
| 640 |
+
Camera accuracy. Accurate camera extrinsics and intrinsics are arguably the largest contributors to high NeRF rendering quality. Although multiple efforts [12, 23, 30, 32, 55] attempt to jointly optimize camera parameters during
|
| 641 |
+
|
| 642 |
+
NeRF optimization, we found the results lacking relative to using offline structure-from-motion based approaches as a preprocessing step.
|
| 643 |
+
|
| 644 |
+
Flow quality. Although our method tolerates some degree of noisiness in the supervisory optical flow input, high-quality flow still has a measurable impact on model performance (and completely incorrect supervision degrades quality). We also assume that flow is linear between observed timestamps to simplify our scene flow representation.
|
| 645 |
+
|
| 646 |
+
Resources. Modeling city scale requires a large amount of dataset preprocessing, including, but not limited to: extracting DINO features, computing optical flow, deriving normalized coordinate bounds, and storing randomized batches of training data to disk. Collectively, our intermediate representation required more than 20TB of storage even after compression.
|
| 647 |
+
|
| 648 |
+
Shadows. SUDS attempts to disentangle shadows underneath transient objects. However, if a shadow is present in all observations for a given location (e.g., a parking spot that is always occupied, even if by different cars), SUDS may attribute the darkness to the static topology, as evidenced in several of our videos, even if the origin of the shadow is
|
| 649 |
+
|
| 650 |
+
correctly assigned to the dynamic branch.
|
| 651 |
+
|
| 652 |
+
Instance-level tasks. Although we provide initial qualitative results on instance-level tasks as a first step towards true 3D segmentation backed by neural radiance fields, SUDS is not competitive with conventional approaches.
|
| 653 |
+
|
| 654 |
+
# H. Societal Impact
|
| 655 |
+
|
| 656 |
+
As SUDS attempts to model dynamic urban scenes with pedestrians and vehicles, our approach carries surveillance and privacy concerns related to the intentional or inadvertent capture of privacy-sensitive information such as human faces and vehicle license plate numbers. As we distill semantic knowledge into SUDS, we are able to (imperfectly) filter out either entire categories (people) or components (faces) at render time. However, this information would still reside in the model itself. This could in turn be mitigated by preprocessing the input data used to train the model.
|
2303.14xxx/2303.14536/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b95c530cb195f081049d2869a49d68b8e9543cbe94f347521e40fb094ae590d2
|
| 3 |
+
size 830100
|
2303.14xxx/2303.14536/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14541/e7df9d3e-9a6b-46f5-8469-0690470eefdd_content_list.json
ADDED
|
@@ -0,0 +1,1606 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "UnScene3D: Unsupervised 3D Instance Segmentation for Indoor Scenes",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
124,
|
| 8 |
+
130,
|
| 9 |
+
846,
|
| 10 |
+
152
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "David Rozenberszki<sup>1</sup> Or Litany<sup>2,3</sup> Angela Dai<sup>1</sup>",
|
| 17 |
+
"bbox": [
|
| 18 |
+
264,
|
| 19 |
+
179,
|
| 20 |
+
699,
|
| 21 |
+
199
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "$^{1}$ Technical University of Munich $^{2}$ Technion $^{3}$ NVIDIA",
|
| 28 |
+
"bbox": [
|
| 29 |
+
254,
|
| 30 |
+
204,
|
| 31 |
+
710,
|
| 32 |
+
223
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "https://rozdavid.github.io/unscene3d",
|
| 39 |
+
"bbox": [
|
| 40 |
+
336,
|
| 41 |
+
229,
|
| 42 |
+
627,
|
| 43 |
+
248
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "image",
|
| 49 |
+
"img_path": "images/badfebd0016669321b354cc667f061a7a412843fbe18dbcf06361b23d88503d2.jpg",
|
| 50 |
+
"image_caption": [
|
| 51 |
+
"Figure 1. We propose UnScene3D, a fully-unsupervised 3D instance segmentation method, effectively separating semantic instances without requiring any manual annotations. We utilize geometric primitives to ensure crisp masks, and due to our self-training loop, we can also obtain a dense set of predictions, even in cluttered indoor scenarios."
|
| 52 |
+
],
|
| 53 |
+
"image_footnote": [],
|
| 54 |
+
"bbox": [
|
| 55 |
+
83,
|
| 56 |
+
263,
|
| 57 |
+
883,
|
| 58 |
+
441
|
| 59 |
+
],
|
| 60 |
+
"page_idx": 0
|
| 61 |
+
},
|
| 62 |
+
{
|
| 63 |
+
"type": "text",
|
| 64 |
+
"text": "Abstract",
|
| 65 |
+
"text_level": 1,
|
| 66 |
+
"bbox": [
|
| 67 |
+
233,
|
| 68 |
+
502,
|
| 69 |
+
313,
|
| 70 |
+
517
|
| 71 |
+
],
|
| 72 |
+
"page_idx": 0
|
| 73 |
+
},
|
| 74 |
+
{
|
| 75 |
+
"type": "text",
|
| 76 |
+
"text": "3D instance segmentation is fundamental to geometric understanding of the world around us. Existing methods for instance segmentation of 3D scenes rely on supervision from expensive, manual 3D annotations. We propose UnScene3D, the first fully unsupervised 3D learning approach for class-agnostic 3D instance segmentation of indoor scans. UnScene3D first generates pseudo masks by leveraging self-supervised color and geometry features to find potential object regions. We operate on a basis of geometric oversegmentation, enabling efficient representation and learning on high-resolution 3D data. The coarse proposals are then refined through self-training our model on its predictions. Our approach improves over clustering-based alternatives to unsupervised 3D instance segmentation methods by more than $300\\%$ Average Precision score, demonstrating effective instance segmentation even in challenging, cluttered 3D scenes.",
|
| 77 |
+
"bbox": [
|
| 78 |
+
73,
|
| 79 |
+
536,
|
| 80 |
+
472,
|
| 81 |
+
792
|
| 82 |
+
],
|
| 83 |
+
"page_idx": 0
|
| 84 |
+
},
|
| 85 |
+
{
|
| 86 |
+
"type": "text",
|
| 87 |
+
"text": "1. Introduction",
|
| 88 |
+
"text_level": 1,
|
| 89 |
+
"bbox": [
|
| 90 |
+
76,
|
| 91 |
+
828,
|
| 92 |
+
209,
|
| 93 |
+
843
|
| 94 |
+
],
|
| 95 |
+
"page_idx": 0
|
| 96 |
+
},
|
| 97 |
+
{
|
| 98 |
+
"type": "text",
|
| 99 |
+
"text": "The increasing availability of commodity RGB-D sensors, now widely available on iPhones as well as with the Microsoft Kinect or Intel RealSense, has enabled consumer",
|
| 100 |
+
"bbox": [
|
| 101 |
+
75,
|
| 102 |
+
854,
|
| 103 |
+
468,
|
| 104 |
+
900
|
| 105 |
+
],
|
| 106 |
+
"page_idx": 0
|
| 107 |
+
},
|
| 108 |
+
{
|
| 109 |
+
"type": "text",
|
| 110 |
+
"text": "level capture of 3D geometry of real-world environments. To enable applications in robotics, autonomous navigation, and mixed reality in such scenes, semantic 3D scene understanding is necessary. In particular, 3D instance segmentation is critical to 3D perception, providing dense instance mask predictions, thus enabling physical and geometric reasoning about objects in an environment. While various 3D deep learning approaches have been developed for 3D instance segmentation [5, 14, 17, 18, 21, 22, 30, 32, 42-45, 50-52, 54, 57, 58], they require full supervision from expensive, manual, dense annotations on 3D scenes.",
|
| 111 |
+
"bbox": [
|
| 112 |
+
496,
|
| 113 |
+
503,
|
| 114 |
+
890,
|
| 115 |
+
669
|
| 116 |
+
],
|
| 117 |
+
"page_idx": 0
|
| 118 |
+
},
|
| 119 |
+
{
|
| 120 |
+
"type": "text",
|
| 121 |
+
"text": "We introduce UnScene3D, a novel approach designed for class-agnostic 3D instance segmentation. Our aim is to identify objects in real-world 3D scans by predicting their dense instance masks, without any constraints to a predefined set of class categories. Moreover, we avoid expensive data annotation requirements by operating in an unsupervised fashion, instead leveraging self-supervised 2D and 3D features for segmentation.",
|
| 122 |
+
"bbox": [
|
| 123 |
+
496,
|
| 124 |
+
671,
|
| 125 |
+
892,
|
| 126 |
+
792
|
| 127 |
+
],
|
| 128 |
+
"page_idx": 0
|
| 129 |
+
},
|
| 130 |
+
{
|
| 131 |
+
"type": "text",
|
| 132 |
+
"text": "UnScene3D comprises two essential elements. First, we observe that for RGB-D scan data, self-supervised representation learning methods [19, 60] can provide an innate signal indicating object-ness through feature similarity. We thus generate pseudo masks over 3D segment primitives, based on multimodal analysis of self-supervised color and geometry features from the RGB-D data. By considering",
|
| 133 |
+
"bbox": [
|
| 134 |
+
496,
|
| 135 |
+
795,
|
| 136 |
+
893,
|
| 137 |
+
902
|
| 138 |
+
],
|
| 139 |
+
"page_idx": 0
|
| 140 |
+
},
|
| 141 |
+
{
|
| 142 |
+
"type": "aside_text",
|
| 143 |
+
"text": "arXiv:2303.14541v2 [cs.CV] 30 Apr 2024",
|
| 144 |
+
"bbox": [
|
| 145 |
+
19,
|
| 146 |
+
261,
|
| 147 |
+
60,
|
| 148 |
+
705
|
| 149 |
+
],
|
| 150 |
+
"page_idx": 0
|
| 151 |
+
},
|
| 152 |
+
{
|
| 153 |
+
"type": "text",
|
| 154 |
+
"text": "mesh segments rather than voxels or points, our approach efficiently scales with high-resolution 3D data in large scene environments while inherently promoting contiguous segmentation masks. As we require strong features for these initial coarse estimates, we fuse information from both geometric and 2D color features in a complementary fashion. Second, following the pseudo mask generation, we train our model through iterative self-training on both the initial pseudo masks and the current confident model predictions. Through multiple rounds of self-training with noise robust losses achieve improved object recognition and segmentation. At inference time, we do not require any 2D color signal and can produce class-agnostic 3D instance segmentation for a new geometric observation of a 3D environment. Experiments on challenging, cluttered indoor environments from the ScanNet [10], S3DIS [1] and ARKit [2] datasets show that UnScene3D improves significantly over unsupervised, clustering-based state of the art. In summary, our contributions are:",
|
| 155 |
+
"bbox": [
|
| 156 |
+
76,
|
| 157 |
+
90,
|
| 158 |
+
472,
|
| 159 |
+
377
|
| 160 |
+
],
|
| 161 |
+
"page_idx": 1
|
| 162 |
+
},
|
| 163 |
+
{
|
| 164 |
+
"type": "list",
|
| 165 |
+
"sub_type": "text",
|
| 166 |
+
"list_items": [
|
| 167 |
+
"- We propose an unsupervised 3D instance segmentation approach for indoor RGB-D scans, without requiring any human annotation.",
|
| 168 |
+
"- We generate sparse 3D pseudo masks for unsupervised training based on a multi-modal fusion of color and geometric signal from RGB-D scan data. We achieve robustness and efficiency through a geometry-aware scene coarsening.",
|
| 169 |
+
"- Our generated pseudo masks are iteratively refined by self-training for 3D instances to improve 3D instance segmentation performance."
|
| 170 |
+
],
|
| 171 |
+
"bbox": [
|
| 172 |
+
76,
|
| 173 |
+
378,
|
| 174 |
+
468,
|
| 175 |
+
544
|
| 176 |
+
],
|
| 177 |
+
"page_idx": 1
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"type": "text",
|
| 181 |
+
"text": "2. Related Work",
|
| 182 |
+
"text_level": 1,
|
| 183 |
+
"bbox": [
|
| 184 |
+
76,
|
| 185 |
+
556,
|
| 186 |
+
218,
|
| 187 |
+
571
|
| 188 |
+
],
|
| 189 |
+
"page_idx": 1
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"type": "text",
|
| 193 |
+
"text": "Self-supervised 3D pretraining While significant progress has been made in fully supervised 3D instance segmentation [8, 14, 16-18, 20, 42, 44, 50, 51] the amount of densely annotated 3D data is scarce. Inspired by success in the 2D domain, various 3D pretraining methods have been developed to boost semantic and instance segmentation performance when fine-tuning with annotated semantic labels. Such methods leverage instance discrimination based on different camera views [19, 60], local augmentations [62], or multiple LIDAR sweeps [39]. While these methods can provide powerful 3D feature extraction, they do not construct any notion of object instances.",
|
| 194 |
+
"bbox": [
|
| 195 |
+
76,
|
| 196 |
+
580,
|
| 197 |
+
472,
|
| 198 |
+
763
|
| 199 |
+
],
|
| 200 |
+
"page_idx": 1
|
| 201 |
+
},
|
| 202 |
+
{
|
| 203 |
+
"type": "text",
|
| 204 |
+
"text": "Weakly-supervised 3D segmentation Classical methods have leveraged object template information to match or retrieve templates to local geometry in a scene [4, 25, 28, 31, 36, 37], thereby identifying potential object locations. Other methods formulated 3D dense instance segmentation with only 3D box annotation [6, 41] or single-point supervision and active-learning [34, 53]. More recent methods have focused on exploiting knowledge from powerful pre-trained",
|
| 205 |
+
"bbox": [
|
| 206 |
+
76,
|
| 207 |
+
779,
|
| 208 |
+
472,
|
| 209 |
+
902
|
| 210 |
+
],
|
| 211 |
+
"page_idx": 1
|
| 212 |
+
},
|
| 213 |
+
{
|
| 214 |
+
"type": "text",
|
| 215 |
+
"text": "vision-language models to inform text-guided queries in 3D scenes [12, 24, 33, 40, 46]; however, such methods still rely on large-scale annotated data in the 2D domain.",
|
| 216 |
+
"bbox": [
|
| 217 |
+
498,
|
| 218 |
+
90,
|
| 219 |
+
890,
|
| 220 |
+
137
|
| 221 |
+
],
|
| 222 |
+
"page_idx": 1
|
| 223 |
+
},
|
| 224 |
+
{
|
| 225 |
+
"type": "text",
|
| 226 |
+
"text": "Clustering-based segmentation There has been very little work done in fully unsupervised 3D instance segmentation, but classical clustering methods have been used to group regions with similar geometric properties together. A particularly notable approach is the density-based clustering of DBSCAN [13] and its hierarchical counterpart HDBSCAN [35]. These methods can be used to group point clusters in a 3D scene based on point normals and colors. The ScanNet dataset [10] showed that the Felzenswalb algorithm [15] originally developed for image over-segmentation, can generate useful geometric segment clusters. We also exploit such geometric primitives to guide dimensionality reduction and feature aggregation.",
|
| 227 |
+
"bbox": [
|
| 228 |
+
496,
|
| 229 |
+
169,
|
| 230 |
+
890,
|
| 231 |
+
367
|
| 232 |
+
],
|
| 233 |
+
"page_idx": 1
|
| 234 |
+
},
|
| 235 |
+
{
|
| 236 |
+
"type": "text",
|
| 237 |
+
"text": "Finally, recent methods have been developed to detect instances with self-supervised pretrained features in driving scenarios. These methods often leverage the unique properties of such data including dynamics and instance sparsity. Song et. al. [48] identify object instances through motion, showing promise for self-driving scenarios, but limited to moving objects. Nunes et. al. [38] additionally propose a clustering and graph cut based refinement on pre-trained 3D features, focusing on sparse outdoor scenarios to identify spatially separate objects. Our solution aims to segments instances in complex, cluttered indoor environments.",
|
| 238 |
+
"bbox": [
|
| 239 |
+
496,
|
| 240 |
+
369,
|
| 241 |
+
892,
|
| 242 |
+
536
|
| 243 |
+
],
|
| 244 |
+
"page_idx": 1
|
| 245 |
+
},
|
| 246 |
+
{
|
| 247 |
+
"type": "text",
|
| 248 |
+
"text": "Unsupervised 2D instance segmentation Classical graph-cut algorithms [7, 11, 47, 59] can be used to detect objects in scenes, employing low-level feature clustering to identify self-similar regions. Recent advances in self-supervised feature learning have been employed in 2D unsupervised instance segmentation methods, which use two-stage training pipelines to achieve remarkable segmentation results [55, 56]. These methods first generate a set of coarse pseudo masks building on the insights of graph-cut algorithms and then refine them with a series of self-training iterations. In particular, FreeSolo [55] uses multi-branch feature extraction to obtain self-similar regions as mask proposals, producing a dense set of initial pseudo-annotated instances. CutLER [56] uses the normalized cut (NCut) algorithm [47] with deep self-supervised features from DINO [3] to identify multiple prominent regions as pseudo masks. Inspired by such approaches we also leverage pseudo mask generation and self-training, but to handle high-dimensional, noisy real-world 3D scan data, we employ a multi-modal feature reasoning and geometric graph coarsening for robust unsupervised 3D instance segmentation.",
|
| 249 |
+
"bbox": [
|
| 250 |
+
496,
|
| 251 |
+
568,
|
| 252 |
+
893,
|
| 253 |
+
902
|
| 254 |
+
],
|
| 255 |
+
"page_idx": 1
|
| 256 |
+
},
|
| 257 |
+
{
|
| 258 |
+
"type": "text",
|
| 259 |
+
"text": "3. Method",
|
| 260 |
+
"text_level": 1,
|
| 261 |
+
"bbox": [
|
| 262 |
+
76,
|
| 263 |
+
89,
|
| 264 |
+
168,
|
| 265 |
+
104
|
| 266 |
+
],
|
| 267 |
+
"page_idx": 2
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "text",
|
| 271 |
+
"text": "Problem definition We propose an unsupervised learning-based method for 3D instance segmentation. We operate on a set of training 3D scenes $\\{X_i\\}_{i=1}^{n_t}$ , represented as mesh graphs $G = (V, E)$ , of vertices $V$ and triangular face edges $E$ , where each scene $X_i$ contains an unknown set of $n_i$ objects in the $i^{th}$ scene. We aim to train a model that can predict for a previously unseen input scene $X$ , a set of 3D masks representing the different object instances in that scene.",
|
| 272 |
+
"bbox": [
|
| 273 |
+
75,
|
| 274 |
+
117,
|
| 275 |
+
472,
|
| 276 |
+
253
|
| 277 |
+
],
|
| 278 |
+
"page_idx": 2
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "text",
|
| 282 |
+
"text": "Method overview In order to achieve unsupervised 3D instance segmentation we first break down the scenes into $N$ geometric primitives $S_{N}$ , which we use to initialize an adjacency matrix $W$ to extract an initial set of pseudo masks $M^0$ , representing instance hypotheses based on combining 2D and 3D inputs $\\mathcal{F}_{2D} / \\mathcal{F}_{3D} \\in R^{N \\times D_{2D/3D}}$ , where $D_{2D}, D_{3D}$ are the dimensions of the $2D/3D$ self-supervised features. We regularize the per-segment similarities over geometric primitives for mitigating noise and enabling efficient 3D reasoning. We then employ a series of self-training cycles, updating pseudo mask supervision with new predicted masks, in order to produce final 3D instances. An overview of our approach is shown in Figure 2.",
|
| 283 |
+
"bbox": [
|
| 284 |
+
75,
|
| 285 |
+
277,
|
| 286 |
+
472,
|
| 287 |
+
474
|
| 288 |
+
],
|
| 289 |
+
"page_idx": 2
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "text",
|
| 293 |
+
"text": "3.1. Initial pseudo mask generation",
|
| 294 |
+
"text_level": 1,
|
| 295 |
+
"bbox": [
|
| 296 |
+
76,
|
| 297 |
+
492,
|
| 298 |
+
351,
|
| 299 |
+
508
|
| 300 |
+
],
|
| 301 |
+
"page_idx": 2
|
| 302 |
+
},
|
| 303 |
+
{
|
| 304 |
+
"type": "text",
|
| 305 |
+
"text": "In order to initiate self-training, we first generate an initial set of pseudo masks, leveraging complementary information from 2D and 3D signal in $\\{X_{i}\\}$ .",
|
| 306 |
+
"bbox": [
|
| 307 |
+
75,
|
| 308 |
+
518,
|
| 309 |
+
470,
|
| 310 |
+
565
|
| 311 |
+
],
|
| 312 |
+
"page_idx": 2
|
| 313 |
+
},
|
| 314 |
+
{
|
| 315 |
+
"type": "text",
|
| 316 |
+
"text": "3.1.1 Feature aggregation",
|
| 317 |
+
"text_level": 1,
|
| 318 |
+
"bbox": [
|
| 319 |
+
76,
|
| 320 |
+
599,
|
| 321 |
+
272,
|
| 322 |
+
614
|
| 323 |
+
],
|
| 324 |
+
"page_idx": 2
|
| 325 |
+
},
|
| 326 |
+
{
|
| 327 |
+
"type": "text",
|
| 328 |
+
"text": "To encourage effective initial pseudo mask generation, we employ joint reasoning across both self-supervised color and geometry features, as they can provide complementary information regarding objects. As RGB-D scans often contain color image information and reconstructed 3D scan geometry, we can associate both 2D and 3D features in 3D by back-projecting the 2D extracted features using the corresponding depth and camera pose information for each image. Both 2D and 3D features are extracted through state-of-the-art self-supervised feature learning methods [3, 19]. As real-world camera estimation often contains small misalignment errors and noise or oversmoothing in reconstructed scan geometry, these self-supervised features can often also contain high-frequency noise, which we address in Sec. 3.1.2 when reasoning over these features. Note that while we employ both 2D and 3D signal when available for training, we do not require any aligned color image inputs for inference, enabling more general applicability.",
|
| 329 |
+
"bbox": [
|
| 330 |
+
75,
|
| 331 |
+
628,
|
| 332 |
+
472,
|
| 333 |
+
902
|
| 334 |
+
],
|
| 335 |
+
"page_idx": 2
|
| 336 |
+
},
|
| 337 |
+
{
|
| 338 |
+
"type": "text",
|
| 339 |
+
"text": "3.1.2 3D Graph Cut",
|
| 340 |
+
"text_level": 1,
|
| 341 |
+
"bbox": [
|
| 342 |
+
500,
|
| 343 |
+
90,
|
| 344 |
+
656,
|
| 345 |
+
106
|
| 346 |
+
],
|
| 347 |
+
"page_idx": 2
|
| 348 |
+
},
|
| 349 |
+
{
|
| 350 |
+
"type": "text",
|
| 351 |
+
"text": "To generate pseudo masks from the 2D and 3D self-supervised features, we employ graph cut to estimate class-agnostic instances from the background. More precisely, we leverage the principle of Normalized Cut [47] (NCut), which employs eigenvalue decomposition from an adjacency matrix $W \\in R^{N \\times N}$ over a graph to identify self-similar regions potentially representing semantic instances, where a set of potential instances can be extracted iteratively. Given a graph representing the 3D scene, we build an adjacency matrix $W$ and self-supervised features with a corresponding degree matrix $D \\in R^{N \\times N}$ , where $D(i, i) = \\Sigma_j W(i, j)$ and $(D - W)v = \\lambda Dv$ . In this system, finding the second smallest eigenvalue $\\lambda$ and its corresponding eigenvector $v$ is a close approximation for the minimized cost. From $v$ , we obtain foreground separation by taking all node activations where the eigenvector components were larger than their mean. To identify multiple foreground objects, this process is repeated iteratively.",
|
| 352 |
+
"bbox": [
|
| 353 |
+
496,
|
| 354 |
+
116,
|
| 355 |
+
893,
|
| 356 |
+
388
|
| 357 |
+
],
|
| 358 |
+
"page_idx": 2
|
| 359 |
+
},
|
| 360 |
+
{
|
| 361 |
+
"type": "text",
|
| 362 |
+
"text": "Unfortunately, applying this approach directly to the 3D scenes $\\{X_{i}\\}$ in common 3D representations such as voxels or points is not only computationally infeasible, but unreliable due to the noise in camera pose estimation and geometric reconstruction of 3D scan data. Thus, we propose to regularize the graph cut across geometric primitives.",
|
| 363 |
+
"bbox": [
|
| 364 |
+
496,
|
| 365 |
+
388,
|
| 366 |
+
893,
|
| 367 |
+
481
|
| 368 |
+
],
|
| 369 |
+
"page_idx": 2
|
| 370 |
+
},
|
| 371 |
+
{
|
| 372 |
+
"type": "text",
|
| 373 |
+
"text": "3.1.3 Geometric Primitives",
|
| 374 |
+
"text_level": 1,
|
| 375 |
+
"bbox": [
|
| 376 |
+
498,
|
| 377 |
+
506,
|
| 378 |
+
702,
|
| 379 |
+
520
|
| 380 |
+
],
|
| 381 |
+
"page_idx": 2
|
| 382 |
+
},
|
| 383 |
+
{
|
| 384 |
+
"type": "text",
|
| 385 |
+
"text": "To employ efficient reasoning across high-dimensional 3D data and enable robust 3D regularization of noisy features, we propose to operate on geometric primitives acquired through a graph coarsening process. For a 3D scene $X_{i}$ we construct the graph $G = (V,E)$ where $V$ and $E$ being the mesh vertices and face edges. Then, nodes with similar normals and colors are aggregated and clustered based on the mesh topology following [15] and resulting in a set $S_{N} = \\{C_{1}\\dots C_{N}\\}$ and $\\bigcup (S_N) = V$ where $C_n$ represent a single primitive. This reduces the graph size by multiple orders of magnitude, and enables effective regularization of noise in the used self-supervised 2D and 3D features.",
|
| 386 |
+
"bbox": [
|
| 387 |
+
496,
|
| 388 |
+
531,
|
| 389 |
+
893,
|
| 390 |
+
714
|
| 391 |
+
],
|
| 392 |
+
"page_idx": 2
|
| 393 |
+
},
|
| 394 |
+
{
|
| 395 |
+
"type": "text",
|
| 396 |
+
"text": "3.1.4 NCut on Geometric Primitives",
|
| 397 |
+
"text_level": 1,
|
| 398 |
+
"bbox": [
|
| 399 |
+
498,
|
| 400 |
+
738,
|
| 401 |
+
767,
|
| 402 |
+
753
|
| 403 |
+
],
|
| 404 |
+
"page_idx": 2
|
| 405 |
+
},
|
| 406 |
+
{
|
| 407 |
+
"type": "text",
|
| 408 |
+
"text": "After addressing the challenge of dimensionality reduction and effectively mitigating speckle noise in our features using geometric primitives, we can leverage the capabilities of the Normalized Cut algorithm to achieve a clean partitioning of scene graphs. For this, we iteratively apply NCut to our aggregated features for the extraction of initial pseudo masks denoted as $M$ . Starting with an empty set $M^0 = \\{\\}$ , we iteratively compute the adjacency matrix over $S_N$ and retrieve the masks $m \\subset S_N$ . We start",
|
| 409 |
+
"bbox": [
|
| 410 |
+
496,
|
| 411 |
+
763,
|
| 412 |
+
893,
|
| 413 |
+
902
|
| 414 |
+
],
|
| 415 |
+
"page_idx": 2
|
| 416 |
+
},
|
| 417 |
+
{
|
| 418 |
+
"type": "image",
|
| 419 |
+
"img_path": "images/a318c5f8c1f7f1a1d9a802f5dd2ed2debf5e8eca734846ecee12e7614bdfa2bd.jpg",
|
| 420 |
+
"image_caption": [
|
| 421 |
+
"Figure 2. UnScene3D first generates a set of pseudo masks (top) to initiate self-training (bottom) for unsupervised 3D instance segmentation. We leverage features from 3D self-supervised pre-training in combination with 2D self-supervised features on an input mesh. These multi-modal features are then aggregated on geometric primitives, integrating low- and high-level signals for pseudo mask segmentation. These initial pseudo masks are then used as supervision for a 3D transformer-based model to produce updated instance masks that are integrated into the supervision of multiple self-training cycles. Finally, we obtain clean and dense instance segmentation without using any manual annotations."
|
| 422 |
+
],
|
| 423 |
+
"image_footnote": [],
|
| 424 |
+
"bbox": [
|
| 425 |
+
80,
|
| 426 |
+
89,
|
| 427 |
+
890,
|
| 428 |
+
361
|
| 429 |
+
],
|
| 430 |
+
"page_idx": 3
|
| 431 |
+
},
|
| 432 |
+
{
|
| 433 |
+
"type": "text",
|
| 434 |
+
"text": "from $N$ geometric segments with their corresponding $D$ -dimensional features $\\mathcal{F} \\in \\mathcal{R}^{N \\times D}$ , and construct the similarity matrix $A = \\text{sim}(\\mathcal{F})$ , where $\\text{sim}$ denotes cosine similarity. Additionally, for the multi-modal setup we calculate similarity matrices $A_{2D}$ and $A_{3D}$ independently and take their weighted average to obtain the final scores. Empirically, we found this to be more robust than direct feature fusion of the different modalities, due to their different statistical characteristics. We obtain $W_{j}$ introduced in Section 3.1.2 by thresholding $A$ at $\\tau_{cut}$ , where $j$ denotes the $j^{th}$ NCut iteration. Using $W_{j}$ , we solve for the second eigenvector $v_{j}$ and threshold it to retrieve the partition $m_{j}$ . We keep all separated foregrounds in $M^0$ , where for each upcoming iteration, we mask out the row and column vectors from $W_{j}$ , where $m_{i} \\in M^{0}$ was already accepted as a foreground instance and $i$ being the previous segment ids. This allows greedy separation of instances in order of confidence in every cut iteration. Examples of our generated pseudo masks are visualized in Figures 5 and 6.",
|
| 435 |
+
"bbox": [
|
| 436 |
+
75,
|
| 437 |
+
457,
|
| 438 |
+
472,
|
| 439 |
+
744
|
| 440 |
+
],
|
| 441 |
+
"page_idx": 3
|
| 442 |
+
},
|
| 443 |
+
{
|
| 444 |
+
"type": "text",
|
| 445 |
+
"text": "As the adjacency graph is unaware of the mesh connectivity, NCut often results in masks that span spatially separated scene regions. In 3D, we can leverage knowledge of physical distance and connectivity of $G$ to constrain masks to be contiguous in the coarsened scene connectivity graph. We thus filter masks $m_j$ that have separated components, keeping only the parts $\\tilde{m}_j$ that contain the item with the maximum absolute value in $v_j$ . Separation based on connectivity is performed before saving $\\tilde{m}_j$ into $M^0$ , thus allowing for repeated detection of the dropped part over the",
|
| 446 |
+
"bbox": [
|
| 447 |
+
75,
|
| 448 |
+
744,
|
| 449 |
+
472,
|
| 450 |
+
897
|
| 451 |
+
],
|
| 452 |
+
"page_idx": 3
|
| 453 |
+
},
|
| 454 |
+
{
|
| 455 |
+
"type": "text",
|
| 456 |
+
"text": "next NCut iterations. Finally, we iterate until the maximum number of instances $M^0 = \\{m_i\\}_{i=1}^{N_m}$ are obtained, or there are no segments left in the scene. Moreover, we favor generating a reliable set of masks at the cost of restricting to a sparse initial set (i.e., missing potential instances rather than generating noisy masks for them) through a stricter $\\tau_{cut}$ or lower number of instances.",
|
| 457 |
+
"bbox": [
|
| 458 |
+
496,
|
| 459 |
+
458,
|
| 460 |
+
893,
|
| 461 |
+
564
|
| 462 |
+
],
|
| 463 |
+
"page_idx": 3
|
| 464 |
+
},
|
| 465 |
+
{
|
| 466 |
+
"type": "text",
|
| 467 |
+
"text": "3.2. Self-Training",
|
| 468 |
+
"text_level": 1,
|
| 469 |
+
"bbox": [
|
| 470 |
+
498,
|
| 471 |
+
574,
|
| 472 |
+
640,
|
| 473 |
+
590
|
| 474 |
+
],
|
| 475 |
+
"page_idx": 3
|
| 476 |
+
},
|
| 477 |
+
{
|
| 478 |
+
"type": "text",
|
| 479 |
+
"text": "Our initial pseudo masks can provide a set of proposed instances $M^0$ ; however, these pseudo masks are quite sparse in the scenes and sometimes over- or under-split nearby instances. We thus refine the pseudo mask data through an iterative self-training strategy, producing final instance segmentation predictions $M'$ with more dense and complete instance proposals.",
|
| 480 |
+
"bbox": [
|
| 481 |
+
496,
|
| 482 |
+
598,
|
| 483 |
+
890,
|
| 484 |
+
703
|
| 485 |
+
],
|
| 486 |
+
"page_idx": 3
|
| 487 |
+
},
|
| 488 |
+
{
|
| 489 |
+
"type": "text",
|
| 490 |
+
"text": "We leverage a state-of-the-art 3D transformer-based backbone [45] for our self-training from pseudo mask data as mask-head supervision, while the class-head is collapsed to foreground and background classes. Through multiple training cycles we save the proposals of the $t^{th}$ iteration into $M^t$ , from the self-trained model, and save these masks as an extension to the original pseudo dataset obtaining $M^t \\supseteq M^0$ . From the second training iteration, we can extract the most confident $K$ predictions and sample these new instance proposals as an addition to the pseudo annotations. Further, we only accept new instances if the added information value is larger than the minimum threshold, measured by simple segment IoU scores. This way, we can effectively",
|
| 491 |
+
"bbox": [
|
| 492 |
+
496,
|
| 493 |
+
704,
|
| 494 |
+
892,
|
| 495 |
+
902
|
| 496 |
+
],
|
| 497 |
+
"page_idx": 3
|
| 498 |
+
},
|
| 499 |
+
{
|
| 500 |
+
"type": "text",
|
| 501 |
+
"text": "densify the originally sparse annotations, but without limiting the quality of the originally clean pseudo masks.",
|
| 502 |
+
"bbox": [
|
| 503 |
+
76,
|
| 504 |
+
90,
|
| 505 |
+
468,
|
| 506 |
+
122
|
| 507 |
+
],
|
| 508 |
+
"page_idx": 4
|
| 509 |
+
},
|
| 510 |
+
{
|
| 511 |
+
"type": "text",
|
| 512 |
+
"text": "3.3. Implementation Details",
|
| 513 |
+
"text_level": 1,
|
| 514 |
+
"bbox": [
|
| 515 |
+
76,
|
| 516 |
+
131,
|
| 517 |
+
294,
|
| 518 |
+
148
|
| 519 |
+
],
|
| 520 |
+
"page_idx": 4
|
| 521 |
+
},
|
| 522 |
+
{
|
| 523 |
+
"type": "text",
|
| 524 |
+
"text": "Backbones. We use a Res16UNet34C sparse-voxel UNet implemented in the MinkowskiEngine [8] for 3D pretrained feature extraction as well as for the 3D transformer during self-training. For the pretrained features we use our own trained weights of [19] for compatibility reasons.",
|
| 525 |
+
"bbox": [
|
| 526 |
+
76,
|
| 527 |
+
155,
|
| 528 |
+
468,
|
| 529 |
+
231
|
| 530 |
+
],
|
| 531 |
+
"page_idx": 4
|
| 532 |
+
},
|
| 533 |
+
{
|
| 534 |
+
"type": "text",
|
| 535 |
+
"text": "Self-training. We employ the 3D transformer architecture of [45], initialized from scratch. The first self-training cycle is trained for 600 epochs with a batch size of 8 until convergence, which takes $\\approx 3$ days on a single NVIDIA RTX A6000 GPU. Further self-training cycles are all initialized from the previous state and finetuned for an additional 50 epochs in $\\approx 4$ hours and for a total of 4 training cycles to produce the final set of instance predictions $S$ . For the Hungarian assignment, we take the original weighted combination of dice and binary cross-entropy losses and only apply the DropLoss condition in the backpropagation phase.",
|
| 536 |
+
"bbox": [
|
| 537 |
+
75,
|
| 538 |
+
251,
|
| 539 |
+
470,
|
| 540 |
+
417
|
| 541 |
+
],
|
| 542 |
+
"page_idx": 4
|
| 543 |
+
},
|
| 544 |
+
{
|
| 545 |
+
"type": "text",
|
| 546 |
+
"text": "4. Experiments",
|
| 547 |
+
"text_level": 1,
|
| 548 |
+
"bbox": [
|
| 549 |
+
76,
|
| 550 |
+
431,
|
| 551 |
+
209,
|
| 552 |
+
450
|
| 553 |
+
],
|
| 554 |
+
"page_idx": 4
|
| 555 |
+
},
|
| 556 |
+
{
|
| 557 |
+
"type": "text",
|
| 558 |
+
"text": "We demonstrate the effectiveness of UnScene3D for unsupervised class-agnostic 3D instance segmentation on challenging real-world 3D scan datasets containing a large diversity of objects and significant clutter. We train our method and all learned baselines on ScanNet [10], using the official train split. Note that no semantic annotation data is used for training, only the RGB-D reconstructions. Additionally, we show that our approach trained on ScanNet data can effectively transfer to class-agnostic 3D instance segmentation on ARKitScenes [2] data.",
|
| 559 |
+
"bbox": [
|
| 560 |
+
75,
|
| 561 |
+
457,
|
| 562 |
+
468,
|
| 563 |
+
609
|
| 564 |
+
],
|
| 565 |
+
"page_idx": 4
|
| 566 |
+
},
|
| 567 |
+
{
|
| 568 |
+
"type": "text",
|
| 569 |
+
"text": "Datasets. We train and evaluate UnScene3D on RGB-D scan data from ScanNet [10], using the official train split. We use the raw RGB images, and registered camera poses for training our approach, while the semantic annotations are used only for evaluation. We use the official ScanNet train split for both the pre-trained 3D features from [19] and our self-training iterations. We additionally evaluate our method on ARKitScenes [2], on an 884/120 train/test split of indoor LIDAR scans. For ARKitScenes, we use 3D pre-trained features from ScanNet, followed by pseudo mask generation and self-training on the ARKitScenes train scenes. We convert the LIDAR scan data to meshes with Poisson Surface Reconstruction [26, 27] prior to our graph coarsening. Note that all baselines using learned features are trained on the same ScanNet data as ours.",
|
| 570 |
+
"bbox": [
|
| 571 |
+
75,
|
| 572 |
+
618,
|
| 573 |
+
468,
|
| 574 |
+
845
|
| 575 |
+
],
|
| 576 |
+
"page_idx": 4
|
| 577 |
+
},
|
| 578 |
+
{
|
| 579 |
+
"type": "text",
|
| 580 |
+
"text": "Evaluation metrics. We evaluate class-agnostic 3D instance segmentation performance with the widely-used Average Precision score on the full-resolution mesh vertices.",
|
| 581 |
+
"bbox": [
|
| 582 |
+
76,
|
| 583 |
+
854,
|
| 584 |
+
468,
|
| 585 |
+
902
|
| 586 |
+
],
|
| 587 |
+
"page_idx": 4
|
| 588 |
+
},
|
| 589 |
+
{
|
| 590 |
+
"type": "table",
|
| 591 |
+
"img_path": "images/f4e7621534b76f02708abf058378e58229df39b24bb0f65697aedf3d6c5c8e9c.jpg",
|
| 592 |
+
"table_caption": [],
|
| 593 |
+
"table_footnote": [],
|
| 594 |
+
"table_body": "<table><tr><td>ScanNet</td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>HDBSCAN [35]</td><td>32.1</td><td>5.5</td><td>1.6</td></tr><tr><td>Nunes et al. [38]</td><td>30.5</td><td>7.3</td><td>2.3</td></tr><tr><td>Felzenswalb [15]</td><td>38.9</td><td>12.7</td><td>5.0</td></tr><tr><td>CutLER Projection [56]</td><td>7.0</td><td>0.2</td><td>0.3</td></tr><tr><td>Ours</td><td>58.5</td><td>32.2</td><td>15.9</td></tr></table>",
|
| 595 |
+
"bbox": [
|
| 596 |
+
511,
|
| 597 |
+
88,
|
| 598 |
+
883,
|
| 599 |
+
196
|
| 600 |
+
],
|
| 601 |
+
"page_idx": 4
|
| 602 |
+
},
|
| 603 |
+
{
|
| 604 |
+
"type": "text",
|
| 605 |
+
"text": "Table 1. Unsupervised class-agnostic 3D instance segmentation on ScanNet [10]. Our approach improves significantly over baselines (3x improvement in AP) due to our pseudo mask generation and self-training strategy.",
|
| 606 |
+
"bbox": [
|
| 607 |
+
498,
|
| 608 |
+
205,
|
| 609 |
+
893,
|
| 610 |
+
262
|
| 611 |
+
],
|
| 612 |
+
"page_idx": 4
|
| 613 |
+
},
|
| 614 |
+
{
|
| 615 |
+
"type": "text",
|
| 616 |
+
"text": "Following the strategy of the supervised benchmark [10] we report scores at IoU scores of $25\\%$ and $50\\%$ (AP@25, AP@50) and averaged over all overlaps between $[50\\%$ and $95\\%]$ at $5\\%$ steps (AP). Note that since predictions are class agnostic, all methods evaluate only instance mask AP values without considering any semantic class labels. For ScanNet, we evaluate against ground truth instance masks from the established 20-class benchmark. Since ARK-itScenes does not contain any ground truth instance mask annotations, we evaluate all methods qualitatively.",
|
| 617 |
+
"bbox": [
|
| 618 |
+
498,
|
| 619 |
+
273,
|
| 620 |
+
892,
|
| 621 |
+
428
|
| 622 |
+
],
|
| 623 |
+
"page_idx": 4
|
| 624 |
+
},
|
| 625 |
+
{
|
| 626 |
+
"type": "text",
|
| 627 |
+
"text": "Comparison to the state of the art. We evaluate our approach in comparison to state-of-the-art traditional clustering methods HDBSCAN [35] and Felzenszwalb's algorithm [15], in addition to the unsupervised approach of Nunes et. al. [38] leveraging learned feature clustering and refinement. All baselines are provided with input mesh vertices, colors, and normals, while our approach and Nunes et. al. also operate on sparse voxel scene representations. Table 1 and Figure 3 show comparisons on ScanNet data; our UnScene3D approach improves significantly over state of the art by effectively leveraging signal from self-supervised 3D features to guide our model through self-training. Note that since Nunes et. al. has been designed for outdoor applications, even while leveraging ScanNet-trained features, it uses ground removal and relies on physical object separation, making segmentation difficult in cluttered scenes.",
|
| 628 |
+
"bbox": [
|
| 629 |
+
496,
|
| 630 |
+
436,
|
| 631 |
+
892,
|
| 632 |
+
678
|
| 633 |
+
],
|
| 634 |
+
"page_idx": 4
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"type": "text",
|
| 638 |
+
"text": "Additionally, we demonstrate the importance of reasoning in 3D, and compare with a state-of-the-art unsupervised 2D instance segmentation approach CutLER [56] run on the RGB frames of the scans, and projected to 3D using the corresponding camera poses. Here, the difficulty lies in resolving view inconsistencies, occlusions, and lack of knowledge of geometric structure resulting in poor 3D segmentation performance despite plausible 2D proposals.",
|
| 639 |
+
"bbox": [
|
| 640 |
+
496,
|
| 641 |
+
678,
|
| 642 |
+
890,
|
| 643 |
+
800
|
| 644 |
+
],
|
| 645 |
+
"page_idx": 4
|
| 646 |
+
},
|
| 647 |
+
{
|
| 648 |
+
"type": "text",
|
| 649 |
+
"text": "Evaluation on other datasets We quantitatively evaluate UnScene3D on the Area_5 of the S3DIS dataset [1] using only 3D features pretrained on [10]. Comparison with 3D-only state-of-the-art can be seen in Table 2.",
|
| 650 |
+
"bbox": [
|
| 651 |
+
496,
|
| 652 |
+
809,
|
| 653 |
+
890,
|
| 654 |
+
869
|
| 655 |
+
],
|
| 656 |
+
"page_idx": 4
|
| 657 |
+
},
|
| 658 |
+
{
|
| 659 |
+
"type": "text",
|
| 660 |
+
"text": "We additionally compare with state of the art on ARK-ittScenes [2] data in Figure 7. Here we show only qualitative",
|
| 661 |
+
"bbox": [
|
| 662 |
+
498,
|
| 663 |
+
869,
|
| 664 |
+
890,
|
| 665 |
+
901
|
| 666 |
+
],
|
| 667 |
+
"page_idx": 4
|
| 668 |
+
},
|
| 669 |
+
{
|
| 670 |
+
"type": "image",
|
| 671 |
+
"img_path": "images/fce23a501b6d2af1b0910aa0245a424d9b8c1bfe20f44a4715ac76e679df88d1.jpg",
|
| 672 |
+
"image_caption": [
|
| 673 |
+
"Figure 3. Qualitative comparison on ScanNet [10] scenes with projected predictions from the 2D method CutLER [56], traditional clustering-based methods Felzenszwalb [15] and HDBSCAN [35], and the GraphCut-based cluster refinement method [38]. Our approach leverages strong pseudo mask prediction and a self-training strategy to produce cleaner, more accurate instance segmentation."
|
| 674 |
+
],
|
| 675 |
+
"image_footnote": [],
|
| 676 |
+
"bbox": [
|
| 677 |
+
83,
|
| 678 |
+
89,
|
| 679 |
+
883,
|
| 680 |
+
797
|
| 681 |
+
],
|
| 682 |
+
"page_idx": 5
|
| 683 |
+
},
|
| 684 |
+
{
|
| 685 |
+
"type": "table",
|
| 686 |
+
"img_path": "images/63174b1408d61287e59c90f6d58ca7f22d999920827bad5fa558876e9bd17f87.jpg",
|
| 687 |
+
"table_caption": [],
|
| 688 |
+
"table_footnote": [],
|
| 689 |
+
"table_body": "<table><tr><td>S3DIS</td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>HDBSCAN [35]</td><td>27.9</td><td>11.2</td><td>5.0</td></tr><tr><td>Felzenswalb [15]</td><td>23.5</td><td>10.7</td><td>5.0</td></tr><tr><td>Nunes et al. [38]</td><td>20.1</td><td>10.5</td><td>5.5</td></tr><tr><td>Ours</td><td>52.6</td><td>40.3</td><td>21.4</td></tr></table>",
|
| 690 |
+
"bbox": [
|
| 691 |
+
109,
|
| 692 |
+
88,
|
| 693 |
+
436,
|
| 694 |
+
179
|
| 695 |
+
],
|
| 696 |
+
"page_idx": 6
|
| 697 |
+
},
|
| 698 |
+
{
|
| 699 |
+
"type": "text",
|
| 700 |
+
"text": "results due to the absence of ground truth instance mask annotations. UnScene3D effectively produces cleaner, more accurate segmentations in these complex environments.",
|
| 701 |
+
"bbox": [
|
| 702 |
+
75,
|
| 703 |
+
250,
|
| 704 |
+
468,
|
| 705 |
+
295
|
| 706 |
+
],
|
| 707 |
+
"page_idx": 6
|
| 708 |
+
},
|
| 709 |
+
{
|
| 710 |
+
"type": "text",
|
| 711 |
+
"text": "UnScene3D as data-efficient pretraining UnScene3D is able to learn powerful object properties and dense segmentation even in a fully unsupervised fashion. We demonstrate the potential of our strong learned features for downstream 3D instance segmentation with limited annotated data. We follow the setup introduced by CSC [19] with limited reconstructions available for downstream fine-tuning. We show our method as a strong pretraining strategy in Figure 4, notably outperforming both training from scratch as well as the state-of-the-art 3D pretraining of CSC. For more details we refer to our supplementary material.",
|
| 712 |
+
"bbox": [
|
| 713 |
+
75,
|
| 714 |
+
305,
|
| 715 |
+
468,
|
| 716 |
+
472
|
| 717 |
+
],
|
| 718 |
+
"page_idx": 6
|
| 719 |
+
},
|
| 720 |
+
{
|
| 721 |
+
"type": "image",
|
| 722 |
+
"img_path": "images/041b8b28de2959157fa4e0d042c7bb6bf10b57d18941d61ed9df8152ed393e9a.jpg",
|
| 723 |
+
"image_caption": [
|
| 724 |
+
"Figure 4. Our unsupervised self-training produces strong 3D features that can serve as a powerful pretraining strategy for 3D instance segmentation in limited data scenarios. UnScene3D significantly outperforms state-of-the-art self-supervised 3D pretraining [19] on ScanNet instance segmentation."
|
| 725 |
+
],
|
| 726 |
+
"image_footnote": [],
|
| 727 |
+
"bbox": [
|
| 728 |
+
81,
|
| 729 |
+
486,
|
| 730 |
+
467,
|
| 731 |
+
659
|
| 732 |
+
],
|
| 733 |
+
"page_idx": 6
|
| 734 |
+
},
|
| 735 |
+
{
|
| 736 |
+
"type": "text",
|
| 737 |
+
"text": "What is the effect of multi-modal signal for pseudo mask generation? We evaluate the effect self-supervised color and geometry signals for generating pseudo annotations in Table 3. We consider using only self-supervised geometric features (3D), only self-supervised color features (2D) that are projected to the 3D scans, and both together (both). We find that the color and geometry provide complementary signals. We also note that color features are only used for the initial pseudo mask generation, during self-training iterations and test time only 3D features were used.",
|
| 738 |
+
"bbox": [
|
| 739 |
+
75,
|
| 740 |
+
757,
|
| 741 |
+
468,
|
| 742 |
+
909
|
| 743 |
+
],
|
| 744 |
+
"page_idx": 6
|
| 745 |
+
},
|
| 746 |
+
{
|
| 747 |
+
"type": "table",
|
| 748 |
+
"img_path": "images/79f36740ae84de5069dac8e790889567623243a92608a3187341593144eeaa5a.jpg",
|
| 749 |
+
"table_caption": [
|
| 750 |
+
"Table 2. Evaluation on S3DIS dataset (Area_5). UnScene3D is able to adapt to other datasets as well and shows a significant improvement over previous SOTA methods."
|
| 751 |
+
],
|
| 752 |
+
"table_footnote": [],
|
| 753 |
+
"table_body": "<table><tr><td></td><td>Modality</td><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP Final</td></tr><tr><td>FreeMask</td><td>3D</td><td>14.4</td><td>3.6</td><td>1.3</td><td>2.0</td></tr><tr><td>Ours</td><td>3D</td><td>45.4</td><td>16.7</td><td>9.2</td><td>13.3</td></tr><tr><td>FreeMask</td><td>2D</td><td>31.1</td><td>15.1</td><td>6.8</td><td>13.8</td></tr><tr><td>Ours</td><td>2D</td><td>51.3</td><td>21.8</td><td>9.4</td><td>15.7</td></tr><tr><td>FreeMask</td><td>both</td><td>23.7</td><td>10.1</td><td>5.7</td><td>12.1</td></tr><tr><td>Ours</td><td>both</td><td>52.9</td><td>23.2</td><td>10.4</td><td>15.9</td></tr></table>",
|
| 754 |
+
"bbox": [
|
| 755 |
+
501,
|
| 756 |
+
88,
|
| 757 |
+
906,
|
| 758 |
+
214
|
| 759 |
+
],
|
| 760 |
+
"page_idx": 6
|
| 761 |
+
},
|
| 762 |
+
{
|
| 763 |
+
"type": "text",
|
| 764 |
+
"text": "Table 3. We compare pseudo mask generation from 3D-only features (3D), color-only features (2D), and both color and geometry (both) signal, as well as with pseudo annotation generation algorithm FreeMask [55]. In this table we report method performances after a single iteration of self-training initialized from the different pseudo annotation methods and the final AP scores after 4 self-training iterations.",
|
| 765 |
+
"bbox": [
|
| 766 |
+
496,
|
| 767 |
+
224,
|
| 768 |
+
890,
|
| 769 |
+
321
|
| 770 |
+
],
|
| 771 |
+
"page_idx": 6
|
| 772 |
+
},
|
| 773 |
+
{
|
| 774 |
+
"type": "image",
|
| 775 |
+
"img_path": "images/210c437a909f49c4d872e82d0e097455d194b26b80aacc21ada892c836715ad2.jpg",
|
| 776 |
+
"image_caption": [
|
| 777 |
+
"Figure 5. Initial pseudo masks generated by UnScene3D in comparison with a 3D-lifted FreeMask [55]. FreeMask tends to produce a larger set of noisier pseudo masks, while we rely on a cleaner but sparser set for our self-training."
|
| 778 |
+
],
|
| 779 |
+
"image_footnote": [],
|
| 780 |
+
"bbox": [
|
| 781 |
+
506,
|
| 782 |
+
330,
|
| 783 |
+
885,
|
| 784 |
+
491
|
| 785 |
+
],
|
| 786 |
+
"page_idx": 6
|
| 787 |
+
},
|
| 788 |
+
{
|
| 789 |
+
"type": "text",
|
| 790 |
+
"text": "What is the effect of pseudo annotations? We also evaluate the effect of our pseudo mask generation in Table 3 and Figure 5, in comparison to the 3D adaptation of the FreeMask [55] approach operating on our geometric segments. FreeMask tends to estimate a larger but noisier set of initial pseudo masks, while our approach is focusing on a sparser set of more reliable pseudo masks and produces significantly better performance. The strong difference in performance can be explained by the nature of the samples. While a sparser set of examples can be extended with multiple iterations of self-training, noisy samples will propagate through the full pipeline, and thus directly degrade the final performance. Further details of our adaptations of the FreeMask 3D method can be found in our supplemental.",
|
| 791 |
+
"bbox": [
|
| 792 |
+
496,
|
| 793 |
+
585,
|
| 794 |
+
890,
|
| 795 |
+
799
|
| 796 |
+
],
|
| 797 |
+
"page_idx": 6
|
| 798 |
+
},
|
| 799 |
+
{
|
| 800 |
+
"type": "text",
|
| 801 |
+
"text": "What is the impact of self-training? We observe that while self-training iterations are always improving the qualitative performance, their effective added information value is saturating after a limited number of cycles. We report on Table 4 through the first 4 steps, and observe a significant relative improvement in both modalities.",
|
| 802 |
+
"bbox": [
|
| 803 |
+
496,
|
| 804 |
+
809,
|
| 805 |
+
890,
|
| 806 |
+
900
|
| 807 |
+
],
|
| 808 |
+
"page_idx": 6
|
| 809 |
+
},
|
| 810 |
+
{
|
| 811 |
+
"type": "image",
|
| 812 |
+
"img_path": "images/ee5c3e636ca1d38ce0e3c8f47728a6920599bcd840403367d8846add84b8af08.jpg",
|
| 813 |
+
"image_caption": [
|
| 814 |
+
"Figure 6. UnScene3D employs self-training to refine the initial sparse set of proposals. We can see consistent improvement over both the number of predicted instances and the quality of the instance masks. Here we show results using the pseudo annotations obtained from both modalities."
|
| 815 |
+
],
|
| 816 |
+
"image_footnote": [],
|
| 817 |
+
"bbox": [
|
| 818 |
+
84,
|
| 819 |
+
89,
|
| 820 |
+
467,
|
| 821 |
+
531
|
| 822 |
+
],
|
| 823 |
+
"page_idx": 7
|
| 824 |
+
},
|
| 825 |
+
{
|
| 826 |
+
"type": "table",
|
| 827 |
+
"img_path": "images/e15ecb57c25d74a8c03ef457b9c0848ca0fe6df1ca0386821af2362ba56fc5a6.jpg",
|
| 828 |
+
"table_caption": [],
|
| 829 |
+
"table_footnote": [],
|
| 830 |
+
"table_body": "<table><tr><td rowspan=\"2\"></td><td colspan=\"3\">3D Only</td><td colspan=\"3\">3D & 2D</td></tr><tr><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>\\( S^0 \\)pseudo masks</td><td>13.8</td><td>4.7</td><td>2</td><td>19.9</td><td>10.0</td><td>5.9</td></tr><tr><td>\\( 1^{st} Self-train \\)</td><td>45.4</td><td>16.7</td><td>9.2</td><td>52.9</td><td>23.2</td><td>10.4</td></tr><tr><td>\\( 2^{nd} Self-train \\)</td><td>50.0</td><td>24.1</td><td>12.0</td><td>56.5</td><td>29.8</td><td>15.0</td></tr><tr><td>\\( 3^{rd} Self-train \\)</td><td>52.2</td><td>25.8</td><td>12.8</td><td>58.8</td><td>31.9</td><td>15.9</td></tr><tr><td>\\( 4^{st} Self-train \\)</td><td>52.7</td><td>26.2</td><td>13.3</td><td>58.5</td><td>32.2</td><td>15.9</td></tr></table>",
|
| 831 |
+
"bbox": [
|
| 832 |
+
78,
|
| 833 |
+
616,
|
| 834 |
+
467,
|
| 835 |
+
713
|
| 836 |
+
],
|
| 837 |
+
"page_idx": 7
|
| 838 |
+
},
|
| 839 |
+
{
|
| 840 |
+
"type": "text",
|
| 841 |
+
"text": "Table 4. Multiple iterations of self-training significantly improve performance, saturating around 4 iterations.",
|
| 842 |
+
"bbox": [
|
| 843 |
+
75,
|
| 844 |
+
713,
|
| 845 |
+
468,
|
| 846 |
+
742
|
| 847 |
+
],
|
| 848 |
+
"page_idx": 7
|
| 849 |
+
},
|
| 850 |
+
{
|
| 851 |
+
"type": "text",
|
| 852 |
+
"text": "Limitations While UnScene3D offers a promising step towards unsupervised 3D instance segmentation, various limitations remain. We rely on a mesh representation for graph coarsening, but believe this could be extended to alternative representations through neighborhood reasoning. Additionally, our graph coarsening step may cause very small objects (e.g., pens, cell phones) to be missed in the pseudo annotation generation. Finally, employing a fixed set of pseudo masks from the initial stage that are used",
|
| 853 |
+
"bbox": [
|
| 854 |
+
75,
|
| 855 |
+
763,
|
| 856 |
+
470,
|
| 857 |
+
902
|
| 858 |
+
],
|
| 859 |
+
"page_idx": 7
|
| 860 |
+
},
|
| 861 |
+
{
|
| 862 |
+
"type": "image",
|
| 863 |
+
"img_path": "images/5fc9aab24e00873834188223d999fc378d13c6e4d1a168454ea7d0c1a41742b9.jpg",
|
| 864 |
+
"image_caption": [
|
| 865 |
+
"Figure 7. As UnScene3D does not require any human annotation, so we can also train and test our method on the ARKitScenes [2] dataset. We leverages 3D features followed by a series of self-training iterations for cleaner, more accurate instance segmentation. Qualitative results show consistently better results than our baselines."
|
| 866 |
+
],
|
| 867 |
+
"image_footnote": [],
|
| 868 |
+
"bbox": [
|
| 869 |
+
504,
|
| 870 |
+
90,
|
| 871 |
+
890,
|
| 872 |
+
415
|
| 873 |
+
],
|
| 874 |
+
"page_idx": 7
|
| 875 |
+
},
|
| 876 |
+
{
|
| 877 |
+
"type": "text",
|
| 878 |
+
"text": "through self-training could reinforce noisy predictions.",
|
| 879 |
+
"bbox": [
|
| 880 |
+
500,
|
| 881 |
+
526,
|
| 882 |
+
862,
|
| 883 |
+
542
|
| 884 |
+
],
|
| 885 |
+
"page_idx": 7
|
| 886 |
+
},
|
| 887 |
+
{
|
| 888 |
+
"type": "text",
|
| 889 |
+
"text": "5. Conclusion",
|
| 890 |
+
"text_level": 1,
|
| 891 |
+
"bbox": [
|
| 892 |
+
500,
|
| 893 |
+
559,
|
| 894 |
+
619,
|
| 895 |
+
574
|
| 896 |
+
],
|
| 897 |
+
"page_idx": 7
|
| 898 |
+
},
|
| 899 |
+
{
|
| 900 |
+
"type": "text",
|
| 901 |
+
"text": "We introduced UnScene3D, a novel approach towards achieving fully-unsupervised 3D instance segmentation in cluttered indoor scenes. Our approach effectively combined low-level geometric properties to regularize multi-modal self-supervised deep features for initial pseudo mask extraction, and our self-training notably improved performance by refining these proposals to a more complete, dense set of instances. As 3D instance segmentation is a crucial aspect of 3D scene understanding, UnScene3D's ability to achieve this without requiring any manual annotations opens up new possibilities for 3D semantic understanding.",
|
| 902 |
+
"bbox": [
|
| 903 |
+
496,
|
| 904 |
+
585,
|
| 905 |
+
890,
|
| 906 |
+
752
|
| 907 |
+
],
|
| 908 |
+
"page_idx": 7
|
| 909 |
+
},
|
| 910 |
+
{
|
| 911 |
+
"type": "text",
|
| 912 |
+
"text": "6. Acknowledgements",
|
| 913 |
+
"text_level": 1,
|
| 914 |
+
"bbox": [
|
| 915 |
+
500,
|
| 916 |
+
768,
|
| 917 |
+
687,
|
| 918 |
+
786
|
| 919 |
+
],
|
| 920 |
+
"page_idx": 7
|
| 921 |
+
},
|
| 922 |
+
{
|
| 923 |
+
"type": "text",
|
| 924 |
+
"text": "This project is funded by the Bavarian State Ministry of Science and the Arts and coordinated by the Bavarian Research Institute for Digital Transformation (bidt), the ERC Starting Grant SpatialSem (101076253), and supported in part by a Google research gift. Or Litany is a Taub fellow and is supported by the Azrieli Foundation Early Career Faculty Fellowship.",
|
| 925 |
+
"bbox": [
|
| 926 |
+
496,
|
| 927 |
+
794,
|
| 928 |
+
890,
|
| 929 |
+
902
|
| 930 |
+
],
|
| 931 |
+
"page_idx": 7
|
| 932 |
+
},
|
| 933 |
+
{
|
| 934 |
+
"type": "text",
|
| 935 |
+
"text": "References",
|
| 936 |
+
"text_level": 1,
|
| 937 |
+
"bbox": [
|
| 938 |
+
78,
|
| 939 |
+
89,
|
| 940 |
+
174,
|
| 941 |
+
106
|
| 942 |
+
],
|
| 943 |
+
"page_idx": 8
|
| 944 |
+
},
|
| 945 |
+
{
|
| 946 |
+
"type": "list",
|
| 947 |
+
"sub_type": "ref_text",
|
| 948 |
+
"list_items": [
|
| 949 |
+
"[1] Iro Armeni, Ozan Sener, Amir R. Zamir, Helen Jiang, Ioannis Brilakis, Martin Fischer, and Silvio Savarese. 3d semantic parsing of large-scale indoor spaces. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1534-1543, 2016. 2, 5",
|
| 950 |
+
"[2] Gilad Baruch, Zhuoyuan Chen, Afshin Dehghan, Tal Dimry, Yuri Feigin, Peter Fu, Thomas Gebauer, Brandon Joffe, Daniel Kurz, Arik Schwartz, and Elad Shulman. ARK-scenes - a diverse real-world dataset for 3d indoor scene understanding using mobile RGB-d data. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1), 2021. 2, 5, 8, 12, 13",
|
| 951 |
+
"[3] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2, 3, 16",
|
| 952 |
+
"[4] Kang Chen, Yu-Kun Lai, Yu-Xin Wu, Ralph Martin, and ShiMin Hu. Automatic semantic modeling of indoor scenes from low-quality rgb-d data using contextual information. ACM Transactions on Graphics, 33(6), 2014. 2",
|
| 953 |
+
"[5] Shaoyu Chen, Jiemin Fang, Qian Zhang, Wenyu Liu, and Xinggang Wang. Hierarchical aggregation for 3d instance segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15467-15476, 2021. 1",
|
| 954 |
+
"[6] Julian Chibane, Francis Engelmann, Tuan Anh Tran, and Gerard Pons-Moll. Box2mask: Weakly supervised 3d semantic instance segmentation using bounding boxes. In European Conference on Computer Vision (ECCV). Springer, 2022. 2",
|
| 955 |
+
"[7] Sunil Chopra and M. R. Rao. The partition problem. Mathematical Programming, 59:87-115, 1993. 2",
|
| 956 |
+
"[8] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3075-3084, 2019. 2, 5",
|
| 957 |
+
"[9] Angela Dai and Matthias Nießner. 3dmv: Joint 3d-multiview prediction for 3d semantic scene segmentation. In European Conference on Computer Vision, 2018. 12",
|
| 958 |
+
"[10] Angela Dai, Angel X. Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proc. Computer Vision and Pattern Recognition (CVPR), IEEE, 2017. 2, 5, 6, 12, 14",
|
| 959 |
+
"[11] Michel Deza and Monique Laurent. Geometry of cuts and metrics. In Algorithms and Combinatorics, 2009. 2",
|
| 960 |
+
"[12] Runyu Ding, Jihan Yang, Chuhui Xue, Wenqing Zhang, Song Bai, and Xiaojuan Qi. Pla: Language-driven open-vocabulary 3d scene understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 2",
|
| 961 |
+
"[13] Martin Ester, Hans-Peter Kriegel, Jörg Sander, and Xiaowei Xu. A density-based algorithm for discovering clusters in"
|
| 962 |
+
],
|
| 963 |
+
"bbox": [
|
| 964 |
+
78,
|
| 965 |
+
114,
|
| 966 |
+
470,
|
| 967 |
+
901
|
| 968 |
+
],
|
| 969 |
+
"page_idx": 8
|
| 970 |
+
},
|
| 971 |
+
{
|
| 972 |
+
"type": "list",
|
| 973 |
+
"sub_type": "ref_text",
|
| 974 |
+
"list_items": [
|
| 975 |
+
"large spatial databases with noise. In Proceedings of the Second International Conference on Knowledge Discovery and Data Mining, page 226-231. AAAI Press, 1996. 2",
|
| 976 |
+
"[14] Siqi Fan, Qiulei Dong, Fenghua Zhu, Yisheng Lv, Peijun Ye, and Fei-Yue Wang. Scf-net: Learning spatial contextual features for large-scale point cloud segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14504-14513, 2021. 1, 2",
|
| 977 |
+
"[15] Pedro F Felzenszwalb and Daniel P Huttenlocher. Efficient graph-based image segmentation. International journal of computer vision, 59:167-181, 2004. 2, 3, 5, 6, 7",
|
| 978 |
+
"[16] Benjamin Graham, Martin Engelcke, and Laurens van der Maaten. 3d semantic segmentation with submanifold sparse convolutional networks. CVPR, 2018. 2",
|
| 979 |
+
"[17] Lei Han, Tian Zheng, Lan Xu, and Lu Fang. Occuseg: Occupancy-aware 3d instance segmentation. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2937-2946, 2020. 1",
|
| 980 |
+
"[18] Ji Hou, Angela Dai, and Matthias Nießner. 3d-sis: 3d semantic instance segmentation of rgb-d scans. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4421-4430, 2019. 1, 2, 12",
|
| 981 |
+
"[19] Ji Hou, Benjamin Graham, Matthias Nießner, and Saining Xie. Exploring data-efficient 3d scene understanding with contrastive scene contexts. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15587-15597, 2021. 1, 2, 3, 5, 7, 12, 15, 16",
|
| 982 |
+
"[20] Ji Hou, Xiaoliang Dai, Zijian He, Angela Dai, and Matthias Nießner. Mask3d: Pre-training 2d vision transformers by learning masked 3d priors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13510-13519, 2023. 2, 12",
|
| 983 |
+
"[21] Qingyong Hu, Bo Yang, Linhai Xie, Stefano Rosa, Yulan Guo, Zhihua Wang, Niki Trigoni, and Andrew Markham. Randla-net: Efficient semantic segmentation of large-scale point clouds. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11108-11117, 2020. 1",
|
| 984 |
+
"[22] Le Hui, Linghua Tang, Yaqi Shen, Jin Xie, and Jian Yang. Learning superpoint graph cut for 3d instance segmentation. In NeurIPS, 2022. 1",
|
| 985 |
+
"[23] Maximilian Jaritz, Jiayuan Gu, and Hao Su. Multi-view pointnet for 3d scene understanding. In ICCV Workshop 2019, 2019. 12",
|
| 986 |
+
"[24] Krishna Murthy Jatavallabhula, Alihusein Kuwajerwala, Qiao Gu, Mohd Omama, Tao Chen, Shuang Li, Ganesh Iyer, Soroush Saryazdi, Nikhil Keetha, Ayush Tewari, Joshua B. Tenenbaum, Celso Miguel de Melo, Madhava Krishna, Liam Paull, Florian Shkurti, and Antonio Torralba. Conceptfusion: Open-set multimodal 3d mapping. arXiv, 2023. 2",
|
| 987 |
+
"[25] Andrej Karpathy, Stephen Miller, and Li Fei-Fei. Object discovery in 3d scenes via shape analysis. In 2013 IEEE international conference on robotics and automation, pages 2088–2095. IEEE, 2013. 2",
|
| 988 |
+
"[26] Michael Kazhdan and Hugues Hoppe. Screened poisson surface reconstruction. ACM Transactions on Graphics (ToG), 32(3):1-13, 2013. 5"
|
| 989 |
+
],
|
| 990 |
+
"bbox": [
|
| 991 |
+
503,
|
| 992 |
+
92,
|
| 993 |
+
893,
|
| 994 |
+
900
|
| 995 |
+
],
|
| 996 |
+
"page_idx": 8
|
| 997 |
+
},
|
| 998 |
+
{
|
| 999 |
+
"type": "list",
|
| 1000 |
+
"sub_type": "ref_text",
|
| 1001 |
+
"list_items": [
|
| 1002 |
+
"[27] Michael Kazhdan, Matthew Bolitho, and Hugues Hoppe. Poisson surface reconstruction. In Proceedings of the fourth Eurographics symposium on Geometry processing, page 0, 2006. 5",
|
| 1003 |
+
"[28] Young Min Kim, Niloy J Mitra, Dong-Ming Yan, and Leonidas Guibas. Acquiring 3d indoor environments with variability and repetition. ACM Transactions on Graphics (TOG), 31(6):1-11, 2012. 2",
|
| 1004 |
+
"[29] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 15",
|
| 1005 |
+
"[30] Maksim Kolodiazhnyi, Danila Rukhovich, Anna Vorontsova, and Anton Konushin. Top-down beats bottom-up in 3d instance segmentation, 2023. 1",
|
| 1006 |
+
"[31] Yangyan Li, Angela Dai, Leonidas Guibas, and Matthias Nießner. Database-assisted object retrieval for real-time 3d reconstruction. In Computer graphics forum, pages 435-446. Wiley Online Library, 2015. 2",
|
| 1007 |
+
"[32] Zhihao Liang, Zhihao Li, Songcen Xu, Mingkui Tan, and Kui Jia. Instance segmentation in 3d scenes using semantic superpoint tree networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2783-2792, 2021. 1",
|
| 1008 |
+
"[33] Minghua Liu, Yinhao Zhu, H. Cai, Shizhong Han, Z. Ling, Fatih Murat Porikli, and Hao Su. Partslip: Low-shot part segmentation for 3d point clouds via pretrained image-language models. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 21736-21746, 2022. 2",
|
| 1009 |
+
"[34] Zhengzhe Liu, Xiaojuan Qi, and Chi-Wing Fu. One thing one click: A self-training approach for weakly supervised 3d semantic segmentation. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1726-1736, 2021. 2",
|
| 1010 |
+
"[35] Leland McInnes and John Healy. Accelerated hierarchical density based clustering. In 2017 IEEE International Conference on Data Mining Workshops (ICDMW), pages 33-42. IEEE, 2017. 2, 5, 6, 7",
|
| 1011 |
+
"[36] Yoshikatsu Nakajima, Byeongkeun Kang, Hideo Saito, and Kris Kitani. Incremental class discovery for semantic segmentation with rgbd sensing. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 2",
|
| 1012 |
+
"[37] Liangliang Nan, Ke Xie, and Andrei Sharf. A search-classify approach for cluttered indoor scene understanding. ACM Trans. Graph., 31(6), 2012. 2",
|
| 1013 |
+
"[38] Lucas Nunes, Xieyuanli Chen, Rodrigo Marcuzzi, Aljosa Osep, Laura Leal-Taixe, Cyril Stachniss, and Jens Behley. Unsupervised class-agnostic instance segmentation of 3d lidar data for autonomous vehicles. IEEE Robotics and Automation Letters, 7(4):8713-8720, 2022. 2, 5, 6, 7",
|
| 1014 |
+
"[39] Lucas Nunes, Rodrigo Marcuzzi, Xieyuanli Chen, Jens Behley, and Cyril Stachniss. Segcontrast: 3d point cloud feature representation learning through self-supervised segment discrimination. IEEE Robotics and Automation Letters, 7(2):2116-2123, 2022. 2, 12"
|
| 1015 |
+
],
|
| 1016 |
+
"bbox": [
|
| 1017 |
+
78,
|
| 1018 |
+
90,
|
| 1019 |
+
468,
|
| 1020 |
+
898
|
| 1021 |
+
],
|
| 1022 |
+
"page_idx": 9
|
| 1023 |
+
},
|
| 1024 |
+
{
|
| 1025 |
+
"type": "list",
|
| 1026 |
+
"sub_type": "ref_text",
|
| 1027 |
+
"list_items": [
|
| 1028 |
+
"[40] Songyou Peng, Kyle Genova, Chiyu Jiang, Andrea Tagliasacchi, Marc Pollefeys, Thomas Funkhouser, et al. Openscene: 3d scene understanding with open vocabularies. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 815-824, 2023. 2",
|
| 1029 |
+
"[41] Yinyin Peng, Hui Feng, Tao Chen, and Bo Hu. Point cloud instance segmentation with inaccurate bounding-box annotations. Sensors (Basel, Switzerland), 23, 2023. 2",
|
| 1030 |
+
"[42] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 1, 2, 16",
|
| 1031 |
+
"[43] Dario Rethage, Johanna Wald, Jurgen Sturm, Nassir Navab, and Federico Tombari. Fully-convolutional point networks for large-scale point clouds. In Proceedings of the European Conference on Computer Vision (ECCV), pages 596-611, 2018.",
|
| 1032 |
+
"[44] David Rozenberszki, Or Litany, and Angela Dai. Language-grounded indoor 3d semantic segmentation in the wild. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 2",
|
| 1033 |
+
"[45] Jonas Schult, Francis Engelmann, Alexander Hermans, Or Litany, Siyu Tang, and Bastian Leibe. Mask3D for 3D Semantic Instance Segmentation. In International Conference on Robotics and Automation (ICRA), 2023. 1, 4, 5, 15",
|
| 1034 |
+
"[46] Nur Muhammad Mahi Shafiullah, Chris Paxton, Lerrel Pinto, Soumith Chintala, and Arthur Szlam. Clip-fields: Weakly supervised semantic fields for robotic memory. arXiv preprint arXiv: Arxiv-2210.05663, 2022. 2",
|
| 1035 |
+
"[47] Jianbo Shi and Jitendra Malik. Normalized cuts and image segmentation. IEEE Transactions on pattern analysis and machine intelligence, 22(8):888-905, 2000. 2, 3",
|
| 1036 |
+
"[48] Ziyang Song and Bo Yang. OGC: Unsupervised 3D Object Segmentation from Rigid Dynamics of Point Clouds. In NeurIPS, 2022. 2",
|
| 1037 |
+
"[49] Carole H Sudre, Wenqi Li, Tom Vercauteren, Sebastian Ourselin, and M Jorge Cardoso. Generalised dice overlap as a deep learning loss function for highly unbalanced segmentations. In Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support: Third International Workshop, DLMIA 2017, and 7th International Workshop, ML-CDS 2017, Held in Conjunction with MICCAI 2017, Quebec City, QC, Canada, September 14, Proceedings 3, pages 240-248. Springer, 2017. 12",
|
| 1038 |
+
"[50] Jiahao Sun, Chunmei Qing, Junpeng Tan, and Xiangmin Xu. Superpoint transformer for 3d scene instance segmentation. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 2393-2401, 2023. 1, 2",
|
| 1039 |
+
"[51] Thang Vu, Kookhoi Kim, Tung M Luu, Thanh Nguyen, Junyeong Kim, and Chang D Yoo. Softgroup++: Scalable 3d instance segmentation with octree pyramid grouping. arXiv preprint arXiv:2209.08263, 2022. 2",
|
| 1040 |
+
"[52] Thang Vu, Kookhoi Kim, Tung M. Luu, Xuan Thanh Nguyen, and Chang D. Yoo. Softgroup for 3d instance segmentation on 3d point clouds. In CVPR, 2022. 1",
|
| 1041 |
+
"[53] Puzuo Wang, Wei Yao, and Jie Shao. One class one click: Quasi scene-level weakly supervised point cloud semantic"
|
| 1042 |
+
],
|
| 1043 |
+
"bbox": [
|
| 1044 |
+
501,
|
| 1045 |
+
92,
|
| 1046 |
+
890,
|
| 1047 |
+
900
|
| 1048 |
+
],
|
| 1049 |
+
"page_idx": 9
|
| 1050 |
+
},
|
| 1051 |
+
{
|
| 1052 |
+
"type": "list",
|
| 1053 |
+
"sub_type": "ref_text",
|
| 1054 |
+
"list_items": [
|
| 1055 |
+
"segmentation with active learning. ISPRS Journal of Photogrammetry and Remote Sensing, 204:89-104, 2023. 2",
|
| 1056 |
+
"[54] Weiyue Wang, Ronald Yu, Qiangui Huang, and Ulrich Neumann. Sgpn: Similarity group proposal network for 3d point cloud instance segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2569-2578, 2018. 1",
|
| 1057 |
+
"[55] Xinlong Wang, Zhiding Yu, Shalini De Mello, Jan Kautz, Anima Anandkumar, Chunhua Shen, and Jose M Alvarez. Freesolo: Learning to segment objects without annotations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14176-14186, 2022. 2, 7, 12, 15, 16, 17",
|
| 1058 |
+
"[56] Xudong Wang, Rohit Girdhar, Stella X Yu, and Ishan Misra. Cut and learn for unsupervised object detection and instance segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3124-3134, 2023. 2, 5, 6, 12, 15",
|
| 1059 |
+
"[57] Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E Sarma, Michael M Bronstein, and Justin M Solomon. Dynamic graph cnn for learning on point clouds. Acm Transactions On Graphics (tog), 38(5):1-12, 2019. 1",
|
| 1060 |
+
"[58] Wenxuan Wu, Zhongang Qi, and Li Fuxin. Pointconv: Deep convolutional networks on 3d point clouds. In Proceedings of the IEEE/CVF Conference on computer vision and pattern recognition, pages 9621-9630, 2019. 1",
|
| 1061 |
+
"[59] Z. Wu and R. Leahy. An optimal graph theoretic approach to data clustering: theory and its application to image segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 15(11):1101-1113, 1993. 2",
|
| 1062 |
+
"[60] Saining Xie, Jiatao Gu, Demi Guo, Charles R Qi, Leonidas Guibas, and Or Litany. Pointcontrast: Unsupervised pretraining for 3d point cloud understanding. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part III 16, pages 574-591. Springer, 2020. 1, 2, 12",
|
| 1063 |
+
"[61] Yunhan Yang, Xiaoyang Wu, Tong He, Hengshuang Zhao, and Xihui Liu. Sam3d: Segment anything in 3d scenes. arXiv preprint arXiv:2306.03908, 2023. 15",
|
| 1064 |
+
"[62] Zaiwei Zhang, Rohit Girdhar, Armand Joulin, and Ishan Misra. Self-supervised pretraining of 3d features on any point-cloud. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10252-10263, 2021. 2, 12"
|
| 1065 |
+
],
|
| 1066 |
+
"bbox": [
|
| 1067 |
+
78,
|
| 1068 |
+
90,
|
| 1069 |
+
468,
|
| 1070 |
+
710
|
| 1071 |
+
],
|
| 1072 |
+
"page_idx": 10
|
| 1073 |
+
},
|
| 1074 |
+
{
|
| 1075 |
+
"type": "text",
|
| 1076 |
+
"text": "7. Appendix",
|
| 1077 |
+
"text_level": 1,
|
| 1078 |
+
"bbox": [
|
| 1079 |
+
76,
|
| 1080 |
+
89,
|
| 1081 |
+
184,
|
| 1082 |
+
107
|
| 1083 |
+
],
|
| 1084 |
+
"page_idx": 11
|
| 1085 |
+
},
|
| 1086 |
+
{
|
| 1087 |
+
"type": "text",
|
| 1088 |
+
"text": "7.1. UnScene3D as Data Efficient Pretraining",
|
| 1089 |
+
"text_level": 1,
|
| 1090 |
+
"bbox": [
|
| 1091 |
+
76,
|
| 1092 |
+
114,
|
| 1093 |
+
426,
|
| 1094 |
+
132
|
| 1095 |
+
],
|
| 1096 |
+
"page_idx": 11
|
| 1097 |
+
},
|
| 1098 |
+
{
|
| 1099 |
+
"type": "text",
|
| 1100 |
+
"text": "We report additional qualitative details on the data efficient pretraining performance of UnScene3D in Table 5.",
|
| 1101 |
+
"bbox": [
|
| 1102 |
+
76,
|
| 1103 |
+
140,
|
| 1104 |
+
468,
|
| 1105 |
+
170
|
| 1106 |
+
],
|
| 1107 |
+
"page_idx": 11
|
| 1108 |
+
},
|
| 1109 |
+
{
|
| 1110 |
+
"type": "text",
|
| 1111 |
+
"text": "We also note that the 3D contrastive pre-training of CSC, similar to other 3D pre-training methods developed for nontransformer backbones [19, 39, 60, 62], was not beneficial for a transformer-based model. A similar observation was also reported in a recent pretraining method [20]. We thus also compare with CSC pretraining on their original 3D backbone (which demonstrated improvement over training from scratch on the same backbone). Our approach can improves notably over both alternatives.",
|
| 1112 |
+
"bbox": [
|
| 1113 |
+
75,
|
| 1114 |
+
171,
|
| 1115 |
+
467,
|
| 1116 |
+
308
|
| 1117 |
+
],
|
| 1118 |
+
"page_idx": 11
|
| 1119 |
+
},
|
| 1120 |
+
{
|
| 1121 |
+
"type": "text",
|
| 1122 |
+
"text": "7.2. The effect of noise robust losses.",
|
| 1123 |
+
"text_level": 1,
|
| 1124 |
+
"bbox": [
|
| 1125 |
+
76,
|
| 1126 |
+
321,
|
| 1127 |
+
357,
|
| 1128 |
+
335
|
| 1129 |
+
],
|
| 1130 |
+
"page_idx": 11
|
| 1131 |
+
},
|
| 1132 |
+
{
|
| 1133 |
+
"type": "text",
|
| 1134 |
+
"text": "We adopt DropLoss [56] for our self-training cycles, which is robust to sparse data and missing annotations. In particular, we use a weighted combination of cross-entropy and Dice [49] losses for bipartite-matching with pseudo annotations. We then drop losses for backpropagation which do not have at least $\\tau_{drop}$ overlap with the annotations from the previous cycle. We evaluate the effect of different noise robust losses for self-training in Table 6. We compare our baseline losses with a 3D extension of the projection loss of [55], and our adaptation of DropLoss from [56]. Our approach does not penalize for missing pseudo masks, which enables more effective self-training to discover previously missed instances.",
|
| 1135 |
+
"bbox": [
|
| 1136 |
+
75,
|
| 1137 |
+
345,
|
| 1138 |
+
468,
|
| 1139 |
+
541
|
| 1140 |
+
],
|
| 1141 |
+
"page_idx": 11
|
| 1142 |
+
},
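The loss-dropping step described above can be summarized in a few lines. The sketch below is illustrative rather than the released implementation; the tensor shapes and the default value of the drop threshold are assumptions.

```python
# Minimal sketch (not the authors' code): drop the loss terms of predictions
# that do not sufficiently overlap any pseudo annotation from the previous cycle.
import torch

def drop_losses(per_pred_loss, pred_masks, pseudo_masks, tau_drop=0.01):
    """per_pred_loss: (P,) loss per predicted instance
    pred_masks:    (P, N) binary masks over N segments/points
    pseudo_masks:  (Q, N) binary pseudo masks from the previous cycle
    Returns the mean loss over the predictions kept for backpropagation."""
    pred = pred_masks.float()
    pseudo = pseudo_masks.float()
    inter = pred @ pseudo.t()                                   # (P, Q) intersections
    union = pred.sum(1, keepdim=True) + pseudo.sum(1) - inter   # (P, Q) unions
    iou = inter / union.clamp(min=1.0)
    keep = iou.max(dim=1).values >= tau_drop                    # drop low-overlap predictions
    if keep.sum() == 0:
        return per_pred_loss.sum() * 0.0                        # nothing to supervise this step
    return per_pred_loss[keep].mean()
```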
|
| 1143 |
+
{
|
| 1144 |
+
"type": "text",
|
| 1145 |
+
"text": "7.3. Additional Qualitative Results",
|
| 1146 |
+
"text_level": 1,
|
| 1147 |
+
"bbox": [
|
| 1148 |
+
76,
|
| 1149 |
+
556,
|
| 1150 |
+
346,
|
| 1151 |
+
571
|
| 1152 |
+
],
|
| 1153 |
+
"page_idx": 11
|
| 1154 |
+
},
|
| 1155 |
+
{
|
| 1156 |
+
"type": "text",
|
| 1157 |
+
"text": "We show more qualitative results from our method trained on ARKitScenes [2] in Figure 8 and on ScanNet [10] in Figure 9.",
|
| 1158 |
+
"bbox": [
|
| 1159 |
+
76,
|
| 1160 |
+
580,
|
| 1161 |
+
467,
|
| 1162 |
+
625
|
| 1163 |
+
],
|
| 1164 |
+
"page_idx": 11
|
| 1165 |
+
},
|
| 1166 |
+
{
|
| 1167 |
+
"type": "text",
|
| 1168 |
+
"text": "7.4. Pseudo Mask Generation Ablations",
|
| 1169 |
+
"text_level": 1,
|
| 1170 |
+
"bbox": [
|
| 1171 |
+
76,
|
| 1172 |
+
638,
|
| 1173 |
+
383,
|
| 1174 |
+
652
|
| 1175 |
+
],
|
| 1176 |
+
"page_idx": 11
|
| 1177 |
+
},
|
| 1178 |
+
{
|
| 1179 |
+
"type": "text",
|
| 1180 |
+
"text": "We also ablate the saliency threshold, oversegmentation parameters, and separation strategy in our pseudo mask generation. If not explicitly stated otherwise in Table 12, we use both 2D and 3D modality features for the pseudo mask generation.",
|
| 1181 |
+
"bbox": [
|
| 1182 |
+
76,
|
| 1183 |
+
662,
|
| 1184 |
+
467,
|
| 1185 |
+
738
|
| 1186 |
+
],
|
| 1187 |
+
"page_idx": 11
|
| 1188 |
+
},
|
| 1189 |
+
{
|
| 1190 |
+
"type": "text",
|
| 1191 |
+
"text": "What is the effect of the saliency threshold in pseudo mask generation? We threshold the saliency matrix $A$ with $\\tau_{cut} = 0.55$ for geometric-only features and $\\tau_{cut} = 0.65$ for combined modalities. Table 7 shows that our approach maintains robust performance across a large range of $\\tau_{cut}$ thresholds used to estimate salient areas for pseudo masks. In this table we report results using features from combined modalities, but similar behaviour can be observed for the other scenarios as well.",
|
| 1192 |
+
"bbox": [
|
| 1193 |
+
75,
|
| 1194 |
+
763,
|
| 1195 |
+
467,
|
| 1196 |
+
900
|
| 1197 |
+
],
|
| 1198 |
+
"page_idx": 11
|
| 1199 |
+
},
|
| 1200 |
+
{
|
| 1201 |
+
"type": "text",
|
| 1202 |
+
"text": "The effect of iterative mask densification. We designed a strategy to leverage a sparse set of relatively clean initial pseudo masks, which are progressively extended with confident self-predictions during later iterations. This leads to a 3x improvement over state of the art in the Average Precision Metric. We could also consider different mask refinement strategies using a mixture of segments, initial masks or self-trained instances. Tab. 8 ablates a mask refinement strategy of discarding previous masks and retaining current predictions. We also consider using Felzenswalb segments directly instead of feature-based pseudo labels. Both these strategies lead to lower performance due to the increased presence of noisy labels, which dominate the training signal.",
|
| 1203 |
+
"bbox": [
|
| 1204 |
+
496,
|
| 1205 |
+
90,
|
| 1206 |
+
890,
|
| 1207 |
+
301
|
| 1208 |
+
],
|
| 1209 |
+
"page_idx": 11
|
| 1210 |
+
},
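The mask addition strategy described above can be sketched as follows; the confidence and overlap thresholds are illustrative assumptions, not values taken from the paper.

```python
# Minimal sketch (illustrative, not the released implementation) of "mask addition":
# keep the initial pseudo masks fixed and enrich the pseudo label set with
# confident, non-overlapping self-predictions from the current cycle.
import numpy as np

def add_confident_masks(pseudo_masks, pred_masks, pred_scores,
                        tau_conf=0.9, tau_overlap=0.3):
    """pseudo_masks: list of (N,) bool arrays kept from previous iterations
    pred_masks:   (P, N) bool predictions of the current self-training cycle
    pred_scores:  (P,) confidence per prediction"""
    kept = [m.astype(bool) for m in pseudo_masks]
    order = np.argsort(-pred_scores)                  # most confident first
    for idx in order:
        if pred_scores[idx] < tau_conf:
            break
        cand = pred_masks[idx].astype(bool)
        # skip candidates that mostly re-cover an already kept mask
        max_iou = 0.0
        for m in kept:
            inter = np.logical_and(cand, m).sum()
            union = np.logical_or(cand, m).sum()
            max_iou = max(max_iou, inter / max(union, 1))
        if max_iou < tau_overlap:
            kept.append(cand)
    return kept
```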
|
| 1211 |
+
{
|
| 1212 |
+
"type": "text",
|
| 1213 |
+
"text": "Robustness to oversegmentation parameters. Table 9 shows that our approach maintains strong robustness to a wide range of oversegmentation parameters for our geometric segments (our used parameters denoted in bold).",
|
| 1214 |
+
"bbox": [
|
| 1215 |
+
496,
|
| 1216 |
+
309,
|
| 1217 |
+
890,
|
| 1218 |
+
369
|
| 1219 |
+
],
|
| 1220 |
+
"page_idx": 11
|
| 1221 |
+
},
|
| 1222 |
+
{
|
| 1223 |
+
"type": "text",
|
| 1224 |
+
"text": "Additionally, we also test the effect of other hyperparameters in out NCut-based pseudo mask generation module, including used distance metrics in the similarity matrix and different methods to separate unconnected patches in the predicted foregrounds. During the foreground separation in the Normalized Cut algorithm, we had an additional condition for the minimum number of foreground segments for the bipartitions. This conditions was able effectively filter out suboptimal partitioning of the full graph leading to separated parts from the full instances. Reducing the size of this parameter can directly lead to a more dense set of initial pseudo masks, with the cost of higher false positive rate. In Table 9 we report a sparser and denser version of the datasets with a minimum number of foreground segments of 8 and 2 accordingly, and show the initial higher scores of the pseudo annotation doesn't necessarily propagate to better downstream self-trained performance.",
|
| 1225 |
+
"bbox": [
|
| 1226 |
+
496,
|
| 1227 |
+
387,
|
| 1228 |
+
890,
|
| 1229 |
+
657
|
| 1230 |
+
],
|
| 1231 |
+
"page_idx": 11
|
| 1232 |
+
},
|
| 1233 |
+
{
|
| 1234 |
+
"type": "text",
|
| 1235 |
+
"text": "Finally, we also ablate the effect of our physical connectivity-based foreground separation introduced in Section 3.1. In our main method we separate all set of connected components in the foreground, but only keep the component with the highest eigenvector activation (Max). As an alternative we also test a method where we calculate the highest average activation in the connected component (Avg.), a method where we keep the component with the largest surface value (Largest) and finally, to test the effect of this module, without any kind of connectivity-based separation (No Sep.).",
|
| 1236 |
+
"bbox": [
|
| 1237 |
+
496,
|
| 1238 |
+
659,
|
| 1239 |
+
890,
|
| 1240 |
+
824
|
| 1241 |
+
],
|
| 1242 |
+
"page_idx": 11
|
| 1243 |
+
},
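A minimal sketch of the Max separation variant is given below, assuming a precomputed list of physically adjacent segment pairs; it illustrates the described step rather than reproducing the released code.

```python
# Minimal sketch: split the NCut foreground into physically connected components
# and keep only the component containing the highest eigenvector activation.
import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse.csgraph import connected_components

def separate_foreground_max(foreground, eigvec, adjacency, num_segments):
    """foreground: (N,) bool mask over segments, eigvec: (N,) NCut eigenvector,
    adjacency: iterable of (i, j) index pairs of physically touching segments."""
    rows, cols = zip(*adjacency)
    graph = csr_matrix((np.ones(len(rows)), (rows, cols)),
                       shape=(num_segments, num_segments))
    # restrict the connectivity graph to the predicted foreground
    fg_idx = np.flatnonzero(foreground)
    sub = graph[fg_idx][:, fg_idx]
    _, labels = connected_components(sub, directed=False)
    # keep the component that contains the strongest eigenvector response
    best = labels[np.argmax(eigvec[fg_idx])]
    mask = np.zeros(num_segments, dtype=bool)
    mask[fg_idx[labels == best]] = True
    return mask
```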
|
| 1244 |
+
{
|
| 1245 |
+
"type": "text",
|
| 1246 |
+
"text": "7.5. Comparison with methods from the 2D domain",
|
| 1247 |
+
"text_level": 1,
|
| 1248 |
+
"bbox": [
|
| 1249 |
+
500,
|
| 1250 |
+
832,
|
| 1251 |
+
890,
|
| 1252 |
+
847
|
| 1253 |
+
],
|
| 1254 |
+
"page_idx": 11
|
| 1255 |
+
},
|
| 1256 |
+
{
|
| 1257 |
+
"type": "text",
|
| 1258 |
+
"text": "To ensure a fair evaluation of methods operating on different input domains in Table 1. we followed the established procedure of well-known baselines [9, 18, 23]. This involves",
|
| 1259 |
+
"bbox": [
|
| 1260 |
+
496,
|
| 1261 |
+
854,
|
| 1262 |
+
890,
|
| 1263 |
+
900
|
| 1264 |
+
],
|
| 1265 |
+
"page_idx": 11
|
| 1266 |
+
},
|
| 1267 |
+
{
|
| 1268 |
+
"type": "image",
|
| 1269 |
+
"img_path": "images/16e6800a0e8007b83a09af33106a21be0b91f995b1f784dee9ace9eedaa0ee57.jpg",
|
| 1270 |
+
"image_caption": [
|
| 1271 |
+
"Figure 8. Additional results on the ARKitScenes dataset [2], compared to geometric clustering and oversegmentation-based baselines."
|
| 1272 |
+
],
|
| 1273 |
+
"image_footnote": [],
|
| 1274 |
+
"bbox": [
|
| 1275 |
+
181,
|
| 1276 |
+
87,
|
| 1277 |
+
790,
|
| 1278 |
+
845
|
| 1279 |
+
],
|
| 1280 |
+
"page_idx": 12
|
| 1281 |
+
},
|
| 1282 |
+
{
|
| 1283 |
+
"type": "image",
|
| 1284 |
+
"img_path": "images/817cb962810168ea168558a7200bf5d8296745b6addeb37467a1a6357c349987.jpg",
|
| 1285 |
+
"image_caption": [
|
| 1286 |
+
"Figure 9. Additional results on the ScanNet dataset [10], compared to geometric clustering and oversegmentation-based baselines."
|
| 1287 |
+
],
|
| 1288 |
+
"image_footnote": [],
|
| 1289 |
+
"bbox": [
|
| 1290 |
+
140,
|
| 1291 |
+
87,
|
| 1292 |
+
836,
|
| 1293 |
+
849
|
| 1294 |
+
],
|
| 1295 |
+
"page_idx": 13
|
| 1296 |
+
},
|
| 1297 |
+
{
|
| 1298 |
+
"type": "table",
|
| 1299 |
+
"img_path": "images/0cee57dee083090d8cf6e72b4b662766ddb8fbefbc6906c6afa120957c848481.jpg",
|
| 1300 |
+
"table_caption": [],
|
| 1301 |
+
"table_footnote": [],
|
| 1302 |
+
"table_body": "<table><tr><td rowspan=\"2\">Model</td><td rowspan=\"2\">Backbone</td><td colspan=\"3\">1%</td><td colspan=\"3\">5%</td><td colspan=\"3\">10%</td><td colspan=\"3\">20%</td><td colspan=\"3\">50%</td></tr><tr><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>Scratch</td><td>Bottom-up</td><td>22.6</td><td>14.1</td><td>6.8</td><td>45.5</td><td>33.3</td><td>18.1</td><td>54.8</td><td>39.2</td><td>21.9</td><td>61.0</td><td>43.4</td><td>25.5</td><td>67.0</td><td>51.4</td><td>30.3</td></tr><tr><td>CSC [19]</td><td>Bottom-up</td><td>35.6</td><td>22.1</td><td>12.5</td><td>52.7</td><td>39.9</td><td>23.3</td><td>59.8</td><td>43.8</td><td>25.0</td><td>63.8</td><td>48.9</td><td>29.6</td><td>70.5</td><td>56.0</td><td>33.6</td></tr><tr><td>Scratch</td><td>Transformer</td><td>24.7</td><td>9.3</td><td>4.6</td><td>48.1</td><td>27.6</td><td>16.3</td><td>59.2</td><td>39.1</td><td>23.4</td><td>66.4</td><td>49.6</td><td>33.1</td><td>78.9</td><td>67.5</td><td>49.8</td></tr><tr><td>CSC</td><td>Transformer</td><td>17.0</td><td>6.8</td><td>3.8</td><td>44.2</td><td>22.7</td><td>13.1</td><td>55.2</td><td>32.3</td><td>19.1</td><td>62.0</td><td>41.2</td><td>26.0</td><td>73.7</td><td>58.2</td><td>40.0</td></tr><tr><td>Ours</td><td>Transformer</td><td>43.5</td><td>28.4</td><td>15.8</td><td>63.2</td><td>46.8</td><td>28.3</td><td>70.3</td><td>55.7</td><td>36.7</td><td>72.4</td><td>60.7</td><td>41.5</td><td>78.9</td><td>68.0</td><td>48.2</td></tr></table>",
|
| 1303 |
+
"bbox": [
|
| 1304 |
+
81,
|
| 1305 |
+
88,
|
| 1306 |
+
890,
|
| 1307 |
+
180
|
| 1308 |
+
],
|
| 1309 |
+
"page_idx": 14
|
| 1310 |
+
},
|
| 1311 |
+
{
|
| 1312 |
+
"type": "table",
|
| 1313 |
+
"img_path": "images/d56e5d85e0a754996b5c318e47ec34eac3e1e09bd3f244095f6a107b938b2b25.jpg",
|
| 1314 |
+
"table_caption": [
|
| 1315 |
+
"Table 5. Unsupervised class-agnostic pretraining with our method can also act as a powerful pretraining strategy, advancing over state of the art. We report pretraining with CSC [19] and UnScene3D, and evaluate the downstream weakly-supervised instance segmentation performance on ScanNet with percentage of limited annotated scenes used denoted in the top row. As we found that CSC degraded performance when using a transformer-based backbone, we also report the performance of training from scratch and CSC on their originally proposed backbone of a sparse UNet with bottom-up voting."
|
| 1316 |
+
],
|
| 1317 |
+
"table_footnote": [],
|
| 1318 |
+
"table_body": "<table><tr><td></td><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP Final</td></tr><tr><td>Initial Pseudo Masks</td><td>19.9</td><td>10.0</td><td>5.9</td><td>-</td></tr><tr><td>Baseline losses [45]</td><td>42.3</td><td>16.9</td><td>7.2</td><td>14.2</td></tr><tr><td>Projection loss [55]</td><td>35.7</td><td>12.1</td><td>4.7</td><td>7.2</td></tr><tr><td>DropLoss [56]</td><td>52.9</td><td>23.2</td><td>10.4</td><td>15.9</td></tr></table>",
|
| 1319 |
+
"bbox": [
|
| 1320 |
+
78,
|
| 1321 |
+
282,
|
| 1322 |
+
475,
|
| 1323 |
+
369
|
| 1324 |
+
],
|
| 1325 |
+
"page_idx": 14
|
| 1326 |
+
},
|
| 1327 |
+
{
|
| 1328 |
+
"type": "table",
|
| 1329 |
+
"img_path": "images/4e60dc5205acaed0c35c0df828eed20c3786a488cfa263d9e71844a54cd2b6bd.jpg",
|
| 1330 |
+
"table_caption": [
|
| 1331 |
+
"Table 6. A 3D projection loss struggles with under-determined associations, while DropLoss helps UnScene3D to discover parts of the scene that were missed by the source supervision. We report all metrics after a single iteration and the AP scores after 4 iterations of self-training."
|
| 1332 |
+
],
|
| 1333 |
+
"table_footnote": [],
|
| 1334 |
+
"table_body": "<table><tr><td>τcut</td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>0.40</td><td>16.7</td><td>9.0</td><td>5.2</td></tr><tr><td>0.50</td><td>20.8</td><td>10.7</td><td>5.7</td></tr><tr><td>0.55</td><td>21.0</td><td>10.8</td><td>5.7</td></tr><tr><td>0.60</td><td>21.3</td><td>11.3</td><td>5.8</td></tr><tr><td>0.65</td><td>19.9</td><td>10.0</td><td>5.9</td></tr><tr><td>0.70</td><td>18.2</td><td>9.9</td><td>5.6</td></tr><tr><td>0.80</td><td>11.8</td><td>5.0</td><td>2.6</td></tr></table>",
|
| 1335 |
+
"bbox": [
|
| 1336 |
+
163,
|
| 1337 |
+
465,
|
| 1338 |
+
383,
|
| 1339 |
+
592
|
| 1340 |
+
],
|
| 1341 |
+
"page_idx": 14
|
| 1342 |
+
},
|
| 1343 |
+
{
|
| 1344 |
+
"type": "text",
|
| 1345 |
+
"text": "using depth information to project 2D predictions into 3D such that all methods are evaluated in the same 3D domain and aggregate multiple predictions through consensus by majority voting or accepting the maximum confidence scores for every voxel location. We also show results evaluated against 2D ScanNet images by projecting our method's predictions into 2D in Tab. 10, and comparing it to the current state of the art 2D unsupervised segmentation method [56] which demonstrates the usefulness of 3D reasoning.",
|
| 1346 |
+
"bbox": [
|
| 1347 |
+
75,
|
| 1348 |
+
728,
|
| 1349 |
+
468,
|
| 1350 |
+
864
|
| 1351 |
+
],
|
| 1352 |
+
"page_idx": 14
|
| 1353 |
+
},
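A minimal sketch of the consensus aggregation described above, assuming per-view predictions have already been projected onto voxel indices; the helper name and inputs are hypothetical.

```python
# Minimal sketch: each voxel takes either the majority vote over all projected
# per-view predictions or the label of the maximum-confidence prediction.
import numpy as np

def aggregate_votes(voxel_ids, labels, scores, num_voxels, mode="majority"):
    """voxel_ids: (K,) voxel index hit by each projected 2D prediction
    labels:    (K,) predicted label / instance id per hit
    scores:    (K,) confidence per hit"""
    out = np.full(num_voxels, -1, dtype=np.int64)      # -1 marks voxels with no votes
    for v in np.unique(voxel_ids):
        sel = voxel_ids == v
        if mode == "majority":
            vals, counts = np.unique(labels[sel], return_counts=True)
            out[v] = vals[np.argmax(counts)]
        else:                                           # maximum-confidence consensus
            out[v] = labels[sel][np.argmax(scores[sel])]
    return out
```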
|
| 1354 |
+
{
|
| 1355 |
+
"type": "text",
|
| 1356 |
+
"text": "We also compare to weakly-supervised instance segmentation method SAM3D [61], where powerful class-agnostic",
|
| 1357 |
+
"bbox": [
|
| 1358 |
+
76,
|
| 1359 |
+
869,
|
| 1360 |
+
468,
|
| 1361 |
+
901
|
| 1362 |
+
],
|
| 1363 |
+
"page_idx": 14
|
| 1364 |
+
},
|
| 1365 |
+
{
|
| 1366 |
+
"type": "table",
|
| 1367 |
+
"img_path": "images/f8fcb4da70ddaf3e5de7d3c475994676889ed724419b8d3249f5f3ea2d89e182.jpg",
|
| 1368 |
+
"table_caption": [
|
| 1369 |
+
"Table 7. Our pseudo mask generation quality, as measured by AP metrics, maintains robustness to a large range of $\\tau$ thresholds that extract saliency. Note that this measures the quality of only the pseudo masks; our full approach with self-training produces significantly improved results. In this table we show results and parameters used by our method in bold and report pseudo mask performance generated from both modalities."
|
| 1370 |
+
],
|
| 1371 |
+
"table_footnote": [],
|
| 1372 |
+
"table_body": "<table><tr><td></td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>Felzenswalb Masks</td><td>35.5</td><td>20.6</td><td>10.3</td></tr><tr><td>Mask Refinement</td><td>43.7</td><td>24.4</td><td>12.4</td></tr><tr><td>Mask Addition (Ours)</td><td>58.6</td><td>32.0</td><td>16.0</td></tr></table>",
|
| 1373 |
+
"bbox": [
|
| 1374 |
+
542,
|
| 1375 |
+
282,
|
| 1376 |
+
849,
|
| 1377 |
+
339
|
| 1378 |
+
],
|
| 1379 |
+
"page_idx": 14
|
| 1380 |
+
},
|
| 1381 |
+
{
|
| 1382 |
+
"type": "text",
|
| 1383 |
+
"text": "Table 8. Instead of using masks from previous iteration directly it is the best to keep the initial masks fixed, and iteratively sample plausible predictions to enrich the pseudo dataset during self-training. This method strikes a balance between relatively clean, but sparse labels and increasing number of confident samples. Finally, even though Felzenswalb oversegmentation yields to higher precision, then our initial mask prediction algorithm, it also includes more background into the training, and this way plateauing at a lower self-training performance.",
|
| 1384 |
+
"bbox": [
|
| 1385 |
+
496,
|
| 1386 |
+
349,
|
| 1387 |
+
890,
|
| 1388 |
+
474
|
| 1389 |
+
],
|
| 1390 |
+
"page_idx": 14
|
| 1391 |
+
},
|
| 1392 |
+
{
|
| 1393 |
+
"type": "text",
|
| 1394 |
+
"text": "2D masks are extracted by the powerful SAM model [29]. Here the projected 2D masks are merged into 3D masks iteratively with a bottom-up bidirectional merging approach to achieved cleaner and more view-independent 3D instances. A qualitative comparison on ScanNet can be seen in Table 11, with qualitative comparisons in Figure 10.",
|
| 1395 |
+
"bbox": [
|
| 1396 |
+
498,
|
| 1397 |
+
494,
|
| 1398 |
+
890,
|
| 1399 |
+
585
|
| 1400 |
+
],
|
| 1401 |
+
"page_idx": 14
|
| 1402 |
+
},
|
| 1403 |
+
{
|
| 1404 |
+
"type": "image",
|
| 1405 |
+
"img_path": "images/bb936be4c367aa4042c7615d959b62a805e05863d94b2a0d2e79a13cc63f8b42.jpg",
|
| 1406 |
+
"image_caption": [
|
| 1407 |
+
"Figure 10. While SAM has powerful capabilities in crisp 2D mask generation, when aggregated on 3D, SAM3D tends to oversegment object instances."
|
| 1408 |
+
],
|
| 1409 |
+
"image_footnote": [],
|
| 1410 |
+
"bbox": [
|
| 1411 |
+
500,
|
| 1412 |
+
592,
|
| 1413 |
+
890,
|
| 1414 |
+
676
|
| 1415 |
+
],
|
| 1416 |
+
"page_idx": 14
|
| 1417 |
+
},
|
| 1418 |
+
{
|
| 1419 |
+
"type": "text",
|
| 1420 |
+
"text": "SAM3D must resolve view inconsistencies and SAM's tendency to over-segment objects, which results in SAM3D splitting instances, while UnScene3D is able to achieve complete masks through multi-modal reasoning. We believe integrating SAM or other (weakly-) supervised 2D models into our pipeline to enable multi-modal reasoning is an interesting avenue for future work.",
|
| 1421 |
+
"bbox": [
|
| 1422 |
+
496,
|
| 1423 |
+
722,
|
| 1424 |
+
890,
|
| 1425 |
+
828
|
| 1426 |
+
],
|
| 1427 |
+
"page_idx": 14
|
| 1428 |
+
},
|
| 1429 |
+
{
|
| 1430 |
+
"type": "text",
|
| 1431 |
+
"text": "7.6. Additional Implementation Details",
|
| 1432 |
+
"text_level": 1,
|
| 1433 |
+
"bbox": [
|
| 1434 |
+
500,
|
| 1435 |
+
844,
|
| 1436 |
+
802,
|
| 1437 |
+
861
|
| 1438 |
+
],
|
| 1439 |
+
"page_idx": 14
|
| 1440 |
+
},
|
| 1441 |
+
{
|
| 1442 |
+
"type": "text",
|
| 1443 |
+
"text": "Here, we further explain the implementation details of our pseudo mask generation.",
|
| 1444 |
+
"bbox": [
|
| 1445 |
+
498,
|
| 1446 |
+
869,
|
| 1447 |
+
890,
|
| 1448 |
+
901
|
| 1449 |
+
],
|
| 1450 |
+
"page_idx": 14
|
| 1451 |
+
},
|
| 1452 |
+
{
|
| 1453 |
+
"type": "table",
|
| 1454 |
+
"img_path": "images/8b5b89d1c9b7ad846179a5481c9602e0f428749b5e3ad505c1eebe4a15aed0f2.jpg",
|
| 1455 |
+
"table_caption": [],
|
| 1456 |
+
"table_footnote": [],
|
| 1457 |
+
"table_body": "<table><tr><td colspan=\"4\">Generation Params.</td><td colspan=\"4\">Initial Pseudo Mask</td><td colspan=\"3\">1 Iteration of Self-Training</td><td colspan=\"3\">4 Iterations of Self-Training</td></tr><tr><td>Segment Size</td><td>Metric</td><td>Separation</td><td>Min. # of Foreground</td><td># of Instances</td><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>30</td><td>Cos</td><td>Max</td><td>8</td><td>2169</td><td>21.9</td><td>11.5</td><td>6.3</td><td>53.7</td><td>26.2</td><td>12.4</td><td>55.4</td><td>30.3</td><td>15.3</td></tr><tr><td>50</td><td>Cos</td><td>Max</td><td>8</td><td>1414</td><td>19.9</td><td>10.0</td><td>5.9</td><td>52.9</td><td>23.2</td><td>10.4</td><td>58.5</td><td>32.2</td><td>15.9</td></tr><tr><td>100</td><td>Cos</td><td>Max</td><td>8</td><td>1090</td><td>17.4</td><td>8.0</td><td>4.2</td><td>33.1</td><td>10.2</td><td>3.9</td><td>39.6</td><td>13.7</td><td>5.3</td></tr><tr><td>200</td><td>Cos</td><td>Max</td><td>8</td><td>584</td><td>11.0</td><td>3.7</td><td>1.8</td><td>24.3</td><td>8.7</td><td>2.1</td><td>26.1</td><td>9.7</td><td>2.4</td></tr><tr><td>400</td><td>Cos</td><td>Max</td><td>8</td><td>319</td><td>6.4</td><td>2.5</td><td>1.1</td><td>19.1</td><td>3.9</td><td>1.2</td><td>19.9</td><td>3.2</td><td>1.0</td></tr><tr><td>50</td><td>L2</td><td>Max</td><td>8</td><td>1539</td><td>20.1</td><td>10.6</td><td>5.4</td><td>49.0</td><td>21.7</td><td>9.8</td><td>55.3</td><td>38.4</td><td>14.3</td></tr><tr><td>100</td><td>L2</td><td>Max</td><td>8</td><td>805</td><td>13.3</td><td>5.3</td><td>2.6</td><td>30.8</td><td>8.3</td><td>2.8</td><td>39.0</td><td>12.7</td><td>5.0</td></tr><tr><td>50</td><td>Cos</td><td>No Sep.</td><td>8</td><td>125</td><td>4.3</td><td>0.3</td><td>0.1</td><td>4.3</td><td>0.5</td><td>0.2</td><td>4.9</td><td>0.6</td><td>0.2</td></tr><tr><td>50</td><td>Cos</td><td>Largest</td><td>8</td><td>620</td><td>11.5</td><td>4.9</td><td>2.5</td><td>11.5</td><td>1.5</td><td>0.4</td><td>12.9</td><td>2.2</td><td>12.9</td></tr><tr><td>50</td><td>Cos</td><td>Avg.</td><td>8</td><td>1078</td><td>16.8</td><td>9.1</td><td>5.1</td><td>36.4</td><td>12.5</td><td>4.9</td><td>43.8</td><td>17.8</td><td>7.5</td></tr><tr><td>30</td><td>Cos</td><td>Max</td><td>2</td><td>2909</td><td>29.0</td><td>15.6</td><td>8.7</td><td>53.6</td><td>28.6</td><td>14.2</td><td>54.2</td><td>29.8</td><td>15.4</td></tr><tr><td>50</td><td>Cos</td><td>Max</td><td>2</td><td>2512</td><td>24.9</td><td>12.4</td><td>7.2</td><td>56.5</td><td>29.8</td><td>15.0</td><td>51.3</td><td>26.2</td><td>12.6</td></tr><tr><td>100</td><td>Cos</td><td>Max</td><td>2</td><td>2317</td><td>23.1</td><td>12.3</td><td>6.8</td><td>51.8</td><td>24.4</td><td>11.6</td><td>57.1</td><td>31.3</td><td>15.6</td></tr><tr><td>200</td><td>Cos</td><td>Max</td><td>2</td><td>2181</td><td>28.4</td><td>15.5</td><td>8.9</td><td>54.6</td><td>28.7</td><td>13.7</td><td>56.6</td><td>31.4</td><td>15.6</td></tr><tr><td>400</td><td>Cos</td><td>Max</td><td>2</td><td>1373</td><td>20.6</td><td>11.1</td><td>6.3</td><td>51.0</td><td>24.8</td><td>11.8</td><td>55.8</td><td>30.3</td><td>15.2</td></tr><tr><td>50</td><td>L2</td><td>Max</td><td>2</td><td>2496</td><td>28.6</td><td>15.8</td><td>9.0</td><td>55.8</td><td>29.6</td><td>14.6</td><td>54.8</td><td>30.3</td><td>15.3</td></tr><tr><td>100</td><td>L2</td><td>Max</td><td>2</td><td>1668</td><td>23.4</td><td>12.7</td><td>7.3</td><td>53.1</td><td>25.0</td><td>11.3</td><td>56.3</td><td>27.7</td><td>12.9</td></tr><tr><td>50</td><td>C
os</td><td>No Sep.</td><td>2</td><td>159</td><td>0.2</td><td>0.5</td><td>3.6</td><td>5.4</td><td>0.6</td><td>0.3</td><td>3.9</td><td>0.4</td><td>0.2</td></tr><tr><td>50</td><td>Cos</td><td>Largest</td><td>2</td><td>1026</td><td>14.1</td><td>7.2</td><td>3.9</td><td>11.5</td><td>1.8</td><td>0.5</td><td>14.5</td><td>2.5</td><td>0.7</td></tr><tr><td>50</td><td>Cos</td><td>Avg.</td><td>2</td><td>2053</td><td>23.3</td><td>12.0</td><td>6.8</td><td>52.5</td><td>27.4</td><td>12.7</td><td>54.9</td><td>29.9</td><td>14.9</td></tr></table>",
|
| 1458 |
+
"bbox": [
|
| 1459 |
+
80,
|
| 1460 |
+
88,
|
| 1461 |
+
888,
|
| 1462 |
+
371
|
| 1463 |
+
],
|
| 1464 |
+
"page_idx": 15
|
| 1465 |
+
},
|
| 1466 |
+
{
|
| 1467 |
+
"type": "table",
|
| 1468 |
+
"img_path": "images/8b5e22c35929489fb0ec88523c42d04ffcd37839f6957873a6782023b8baa9d0.jpg",
|
| 1469 |
+
"table_caption": [
|
| 1470 |
+
"Table 9. We denote the parameters used by our method in bold. We show that our method is robust to a wide range of numbers regarding segments sizes and different similarity metrics, and only degrades somewhat in performance when segments are constrained to be too large. We also show that the separation of physically distant foreground patches is important and it is beneficial to use the activation of the eigenvector for the best results. Finally, we show that denser initial mask predictions lead to quantitatively better initial pseudo annotations, and even better self-training performance after a single iteration, but underperforming in their final scores. This behaviour can be explained by the larger false positive ratio in the denser initial predictions, which is propagating through all iterations, but thanks to the noise robust losses and iterative refinement of predictions the sparse set of labels can be effectively used. In this table we report results using both modalities for the initial pseudo mask generation, and number predicted pseudo instances in the official validation split of the ScanNet dataset.",
|
| 1471 |
+
"AP@25 (2D) AP@50 (2D) AP (2D)"
|
| 1472 |
+
],
|
| 1473 |
+
"table_footnote": [],
|
| 1474 |
+
"table_body": "<table><tr><td>CutLER (2D)</td><td>7.8</td><td>2.8</td><td>0.7</td></tr><tr><td>Ours (projected)</td><td>60.0</td><td>38.1</td><td>21.1</td></tr></table>",
|
| 1475 |
+
"bbox": [
|
| 1476 |
+
81,
|
| 1477 |
+
544,
|
| 1478 |
+
468,
|
| 1479 |
+
578
|
| 1480 |
+
],
|
| 1481 |
+
"page_idx": 15
|
| 1482 |
+
},
|
| 1483 |
+
{
|
| 1484 |
+
"type": "table",
|
| 1485 |
+
"img_path": "images/4508e86db97080296fd5a5dacb4ed7aef0e3d14a1fc02e926413caf496ff6681.jpg",
|
| 1486 |
+
"table_caption": [
|
| 1487 |
+
"Table 10. 2D evaluation on ScanNet images.",
|
| 1488 |
+
"AP@25 AP@50 AP"
|
| 1489 |
+
],
|
| 1490 |
+
"table_footnote": [],
|
| 1491 |
+
"table_body": "<table><tr><td>SAM3D</td><td>37.2</td><td>11.8</td><td>3.7</td></tr><tr><td>SAM3D with GT Segments</td><td>47.6</td><td>24.1</td><td>10.8</td></tr><tr><td>Ours</td><td>58.5</td><td>32.2</td><td>15.9</td></tr></table>",
|
| 1492 |
+
"bbox": [
|
| 1493 |
+
106,
|
| 1494 |
+
621,
|
| 1495 |
+
439,
|
| 1496 |
+
662
|
| 1497 |
+
],
|
| 1498 |
+
"page_idx": 15
|
| 1499 |
+
},
|
| 1500 |
+
{
|
| 1501 |
+
"type": "text",
|
| 1502 |
+
"text": "Table 11. UnScene3D achieves significantly better performance on ScanNet than SAM3D through our strong multi-modal reasoning.",
|
| 1503 |
+
"bbox": [
|
| 1504 |
+
75,
|
| 1505 |
+
664,
|
| 1506 |
+
468,
|
| 1507 |
+
705
|
| 1508 |
+
],
|
| 1509 |
+
"page_idx": 15
|
| 1510 |
+
},
|
| 1511 |
+
{
|
| 1512 |
+
"type": "text",
|
| 1513 |
+
"text": "Pseudo code for masked NCut We show the pseudo code-style implementation for the masked normalized cut algorithm generating multiple instances as pseudo masks. The full algorithm can be seen in 1.",
|
| 1514 |
+
"bbox": [
|
| 1515 |
+
75,
|
| 1516 |
+
709,
|
| 1517 |
+
468,
|
| 1518 |
+
770
|
| 1519 |
+
],
|
| 1520 |
+
"page_idx": 15
|
| 1521 |
+
},
|
| 1522 |
+
{
|
| 1523 |
+
"type": "text",
|
| 1524 |
+
"text": "3D Adaptation of FreeMask We also evaluate an alternative pseudo mask segmentation algorithm besides the masked NCut method. In the 2D domain FreeSOLO [55] also followed a two stage pipeline first generating the pseudo annotations, and then refine those predictions through a series of self-training cycles. We followed their intuition to take a self-supervised pretrained backbone and",
|
| 1525 |
+
"bbox": [
|
| 1526 |
+
75,
|
| 1527 |
+
794,
|
| 1528 |
+
470,
|
| 1529 |
+
902
|
| 1530 |
+
],
|
| 1531 |
+
"page_idx": 15
|
| 1532 |
+
},
|
| 1533 |
+
{
|
| 1534 |
+
"type": "text",
|
| 1535 |
+
"text": "extract it's deep features at multiple levels of the decoder. While in standard pretrained UNet-style models early features represent global context, final features and local semantic meaning, intermediate features can act as an useful proxy to extract self-similar regions in the input samples. In our implementation we used the same backbone features of [3, 19] for the same 2D-3D setup and extracted the penultimate layer features for the self-similarity calculation. Then sampled the feature space with the Furthest Point Sampling [42] strategy to get a more limited set of anchor points, later used to extract self-similar regions. For every seed point we took similarity scores with the other features of the full scene and thresholded it to extract salient regions. Finally, we used the efficient Non Maximum Suppression implementation from [55] to sort the predicted salient areas and filter out overlapping regions. We also used average similarity score combined with the salient region area to get maskness scores for every salient region, directly following the original implementation. We report comparative results of the masked NCut algorithm and our FreeMask 3D adaptation after self-training in Table 3. of the main paper and in Table 12 of the initial pseudo mask scores.",
|
| 1536 |
+
"bbox": [
|
| 1537 |
+
496,
|
| 1538 |
+
532,
|
| 1539 |
+
890,
|
| 1540 |
+
864
|
| 1541 |
+
],
|
| 1542 |
+
"page_idx": 15
|
| 1543 |
+
},
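A minimal sketch of this FreeMask-style 3D proposal step, with illustrative thresholds and a naive farthest point sampling and mask NMS; it follows the description above rather than the released implementation.

```python
# Minimal sketch: sample anchor segments with farthest point sampling, threshold
# feature similarity to get salient regions, score them by a "maskness" value
# (average similarity combined with area), and suppress overlaps with mask NMS.
import numpy as np

def farthest_point_sampling(xyz, k):
    idx = [0]
    dist = np.linalg.norm(xyz - xyz[0], axis=1)
    for _ in range(k - 1):
        idx.append(int(np.argmax(dist)))
        dist = np.minimum(dist, np.linalg.norm(xyz - xyz[idx[-1]], axis=1))
    return np.array(idx)

def freemask_proposals(xyz, feats, num_anchors=64, tau_sim=0.7, nms_iou=0.5):
    feats = feats / np.linalg.norm(feats, axis=1, keepdims=True)
    anchors = farthest_point_sampling(xyz, num_anchors)
    masks, scores = [], []
    for a in anchors:
        sim = feats @ feats[a]                        # cosine similarity to the seed
        mask = sim >= tau_sim
        if mask.sum() == 0:
            continue
        masks.append(mask)
        scores.append(sim[mask].mean() * mask.sum())  # "maskness": avg. similarity x area
    order = np.argsort(-np.asarray(scores))
    kept = []
    for i in order:                                   # greedy mask NMS
        if all(np.logical_and(masks[i], masks[j]).sum()
               / max(np.logical_or(masks[i], masks[j]).sum(), 1) < nms_iou
               for j in kept):
            kept.append(i)
    return [masks[i] for i in kept]
```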
|
| 1544 |
+
{
|
| 1545 |
+
"type": "text",
|
| 1546 |
+
"text": "We also note here that while there is a difference in the initial pseudo mask qualities for the different methods, the",
|
| 1547 |
+
"bbox": [
|
| 1548 |
+
498,
|
| 1549 |
+
869,
|
| 1550 |
+
892,
|
| 1551 |
+
901
|
| 1552 |
+
],
|
| 1553 |
+
"page_idx": 15
|
| 1554 |
+
},
|
| 1555 |
+
{
|
| 1556 |
+
"type": "code",
|
| 1557 |
+
"sub_type": "algorithm",
|
| 1558 |
+
"code_caption": [
|
| 1559 |
+
"Algorithm 1: Masked NCut on 3D segments"
|
| 1560 |
+
],
|
| 1561 |
+
"code_body": "Data: $\\mathcal{S} = \\{s_i,\\dots ,s_N\\}$ $\\mathcal{F}\\in \\mathcal{R}^{NxD}$ $\\mathcal{C} = \\{(s_1,s_k),(s_1,s_l),\\ldots \\}$ Result: $\\mathcal{M} = \\{m_j,\\dots ,m_M\\}$ \n1 $\\mathcal{M}\\gets \\{\\}$ \n2 while $j\\leq$ max_inst_num do \n3 $\\mathcal{F}'\\gets \\mathcal{F}$ \n4 $\\mathcal{F}'[\\mathcal{M}]\\gets 0.\\quad / /$ Mask out previous insts. \n5 $\\mathcal{W}\\leftarrow \\mathcal{F}\\times \\mathcal{F}^T$ //Feature similarity //Saliency with connected graph \n6 $\\mathcal{W}_{i,k} = \\left\\{ \\begin{array}{ll}1. & \\text{if}\\mathcal{W}_{i,k}\\geq \\tau_{cut}\\\\ \\epsilon & \\text{if}\\mathcal{W}_{i,k} < \\tau_{cut} \\end{array} \\right.$ \n7 $\\mathcal{D}_{i,i} = \\sum_{k}W_{i,k}$ //Get $2^{nd}$ smallest eigenvector \n8 $\\lambda ,\\mathbf{v}\\gets eigh(\\mathcal{D} - \\mathcal{W},\\mathcal{D}, - 2)$ \n9 $m_{i} = \\left\\{ \\begin{array}{ll}1 & \\text{if} v_{i}\\geq mean(\\mathbf{v})\\\\ 0 & \\text{if} v_{i} < mean(\\mathbf{v}) \\end{array} \\right.$ //Invert bipartition if too large \n10 if sum(m)>D/2 then \n11 m=1-m \n12 v=-1.\\*v \n// Separate unconnected components \n13 $v_{max} = max(v)$ \n14 $\\tilde{\\mathbf{m}} = sep(\\mathbf{v},v_{max},\\mathcal{C})$ \n15 M M U {m}",
|
| 1562 |
+
"bbox": [
|
| 1563 |
+
78,
|
| 1564 |
+
111,
|
| 1565 |
+
468,
|
| 1566 |
+
488
|
| 1567 |
+
],
|
| 1568 |
+
"page_idx": 16
|
| 1569 |
+
},
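A NumPy/SciPy sketch of Algorithm 1, assuming dense matrices, cosine-normalized features, and a user-provided separation routine for the connectivity-based step; it illustrates the procedure rather than reproducing the released code.

```python
# Sketch of the masked NCut loop over per-segment features (Algorithm 1 above).
import numpy as np
from scipy.linalg import eigh

def masked_ncut(feats, separate, tau_cut=0.65, max_inst=20, eps=1e-5):
    """feats: (N, D) per-segment features; `separate(m, v)` keeps the physically
    connected foreground patch with the strongest eigenvector activation.
    Returns a list of (N,) boolean instance masks."""
    feats = feats / np.linalg.norm(feats, axis=1, keepdims=True)
    masked = np.zeros(len(feats), dtype=bool)
    instances = []
    for _ in range(max_inst):
        f = feats.copy()
        f[masked] = 0.0                              # mask out accepted instances
        W = f @ f.T                                  # feature similarity
        W = np.where(W >= tau_cut, 1.0, eps)         # saliency, eps keeps graph connected
        D = np.diag(W.sum(axis=1))
        # second-smallest generalized eigenvector of (D - W) v = lambda D v
        _, vecs = eigh(D - W, D)
        v = vecs[:, 1]
        m = v >= v.mean()
        if m.sum() > len(m) / 2:                     # invert an oversized bipartition
            m, v = ~m, -v
        m = separate(m, v)
        if m.sum() == 0:
            break
        instances.append(m)
        masked |= m
    return instances
```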
|
| 1570 |
+
{
|
| 1571 |
+
"type": "table",
|
| 1572 |
+
"img_path": "images/a3c366e876d0fabe86a71f031af7691093c87b4da90bbf6df75d9853e93119fc.jpg",
|
| 1573 |
+
"table_caption": [],
|
| 1574 |
+
"table_footnote": [],
|
| 1575 |
+
"table_body": "<table><tr><td></td><td>Modality</td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>FreeMask</td><td>3D</td><td>13.7</td><td>7.2</td><td>3.7</td></tr><tr><td>Ours</td><td>3D</td><td>13.8</td><td>4.7</td><td>2.0</td></tr><tr><td>FreeMask</td><td>2D</td><td>15.3</td><td>6.6</td><td>2.9</td></tr><tr><td>Ours</td><td>2D</td><td>15.6</td><td>7.2</td><td>3.6</td></tr><tr><td>FreeMask</td><td>both</td><td>17.9</td><td>7.5</td><td>3.7</td></tr><tr><td>Ours</td><td>both</td><td>19.9</td><td>10.0</td><td>5.9</td></tr></table>",
|
| 1576 |
+
"bbox": [
|
| 1577 |
+
109,
|
| 1578 |
+
507,
|
| 1579 |
+
436,
|
| 1580 |
+
633
|
| 1581 |
+
],
|
| 1582 |
+
"page_idx": 16
|
| 1583 |
+
},
|
| 1584 |
+
{
|
| 1585 |
+
"type": "text",
|
| 1586 |
+
"text": "Table 12. We compare pseudo mask generation from 3D-only features (3D), color-only features (2D), and both color and geometry (both) signal, as well as with pseudo annotation generation algorithm FreeMask. We compare the quality of the initial pseudo mask dataset using our masked NCut algorithm and the adaptation of FreeMask [55] to 3D. We see that the normalized cut-based method is superior for both modalities.",
|
| 1587 |
+
"bbox": [
|
| 1588 |
+
75,
|
| 1589 |
+
645,
|
| 1590 |
+
468,
|
| 1591 |
+
742
|
| 1592 |
+
],
|
| 1593 |
+
"page_idx": 16
|
| 1594 |
+
},
|
| 1595 |
+
{
|
| 1596 |
+
"type": "text",
|
| 1597 |
+
"text": "downstream performance is way more significant. This can be explained by the nature of the pseudo masks. NCut provides a clean and sparse set of annotation, which is easy to identify for following iterations. On the other hand, the more dense, but noisy FreeMask predictions remain in the training for the duration of the whole training, hindering the performance of the self-trained model with noisy supervision.",
|
| 1598 |
+
"bbox": [
|
| 1599 |
+
75,
|
| 1600 |
+
768,
|
| 1601 |
+
467,
|
| 1602 |
+
875
|
| 1603 |
+
],
|
| 1604 |
+
"page_idx": 16
|
| 1605 |
+
}
|
| 1606 |
+
]
|
2303.14xxx/2303.14541/e7df9d3e-9a6b-46f5-8469-0690470eefdd_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14541/e7df9d3e-9a6b-46f5-8469-0690470eefdd_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:70bd16b15fb4038c635dc0cfb0732fa34ad90cdc221a5f0e1769b6ba5511a9c3
|
| 3 |
+
size 8765754
|
2303.14xxx/2303.14541/full.md
ADDED
|
@@ -0,0 +1,355 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# UnScene3D: Unsupervised 3D Instance Segmentation for Indoor Scenes
|
| 2 |
+
|
| 3 |
+
David Rozenberszki<sup>1</sup> Or Litany<sup>2,3</sup> Angela Dai<sup>1</sup>
|
| 4 |
+
|
| 5 |
+
$^{1}$ Technical University of Munich $^{2}$ Technion $^{3}$ NVIDIA
|
| 6 |
+
|
| 7 |
+
https://rozdavid.github.io/unscene3d
|
| 8 |
+
|
| 9 |
+

|
| 10 |
+
Figure 1. We propose UnScene3D, a fully-unsupervised 3D instance segmentation method, effectively separating semantic instances without requiring any manual annotations. We utilize geometric primitives to ensure crisp masks, and due to our self-training loop, we can also obtain a dense set of predictions, even in cluttered indoor scenarios.
|
| 11 |
+
|
| 12 |
+
# Abstract
|
| 13 |
+
|
| 14 |
+
3D instance segmentation is fundamental to geometric understanding of the world around us. Existing methods for instance segmentation of 3D scenes rely on supervision from expensive, manual 3D annotations. We propose UnScene3D, the first fully unsupervised 3D learning approach for class-agnostic 3D instance segmentation of indoor scans. UnScene3D first generates pseudo masks by leveraging self-supervised color and geometry features to find potential object regions. We operate on a basis of geometric oversegmentation, enabling efficient representation and learning on high-resolution 3D data. The coarse proposals are then refined through self-training our model on its predictions. Our approach improves over clustering-based alternatives to unsupervised 3D instance segmentation methods by more than $300\%$ Average Precision score, demonstrating effective instance segmentation even in challenging, cluttered 3D scenes.
|
| 15 |
+
|
| 16 |
+
# 1. Introduction
|
| 17 |
+
|
| 18 |
+
The increasing availability of commodity RGB-D sensors, now widely available on iPhones as well as with the Microsoft Kinect or Intel RealSense, has enabled consumer
|
| 19 |
+
|
| 20 |
+
level capture of 3D geometry of real-world environments. To enable applications in robotics, autonomous navigation, and mixed reality in such scenes, semantic 3D scene understanding is necessary. In particular, 3D instance segmentation is critical to 3D perception, providing dense instance mask predictions, thus enabling physical and geometric reasoning about objects in an environment. While various 3D deep learning approaches have been developed for 3D instance segmentation [5, 14, 17, 18, 21, 22, 30, 32, 42-45, 50-52, 54, 57, 58], they require full supervision from expensive, manual, dense annotations on 3D scenes.
|
| 21 |
+
|
| 22 |
+
We introduce UnScene3D, a novel approach designed for class-agnostic 3D instance segmentation. Our aim is to identify objects in real-world 3D scans by predicting their dense instance masks, without any constraints to a predefined set of class categories. Moreover, we avoid expensive data annotation requirements by operating in an unsupervised fashion, instead leveraging self-supervised 2D and 3D features for segmentation.
|
| 23 |
+
|
| 24 |
+
UnScene3D comprises two essential elements. First, we observe that for RGB-D scan data, self-supervised representation learning methods [19, 60] can provide an innate signal indicating object-ness through feature similarity. We thus generate pseudo masks over 3D segment primitives, based on multimodal analysis of self-supervised color and geometry features from the RGB-D data. By considering
|
| 25 |
+
|
| 26 |
+
mesh segments rather than voxels or points, our approach efficiently scales with high-resolution 3D data in large scene environments while inherently promoting contiguous segmentation masks. As we require strong features for these initial coarse estimates, we fuse information from both geometric and 2D color features in a complementary fashion. Second, following the pseudo mask generation, we train our model through iterative self-training on both the initial pseudo masks and the current confident model predictions. Through multiple rounds of self-training with noise robust losses, we achieve improved object recognition and segmentation. At inference time, we do not require any 2D color signal and can produce class-agnostic 3D instance segmentation for a new geometric observation of a 3D environment. Experiments on challenging, cluttered indoor environments from the ScanNet [10], S3DIS [1] and ARKit [2] datasets show that UnScene3D improves significantly over unsupervised, clustering-based state of the art. In summary, our contributions are:
|
| 27 |
+
|
| 28 |
+
- We propose an unsupervised 3D instance segmentation approach for indoor RGB-D scans, without requiring any human annotation.
|
| 29 |
+
- We generate sparse 3D pseudo masks for unsupervised training based on a multi-modal fusion of color and geometric signal from RGB-D scan data. We achieve robustness and efficiency through a geometry-aware scene coarsening.
|
| 30 |
+
- Our generated pseudo masks are iteratively refined by self-training for 3D instances to improve 3D instance segmentation performance.
|
| 31 |
+
|
| 32 |
+
# 2. Related Work
|
| 33 |
+
|
| 34 |
+
Self-supervised 3D pretraining While significant progress has been made in fully supervised 3D instance segmentation [8, 14, 16-18, 20, 42, 44, 50, 51] the amount of densely annotated 3D data is scarce. Inspired by success in the 2D domain, various 3D pretraining methods have been developed to boost semantic and instance segmentation performance when fine-tuning with annotated semantic labels. Such methods leverage instance discrimination based on different camera views [19, 60], local augmentations [62], or multiple LIDAR sweeps [39]. While these methods can provide powerful 3D feature extraction, they do not construct any notion of object instances.
|
| 35 |
+
|
| 36 |
+
Weakly-supervised 3D segmentation Classical methods have leveraged object template information to match or retrieve templates to local geometry in a scene [4, 25, 28, 31, 36, 37], thereby identifying potential object locations. Other methods formulated 3D dense instance segmentation with only 3D box annotation [6, 41] or single-point supervision and active-learning [34, 53]. More recent methods have focused on exploiting knowledge from powerful pre-trained
|
| 37 |
+
|
| 38 |
+
vision-language models to inform text-guided queries in 3D scenes [12, 24, 33, 40, 46]; however, such methods still rely on large-scale annotated data in the 2D domain.
|
| 39 |
+
|
| 40 |
+
Clustering-based segmentation There has been very little work done in fully unsupervised 3D instance segmentation, but classical clustering methods have been used to group regions with similar geometric properties together. A particularly notable approach is the density-based clustering of DBSCAN [13] and its hierarchical counterpart HDBSCAN [35]. These methods can be used to group point clusters in a 3D scene based on point normals and colors. The ScanNet dataset [10] showed that the Felzenswalb algorithm [15] originally developed for image over-segmentation, can generate useful geometric segment clusters. We also exploit such geometric primitives to guide dimensionality reduction and feature aggregation.
|
| 41 |
+
|
| 42 |
+
Finally, recent methods have been developed to detect instances with self-supervised pretrained features in driving scenarios. These methods often leverage the unique properties of such data, including dynamics and instance sparsity. Song et al. [48] identify object instances through motion, showing promise for self-driving scenarios, but limited to moving objects. Nunes et al. [38] additionally propose a clustering and graph cut based refinement on pre-trained 3D features, focusing on sparse outdoor scenarios to identify spatially separate objects. Our solution aims to segment instances in complex, cluttered indoor environments.
|
| 43 |
+
|
| 44 |
+
Unsupervised 2D instance segmentation Classical graph-cut algorithms [7, 11, 47, 59] can be used to detect objects in scenes, employing low-level feature clustering to identify self-similar regions. Recent advances in self-supervised feature learning have been employed in 2D unsupervised instance segmentation methods, which use two-stage training pipelines to achieve remarkable segmentation results [55, 56]. These methods first generate a set of coarse pseudo masks building on the insights of graph-cut algorithms and then refine them with a series of self-training iterations. In particular, FreeSolo [55] uses multi-branch feature extraction to obtain self-similar regions as mask proposals, producing a dense set of initial pseudo-annotated instances. CutLER [56] uses the normalized cut (NCut) algorithm [47] with deep self-supervised features from DINO [3] to identify multiple prominent regions as pseudo masks. Inspired by such approaches we also leverage pseudo mask generation and self-training, but to handle high-dimensional, noisy real-world 3D scan data, we employ a multi-modal feature reasoning and geometric graph coarsening for robust unsupervised 3D instance segmentation.
|
| 45 |
+
|
| 46 |
+
# 3. Method
|
| 47 |
+
|
| 48 |
+
Problem definition We propose an unsupervised learning-based method for 3D instance segmentation. We operate on a set of training 3D scenes $\{X_i\}_{i=1}^{n_t}$ , represented as mesh graphs $G = (V, E)$ , of vertices $V$ and triangular face edges $E$ , where each scene $X_i$ contains an unknown set of $n_i$ objects in the $i^{th}$ scene. We aim to train a model that can predict for a previously unseen input scene $X$ , a set of 3D masks representing the different object instances in that scene.
|
| 49 |
+
|
| 50 |
+
Method overview In order to achieve unsupervised 3D instance segmentation we first break down the scenes into $N$ geometric primitives $S_{N}$ , which we use to initialize an adjacency matrix $W$ to extract an initial set of pseudo masks $M^0$ , representing instance hypotheses based on combining 2D and 3D inputs $\mathcal{F}_{2D} / \mathcal{F}_{3D} \in R^{N \times D_{2D/3D}}$ , where $D_{2D}, D_{3D}$ are the dimensions of the $2D/3D$ self-supervised features. We regularize the per-segment similarities over geometric primitives for mitigating noise and enabling efficient 3D reasoning. We then employ a series of self-training cycles, updating pseudo mask supervision with new predicted masks, in order to produce final 3D instances. An overview of our approach is shown in Figure 2.
|
| 51 |
+
|
| 52 |
+
# 3.1. Initial pseudo mask generation
|
| 53 |
+
|
| 54 |
+
In order to initiate self-training, we first generate an initial set of pseudo masks, leveraging complementary information from 2D and 3D signal in $\{X_{i}\}$ .
|
| 55 |
+
|
| 56 |
+
# 3.1.1 Feature aggregation
|
| 57 |
+
|
| 58 |
+
To encourage effective initial pseudo mask generation, we employ joint reasoning across both self-supervised color and geometry features, as they can provide complementary information regarding objects. As RGB-D scans often contain color image information and reconstructed 3D scan geometry, we can associate both 2D and 3D features in 3D by back-projecting the 2D extracted features using the corresponding depth and camera pose information for each image. Both 2D and 3D features are extracted through state-of-the-art self-supervised feature learning methods [3, 19]. As real-world camera estimation often contains small misalignment errors and noise or oversmoothing in reconstructed scan geometry, these self-supervised features can often also contain high-frequency noise, which we address in Sec. 3.1.2 when reasoning over these features. Note that while we employ both 2D and 3D signal when available for training, we do not require any aligned color image inputs for inference, enabling more general applicability.
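As a concrete illustration of the 2D-to-3D association, the following sketch projects 3D points into a posed RGB-D frame and gathers the corresponding 2D features; the names and the depth tolerance are assumptions, not values from the paper.

```python
# Minimal sketch: back-project per-pixel 2D features onto 3D points using the
# depth map, camera intrinsics, and camera pose of a single view.
import numpy as np

def backproject_features(points, feat_2d, depth, K, cam2world, depth_tol=0.05):
    """points: (N, 3) world coordinates, feat_2d: (H, W, C) per-pixel features,
    depth: (H, W) depth map, K: (3, 3) intrinsics, cam2world: (4, 4) pose."""
    world2cam = np.linalg.inv(cam2world)
    cam = (world2cam[:3, :3] @ points.T + world2cam[:3, 3:4]).T   # (N, 3) camera coords
    z = cam[:, 2]
    z_safe = np.maximum(z, 1e-6)
    uv = (K @ cam.T).T                                            # homogeneous pixel coords
    u = np.round(uv[:, 0] / z_safe).astype(int)
    v = np.round(uv[:, 1] / z_safe).astype(int)
    H, W, C = feat_2d.shape
    feats = np.zeros((len(points), C), dtype=feat_2d.dtype)
    valid = (z > 0) & (u >= 0) & (u < W) & (v >= 0) & (v < H)
    idx = np.flatnonzero(valid)
    # reject points occluded in this view by checking against the depth map
    visible = np.abs(depth[v[idx], u[idx]] - z[idx]) < depth_tol
    idx = idx[visible]
    feats[idx] = feat_2d[v[idx], u[idx]]
    return feats, idx
```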
|
| 59 |
+
|
| 60 |
+
# 3.1.2 3D Graph Cut
|
| 61 |
+
|
| 62 |
+
To generate pseudo masks from the 2D and 3D self-supervised features, we employ graph cut to estimate class-agnostic instances from the background. More precisely, we leverage the principle of Normalized Cut [47] (NCut), which employs eigenvalue decomposition of an adjacency matrix $W \in R^{N \times N}$ over a graph to identify self-similar regions potentially representing semantic instances, where a set of potential instances can be extracted iteratively. Given a graph representing the 3D scene, we build an adjacency matrix $W$ from the self-supervised features, with a corresponding degree matrix $D \in R^{N \times N}$, where $D(i, i) = \Sigma_j W(i, j)$ and $(D - W)v = \lambda Dv$. In this system, finding the second smallest eigenvalue $\lambda$ and its corresponding eigenvector $v$ is a close approximation to the minimized cut cost. From $v$, we obtain a foreground separation by taking all nodes whose eigenvector components are larger than their mean. To identify multiple foreground objects, this process is repeated iteratively.
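A minimal sketch of this cut step, assuming a dense symmetric adjacency matrix over the scene segments.

```python
# Minimal sketch: solve the generalized eigenproblem (D - W) v = lambda * D * v
# and threshold the second-smallest eigenvector at its mean.
import numpy as np
from scipy.linalg import eigh

def ncut_bipartition(W):
    """W: (N, N) symmetric, non-negative adjacency over scene segments."""
    D = np.diag(W.sum(axis=1) + 1e-12)
    _, eigvecs = eigh(D - W, D)          # ascending generalized eigenvalues
    v = eigvecs[:, 1]                    # second-smallest eigenvector
    return v >= v.mean(), v              # foreground mask and raw node activations
```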
|
| 63 |
+
|
| 64 |
+
Unfortunately, applying this approach directly to the 3D scenes $\{X_{i}\}$ in common 3D representations such as voxels or points is not only computationally infeasible, but also unreliable due to the noise in camera pose estimation and in the geometric reconstruction of 3D scan data. Thus, we propose to regularize the graph cut across geometric primitives.
|
| 65 |
+
|
| 66 |
+
# 3.1.3 Geometric Primitives
|
| 67 |
+
|
| 68 |
+
To enable efficient reasoning across high-dimensional 3D data and robust 3D regularization of noisy features, we propose to operate on geometric primitives acquired through a graph coarsening process. For a 3D scene $X_{i}$ we construct the graph $G = (V,E)$, with $V$ and $E$ being the mesh vertices and face edges. Nodes with similar normals and colors are then aggregated and clustered based on the mesh topology following [15], resulting in a set $S_{N} = \{C_{1}\dots C_{N}\}$ with $\bigcup (S_N) = V$, where each $C_n$ represents a single primitive. This reduces the graph size by multiple orders of magnitude, and enables effective regularization of the noise in the self-supervised 2D and 3D features.
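A simplified sketch of such a coarsening step is shown below; it merges mesh vertices whose normals are nearly parallel using a union-find structure over the face-edge graph. The angle threshold, and the omission of color and of the adaptive Felzenszwalb merging criterion, are simplifications made for illustration.

```python
# Simplified graph-coarsening sketch (inspired by Felzenszwalb-style merging, not the
# exact implementation): mesh vertices with similar normals are greedily merged into
# geometric primitives via union-find over the mesh edge graph.
import numpy as np

class UnionFind:
    def __init__(self, n):
        self.parent = list(range(n))
    def find(self, x):
        while self.parent[x] != x:
            self.parent[x] = self.parent[self.parent[x]]   # path halving
            x = self.parent[x]
        return x
    def union(self, a, b):
        ra, rb = self.find(a), self.find(b)
        if ra != rb:
            self.parent[rb] = ra

def coarsen_mesh(normals, edges, max_normal_angle_deg=15.0):
    """normals: (V, 3) unit vertex normals, edges: list of (i, j) mesh edges.
    Returns an array mapping each vertex to a primitive id."""
    uf = UnionFind(len(normals))
    cos_thresh = np.cos(np.deg2rad(max_normal_angle_deg))
    # Visit the smoothest edges first so flat regions merge before creases.
    weights = [float(normals[i] @ normals[j]) for i, j in edges]
    for w, (i, j) in sorted(zip(weights, edges), key=lambda t: -t[0]):
        if w >= cos_thresh:
            uf.union(i, j)
    roots = np.array([uf.find(i) for i in range(len(normals))])
    _, primitive_ids = np.unique(roots, return_inverse=True)
    return primitive_ids
```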
|
| 69 |
+
|
| 70 |
+
# 3.1.4 NCut on Geometric Primitives
|
| 71 |
+
|
| 72 |
+
Having addressed dimensionality reduction and mitigated speckle noise in our features through geometric primitives, we can leverage the Normalized Cut algorithm to achieve a clean partitioning of the scene graphs. For this, we iteratively apply NCut to our aggregated features to extract the initial pseudo masks denoted as $M^0$. Starting with an empty set $M^0 = \{\}$, we iteratively compute the adjacency matrix over $S_N$ and retrieve the masks $m \subset S_N$. We start
|
| 73 |
+
|
| 74 |
+

|
| 75 |
+
Figure 2. UnScene3D first generates a set of pseudo masks (top) to initiate self-training (bottom) for unsupervised 3D instance segmentation. We leverage features from 3D self-supervised pre-training in combination with 2D self-supervised features on an input mesh. These multi-modal features are then aggregated on geometric primitives, integrating low- and high-level signals for pseudo mask segmentation. These initial pseudo masks are then used as supervision for a 3D transformer-based model to produce updated instance masks that are integrated into the supervision of multiple self-training cycles. Finally, we obtain clean and dense instance segmentation without using any manual annotations.
|
| 76 |
+
|
| 77 |
+
from $N$ geometric segments with their corresponding $D$-dimensional features $\mathcal{F} \in \mathcal{R}^{N \times D}$, and construct the similarity matrix $A = \text{sim}(\mathcal{F})$, where $\text{sim}$ denotes cosine similarity. Additionally, for the multi-modal setup we calculate the similarity matrices $A_{2D}$ and $A_{3D}$ independently and take their weighted average to obtain the final scores; empirically, we found this to be more robust than directly fusing the features of the different modalities, due to their different statistical characteristics. We obtain $W_{j}$, introduced in Section 3.1.2, by thresholding $A$ at $\tau_{cut}$, where $j$ denotes the $j^{th}$ NCut iteration. Using $W_{j}$, we solve for the second eigenvector $v_{j}$ and threshold it to retrieve the partition $m_{j}$. We keep all separated foregrounds in $M^0$; in each subsequent iteration, we mask out the rows and columns of $W_{j}$ belonging to segments of masks $m_{i} \in M^{0}$ that were already accepted as foreground instances. This allows greedy separation of instances in order of confidence in every cut iteration. Examples of our generated pseudo masks are visualized in Figures 5 and 6.
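The following sketch summarizes this adjacency construction; the modality weight `alpha`, the threshold `tau_cut`, and the small floor `eps` that keeps the graph connected are illustrative assumptions rather than the exact values used.

```python
# Hedged sketch of the multi-modal adjacency construction: per-modality cosine-similarity
# matrices are averaged with a weight and thresholded at tau_cut.
import numpy as np

def cosine_similarity(F):
    Fn = F / (np.linalg.norm(F, axis=1, keepdims=True) + 1e-8)
    return Fn @ Fn.T

def build_adjacency(F3d, F2d=None, alpha=0.5, tau_cut=0.65, eps=1e-5):
    """F3d: (N, D3) per-primitive 3D features, F2d: optional (N, D2) projected 2D features."""
    A = cosine_similarity(F3d)
    if F2d is not None:
        A = alpha * A + (1.0 - alpha) * cosine_similarity(F2d)   # weighted modality average
    W = np.where(A >= tau_cut, 1.0, eps)                         # binarize, keep graph connected
    np.fill_diagonal(W, 1.0)
    return W
```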
|
| 78 |
+
|
| 79 |
+
As the adjacency graph is unaware of the mesh connectivity, NCut often produces masks that span spatially separated scene regions. In 3D, we can leverage knowledge of the physical distance and connectivity of $G$ to constrain masks to be contiguous in the coarsened scene connectivity graph. We thus filter masks $m_j$ that have separated components, keeping only the part $\tilde{m}_j$ that contains the segment with the maximum absolute value in $v_j$. This connectivity-based separation is performed before saving $\tilde{m}_j$ into $M^0$, allowing the dropped parts to be re-detected in the
|
| 80 |
+
|
| 81 |
+
next NCut iterations. Finally, we iterate until the maximum number of instances $M^0 = \{m_i\}_{i=1}^{N_m}$ is reached, or there are no segments left in the scene. Moreover, we favor generating a reliable set of masks at the cost of restricting ourselves to a sparse initial set (i.e., missing potential instances rather than generating noisy masks for them), through a stricter $\tau_{cut}$ or a lower maximum number of instances.
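The connectivity-based filtering described above can be sketched as a simple breadth-first search over the coarsened scene graph; the function below is an illustrative approximation, keeping the connected foreground component that contains the segment with the largest absolute eigenvector activation.

```python
# Illustrative connectivity filter: restrict an NCut foreground mask to the connected
# component that contains the most confident segment; the rest stays available for
# later cut iterations.
import numpy as np
from collections import deque

def filter_mask_by_connectivity(mask, v, neighbors):
    """mask: (N,) bool foreground, v: (N,) eigenvector, neighbors: dict seg -> iterable of segs."""
    fg = np.where(mask)[0]
    if len(fg) == 0:
        return mask
    seed = fg[np.argmax(np.abs(v[fg]))]              # most confident foreground segment
    keep, queue = {seed}, deque([seed])
    while queue:                                     # BFS restricted to foreground segments
        cur = queue.popleft()
        for nb in neighbors.get(cur, ()):
            if mask[nb] and nb not in keep:
                keep.add(nb)
                queue.append(nb)
    out = np.zeros_like(mask)
    out[list(keep)] = True
    return out
```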
|
| 82 |
+
|
| 83 |
+
# 3.2. Self-Training
|
| 84 |
+
|
| 85 |
+
Our initial pseudo masks can provide a set of proposed instances $M^0$ ; however, these pseudo masks are quite sparse in the scenes and sometimes over- or under-split nearby instances. We thus refine the pseudo mask data through an iterative self-training strategy, producing final instance segmentation predictions $M'$ with more dense and complete instance proposals.
|
| 86 |
+
|
| 87 |
+
We leverage a state-of-the-art 3D transformer-based backbone [45] for our self-training, using the pseudo mask data as mask-head supervision, while the class head is collapsed to foreground and background classes. Over multiple training cycles, we save the proposals of the $t^{th}$ iteration from the self-trained model into $M^t$, extending the original pseudo dataset so that $M^t \supseteq M^0$. From the second training iteration on, we extract the $K$ most confident predictions and sample these new instance proposals as additions to the pseudo annotations. Further, we only accept a new instance if its added information value exceeds a minimum threshold, measured by simple segment-level IoU scores. This way, we can effectively
|
| 88 |
+
|
| 89 |
+
densify the originally sparse annotations without degrading the quality of the original clean pseudo masks.
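A hedged sketch of this densification step is given below; the confidence cutoff, the number of kept proposals, and the IoU threshold are illustrative placeholders rather than our exact settings.

```python
# Sketch of pseudo-label densification: confident new predictions are appended to the
# pseudo dataset only if they overlap little with masks that are already there.
import numpy as np

def mask_iou(a, b):
    inter = np.logical_and(a, b).sum()
    union = np.logical_or(a, b).sum()
    return inter / union if union > 0 else 0.0

def densify_pseudo_masks(pseudo_masks, predictions, scores, top_k=20, max_iou=0.3, min_score=0.8):
    """pseudo_masks: list of (N,) bool masks; predictions/scores come from the self-trained model."""
    order = np.argsort(scores)[::-1][:top_k]                # keep the K most confident proposals
    updated = list(pseudo_masks)
    for idx in order:
        if scores[idx] < min_score:
            break
        new_mask = predictions[idx]
        # accept only if it adds information, i.e. low IoU with every existing pseudo mask
        if all(mask_iou(new_mask, m) < max_iou for m in updated):
            updated.append(new_mask)
    return updated
```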
|
| 90 |
+
|
| 91 |
+
# 3.3. Implementation Details
|
| 92 |
+
|
| 93 |
+
Backbones. We use a Res16UNet34C sparse-voxel UNet implemented in the MinkowskiEngine [8] for 3D pretrained feature extraction, as well as for the 3D transformer during self-training. For the pretrained features we use weights that we trained ourselves with [19], for compatibility reasons.
|
| 94 |
+
|
| 95 |
+
Self-training. We employ the 3D transformer architecture of [45], initialized from scratch. The first self-training cycle is trained for 600 epochs with a batch size of 8 until convergence, which takes $\approx 3$ days on a single NVIDIA RTX A6000 GPU. Each further self-training cycle is initialized from the previous state and finetuned for an additional 50 epochs ($\approx 4$ hours), for a total of 4 training cycles, to produce the final set of instance predictions $S$. For the Hungarian assignment, we take the original weighted combination of Dice and binary cross-entropy losses and only apply the DropLoss condition in the backpropagation phase.
|
| 96 |
+
|
| 97 |
+
# 4. Experiments
|
| 98 |
+
|
| 99 |
+
We demonstrate the effectiveness of UnScene3D for unsupervised class-agnostic 3D instance segmentation on challenging real-world 3D scan datasets containing a large diversity of objects and significant clutter. We train our method and all learned baselines on ScanNet [10], using the official train split. Note that no semantic annotation data is used for training, only the RGB-D reconstructions. Additionally, we show that our approach trained on ScanNet data can effectively transfer to class-agnostic 3D instance segmentation on ARKitScenes [2] data.
|
| 100 |
+
|
| 101 |
+
Datasets. We train and evaluate UnScene3D on RGB-D scan data from ScanNet [10], using the official train split. We use the raw RGB images, and registered camera poses for training our approach, while the semantic annotations are used only for evaluation. We use the official ScanNet train split for both the pre-trained 3D features from [19] and our self-training iterations. We additionally evaluate our method on ARKitScenes [2], on an 884/120 train/test split of indoor LIDAR scans. For ARKitScenes, we use 3D pre-trained features from ScanNet, followed by pseudo mask generation and self-training on the ARKitScenes train scenes. We convert the LIDAR scan data to meshes with Poisson Surface Reconstruction [26, 27] prior to our graph coarsening. Note that all baselines using learned features are trained on the same ScanNet data as ours.
|
| 102 |
+
|
| 103 |
+
Evaluation metrics. We evaluate class-agnostic 3D instance segmentation performance with the widely-used Average Precision score on the full-resolution mesh vertices.
|
| 104 |
+
|
| 105 |
+
<table><tr><td>ScanNet</td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>HDBSCAN [35]</td><td>32.1</td><td>5.5</td><td>1.6</td></tr><tr><td>Nunes et al. [38]</td><td>30.5</td><td>7.3</td><td>2.3</td></tr><tr><td>Felzenszwalb [15]</td><td>38.9</td><td>12.7</td><td>5.0</td></tr><tr><td>CutLER Projection [56]</td><td>7.0</td><td>0.2</td><td>0.3</td></tr><tr><td>Ours</td><td>58.5</td><td>32.2</td><td>15.9</td></tr></table>
|
| 106 |
+
|
| 107 |
+
Table 1. Unsupervised class-agnostic 3D instance segmentation on ScanNet [10]. Our approach improves significantly over baselines (3x improvement in AP) due to our pseudo mask generation and self-training strategy.
|
| 108 |
+
|
| 109 |
+
Following the strategy of the supervised benchmark [10], we report scores at IoU thresholds of $25\%$ and $50\%$ (AP@25, AP@50), as well as averaged over all overlaps between $50\%$ and $95\%$ in $5\%$ steps (AP). Note that since predictions are class-agnostic, all methods are evaluated only on instance mask AP values, without considering any semantic class labels. For ScanNet, we evaluate against ground truth instance masks from the established 20-class benchmark. Since ARKitScenes does not contain any ground truth instance mask annotations, we evaluate all methods qualitatively.
|
| 110 |
+
|
| 111 |
+
Comparison to the state of the art. We evaluate our approach in comparison to the state-of-the-art traditional clustering methods HDBSCAN [35] and Felzenszwalb's algorithm [15], in addition to the unsupervised approach of Nunes et al. [38], which leverages learned feature clustering and refinement. All baselines are provided with input mesh vertices, colors, and normals, while our approach and Nunes et al. also operate on sparse voxel scene representations. Table 1 and Figure 3 show comparisons on ScanNet data; our UnScene3D approach improves significantly over the state of the art by effectively leveraging signal from self-supervised 3D features to guide our model through self-training. Note that since Nunes et al. was designed for outdoor applications, even while leveraging ScanNet-trained features, it uses ground removal and relies on physical object separation, making segmentation difficult in cluttered scenes.
|
| 112 |
+
|
| 113 |
+
Additionally, we demonstrate the importance of reasoning in 3D by comparing with a state-of-the-art unsupervised 2D instance segmentation approach, CutLER [56], run on the RGB frames of the scans and projected to 3D using the corresponding camera poses. Here, the difficulty lies in resolving view inconsistencies, occlusions, and a lack of knowledge of geometric structure, resulting in poor 3D segmentation performance despite plausible 2D proposals.
|
| 114 |
+
|
| 115 |
+
Evaluation on other datasets We quantitatively evaluate UnScene3D on Area_5 of the S3DIS dataset [1], using only 3D features pretrained on [10]. A comparison with the 3D-only state of the art can be seen in Table 2.
|
| 116 |
+
|
| 117 |
+
We additionally compare with the state of the art on ARKitScenes [2] data in Figure 7. Here we show only qualitative
|
| 118 |
+
|
| 119 |
+

|
| 120 |
+
Figure 3. Qualitative comparison on ScanNet [10] scenes with projected predictions from the 2D method CutLER [56], traditional clustering-based methods Felzenszwalb [15] and HDBSCAN [35], and the GraphCut-based cluster refinement method [38]. Our approach leverages strong pseudo mask prediction and a self-training strategy to produce cleaner, more accurate instance segmentation.
|
| 121 |
+
|
| 122 |
+
<table><tr><td>S3DIS</td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>HDBSCAN [35]</td><td>27.9</td><td>11.2</td><td>5.0</td></tr><tr><td>Felzenszwalb [15]</td><td>23.5</td><td>10.7</td><td>5.0</td></tr><tr><td>Nunes et al. [38]</td><td>20.1</td><td>10.5</td><td>5.5</td></tr><tr><td>Ours</td><td>52.6</td><td>40.3</td><td>21.4</td></tr></table>
|
| 123 |
+
|
| 124 |
+
results due to the absence of ground truth instance mask annotations. UnScene3D effectively produces cleaner, more accurate segmentations in these complex environments.
|
| 125 |
+
|
| 126 |
+
UnScene3D as data-efficient pretraining UnScene3D is able to learn powerful object properties and dense segmentation even in a fully unsupervised fashion. We demonstrate the potential of our strong learned features for downstream 3D instance segmentation with limited annotated data. We follow the setup introduced by CSC [19] with limited reconstructions available for downstream fine-tuning. We show our method as a strong pretraining strategy in Figure 4, notably outperforming both training from scratch as well as the state-of-the-art 3D pretraining of CSC. For more details we refer to our supplementary material.
|
| 127 |
+
|
| 128 |
+

|
| 129 |
+
Figure 4. Our unsupervised self-training produces strong 3D features that can serve as a powerful pretraining strategy for 3D instance segmentation in limited data scenarios. UnScene3D significantly outperforms state-of-the-art self-supervised 3D pretraining [19] on ScanNet instance segmentation.
|
| 130 |
+
|
| 131 |
+
What is the effect of multi-modal signal for pseudo mask generation? We evaluate the effect of self-supervised color and geometry signals for generating pseudo annotations in Table 3. We consider using only self-supervised geometric features (3D), only self-supervised color features projected to the 3D scans (2D), and both together (both). We find that color and geometry provide complementary signals. We also note that color features are only used for the initial pseudo mask generation; during self-training iterations and at test time, only 3D features are used.
|
| 132 |
+
|
| 133 |
+
Table 2. Evaluation on S3DIS dataset (Area_5). UnScene3D is able to adapt to other datasets as well and shows a significant improvement over previous SOTA methods.
|
| 134 |
+
|
| 135 |
+
<table><tr><td></td><td>Modality</td><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP Final</td></tr><tr><td>FreeMask</td><td>3D</td><td>14.4</td><td>3.6</td><td>1.3</td><td>2.0</td></tr><tr><td>Ours</td><td>3D</td><td>45.4</td><td>16.7</td><td>9.2</td><td>13.3</td></tr><tr><td>FreeMask</td><td>2D</td><td>31.1</td><td>15.1</td><td>6.8</td><td>13.8</td></tr><tr><td>Ours</td><td>2D</td><td>51.3</td><td>21.8</td><td>9.4</td><td>15.7</td></tr><tr><td>FreeMask</td><td>both</td><td>23.7</td><td>10.1</td><td>5.7</td><td>12.1</td></tr><tr><td>Ours</td><td>both</td><td>52.9</td><td>23.2</td><td>10.4</td><td>15.9</td></tr></table>
|
| 136 |
+
|
| 137 |
+
Table 3. We compare pseudo mask generation from 3D-only features (3D), color-only features (2D), and both color and geometry (both) signal, as well as with pseudo annotation generation algorithm FreeMask [55]. In this table we report method performances after a single iteration of self-training initialized from the different pseudo annotation methods and the final AP scores after 4 self-training iterations.
|
| 138 |
+
|
| 139 |
+

|
| 140 |
+
Figure 5. Initial pseudo masks generated by UnScene3D in comparison with a 3D-lifted FreeMask [55]. FreeMask tends to produce a larger set of noisier pseudo masks, while we rely on a cleaner but sparser set for our self-training.
|
| 141 |
+
|
| 142 |
+
What is the effect of pseudo annotations? We also evaluate the effect of our pseudo mask generation in Table 3 and Figure 5, in comparison to the 3D adaptation of the FreeMask [55] approach operating on our geometric segments. FreeMask tends to estimate a larger but noisier set of initial pseudo masks, while our approach focuses on a sparser set of more reliable pseudo masks and produces significantly better performance. The strong difference in performance can be explained by the nature of the samples: while a sparser set of examples can be extended over multiple iterations of self-training, noisy samples propagate through the full pipeline and thus directly degrade the final performance. Further details of our adaptation of the FreeMask method to 3D can be found in our supplemental.
|
| 143 |
+
|
| 144 |
+
What is the impact of self-training? We observe that while self-training iterations consistently improve performance, their added information value saturates after a limited number of cycles. We report the first 4 steps in Table 4 and observe a significant relative improvement for both modality settings.
|
| 145 |
+
|
| 146 |
+

|
| 147 |
+
Figure 6. UnScene3D employs self-training to refine the initial sparse set of proposals. We can see consistent improvement over both the number of predicted instances and the quality of the instance masks. Here we show results using the pseudo annotations obtained from both modalities.
|
| 148 |
+
|
| 149 |
+
<table><tr><td rowspan="2"></td><td colspan="3">3D Only</td><td colspan="3">3D & 2D</td></tr><tr><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>$M^0$ pseudo masks</td><td>13.8</td><td>4.7</td><td>2.0</td><td>19.9</td><td>10.0</td><td>5.9</td></tr><tr><td>$1^{st}$ Self-train</td><td>45.4</td><td>16.7</td><td>9.2</td><td>52.9</td><td>23.2</td><td>10.4</td></tr><tr><td>$2^{nd}$ Self-train</td><td>50.0</td><td>24.1</td><td>12.0</td><td>56.5</td><td>29.8</td><td>15.0</td></tr><tr><td>$3^{rd}$ Self-train</td><td>52.2</td><td>25.8</td><td>12.8</td><td>58.8</td><td>31.9</td><td>15.9</td></tr><tr><td>$4^{th}$ Self-train</td><td>52.7</td><td>26.2</td><td>13.3</td><td>58.5</td><td>32.2</td><td>15.9</td></tr></table>
|
| 150 |
+
|
| 151 |
+
Table 4. Multiple iterations of self-training significantly improve performance, saturating around 4 iterations.
|
| 152 |
+
|
| 153 |
+
Limitations While UnScene3D offers a promising step towards unsupervised 3D instance segmentation, various limitations remain. We rely on a mesh representation for graph coarsening, but believe this could be extended to alternative representations through neighborhood reasoning. Additionally, our graph coarsening step may cause very small objects (e.g., pens, cell phones) to be missed in the pseudo annotation generation. Finally, employing a fixed set of pseudo masks from the initial stage that are used
|
| 154 |
+
|
| 155 |
+

|
| 156 |
+
Figure 7. As UnScene3D does not require any human annotation, we can also train and test our method on the ARKitScenes [2] dataset. We leverage 3D features followed by a series of self-training iterations for cleaner, more accurate instance segmentation. Qualitative results are consistently better than our baselines.
|
| 157 |
+
|
| 158 |
+
through self-training could reinforce noisy predictions.
|
| 159 |
+
|
| 160 |
+
# 5. Conclusion
|
| 161 |
+
|
| 162 |
+
We introduced UnScene3D, a novel approach towards fully-unsupervised 3D instance segmentation in cluttered indoor scenes. Our approach effectively combines low-level geometric properties to regularize multi-modal self-supervised deep features for initial pseudo mask extraction, and our self-training notably improves performance by refining these proposals into a more complete, dense set of instances. As 3D instance segmentation is a crucial aspect of 3D scene understanding, UnScene3D's ability to achieve this without requiring any manual annotations opens up new possibilities for 3D semantic understanding.
|
| 163 |
+
|
| 164 |
+
# 6. Acknowledgements
|
| 165 |
+
|
| 166 |
+
This project is funded by the Bavarian State Ministry of Science and the Arts and coordinated by the Bavarian Research Institute for Digital Transformation (bidt), the ERC Starting Grant SpatialSem (101076253), and supported in part by a Google research gift. Or Litany is a Taub fellow and is supported by the Azrieli Foundation Early Career Faculty Fellowship.
|
| 167 |
+
|
| 168 |
+
# References
|
| 169 |
+
|
| 170 |
+
[1] Iro Armeni, Ozan Sener, Amir R. Zamir, Helen Jiang, Ioannis Brilakis, Martin Fischer, and Silvio Savarese. 3d semantic parsing of large-scale indoor spaces. In 2016 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), pages 1534-1543, 2016. 2, 5
|
| 171 |
+
[2] Gilad Baruch, Zhuoyuan Chen, Afshin Dehghan, Tal Dimry, Yuri Feigin, Peter Fu, Thomas Gebauer, Brandon Joffe, Daniel Kurz, Arik Schwartz, and Elad Shulman. ARKitScenes - a diverse real-world dataset for 3d indoor scene understanding using mobile RGB-d data. In Thirty-fifth Conference on Neural Information Processing Systems Datasets and Benchmarks Track (Round 1), 2021. 2, 5, 8, 12, 13
|
| 172 |
+
[3] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the International Conference on Computer Vision (ICCV), 2021. 2, 3, 16
|
| 173 |
+
[4] Kang Chen, Yu-Kun Lai, Yu-Xin Wu, Ralph Martin, and ShiMin Hu. Automatic semantic modeling of indoor scenes from low-quality rgb-d data using contextual information. ACM Transactions on Graphics, 33(6), 2014. 2
|
| 174 |
+
[5] Shaoyu Chen, Jiemin Fang, Qian Zhang, Wenyu Liu, and Xinggang Wang. Hierarchical aggregation for 3d instance segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15467-15476, 2021. 1
|
| 175 |
+
[6] Julian Chibane, Francis Engelmann, Tuan Anh Tran, and Gerard Pons-Moll. Box2mask: Weakly supervised 3d semantic instance segmentation using bounding boxes. In European Conference on Computer Vision (ECCV). Springer, 2022. 2
|
| 176 |
+
[7] Sunil Chopra and M. R. Rao. The partition problem. Mathematical Programming, 59:87-115, 1993. 2
|
| 177 |
+
[8] Christopher Choy, JunYoung Gwak, and Silvio Savarese. 4d spatio-temporal convnets: Minkowski convolutional neural networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 3075-3084, 2019. 2, 5
|
| 178 |
+
[9] Angela Dai and Matthias Nießner. 3dmv: Joint 3d-multiview prediction for 3d semantic scene segmentation. In European Conference on Computer Vision, 2018. 12
|
| 179 |
+
[10] Angela Dai, Angel X. Chang, Manolis Savva, Maciej Halber, Thomas Funkhouser, and Matthias Nießner. Scannet: Richly-annotated 3d reconstructions of indoor scenes. In Proc. Computer Vision and Pattern Recognition (CVPR), IEEE, 2017. 2, 5, 6, 12, 14
|
| 180 |
+
[11] Michel Deza and Monique Laurent. Geometry of cuts and metrics. In Algorithms and Combinatorics, 2009. 2
|
| 181 |
+
[12] Runyu Ding, Jihan Yang, Chuhui Xue, Wenqing Zhang, Song Bai, and Xiaojuan Qi. Pla: Language-driven open-vocabulary 3d scene understanding. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2023. 2
|
| 182 |
+
[13] Martin Ester, Hans-Peter Kriegel, Jörg Sander, and Xiaowei Xu. A density-based algorithm for discovering clusters in
|
| 183 |
+
|
| 184 |
+
large spatial databases with noise. In Proceedings of the Second International Conference on Knowledge Discovery and Data Mining, page 226-231. AAAI Press, 1996. 2
|
| 185 |
+
[14] Siqi Fan, Qiulei Dong, Fenghua Zhu, Yisheng Lv, Peijun Ye, and Fei-Yue Wang. Scf-net: Learning spatial contextual features for large-scale point cloud segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14504-14513, 2021. 1, 2
|
| 186 |
+
[15] Pedro F Felzenszwalb and Daniel P Huttenlocher. Efficient graph-based image segmentation. International journal of computer vision, 59:167-181, 2004. 2, 3, 5, 6, 7
|
| 187 |
+
[16] Benjamin Graham, Martin Engelcke, and Laurens van der Maaten. 3d semantic segmentation with submanifold sparse convolutional networks. CVPR, 2018. 2
|
| 188 |
+
[17] Lei Han, Tian Zheng, Lan Xu, and Lu Fang. Occuseg: Occupancy-aware 3d instance segmentation. In 2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 2937-2946, 2020. 1
|
| 189 |
+
[18] Ji Hou, Angela Dai, and Matthias Nießner. 3d-sis: 3d semantic instance segmentation of rgb-d scans. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4421-4430, 2019. 1, 2, 12
|
| 190 |
+
[19] Ji Hou, Benjamin Graham, Matthias Nießner, and Saining Xie. Exploring data-efficient 3d scene understanding with contrastive scene contexts. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 15587-15597, 2021. 1, 2, 3, 5, 7, 12, 15, 16
|
| 191 |
+
[20] Ji Hou, Xiaoliang Dai, Zijian He, Angela Dai, and Matthias Nießner. Mask3d: Pre-training 2d vision transformers by learning masked 3d priors. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13510-13519, 2023. 2, 12
|
| 192 |
+
[21] Qingyong Hu, Bo Yang, Linhai Xie, Stefano Rosa, Yulan Guo, Zhihua Wang, Niki Trigoni, and Andrew Markham. Randla-net: Efficient semantic segmentation of large-scale point clouds. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11108-11117, 2020. 1
|
| 193 |
+
[22] Le Hui, Linghua Tang, Yaqi Shen, Jin Xie, and Jian Yang. Learning superpoint graph cut for 3d instance segmentation. In NeurIPS, 2022. 1
|
| 194 |
+
[23] Maximilian Jaritz, Jiayuan Gu, and Hao Su. Multi-view pointnet for 3d scene understanding. In ICCV Workshop 2019, 2019. 12
|
| 195 |
+
[24] Krishna Murthy Jatavallabhula, Alihusein Kuwajerwala, Qiao Gu, Mohd Omama, Tao Chen, Shuang Li, Ganesh Iyer, Soroush Saryazdi, Nikhil Keetha, Ayush Tewari, Joshua B. Tenenbaum, Celso Miguel de Melo, Madhava Krishna, Liam Paull, Florian Shkurti, and Antonio Torralba. Conceptfusion: Open-set multimodal 3d mapping. arXiv, 2023. 2
|
| 196 |
+
[25] Andrej Karpathy, Stephen Miller, and Li Fei-Fei. Object discovery in 3d scenes via shape analysis. In 2013 IEEE international conference on robotics and automation, pages 2088–2095. IEEE, 2013. 2
|
| 197 |
+
[26] Michael Kazhdan and Hugues Hoppe. Screened poisson surface reconstruction. ACM Transactions on Graphics (ToG), 32(3):1-13, 2013. 5
|
| 198 |
+
|
| 199 |
+
[27] Michael Kazhdan, Matthew Bolitho, and Hugues Hoppe. Poisson surface reconstruction. In Proceedings of the fourth Eurographics symposium on Geometry processing, page 0, 2006. 5
|
| 200 |
+
[28] Young Min Kim, Niloy J Mitra, Dong-Ming Yan, and Leonidas Guibas. Acquiring 3d indoor environments with variability and repetition. ACM Transactions on Graphics (TOG), 31(6):1-11, 2012. 2
|
| 201 |
+
[29] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 15
|
| 202 |
+
[30] Maksim Kolodiazhnyi, Danila Rukhovich, Anna Vorontsova, and Anton Konushin. Top-down beats bottom-up in 3d instance segmentation, 2023. 1
|
| 203 |
+
[31] Yangyan Li, Angela Dai, Leonidas Guibas, and Matthias Nießner. Database-assisted object retrieval for real-time 3d reconstruction. In Computer graphics forum, pages 435-446. Wiley Online Library, 2015. 2
|
| 204 |
+
[32] Zhihao Liang, Zhihao Li, Songcen Xu, Mingkui Tan, and Kui Jia. Instance segmentation in 3d scenes using semantic superpoint tree networks. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 2783-2792, 2021. 1
|
| 205 |
+
[33] Minghua Liu, Yinhao Zhu, H. Cai, Shizhong Han, Z. Ling, Fatih Murat Porikli, and Hao Su. Partslip: Low-shot part segmentation for 3d point clouds via pretrained image-language models. 2023 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 21736-21746, 2022. 2
|
| 206 |
+
[34] Zhengzhe Liu, Xiaojuan Qi, and Chi-Wing Fu. One thing one click: A self-training approach for weakly supervised 3d semantic segmentation. 2021 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), pages 1726-1736, 2021. 2
|
| 207 |
+
[35] Leland McInnes and John Healy. Accelerated hierarchical density based clustering. In 2017 IEEE International Conference on Data Mining Workshops (ICDMW), pages 33-42. IEEE, 2017. 2, 5, 6, 7
|
| 208 |
+
[36] Yoshikatsu Nakajima, Byeongkeun Kang, Hideo Saito, and Kris Kitani. Incremental class discovery for semantic segmentation with rgbd sensing. In Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV), 2019. 2
|
| 209 |
+
[37] Liangliang Nan, Ke Xie, and Andrei Sharf. A search-classify approach for cluttered indoor scene understanding. ACM Trans. Graph., 31(6), 2012. 2
|
| 210 |
+
[38] Lucas Nunes, Xieyuanli Chen, Rodrigo Marcuzzi, Aljosa Osep, Laura Leal-Taixe, Cyril Stachniss, and Jens Behley. Unsupervised class-agnostic instance segmentation of 3d lidar data for autonomous vehicles. IEEE Robotics and Automation Letters, 7(4):8713-8720, 2022. 2, 5, 6, 7
|
| 211 |
+
[39] Lucas Nunes, Rodrigo Marcuzzi, Xieyuanli Chen, Jens Behley, and Cyril Stachniss. Segcontrast: 3d point cloud feature representation learning through self-supervised segment discrimination. IEEE Robotics and Automation Letters, 7(2):2116-2123, 2022. 2, 12
|
| 212 |
+
|
| 213 |
+
[40] Songyou Peng, Kyle Genova, Chiyu Jiang, Andrea Tagliasacchi, Marc Pollefeys, Thomas Funkhouser, et al. Openscene: 3d scene understanding with open vocabularies. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 815-824, 2023. 2
|
| 214 |
+
[41] Yinyin Peng, Hui Feng, Tao Chen, and Bo Hu. Point cloud instance segmentation with inaccurate bounding-box annotations. Sensors (Basel, Switzerland), 23, 2023. 2
|
| 215 |
+
[42] Charles Ruizhongtai Qi, Li Yi, Hao Su, and Leonidas J Guibas. Pointnet++: Deep hierarchical feature learning on point sets in a metric space. Advances in neural information processing systems, 30, 2017. 1, 2, 16
|
| 216 |
+
[43] Dario Rethage, Johanna Wald, Jurgen Sturm, Nassir Navab, and Federico Tombari. Fully-convolutional point networks for large-scale point clouds. In Proceedings of the European Conference on Computer Vision (ECCV), pages 596-611, 2018.
|
| 217 |
+
[44] David Rozenberszki, Or Litany, and Angela Dai. Language-grounded indoor 3d semantic segmentation in the wild. In Proceedings of the European Conference on Computer Vision (ECCV), 2022. 2
|
| 218 |
+
[45] Jonas Schult, Francis Engelmann, Alexander Hermans, Or Litany, Siyu Tang, and Bastian Leibe. Mask3D for 3D Semantic Instance Segmentation. In International Conference on Robotics and Automation (ICRA), 2023. 1, 4, 5, 15
|
| 219 |
+
[46] Nur Muhammad Mahi Shafiullah, Chris Paxton, Lerrel Pinto, Soumith Chintala, and Arthur Szlam. Clip-fields: Weakly supervised semantic fields for robotic memory. arXiv preprint arXiv: Arxiv-2210.05663, 2022. 2
|
| 220 |
+
[47] Jianbo Shi and Jitendra Malik. Normalized cuts and image segmentation. IEEE Transactions on pattern analysis and machine intelligence, 22(8):888-905, 2000. 2, 3
|
| 221 |
+
[48] Ziyang Song and Bo Yang. OGC: Unsupervised 3D Object Segmentation from Rigid Dynamics of Point Clouds. In NeurIPS, 2022. 2
|
| 222 |
+
[49] Carole H Sudre, Wenqi Li, Tom Vercauteren, Sebastian Ourselin, and M Jorge Cardoso. Generalised dice overlap as a deep learning loss function for highly unbalanced segmentations. In Deep Learning in Medical Image Analysis and Multimodal Learning for Clinical Decision Support: Third International Workshop, DLMIA 2017, and 7th International Workshop, ML-CDS 2017, Held in Conjunction with MICCAI 2017, Quebec City, QC, Canada, September 14, Proceedings 3, pages 240-248. Springer, 2017. 12
|
| 223 |
+
[50] Jiahao Sun, Chunmei Qing, Junpeng Tan, and Xiangmin Xu. Superpoint transformer for 3d scene instance segmentation. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 2393-2401, 2023. 1, 2
|
| 224 |
+
[51] Thang Vu, Kookhoi Kim, Tung M Luu, Thanh Nguyen, Junyeong Kim, and Chang D Yoo. Softgroup++: Scalable 3d instance segmentation with octree pyramid grouping. arXiv preprint arXiv:2209.08263, 2022. 2
|
| 225 |
+
[52] Thang Vu, Kookhoi Kim, Tung M. Luu, Xuan Thanh Nguyen, and Chang D. Yoo. Softgroup for 3d instance segmentation on 3d point clouds. In CVPR, 2022. 1
|
| 226 |
+
[53] Puzuo Wang, Wei Yao, and Jie Shao. One class one click: Quasi scene-level weakly supervised point cloud semantic
|
| 227 |
+
|
| 228 |
+
segmentation with active learning. ISPRS Journal of Photogrammetry and Remote Sensing, 204:89-104, 2023. 2
|
| 229 |
+
[54] Weiyue Wang, Ronald Yu, Qiangui Huang, and Ulrich Neumann. Sgpn: Similarity group proposal network for 3d point cloud instance segmentation. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2569-2578, 2018. 1
|
| 230 |
+
[55] Xinlong Wang, Zhiding Yu, Shalini De Mello, Jan Kautz, Anima Anandkumar, Chunhua Shen, and Jose M Alvarez. Freesolo: Learning to segment objects without annotations. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14176-14186, 2022. 2, 7, 12, 15, 16, 17
|
| 231 |
+
[56] Xudong Wang, Rohit Girdhar, Stella X Yu, and Ishan Misra. Cut and learn for unsupervised object detection and instance segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3124-3134, 2023. 2, 5, 6, 12, 15
|
| 232 |
+
[57] Yue Wang, Yongbin Sun, Ziwei Liu, Sanjay E Sarma, Michael M Bronstein, and Justin M Solomon. Dynamic graph cnn for learning on point clouds. Acm Transactions On Graphics (tog), 38(5):1-12, 2019. 1
|
| 233 |
+
[58] Wenxuan Wu, Zhongang Qi, and Li Fuxin. Pointconv: Deep convolutional networks on 3d point clouds. In Proceedings of the IEEE/CVF Conference on computer vision and pattern recognition, pages 9621-9630, 2019. 1
|
| 234 |
+
[59] Z. Wu and R. Leahy. An optimal graph theoretic approach to data clustering: theory and its application to image segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 15(11):1101-1113, 1993. 2
|
| 235 |
+
[60] Saining Xie, Jiatao Gu, Demi Guo, Charles R Qi, Leonidas Guibas, and Or Litany. Pointcontrast: Unsupervised pretraining for 3d point cloud understanding. In Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part III 16, pages 574-591. Springer, 2020. 1, 2, 12
|
| 236 |
+
[61] Yunhan Yang, Xiaoyang Wu, Tong He, Hengshuang Zhao, and Xihui Liu. Sam3d: Segment anything in 3d scenes. arXiv preprint arXiv:2306.03908, 2023. 15
|
| 237 |
+
[62] Zaiwei Zhang, Rohit Girdhar, Armand Joulin, and Ishan Misra. Self-supervised pretraining of 3d features on any point-cloud. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 10252-10263, 2021. 2, 12
|
| 238 |
+
|
| 239 |
+
# 7. Appendix
|
| 240 |
+
|
| 241 |
+
# 7.1. UnScene3D as Data Efficient Pretraining
|
| 242 |
+
|
| 243 |
+
We report additional details on the data-efficient pretraining performance of UnScene3D in Table 5.
|
| 244 |
+
|
| 245 |
+
We also note that the 3D contrastive pre-training of CSC, similar to other 3D pre-training methods developed for non-transformer backbones [19, 39, 60, 62], was not beneficial for a transformer-based model. A similar observation was also reported in a recent pretraining method [20]. We thus also compare with CSC pretraining on their original 3D backbone (which demonstrated an improvement over training from scratch on the same backbone). Our approach improves notably over both alternatives.
|
| 246 |
+
|
| 247 |
+
# 7.2. The effect of noise robust losses.
|
| 248 |
+
|
| 249 |
+
We adopt DropLoss [56] for our self-training cycles, which is robust to sparse data and missing annotations. In particular, we use a weighted combination of cross-entropy and Dice [49] losses for bipartite matching with the pseudo annotations. We then drop, during backpropagation, the losses of predictions that do not have at least $\tau_{drop}$ overlap with the annotations from the previous cycle. We evaluate the effect of different noise-robust losses for self-training in Table 6. We compare our baseline losses with a 3D extension of the projection loss of [55] and with our adaptation of DropLoss from [56]. Our approach does not penalize missing pseudo masks, which enables more effective self-training to discover previously missed instances.
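The following sketch shows the general idea of this loss filtering in PyTorch under our naming assumptions (`tau_drop`, per-prediction losses, and boolean masks over segments are illustrative); it is a simplified sketch, not the exact DropLoss implementation of [56].

```python
# Hedged sketch of DropLoss-style filtering: per-prediction losses are zeroed when a
# prediction has less than tau_drop overlap with every pseudo mask of the previous cycle,
# so the model is not penalized for discovering unannotated objects.
import torch

def drop_loss(per_pred_loss, pred_masks, pseudo_masks, tau_drop=0.01):
    """per_pred_loss: (P,) losses, pred_masks: (P, N) bool, pseudo_masks: (M, N) bool."""
    inter = (pred_masks.unsqueeze(1) & pseudo_masks.unsqueeze(0)).sum(-1).float()   # (P, M)
    union = (pred_masks.unsqueeze(1) | pseudo_masks.unsqueeze(0)).sum(-1).float()
    iou = inter / union.clamp(min=1)
    keep = (iou.max(dim=1).values >= tau_drop).float()    # drop predictions with no support
    return (per_pred_loss * keep).sum() / keep.sum().clamp(min=1)
```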
|
| 250 |
+
|
| 251 |
+
# 7.3. Additional Qualitative Results
|
| 252 |
+
|
| 253 |
+
We show more qualitative results from our method trained on ARKitScenes [2] in Figure 8 and on ScanNet [10] in Figure 9.
|
| 254 |
+
|
| 255 |
+
# 7.4. Pseudo Mask Generation Ablations
|
| 256 |
+
|
| 257 |
+
We also ablate the saliency threshold, oversegmentation parameters, and separation strategy in our pseudo mask generation. Unless explicitly stated otherwise (e.g., in Table 12), we use both 2D and 3D modality features for the pseudo mask generation.
|
| 258 |
+
|
| 259 |
+
What is the effect of the saliency threshold in pseudo mask generation? We threshold the saliency matrix $A$ with $\tau_{cut} = 0.55$ for geometric-only features and $\tau_{cut} = 0.65$ for combined modalities. Table 7 shows that our approach maintains robust performance across a large range of $\tau_{cut}$ thresholds used to estimate salient areas for pseudo masks. In this table we report results using features from combined modalities, but similar behaviour can be observed for the other scenarios as well.
|
| 260 |
+
|
| 261 |
+
The effect of iterative mask densification. We designed a strategy to leverage a sparse set of relatively clean initial pseudo masks, which are progressively extended with confident self-predictions during later iterations. This leads to a 3x improvement over the state of the art in the Average Precision metric. We could also consider different mask refinement strategies using a mixture of segments, initial masks, or self-trained instances. Tab. 8 ablates a mask refinement strategy that discards previous masks and retains only the current predictions. We also consider using Felzenszwalb segments directly instead of feature-based pseudo labels. Both of these strategies lead to lower performance due to the increased presence of noisy labels, which dominate the training signal.
|
| 262 |
+
|
| 263 |
+
Robustness to oversegmentation parameters. Table 9 shows that our approach maintains strong robustness to a wide range of oversegmentation parameters for our geometric segments (our used parameters denoted in bold).
|
| 264 |
+
|
| 265 |
+
Additionally, we also test the effect of other hyperparameters in our NCut-based pseudo mask generation module, including the distance metrics used in the similarity matrix and different methods to separate unconnected patches in the predicted foregrounds. During the foreground separation in the Normalized Cut algorithm, we had an additional condition on the minimum number of foreground segments in a bipartition. This condition effectively filtered out suboptimal partitions of the full graph that would split off parts of full instances. Reducing this parameter directly leads to a denser set of initial pseudo masks, at the cost of a higher false-positive rate. In Table 9 we report a sparser and a denser version of the dataset with a minimum number of foreground segments of 8 and 2, respectively, and show that the initially higher scores of the pseudo annotations do not necessarily propagate to better downstream self-trained performance.
|
| 266 |
+
|
| 267 |
+
Finally, we also ablate the effect of our physical connectivity-based foreground separation introduced in Section 3.1. In our main method we separate all connected components in the foreground, but only keep the component with the highest eigenvector activation (Max). As alternatives, we test keeping the component with the highest average activation (Avg.), keeping the component with the largest surface area (Largest), and, to assess the effect of this module, omitting connectivity-based separation entirely (No Sep.).
|
| 268 |
+
|
| 269 |
+
# 7.5. Comparison with methods from the 2D domain
|
| 270 |
+
|
| 271 |
+
To ensure a fair evaluation of methods operating on different input domains in Table 1, we followed the established procedure of well-known baselines [9, 18, 23]. This involves
|
| 272 |
+
|
| 273 |
+

|
| 274 |
+
Figure 8. Additional results on the ARKitScenes dataset [2], compared to geometric clustering and oversegmentation-based baselines.
|
| 275 |
+
|
| 276 |
+

|
| 277 |
+
Figure 9. Additional results on the ScanNet dataset [10], compared to geometric clustering and oversegmentation-based baselines.
|
| 278 |
+
|
| 279 |
+
<table><tr><td rowspan="2">Model</td><td rowspan="2">Backbone</td><td colspan="3">1%</td><td colspan="3">5%</td><td colspan="3">10%</td><td colspan="3">20%</td><td colspan="3">50%</td></tr><tr><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>Scratch</td><td>Bottom-up</td><td>22.6</td><td>14.1</td><td>6.8</td><td>45.5</td><td>33.3</td><td>18.1</td><td>54.8</td><td>39.2</td><td>21.9</td><td>61.0</td><td>43.4</td><td>25.5</td><td>67.0</td><td>51.4</td><td>30.3</td></tr><tr><td>CSC [19]</td><td>Bottom-up</td><td>35.6</td><td>22.1</td><td>12.5</td><td>52.7</td><td>39.9</td><td>23.3</td><td>59.8</td><td>43.8</td><td>25.0</td><td>63.8</td><td>48.9</td><td>29.6</td><td>70.5</td><td>56.0</td><td>33.6</td></tr><tr><td>Scratch</td><td>Transformer</td><td>24.7</td><td>9.3</td><td>4.6</td><td>48.1</td><td>27.6</td><td>16.3</td><td>59.2</td><td>39.1</td><td>23.4</td><td>66.4</td><td>49.6</td><td>33.1</td><td>78.9</td><td>67.5</td><td>49.8</td></tr><tr><td>CSC</td><td>Transformer</td><td>17.0</td><td>6.8</td><td>3.8</td><td>44.2</td><td>22.7</td><td>13.1</td><td>55.2</td><td>32.3</td><td>19.1</td><td>62.0</td><td>41.2</td><td>26.0</td><td>73.7</td><td>58.2</td><td>40.0</td></tr><tr><td>Ours</td><td>Transformer</td><td>43.5</td><td>28.4</td><td>15.8</td><td>63.2</td><td>46.8</td><td>28.3</td><td>70.3</td><td>55.7</td><td>36.7</td><td>72.4</td><td>60.7</td><td>41.5</td><td>78.9</td><td>68.0</td><td>48.2</td></tr></table>
|
| 280 |
+
|
| 281 |
+
Table 5. Unsupervised class-agnostic pretraining with our method can also act as a powerful pretraining strategy, advancing over state of the art. We report pretraining with CSC [19] and UnScene3D, and evaluate the downstream weakly-supervised instance segmentation performance on ScanNet with percentage of limited annotated scenes used denoted in the top row. As we found that CSC degraded performance when using a transformer-based backbone, we also report the performance of training from scratch and CSC on their originally proposed backbone of a sparse UNet with bottom-up voting.
|
| 282 |
+
|
| 283 |
+
<table><tr><td></td><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP Final</td></tr><tr><td>Initial Pseudo Masks</td><td>19.9</td><td>10.0</td><td>5.9</td><td>-</td></tr><tr><td>Baseline losses [45]</td><td>42.3</td><td>16.9</td><td>7.2</td><td>14.2</td></tr><tr><td>Projection loss [55]</td><td>35.7</td><td>12.1</td><td>4.7</td><td>7.2</td></tr><tr><td>DropLoss [56]</td><td>52.9</td><td>23.2</td><td>10.4</td><td>15.9</td></tr></table>
|
| 284 |
+
|
| 285 |
+
Table 6. A 3D projection loss struggles with under-determined associations, while DropLoss helps UnScene3D to discover parts of the scene that were missed by the source supervision. We report all metrics after a single iteration and the AP scores after 4 iterations of self-training.
|
| 286 |
+
|
| 287 |
+
<table><tr><td>τcut</td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>0.40</td><td>16.7</td><td>9.0</td><td>5.2</td></tr><tr><td>0.50</td><td>20.8</td><td>10.7</td><td>5.7</td></tr><tr><td>0.55</td><td>21.0</td><td>10.8</td><td>5.7</td></tr><tr><td>0.60</td><td>21.3</td><td>11.3</td><td>5.8</td></tr><tr><td>0.65</td><td>19.9</td><td>10.0</td><td>5.9</td></tr><tr><td>0.70</td><td>18.2</td><td>9.9</td><td>5.6</td></tr><tr><td>0.80</td><td>11.8</td><td>5.0</td><td>2.6</td></tr></table>
|
| 288 |
+
|
| 289 |
+
using depth information to project 2D predictions into 3D, such that all methods are evaluated in the same 3D domain, and aggregating multiple predictions by majority voting or by taking the maximum confidence score at every voxel location. We also show results evaluated against 2D ScanNet images by projecting our method's predictions into 2D in Tab. 10, comparing to the current state-of-the-art 2D unsupervised segmentation method [56], which demonstrates the usefulness of 3D reasoning.
|
| 290 |
+
|
| 291 |
+
We also compare to weakly-supervised instance segmentation method SAM3D [61], where powerful class-agnostic
|
| 292 |
+
|
| 293 |
+
Table 7. Our pseudo mask generation quality, as measured by AP metrics, maintains robustness to a large range of $\tau$ thresholds that extract saliency. Note that this measures the quality of only the pseudo masks; our full approach with self-training produces significantly improved results. In this table we show results and parameters used by our method in bold and report pseudo mask performance generated from both modalities.
|
| 294 |
+
|
| 295 |
+
<table><tr><td></td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>Felzenszwalb Masks</td><td>35.5</td><td>20.6</td><td>10.3</td></tr><tr><td>Mask Refinement</td><td>43.7</td><td>24.4</td><td>12.4</td></tr><tr><td>Mask Addition (Ours)</td><td>58.6</td><td>32.0</td><td>16.0</td></tr></table>
|
| 296 |
+
|
| 297 |
+
Table 8. Instead of directly using masks from the previous iteration, it is best to keep the initial masks fixed and to iteratively sample plausible predictions to enrich the pseudo dataset during self-training. This strikes a balance between relatively clean but sparse labels and an increasing number of confident samples. Finally, even though Felzenszwalb oversegmentation yields higher precision than our initial mask prediction algorithm, it also includes more background in the training, and thus plateaus at a lower self-training performance.
|
| 298 |
+
|
| 299 |
+
2D masks are extracted by the powerful SAM model [29]. Here the projected 2D masks are iteratively merged into 3D masks with a bottom-up bidirectional merging approach to achieve cleaner and more view-independent 3D instances. A quantitative comparison on ScanNet can be seen in Table 11, with qualitative comparisons in Figure 10.
|
| 300 |
+
|
| 301 |
+

|
| 302 |
+
Figure 10. While SAM has powerful capabilities in crisp 2D mask generation, when its masks are aggregated in 3D, SAM3D tends to oversegment object instances.
|
| 303 |
+
|
| 304 |
+
SAM3D must resolve view inconsistencies and SAM's tendency to over-segment objects, which results in SAM3D splitting instances, while UnScene3D is able to achieve complete masks through multi-modal reasoning. We believe integrating SAM or other (weakly-) supervised 2D models into our pipeline to enable multi-modal reasoning is an interesting avenue for future work.
|
| 305 |
+
|
| 306 |
+
# 7.6. Additional Implementation Details
|
| 307 |
+
|
| 308 |
+
Here, we further explain the implementation details of our pseudo mask generation.
|
| 309 |
+
|
| 310 |
+
<table><tr><td colspan="4">Generation Params.</td><td colspan="4">Initial Pseudo Mask</td><td colspan="3">1 Iteration of Self-Training</td><td colspan="3">4 Iterations of Self-Training</td></tr><tr><td>Segment Size</td><td>Metric</td><td>Separation</td><td>Min. # of Foreground</td><td># of Instances</td><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP@25</td><td>AP@50</td><td>AP</td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>30</td><td>Cos</td><td>Max</td><td>8</td><td>2169</td><td>21.9</td><td>11.5</td><td>6.3</td><td>53.7</td><td>26.2</td><td>12.4</td><td>55.4</td><td>30.3</td><td>15.3</td></tr><tr><td>50</td><td>Cos</td><td>Max</td><td>8</td><td>1414</td><td>19.9</td><td>10.0</td><td>5.9</td><td>52.9</td><td>23.2</td><td>10.4</td><td>58.5</td><td>32.2</td><td>15.9</td></tr><tr><td>100</td><td>Cos</td><td>Max</td><td>8</td><td>1090</td><td>17.4</td><td>8.0</td><td>4.2</td><td>33.1</td><td>10.2</td><td>3.9</td><td>39.6</td><td>13.7</td><td>5.3</td></tr><tr><td>200</td><td>Cos</td><td>Max</td><td>8</td><td>584</td><td>11.0</td><td>3.7</td><td>1.8</td><td>24.3</td><td>8.7</td><td>2.1</td><td>26.1</td><td>9.7</td><td>2.4</td></tr><tr><td>400</td><td>Cos</td><td>Max</td><td>8</td><td>319</td><td>6.4</td><td>2.5</td><td>1.1</td><td>19.1</td><td>3.9</td><td>1.2</td><td>19.9</td><td>3.2</td><td>1.0</td></tr><tr><td>50</td><td>L2</td><td>Max</td><td>8</td><td>1539</td><td>20.1</td><td>10.6</td><td>5.4</td><td>49.0</td><td>21.7</td><td>9.8</td><td>55.3</td><td>38.4</td><td>14.3</td></tr><tr><td>100</td><td>L2</td><td>Max</td><td>8</td><td>805</td><td>13.3</td><td>5.3</td><td>2.6</td><td>30.8</td><td>8.3</td><td>2.8</td><td>39.0</td><td>12.7</td><td>5.0</td></tr><tr><td>50</td><td>Cos</td><td>No Sep.</td><td>8</td><td>125</td><td>4.3</td><td>0.3</td><td>0.1</td><td>4.3</td><td>0.5</td><td>0.2</td><td>4.9</td><td>0.6</td><td>0.2</td></tr><tr><td>50</td><td>Cos</td><td>Largest</td><td>8</td><td>620</td><td>11.5</td><td>4.9</td><td>2.5</td><td>11.5</td><td>1.5</td><td>0.4</td><td>12.9</td><td>2.2</td><td>12.9</td></tr><tr><td>50</td><td>Cos</td><td>Avg.</td><td>8</td><td>1078</td><td>16.8</td><td>9.1</td><td>5.1</td><td>36.4</td><td>12.5</td><td>4.9</td><td>43.8</td><td>17.8</td><td>7.5</td></tr><tr><td>30</td><td>Cos</td><td>Max</td><td>2</td><td>2909</td><td>29.0</td><td>15.6</td><td>8.7</td><td>53.6</td><td>28.6</td><td>14.2</td><td>54.2</td><td>29.8</td><td>15.4</td></tr><tr><td>50</td><td>Cos</td><td>Max</td><td>2</td><td>2512</td><td>24.9</td><td>12.4</td><td>7.2</td><td>56.5</td><td>29.8</td><td>15.0</td><td>51.3</td><td>26.2</td><td>12.6</td></tr><tr><td>100</td><td>Cos</td><td>Max</td><td>2</td><td>2317</td><td>23.1</td><td>12.3</td><td>6.8</td><td>51.8</td><td>24.4</td><td>11.6</td><td>57.1</td><td>31.3</td><td>15.6</td></tr><tr><td>200</td><td>Cos</td><td>Max</td><td>2</td><td>2181</td><td>28.4</td><td>15.5</td><td>8.9</td><td>54.6</td><td>28.7</td><td>13.7</td><td>56.6</td><td>31.4</td><td>15.6</td></tr><tr><td>400</td><td>Cos</td><td>Max</td><td>2</td><td>1373</td><td>20.6</td><td>11.1</td><td>6.3</td><td>51.0</td><td>24.8</td><td>11.8</td><td>55.8</td><td>30.3</td><td>15.2</td></tr><tr><td>50</td><td>L2</td><td>Max</td><td>2</td><td>2496</td><td>28.6</td><td>15.8</td><td>9.0</td><td>55.8</td><td>29.6</td><td>14.6</td><td>54.8</td><td>30.3</td><td>15.3</td></tr><tr><td>100</td><td>L2</td><td>Max</td><td>2</td><td>1668</td><td>23.4</td><td>12.7</td><td>7.3</td><td>53.1</td><td>25.0</td><td>11.3</td><td>56.3</td><td>27.7</td><td>12.9</td></tr><tr><td>50</td><td>Cos</td><td>No 
Sep.</td><td>2</td><td>159</td><td>0.2</td><td>0.5</td><td>3.6</td><td>5.4</td><td>0.6</td><td>0.3</td><td>3.9</td><td>0.4</td><td>0.2</td></tr><tr><td>50</td><td>Cos</td><td>Largest</td><td>2</td><td>1026</td><td>14.1</td><td>7.2</td><td>3.9</td><td>11.5</td><td>1.8</td><td>0.5</td><td>14.5</td><td>2.5</td><td>0.7</td></tr><tr><td>50</td><td>Cos</td><td>Avg.</td><td>2</td><td>2053</td><td>23.3</td><td>12.0</td><td>6.8</td><td>52.5</td><td>27.4</td><td>12.7</td><td>54.9</td><td>29.9</td><td>14.9</td></tr></table>
|
| 311 |
+
|
| 312 |
+
Table 9. We denote the parameters used by our method in bold. We show that our method is robust to a wide range of segment sizes and different similarity metrics, and only degrades somewhat in performance when segments are constrained to be too large. We also show that the separation of physically distant foreground patches is important, and that using the maximum eigenvector activation gives the best results. Finally, we show that denser initial mask predictions lead to quantitatively better initial pseudo annotations, and even to better self-training performance after a single iteration, but underperform in their final scores. This behaviour can be explained by the larger false-positive ratio in the denser initial predictions, which propagates through all iterations; thanks to the noise-robust losses and the iterative refinement of predictions, the sparse set of labels can instead be used effectively. In this table we report results using both modalities for the initial pseudo mask generation, and the number of predicted pseudo instances on the official validation split of the ScanNet dataset.
|
| 313 |
+
<table><tr><td></td><td>AP@25 (2D)</td><td>AP@50 (2D)</td><td>AP (2D)</td></tr><tr><td>CutLER (2D)</td><td>7.8</td><td>2.8</td><td>0.7</td></tr><tr><td>Ours (projected)</td><td>60.0</td><td>38.1</td><td>21.1</td></tr></table>
|
| 316 |
+
|
| 317 |
+
Table 10. 2D evaluation on ScanNet images.
|
| 318 |
+
<table><tr><td></td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>SAM3D</td><td>37.2</td><td>11.8</td><td>3.7</td></tr><tr><td>SAM3D with GT Segments</td><td>47.6</td><td>24.1</td><td>10.8</td></tr><tr><td>Ours</td><td>58.5</td><td>32.2</td><td>15.9</td></tr></table>
|
| 321 |
+
|
| 322 |
+
Table 11. UnScene3D achieves significantly better performance on ScanNet than SAM3D through our strong multi-modal reasoning.
|
| 323 |
+
|
| 324 |
+
Pseudo code for masked NCut We show a pseudo code-style implementation of the masked normalized cut algorithm, which generates multiple instances as pseudo masks. The full algorithm can be seen in Algorithm 1.
|
| 325 |
+
|
| 326 |
+
3D Adaptation of FreeMask We also evaluate an alternative pseudo mask segmentation algorithm besides the masked NCut method. In the 2D domain, FreeSOLO [55] also follows a two-stage pipeline, first generating pseudo annotations and then refining those predictions through a series of self-training cycles. We followed their intuition to take a self-supervised pretrained backbone and
|
| 327 |
+
|
| 328 |
+
extract its deep features at multiple levels of the decoder. While in standard pretrained UNet-style models early features represent global context and final features local semantic meaning, intermediate features can act as a useful proxy to extract self-similar regions in the input samples. In our implementation we used the same backbone features of [3, 19] for the same 2D-3D setup and extracted the penultimate-layer features for the self-similarity calculation. We then sampled the feature space with the Furthest Point Sampling [42] strategy to obtain a limited set of anchor points, later used to extract self-similar regions. For every seed point we computed similarity scores with all other features of the full scene and thresholded them to extract salient regions. Finally, we used the efficient Non-Maximum Suppression implementation from [55] to sort the predicted salient areas and filter out overlapping regions. We also used the average similarity score combined with the salient region area to obtain maskness scores for every salient region, directly following the original implementation. We report comparative results of the masked NCut algorithm and our FreeMask 3D adaptation after self-training in Table 3 of the main paper, and the initial pseudo mask scores in Table 12.
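A rough, illustrative sketch of this FreeMask-style baseline is shown below; the seed count, saliency threshold, NMS IoU, and the simple maskness proxy are assumptions made for the example and do not reproduce the exact adaptation.

```python
# Illustrative FreeMask-style baseline: farthest point sampling picks seed primitives,
# each seed's feature-similarity map is thresholded into a salient region, and
# overlapping regions are suppressed greedily by a maskness score.
import numpy as np

def farthest_point_sampling(points, k):
    idx = [0]
    dist = np.linalg.norm(points - points[0], axis=1)
    for _ in range(k - 1):
        idx.append(int(dist.argmax()))
        dist = np.minimum(dist, np.linalg.norm(points - points[idx[-1]], axis=1))
    return np.array(idx)

def freemask_3d(features, centroids, num_seeds=16, tau_sal=0.7, nms_iou=0.5):
    """features: (N, D) per-primitive features, centroids: (N, 3) primitive centers."""
    Fn = features / (np.linalg.norm(features, axis=1, keepdims=True) + 1e-8)
    seeds = farthest_point_sampling(centroids, num_seeds)
    masks, scores = [], []
    for s in seeds:
        sim = Fn @ Fn[s]                               # similarity to the seed primitive
        mask = sim >= tau_sal
        if mask.sum() == 0:
            continue
        masks.append(mask)
        scores.append(sim[mask].mean() * mask.sum())   # simple "maskness" proxy
    keep = []
    for i in np.argsort(scores)[::-1]:                 # greedy NMS over candidate masks
        if all((masks[i] & masks[j]).sum() / max((masks[i] | masks[j]).sum(), 1) < nms_iou
               for j in keep):
            keep.append(i)
    return [masks[i] for i in keep]
```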
|
| 329 |
+
|
| 330 |
+
We also note here that while there is a difference in the initial pseudo mask qualities for the different methods, the
|
| 331 |
+
|
| 332 |
+
Algorithm 1: Masked NCut on 3D segments

Data: segments $\mathcal{S} = \{s_1,\dots ,s_N\}$, segment features $\mathcal{F}\in \mathbb{R}^{N\times D}$, segment connectivity $\mathcal{C} = \{(s_1,s_k),(s_1,s_l),\ldots \}$; Result: pseudo masks $\mathcal{M} = \{m_1,\dots ,m_M\}$

1 $\mathcal{M}\gets \{\}$

2 while $j\leq$ max_inst_num do

3 $\quad \mathcal{F}'\gets \mathcal{F}$

4 $\quad \mathcal{F}'[\mathcal{M}]\gets 0$ // Mask out previous instances

5 $\quad \mathcal{W}\gets \mathcal{F}'\times \mathcal{F}'^{T}$ // Feature similarity

6 $\quad \mathcal{W}_{i,k} = \begin{cases}1 & \text{if } \mathcal{W}_{i,k}\geq \tau_{cut}\\ \epsilon & \text{if } \mathcal{W}_{i,k} < \tau_{cut}\end{cases}$ // Saliency with connected graph

7 $\quad \mathcal{D}_{i,i} = \sum_{k}\mathcal{W}_{i,k}$ // Degree matrix

8 $\quad \lambda ,\mathbf{v}\gets \mathrm{eigh}(\mathcal{D} - \mathcal{W},\mathcal{D}, -2)$ // Get $2^{nd}$ smallest eigenvector

9 $\quad m_{i} = \begin{cases}1 & \text{if } v_{i}\geq \mathrm{mean}(\mathbf{v})\\ 0 & \text{if } v_{i} < \mathrm{mean}(\mathbf{v})\end{cases}$

10 $\quad$ if $\mathrm{sum}(\mathbf{m}) > N/2$ then // Invert bipartition if too large

11 $\qquad \mathbf{m} = 1 - \mathbf{m}$

12 $\qquad \mathbf{v} = -1\cdot \mathbf{v}$

13 $\quad v_{max} = \max(\mathbf{v})$ // Separate unconnected components

14 $\quad \tilde{\mathbf{m}} = \mathrm{sep}(\mathbf{v},v_{max},\mathcal{C})$

15 $\quad \mathcal{M}\gets \mathcal{M}\cup \{\tilde{\mathbf{m}}\}$
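The following NumPy/SciPy sketch (ours, for illustration only) mirrors the loop of Algorithm 1; the default parameter values are assumptions, and the connectivity-based component separation step ($\mathrm{sep}$, line 14) is omitted.

```python
import numpy as np
from scipy.linalg import eigh

def masked_ncut(feats, tau_cut=0.65, eps=1e-5, max_inst_num=20):
    """feats: (N, D) per-segment features, assumed L2-normalized."""
    n = feats.shape[0]
    assigned = np.zeros(n, dtype=bool)          # union of previously extracted instances
    instances = []
    for _ in range(max_inst_num):
        f = feats.copy()
        f[assigned] = 0.0                       # mask out previous instances (line 4)
        w = f @ f.T                             # feature similarity (line 5)
        w = np.where(w >= tau_cut, 1.0, eps)    # thresholded affinity (line 6)
        d = np.diag(w.sum(axis=1))              # degree matrix (line 7)
        _, vecs = eigh(d - w, d)                # generalized eigenproblem (line 8)
        v = vecs[:, 1]                          # 2nd smallest eigenvector
        m = v >= v.mean()                       # bipartition by the mean (line 9)
        if m.sum() > n / 2:                     # keep the smaller side as foreground
            m = ~m
        m &= ~assigned
        if not m.any():
            break
        instances.append(m)
        assigned |= m
    return instances
```

Passing the degree matrix as the second argument of `scipy.linalg.eigh` solves the generalized eigenproblem $(\mathcal{D}-\mathcal{W})\mathbf{v} = \lambda \mathcal{D}\mathbf{v}$; its second-smallest eigenvector yields the normalized cut bipartition.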
|
| 350 |
+
|
| 351 |
+
<table><tr><td></td><td>Modality</td><td>AP@25</td><td>AP@50</td><td>AP</td></tr><tr><td>FreeMask</td><td>3D</td><td>13.7</td><td>7.2</td><td>3.7</td></tr><tr><td>Ours</td><td>3D</td><td>13.8</td><td>4.7</td><td>2.0</td></tr><tr><td>FreeMask</td><td>2D</td><td>15.3</td><td>6.6</td><td>2.9</td></tr><tr><td>Ours</td><td>2D</td><td>15.6</td><td>7.2</td><td>3.6</td></tr><tr><td>FreeMask</td><td>both</td><td>17.9</td><td>7.5</td><td>3.7</td></tr><tr><td>Ours</td><td>both</td><td>19.9</td><td>10.0</td><td>5.9</td></tr></table>
|
| 352 |
+
|
| 353 |
+
Table 12. We compare pseudo mask generation from 3D-only features (3D), color-only features (2D), and combined color and geometry signals (both), as well as across pseudo annotation generation algorithms. We compare the quality of the initial pseudo mask dataset produced by our masked NCut algorithm and by the adaptation of FreeMask [55] to 3D. We see that the normalized cut-based method is superior for both modalities.
|
| 354 |
+
|
| 355 |
+
difference in downstream performance is far more significant. This can be explained by the nature of the pseudo masks. NCut provides a clean and sparse set of annotations, which is easy to build on in the following self-training iterations. On the other hand, the denser but noisier FreeMask predictions remain present for the whole duration of training, hindering the self-trained model with noisy supervision.
|
2303.14xxx/2303.14541/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e5c11b44309c60ed49b31c88ddfd9f157de293101ca26203926f849d6977e2f2
|
| 3 |
+
size 1626873
|
2303.14xxx/2303.14541/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14605/77700755-45b8-4f62-9943-a56edcf0bf45_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14605/77700755-45b8-4f62-9943-a56edcf0bf45_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14605/77700755-45b8-4f62-9943-a56edcf0bf45_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4de7bb89ac9fc75b592ce53ee2f61628bbc9369e7ee8451de175e70b5ab59c63
|
| 3 |
+
size 278634
|
2303.14xxx/2303.14605/full.md
ADDED
|
@@ -0,0 +1,857 @@
|
| 1 |
+
# The Subspace Flatness Conjecture and Faster Integer Programming
|
| 2 |
+
|
| 3 |
+
Victor Reis* and Thomas Rothvoss†
|
| 4 |
+
|
| 5 |
+
# Abstract
|
| 6 |
+
|
| 7 |
+
In a seminal paper, Kannan and Lovász (1988) considered a quantity $\mu_{KL}(\Lambda, K)$ which denotes the best volume-based lower bound on the covering radius $\mu(\Lambda, K)$ of a convex body $K$ with respect to a lattice $\Lambda$ . Kannan and Lovász proved that $\mu(\Lambda, K) \leq n \cdot \mu_{KL}(\Lambda, K)$ and the Subspace Flatness Conjecture by Dadush (2012) claims a $O(\log(2n))$ factor suffices, which would match the lower bound from the work of Kannan and Lovász. We settle this conjecture up to a constant in the exponent by proving that $\mu(\Lambda, K) \leq O(\log^3(2n)) \cdot \mu_{KL}(\Lambda, K)$ . Our proof is based on the Reverse Minkowski Theorem due to Regev and Stephens-Davidowitz (2017). Following the work of Dadush (2012, 2019), we obtain a $(\log(2n))^{O(n)}$ -time randomized algorithm to solve integer programs in $n$ variables. Another implication of our main result is a near-optimal flatness constant of $O(n \log^3(2n))$ .
|
| 8 |
+
|
| 9 |
+
# 1 Introduction
|
| 10 |
+
|
| 11 |
+
Lattices are fundamental objects studied in various areas of mathematics and computer science. Here, a lattice $\Lambda$ is a discrete subgroup of $\mathbb{R}^n$ . If $B \in \mathbb{R}^{n \times k}$ is a matrix with linearly independent columns $b_1, \ldots, b_k$ , then we may write a lattice in the form $\Lambda(B) := \{\sum_{i=1}^{k} y_i b_i : y_i \in \mathbb{Z}\}$ . In mathematics, lattices are the central object of study in the geometry of numbers with many applications for example to number theory, see e.g. [KL88]. On the computer science side, lattices found applications for example in lattice-based cryptography [Reg09b] and cryptanalysis [Odl90]. One of the most important algorithms at least in this area
|
| 12 |
+
|
| 13 |
+
is the LLL-algorithm by Lenstra, Lenstra and Lovász [LLL82] which finds an approximately orthogonal basis for a given lattice in polynomial time. One of the consequences of the LLL-reduction is a polynomial time $2^{n/2}$ -approximation algorithm for the problem of finding a (nonzero) shortest vector in a lattice. We should also mention that the problem of finding a shortest vector in any norm can be solved in time $2^{O(n)}$ using a variation of the sieving algorithm [AKS01] while in the Euclidean norm, even the closest vector to any given target vector can be found in time $2^{O(n)}$ [MV13]. A more general problem with tremendous applications in combinatorial optimization and operations research is the one of finding an integer point in an arbitrary convex body or polytope. Lenstra [Len83] used the then-recent lattice basis reduction algorithm to solve any $n$ -variable integer program in time $2^{O(n^2)}$ . This was later improved by Kannan [Kan87] to $n^{O(n)}$ and then by Dadush [Dad12] and by Dadush, Eisenbrand and Rothvoss [DER22] to $2^{O(n)}n^n$ .
|
| 14 |
+
|
| 15 |
+
A parameter appearing in the geometry of numbers is the covering radius
|
| 16 |
+
|
| 17 |
+
$$
|
| 18 |
+
\mu (\Lambda , K) := \min \left\{r \geq 0 \mid \Lambda + r K = \operatorname {s p a n} (\Lambda) \right\}
|
| 19 |
+
$$
|
| 20 |
+
|
| 21 |
+
of a lattice $\Lambda \subseteq \mathbb{R}^n$ with respect to a compact convex set $K \subseteq \mathbb{R}^n$ with $\operatorname{span}(\Lambda) =$ affine.hull $(K)$ . This quantity seems to be substantially harder computationally, in the sense that the question whether $\mu(\Lambda, K)$ is at least/at most a given threshold seems to be neither in NP nor in coNP. In terms of approximating $\mu(\Lambda, K)$ , one can quickly observe that one has the lower bound of $\mu(\Lambda, K) \geq (\frac{\operatorname*{det}(\Lambda)}{\operatorname{Vol}_n(K)})^{1/n}$ , simply because for $r < (\frac{\operatorname*{det}(\Lambda)}{\operatorname{Vol}_n(K)})^{1/n}$ , the average density of the translates $\Lambda + rK$ is less than 1. However, this lower bound may be arbitrarily far off the real covering radius, for example if $\Lambda = \mathbb{Z}^2$ and $K = [-\frac{1}{M}, \frac{1}{M}] \times [-M, M]$ with $M \to \infty$ . On the other hand, for any subspace $W \subseteq \mathbb{R}^n$ one trivially has $\mu(\Lambda, K) \geq \mu(\Pi_W(\Lambda), \Pi_W(K))$ , where $\Pi_W$ is the orthogonal projection into $W$ . Hence, following Kannan and Lovász [KL88], one might instead consider the best volume based lower bound for any projection, i.e.
|
| 22 |
+
|
| 23 |
+
$$
|
| 24 |
+
\mu_{KL}(\Lambda ,K):= \max_{\substack{W\subseteq \operatorname{span}(\Lambda)\ \text{subspace}\\ d:= \dim (W)}}\Bigl(\frac{\det(\Pi_{W}(\Lambda))}{\operatorname{Vol}_{d}(\Pi_{W}(K))}\Bigr)^{1 / d}.
|
| 25 |
+
$$
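The following short computation (our own illustration, not part of the original text) revisits the example above and shows why passing to a projection fixes the volume bound. For $\Lambda = \mathbb{Z}^2$ and $K = [-\frac{1}{M}, \frac{1}{M}] \times [-M, M]$ one has

$$
\Bigl(\frac{\det(\Lambda)}{\operatorname{Vol}_2(K)}\Bigr)^{1/2} = \Bigl(\frac{1}{4}\Bigr)^{1/2} = \frac{1}{2},
\qquad \text{whereas} \qquad
\mu(\Lambda, K) = \frac{M}{2},
$$

$$
\text{while for } W = \operatorname{span}(e_1): \qquad
\frac{\det(\Pi_W(\Lambda))}{\operatorname{Vol}_1(\Pi_W(K))} = \frac{1}{2/M} = \frac{M}{2} \;\leq\; \mu_{KL}(\Lambda, K) \;\leq\; \mu(\Lambda, K),
$$

so projecting onto the first coordinate already recovers the true order of the covering radius in this example.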
|
| 26 |
+
|
| 27 |
+
Kannan and Lovász [KL88] indeed provide an upper bound of
|
| 28 |
+
|
| 29 |
+
$$
|
| 30 |
+
\mu_ {K L} (\Lambda , K) \leq \mu (\Lambda , K) \leq n \cdot \mu_ {K L} (\Lambda , K).
|
| 31 |
+
$$
|
| 32 |
+
|
| 33 |
+
On the other hand, they also construct a simplex $K \subseteq \mathbb{R}^n$ for which $\mu(\mathbb{Z}^n, K) \geq \Omega(\log(2n)) \cdot \mu_{KL}(\mathbb{Z}^n, K)$ holds. Dadush [Dad12] states the following conjecture, attributing it to Kannan and Lovász [KL88]:
|
| 34 |
+
|
| 35 |
+
Conjecture 1 (Subspace Flatness Conjecture). For any full rank lattice $\Lambda \subseteq \mathbb{R}^n$ and any convex body $K \subseteq \mathbb{R}^n$ one has
|
| 36 |
+
|
| 37 |
+
$$
|
| 38 |
+
\mu_ {K L} (\Lambda , K) \leq \mu (\Lambda , K) \leq O (\log (2 n)) \cdot \mu_ {K L} (\Lambda , K).
|
| 39 |
+
$$
|
| 40 |
+
|
| 41 |
+
Dadush also realized the tremendous implications of this conjecture for optimization and showed that it would imply an $O(\log (2n))^{n}$ -time algorithm to solve $n$ -variable integer programs, assuming that the subspace $W$ attaining $\mu_{KL}(\Lambda ,K)$ could also be found in the same time. Later, Dadush and Regev [DR16] conjectured a Reverse Minkowski-type Inequality, which intuitively says that any lattice without dense sublattices should contain only few short vectors. Among other applications, they proved that this conjecture would imply Conjecture 1 (with some logarithmic loss) at least for the case that $K$ is an ellipsoid. The conjecture of [DR16] was then resolved by Regev and Stephens-Davidowitz [RS17] with a rather ingenious proof. More precisely, they prove the following:
|
| 42 |
+
|
| 43 |
+
Theorem 1 (Reverse Minkowski Theorem [RS17]). Let $\Lambda \subseteq \mathbb{R}^n$ be a lattice that satisfies $\operatorname*{det}(\Lambda') \geq 1$ for all sublattices $\Lambda' \subseteq \Lambda$ . Then for a large enough constant $C > 0$ and $s = C \log(2n)$ one has $\rho_{1/s}(\Lambda) \leq \frac{3}{2}$ .
|
| 44 |
+
|
| 45 |
+
Here, one has $\rho_t(x) \coloneqq \exp(-\pi \|x/t\|_2^2)$ where $t > 0$ and for a discrete set $S \subseteq \mathbb{R}^n$ we abbreviate $\rho_t(S) \coloneqq \sum_{x \in S} \rho_t(x)$ . To understand the power of this result compared to classical arguments, note that from $\operatorname{det}(\Lambda') \geq 1$ for all $\Lambda' \subseteq \Lambda$ one can derive that each vector $x \in \Lambda \setminus \{\mathbf{0}\}$ has length $\|x\|_2 \geq 1$ and so by a standard packing argument we know that for any $r \geq 1$ one has $|\Lambda \cap rB_2^n| \leq (3r)^n$ , which is exponential in $n$ . On the other hand, again under the assumption that $\operatorname{det}(\Lambda') \geq 1$ for all $\Lambda' \subseteq \Lambda$ , the Reverse Minkowski Theorem implies that $|\Lambda \cap rB_2^n| \leq \exp(\Theta(\log^2(2n)) \cdot r^2)$ which is quasi-polynomial in $n$ . Also, [RS17] tighten the reduction to the Subspace Flatness Conjecture and show that it holds for any ellipsoid with a factor of $O(\log^{3/2}(2n))$ . While for any convex body $K$ , there is an ellipsoid $E$ and a center $c$ so that $c + E \subseteq K \subseteq c + nE$ [Joh48], this factor of $n$ is the best possible, and hence there does not seem to be a blackbox reduction from the general case of Conjecture 1 to the one of ellipsoids.
|
| 46 |
+
|
| 47 |
+
# 1.1 Our contribution
|
| 48 |
+
|
| 49 |
+
Our main result is as follows:
|
| 50 |
+
|
| 51 |
+
Theorem 2. For any full rank lattice $\Lambda \subseteq \mathbb{R}^n$ and any convex body $K \subseteq \mathbb{R}^n$ one has
|
| 52 |
+
|
| 53 |
+
$$
|
| 54 |
+
\mu_ {K L} (\Lambda , K) \leq \mu (\Lambda , K) \leq O (\log^ {3} (2 n)) \cdot \mu_ {K L} (\Lambda , K).
|
| 55 |
+
$$
|
| 56 |
+
|
| 57 |
+
We will break the proof into two parts that can be found in Section 4. Our result is constructive in the following sense:
|
| 58 |
+
|
| 59 |
+
Theorem 3. Given a full rank lattice $\Lambda := \Lambda(B)$ and a convex body $K \subseteq \mathbb{R}^n$ with $c + r_0B_2^n \subseteq K \subseteq r_1B_2^n$ , there is a randomized algorithm to find a subspace $W \subseteq \mathbb{R}^n$ with $d := \dim(W)$ so that
|
| 60 |
+
|
| 61 |
+
$$
|
| 62 |
+
\mu (\Lambda , K) \leq O (\log^ {4} (2 n)) \cdot \left(\frac {\det (\Pi_ {W} (\Lambda))}{\mathrm {V o l} _ {d} (\Pi_ {W} (K))}\right) ^ {1 / d}.
|
| 63 |
+
$$
|
| 64 |
+
|
| 65 |
+
The running time of that algorithm is $2^{O(n)}$ times a polynomial in $\log \left( \frac{1}{r_0} \right)$ , $\log (r_1)$ and in the encoding length of $B$ .
|
| 66 |
+
|
| 67 |
+
Here, a separation oracle suffices for $K$ . See Section 5 for a proof. Following the framework laid out by Dadush [Dad12], this implies a faster algorithm to find a lattice point in a convex body:
|
| 68 |
+
|
| 69 |
+
Theorem 4. Given a convex body $K \subseteq rB_2^n$ represented by a separation oracle and a lattice $\Lambda = \Lambda(B)$ , there is a randomized algorithm that with high probability finds a point in $K \cap \Lambda$ or correctly decides that there is none. The running time is $(\log(2n))^{O(n)}$ times a polynomial in $\log(r)$ and the encoding length of $B$ .
|
| 70 |
+
|
| 71 |
+
The proof can be found in Section 6. Applying Theorem 4 to integer programming we obtain the following:
|
| 72 |
+
|
| 73 |
+
Theorem 5. Given $A \in \mathbb{Q}^{m \times n}$ , $b \in \mathbb{Q}^m$ and $c \in \mathbb{Q}^n$ , the integer linear program $\max \{c^T x \mid Ax \leq b, x \in \mathbb{Z}^n\}$ can be solved in time $(\log(2n))^{O(n)}$ times a polynomial in the encoding length of $A$ , $b$ and $c$ .
|
| 74 |
+
|
| 75 |
+
An immediate consequence of our main result (Theorem 2) is that $K$ can be replaced by a larger symmetric body without decreasing the covering radius significantly:
|
| 76 |
+
|
| 77 |
+
Theorem 6. For any full rank lattice $\Lambda \subseteq \mathbb{R}^n$ and any convex body $K\subseteq \mathbb{R}^n$ one has
|
| 78 |
+
|
| 79 |
+
$$
|
| 80 |
+
\mu (\Lambda , K - K) \leq \mu (\Lambda , K) \leq O (\log^ {3} (2 n)) \cdot \mu (\Lambda , K - K).
|
| 81 |
+
$$
|
| 82 |
+
|
| 83 |
+
Another consequence is that the flatness constant in dimension $n$ is bounded by $O(n\log^3 (2n))$ , which is an improvement over the previously known bound of $O(n^{4 / 3}\log^{O(1)}(2n))$ obtained by combining the result of Rudelson [Rud98] with [BLPS99].
|
| 84 |
+
|
| 85 |
+
Theorem 7. For any convex body $K \subseteq \mathbb{R}^n$ and any full rank lattice $\Lambda \subseteq \mathbb{R}^n$ one has
|
| 86 |
+
|
| 87 |
+
$$
|
| 88 |
+
\mu (\Lambda , K) \cdot \lambda_ {1} (\Lambda^ {*}, (K - K) ^ {\circ}) \leq O (n \log^ {3} (2 n)).
|
| 89 |
+
$$
|
| 90 |
+
|
| 91 |
+
It is well known that Theorem 7 can also be rephrased in the following convenient form:
|
| 92 |
+
|
| 93 |
+
Corollary 8. Let $K \subseteq \mathbb{R}^n$ be a convex body with $K \cap \mathbb{Z}^n = \emptyset$ . Then there is a vector $c \in \mathbb{Z}^n \setminus \{\mathbf{0}\}$ so that at most $O(n\log^3 (2n))$ many hyperplanes of the form $\langle c, x \rangle = \delta$ with $\delta \in \mathbb{Z}$ intersect $K$ .
|
| 94 |
+
|
| 95 |
+
We will prove Theorem 6, Theorem 7 and Corollary 8 in Section 7.
|
| 96 |
+
|
| 97 |
+
# 2 Preliminaries
|
| 98 |
+
|
| 99 |
+
In this section, we introduce the tools that we rely on later. We write $A \lesssim B$ if there is a universal constant $C > 0$ so that $A \leq C \cdot B$ holds. We write $A \asymp B$ if both $A \lesssim B$ and $B \lesssim A$ hold.
|
| 100 |
+
|
| 101 |
+
# 2.1 Lattices
|
| 102 |
+
|
| 103 |
+
For a lattice $\Lambda = \Lambda(B)$ given by a matrix $B \in \mathbb{R}^{n \times k}$ with linearly independent columns, we define the rank as $\operatorname{rank}(\Lambda) := k = \dim(\operatorname{span}(\Lambda))$ and the determinant as $\operatorname{det}(\Lambda) = \sqrt{\operatorname{det}_k(B^T B)}$ . A lattice $\Lambda \subseteq \mathbb{R}^n$ with $\operatorname{rank}(\Lambda) = n$ has full rank. For a lattice $\Lambda \subseteq \mathbb{R}^n$ , we define the dual lattice as $\Lambda^* := \{x \in \operatorname{span}(\Lambda) \mid \langle x, y \rangle \in \mathbb{Z} \forall y \in \Lambda\}$ . Recall that $\operatorname{det}(\Lambda) \cdot \operatorname{det}(\Lambda^*) = 1$ . A consequence of the Poisson Summation Formula is as follows:
|
| 104 |
+
|
| 105 |
+
Lemma 9. For any full rank lattice $\Lambda \subseteq \mathbb{R}^n$ , vector $u \in \mathbb{R}^n$ and any $s > 0$ one has
|
| 106 |
+
|
| 107 |
+
$$
|
| 108 |
+
| \rho_ {s} (\Lambda + u) - s ^ {n} \det (\Lambda^ {*}) | \leq s ^ {n} \det (\Lambda^ {*}) \cdot \rho_ {1 / s} (\Lambda^ {*} \backslash \{\mathbf {0} \}).
|
| 109 |
+
$$
|
| 110 |
+
|
| 111 |
+
A set $K \subseteq \mathbb{R}^n$ is called a convex body if it is convex, compact (i.e. bounded and closed) and has a non-empty interior $\operatorname{int}(K)$ . A set $Q$ is called symmetric if $-Q = Q$ . For a symmetric convex set $Q$ , the norm $\| x \|_Q$ is defined as the least scaling $r \geq 0$ so that $x \in rQ$ . For a lattice $\Lambda$ and a symmetric convex body $Q$ we denote the length of the shortest vector by
|
| 112 |
+
|
| 113 |
+
$$
|
| 114 |
+
\lambda_{1}(\Lambda ,Q):= \min_{x\in \Lambda \setminus \{\mathbf{0}\}}\| x\|_{Q}.
|
| 115 |
+
$$
|
| 116 |
+
|
| 117 |
+
Later we will also need a classical bound on short vectors in a lattice:
|
| 118 |
+
|
| 119 |
+
Theorem 10 (Minkowski's First Theorem). Let $\Lambda \subseteq \mathbb{R}^n$ be a full rank lattice and $Q \subseteq \mathbb{R}^n$ be a symmetric convex body. Then $\lambda_1(\Lambda, Q) \leq 2\left(\frac{\operatorname*{det}(\Lambda)}{\operatorname{Vol}_n(Q)}\right)^{1/n}$ .
|
| 120 |
+
|
| 121 |
+
We recommend the excellent notes of Regev [Reg09a] for background.
|
| 122 |
+
|
| 123 |
+
# 2.2 Stable lattices and the canonical filtration
|
| 124 |
+
|
| 125 |
+
A subspace $W \subseteq \mathbb{R}^n$ is a lattice subspace of a lattice $\Lambda \subseteq \mathbb{R}^n$ if $\operatorname{span}(W \cap \Lambda) = W$ . Similarly, a sublattice $\Lambda' \subseteq \Lambda$ is called primitive if there is a subspace $W$ with $\Lambda \cap W = \Lambda'$ . For a lattice $\Lambda$ and a primitive sublattice $\Lambda' \subseteq \Lambda$ , we define the quotient lattice as $\Lambda / \Lambda' := \Pi_{\operatorname{span}(\Lambda')^\perp}(\Lambda)$ . In many ways one can imagine that the quotient operation factors $\Lambda$ into two lattices $\Lambda'$ and $\Lambda / \Lambda'$ . In particular $\Lambda'$ and $\Lambda / \Lambda'$ are orthogonal and $\det(\Lambda) = \det(\Lambda') \cdot \det(\Lambda / \Lambda')$ .
|
| 126 |
+
|
| 127 |
+
A lattice $\Lambda \subseteq \mathbb{R}^n$ is called stable if $\operatorname*{det}(\Lambda) = 1$ and $\operatorname*{det}(\Lambda') \geq 1$ for all sublattices $\Lambda' \subseteq \Lambda$ . That means a stable lattice does not contain any sublattice that is denser than the lattice itself. One can easily verify that for example $\mathbb{Z}^n$ is stable. We denote $\mathrm{nd}(\Lambda) := \operatorname*{det}(\Lambda)^{1/\mathrm{rank}(\Lambda)}$ as the normalized determinant. One can prove that the extreme points of the 2-dimensional convex hull of the points $\{(\mathrm{rank}(\Lambda'), \ln(\operatorname*{det}(\Lambda'))) \mid \Lambda' \subseteq \Lambda \text{ sublattice}\}$ correspond to a unique chain of nested sublattices $\{\mathbf{0}\} = \Lambda_0 \subset \Lambda_1 \subset \ldots \subset \Lambda_k = \Lambda$ . That chain is called the canonical filtration. It is useful to observe that each $\Lambda_i$ in this sequence is the unique densest sublattice of $\Lambda$ with given dimension $\mathrm{rank}(\Lambda_i)$ . Moreover, the quotient lattices $\Lambda_i / \Lambda_{i-1}$ are all scalar multiples of stable lattices and one can prove that $\mathrm{nd}(\Lambda_i / \Lambda_{i-1})$ are strictly increasing in $i$ . We refer to the thesis of [Ste17] for details.
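As a toy illustration (ours, not from the paper), consider the full rank lattice $\Lambda = \frac{1}{M}\mathbb{Z} \times M\mathbb{Z} \subseteq \mathbb{R}^2$ for some $M > 1$, which has $\det(\Lambda) = 1$ but is not stable, since the sublattice $\Lambda_1 := \frac{1}{M}\mathbb{Z} \times \{0\}$ has $\det(\Lambda_1) = \frac{1}{M} < 1$. Its canonical filtration is

$$
\{\mathbf{0}\} = \Lambda_0 \subset \Lambda_1 \subset \Lambda_2 = \Lambda,
\qquad
\mathrm{nd}(\Lambda_1 / \Lambda_0) = \frac{1}{M} \;<\; \mathrm{nd}(\Lambda_2 / \Lambda_1) = M,
$$

with $\Lambda_1$ the unique densest rank-1 sublattice and the normalized determinants of the quotients strictly increasing, as described above.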
|
| 128 |
+
|
| 129 |
+

|
| 130 |
+
|
| 131 |
+
It will be useful to replace the canonical filtration by an approximate filtration where the normalized determinants grow exponentially. We make the following definition:
|
| 132 |
+
|
| 133 |
+
Definition 11. We call a lattice $\Lambda \subseteq \mathbb{R}^n$ $t$ -stable with $t \geq 1$ if the following holds:
|
| 134 |
+
|
| 135 |
+
(I) For any sublattice $\tilde{\Lambda} \subseteq \Lambda$ one has $\mathrm{nd}(\tilde{\Lambda}) \geq t^{-1}$ .
|
| 136 |
+
(II) For any sublattice $\tilde{\Lambda} \subseteq \Lambda^{*}$ one has $\mathrm{nd}(\tilde{\Lambda}) \geq t^{-1}$ .
|
| 137 |
+
|
| 138 |
+
Note that a lattice is 1-stable if and only if it is stable. We can similarly define $t$ -stable filtrations:
|
| 139 |
+
|
| 140 |
+
Definition 12. Given a lattice $\Lambda \subseteq \mathbb{R}^n$ , we call a sequence $\{\mathbf{0}\} = \Lambda_0 \subset \ldots \subset \Lambda_k = \Lambda$ a $t$ -stable filtration of $\Lambda$ if the following holds:
|
| 141 |
+
|
| 142 |
+
(a) The normalized determinants $r_i \coloneqq \mathrm{nd}(\Lambda_i / \Lambda_{i-1})$ satisfy $r_1 < \ldots < r_k$ .
|
| 143 |
+
(b) The lattices $\frac{1}{r_i} (\Lambda_i / \Lambda_{i - 1})$ are $t$ -stable for all $i = 1,\ldots ,k$ .
|
| 144 |
+
|
| 145 |
+
We call a $t$ -stable filtration well-separated if additionally the following holds:
|
| 146 |
+
|
| 147 |
+
(c) One has $r_i \leq \frac{1}{2} r_{i+2}$ for all $i = 1, \ldots, k-2$ .
|
| 148 |
+
|
| 149 |
+
For example, the canonical filtration is 1-stable. It turns out we can make any $t$ -stable filtration well-separated:
|
| 150 |
+
|
| 151 |
+
Theorem 13. Given a lattice $\Lambda \subseteq \mathbb{R}^n$ and a $t$ -stable filtration $\{\mathbf{0}\} = \Lambda_0 \subset \ldots \subset \Lambda_k = \Lambda$ , in polynomial time we can compute a $2t$ -stable well-separated filtration $\{\mathbf{0}\} = \tilde{\Lambda}_0 \subseteq \ldots \subseteq \tilde{\Lambda}_{\tilde{k}} = \Lambda$ .
|
| 152 |
+
|
| 153 |
+
We defer the proof to Appendix A. Using the canonical filtration as input to Theorem 13 yields:
|
| 154 |
+
|
| 155 |
+
Corollary 14. For any lattice $\Lambda \subseteq \mathbb{R}^n$ , there exists a 2-stable well-separated filtration $\{\mathbf{0}\} = \Lambda_0 \subset \ldots \subset \Lambda_k = \Lambda$ .
|
| 156 |
+
|
| 157 |
+
We collect a few more properties of $t$ -stable lattices:
|
| 158 |
+
|
| 159 |
+
Lemma 15. There is a universal constant $C > 0$ so that the following holds: Let $\Lambda$ be a $t$ -stable lattice for $t \geq 1$ . Then for $s = C \log(2n)$ one has
|
| 160 |
+
|
| 161 |
+
(a) $\Lambda^{*}$ is $t$ -stable.
|
| 162 |
+
(b) $\rho_{1/(st)}(\Lambda) \leq \frac{3}{2}$ .
|
| 163 |
+
(c) For any $u \in \mathbb{R}^n$ one has $\frac{\rho_{st}(\Lambda + u)}{\rho_{st}(\Lambda)} \geq \frac{1}{3}$ .
|
| 164 |
+
|
| 165 |
+
Proof. (a) is immediate from the definition of $t$ -stability. Next, let $s = C\log(2n)$ be the parameter from Theorem 1. For (b), we can see that for any $\Lambda' \subseteq t\Lambda$ one has $\det(\Lambda') \geq 1$ and so the Reverse Minkowski Theorem (Theorem 1) applies to the lattice $t\Lambda$ . Then $\rho_{1/(st)}(\Lambda) = \rho_{1/s}(t\Lambda) \leq \frac{3}{2}$ which gives (b). For (c), applying Lemma 9 twice gives
|
| 166 |
+
|
| 167 |
+
$$
|
| 168 |
+
\frac {\rho_ {s t} (\Lambda + u)}{\rho_ {s t} (\Lambda)} \geq \frac {(s t) ^ {n} \det (\Lambda^ {*}) \cdot (1 - \rho_ {1 / (s t)} (\Lambda^ {*} \setminus \{\mathbf {0} \}))}{(s t) ^ {n} \det (\Lambda^ {*}) \cdot (1 + \rho_ {1 / (s t)} (\Lambda^ {*} \setminus \{\mathbf {0} \}))} \stackrel {(a) + (b)} {\geq} \frac {1 - \frac {1}{2}}{1 + \frac {1}{2}} = \frac {1}{3}.
|
| 169 |
+
$$
|
| 170 |
+
|
| 171 |
+
□
|
| 172 |
+
|
| 173 |
+
# 2.3 The $\ell$ -value and volume estimates
|
| 174 |
+
|
| 175 |
+
We review a few results from convex geometry that can all be found in the textbook by Artstein-Avidan, Giannopoulos and Milman [AAGM15]. We denote $B_2^n \coloneqq \{x \in \mathbb{R}^n \mid \| x \|_2 \leq 1\}$ and $S^{n-1} \coloneqq \{x \in \mathbb{R}^n \mid \| x \|_2 = 1\}$ as the Euclidean ball and sphere, resp. Let $\nu_n \coloneqq \mathrm{Vol}_n(B_2^n)$ . The relative interior of $K$ is rel.int( $K$ ) := $\{x \in K \mid \exists \varepsilon > 0 : (x + \varepsilon \cdot B_2^n) \cap \text{affine hull}(K) \subseteq K\}$ .
|
| 176 |
+
|
| 177 |
+
We define the mean width of a convex body $K$ as $w(K) \coloneqq \mathbb{E}_{\theta \sim S^{n-1}}[\max \{\langle \theta, x - y \rangle : x, y \in K\}]$ . For a compact convex $K \subseteq \mathbb{R}^n$ with $\mathbf{0} \in \operatorname{rel.int}(K)$ we denote its polar by $K^\circ \coloneqq \{y \in \operatorname{span}(K) : \langle x, y \rangle \leq 1 \forall x \in K\}$ . Recall the following basic facts.
|
| 178 |
+
|
| 179 |
+
Lemma 16 (Properties of polarity). For two convex bodies $K, Q \subseteq \mathbb{R}^n$ with $\mathbf{0} \in \operatorname{int}(K)$ and $\mathbf{0} \in \operatorname{int}(Q)$ the following holds:
|
| 180 |
+
|
| 181 |
+
(a) One has $(K^{\circ})^{\circ} = K$
|
| 182 |
+
(b) For any subspace $F\subseteq \mathbb{R}^n$ one has $\Pi_F(K)^\circ = K^\circ \cap F$
|
| 183 |
+
(c) One has $(K\cap Q)^{\circ} = conv(K^{\circ}\cup Q^{\circ})$
|
| 184 |
+
(d) One has $(-K)^{\circ} = -K^{\circ}$ .
|
| 185 |
+
|
| 186 |
+
We write $N(\mathbf{0},I_n)$ as the standard Gaussian distribution on $\mathbb{R}^n$ . The $\ell$ -value of a symmetric convex $Q\subseteq \mathbb{R}^n$ is defined as
|
| 187 |
+
|
| 188 |
+
$$
|
| 189 |
+
\ell_ {Q} = \underset {x \sim N (\mathbf {0}, I _ {n})} {\mathbb {E}} [ \| x \| _ {Q} ^ {2} ] ^ {1 / 2}.
|
| 190 |
+
$$
|
| 191 |
+
|
| 192 |
+
One may think of $\ell_{Q}$ as the "average thinness" of $Q$ . It turns out that the $\ell$ -value is also related to the mean width. To see this, note that $\| \cdot \|_{Q^{\circ}}$ is the dual norm to $\| \cdot \|_{Q}$ , i.e. for all $x \in \mathbb{R}^{n}$ one has $\| x \|_{Q^{\circ}} = \max \{\langle x, y \rangle : y \in Q\}$ . Then
|
| 193 |
+
|
| 194 |
+
$$
|
| 195 |
+
\ell_ {Q ^ {\circ}} = \underset {x \sim N \left(\mathbf {0}, I _ {n}\right)} {\mathbb {E}} [ \| x \| _ {Q ^ {\circ}} ^ {2} ] ^ {1 / 2} = \underset {x \sim N \left(\mathbf {0}, I _ {n}\right)} {\mathbb {E}} \left[ \max \{\langle x, y \rangle^ {2}: y \in Q \} \right] ^ {1 / 2}. \tag {1}
|
| 196 |
+
$$
|
| 197 |
+
|
| 198 |
+
We can see that the right hand side of (1) almost matches the definition of $w(Q)$ . In fact, one can prove:
|
| 199 |
+
|
| 200 |
+
Lemma 17. For any symmetric convex body $Q \subseteq \mathbb{R}^n$ one has $\ell_{Q^\circ} \asymp \sqrt{n} \cdot w(Q)$ .
|
| 201 |
+
|
| 202 |
+
For a positive semidefinite matrix $\Sigma$ we write $N(\mathbf{0},\Sigma)$ as the Gaussian with mean $\mathbf{0}$ and covariance matrix $\Sigma$ and for a subspace $U\subseteq \mathbb{R}^n$ we write $I_U$ as the identity matrix on that subspace. Occasionally we will need to refer to the $\ell$ -value of a compact symmetric convex set $Q$ that is not necessarily full-dimensional. In that case we extend the definition to $\ell_{Q} = \mathbb{E}_{x\sim N(\mathbf{0},I_{\mathrm{span}(Q)})}[\| x\|_{Q}^{2}]^{1 / 2}$ .
|
| 203 |
+
|
| 204 |
+
We say that a symmetric convex body $Q$ is in $\ell$ -position if $\ell_{Q} \cdot \ell_{Q^{\circ}} \leq O(n \log(2n))$ . One of the most powerful tools in convex geometry is that every symmetric convex body can indeed be brought into $\ell$ -position:
|
| 205 |
+
|
| 206 |
+
Theorem 18 (Figiel, Tomczak-Jaegerman, Pisier). For any symmetric convex body $Q \subseteq \mathbb{R}^n$ , there is an invertible linear map $T: \mathbb{R}^n \to \mathbb{R}^n$ so that $\ell_{T(Q)} \cdot \ell_{(T(Q))^{\circ}} \leq O(n \log(2n))$ .
|
| 207 |
+
|
| 208 |
+
By Lemma 17, the conclusion of Theorem 18 is equivalent to $w(T(Q)) \cdot w((T(Q))^\circ) \leq O(\log(2n))$ . Moreover one can prove that for any symmetric convex body $Q$ one has $w(Q) \cdot w(Q^\circ) \gtrsim w(B_2^n)^2 \gtrsim 1$ . Then one can interpret Theorem 18 as saying that every symmetric convex body can be linearly transformed so that, in terms of mean width and average thinness, it is within an $O(\log(2n))$ -factor of the Euclidean ball. For the sake of comparison, we note that the bound that could be obtained via the more classical John's Theorem [Joh48] would be of the order of $\sqrt{n}$ . We would like to point out that Theorem 18 is only known for symmetric convex bodies, and it is open to what extent it generalizes to the non-symmetric case.
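For orientation (our own remark, not from the paper), the Euclidean ball itself attains the best possible product: for $Q = B_2^n$ one has $\|x\|_{B_2^n} = \|x\|_2$ and $(B_2^n)^\circ = B_2^n$, hence

$$
\ell_{B_2^n} = \mathbb{E}_{x \sim N(\mathbf{0}, I_n)}\bigl[\|x\|_2^2\bigr]^{1/2} = \sqrt{n},
\qquad
\ell_{B_2^n} \cdot \ell_{(B_2^n)^\circ} = n,
$$

so Theorem 18 states that any symmetric convex body can be transformed to have this product within an $O(\log(2n))$ factor of the value attained by the ball.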
|
| 209 |
+
|
| 210 |
+
We state two estimates concerning monotonicity of the $\ell$ -value that will be crucial for our later arguments:
|
| 211 |
+
|
| 212 |
+
Lemma 19. Let $Q \subseteq \mathbb{R}^n$ be a symmetric convex body. Then for any subspace $U \subseteq \mathbb{R}^n$ , one has $\ell_{Q \cap U} \leq \ell_Q$ .
|
| 213 |
+
|
| 214 |
+
Proof. Indeed, one has
|
| 215 |
+
|
| 216 |
+
$$
|
| 217 |
+
\ell_ {Q} ^ {2} = \mathbb {E} _ {z \sim N (\mathbf {0}, I _ {U})} \left[ \mathbb {E} _ {y \sim N (\mathbf {0}, I _ {U ^ {\perp}})} [ \| z + y \| _ {Q} ^ {2} ] \right] \geq \mathbb {E} _ {z \sim N (\mathbf {0}, I _ {U})} \left[ \| z + \underbrace {\mathbb {E} _ {y \sim N (\mathbf {0} , I _ {U ^ {\perp}})} [ y ]} _ {= \mathbf {0}} \| _ {Q} ^ {2} \right] = \ell_ {Q \cap U} ^ {2},
|
| 218 |
+
$$
|
| 219 |
+
|
| 220 |
+
where the inequality follows from Jensen's inequality and the convexity of $y \mapsto \| z + y\|_Q^2$ .
|
| 221 |
+
|
| 222 |
+
Lemma 20. Let $Q \subseteq \mathbb{R}^n$ be a symmetric convex body. For any subspaces $V \subset W \subseteq \mathbb{R}^n$ , one has $\ell_{\Pi_{V^\perp}(Q \cap W)} \leq \ell_Q$ .
|
| 223 |
+
|
| 224 |
+
Proof. We have $\ell_{\Pi_{V^{\perp}}(Q\cap W)}\leq \ell_{Q\cap W\cap V^{\perp}}\leq \ell_{Q}$ using that $\Pi_{V^{\perp}}(Q\cap W)\supseteq Q\cap W\cap V^{\perp}$ and using Lemma 19.
|
| 225 |
+
|
| 226 |
+
The following classical result says that among all bodies with identical volume, the Euclidean ball minimizes the mean width.
|
| 227 |
+
|
| 228 |
+
Theorem 21 (Urysohn Inequality I). For any convex body $K \subseteq \mathbb{R}^n$ one has
|
| 229 |
+
|
| 230 |
+
$$
|
| 231 |
+
w (K) \geq 2 \cdot \left(\frac {\mathrm {V o l} _ {n} (K)}{\mathrm {V o l} _ {n} (B _ {2} ^ {n})}\right) ^ {1 / n}.
|
| 232 |
+
$$
|
| 233 |
+
|
| 234 |
+
A slight variant of this inequality will be handy for us:
|
| 235 |
+
|
| 236 |
+
Corollary 22 (Urysohn Inequality II). For any symmetric convex body $Q \subseteq \mathbb{R}^n$ one has $\mathrm{Vol}_n(Q)^{1/n} \lesssim \frac{\ell_{Q^\circ}}{n}$ .
|
| 237 |
+
|
| 238 |
+
Proof. Applying Urysohn's Inequality I we obtain
|
| 239 |
+
|
| 240 |
+
$$
|
| 241 |
+
\operatorname {V o l} _ {n} (Q) ^ {1 / n} \stackrel {{\text {T h m 2 1}}} {\lesssim} w (Q) \cdot \underbrace {\operatorname {V o l} _ {n} (B _ {2} ^ {n}) ^ {1 / n}} _ {\lesssim 1 / \sqrt {n}} \stackrel {{\text {L e m 1 7}}} {\lesssim} \frac {\ell_ {Q ^ {\circ}}}{n}.
|
| 242 |
+
$$
|
| 243 |
+
|
| 244 |
+
Here we use in particular that $\mathrm{Vol}_n(B_2^n)\leq (\frac{2e}{\sqrt{n}})^n$
|
| 245 |
+
|
| 246 |
+

|
| 247 |
+
|
| 248 |
+
The following can be found e.g. in [AAGM15], Chapter 8:
|
| 249 |
+
|
| 250 |
+
Theorem 23 (Blaschke-Santaló-Bourgain-Milman). For any symmetric convex body $K \subseteq \mathbb{R}^n$ one has
|
| 251 |
+
|
| 252 |
+
$$
|
| 253 |
+
C _ {1} ^ {n} \nu_ {n} ^ {2} \leq \mathrm {V o l} _ {n} (K) \cdot \mathrm {V o l} _ {n} (K ^ {\circ}) \leq C _ {2} ^ {n} \nu_ {n} ^ {2},
|
| 254 |
+
$$
|
| 255 |
+
|
| 256 |
+
where $C_1, C_2 > 0$ are constants.
|
| 257 |
+
|
| 258 |
+
Let $b(K) \coloneqq \frac{1}{\mathrm{Vol}_n(K)} \int_K x \, dx$ denote the barycenter or centroid of a convex body $K$ . We will run into the issue that we need to control the volume of a non-symmetric convex body $K$ , but Theorem 18 only holds for symmetric ones. A popular strategy in convex geometry is to translate $K$ so that $b(K) = \mathbf{0}$ and then consider the inner symmetrizer $K \cap -K$ which by construction is a symmetric convex body contained in $K$ which captures much of the geometry of $K$ . For example a classical result by Milman and Pajor says that $\mathrm{Vol}_n(K \cap -K) \geq 2^{-n} \mathrm{Vol}_n(K)$ . However, in our case we need a more powerful estimate that was proven by Vritsiou [Vri23] in the context of showing the existence of regular $M$ -ellipsoids for non-symmetric convex bodies.
|
| 259 |
+
|
| 260 |
+
Proposition 24 ([Vri23], Corollary 11). Let $K \subseteq \mathbb{R}^n$ be a convex body so that $b(K) = 0$ and let $F \subseteq \mathbb{R}^n$ be a $d$ -dimensional subspace. Then
|
| 261 |
+
|
| 262 |
+
$$
|
| 263 |
+
\operatorname {V o l} _ {d} (\Pi_ {F} (K)) ^ {1 / d} \lesssim \left(\frac {n}{d}\right) ^ {5} \cdot \log \left(\frac {e n}{d}\right) ^ {2} \cdot \operatorname {V o l} _ {d} (\Pi_ {F} (K \cap - K)) ^ {1 / d}.
|
| 264 |
+
$$
|
| 265 |
+
|
| 266 |
+
In a previous version of this preprint, we had shown an inequality with a better exponent when the body is centered so that $b(K^{\circ}) = \mathbf{0}$ , i.e. the origin is the Santaló point of $K$ . However, algorithmically the barycenter is much easier to compute and the exponent only affects the implicit universal constant in our main result, hence we choose to work with Vritsiou's estimate. For the interested reader, the bound
|
| 267 |
+
|
| 268 |
+
with the Santaló point as center can be found in v2 on arXiv and also independently in [Vri23].
|
| 269 |
+
|
| 270 |
+
We prove a custom-tailored inequality for later:
|
| 271 |
+
|
| 272 |
+
Lemma 25. Let $K \subseteq \mathbb{R}^n$ be a convex body with $b(K) = \mathbf{0}$ and let $F \subseteq \mathbb{R}^n$ be a $d$ -dimensional subspace. Then
|
| 273 |
+
|
| 274 |
+
$$
|
| 275 |
+
(\operatorname {V o l} _ {d} (\Pi_ {F} (K))) ^ {1 / d} \lesssim \left(\frac {n}{d}\right) ^ {6} \cdot \frac {\ell_ {(K \cap - K) ^ {\circ}}}{d}.
|
| 276 |
+
$$
|
| 277 |
+
|
| 278 |
+
Proof. We abbreviate $K_{\mathrm{sym}} \coloneqq K \cap -K$ . Using the volume estimate from Proposition 24 with the assumption that the barycenter of $K$ lies at the origin, we obtain
|
| 279 |
+
|
| 280 |
+
$$
|
| 281 |
+
\begin{aligned}
(\operatorname{Vol}_{d}(\Pi_{F}(K)))^{1/d}
&\stackrel{\text{Prop 24}}{\lesssim} \left(\frac{n}{d}\right)^{6} \cdot (\operatorname{Vol}_{d}(\Pi_{F}(K_{\mathrm{sym}})))^{1/d} \\
&\stackrel{\text{Cor 22}}{\lesssim} \left(\frac{n}{d}\right)^{6} \cdot \frac{\ell_{(\Pi_{F}(K_{\mathrm{sym}}))^{\circ}}}{d} \\
&\stackrel{\text{Lem 16}}{=} \left(\frac{n}{d}\right)^{6} \cdot \frac{\ell_{K_{\mathrm{sym}}^{\circ} \cap F}}{d} \\
&\stackrel{\text{Lem 19}}{\leq} \left(\frac{n}{d}\right)^{6} \cdot \frac{\ell_{K_{\mathrm{sym}}^{\circ}}}{d}.
\end{aligned}
|
| 282 |
+
$$
|
| 283 |
+
|
| 284 |
+
Here we also used the fact that $(\Pi_F(K_{\mathrm{sym}}))^{\circ} = K_{\mathrm{sym}}^{\circ}\cap F$
|
| 285 |
+
|
| 286 |
+

|
| 287 |
+
|
| 288 |
+
# 2.4 Properties of the covering radius
|
| 289 |
+
|
| 290 |
+
While the set $K$ may not be symmetric, the sets $\Lambda$ and $\mathbb{R}^n$ are symmetric, which implies the following:
|
| 291 |
+
|
| 292 |
+
Lemma 26 (Properties of the covering radius). Consider a lattice $\Lambda \subseteq \mathbb{R}^n$ and a compact convex set $K \subseteq \mathbb{R}^n$ with $\operatorname{span}(\Lambda) = \operatorname{affine.hull}(K)$ . Then
|
| 293 |
+
|
| 294 |
+
(a) $\mu (\Lambda ,K) = \mu (\Lambda ,K + u)$ for all $u\in \operatorname {span}(\Lambda)$
|
| 295 |
+
(b) $\mu (\Lambda ,K) = \min \{r\geq 0\mid (x + rK)\cap \Lambda \neq \emptyset \forall x\in \operatorname {span}(\Lambda)\}$
|
| 296 |
+
|
| 297 |
+
We need a triangle inequality for the covering radius:
|
| 298 |
+
|
| 299 |
+
Lemma 27. Let $\Lambda \subseteq \mathbb{R}^n$ be a lattice and let $\Lambda' \subseteq \Lambda$ be a primitive sublattice. Then for any compact convex set $K \subseteq \mathbb{R}^n$ with $\mathbf{0} \in \operatorname{rel.int}(K)$ and $\operatorname{span}(\Lambda) = \operatorname{span}(K)$ one has
|
| 300 |
+
|
| 301 |
+
$$
|
| 302 |
+
\mu (\Lambda , K) \leq \mu (\Lambda^ {\prime}, K \cap W) + \mu (\Lambda / \Lambda^ {\prime}, \Pi_ {W ^ {\perp}} (K)),
|
| 303 |
+
$$
|
| 304 |
+
|
| 305 |
+
where $W\coloneqq \operatorname {span}(\Lambda^{\prime})$
|
| 306 |
+
|
| 307 |
+
Proof. W.l.o.g. we may assume that $\Lambda$ has full rank, so $\mathbf{0} \in \operatorname{int}(K)$ . Following the characterization in Lemma 26.(b), we fix an $x \in \mathbb{R}^n$ . For $r_1 \coloneqq \mu(\Pi_{W^\perp}(\Lambda), \Pi_{W^\perp}(K))$ we know that $\Pi_{W^\perp}(x + r_1K) \cap \Pi_{W^\perp}(\Lambda) \neq \emptyset$ . That means there is a $u_1 \in r_1K$ and a lattice point $y \in \Lambda$ so that $\Pi_{W^\perp}(x + u_1) = \Pi_{W^\perp}(y)$ . Next, for $r_2 \coloneqq \mu(\Lambda \cap W, K \cap W)$ we know that $(x + u_1 - y + r_2 \cdot (K \cap W)) \cap (\Lambda \cap W) \neq \emptyset$ which is equivalent to $(x + u_1 + r_2 \cdot (K \cap W)) \cap (y + (\Lambda \cap W)) \neq \emptyset$ . Let $u_2 \in r_2 \cdot (K \cap W)$ be the vector so that $x + u_1 + u_2 \in \Lambda$ . Then $u_1 + u_2 \in (r_1 + r_2)K$ by convexity, so $(x + (r_1 + r_2) \cdot K) \cap \Lambda \neq \emptyset$ .
|
| 308 |
+
|
| 309 |
+

|
| 310 |
+
|
| 311 |
+

|
| 312 |
+
|
| 313 |
+
The natural extension of Lemma 27 to a filtration is as follows:
|
| 314 |
+
|
| 315 |
+
Lemma 28. Let $\Lambda \subseteq \mathbb{R}^n$ be a lattice with any sequence of sublattices $\{\mathbf{0}\} = \Lambda_0 \subset \Lambda_1 \subset \ldots \subset \Lambda_k = \Lambda$ . Then for any compact convex set $K \subseteq \mathbb{R}^n$ with $\mathbf{0} \in \operatorname{rel.int}(K)$ and $\operatorname{span}(\Lambda) = \operatorname{span}(K)$ , one has
|
| 316 |
+
|
| 317 |
+
$$
|
| 318 |
+
\mu (\Lambda , K) \leq \sum_{i = 1}^{k} \mu \left(\Lambda_{i} / \Lambda_{i - 1}, \Pi_{\operatorname{span}(\Lambda_{i - 1})^{\perp}} (K \cap \operatorname{span}(\Lambda_{i}))\right).
|
| 319 |
+
$$
|
| 320 |
+
|
| 321 |
+
Proof. We can use the previous lemma to show by induction over $i_0 = k, k - 1, \ldots, 1$ that
|
| 322 |
+
|
| 323 |
+
$$
|
| 324 |
+
\mu (\Lambda , K) \leq \mu \left(\Lambda_ {i _ {0} - 1}, K \cap \operatorname {s p a n} \left(\Lambda_ {i _ {0} - 1}\right)\right) + \sum_ {i = i _ {0}} ^ {k} \mu \left(\Lambda_ {i} / \Lambda_ {i - 1}, \Pi_ {\operatorname {s p a n} \left(\Lambda_ {i - 1}\right) ^ {\perp}} \left(K \cap \operatorname {s p a n} \left(\Lambda_ {i}\right)\right)\right).
|
| 325 |
+
$$
|
| 326 |
+
|
| 327 |
+
Indeed, for $i_0 = k$ this is exactly Lemma 27. If it holds for some $i_0 > 1$ , then
|
| 328 |
+
|
| 329 |
+
$$
|
| 330 |
+
\begin{array}{l} \mu \left(\Lambda_ {i _ {0} - 1}, K \cap \operatorname {s p a n} \left(\Lambda_ {i _ {0} - 1}\right)\right) \leq \mu \left(\Lambda_ {i _ {0} - 2}, K \cap \operatorname {s p a n} \left(\Lambda_ {i _ {0} - 2}\right)\right) + \\ \mu \left(\Lambda_ {i _ {0} - 1} / \Lambda_ {i _ {0} - 2}, \Pi_ {\operatorname {s p a n} \left(\Lambda_ {i _ {0} - 2}\right) ^ {\perp}} (K \cap \operatorname {s p a n} \left(\Lambda_ {i _ {0} - 1}\right))\right), \\ \end{array}
|
| 331 |
+
$$
|
| 332 |
+
|
| 333 |
+
since $\operatorname{span}(\Lambda_{i_0 - 2}) \subset \operatorname{span}(\Lambda_{i_0 - 1})$ . So the claim follows by induction, and taking $i_0 \coloneqq 1$ yields the statement.
|
| 334 |
+
|
| 335 |
+
# 2.5 Properties of $\mu_{KL}$
|
| 336 |
+
|
| 337 |
+
We also need the following fact:
|
| 338 |
+
|
| 339 |
+
Lemma 29. For any lattice $\Lambda \subseteq \mathbb{R}^n$ , compact convex set $K$ with $\operatorname{span}(\Lambda) = \operatorname{affine.hull}(K)$ and subspace $V \subseteq \operatorname{span}(\Lambda)$ one has $\mu_{KL}(\Pi_V(\Lambda), \Pi_V(K)) \leq \mu_{KL}(\Lambda, K)$ .
|
| 340 |
+
|
| 341 |
+
Proof. Let $W \subseteq V$ be the subspace attaining the left side with $\dim W = d$ . Then
|
| 342 |
+
|
| 343 |
+
$$
|
| 344 |
+
\mu_ {K L} (\Pi_ {V} (\Lambda), \Pi_ {V} (K)) = \left(\frac {\mathrm {d e t} (\Pi_ {W} (\Pi_ {V} (\Lambda)))}{\mathrm {V o l} _ {d} (\Pi_ {W} (\Pi_ {V} (K)))}\right) ^ {1 / d} = \left(\frac {\mathrm {d e t} (\Pi_ {W} (\Lambda))}{\mathrm {V o l} _ {d} (\Pi_ {W} (K))}\right) ^ {1 / d} \leq \mu_ {K L} (\Lambda , K),
|
| 345 |
+
$$
|
| 346 |
+
|
| 347 |
+
using that $\Pi_W(\Pi_V(x)) = \Pi_W(x)$ for all $x\in \mathbb{R}^n$ as $W\subseteq V$
|
| 348 |
+
|
| 349 |
+

|
| 350 |
+
|
| 351 |
+
# 2.6 Approximate stable lattices and the covering radius
|
| 352 |
+
|
| 353 |
+
Using the Reverse Minkowski Theorem it would not be hard to prove that for any stable lattice $\Lambda \subseteq \mathbb{R}^n$ one has $\mu(\Lambda, B_2^n) \leq O(\sqrt{n}\log(2n))$ . In this section, we show how to generalize this to $t$ -stable lattices and to general symmetric convex bodies. For a symmetric convex body $Q$ , we consider the following quantity
|
| 354 |
+
|
| 355 |
+
$$
|
| 356 |
+
\beta (Q) = \sup_{\Lambda \subseteq \mathbb{R}^{n} \text{ lattice}} \ \sup_{u \in \mathbb{R}^{n}} \frac{\rho_{1}((u + \Lambda) \setminus Q)}{\rho_{1}(\Lambda)}.
|
| 357 |
+
$$
|
| 358 |
+
|
| 359 |
+
Note that always $0 < \beta(Q) \leq 1$ . Intuitively, a body $Q$ with $\beta(Q) \ll 1$ is large enough that for any lattice a substantial fraction of the discrete Gaussian weight has to fall in $Q$ . As part of the celebrated Transference Theorem, Banaszczyk showed how to relate the $\ell$ -value of a body to its $\beta$ -value:
|
| 360 |
+
|
| 361 |
+
Lemma 30 (Banaszczyk [Ban96]). For any $\varepsilon > 0$ , there is a $\delta > 0$ so that the following holds: for any symmetric convex body $Q \subseteq \mathbb{R}^n$ with $\ell_Q \leq \delta$ one has $\beta(Q) \leq \varepsilon$ .
|
| 362 |
+
|
| 363 |
+
Next, we can get a fairly tight upper bound on the covering radius of a $t$ -stable lattice:
|
| 364 |
+
|
| 365 |
+
Proposition 31. Let $\Lambda \subseteq \mathbb{R}^n$ be a full rank lattice that is the $r$ -scaling of a $t$ -stable lattice and let $Q \subseteq \mathbb{R}^n$ be a symmetric convex body. Then $\mu(\Lambda, Q) \leq O(\log(2n)) \cdot t \cdot r \cdot \ell_Q$ .
|
| 366 |
+
|
| 367 |
+
Proof. Let $\varepsilon > 0$ be a small enough constant that we determine later. Let $\delta$ be the constant so that Lemma 30 applies (w.r.t. $\varepsilon$ ). The claim is invariant under scaling $Q$ , hence we may scale $Q$ so that $\ell_{Q} \leq \delta$ and consequently $\beta(Q) \leq \varepsilon$ . We may also scale the lattice so that $\Lambda$ is $t$ -stable (i.e. $r = 1$ ). It suffices to prove that under these assumptions, $\mu(\Lambda, Q) \leq s \cdot t$ where $s \coloneqq C \log(2n)$ is the parameter from
|
| 368 |
+
|
| 369 |
+
Lemma 15. Now suppose for the sake of contradiction that there is a translate $u \in \mathbb{R}^n$ so that $(u + \Lambda) \cap stQ = \emptyset$ . Since $\beta(Q) \leq \varepsilon$ , we know that
|
| 370 |
+
|
| 371 |
+
$$
|
| 372 |
+
\rho_ {1} \left(\left(\frac {u}{s t} + \frac {\Lambda}{s t}\right) \backslash Q\right) \leq \varepsilon \rho_ {1} \left(\frac {\Lambda}{s t}\right).
|
| 373 |
+
$$
|
| 374 |
+
|
| 375 |
+
Multiplying the sets and parameters by $st$ gives
|
| 376 |
+
|
| 377 |
+
$$
|
| 378 |
+
\rho_ {s t} ((u + \Lambda) \backslash s t Q) \leq \varepsilon \rho_ {s t} (\Lambda). (*)
|
| 379 |
+
$$
|
| 380 |
+
|
| 381 |
+
Using that $\Lambda$ is $t$ -stable, we get
|
| 382 |
+
|
| 383 |
+
$$
|
| 384 |
+
\frac {1}{3} \rho_ {s t} (\Lambda) \stackrel {\mathrm {L e m 1 5}} {\leq} \rho_ {s t} (u + \Lambda) \stackrel {(u + \Lambda) \cap s t Q = \emptyset} {=} \rho_ {s t} ((u + \Lambda) \setminus s t Q) \stackrel {(*)} {\leq} \varepsilon \rho_ {s t} (\Lambda).
|
| 385 |
+
$$
|
| 386 |
+
|
| 387 |
+
Then choosing $\varepsilon \in (0, \frac{1}{3})$ gives a contradiction.
|
| 388 |
+
|
| 389 |
+

|
| 390 |
+
|
| 391 |
+
# 3 Overview
|
| 392 |
+
|
| 393 |
+
The goal of this section is to provide the reader with an overview and some intuition concerning the proof of our main result, Theorem 2. First, we want to prove the inequality from Theorem 2 (with an even better exponent) in the special case that both the lattice and the body $K$ are well-scaled. We will not actually use Prop 32 later in this form, but it will provide us with the idea for a general proof strategy.
|
| 394 |
+
|
| 395 |
+
Proposition 32. Let $\Lambda \subseteq \mathbb{R}^n$ be a full rank 2-stable lattice and let $K$ be a convex body with $b(K) = \mathbf{0}$ so that $K \cap -K$ is in $\ell$ -position. Then $\mu(\Lambda, K) \leq O(\log^2(2n)) \cdot \mu_{KL}(\Lambda, K)$ .
|
| 396 |
+
|
| 397 |
+
Proof. We denote the inner symmetrizer by $K_{\mathrm{sym}} \coloneqq K \cap -K$ . Then applying the estimate for stable lattices from Prop 31 we can upper bound the covering radius:
|
| 398 |
+
|
| 399 |
+
$$
|
| 400 |
+
\mu (\Lambda , K) \stackrel {K \supseteq K _ {\mathrm {s y m}}} {\leq} \mu (\Lambda , K _ {\mathrm {s y m}}) \stackrel {\text {P r o p 3 1}} {\lesssim} \log (2 n) \cdot \ell_ {K _ {\mathrm {s y m}}}
|
| 401 |
+
$$
|
| 402 |
+
|
| 403 |
+
Next, we lower bound $\mu_{KL}(\Lambda, K)$ by simply choosing the subspace $W := \mathbb{R}^n$ as witness. Then
|
| 404 |
+
|
| 405 |
+
$$
|
| 406 |
+
\mu_{KL}(\Lambda , K) \geq \left(\frac{\det(\Lambda)}{\operatorname{Vol}_{n}(K)}\right)^{1 / n} \stackrel{(*)}{\gtrsim} \frac{1}{\operatorname{Vol}_{n}(K_{\mathrm{sym}})^{1 / n}} \stackrel{\text{Cor 22}}{\gtrsim} \frac{n}{\ell_{K_{\mathrm{sym}}^{\circ}}} \stackrel{\ell\text{-position}}{\gtrsim} \frac{\ell_{K_{\mathrm{sym}}}}{\log (2 n)},
|
| 407 |
+
$$
|
| 408 |
+
|
| 409 |
+
where we use in (*) that $\operatorname{det}(\Lambda) \geq 2^{-n}$ and $\mathrm{Vol}_n(K_{\mathrm{sym}}) \geq 2^{-n}\mathrm{Vol}_n(K)$ . Combining both inequalities gives the claim.
|
| 410 |
+
|
| 411 |
+
Next, we want to develop a proof strategy that works for general $\Lambda$ and $K$ . Translating $K$ and applying a linear transformation to both $\Lambda$ and $K$ does not affect the claim, hence we may assume that $K$ has the barycenter at $\mathbf{0}$ and the symmetrizer $K_{\mathrm{sym}} := K \cap -K$ is in $\ell$ -position. But in general, $\Lambda$ will not be a 2-stable lattice and we cannot expect that one can always choose the subspace $W = \mathbb{R}^n$ as witness like in Prop 32.
|
| 412 |
+
|
| 413 |
+
But we know by Cor 14 that the lattice $\Lambda$ admits a 2-stable well-separated filtration $\{\mathbf{0}\} = \Lambda_0\subset \ldots \subset \Lambda_k = \Lambda$ . Let us abbreviate $d_{i}\coloneqq \mathrm{rank}(\Lambda_{i} / \Lambda_{i - 1})$ and $r_i\coloneqq \operatorname *{det}(\Lambda_i / \Lambda_{i - 1})^{1 / d_i}$ . Then each quotient lattice $\frac{1}{r_i}\Lambda_i / \Lambda_{i - 1}$ is a 2-stable lattice of dimension $d_{i}$ and hence an argument similar to Prop 32 becomes feasible.
|
| 414 |
+
|
| 415 |
+
We can use the triangle inequality that we developed in Lemma 28 to obtain
|
| 416 |
+
|
| 417 |
+
$$
|
| 418 |
+
\begin{array}{l} \mu (\Lambda , K) \stackrel {K \supseteq K _ {\mathrm {s y m}}} {\leq} \mu (\Lambda , K _ {\mathrm {s y m}}) \stackrel {\text {L e m} 2 8} {\leq} \sum_ {i = 1} ^ {k} \mu (\Lambda_ {i} / \Lambda_ {i - 1}, K _ {i}) \stackrel {\text {P r o p} 3 1} {\lesssim} \log (2 n) \sum_ {i = 1} ^ {k} r _ {i} \ell_ {K _ {i}} \\ \lesssim \log (2 n) \cdot r _ {k} \ell_ {K}, \\ \end{array}
|
| 419 |
+
$$
|
| 420 |
+
|
| 421 |
+
where $K_{i} \coloneqq \Pi_{\mathrm{span}(\Lambda_{i - 1})^{\perp}}(K_{\mathrm{sym}} \cap \mathrm{span}(\Lambda_{i}))$ . Here we have used that the sequence $r_1 < \ldots < r_k$ is geometrically increasing. This provides a convenient upper bound on the covering radius in terms of the relative determinant of the last quotient lattice in the filtration (which is the sparsest one). However we cannot avoid wondering whether we gave up too much by bounding $\ell_{K_i} \leq \ell_K$ .
|
| 422 |
+
|
| 423 |
+
Next, we want to lower bound $\mu_{KL}(\Lambda ,K)$ . The only natural choices for a witness subspace seem to come from the filtration. Hence for some index $i\in \{1,\ldots ,k\}$ we want to understand what can be obtained by choosing $W\coloneqq \operatorname {span}(\Lambda_{i - 1})^{\perp}$ , meaning we project out the densest $i - 1$ of the quotient lattices. Then abbreviating $d\coloneqq \dim (W) = d_i + \dots +d_k$ we have
|
| 424 |
+
|
| 425 |
+
$$
|
| 426 |
+
\mu_{KL}(\Lambda , K) \geq \left(\frac{\det(\Pi_{W}(\Lambda))}{\operatorname{Vol}_{d}(\Pi_{W}(K))}\right)^{1 / d} \stackrel{(*)}{\gtrsim} r_{i} \cdot \left(\frac{d}{n}\right)^{6} \frac{d}{\ell_{K_{\mathrm{sym}}^{\circ}}} \stackrel{\ell\text{-position}}{\gtrsim} \frac{r_{i}}{\log(2n)} \cdot \left(\frac{d}{n}\right)^{7} \cdot \ell_{K_{\mathrm{sym}}}.
|
| 427 |
+
$$
|
| 428 |
+
|
| 429 |
+
In $(\ast)$ we use that $\Pi_W(\Lambda) = \Lambda /\Lambda_{i - 1}$ and so $\operatorname *{det}(\Pi_W(\Lambda))^{1 / d}$ is a geometric mean of factors that are all at least $r_i$ . Here we also use Lemma 25 to bound $\mathrm{Vol}_d(\Pi_W(K))$ . It seems the only direct comparison can be obtained when letting $i\coloneqq k$ in which case we have
|
| 430 |
+
|
| 431 |
+
$$
|
| 432 |
+
\mu (\Lambda , K) \lesssim \log^ {2} (2 n) \cdot \left(\frac {n}{d _ {k}}\right) ^ {7} \cdot \mu_ {K L} (\Lambda , K).
|
| 433 |
+
$$
|
| 434 |
+
|
| 435 |
+
Hence, we can conclude Theorem 2 if $d_k$ is close to $n$ , i.e. the last quotient subspace is large. But of course this is not necessarily true. In fact, the issue is more substantial. If $K_{\mathrm{sym}}$ is in $\ell$ -position with $\ell_{K_{\mathrm{sym}}}$ and $\ell_{K_{\mathrm{sym}}^{\circ}}$ known and $W$ is a $d$ -dimensional subspace, then this determines $\operatorname{Vol}_d(\Pi_W(K))^{1/d}$ only up to a polynomial factor in $\frac{n}{d}$ . Hence the information that we considered so far is simply
|
| 436 |
+
|
| 437 |
+
too weak to approximate $\mu (\Lambda ,K)$ up to a polylogarithmic factor. But fortunately there is a fix: instead of upper bounding the whole covering radius $\mu (\Lambda ,K)$ , we only estimate the covering radius corresponding to the less important half of the filtration. This means we will need to iterate the argument, which comes at the expense of another logarithmic factor, but it will work!
|
| 438 |
+
|
| 439 |
+
# 4 Proof of the main theorem
|
| 440 |
+
|
| 441 |
+
We will spend the next two subsections proving our main Theorem 2 by induction over $n$ . At each step, we split the lattice $\Lambda$ and the convex body $K$ into a subspace section of dimension at least $n/2$ and a projection; most of the work goes into analyzing the subspace section.
|
| 442 |
+
|
| 443 |
+
# 4.1 The inductive step
|
| 444 |
+
|
| 445 |
+
First, we give a self-contained description of the inductive step, then later in Section 4.2 we describe the main part of the induction.
|
| 446 |
+
|
| 447 |
+
Proposition 33. There is a universal constant $C_0 > 0$ so that the following holds: For any full rank lattice $\Lambda \subseteq \mathbb{R}^n$ and any convex body $K \subseteq \mathbb{R}^n$ with $b(K) = \mathbf{0}$ , there exists a primitive sublattice $\Lambda' \subseteq \Lambda$ with $\mathrm{rank}(\Lambda') \geq n/2$ so that
|
| 448 |
+
|
| 449 |
+
$$
|
| 450 |
+
\mu \left(\Lambda^{\prime}, (K \cap - K) \cap \operatorname{span}(\Lambda^{\prime})\right) \leq C_{0} \log^{2}(2 n) \cdot \mu_{KL}(\Lambda , K).
|
| 451 |
+
$$
|
| 452 |
+
|
| 453 |
+
Proof. Set $K_{\mathrm{sym}} \coloneqq K \cap (-K)$ . The claim is invariant under applying a linear transformation to $K$ and $\Lambda$ . Hence we may assume that $K_{\mathrm{sym}}$ is in $\ell$ -position, i.e. $\ell_{K_{\mathrm{sym}}} \cdot \ell_{K_{\mathrm{sym}}^{\circ}} \leq O(n \log(2n))$ . Consider a well-separated 2-stable filtration $\{\mathbf{0}\} = \Lambda_0 \subset \ldots \subset \Lambda_k = \Lambda$ which exists by Cor 14. We will later choose the lattice $\Lambda'$ from one of the lattices $\Lambda_i$ in the filtration, but we postpone the choice for now. We define
|
| 454 |
+
|
| 455 |
+
$$
|
| 456 |
+
d _ {i} := \mathrm {r a n k} (\Lambda_ {i} / \Lambda_ {i - 1}) \quad \mathrm {a n d} \quad r _ {i} := \mathrm {n d} (\Lambda_ {i} / \Lambda_ {i - 1}) = \mathrm {d e t} (\Lambda_ {i} / \Lambda_ {i - 1}) ^ {1 / d _ {i}},
|
| 457 |
+
$$
|
| 458 |
+
|
| 459 |
+
which are the rank and normalized determinants of the quotient lattices in the filtration. Recall that $r_1 < r_2 < \ldots < r_k$ with $r_i \leq \frac{1}{2} r_{i+2}$ for all $i$ .
|
| 460 |
+
|
| 461 |
+
Claim I. For any $i \in \{1, \dots, k\}$ one has $\mu(\Lambda_i, K_{sym} \cap \text{span}(\Lambda_i)) \lesssim \log(2n) \cdot r_i \cdot \ell_{K_{sym}}$ .
|
| 462 |
+
|
| 463 |
+
Proof of Claim I. We abbreviate $K_{j} \coloneqq \Pi_{\mathrm{span}(\Lambda_{j - 1})^{\perp}}(K_{\mathrm{sym}} \cap \mathrm{span}(\Lambda_{j}))$ . Then $K_{j}$ is convex and symmetric and $\frac{1}{r_j} (\Lambda_j / \Lambda_{j - 1})$ is a 2-stable lattice. Hence we can bound the covering radii of the individual quotient lattices by
|
| 464 |
+
|
| 465 |
+
$$
|
| 466 |
+
\mu \left(\Lambda_j / \Lambda_{j-1}, K_j\right) \stackrel{\text{Prop 31}}{\lesssim} \log(2n) \cdot r_j \cdot \ell_{K_j} \stackrel{\text{Lem 20}}{\leq} \log(2n) \cdot r_j \cdot \ell_{K_{\mathrm{sym}}}. \tag{2}
|
| 467 |
+
$$
|
| 468 |
+
|
| 469 |
+
Then using the triangle inequality for the covering radius we bound
|
| 470 |
+
|
| 471 |
+
$$
|
| 472 |
+
\begin{array}{rl} \mu(\Lambda_i, K_{\mathrm{sym}} \cap \operatorname{span}(\Lambda_i)) & \stackrel{\text{Lem 28}}{\leq} \sum_{j=1}^{i} \mu\left(\Lambda_j / \Lambda_{j-1}, K_j\right) \\ & \stackrel{(2)}{\lesssim} \log(2n) \cdot \ell_{K_{\mathrm{sym}}} \cdot \sum_{j=1}^{i} r_j \\ & \lesssim \log(2n) \cdot \ell_{K_{\mathrm{sym}}} \cdot r_i, \end{array}
|
| 473 |
+
$$
|
| 474 |
+
|
| 475 |
+
using in the last step that $r_1 < \ldots < r_i$ and $r_j \leq \frac{1}{2} r_{j+2}$ for all $j$ .
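To spell out the last step (our own one-line calculation, using only the separation property quoted above): splitting the sum into indices of the same parity and applying $r_j \leq \frac{1}{2} r_{j+2}$ along each chain gives

$$
\sum_{j=1}^{i} r_j = \underbrace{\big(r_i + r_{i-2} + r_{i-4} + \ldots\big)}_{\leq\, r_i (1 + \frac{1}{2} + \frac{1}{4} + \ldots)\, \leq\, 2 r_i} + \underbrace{\big(r_{i-1} + r_{i-3} + \ldots\big)}_{\leq\, 2 r_{i-1}\, \leq\, 2 r_i} \leq 4 r_i.
$$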
|
| 476 |
+
|
| 477 |
+
In the following we abbreviate $d_{\geq i} \coloneqq \sum_{j = i}^{k}d_{j}$ .
|
| 478 |
+
|
| 479 |
+
Claim II. For any $i \in \{1, \dots, k\}$ one has $\mu_{KL}(\Lambda, K) \gtrsim \frac{r_i}{\log(2n)} \cdot \left(\frac{d_{\geq i}}{n}\right)^7 \cdot \ell_{K_{sym}}$ .
|
| 480 |
+
|
| 481 |
+
Proof of Claim II. We choose the subspace $W \coloneqq \operatorname{span}(\Lambda_{i-1})^\perp$ as witness and note that $\Pi_W(\Lambda) = \Lambda / \Lambda_{i-1}$ . Abbreviating $d \coloneqq \dim(W) = \operatorname{rank}(\Lambda / \Lambda_{i-1}) = d_{\geq i}$ we have
|
| 482 |
+
|
| 483 |
+
$$
|
| 484 |
+
\det \left(\Lambda / \Lambda_ {i - 1}\right) ^ {1 / d} = \left(\prod_ {j = i} ^ {k} r _ {j} ^ {d _ {j}}\right) ^ {1 / \sum_ {j = i} ^ {k} d _ {j}} \geq r _ {i}, \tag {3}
|
| 485 |
+
$$
|
| 486 |
+
|
| 487 |
+
where the middle expression denotes a geometric average of values $r_i < r_{i+1} < \ldots < r_k$ . Then lower bounding the covering radius proxy with the witness $W$ gives
|
| 488 |
+
|
| 489 |
+
$$
|
| 490 |
+
\begin{array}{rl} \mu_{KL}(\Lambda, K) & \geq \left(\frac{\det(\Pi_W(\Lambda))}{\operatorname{Vol}_d(\Pi_W(K))}\right)^{1/d} \\ & \stackrel{(3)}{\geq} \frac{r_i}{\operatorname{Vol}_d(\Pi_W(K))^{1/d}} \\ & \stackrel{\text{Lem 25}}{\gtrsim} r_i \cdot \left(\frac{d}{n}\right)^6 \cdot \frac{d}{\ell_{K_{\mathrm{sym}}^{\circ}}} \stackrel{\ell\text{-position}}{\gtrsim} \frac{r_i}{\log(2n)} \cdot \left(\frac{d}{n}\right)^7 \cdot \ell_{K_{\mathrm{sym}}}, \end{array}
|
| 491 |
+
$$
|
| 492 |
+
|
| 493 |
+
using $\ell_{K_{\mathrm{sym}}}\cdot \ell_{K_{\mathrm{sym}}^{\circ}}\lesssim n\log (2n)$ in the last step.
|
| 494 |
+
|
| 495 |
+
Combining Claim I and Claim II with the same index $i$ gives
|
| 496 |
+
|
| 497 |
+
$$
|
| 498 |
+
\mu(\Lambda_i, K_{\mathrm{sym}} \cap \operatorname{span}(\Lambda_i)) \lesssim \log^2(2n) \cdot \left(\frac{n}{d_{\geq i}}\right)^7 \cdot \mu_{KL}(\Lambda, K).
|
| 499 |
+
$$
|
| 500 |
+
|
| 501 |
+
Now, let $i^* \in \{1, \dots, k\}$ be the minimal index so that $\mathrm{rank}(\Lambda_{i^*}) \geq \frac{n}{2}$ . Then $d_{\geq i^*} \geq \frac{n}{2}$ by minimality. Hence $\Lambda' := \Lambda_{i^*}$ satisfies the claim.
|
| 502 |
+
|
| 503 |
+
# 4.2 Completing the main proof
|
| 504 |
+
|
| 505 |
+
Using Proposition 33 we can finish the proof of our main theorem.
|
| 506 |
+
|
| 507 |
+
Proof of Theorem 2. Consider a full rank lattice $\Lambda \subseteq \mathbb{R}^n$ and a convex body $K \subseteq \mathbb{R}^n$ . We will prove by induction over $n$ that
|
| 508 |
+
|
| 509 |
+
$$
|
| 510 |
+
\mu (\Lambda , K) \leq C _ {0} \log^ {3} (2 n) \cdot \mu_ {K L} (\Lambda , K),
|
| 511 |
+
$$
|
| 512 |
+
|
| 513 |
+
where $C_0 \geq 1$ is the constant from Proposition 33. The claim is true for $n = 1$ , hence assume $n \geq 2$ from now on. The claim is invariant under translations of $K$ , hence we may assume that $b(K) = \mathbf{0}$ . Let $\Lambda' \subseteq \Lambda$ be the primitive sublattice from Prop 33 and set $W := \operatorname{span}(\Lambda')$ . Then
|
| 514 |
+
|
| 515 |
+
$$
|
| 516 |
+
\begin{array}{rl} \mu(\Lambda, K) & \stackrel{\text{Lem 27}}{\leq} \mu(\Lambda \cap W, K \cap W) + \mu(\Pi_{W^{\perp}}(\Lambda), \Pi_{W^{\perp}}(K)) \\ & \stackrel{K \supseteq K_{\mathrm{sym}}}{\leq} \mu(\Lambda \cap W, K_{\mathrm{sym}} \cap W) + \mu(\Pi_{W^{\perp}}(\Lambda), \Pi_{W^{\perp}}(K)) \\ & \stackrel{\text{Prop 33 + ind.}}{\leq} C_0 \log^2(2n) \cdot \mu_{KL}(\Lambda, K) + C_0 \log^3(2\underbrace{\dim(W^{\perp})}_{\leq n/2}) \cdot \underbrace{\mu_{KL}(\Pi_{W^{\perp}}(\Lambda), \Pi_{W^{\perp}}(K))}_{\leq \mu_{KL}(\Lambda, K)} \\ & \stackrel{\text{Lem 29}}{\leq} C_0 \underbrace{\log^2(2n) \cdot \left(1 + \log(n)\right)}_{= \log^3(2n)} \cdot \mu_{KL}(\Lambda, K). \end{array}
|
| 517 |
+
$$
|
| 518 |
+
|
| 519 |
+
We should point out that Regev and Stephens-Davidowitz [RS17] prove that in the Euclidean case one has $\mu (\Lambda ,B_2^n)\leq O(\log^{3 / 2}(2n))\cdot \mu_{KL}(\Lambda ,B_2^n)$. Our proof could be seen as a generalization of their argument in the sense that [RS17] also relate both notions of covering radii to the quantities $r_i$ and $d_{i}$ as defined in Prop 33 by proving that
|
| 520 |
+
|
| 521 |
+
$$
|
| 522 |
+
\mu (\Lambda , B _ {2} ^ {n}) \leq O (\log (2 n)) \cdot \sqrt {\sum_ {i = 1} ^ {k} d _ {i} r _ {i} ^ {2}} \leq O (\log^ {3 / 2} (2 n)) \cdot \mu_ {K L} (\Lambda , B _ {2} ^ {n}).
|
| 523 |
+
$$
|
| 524 |
+
|
| 525 |
+
On the other hand, for them the "standard" canonical filtration suffices and they do not require an inductive step. Implicitly, our induction causes $O(\log(2n))$ many re-centering and rescaling operations using the result of Figiel, Tomczak-Jaegermann and Pisier (Theorem 18). This circumvents the issue that the covering radius might be dominated by a subspace of dimension $d$ with $d \ll n$, which may not affect the $\ell$-position of the body sufficiently. In that case the induction implicitly contains an iteration in which $d$ is relatively large compared to the current ambient dimension. It may also be instructive to reconsider the proof of Prop 33 in the case that $K = B_2^n$. Then in (2), we would obtain the inequality $\mu(\Lambda_j / \Lambda_{j-1}, K_j) \lesssim \log(2n) \cdot r_j \cdot \sqrt{n}$ while actually the much stronger bound of $\mu(\Lambda_j / \Lambda_{j-1}, K_j) \lesssim \log(2n) \cdot r_j \cdot \sqrt{d_j}$ holds. The trick is that, with a well-separated filtration, the resulting loss can be bounded efficiently.
|
| 526 |
+
|
| 527 |
+
# 5 Finding the subspace $W$ in single-exponential time
|
| 528 |
+
|
| 529 |
+
In this section, we prove Theorem 3, which guarantees that a suitable subspace $W$ can be found in time $2^{O(n)}$ at the expense of an additional logarithmic factor in the approximation guarantee. It will be convenient to first apply a linear transformation to well-scale $K$. This can be done in polynomial time and is a standard argument, see Lemma 40 for details. Hence, for us it suffices to prove the following:
|
| 530 |
+
|
| 531 |
+
Theorem 34. Given a full rank lattice $\Lambda \subseteq \mathbb{R}^n$ and a convex body $K \subseteq \mathbb{R}^n$ such that $B_2^n \subseteq K \subseteq (n + 1)^{3/2} B_2^n$ , there exists a randomized $2^{O(n)}$ -time algorithm to compute a subspace $W \subseteq \mathbb{R}^n$ with $d := \dim(W)$ so that
|
| 532 |
+
|
| 533 |
+
$$
|
| 534 |
+
\mu(\Lambda, K) \lesssim \log^4(2n) \cdot \left(\frac{\det(\Pi_W(\Lambda))}{\operatorname{Vol}_d(\Pi_W(K))}\right)^{1/d}.
|
| 535 |
+
$$
|
| 536 |
+
|
| 537 |
+
The main technical tool will be the following result of Dadush, which is the only step in the algorithm which takes exponential time:
|
| 538 |
+
|
| 539 |
+
Theorem 35 (Theorem 6.4. in [Dad19]). Given a lattice $\Lambda \subseteq \mathbb{R}^n$ one can compute an $O(\log(2n))$ -stable filtration of $\Lambda$ in $2^{O(n)}$ time with probability at least $1 - 2^{-\Omega(n)}$ .
|
| 540 |
+
|
| 541 |
+
The following algorithm mimics the proof in Section 4:
|
| 542 |
+
|
| 543 |
+
FIND-SUBSPACE
|
| 544 |
+
Input: Convex body $K \subseteq \mathbb{R}^n$ so that $B_2^n \subseteq K \subseteq (n + 1)^{3/2} B_2^n$ , full rank lattice $\Lambda \subseteq \mathbb{R}^n$
|
| 545 |
+
Output: Subspace $W \subseteq \mathbb{R}^n$ satisfying Theorem 34
|
| 546 |
+
|
| 547 |
+
(1) Compute an approximate barycenter $\tilde{x}$ such that $\|b(K) - \tilde{x}\|_2 \leq 1$
|
| 548 |
+
(2) Shift $K^{\prime}\coloneqq K - \tilde{x}$
|
| 549 |
+
(3) Set $K_{\mathrm{sym}} \coloneqq K' \cap (-K')$ and compute an invertible linear map $T$ so that
|
| 550 |
+
|
| 551 |
+
$$
|
| 552 |
+
\ell_{T(K_{\mathrm{sym}})} \cdot \ell_{(T(K_{\mathrm{sym}}))^{\circ}} \leq C \cdot n \log(2n)
|
| 553 |
+
$$
|
| 554 |
+
|
| 555 |
+
(4) Set $K^{\prime}\gets T(K)$ and $\Lambda^{\prime}\gets T(\Lambda)$
|
| 556 |
+
(5) Compute an $O(\log (2n))$ -stable filtration $\{\mathbf{0}\} = \Lambda_0\subset \ldots \subset \Lambda_k = \Lambda '$
|
| 557 |
+
(6) Refine it into a well-separated $O(\log (2n))$-stable filtration $\{\mathbf{0}\} = \Lambda_0' \subset \ldots \subset \Lambda_{k'}' = \Lambda'$
|
| 558 |
+
(7) Set $i^*$ as the minimal index with $\mathrm{rank}(\Lambda_{i^*}^{\prime})\geq \frac{n}{2}$
|
| 559 |
+
(8) Set $W_{i^{*}}\coloneqq \operatorname {span}(\Lambda_{i^{*}-1}^{\prime})^{\perp}$
|
| 560 |
+
(9) Recursively call $W_{\Pi} \coloneqq \mathrm{FIND-SUBSPACE}(\Pi_{\mathrm{span}(\Lambda_{i^{*}}^{\prime})^{\perp}}(K^{\prime}), \Pi_{\mathrm{span}(\Lambda_{i^{*}}^{\prime})^{\perp}}(\Lambda^{\prime}))$
|
| 561 |
+
(10) Return $W \coloneqq T^{-1}W'$ where $W' \coloneqq \operatorname*{argmax}_{W \in \{W_{i^*}, W_{\Pi}\}} \left\{ \left( \frac{\operatorname*{det}(\Pi_W(\Lambda'))}{\operatorname{Vol}_{\dim(W)}(\Pi_W(K'))} \right)^{1/\dim(W)} \right\}$ .
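For orientation, the control flow of FIND-SUBSPACE can be summarized by the following Python-style sketch. Every geometric subroutine (approximate barycenter, $\ell$-position map, stable filtration and its well-separated refinement, projections, and the determinant/volume ratio) is an assumed black box; all names are placeholders and this is not an implementation of the actual $2^{O(n)}$-time routines.

```python
# Structural sketch of FIND-SUBSPACE; every helper on `oracles` is an assumed
# black box corresponding to the numbered steps above.

def find_subspace(K, lattice, oracles):
    n = lattice.rank
    if n <= 1:
        return oracles.whole_space(lattice)                    # trivial base case (sketch)

    x_tilde = oracles.approx_barycenter(K)                     # (1), Theorem 37
    K_shift = K.translate(-x_tilde)                            # (2)
    K_sym = K_shift.intersect(K_shift.negate())                # (3)
    T = oracles.ell_position_map(K_sym)                        # (3), ell-position
    K_p, L_p = T.apply(K_shift), T.apply(lattice)              # (4)

    filt = oracles.stable_filtration(L_p)                      # (5), Theorem 35
    filt = oracles.well_separated_refinement(filt)             # (6), Theorem 13

    i_star = min(i for i, L_i in enumerate(filt) if L_i.rank >= n / 2)    # (7)
    W_section = filt[i_star - 1].span().orthogonal_complement()           # (8)

    P = filt[i_star].span().orthogonal_complement()            # (9): recurse on the projection
    W_proj = find_subspace(oracles.project(K_p, P), oracles.project(L_p, P), oracles)

    # (10): keep the candidate with the larger determinant/volume ratio, undo T
    best = max((W_section, W_proj),
               key=lambda W: oracles.det_vol_ratio(L_p, K_p, W))
    return T.inverse().apply(best)
```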
|
| 562 |
+
|
| 563 |
+
We will need several volume computations in the algorithm, for which we use the following theorem:
|
| 564 |
+
|
| 565 |
+
Theorem 36 ([KLS97]). Given a convex body $K \subseteq \mathbb{R}^n$ with $r \cdot B_2^n \subseteq K \subseteq R \cdot B_2^n$ , there exists a randomized algorithm which outputs a positive number $\zeta$ with $\mathrm{Vol}_n(K) / \zeta \in [1 - \varepsilon, 1 + \varepsilon]$ . The runtime is polynomial in $n$ , $1 / \varepsilon$ , $\log(1 / r)$ and $\log(R)$ .
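To make the interface of this theorem concrete, here is a naive rejection-sampling estimator written in Python. It is emphatically not the random-walk algorithm of [KLS97] (its sample complexity blows up exponentially in $n$); it only illustrates what "output $\zeta$ with $\mathrm{Vol}_n(K)/\zeta \in [1-\varepsilon, 1+\varepsilon]$" means operationally, given a membership oracle for $K$ and an outer ball $R B_2^n$.

```python
import numpy as np
from math import pi, gamma

def naive_volume_estimate(membership, R, n, samples=200_000, rng=None):
    """Rejection-sampling estimate of Vol_n(K) for a body K inside R * B_2^n,
    given only a membership oracle `membership(x) -> bool`.  Accuracy degrades
    exponentially with n; for illustration only (not the [KLS97] algorithm)."""
    rng = np.random.default_rng() if rng is None else rng
    # Uniform samples from R*B_2^n: uniform direction, radius R * U^(1/n).
    dirs = rng.normal(size=(samples, n))
    dirs /= np.linalg.norm(dirs, axis=1, keepdims=True)
    radii = R * rng.random(samples) ** (1.0 / n)
    pts = dirs * radii[:, None]
    frac_inside = np.mean([bool(membership(x)) for x in pts])
    ball_volume = pi ** (n / 2) / gamma(n / 2 + 1) * R ** n
    return ball_volume * frac_inside

# Example: the cube [-1/2, 1/2]^3 sits inside (sqrt(3)/2) * B_2^3 and has volume 1.
est = naive_volume_estimate(lambda x: np.all(np.abs(x) <= 0.5), R=np.sqrt(3) / 2, n=3)
```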
|
| 566 |
+
|
| 567 |
+
In fact, [KLS97] also computes an approximation to the barycenter of $K$ :
|
| 568 |
+
|
| 569 |
+
Theorem 37 ([KLS97]). Given a convex body $K \subseteq \mathbb{R}^n$ with $B_2^n \subseteq K \subseteq (n + 1)^{3/2} \cdot B_2^n$ and $\delta > 0$ , there exists a randomized algorithm with running time polynomial in $n$ and $\frac{1}{\delta}$ , which returns an approximate barycenter $\tilde{x}$ such that $\| b(K) - \tilde{x} \|_2 \leq \delta$ .
|
| 570 |
+
|
| 571 |
+
Now, we can prove the main result for this section:
|
| 572 |
+
|
| 573 |
+
Proof of Theorem 34. First we justify the running time of $2^{O(n)}$, later we discuss the approximation guarantee. We first apply Theorem 37 to compute an approximate barycenter $\tilde{x}$ and shift $K' := K - \tilde{x}$. Theorem 35 yields a filtration for step (5), which can be refined into a well-separated filtration by Theorem 13. Step (10) requires computation of determinants, which can be done in polynomial time via Gaussian elimination, and the volume of a convex body, which can also be done in randomized polynomial time using Theorem 36. The runtime $T(n)$ of FIND-SUBSPACE satisfies the recursion $T(n) \leq 2^{O(n)} + T(n/2)$, which can be resolved to $T(n) \leq 2^{O(n)}$.
|
| 574 |
+
|
| 575 |
+
Next, we justify the approximation guarantee. From the same arguments as in Sections 3 and 4 one can see that the returned subspace satisfies
|
| 576 |
+
|
| 577 |
+
$$
|
| 578 |
+
\mu(\Lambda, K) \lesssim \log^4(2n) \cdot \left(\frac{\det(\Pi_W(\Lambda))}{\operatorname{Vol}_d(\Pi_W(K))}\right)^{1/d},
|
| 579 |
+
$$
|
| 580 |
+
|
| 581 |
+
where we have taken into account that we pay an additional $\log (2n)$ factor from Proposition 31 as our filtration is only guaranteed to be $O(\log (2n))$ -stable. Another subtle point is that we are using only an approximate barycenter. Hence it remains to generalize Proposition 24 and show that the approximation costs us at most another constant factor:
|
| 582 |
+
|
| 583 |
+
Claim. Let $K \subseteq \mathbb{R}^n$ be a convex body so that $B_2^n \subseteq K$ and $\| b(K) \|_2 \leq 1$ . Let $F \subseteq \mathbb{R}^n$ be a $d$ -dimensional subspace. Then denoting $K_{\mathrm{sym}} := K \cap (-K)$ ,
|
| 584 |
+
|
| 585 |
+
$$
|
| 586 |
+
\operatorname{Vol}_d(\Pi_F(K))^{1/d} \lesssim \left(\frac{n}{d}\right)^5 \cdot \log\left(\frac{en}{d}\right)^2 \cdot \operatorname{Vol}_d(\Pi_F(K_{\mathrm{sym}}))^{1/d}.
|
| 587 |
+
$$
|
| 588 |
+
|
| 589 |
+
Proof of Claim. By Proposition 24, we know that denoting $\tilde{K}_{\mathrm{sym}} \coloneqq (K - b(K)) \cap$
|
| 590 |
+
|
| 591 |
+
$(-K + b(K))$ , we have
|
| 592 |
+
|
| 593 |
+
$$
|
| 594 |
+
\operatorname{Vol}_d(\Pi_F(K))^{1/d} \lesssim \left(\frac{n}{d}\right)^5 \cdot \log\left(\frac{en}{d}\right)^2 \cdot \operatorname{Vol}_d(\Pi_F(\tilde{K}_{\mathrm{sym}}))^{1/d}.
|
| 595 |
+
$$
|
| 596 |
+
|
| 597 |
+
Since $-b(K) \in B_2^n \subseteq K$, it follows that $K - b(K) \subseteq K + K = 2K$, so that $\tilde{K}_{\mathrm{sym}} \subseteq 2K_{\mathrm{sym}}$ and $\mathrm{Vol}_d(\Pi_F(\tilde{K}_{\mathrm{sym}}))^{1/d} \leq 2 \cdot \mathrm{Vol}_d(\Pi_F(K_{\mathrm{sym}}))^{1/d}$.
|
| 598 |
+
|
| 599 |
+
# 6 Integer programming in time $(\log (2n))^{O(n)}$
|
| 600 |
+
|
| 601 |
+
Next, we show that integer programming can be solved in time $(\log(2n))^{O(n)}$. In fact, this is a known consequence of Theorem 3. We do not claim any original contribution for this section, but we reproduce the arguments of Dadush [Dad12] to be self-contained. As is common in the literature, we only state the dependence of the running times on $n$; all running times that involve a convex set $K \subseteq rB_2^n$ and a lattice $\Lambda = \Lambda(B)$ additionally contain a factor, which we do not state explicitly, that is polynomial in $\log(r)$ and in the encoding length of $B$.
|
| 602 |
+
|
| 603 |
+
First, we describe the intuition behind Dadush's algorithm. Consider a convex body $K \subseteq \mathbb{R}^n$ and a lattice $\Lambda \subseteq \mathbb{R}^n$ ; the goal is to find a point in $K \cap \Lambda$ . We compute a subspace $W \subseteq \mathbb{R}^n$ in time $2^{O(n)}$ that certifies the covering radius $\mu(\Lambda, K)$ up to a factor $\rho(n) := \Theta(\log^4(2n))$ . Consider the points $X := \Pi_W(K) \cap \Pi_W(\Lambda)$ in the projection on $W$ . For each $x \in K \cap \Lambda$ , we also have $\Pi_W(x) \in X$ . Note that the reverse may not be true in the sense that it is entirely possible that $K \cap \Lambda = \varnothing$ while $X \neq \varnothing$ . However, we are guaranteed that all lattice points in $K$ must be in one of the $(n - d)$ -dimensional fibers of the projection, i.e.
|
| 604 |
+
|
| 605 |
+

|
| 606 |
+
|
| 607 |
+
The algorithm enumerates $X$ and then recurses on all the fibers. In order for this algorithm to be efficient we need to (i) bound the cardinality $|X|$ and (ii) be able to enumerate $X$ . For (ii), note that it is possible that $W = \mathbb{R}^n$ and hence we would not gain anything by treating $\Pi_W(K) \cap \Pi_W(\Lambda)$ as a general integer programming problem.
|
| 608 |
+
|
| 609 |
+
For convex bodies $A, B \subseteq \mathbb{R}^n$ , the covering number $N(A, B) \coloneqq \min \{N \mid \exists x_1, \ldots, x_N \in \mathbb{R}^n : A \subseteq \bigcup_{i=1}^{N} (x_i + B)\}$ is the minimum number of translates of $B$ needed to cover $A$ . For a convex body $K \subseteq \mathbb{R}^n$ and a full rank lattice $\Lambda \subseteq \mathbb{R}^n$ we define
|
| 610 |
+
|
| 611 |
+
$$
|
| 612 |
+
G (\Lambda , K) := \max _ {x \in \mathbb {R} ^ {n}} | (K + x) \cap \Lambda |.
|
| 613 |
+
$$
|
| 614 |
+
|
| 615 |
+
In words, $G(\Lambda, K)$ denotes the maximum number of lattice points that any shift of $K$ contains. Note that even if $K \cap \Lambda = \varnothing$ , $G(\Lambda, K)$ might still be arbitrarily large. However, algorithmically the quantity $G(\Lambda, K)$ is useful:
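As a toy illustration of the definition (not used anywhere in the arguments), the following Python snippet approximates $G(\mathbb{Z}^n, K)$ for a body given by a membership oracle: it scans shifts on a grid in $[0,1)^n$ (the count is periodic in the shift) and enumerates integer points in a bounding box, which is of course only feasible in very small dimension.

```python
import itertools
import numpy as np

def shifted_count(membership, shift, box_radius, n):
    """|(K + shift) ∩ Z^n| for K given by a membership oracle, assuming that
    K + shift is contained in [-box_radius, box_radius]^n."""
    grid = range(-box_radius, box_radius + 1)
    return sum(1 for z in itertools.product(grid, repeat=n)
               if membership(np.array(z, dtype=float) - np.asarray(shift)))

def approx_G(membership, box_radius, n, ticks=10):
    """Approximate G(Z^n, K) = max_x |(K + x) ∩ Z^n| by scanning shifts on a
    grid in [0,1)^n (the count is periodic in the shift modulo Z^n)."""
    shifts = np.linspace(0.0, 1.0, ticks, endpoint=False)
    return max(shifted_count(membership, s, box_radius, n)
               for s in itertools.product(shifts, repeat=n))

# Example: a Euclidean disk of radius 1.2 in the plane.
disk = lambda x: float(np.dot(x, x)) <= 1.2 ** 2
print(approx_G(disk, box_radius=3, n=2))
```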
|
| 616 |
+
|
| 617 |
+
Theorem 38 ([DPV11, DV13]). Given a convex body $K \subseteq \mathbb{R}^n$ and a full rank lattice $\Lambda \subseteq \mathbb{R}^n$ , one can enumerate all points in $K \cap \Lambda$ in deterministic time $2^{O(n)} \cdot G(\Lambda, K)$ .
|
| 618 |
+
|
| 619 |
+
We briefly sketch the algorithm behind Theorem 38: We use the method of Dadush and Vempala [DV13] to compute an $M$-ellipsoid $E$ of $K$ which has the property that $N(K, E), N(E, K) \leq 2^{O(n)}$. Their deterministic algorithm takes time $2^{O(n)}$. In particular this means that $2^{-\Theta(n)} \leq \frac{G(\Lambda, K)}{G(\Lambda, E)} \leq 2^{\Theta(n)}$. Next, we compute the translates $x_1, \ldots, x_N$ with $N \leq 2^{O(n)}$ so that $K \subseteq \bigcup_{i=1}^{N} (x_i + E)$. Then we can use the following argument by Dadush, Peikert and Vempala [DPV11] to enumerate all lattice points in $(x_i + E) \cap \Lambda$. After applying a linear transformation, it suffices to compute all points in $(t + B_2^n) \cap \Lambda$ for $t \in \mathbb{R}^n$. Let $R \subseteq \Lambda \setminus \{\mathbf{0}\}$ be the Voronoi-relevant vectors, which are all the vectors that define a facet of the Voronoi cell of $\Lambda$. It is known that $|R| \leq 2^{n+1}$ and moreover the set $R$ can be computed in time $2^{O(n)}$ by the algorithm of [MV13]. Next, consider the graph $H = (\Lambda, E)$ with edges $E = \{\{x, y\} : x, y \in \Lambda \text{ and } x - y \in R\}$. Then it follows from the work of [MV13] that the subgraph induced by $\Lambda \cap (t + B_2^n)$ is connected. Hence, one can compute the closest lattice point to $t$ (again using [MV13]) and then traverse the subgraph.
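The graph-traversal step of this sketch can be written out as follows. The set $R$ of Voronoi-relevant vectors and the closest-vector oracle are taken as given inputs (in the text they are supplied by the $2^{O(n)}$-time machinery of [MV13]), and the snippet relies on the quoted fact that the subgraph induced by $\Lambda \cap (t + B_2^n)$ is connected.

```python
from collections import deque
import numpy as np

def enumerate_ball_points(t, relevant_vectors, closest_lattice_point, radius=1.0):
    """All lattice points in t + radius*B_2^n, assuming as inputs:
      - relevant_vectors: the Voronoi-relevant vectors of the lattice,
      - closest_lattice_point(t): a CVP oracle returning a closest lattice point.
    BFS along Voronoi-relevant edges from the closest point; connectedness of
    the induced subgraph guarantees every point in the ball is reached."""
    start = np.asarray(closest_lattice_point(t), dtype=float)
    if np.linalg.norm(start - t) > radius:
        return []                                  # the ball contains no lattice point
    seen = {tuple(np.round(start, 9))}
    queue, found = deque([start]), [start]
    while queue:
        x = queue.popleft()
        for r in relevant_vectors:
            y = x + r
            key = tuple(np.round(y, 9))
            if key not in seen and np.linalg.norm(y - t) <= radius:
                seen.add(key)
                queue.append(y)
                found.append(y)
    return found

# Toy usage for Z^2: the Voronoi-relevant vectors are (+-1,0),(0,+-1) and CVP is rounding.
rel = [np.array(v, dtype=float) for v in [(1, 0), (-1, 0), (0, 1), (0, -1)]]
pts = enumerate_ball_points(np.array([0.3, 0.4]), rel, lambda t: np.round(t), radius=1.0)
```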
|
| 620 |
+
|
| 621 |
+
Next, we require an upper bound on $G(\Lambda, K)$ in terms of the volume of $K$ and density of $\Lambda$ . Surprisingly, such an upper bound exists if we additionally control the covering radius. We reproduce Dadush's proof as the argument is key to understanding the algorithm:
|
| 622 |
+
|
| 623 |
+
Lemma 39. For any full rank lattice $\Lambda \subseteq \mathbb{R}^n$ and any convex body $K\subseteq \mathbb{R}^n$ one has
|
| 624 |
+
|
| 625 |
+
$$
|
| 626 |
+
G(\Lambda, K) \leq 2^n \max\{\mu(\Lambda, K)^n, 1\} \cdot \frac{\operatorname{Vol}_n(K)}{\det(\Lambda)}.
|
| 627 |
+
$$
|
| 628 |
+
|
| 629 |
+
Proof. After a linear transformation and scaling by $\max \{\mu (\Lambda ,K),1\}$ , the statement is equivalent to the following simpler claim:
|
| 630 |
+
|
| 631 |
+
Claim. For any convex body $K \subseteq \mathbb{R}^n$ with $\mu(\mathbb{Z}^n, K) \leq 1$ and any $x \in \mathbb{R}^n$ one has $|K \cap (x + \mathbb{Z}^n)| \leq 2^n \mathrm{Vol}_n(K)$ .
|
| 632 |
+
|
| 633 |
+
Proof of Claim. The claim is invariant under translating $K$, hence we may assume that $\mathbf{0} \in K$. Let $\equiv$ be the equivalence relation on $K$ defined by $x \equiv y \Leftrightarrow x - y \in \mathbb{Z}^n$. We define a set $V \subseteq K$ by selecting one element from each equivalence class w.r.t. $\equiv$. It would not matter much which element is selected, but let us make the canonical choice of the lexicographically minimal one. In other words, we choose
|
| 634 |
+
|
| 635 |
+
$$
|
| 636 |
+
V = \left\{x \in K \mid x \leq_{\mathrm{lex}} y \;\;\forall y \in (x + \mathbb{Z}^n) \cap K \right\},
|
| 637 |
+
$$
|
| 638 |
+
|
| 639 |
+
where $\leq_{\mathrm{lex}}$ is the standard lexicographical ordering.
|
| 640 |
+
|
| 641 |
+

|
| 642 |
+
|
| 643 |
+
As we select at most one element from each equivalence class, we certainly have $\operatorname{Vol}_n(V) \leq 1$ . On the other hand, $\mu(\mathbb{Z}^n, K) \leq 1$ implies that for all $x \in \mathbb{R}^n$ one has $(x + \mathbb{Z}^n) \cap K \neq \emptyset$ . That in turn means that every equivalence class has a member in $K$ and so $\operatorname{Vol}_n(V) \geq 1$ . Together this gives $\operatorname{Vol}_n(V) = 1$ . Next, we note that by construction all translates $x + V$ with $x \in \mathbb{Z}^n$ are disjoint. Moreover, for $x \in K \cap \mathbb{Z}^n$ one has that $x + V \subseteq K + K = 2K$ . Then
|
| 644 |
+
|
| 645 |
+
$$
|
| 646 |
+
|K\cap \mathbb{Z}^{n}| = \sum_{x\in K\cap \mathbb{Z}^{n}}\underbrace{\operatorname{Vol}_{n}(x + V)}_{= 1}\stackrel {\text{disj.}}{=}\operatorname{Vol}_{n}\bigg(\bigcup_{x\in K\cap \mathbb{Z}^{n}}(x + V)\bigg)\leq \operatorname{Vol}_{n}(2K),
|
| 647 |
+
$$
|
| 648 |
+
|
| 649 |
+
which gives the claim.
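A tiny numerical sanity check of the claim (our own illustration, not part of the proof): for the unit Euclidean disk $K$ in the plane one has $\mu(\mathbb{Z}^2, K) = 1/\sqrt{2} \leq 1$, and a brute-force scan over shifts confirms $|K \cap (x + \mathbb{Z}^2)| \leq 2^2 \cdot \mathrm{Vol}_2(K) = 4\pi$.

```python
import itertools
import numpy as np

# Brute-force check for K = unit Euclidean disk in R^2 (mu(Z^2, K) = 1/sqrt(2) <= 1):
# no shift of the disk contains more than 2^2 * Vol_2(K) = 4*pi ~ 12.57 integer
# points (the true maximum is 5, attained when the center is a lattice point).
def points_in_shifted_disk(shift):
    return sum(1 for z in itertools.product(range(-3, 4), repeat=2)
               if (z[0] - shift[0]) ** 2 + (z[1] - shift[1]) ** 2 <= 1.0)

worst = max(points_in_shifted_disk(s)
            for s in itertools.product(np.linspace(0.0, 1.0, 21), repeat=2))
assert worst <= 4 * np.pi
```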
|
| 650 |
+
|
| 651 |
+

|
| 652 |
+
|
| 653 |
+
One technicality we have to deal with is that Theorem 3 requires a lower bound on the inradius of $K$ . Hence we run a preprocessing step: if there is no suitable lower bound for the inradius, then the lattice points of $K$ are all contained in an easy-to-find hyperplane.
|
| 654 |
+
|
| 655 |
+
Lemma 40. Given a compact convex set $K \subseteq rB_2^n$ and a lattice $\Lambda = \Lambda(B)$, in time polynomial in $n$, $\log(r)$ and the encoding length of $B$ one can find at least one of the following:
|
| 656 |
+
|
| 657 |
+
(a) An ellipsoid $E$ and center $c$ so that $c + \frac{1}{(n + 1)^{3 / 2}} E\subseteq K\subseteq c + E.$
|
| 658 |
+
(b) A vector $a \in \mathbb{R}^n \setminus \{\mathbf{0}\}$ and $\beta \in \mathbb{R}$ so that $K \cap \Lambda \subseteq \{x \in \mathbb{R}^n \mid \langle a, x \rangle = \beta\}$ .
|
| 659 |
+
|
| 660 |
+
Proof. We may assume that $\mathrm{rank}(\Lambda) = n$ , otherwise any $a$ orthogonal to $\operatorname{span}(\Lambda)$ will satisfy (b). Next, we use a variant of the ellipsoid method from [GLS88] (see also Lemma 2.5.10 in [Dad12]) to find a pair $(c,E)$ in time polynomial in $n$ , $\log(r)$ and $\log(\frac{1}{\varepsilon})$ so that either (a) holds, or $K \subseteq c + E$ and $\operatorname{Vol}_n(E) \leq \varepsilon$ . Suppose the latter happens. Then using Minkowski's Theorem (Theorem 10) in (*) and the Blaschke-Santaló-Bourgain-Milman Theorem (Theorem 23) in $(**)$ we obtain
|
| 661 |
+
|
| 662 |
+
$$
|
| 663 |
+
\lambda_1(\Lambda^*, E^{\circ}) \stackrel{(*)}{\lesssim} \left(\frac{\det(\Lambda^*)}{\operatorname{Vol}_n(E^{\circ})}\right)^{1/n} \stackrel{(**)}{\lesssim} \left(\frac{\operatorname{Vol}_n(E)}{\det(\Lambda) \cdot v_n^2}\right)^{1/n} \lesssim n \cdot \left(\frac{\varepsilon}{\det(\Lambda)}\right)^{1/n} \leq \frac{1}{2} \cdot 2^{-n/2},
|
| 664 |
+
$$
|
| 665 |
+
|
| 666 |
+
for a suitable choice of $\varepsilon > 0$. Then the LLL-algorithm [LLL82] can find a dual lattice vector $a \in \Lambda^* \setminus \{\mathbf{0}\}$ with $\|a\|_{E^\circ} \leq 2^{n/2} \cdot \lambda_1(\Lambda^*, E^\circ) \leq \frac{1}{2}$. That vector $a$, with $\beta$ chosen as the integer closest to $\langle a, c \rangle$, will satisfy (b), since every $x \in K \cap \Lambda$ has $\langle a, x \rangle \in \mathbb{Z}$ and $|\langle a, x - c \rangle| \leq \|a\|_{E^\circ} \leq \frac{1}{2}$.
|
| 667 |
+
|
| 668 |
+
We are now ready to state the complete algorithm. As mentioned earlier, we denote $\rho(n) \coloneqq \Theta(\log^4(2n))$ as the approximation factor from Theorem 3.
|
| 669 |
+
|
| 670 |
+
# DADUSH'S ALGORITHM
|
| 671 |
+
|
| 672 |
+
Input: Compact convex set $K \subseteq \mathbb{R}^n$ , lattice $\Lambda \subseteq \mathbb{R}^n$
|
| 673 |
+
|
| 674 |
+
Output: Point $x \in K \cap \Lambda$ or decision that there is none
|
| 675 |
+
|
| 676 |
+
(1) If $n = 1$ , use binary search to find integer multiple of $\lambda_1(\Lambda, [-1, 1])$ in $K$ or certify none exists.
|
| 677 |
+
(2) Use Lemma 40. If case (b) happens, obtain hyperplane $H$ with $K \cap \Lambda \subseteq H$ . Recurse on $\mathrm{DADUSH}(K \cap H, \Lambda \cap H)$ and return the answer.
|
| 678 |
+
(3) Compute a subspace $W \subseteq \mathbb{R}^n$ with $d \coloneqq \dim(W)$ and $R \coloneqq (\frac{\operatorname*{det}(\Pi_W(\Lambda))}{\operatorname{Vol}_d(\Pi_W(K))})^{1/d}$ so that $R \leq \mu(\Lambda, K) \leq \rho(n) \cdot R$ .
|
| 679 |
+
(4) Set $\tilde{K} \coloneqq \min \{\rho(n) \cdot R, 1\} \cdot (K - c) + c$ for some $c \in K$ .
|
| 680 |
+
(5) Compute an $M$ -ellipsoid $E \subseteq W$ for $\Pi_W(\tilde{K})$ .
|
| 681 |
+
(6) Compute $N \leq 2^{O(d)}$ points $x_1, \ldots, x_N \in W$ so that $\Pi_W(\tilde{K}) \subseteq \bigcup_{i=1}^N (x_i + E)$ .
|
| 682 |
+
(7) Compute $X\coloneqq \Pi_W(\tilde{K})\cap \Pi_W(\Lambda) = \left(\bigcup_{i = 1}^{N}((x_i + E)\cap \Pi_W(\Lambda))\right)\cap \Pi_W(\tilde{K}).$
|
| 683 |
+
(8) Recursively call $\mathrm{DADUSH}(\tilde{K}\cap \Pi_W^{-1}(x),\Lambda \cap \Pi_W^{-1}(x))$ for all $x\in X$ and return any found lattice point (if there is any).
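As with FIND-SUBSPACE, the recursive control flow can be summarized in a Python-style sketch. Every geometric subroutine (the Lemma 40 preprocessing, the certifying subspace of Theorem 3, the M-ellipsoid covering and enumeration, fiber restriction) is an assumed black box supplied through `oracles`; the names are placeholders, not an implementation.

```python
# Structural sketch of DADUSH'S ALGORITHM; `oracles` bundles the assumed
# subroutines for steps (1)-(8), none of which is implemented here.

def dadush(K, lattice, oracles, rho):
    n = lattice.rank
    if n == 1:
        return oracles.solve_one_dimensional(K, lattice)          # step (1)

    pre = oracles.preprocess(K, lattice)                          # step (2), Lemma 40
    if pre.hyperplane is not None:                                # case (b)
        return dadush(K.intersect(pre.hyperplane),
                      lattice.intersect(pre.hyperplane), oracles, rho)

    W, R = oracles.certifying_subspace(K, lattice)                # step (3), Theorem 3
    s = min(rho(n) * R, 1.0)                                      # step (4)
    K_tilde = K.scale_about(s, center=K.some_point())

    # steps (5)-(7): M-ellipsoid covering of Pi_W(K_tilde) and enumeration of
    # X = Pi_W(K_tilde) intersected with Pi_W(lattice)
    X = oracles.enumerate_projected_points(K_tilde, lattice, W)

    for x in X:                                                   # step (8)
        hit = dadush(K_tilde.intersect(oracles.fiber(W, x)),
                     lattice.intersect(oracles.fiber(W, x)), oracles, rho)
        if hit is not None:
            return hit
    return None                      # no point of the lattice lies in K
```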
|
| 684 |
+
|
| 685 |
+
Here, to be more informative, we have expanded the blackbox from Theorem 38 into lines (5)-(7). The reader may also note a subtlety here that we have not discussed so far: if $K$ is very large so that $\mu(\Lambda, K) \ll 1$ , then we may shrink $K$ to a smaller body $\tilde{K} \subseteq K$ as long as we ensure that still $\mu(\Lambda, \tilde{K}) \leq 1$ . We can now finish the analysis:
|
| 686 |
+
|
| 687 |
+
Theorem 41. Dadush's algorithm finds a point in $K \cap \Lambda$ in time $(\log(2n))^{O(n)}$ if there is one.
|
| 688 |
+
|
| 689 |
+
Proof. If the algorithm recurses in (2), the claim is clear by induction. So assume otherwise. First we argue correctness of the algorithm. Let $s \coloneqq \min \{\rho(n) \cdot R, 1\} \in (0,1]$ and recall that $\tilde{K} \subseteq K$ is a scaling of $K$ by a factor of $s$. After step (4), the algorithm searches for a lattice point in $\tilde{K}$ rather than in the original body $K$. If $s < 1$, then the covering radius of the shrunk body is $\mu(\Lambda, \tilde{K}) = \frac{1}{\rho(n) \cdot R} \mu(\Lambda, K) \leq 1$. In other words, even though we continue the search in the strictly smaller body $\tilde{K}$, we are still guaranteed that $\tilde{K} \cap \Lambda \neq \emptyset$. Next, we discuss the running time of the algorithm. We estimate that
|
| 690 |
+
|
| 691 |
+
$$
|
| 692 |
+
\begin{array}{rl} G(\Pi_W(\Lambda), \Pi_W(\tilde{K})) & \stackrel{\text{Lem 39}}{\leq} 2^d \max\left\{\mu(\Pi_W(\Lambda), \Pi_W(\tilde{K}))^d, 1\right\} \cdot \frac{\operatorname{Vol}_d(\Pi_W(\tilde{K}))}{\det(\Pi_W(\Lambda))} \\ & \leq 2^d \max\left\{\Big(\underbrace{\frac{\rho(n) R}{s}}_{\geq 1}\Big)^d, 1\right\} \cdot s^d \cdot \underbrace{\frac{\operatorname{Vol}_d(\Pi_W(K))}{\det(\Pi_W(\Lambda))}}_{= R^{-d}} \\ & = 2^d \cdot (\rho(n) R)^d \cdot R^{-d} = (2\rho(n))^d. \end{array}
|
| 693 |
+
$$
|
| 694 |
+
|
| 695 |
+
Here we use that $\mu (\Pi_W(\Lambda),\Pi_W(\tilde{K}))\leq \mu (\Lambda ,\tilde{K}) = \frac{1}{s}\cdot \mu (\Lambda ,K)\leq \frac{\rho(n)\cdot R}{s}$ . Then $|X|\leq G(\Pi_W(\Lambda),\Pi_W(\tilde{K}))\leq 2^d\rho (n)^d$ and by Theorem 38, the computation of $X$ in (5)-(7) takes time $2^{O(d)}\rho (n)^{d}$ . Now, let $T(n)$ be the maximum running time of the algorithm on $n$ -dimensional instances. Then we have the recursion
|
| 696 |
+
|
| 697 |
+
$$
|
| 698 |
+
T(n) \leq \max_{d \in \{1, \dots, n\}} \left\{2^{O(n)} + (O(1) \cdot \rho(n))^d \cdot T(n - d)\right\} \quad \text{and} \quad T(1) = \Theta(1),
|
| 699 |
+
$$
|
| 700 |
+
|
| 701 |
+
which indeed resolves to $T(n) \leq O(\rho(n))^n$ .
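For completeness, here is one way to verify the claimed resolution by induction (our own sketch; the constant $A$ below is not part of the text). Assuming $T(m) \leq (A\rho(m))^m$ for all $m < n$ and using that $\rho(n-d) \leq \rho(n)$,

$$
T(n) \;\leq\; \max_{d} \Big\{ 2^{cn} + (C\rho(n))^{d} \cdot (A\rho(n-d))^{n-d} \Big\} \;\leq\; \underbrace{2^{cn}}_{\leq \frac{1}{2}(A\rho(n))^{n}} + (A\rho(n))^{n} \cdot \underbrace{\max_{d \geq 1}\Big(\frac{C}{A}\Big)^{d}}_{\leq \frac{1}{2} \text{ for } A \geq 2C} \;\leq\; (A\rho(n))^{n},
$$

provided $A \geq \max\{2C, 2^{c+1}\}$, where $c$ and $C$ denote the constants hidden in the $2^{O(n)}$ and $O(1)$ terms of the recursion.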
|
| 702 |
+
|
| 703 |
+
We also explain how Dadush's algorithm can be used to solve integer linear programs in time $(\log (2n))^{O(n)}$ . Again, the arguments used are standard. Details on the estimates can be found in the book of Schrijver [Sch99].
|
| 704 |
+
|
| 705 |
+
Proof of Theorem 5. Consider an arbitrary integer linear program $\max \{c^T x \mid Ax \leq b, x \in \mathbb{Z}^n\}$. One can compute a number $M$ in time polynomial in $n$ and the encoding length of $A$ and $b$ so that if the IP is bounded and feasible, then the optimum
|
| 706 |
+
|
| 707 |
+
value is the same as $\max \{c^T x\mid Ax\leq b,\| x\|_{\infty}\leq M,x\in \mathbb{Z}^n\}$. Next, by applying binary search, it suffices to find an integer point in the compact convex set $K = \{x\in \mathbb{R}^n\mid c^T x\geq \delta ,Ax\leq b,\| x\|_{\infty}\leq M\}$ for which Theorem 4 applies.
|
| 708 |
+
|
| 709 |
+
# 7 Implications of Theorem 2
|
| 710 |
+
|
| 711 |
+
Here we derive a few implications of our main result. The following classical inequality will be useful here:
|
| 712 |
+
|
| 713 |
+
Lemma 42 ([RS57]). For any convex set $K \subseteq \mathbb{R}^n$ we have $\operatorname{Vol}_n(K - K) \leq \binom{2n}{n} \cdot \operatorname{Vol}_n(K)$ .
|
| 714 |
+
|
| 715 |
+
We restate Theorem 6, which yields a nearly tight relationship between the covering radii of $K$ and $K - K$ . We remark that it remains an open question whether the two quantities are equal up to a constant.
|
| 716 |
+
|
| 717 |
+
Theorem (Theorem 6). For any full rank lattice $\Lambda \subseteq \mathbb{R}^n$ and any convex body $K \subseteq \mathbb{R}^n$ , one has
|
| 718 |
+
|
| 719 |
+
$$
|
| 720 |
+
\mu (\Lambda , K - K) \leq \mu (\Lambda , K) \leq O (\log^ {3} (2 n)) \cdot \mu (\Lambda , K - K).
|
| 721 |
+
$$
|
| 722 |
+
|
| 723 |
+
Proof. Let $W$ denote the subspace attaining $\mu_{KL}(\Lambda, K)$ with $\dim W = d$ . We can use Theorem 2 to upper bound
|
| 724 |
+
|
| 725 |
+
$$
|
| 726 |
+
\begin{array}{rl} \mu(\Lambda, K) & \lesssim \log^3(2n) \cdot \mu_{KL}(\Lambda, K) = \log^3(2n) \cdot \left(\frac{\det(\Pi_W(\Lambda))}{\operatorname{Vol}_d(\Pi_W(K))}\right)^{1/d} \\ & \stackrel{\text{Lem 42}}{\lesssim} \log^3(2n) \cdot 4 \cdot \left(\frac{\det(\Pi_W(\Lambda))}{\operatorname{Vol}_d(\Pi_W(K - K))}\right)^{1/d} \\ & \lesssim \log^3(2n) \cdot \mu_{KL}(\Lambda, K - K) \\ & \lesssim \log^3(2n) \cdot \mu(\Lambda, K - K). \quad \square \end{array}
|
| 727 |
+
$$
|
| 728 |
+
|
| 729 |
+
This in turn implies that the flatness constant in dimension $n$ is bounded by $O(n\log^3 (2n))$ :
|
| 730 |
+
|
| 731 |
+
Theorem (Theorem 7). For any convex body $K \subseteq \mathbb{R}^n$ and any full rank lattice $\Lambda \subseteq \mathbb{R}^n$ , one has
|
| 732 |
+
|
| 733 |
+
$$
|
| 734 |
+
\mu (\Lambda , K) \cdot \lambda_ {1} (\Lambda^ {*}, (K - K) ^ {\circ}) \leq O (n \log^ {3} (2 n)).
|
| 735 |
+
$$
|
| 736 |
+
|
| 737 |
+
Proof. First we show a slightly worse bound of $O(n\log^4 (2n))$ . Banaszczyk [Ban96] proved that for any symmetric convex body $Q \subseteq \mathbb{R}^n$ one has $\mu (\Lambda ,Q)\cdot \lambda_1(\Lambda^*,Q^\circ) \leq O(n\log (2n))$ . Setting $Q \coloneqq K - K$ (which is a symmetric convex body) one then has by Theorem 6
|
| 738 |
+
|
| 739 |
+
$$
|
| 740 |
+
\mu (\Lambda , K) \cdot \lambda_ {1} (\Lambda^ {*}, Q ^ {\circ}) \leq O (\log^ {3} (2 n)) \cdot \mu (\Lambda , Q) \cdot \lambda_ {1} (\Lambda^ {*}, Q ^ {\circ}) \leq O (n \log^ {4} (2 n)).
|
| 741 |
+
$$
|
| 742 |
+
|
| 743 |
+
Now we give the argument of the stronger bound of $O(n\log^3 (2n))$ which is due to Dadush. Let $W$ denote the subspace attaining $\mu_{KL}(\Lambda ,K)$ with $\dim W = d$ . By Theorem 2,
|
| 744 |
+
|
| 745 |
+
$$
|
| 746 |
+
\begin{array}{rl} \mu(\Lambda, K) & \lesssim \log^3(2n) \cdot \mu_{KL}(\Lambda, K) = \log^3(2n) \cdot \left(\frac{\det(\Pi_W(\Lambda))}{\operatorname{Vol}_d(\Pi_W(K))}\right)^{1/d} \\ & \stackrel{\text{Lem 42}}{\lesssim} \log^3(2n) \cdot 4 \cdot \left(\frac{\det(\Pi_W(\Lambda))}{\operatorname{Vol}_d(\Pi_W(Q))}\right)^{1/d} \\ & \stackrel{\text{Lem 23}}{\asymp} \log^3(2n) \cdot d \cdot \left(\frac{\operatorname{Vol}_d(Q^{\circ} \cap W)}{\det(\Lambda^* \cap W)}\right)^{1/d} \\ & \stackrel{\text{Thm 10}}{\lesssim} n \log^3(2n) \cdot \frac{2}{\lambda_1(\Lambda^* \cap W, Q^{\circ} \cap W)}. \end{array}
|
| 747 |
+
$$
|
| 748 |
+
|
| 749 |
+
Here, we have used that $\Pi_W(\Lambda)^* = \Lambda^* \cap W$ . Since $\lambda_1(\Lambda^*, Q^\circ) \leq \lambda_1(\Lambda^* \cap W, Q^\circ \cap W)$ , the theorem follows.
|
| 750 |
+
|
| 751 |
+
We also explain the proof of Corollary 8 which again is standard:
|
| 752 |
+
|
| 753 |
+
Corollary (Cor 8). Let $K \subseteq \mathbb{R}^n$ be a convex body with $K \cap \mathbb{Z}^n = \emptyset$. Then there is a vector $c \in \mathbb{Z}^n \setminus \{\mathbf{0}\}$ so that at most $O(n\log^3(2n))$ many hyperplanes of the form $\langle c, x \rangle = \delta$ with $\delta \in \mathbb{Z}$ intersect $K$.
|
| 754 |
+
|
| 755 |
+
Proof. We apply Theorem 7 for the lattice $\Lambda := \mathbb{Z}^n$ so that $\Lambda^* = \mathbb{Z}^n$ . Then $K \cap \mathbb{Z}^n = \emptyset$ implies that $\mu(\mathbb{Z}^n, K) > 1$ and so $\lambda_1(\mathbb{Z}^n, (K - K)^\circ) \lesssim n\log^3(2n)$ . Let $c \in \mathbb{Z}^n \setminus \{\mathbf{0}\}$ be the vector attaining this bound. Then revisiting the definition of the dual norm (Sec 2.3) we have $\max \{\langle c, x - y \rangle : x, y \in K\} = \|c\|_{(K - K)^\circ}$ . That means at most $\|c\|_{(K - K)^\circ} + 1 \lesssim n\log^3(2n)$ hyperplanes of the form $\langle c, x \rangle = \delta$ with $\delta \in \mathbb{Z}$ intersect $K$ .
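To make the counting step tangible for a polytope $K = \{x : Ax \leq b\}$: the number of integer values $\delta$ for which the hyperplane $\langle c, x\rangle = \delta$ meets $K$ is determined by the range of $\langle c, \cdot\rangle$ over $K$, and the following Python sketch computes it with two linear programs (scipy is used purely for illustration and is not part of the paper).

```python
import numpy as np
from scipy.optimize import linprog

def integer_hyperplane_levels(c, A, b):
    """Number of integers delta for which the hyperplane <c, x> = delta meets
    the polytope {x : A x <= b} (assumed nonempty and bounded)."""
    c = np.asarray(c, dtype=float)
    free = [(None, None)] * len(c)                    # variables are unbounded
    lo = linprog(c, A_ub=A, b_ub=b, bounds=free)      # minimizes <c, x>
    hi = linprog(-c, A_ub=A, b_ub=b, bounds=free)     # maximizes <c, x>
    lower, upper = lo.fun, -hi.fun
    return max(0, int(np.floor(upper) - np.ceil(lower)) + 1)

# Example: the square [0.2, 1.8]^2 with c = (1, 1); <c, x> ranges over [0.4, 3.6],
# so exactly the three integer levels 1, 2, 3 intersect the square.
A = np.array([[1, 0], [-1, 0], [0, 1], [0, -1]], dtype=float)
b = np.array([1.8, -0.2, 1.8, -0.2])
print(integer_hyperplane_levels([1.0, 1.0], A, b))    # 3
```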
|
| 756 |
+
|
| 757 |
+
Acknowledgement. The authors are grateful to Daniel Dadush for numerous discussions on related topics, a careful read of a preliminary draft, and for the proof of the improved bound in Theorem 7. The authors would also like to thank the anonymous reviewers who made several helpful suggestions.
|
| 758 |
+
|
| 759 |
+
# References
|
| 760 |
+
|
| 761 |
+
[AAGM15] Shiri Artstein-Avidan, Apostolos Giannopoulos, and Vitali D. Milman. Asymptotic geometric analysis. Part I, volume 202 of Mathematical Surveys and Monographs. American Mathematical Society, Providence, RI, 2015.
|
| 762 |
+
|
| 763 |
+
[AKS01] Miklós Ajtai, Ravi Kumar, and D. Sivakumar. A sieve algorithm for the shortest lattice vector problem. In Proceedings on 33rd Annual ACM Symposium on Theory of Computing, July 6-8, 2001, Heraklion, Crete, Greece, pages 601-610, 2001.
|
| 764 |
+
[Ban96] Wojciech Banaszczyk. Inequalities for convex bodies and polar reciprocal lattices in $\mathbb{R}^n$ II: application of K-convexity. Discret. Comput. Geom., 16(3):305-311, 1996.
|
| 765 |
+
[BLPS99] Wojciech Banaszczyk, Alexander E. Litvak, Alain Pajor, and Stanisław J. Szarek. The flatness theorem for nonsymmetric convex bodies via the local theory of Banach spaces. Math. Oper. Res., 24(3):728-750, 1999.
|
| 766 |
+
[Cas04] Bill Casselman. Stability of Lattices and the Partition of Arithmetic Quotients. Asian Journal of Mathematics, 8(4):607 - 638, 2004.
|
| 767 |
+
[Dad12] Daniel Nicolas Dadush. Integer Programming, Lattice Algorithms, and Deterministic Volume Estimation. PhD thesis, USA, 2012.
|
| 768 |
+
[Dad19] Daniel Dadush. On approximating the covering radius and finding dense lattice subspaces. In STOC, pages 1021-1026. ACM, 2019.
|
| 769 |
+
[DER22] Daniel Dadush, Friedrich Eisenbrand, and Thomas Rothvoss. From approximate to exact integer programming. CoRR, abs/2211.03859, 2022.
|
| 770 |
+
[DPV11] Daniel Dadush, Chris Peikert, and Santosh S. Vempala. Enumerative lattice algorithms in any norm via m-ellipsoid coverings. In FOCS, pages 580–589. IEEE Computer Society, 2011.
|
| 771 |
+
[DR16] Daniel Dadush and Oded Regev. Towards strong reverse Minkowski-type inequalities for lattices. In FOCS, pages 447-456. IEEE Computer Society, 2016.
|
| 772 |
+
[DV13] Daniel Dadush and Santosh S. Vempala. Near-optimal deterministic algorithms for volume computation via M-ellipsoids. Proc. Natl. Acad. Sci. USA, 110(48):19237-19245, 2013.
|
| 773 |
+
[GLS88] Martin Grötschel, László Lovász, and Alexander Schrijver. Geometric algorithms and combinatorial optimization. Springer, 1988.
|
| 774 |
+
|
| 775 |
+
[Joh48] Fritz John. Extremum problems with inequalities as subsidiary conditions. In Studies and Essays Presented to R. Courant on his 60th Birthday, January 8, 1948, pages 187-204. Interscience Publishers, Inc., New York, 1948.
|
| 776 |
+
[Kan87] Ravi Kannan. Minkowski's convex body theorem and integer programming. Math. Oper. Res., 12(3):415-440, August 1987.
|
| 777 |
+
[KL88] Ravi Kannan and László Lovász. Covering minima and lattice-point-free convex bodies. Annals of Mathematics, 128(3):577-602, 1988.
|
| 778 |
+
[KLS97] Ravi Kannan, László Lovász, and Miklós Simonovits. Random walks and an $\mathrm{O}(n^5)$ volume algorithm for convex bodies. Random Structures and Algorithms, 11:1-50, 1997.
|
| 779 |
+
[Len83] H. W. Lenstra, Jr. Integer programming with a fixed number of variables. Mathematics of Operations Research, 8(4):538-548, 1983.
|
| 780 |
+
[LLL82] A. K. Lenstra, H. W. Lenstra, Jr., and L. Lovász. Factoring polynomials with rational coefficients. Math. Ann., 261(4):515-534, 1982.
|
| 781 |
+
[MV13] Daniele Micciancio and Panagiotis Voulgaris. A deterministic single exponential time algorithm for most lattice problems based on Voronoi cell computations. SIAM J. Comput., 42(3):1364-1391, 2013.
|
| 782 |
+
[Odl90] A. M. Odlyzko. The rise and fall of knapsack cryptosystems. Cryptology and Computational Number Theory, pages 75-88, 1990.
|
| 783 |
+
[Reg09a] Oded Regev. Lecture notes on lattices, 2009.
|
| 784 |
+
[Reg09b] Oded Regev. On lattices, learning with errors, random linear codes, and cryptography. J. ACM, 56(6):34:1-34:40, 2009.
|
| 785 |
+
[RS57] Claude Ambrose Rogers and Geoffrey C. Shephard. The difference body of a convex body. Archiv der Mathematik, 8:220-233, 1957.
|
| 786 |
+
[RS17] Oded Regev and Noah Stephens-Davidowitz. A reverse Minkowski theorem. In STOC, pages 941-953. ACM, 2017.
|
| 787 |
+
[Rud98] Mark Rudelson. Distances between non-symmetric convex bodies and the MM*-estimate. Positivity, 4:161-178, 1998.
|
| 788 |
+
[Sch99] Alexander Schrijver. Theory of linear and integer programming. Wiley-Interscience series in discrete mathematics and optimization. Wiley, 1999.
|
| 789 |
+
|
| 790 |
+
[Ste17] Noah Stephens-Davidowitz. On the Gaussian Measure Over Lattices. PhD thesis, New York University, USA, 2017.
|
| 791 |
+
|
| 792 |
+
[Vri23] Beatrice-Helen Vritsiou. Regular ellipsoids and a Blaschke-Santaló-type inequality for projections of non-symmetric convex bodies, 2023.
|
| 793 |
+
|
| 794 |
+
# A The approximate canonical filtration
|
| 795 |
+
|
| 796 |
+
In this appendix, we prove Theorem 13. The proof idea is rather simple: given a $t$-stable filtration $\{\mathbf{0}\} = \Lambda_0 \subset \ldots \subset \Lambda_k = \Lambda$, we select one index from every density class in order to make the filtration well-separated. But before we come to the main argument, we require two lemmas.
|
| 797 |
+
|
| 798 |
+
Lemma 43 (Grayson's parallelogram rule [Cas04]). For any two lattices $\Lambda, \Lambda' \subseteq \mathbb{R}^n$ ,
|
| 799 |
+
|
| 800 |
+
$$
|
| 801 |
+
\det (\Lambda) \cdot \det \left(\Lambda^ {\prime}\right) \geq \det \left(\Lambda + \Lambda^ {\prime}\right) \cdot \det \left(\Lambda \cap \Lambda^ {\prime}\right).
|
| 802 |
+
$$
|
| 803 |
+
|
| 804 |
+
A proof may also be found in Chapter 2 of [Ste17]. The $t$ -stable filtration can be used to obtain lower bounds on the determinant of any sublattice:
|
| 805 |
+
|
| 806 |
+
Lemma 44. Let $\Lambda \subseteq \mathbb{R}^n$ be any lattice and let $\{\mathbf{0}\} = \Lambda_0 \subset \Lambda_1 \subset \ldots \subset \Lambda_k = \Lambda$ be a $t$ -stable filtration. Then for any sublattice $\tilde{\Lambda} \subseteq \Lambda$ we have the inequality
|
| 807 |
+
|
| 808 |
+
$$
|
| 809 |
+
\mathrm{nd}(\tilde{\Lambda}) \geq t^{-1} \cdot \mathrm{nd}(\Lambda_1).
|
| 810 |
+
$$
|
| 811 |
+
|
| 812 |
+
Proof. Let $r_i \coloneqq \mathrm{nd}(\Lambda_i / \Lambda_{i-1}) = \mathrm{det}(\Lambda_i / \Lambda_{i-1})^{1/\mathrm{rank}(\Lambda_i / \Lambda_{i-1})}$ be the normalized determinant. We prove by induction on $i \in \{1, \ldots, k\}$ that the result holds for all lattices $\tilde{\Lambda} \subseteq \Lambda_i$ . The base case follows as $\Lambda_1 = \Lambda_1 / \Lambda_0$ is a scalar of the $t$ -stable lattice $\frac{1}{\mathrm{nd}(\Lambda_1)} \Lambda_1$ . Now suppose that $\tilde{\Lambda} \subseteq \Lambda_i$ for some $i > 1$ . Note that $\Lambda_+ \coloneqq \tilde{\Lambda} + \Lambda_{i-1}$ satisfies $\Lambda_{i-1} \subseteq \Lambda_+ \subseteq \Lambda_i$ , so that $\Lambda_+ / \Lambda_{i-1} \subseteq \Lambda_i / \Lambda_{i-1}$ and $\mathrm{nd}(\Lambda_+ / \Lambda_{i-1}) \geq t^{-1} \cdot r_i > t^{-1} \cdot r_1$ . By Lemma 43,
|
| 813 |
+
|
| 814 |
+
$$
|
| 815 |
+
\det (\tilde {\Lambda}) \cdot \det (\Lambda_ {i - 1}) \geq \det (\tilde {\Lambda} + \Lambda_ {i - 1}) \cdot \det (\tilde {\Lambda} \cap \Lambda_ {i - 1}).
|
| 816 |
+
$$
|
| 817 |
+
|
| 818 |
+
Factoring out $\Lambda_{i - 1}$ gives
|
| 819 |
+
|
| 820 |
+
$$
|
| 821 |
+
\det (\tilde {\Lambda}) \geq \det (\Lambda_ {+} / \Lambda_ {i - 1}) \cdot \det (\tilde {\Lambda} \cap \Lambda_ {i - 1}).
|
| 822 |
+
$$
|
| 823 |
+
|
| 824 |
+
Hence
|
| 825 |
+
|
| 826 |
+
$$
|
| 827 |
+
\mathrm{nd}(\tilde{\Lambda}) \geq \mathrm{nd}\left(\Lambda_+ / \Lambda_{i-1}\right)^{\operatorname{rank}(\Lambda_+ / \Lambda_{i-1}) / \operatorname{rank}(\tilde{\Lambda})} \cdot \mathrm{nd}\left(\tilde{\Lambda} \cap \Lambda_{i-1}\right)^{\operatorname{rank}(\tilde{\Lambda} \cap \Lambda_{i-1}) / \operatorname{rank}(\tilde{\Lambda})} \geq t^{-1} \cdot r_1
|
| 828 |
+
$$
|
| 829 |
+
|
| 830 |
+
where we used the inductive hypothesis on $\tilde{\Lambda} \cap \Lambda_{i-1} \subseteq \Lambda_{i-1}$ together with the fact that $\operatorname{rank}(\Lambda_+ / \Lambda_{i-1}) + \operatorname{rank}(\tilde{\Lambda} \cap \Lambda_{i-1}) = \operatorname{rank}(\tilde{\Lambda})$ .
|
| 831 |
+
|
| 832 |
+
Now, we come to the main argument:
|
| 833 |
+
|
| 834 |
+
Proof of Theorem 13. Let $r_i \coloneqq \mathrm{nd}(\Lambda_i / \Lambda_{i-1})$ and $d_i \coloneqq \mathrm{rank}(\Lambda_i / \Lambda_{i-1})$ . For $\ell \in \mathbb{Z}$ denote $I_\ell \coloneqq \{i \in [k] : 2^\ell \leq r_i < 2 \cdot 2^\ell\}$ . We define a sequence of indices $0 = \ell(0) < \ell(1) < \ldots < \ell(\tilde{k}) = k$ that contains precisely the largest index $i$ in each $I_\ell$ with $I_\ell \neq \emptyset$ plus the index $\ell(0) = 0$ . We set $\tilde{\Lambda}_j \coloneqq \Lambda_{\ell(j)}$ and $\tilde{r}_j \coloneqq \mathrm{nd}(\tilde{\Lambda}_j / \tilde{\Lambda}_{j-1})$ . First, consider an index $\ell$ with $I_\ell \neq \emptyset$ . Let $i_{\min}, i_{\max} \in I_\ell$ be the minimal and maximal indices in $I_\ell$ . Then
|
| 835 |
+
|
| 836 |
+
$$
|
| 837 |
+
\begin{array}{rl} \det(\Lambda_{i_{\max}} / \Lambda_{i_{\min}-1})^{1/\operatorname{rank}(\Lambda_{i_{\max}} / \Lambda_{i_{\min}-1})} & = \Big(\prod_{i=i_{\min}}^{i_{\max}} \det(\Lambda_i / \Lambda_{i-1})\Big)^{1/\sum_{i=i_{\min}}^{i_{\max}} \operatorname{rank}(\Lambda_i / \Lambda_{i-1})} \\ & = \Big(\prod_{i=i_{\min}}^{i_{\max}} r_i^{d_i}\Big)^{1/\sum_{i=i_{\min}}^{i_{\max}} d_i}. \end{array}
|
| 838 |
+
$$
|
| 839 |
+
|
| 840 |
+
Note that this value is a weighted geometric average of $r_i$-values for $i \in I_\ell$. From this it immediately follows that $\tilde{r}_1 < \ldots < \tilde{r}_{\tilde{k}}$ and $\tilde{r}_j \leq \frac{1}{2} \tilde{r}_{j+2}$ for all $j$, i.e. (a') holds. It remains to show that the quotient lattices are scalars of $2t$-stable lattices. Fix some index $j \in [\tilde{k}]$ and let $\Lambda' := \frac{1}{\tilde{r}_j} (\tilde{\Lambda}_j / \tilde{\Lambda}_{j-1})$. First note that by assumption, the filtration $\{\mathbf{0}\} = \Lambda_0' \subset \dots \subset \Lambda_{k'}' := \Lambda'$ given by $\Lambda_i' := \frac{1}{\tilde{r}_j} (\Lambda_{\ell(j-1)+i} / \Lambda_{\ell(j-1)})$ with $k' := \ell(j) - \ell(j-1)$ is also $t$-stable because $\Lambda_{i+1}' / \Lambda_i' = \frac{1}{\tilde{r}_j} (\Lambda_{\ell(j-1)+i+1} / \Lambda_{\ell(j-1)+i})$.
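The index selection at the start of this proof can be summarized by a few lines of Python (a sketch with our own naming; the index $\ell(0) = 0$ is prepended separately): group the indices by the dyadic class of $r_i$ and keep the largest index of every non-empty class.

```python
import math

def well_separated_indices(r_values):
    """Given the increasing normalized determinants r_1 < ... < r_k (as a
    0-indexed Python list), return the selected indices l(1) < ... < l(k~):
    the largest index of every non-empty dyadic class
    I_ell = {i : 2^ell <= r_i < 2 * 2^ell}.  (l(0) = 0 is prepended separately.)"""
    last_in_class = {}
    for i, r_i in enumerate(r_values, start=1):        # 1-based lattice indices
        ell = math.floor(math.log2(r_i))
        last_in_class[ell] = i                         # later i overwrites: keeps the largest
    return [last_in_class[ell] for ell in sorted(last_in_class)]

# Example: r-values 0.3 < 0.5 < 0.9 < 2.2 < 2.8 lie in the dyadic classes
# [1/4,1/2), [1/2,1), [2,4), so the indices 1, 3 and 5 are selected.
print(well_separated_indices([0.3, 0.5, 0.9, 2.2, 2.8]))   # [1, 3, 5]
```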
|
| 841 |
+
|
| 842 |
+
We will prove the following two statements.
|
| 843 |
+
|
| 844 |
+
(I) For any sublattice $\tilde{\Lambda} \subseteq \Lambda'$ one has $\mathrm{nd}(\tilde{\Lambda}) \geq (2t)^{-1}$ .
|
| 845 |
+
$(II)$ For any sublattice $\tilde{\Lambda} \subseteq (\Lambda')^*$ one has $\mathrm{nd}(\tilde{\Lambda}) \geq (2t)^{-1}$ .
|
| 846 |
+
|
| 847 |
+
First we show $(I)$ . We apply Lemma 44 on $\Lambda'$ to obtain
|
| 848 |
+
|
| 849 |
+
$$
|
| 850 |
+
\mathrm{nd}(\tilde{\Lambda}) \geq t^{-1} \cdot \mathrm{nd}(\Lambda'_1) \geq t^{-1} \cdot \frac{r_{\ell(j-1)+1}}{\tilde{r}_j} \geq (2t)^{-1},
|
| 851 |
+
$$
|
| 852 |
+
|
| 853 |
+
since both numerator and denominator belong to the same interval $[2^{\ell}, 2 \cdot 2^{\ell})$ for some $\ell \in \mathbb{Z}$. Next, we prove (II). Given the filtration $\{\mathbf{0}\} = \Lambda_0' \subset \dots \subset \Lambda_{k'}' = \Lambda'$ with $U_i := \operatorname{span}(\Lambda_i')$, the dual filtration is given by $\{\mathbf{0}\} = (\Lambda')_0^* \subset \dots \subset (\Lambda')_{k'}^* = (\Lambda')^*$ with $(\Lambda')_i^* := (\Lambda')^* \cap U_{k' - i}^\perp$ and determinant $\det((\Lambda')_i^*) = \det((\Lambda')^*) \cdot \det(\Lambda_{k' - i}') = \det(\Lambda_{k' - i}')$, see for example [Dad19]. Since quotients of the dual filtration are duals of the quotients of the original filtration, the dual filtration is also $t$-stable. We then apply Lemma 44 on $(\Lambda')^*$:
|
| 854 |
+
|
| 855 |
+
$$
|
| 856 |
+
\mathrm{nd}(\tilde{\Lambda}) \geq t^{-1} \cdot \mathrm{nd}((\Lambda')_1^*) = t^{-1} \cdot (r'_{k'})^{-1} = t^{-1} \cdot \left(\frac{r_{\ell(j)}}{\tilde{r}_j}\right)^{-1} \stackrel{r_{\ell(j)} \leq 2 \tilde{r}_j}{\geq} (2t)^{-1}.
|
| 857 |
+
$$
|
2303.14xxx/2303.14605/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4d3d9ca8177775810365cc66aecdfd8eb19dda4d1bcbcc75a8b5e7e3acea5f99
|
| 3 |
+
size 818771
|
2303.14xxx/2303.14605/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14612/e1a8feb8-e3e0-42e1-846f-d4b91a7fc621_content_list.json
ADDED
|
@@ -0,0 +1,604 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Deepfake in the Metaverse: Security Implications for Virtual Gaming, Meetings, and Offices",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
112,
|
| 8 |
+
101,
|
| 9 |
+
885,
|
| 10 |
+
151
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Shahroz Tariq \nCSIRO's Data61, Australia \nshahroz.tariq@data61.csiro.au",
|
| 17 |
+
"bbox": [
|
| 18 |
+
127,
|
| 19 |
+
162,
|
| 20 |
+
333,
|
| 21 |
+
209
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Alsharif Abuadbba \nCSIRO's Data61, Australia \nsharif.abuadbba@data61.csiro.au",
|
| 28 |
+
"bbox": [
|
| 29 |
+
388,
|
| 30 |
+
162,
|
| 31 |
+
609,
|
| 32 |
+
209
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Kristen Moore \nCSIRO's Data61, Australia \nkristen.moore@data61.csiro.au",
|
| 39 |
+
"bbox": [
|
| 40 |
+
661,
|
| 41 |
+
162,
|
| 42 |
+
872,
|
| 43 |
+
209
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "ABSTRACT",
|
| 50 |
+
"text_level": 1,
|
| 51 |
+
"bbox": [
|
| 52 |
+
83,
|
| 53 |
+
218,
|
| 54 |
+
184,
|
| 55 |
+
232
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "The metaverse has gained significant attention from various industries due to its potential to create a fully immersive and interactive virtual world. However, the integration of deepfakes in the metaverse brings serious security implications, particularly with regard to impersonation. This paper examines the security implications of deepfakes in the metaverse, specifically in the context of gaming, online meetings, and virtual offices. The paper discusses how deepfakes can be used to impersonate in gaming scenarios, how online meetings in the metaverse open the door for impersonation, and how virtual offices in the metaverse lack physical authentication, making it easier for attackers to impersonate someone. The implications of these security concerns are discussed in relation to the confidentiality, integrity, and availability (CIA) triad. The paper further explores related issues such as the darkverse, and digital cloning, as well as regulatory and privacy concerns associated with addressing security threats in the virtual world.",
|
| 62 |
+
"bbox": [
|
| 63 |
+
81,
|
| 64 |
+
237,
|
| 65 |
+
483,
|
| 66 |
+
459
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "CCS CONCEPTS",
|
| 73 |
+
"text_level": 1,
|
| 74 |
+
"bbox": [
|
| 75 |
+
83,
|
| 76 |
+
470,
|
| 77 |
+
220,
|
| 78 |
+
483
|
| 79 |
+
],
|
| 80 |
+
"page_idx": 0
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"type": "text",
|
| 84 |
+
"text": "- Human-centered computing $\\rightarrow$ Virtual reality; $\\cdot$ Security and privacy $\\rightarrow$ Social engineering attacks; $\\cdot$ Social and professional topics $\\rightarrow$ Identity theft.",
|
| 85 |
+
"bbox": [
|
| 86 |
+
81,
|
| 87 |
+
488,
|
| 88 |
+
482,
|
| 89 |
+
532
|
| 90 |
+
],
|
| 91 |
+
"page_idx": 0
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"type": "text",
|
| 95 |
+
"text": "KEYWORDS",
|
| 96 |
+
"text_level": 1,
|
| 97 |
+
"bbox": [
|
| 98 |
+
83,
|
| 99 |
+
542,
|
| 100 |
+
191,
|
| 101 |
+
556
|
| 102 |
+
],
|
| 103 |
+
"page_idx": 0
|
| 104 |
+
},
|
| 105 |
+
{
|
| 106 |
+
"type": "text",
|
| 107 |
+
"text": "Metaverse, Deepfake, Security, Impersonation, Gaming, Online meetings, Virtual offices",
|
| 108 |
+
"bbox": [
|
| 109 |
+
81,
|
| 110 |
+
560,
|
| 111 |
+
482,
|
| 112 |
+
589
|
| 113 |
+
],
|
| 114 |
+
"page_idx": 0
|
| 115 |
+
},
|
| 116 |
+
{
|
| 117 |
+
"type": "text",
|
| 118 |
+
"text": "1 INTRODUCTION",
|
| 119 |
+
"text_level": 1,
|
| 120 |
+
"bbox": [
|
| 121 |
+
83,
|
| 122 |
+
599,
|
| 123 |
+
256,
|
| 124 |
+
614
|
| 125 |
+
],
|
| 126 |
+
"page_idx": 0
|
| 127 |
+
},
|
| 128 |
+
{
|
| 129 |
+
"type": "text",
|
| 130 |
+
"text": "The emergence of the metaverse [27] has captured the attention of the technology community, giving rise to widespread anticipation and debate. Prominent companies such as Meta (formerly Facebook), Microsoft, and Nvidia have expressed interest in the concept of a fully immersive virtual world, where individuals can interact with one another and their surroundings. To this end, various companies are releasing their own metaverse experiences, including Meta's Horizon Worlds [18, 19], Roblox's gaming metaverse [20], Microsoft's Mesh [15], and Nvidia's Omniverse [17]. While the potential applications of the metaverse are vast, with possibilities ranging from gaming to virtual meetings and offices, each method has its own benefits and limitations.",
|
| 131 |
+
"bbox": [
|
| 132 |
+
81,
|
| 133 |
+
618,
|
| 134 |
+
482,
|
| 135 |
+
784
|
| 136 |
+
],
|
| 137 |
+
"page_idx": 0
|
| 138 |
+
},
|
| 139 |
+
{
|
| 140 |
+
"type": "text",
|
| 141 |
+
"text": "However, the applicability of deepfakes [9, 16] in the metaverse presents significant security implications, particularly with regard to impersonation. Deepfakes are computer-generated images or videos that can be manipulated to look like real people or events. The ability to generate such content has significantly increased with advances in machine learning and artificial intelligence. Recently, deepfakes in the metaverse have become a topic of discussion on different forums and media articles [2, 12, 21, 28].",
|
| 142 |
+
"bbox": [
|
| 143 |
+
81,
|
| 144 |
+
785,
|
| 145 |
+
482,
|
| 146 |
+
896
|
| 147 |
+
],
|
| 148 |
+
"page_idx": 0
|
| 149 |
+
},
|
| 150 |
+
{
|
| 151 |
+
"type": "image",
|
| 152 |
+
"img_path": "images/0bf79c369ad4ec2c57c2f9f34d281d3efe5b3af90a921f9f1227536169552db0.jpg",
|
| 153 |
+
"image_caption": [
|
| 154 |
+
"Figure 1: The three most commonly publicized scenarios in the metaverse: virtual gaming, virtual meetings, and virtual offices. These applications highlight the potential for immersive virtual experiences, but also raise concerns about security and privacy in this emerging technology."
|
| 155 |
+
],
|
| 156 |
+
"image_footnote": [],
|
| 157 |
+
"bbox": [
|
| 158 |
+
516,
|
| 159 |
+
215,
|
| 160 |
+
915,
|
| 161 |
+
382
|
| 162 |
+
],
|
| 163 |
+
"page_idx": 0
|
| 164 |
+
},
|
| 165 |
+
{
|
| 166 |
+
"type": "text",
|
| 167 |
+
"text": "The utilisation of metaverse technology has been associated with numerous benefits, including the provision of a fully immersive virtual environment that facilitates interaction with other users and virtual objects. Nevertheless, the potential deployment of deepfake technology to perpetrate malicious activities, such as impersonation, has shed light on the limitations of the technology and the need for effective security measures to be put in place. It is noteworthy that existing state-of-the-art deepfake detection methods [3-7, 10, 11, 22-26] primarily focus on detecting deepfakes in the physical world and do not take into account the possibility of deepfakes in the metaverse. Thus, the current research is challenged with an evident gap in the identification and prevention of deepfakes in the metaverse, making it imperative to shed light on this research gap.",
|
| 168 |
+
"bbox": [
|
| 169 |
+
511,
|
| 170 |
+
493,
|
| 171 |
+
915,
|
| 172 |
+
688
|
| 173 |
+
],
|
| 174 |
+
"page_idx": 0
|
| 175 |
+
},
|
| 176 |
+
{
|
| 177 |
+
"type": "text",
|
| 178 |
+
"text": "In this paper, we explore the security implications of deepfakes in the metaverse. We will start by defining the concept of the metaverse and how it is expected to be used in three scenarios: (i) gaming, (ii) online meetings, and (iii) virtual offices (see Fig. 1). We will also discuss the potential dangers of deepfake in each of the three scenarios by exploring the potential consequences of deepfake misuse in the metaverse, such as the ability to impersonate others, manipulate meetings, and disrupt virtual work environments. We will also discuss potential solutions to mitigate these risks and ensure the safety and security of metaverse users. We also explore the security implications of deepfakes in the metaverse to fake digital identity, the CIA triad, legal and regulatory challenges, privacy issues, and darkverse. Through this work, we aim to explore the potential security implications of deepfakes in the metaverse and to raise awareness of the risks and challenges posed by this technology.",
|
| 179 |
+
"bbox": [
|
| 180 |
+
511,
|
| 181 |
+
688,
|
| 182 |
+
915,
|
| 183 |
+
896
|
| 184 |
+
],
|
| 185 |
+
"page_idx": 0
|
| 186 |
+
},
|
| 187 |
+
{
|
| 188 |
+
"type": "aside_text",
|
| 189 |
+
"text": "arXiv:2303.14612v2 [cs.CR] 10 Sep 2023",
|
| 190 |
+
"bbox": [
|
| 191 |
+
22,
|
| 192 |
+
265,
|
| 193 |
+
58,
|
| 194 |
+
707
|
| 195 |
+
],
|
| 196 |
+
"page_idx": 0
|
| 197 |
+
},
|
| 198 |
+
{
|
| 199 |
+
"type": "text",
|
| 200 |
+
"text": "2 WHAT IS THE METAVERSE?",
|
| 201 |
+
"text_level": 1,
|
| 202 |
+
"bbox": [
|
| 203 |
+
84,
|
| 204 |
+
104,
|
| 205 |
+
349,
|
| 206 |
+
119
|
| 207 |
+
],
|
| 208 |
+
"page_idx": 1
|
| 209 |
+
},
|
| 210 |
+
{
|
| 211 |
+
"type": "text",
|
| 212 |
+
"text": "The metaverse is a collective virtual shared space that offers an immersive and interactive 3D environment, facilitating real-time engagement with digital content and other individuals through advanced technologies such as virtual reality (VR) and augmented reality (AR). While originally a concept in science fiction, such as in Neal Stephenson's novel \"Snow Crash\" and the movie \"The Matrix,\" recent technological advances have made the metaverse increasingly feasible. As a new form of social and economic infrastructure, the metaverse provides opportunities for people to work, play, socialize, learn, and consume content within a shared virtual space. While different visions of the metaverse exist among companies, organizations, and individuals, ranging from a fully autonomous world to a combination of different virtual platforms and experiences, the metaverse is considered a transformative technology that has the potential to impact various aspects of our lives.",
|
| 213 |
+
"bbox": [
|
| 214 |
+
86,
|
| 215 |
+
125,
|
| 216 |
+
480,
|
| 217 |
+
330
|
| 218 |
+
],
|
| 219 |
+
"page_idx": 1
|
| 220 |
+
},
|
| 221 |
+
{
|
| 222 |
+
"type": "text",
|
| 223 |
+
"text": "3 GAMING IN THE METVERSE",
|
| 224 |
+
"text_level": 1,
|
| 225 |
+
"bbox": [
|
| 226 |
+
84,
|
| 227 |
+
345,
|
| 228 |
+
362,
|
| 229 |
+
359
|
| 230 |
+
],
|
| 231 |
+
"page_idx": 1
|
| 232 |
+
},
|
| 233 |
+
{
|
| 234 |
+
"type": "text",
|
| 235 |
+
"text": "Gaming in the metaverse encompasses playing video games in a virtual world shared by millions globally, including massively multiplayer online games (MMOs), social games, and casual games such as puzzle and card games. These games offer interactive and detailed virtual worlds, providing players with opportunities to explore and interact with their surroundings.",
|
| 236 |
+
"bbox": [
|
| 237 |
+
84,
|
| 238 |
+
364,
|
| 239 |
+
480,
|
| 240 |
+
446
|
| 241 |
+
],
|
| 242 |
+
"page_idx": 1
|
| 243 |
+
},
|
| 244 |
+
{
|
| 245 |
+
"type": "text",
|
| 246 |
+
"text": "The use of deepfakes in the context of gaming in the metaverse raises significant security concerns. Potential issues include identity theft, cyberbullying, distribution of malware, non-fungible token (NFT) scams, and intellectual property theft. As a significant demographic within metaverse gaming [8], minors are particularly vulnerable to these threats due to their limited experience and knowledge of online safety, which can result in sexual exploitation, social engineering, online grooming, and exposure to misinformation.",
|
| 247 |
+
"bbox": [
|
| 248 |
+
86,
|
| 249 |
+
448,
|
| 250 |
+
480,
|
| 251 |
+
570
|
| 252 |
+
],
|
| 253 |
+
"page_idx": 1
|
| 254 |
+
},
|
| 255 |
+
{
|
| 256 |
+
"type": "text",
|
| 257 |
+
"text": "To address these risks, gaming companies must invest in advanced security measures, including identity verification systems, content monitoring and moderation tools, and anti-malware software. Additionally, players should be educated about the dangers of deepfakes and encouraged to report any suspicious activity encountered in the metaverse. Parents and guardians should take an active role in educating minors on the risks of deepfakes, monitoring their online activities, and encouraging them to report any questionable behavior. Online safety education, privacy settings, and parental controls can also help safeguard minors from the potential harms of deepfakes.",
|
| 258 |
+
"bbox": [
|
| 259 |
+
86,
|
| 260 |
+
571,
|
| 261 |
+
480,
|
| 262 |
+
723
|
| 263 |
+
],
|
| 264 |
+
"page_idx": 1
|
| 265 |
+
},
|
| 266 |
+
{
|
| 267 |
+
"type": "text",
|
| 268 |
+
"text": "4 ONLINE MEETINGS IN THE METVERSE",
|
| 269 |
+
"text_level": 1,
|
| 270 |
+
"bbox": [
|
| 271 |
+
84,
|
| 272 |
+
738,
|
| 273 |
+
450,
|
| 274 |
+
753
|
| 275 |
+
],
|
| 276 |
+
"page_idx": 1
|
| 277 |
+
},
|
| 278 |
+
{
|
| 279 |
+
"type": "text",
|
| 280 |
+
"text": "Online meetings in the metaverse offer a virtual space for individuals to communicate and collaborate within a shared immersive digital environment, spanning from basic text-based chat rooms to fully immersive 3D environments, and utilizing voice chat, instant messaging, or other means of communication. These virtual meetings are versatile and can serve various purposes, such as team collaboration, networking, socializing, or attending virtual events, such as conferences.",
|
| 281 |
+
"bbox": [
|
| 282 |
+
86,
|
| 283 |
+
757,
|
| 284 |
+
480,
|
| 285 |
+
866
|
| 286 |
+
],
|
| 287 |
+
"page_idx": 1
|
| 288 |
+
},
|
| 289 |
+
{
|
| 290 |
+
"type": "text",
|
| 291 |
+
"text": "However, the potential risks to privacy and reputation are significant due to the metaverse's ability to create an opportunity for",
|
| 292 |
+
"bbox": [
|
| 293 |
+
84,
|
| 294 |
+
867,
|
| 295 |
+
480,
|
| 296 |
+
895
|
| 297 |
+
],
|
| 298 |
+
"page_idx": 1
|
| 299 |
+
},
|
| 300 |
+
{
|
| 301 |
+
"type": "text",
|
| 302 |
+
"text": "attackers to impersonate others. Deepfake technology can be utilized to deceive others through impersonation, leading to potential fraud or espionage, as demonstrated in a recent example of Elon Musk's deepfake zoom-bombing online meetings [1]. In addition, the authenticity and trust of participants may be compromised by the creation of convincing deepfakes, which could undermine trust and collaboration.",
|
| 303 |
+
"bbox": [
|
| 304 |
+
517,
|
| 305 |
+
106,
|
| 306 |
+
911,
|
| 307 |
+
202
|
| 308 |
+
],
|
| 309 |
+
"page_idx": 1
|
| 310 |
+
},
|
| 311 |
+
{
|
| 312 |
+
"type": "text",
|
| 313 |
+
"text": "To address these issues, it is essential to implement measures such as identity verification, digital signatures, or other security measures to ensure the authenticity of participants. Moreover, the development of tools and technologies that can detect various forms of deepfakes in the metaverse and prevent their use during online meetings may be necessary. Overall, while deepfake technology poses a disruptive risk to online meetings in the metaverse, it is also possible to mitigate these risks effectively by utilizing appropriate security measures and technology.",
|
| 314 |
+
"bbox": [
|
| 315 |
+
517,
|
| 316 |
+
203,
|
| 317 |
+
911,
|
| 318 |
+
328
|
| 319 |
+
],
|
| 320 |
+
"page_idx": 1
|
| 321 |
+
},
|
| 322 |
+
{
|
| 323 |
+
"type": "text",
|
| 324 |
+
"text": "5 VIRTUAL OFFICES IN THE METVERSE",
|
| 325 |
+
"text_level": 1,
|
| 326 |
+
"bbox": [
|
| 327 |
+
517,
|
| 328 |
+
344,
|
| 329 |
+
875,
|
| 330 |
+
358
|
| 331 |
+
],
|
| 332 |
+
"page_idx": 1
|
| 333 |
+
},
|
| 334 |
+
{
|
| 335 |
+
"type": "text",
|
| 336 |
+
"text": "The emergence of virtual offices or workplaces in the metaverse is a recent but promising development that has the potential to revolutionize collaboration and work practices. By leveraging the metaverse's virtual environment, colleagues can work together in a customizable, shared digital workspace that offers numerous benefits, such as reduced overhead cost, increased flexibility, and access to a global talent pool. In addition, virtual offices in the metaverse can enable more dynamic and immersive meetings, greater collaboration, and increased creativity.",
|
| 337 |
+
"bbox": [
|
| 338 |
+
517,
|
| 339 |
+
362,
|
| 340 |
+
911,
|
| 341 |
+
486
|
| 342 |
+
],
|
| 343 |
+
"page_idx": 1
|
| 344 |
+
},
|
| 345 |
+
{
|
| 346 |
+
"type": "text",
|
| 347 |
+
"text": "However, the virtual nature of the metaverse presents a security challenge, as attackers can impersonate team members through the use of deepfakes, leading to data breaches and financial loss. For instance, deepfakes can be employed to create fake identities or to impersonate colleagues, which could lead to trust issues and confusion within the team. For example, an employee could use a deepfake to create a fake version of their boss or co-worker to make it appear as if they are giving instructions. Furthermore, deepfakes can be used to spread false information or propaganda, potentially impacting important decisions.",
|
| 348 |
+
"bbox": [
|
| 349 |
+
517,
|
| 350 |
+
487,
|
| 351 |
+
911,
|
| 352 |
+
625
|
| 353 |
+
],
|
| 354 |
+
"page_idx": 1
|
| 355 |
+
},
|
| 356 |
+
{
|
| 357 |
+
"type": "text",
|
| 358 |
+
"text": "To address the potential threats posed by deepfakes in virtual offices or workplaces in the metaverse, clear guidelines and protocols are required to verify team members' identities and the authenticity of content shared in the virtual environment. This can be critical in establishing one's innocence in the case of a crime. It is also important to remain abreast of the latest deepfake technology developments and to leverage tools and software that can aid in identifying and detecting deepfakes.",
|
| 359 |
+
"bbox": [
|
| 360 |
+
517,
|
| 361 |
+
626,
|
| 362 |
+
911,
|
| 363 |
+
736
|
| 364 |
+
],
|
| 365 |
+
"page_idx": 1
|
| 366 |
+
},
|
| 367 |
+
{
|
| 368 |
+
"type": "text",
|
| 369 |
+
"text": "6 DISCUSSION",
|
| 370 |
+
"text_level": 1,
|
| 371 |
+
"bbox": [
|
| 372 |
+
517,
|
| 373 |
+
752,
|
| 374 |
+
650,
|
| 375 |
+
766
|
| 376 |
+
],
|
| 377 |
+
"page_idx": 1
|
| 378 |
+
},
|
| 379 |
+
{
|
| 380 |
+
"type": "text",
|
| 381 |
+
"text": "Fake Digital Identity and Cloning in the digital world. One of the central ideas underlying the concept of the metaverse is the ability for individuals to create digital replicas of themselves, known as avatars, in the virtual world. These avatars are designed to mimic the physical appearance and behavior of their real-life counterparts, allowing individuals to interact with one another in the digital realm. However, the ability to clone oneself in the metaverse also raises concerns about the potential for impersonation. Unlike the physical world, where impersonating someone",
|
| 382 |
+
"bbox": [
|
| 383 |
+
517,
|
| 384 |
+
770,
|
| 385 |
+
911,
|
| 386 |
+
895
|
| 387 |
+
],
|
| 388 |
+
"page_idx": 1
|
| 389 |
+
},
|
| 390 |
+
{
|
| 391 |
+
"type": "header",
|
| 392 |
+
"text": "Shahroz Tariq, Alsharif Abuadbba, and Kristen Moore",
|
| 393 |
+
"bbox": [
|
| 394 |
+
658,
|
| 395 |
+
75,
|
| 396 |
+
911,
|
| 397 |
+
85
|
| 398 |
+
],
|
| 399 |
+
"page_idx": 1
|
| 400 |
+
},
|
| 401 |
+
{
|
| 402 |
+
"type": "text",
|
| 403 |
+
"text": "convincingly is challenging, it is much easier to create a convincing digital clone of a person in the metaverse due to the abundance of personal information available on the internet that can be used to create deepfakes. The possibility of an attacker using deepfakes to impersonate someone in the metaverse is a significant concern, as it could be used to commit various illicit activities. One potential solution to this problem is the implementation of digital identity verification systems. Such systems could use biometric data, such as facial recognition, to verify an individual's identity before allowing them to create a digital avatar. By doing so, attackers would be prevented from creating digital clones of other people without their consent, thereby ensuring a higher level of security in the metaverse.",
|
| 404 |
+
"bbox": [
|
| 405 |
+
81,
|
| 406 |
+
107,
|
| 407 |
+
480,
|
| 408 |
+
286
|
| 409 |
+
],
|
| 410 |
+
"page_idx": 2
|
| 411 |
+
},
|
| 412 |
+
{
|
| 413 |
+
"type": "text",
|
| 414 |
+
"text": "Deepfakes impact on CIA Triad in the Metaverse. The CIA Triad, established by the National Institute of Standards and Technology (NIST), comprises confidentiality, integrity, and availability, which are the three primary objectives of information security. Confidentiality safeguards sensitive information by ensuring that only authorized parties can access it. Integrity ensures that information remains accurate and unaltered, while availability guarantees that authorized parties can access information when necessary. In the context of the metaverse, deepfakes pose a potential threat to the CIA Triad's objectives of confidentiality, integrity, and availability. Specifically, deepfakes have the potential to compromise confidentiality by enabling the impersonation of authorized individuals, thereby permitting unauthorized access to sensitive areas. Furthermore, deepfakes can undermine the integrity of information, images, or videos, by spreading false or misleading information about individuals or organizations, causing reputational harm. Finally, deepfakes can also disrupt availability by disseminating propaganda or fake news, leading to confusion and chaos.",
|
| 415 |
+
"bbox": [
|
| 416 |
+
81,
|
| 417 |
+
287,
|
| 418 |
+
482,
|
| 419 |
+
535
|
| 420 |
+
],
|
| 421 |
+
"page_idx": 2
|
| 422 |
+
},
|
| 423 |
+
{
|
| 424 |
+
"type": "text",
|
| 425 |
+
"text": "Legal and Regulatory Challenges. The lack of regulations regarding the application of laws from the physical world in the metaverse presents a significant challenge. For instance, identifying and prosecuting an offender who has committed a crime in the virtual world using deepfake impersonation can be difficult. Additionally, jurisdictional issues arise due to the existence of varying laws in different countries. In the event that an attacker is located in a country where there are no legal consequences for their actions in the metaverse, holding them accountable can become problematic. Consequently, a universal set of rules and regulations for the metaverse becomes difficult to establish, given that different countries may have different interpretations of what constitutes criminal behavior. To address security concerns in the metaverse, a coordinated effort between governments, regulatory bodies, and technology companies is necessary [13]. This entails the development of universally applicable standards and regulations that can transcend geographical and jurisdictional barriers. Also, continuous efforts toward the development of new technologies capable of preventing and detecting criminal activities in the metaverse are also required. Privacy issues. Although digital identity verification systems can serve as a potential measure for mitigating deepfake-based impersonation in the metaverse, the utilization of these systems raises concerns. The apprehension stems from the possibility of digital identity verification systems being utilized to track and monitor individuals' virtual activities, consequently, potentially infringing on their privacy and freedom. It is argued that any digital identity",
|
| 426 |
+
"bbox": [
|
| 427 |
+
81,
|
| 428 |
+
536,
|
| 429 |
+
482,
|
| 430 |
+
896
|
| 431 |
+
],
|
| 432 |
+
"page_idx": 2
|
| 433 |
+
},
|
| 434 |
+
{
|
| 435 |
+
"type": "text",
|
| 436 |
+
"text": "verification system deployed in the metaverse must maintain a balance between security and the need for privacy and freedom.",
|
| 437 |
+
"bbox": [
|
| 438 |
+
511,
|
| 439 |
+
107,
|
| 440 |
+
911,
|
| 441 |
+
133
|
| 442 |
+
],
|
| 443 |
+
"page_idx": 2
|
| 444 |
+
},
|
| 445 |
+
{
|
| 446 |
+
"type": "text",
|
| 447 |
+
"text": "Darkverse. The rise of metaverse technology has created new opportunities for both legitimate users and malicious actors. One of the primary concerns is the creation of private spaces that enable illegal activities and communication among criminals, which Trend Micro refers to as the darkverse [14]. This space operates similarly to the dark web, but it exists within the metaverse and is unindexed, making it challenging to locate via standard search engines. The darkverse's pseudo-physical user presence makes it more dangerous than the dark web, as criminals can use proximity-based messaging or other methods to conceal their communications, rendering them difficult for law enforcement agencies to intercept. Darkverse could be used to facilitate illegal activities such as deepfake-based revenge pornography and misinformation campaigns. Despite the possibility of the darkverse being a space for free speech, the primary objective of these spaces is to facilitate illegal activities, and it may become a safe haven for criminals seeking to engage in such activities with minimal risk of detection.",
|
| 448 |
+
"bbox": [
|
| 449 |
+
511,
|
| 450 |
+
135,
|
| 451 |
+
913,
|
| 452 |
+
369
|
| 453 |
+
],
|
| 454 |
+
"page_idx": 2
|
| 455 |
+
},
|
| 456 |
+
{
|
| 457 |
+
"type": "text",
|
| 458 |
+
"text": "7 CONCLUSION",
|
| 459 |
+
"text_level": 1,
|
| 460 |
+
"bbox": [
|
| 461 |
+
513,
|
| 462 |
+
383,
|
| 463 |
+
663,
|
| 464 |
+
397
|
| 465 |
+
],
|
| 466 |
+
"page_idx": 2
|
| 467 |
+
},
|
| 468 |
+
{
|
| 469 |
+
"type": "text",
|
| 470 |
+
"text": "In conclusion, deepfakes in the metaverse present significant security implications, particularly around impersonation. The three scenarios of gaming, online meetings, and virtual offices serve as examples of how these security implications can play out in practice. The lack of physical authentication in the metaverse makes it easier for attackers to impersonate others and commit crimes without being held accountable. Mitigating these security implications will require a combination of technological solutions and legal frameworks that balance security and privacy concerns. As the metaverse continues to evolve, it is important to address these issues proactively to ensure a safe and secure virtual environment for all users.",
|
| 471 |
+
"bbox": [
|
| 472 |
+
511,
|
| 473 |
+
402,
|
| 474 |
+
913,
|
| 475 |
+
566
|
| 476 |
+
],
|
| 477 |
+
"page_idx": 2
|
| 478 |
+
},
|
| 479 |
+
{
|
| 480 |
+
"type": "text",
|
| 481 |
+
"text": "ACKNOWLEDGMENTS",
|
| 482 |
+
"text_level": 1,
|
| 483 |
+
"bbox": [
|
| 484 |
+
514,
|
| 485 |
+
580,
|
| 486 |
+
712,
|
| 487 |
+
595
|
| 488 |
+
],
|
| 489 |
+
"page_idx": 2
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"type": "text",
|
| 493 |
+
"text": "This research was financially supported by CSIRO's Collaborative Intelligence Future Science Platform. The diagram has been designed using images from Flaticon.com",
|
| 494 |
+
"bbox": [
|
| 495 |
+
511,
|
| 496 |
+
599,
|
| 497 |
+
913,
|
| 498 |
+
642
|
| 499 |
+
],
|
| 500 |
+
"page_idx": 2
|
| 501 |
+
},
|
| 502 |
+
{
|
| 503 |
+
"type": "text",
|
| 504 |
+
"text": "REFERENCES",
|
| 505 |
+
"text_level": 1,
|
| 506 |
+
"bbox": [
|
| 507 |
+
516,
|
| 508 |
+
655,
|
| 509 |
+
633,
|
| 510 |
+
669
|
| 511 |
+
],
|
| 512 |
+
"page_idx": 2
|
| 513 |
+
},
|
| 514 |
+
{
|
| 515 |
+
"type": "list",
|
| 516 |
+
"sub_type": "ref_text",
|
| 517 |
+
"list_items": [
|
| 518 |
+
"[1] Samantha Cole. 2020. This Open-Source Program Deepfakes You During Zoom Meetings, in Real Time. https://www.vice.com/en/article/g5xagy/this-open-source-program-deepfakes-you-during-zoom-meetings-in-real-time Accessed: 13-March-2023.",
|
| 519 |
+
"[2] Michael del Castillo. 2022. Facebook's Metaverse Could Be Overrun By Deep Fakes And Other Misinformation If These Non-Profits Don't Succeed. https://www.forbes.com/sites/michaeldelcastillo/2022/08/29/facebookmetaverse-could-be-overrun-by-deep-fakes-and-other-misinformation-if-these-non-profits-dont-succeed/?sh=185318742737 Accessed: 13-March-2023.",
|
| 520 |
+
"[3] Hasam Khalid, Minha Kim, Shahroz Tariq, and Simon S Woo. 2021. Evaluation of an Audio-Video Multimodal Deepfake Dataset using Unimodal and Multimodal Detectors. In Proceedings of the 1st Workshop on Synthetic Multimedia-Audiovisual Deepfake Generation and Detection. 7-15.",
|
| 521 |
+
"[4] Hasam Khalid, Shahroz Tariq, Minha Kim, and Simon S Woo. 2021. FakeAVCeleb: A Novel Audio-Video Multimodal Deepfake Dataset. arXiv preprint arXiv:2108.05080 (2021).",
|
| 522 |
+
"[5] Jeongho Kim, Shahroz Tariq, and Simon S Woo. 2022. PTD: Privacy-Preserving Human Face Processing Framework using Tensor Decomposition. In Proceedings of the 37th ACM/SIGAPP Symposium on Applied Computing. 1296-1303. https://doi.org/10.1145/3477314.3507036",
|
| 523 |
+
"[6] Minha Kim, Shahroz Tariq, and Simon S Woo. 2021. Cored: Generalizing fake media detection with continual representation using distillation. In Proceedings"
|
| 524 |
+
],
|
| 525 |
+
"bbox": [
|
| 526 |
+
521,
|
| 527 |
+
672,
|
| 528 |
+
913,
|
| 529 |
+
895
|
| 530 |
+
],
|
| 531 |
+
"page_idx": 2
|
| 532 |
+
},
|
| 533 |
+
{
|
| 534 |
+
"type": "header",
|
| 535 |
+
"text": "Deepfake in the Metaverse: Security Implications for Virtual Gaming, Meetings, and Offices",
|
| 536 |
+
"bbox": [
|
| 537 |
+
84,
|
| 538 |
+
74,
|
| 539 |
+
519,
|
| 540 |
+
85
|
| 541 |
+
],
|
| 542 |
+
"page_idx": 2
|
| 543 |
+
},
|
| 544 |
+
{
|
| 545 |
+
"type": "list",
|
| 546 |
+
"sub_type": "ref_text",
|
| 547 |
+
"list_items": [
|
| 548 |
+
"of the 29th ACM International Conference on Multimedia. 337-346.",
|
| 549 |
+
"[7] Minha Kim, Shahroz Tariq, and Simon S Woo. 2021. FReTAL: Generalizing Deepfake Detection using Knowledge Distillation and Representation Learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 1001-1012.",
|
| 550 |
+
"[8] David Kleeman. 2021. Kids have Kickstarted the Metaverse. https://techonomy.com/kids-have-kickstarted-the-metaverse/ Accessed: 13-March-2023.",
|
| 551 |
+
"[9] Binh Le, Shahroz Tariq, Alsharif Abuadbba, Kristen Moore, and Simon Woo. 2023. Why Do Deepfake Detectors Fail? arXiv preprint arXiv:2302.13156 (2023).",
|
| 552 |
+
"[10] Sangyup Lee, Shahroz Tariq, Junyaup Kim, and Simon S Woo. 2021. TAR: Generalized Forensic Framework to Detect Deepfakes Using Weakly Supervised Learning. In IFIP International Conference on ICT Systems Security and Privacy Protection. Springer, 351-366.",
|
| 553 |
+
"[11] Sangyup Lee, Shahroz Tariq, Youjin Shin, and Simon S Woo. 2021. Detecting handcrafted facial image manipulations and GAN-generated facial images using Shallow-FakeFaceNet. Applied Soft Computing 105 (2021), 107256.",
|
| 554 |
+
"[12] Steven Levy. 2022. What's Deepfake Bruce Willis Doing in My Metaverse? https://www.wired.com/story/plaintext-bruce-willis-deepfake-metaverse Accessed: 13-March-2023.",
|
| 555 |
+
"[13] Reed Smith LLP. 2022. Reed Smith Guide to the Metaverse, 2nd Edition. Reed Smith LLP. https://www.reedsmith.com/en/perspectives/metaverse",
|
| 556 |
+
"[14] Trend Micro. 2023. Darkverse. https://www.trendmicro.com/vinfo/us/security/ definition/darkverse Accessed: 13-March-2023.",
|
| 557 |
+
"[15] Microsoft. 2023. Microsoft Mesh. https://www.microsoft.com/en-us/mesh Accessed: 13-March-2023.",
|
| 558 |
+
"[16] Yisroel Mirsky and Wenke Lee. 2021. The creation and detection of deepfakes: A survey. ACM Computing Surveys (CSUR) 54, 1 (2021), 1-41.",
|
| 559 |
+
"[17] Nvidia. 2023. NVIDIA Omniverse. https://www.nvidia.com/en-gb/omniverse/ Accessed: 13-March-2023.",
|
| 560 |
+
"[18] Meta Platforms. 2023. Facebook Metaverse. https://about.meta.com/metaverse Accessed: 13-March-2023."
|
| 561 |
+
],
|
| 562 |
+
"bbox": [
|
| 563 |
+
86,
|
| 564 |
+
109,
|
| 565 |
+
482,
|
| 566 |
+
421
|
| 567 |
+
],
|
| 568 |
+
"page_idx": 3
|
| 569 |
+
},
|
| 570 |
+
{
|
| 571 |
+
"type": "list",
|
| 572 |
+
"sub_type": "ref_text",
|
| 573 |
+
"list_items": [
|
| 574 |
+
"[19] Meta Platforms. 2023. Meta Horizon Worlds. https://www.meta.com/gb/en/horizon-worlds/ Accessed: 13-March-2023.",
|
| 575 |
+
"[20] Gautam Raturi. 2022. Roblox Metaverse: Everything You Need to Know. https://medium.com/codex/everything-you-need-to-know-about-the-roblox-metaverse-928e9531693 Accessed: 13-March-2023.",
|
| 576 |
+
"[21] Darin Stewart. 2021. Maverick Research: Deepfakes Will Kill the Metaverse; Synthetic Media Could Save It. https://www.gartner.com/en/documents/4008295 Accessed: 13-March-2023.",
|
| 577 |
+
"[22] Shahroz Tariq, Sowon Jeon, and Simon Woo. 2021. Am I a Real or Fake Celebrity? Measuring Commercial Face Recognition Web APIs under Deepfake Impersonation Attack. arXiv preprint arXiv:2103.00847 (2021).",
|
| 578 |
+
"[23] Shahroz Tariq, Sangyup Lee, Hoyoung Kim, Youjin Shin, and Simon S Woo. 2018. Detecting both machine and human created fake face images in the wild. In Proceedings of the 2nd International Workshop on Multimedia Privacy and Security. ACM, 81-87.",
|
| 579 |
+
"[24] Shahroz Tariq, Sangyup Lee, Hoyoung Kim, Youjin Shin, and Simon S Woo. 2019. GAN is a friend or foe?: a framework to detect various fake face images. In Proceedings of the 34th ACM/SIGAPP Symposium on Applied Computing. ACM, 1296-1303.",
|
| 580 |
+
"[25] Shahroz Tariq, Sangyup Lee, and Simon Woo. 2021. One detector to rule them all: Towards a general deepfake attack detection framework. In Proceedings of the web conference 2021. 3625-3637.",
|
| 581 |
+
"[26] Shahroz Tariq, Sangyup Lee, and Simon S Woo. 2020. A Convolutional LSTM based Residual Network for Deepfake Video Detection. arXiv preprint arXiv:2009.07480 (2020).",
|
| 582 |
+
"[27] Yuntao Wang, Zhou Su, Ning Zhang, Rui Xing, Dongxiao Liu, Tom H Luan, and Xuemin Shen. 2022. A survey on metaverse: Fundamentals, security, and privacy. IEEE Communications Surveys & Tutorials (2022).",
|
| 583 |
+
"[28] Emma Woollacott. 2022. Rise of deepfakes: who can you trust in the metaverse? https://cybernews.com/security/rise-of-deepfakes Accessed: 13-March-2023."
|
| 584 |
+
],
|
| 585 |
+
"bbox": [
|
| 586 |
+
516,
|
| 587 |
+
109,
|
| 588 |
+
911,
|
| 589 |
+
412
|
| 590 |
+
],
|
| 591 |
+
"page_idx": 3
|
| 592 |
+
},
|
| 593 |
+
{
|
| 594 |
+
"type": "header",
|
| 595 |
+
"text": "Shahroz Tariq, Alsharif Abuadbba, and Kristen Moore",
|
| 596 |
+
"bbox": [
|
| 597 |
+
658,
|
| 598 |
+
75,
|
| 599 |
+
911,
|
| 600 |
+
85
|
| 601 |
+
],
|
| 602 |
+
"page_idx": 3
|
| 603 |
+
}
|
| 604 |
+
]
|
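The *_content_list.json entries above follow a flat per-block schema: a "type", the extracted "text", an optional "text_level" flag marking headings, a "bbox" given as [x0, y0, x1, y1] in page pixel coordinates, and a zero-based "page_idx". Below is a minimal sketch, assuming only the schema visible in the entries above, of how such a file could be loaded and its text blocks grouped by page; the example path is the file added in this batch, while the function name and the grouping choice are illustrative rather than part of the dataset tooling.

import json
from collections import defaultdict

def load_content_list(path):
    # Assumes the file is a top-level JSON array of block dicts carrying
    # "type", "text", "bbox" and "page_idx" keys, as in the entries above.
    with open(path, "r", encoding="utf-8") as f:
        blocks = json.load(f)
    pages = defaultdict(list)
    for block in blocks:
        if block.get("type") != "text":
            continue  # skip "image", "list", "header", "aside_text" blocks
        pages[block.get("page_idx", 0)].append({
            "text": block.get("text", ""),
            "bbox": block.get("bbox"),                  # [x0, y0, x1, y1] in page pixels
            "is_heading": block.get("text_level") == 1,
        })
    return dict(pages)

# Hypothetical usage with the file from this batch:
# pages = load_content_list("2303.14xxx/2303.14612/"
#                           "e1a8feb8-e3e0-42e1-846f-d4b91a7fc621_content_list.json")
# print(len(pages.get(0, [])), "text blocks on the first page")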
2303.14xxx/2303.14612/e1a8feb8-e3e0-42e1-846f-d4b91a7fc621_model.json
ADDED
|
@@ -0,0 +1,890 @@
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "title",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.113,
|
| 7 |
+
0.102,
|
| 8 |
+
0.887,
|
| 9 |
+
0.152
|
| 10 |
+
],
|
| 11 |
+
"angle": 0,
|
| 12 |
+
"content": "Deepfake in the Metaverse: Security Implications for Virtual Gaming, Meetings, and Offices"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.129,
|
| 18 |
+
0.163,
|
| 19 |
+
0.334,
|
| 20 |
+
0.21
|
| 21 |
+
],
|
| 22 |
+
"angle": 0,
|
| 23 |
+
"content": "Shahroz Tariq \nCSIRO's Data61, Australia \nshahroz.tariq@data61.csiro.au"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.389,
|
| 29 |
+
0.163,
|
| 30 |
+
0.611,
|
| 31 |
+
0.21
|
| 32 |
+
],
|
| 33 |
+
"angle": 0,
|
| 34 |
+
"content": "Alsharif Abuadbba \nCSIRO's Data61, Australia \nsharif.abuadbba@data61.csiro.au"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.662,
|
| 40 |
+
0.163,
|
| 41 |
+
0.874,
|
| 42 |
+
0.21
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "Kristen Moore \nCSIRO's Data61, Australia \nkristen.moore@data61.csiro.au"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "title",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.084,
|
| 51 |
+
0.219,
|
| 52 |
+
0.185,
|
| 53 |
+
0.233
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": "ABSTRACT"
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.082,
|
| 62 |
+
0.238,
|
| 63 |
+
0.485,
|
| 64 |
+
0.46
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "The metaverse has gained significant attention from various industries due to its potential to create a fully immersive and interactive virtual world. However, the integration of deepfakes in the metaverse brings serious security implications, particularly with regard to impersonation. This paper examines the security implications of deepfakes in the metaverse, specifically in the context of gaming, online meetings, and virtual offices. The paper discusses how deepfakes can be used to impersonate in gaming scenarios, how online meetings in the metaverse open the door for impersonation, and how virtual offices in the metaverse lack physical authentication, making it easier for attackers to impersonate someone. The implications of these security concerns are discussed in relation to the confidentiality, integrity, and availability (CIA) triad. The paper further explores related issues such as the darkverse, and digital cloning, as well as regulatory and privacy concerns associated with addressing security threats in the virtual world."
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "title",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.084,
|
| 73 |
+
0.471,
|
| 74 |
+
0.221,
|
| 75 |
+
0.484
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": "CCS CONCEPTS"
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.082,
|
| 84 |
+
0.489,
|
| 85 |
+
0.483,
|
| 86 |
+
0.533
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "- Human-centered computing \\(\\rightarrow\\) Virtual reality; \\(\\cdot\\) Security and privacy \\(\\rightarrow\\) Social engineering attacks; \\(\\cdot\\) Social and professional topics \\(\\rightarrow\\) Identity theft."
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "title",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.084,
|
| 95 |
+
0.543,
|
| 96 |
+
0.192,
|
| 97 |
+
0.557
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": "KEYWORDS"
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.082,
|
| 106 |
+
0.561,
|
| 107 |
+
0.483,
|
| 108 |
+
0.59
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "Metaverse, Deepfake, Security, Impersonation, Gaming, Online meetings, Virtual offices"
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "title",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.084,
|
| 117 |
+
0.601,
|
| 118 |
+
0.257,
|
| 119 |
+
0.615
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": "1 INTRODUCTION"
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"bbox": [
|
| 127 |
+
0.082,
|
| 128 |
+
0.619,
|
| 129 |
+
0.483,
|
| 130 |
+
0.785
|
| 131 |
+
],
|
| 132 |
+
"angle": 0,
|
| 133 |
+
"content": "The emergence of the metaverse [27] has captured the attention of the technology community, giving rise to widespread anticipation and debate. Prominent companies such as Meta (formerly Facebook), Microsoft, and Nvidia have expressed interest in the concept of a fully immersive virtual world, where individuals can interact with one another and their surroundings. To this end, various companies are releasing their own metaverse experiences, including Meta's Horizon Worlds [18, 19], Roblox's gaming metaverse [20], Microsoft's Mesh [15], and Nvidia's Omniverse [17]. While the potential applications of the metaverse are vast, with possibilities ranging from gaming to virtual meetings and offices, each method has its own benefits and limitations."
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"bbox": [
|
| 138 |
+
0.082,
|
| 139 |
+
0.786,
|
| 140 |
+
0.483,
|
| 141 |
+
0.897
|
| 142 |
+
],
|
| 143 |
+
"angle": 0,
|
| 144 |
+
"content": "However, the applicability of deepfakes [9, 16] in the metaverse presents significant security implications, particularly with regard to impersonation. Deepfakes are computer-generated images or videos that can be manipulated to look like real people or events. The ability to generate such content has significantly increased with advances in machine learning and artificial intelligence. Recently, deepfakes in the metaverse have become a topic of discussion on different forums and media articles [2, 12, 21, 28]."
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "image",
|
| 148 |
+
"bbox": [
|
| 149 |
+
0.517,
|
| 150 |
+
0.217,
|
| 151 |
+
0.916,
|
| 152 |
+
0.383
|
| 153 |
+
],
|
| 154 |
+
"angle": 0,
|
| 155 |
+
"content": null
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"type": "image_caption",
|
| 159 |
+
"bbox": [
|
| 160 |
+
0.514,
|
| 161 |
+
0.393,
|
| 162 |
+
0.916,
|
| 163 |
+
0.464
|
| 164 |
+
],
|
| 165 |
+
"angle": 0,
|
| 166 |
+
"content": "Figure 1: The three most commonly publicized scenarios in the metaverse: virtual gaming, virtual meetings, and virtual offices. These applications highlight the potential for immersive virtual experiences, but also raise concerns about security and privacy in this emerging technology."
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"type": "text",
|
| 170 |
+
"bbox": [
|
| 171 |
+
0.513,
|
| 172 |
+
0.494,
|
| 173 |
+
0.916,
|
| 174 |
+
0.689
|
| 175 |
+
],
|
| 176 |
+
"angle": 0,
|
| 177 |
+
"content": "The utilisation of metaverse technology has been associated with numerous benefits, including the provision of a fully immersive virtual environment that facilitates interaction with other users and virtual objects. Nevertheless, the potential deployment of deepfake technology to perpetrate malicious activities, such as impersonation, has shed light on the limitations of the technology and the need for effective security measures to be put in place. It is noteworthy that existing state-of-the-art deepfake detection methods [3-7, 10, 11, 22-26] primarily focus on detecting deepfakes in the physical world and do not take into account the possibility of deepfakes in the metaverse. Thus, the current research is challenged with an evident gap in the identification and prevention of deepfakes in the metaverse, making it imperative to shed light on this research gap."
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"type": "text",
|
| 181 |
+
"bbox": [
|
| 182 |
+
0.513,
|
| 183 |
+
0.689,
|
| 184 |
+
0.916,
|
| 185 |
+
0.897
|
| 186 |
+
],
|
| 187 |
+
"angle": 0,
|
| 188 |
+
"content": "In this paper, we explore the security implications of deepfakes in the metaverse. We will start by defining the concept of the metaverse and how it is expected to be used in three scenarios: (i) gaming, (ii) online meetings, and (iii) virtual offices (see Fig. 1). We will also discuss the potential dangers of deepfake in each of the three scenarios by exploring the potential consequences of deepfake misuse in the metaverse, such as the ability to impersonate others, manipulate meetings, and disrupt virtual work environments. We will also discuss potential solutions to mitigate these risks and ensure the safety and security of metaverse users. We also explore the security implications of deepfakes in the metaverse to fake digital identity, the CIA triad, legal and regulatory challenges, privacy issues, and darkverse. Through this work, we aim to explore the potential security implications of deepfakes in the metaverse and to raise awareness of the risks and challenges posed by this technology."
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"type": "aside_text",
|
| 192 |
+
"bbox": [
|
| 193 |
+
0.023,
|
| 194 |
+
0.266,
|
| 195 |
+
0.059,
|
| 196 |
+
0.708
|
| 197 |
+
],
|
| 198 |
+
"angle": 270,
|
| 199 |
+
"content": "arXiv:2303.14612v2 [cs.CR] 10 Sep 2023"
|
| 200 |
+
}
|
| 201 |
+
],
|
| 202 |
+
[
|
| 203 |
+
{
|
| 204 |
+
"type": "header",
|
| 205 |
+
"bbox": [
|
| 206 |
+
0.659,
|
| 207 |
+
0.076,
|
| 208 |
+
0.912,
|
| 209 |
+
0.086
|
| 210 |
+
],
|
| 211 |
+
"angle": 0,
|
| 212 |
+
"content": "Shahroz Tariq, Alsharif Abuadbba, and Kristen Moore"
|
| 213 |
+
},
|
| 214 |
+
{
|
| 215 |
+
"type": "title",
|
| 216 |
+
"bbox": [
|
| 217 |
+
0.086,
|
| 218 |
+
0.106,
|
| 219 |
+
0.35,
|
| 220 |
+
0.12
|
| 221 |
+
],
|
| 222 |
+
"angle": 0,
|
| 223 |
+
"content": "2 WHAT IS THE METAVERSE?"
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"type": "text",
|
| 227 |
+
"bbox": [
|
| 228 |
+
0.087,
|
| 229 |
+
0.125,
|
| 230 |
+
0.481,
|
| 231 |
+
0.331
|
| 232 |
+
],
|
| 233 |
+
"angle": 0,
|
| 234 |
+
"content": "The metaverse is a collective virtual shared space that offers an immersive and interactive 3D environment, facilitating real-time engagement with digital content and other individuals through advanced technologies such as virtual reality (VR) and augmented reality (AR). While originally a concept in science fiction, such as in Neal Stephenson's novel \"Snow Crash\" and the movie \"The Matrix,\" recent technological advances have made the metaverse increasingly feasible. As a new form of social and economic infrastructure, the metaverse provides opportunities for people to work, play, socialize, learn, and consume content within a shared virtual space. While different visions of the metaverse exist among companies, organizations, and individuals, ranging from a fully autonomous world to a combination of different virtual platforms and experiences, the metaverse is considered a transformative technology that has the potential to impact various aspects of our lives."
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "title",
|
| 238 |
+
"bbox": [
|
| 239 |
+
0.086,
|
| 240 |
+
0.346,
|
| 241 |
+
0.364,
|
| 242 |
+
0.361
|
| 243 |
+
],
|
| 244 |
+
"angle": 0,
|
| 245 |
+
"content": "3 GAMING IN THE METVERSE"
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "text",
|
| 249 |
+
"bbox": [
|
| 250 |
+
0.086,
|
| 251 |
+
0.365,
|
| 252 |
+
0.481,
|
| 253 |
+
0.448
|
| 254 |
+
],
|
| 255 |
+
"angle": 0,
|
| 256 |
+
"content": "Gaming in the metaverse encompasses playing video games in a virtual world shared by millions globally, including massively multiplayer online games (MMOs), social games, and casual games such as puzzle and card games. These games offer interactive and detailed virtual worlds, providing players with opportunities to explore and interact with their surroundings."
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "text",
|
| 260 |
+
"bbox": [
|
| 261 |
+
0.087,
|
| 262 |
+
0.449,
|
| 263 |
+
0.481,
|
| 264 |
+
0.571
|
| 265 |
+
],
|
| 266 |
+
"angle": 0,
|
| 267 |
+
"content": "The use of deepfakes in the context of gaming in the metaverse raises significant security concerns. Potential issues include identity theft, cyberbullying, distribution of malware, non-fungible token (NFT) scams, and intellectual property theft. As a significant demographic within metaverse gaming [8], minors are particularly vulnerable to these threats due to their limited experience and knowledge of online safety, which can result in sexual exploitation, social engineering, online grooming, and exposure to misinformation."
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "text",
|
| 271 |
+
"bbox": [
|
| 272 |
+
0.087,
|
| 273 |
+
0.573,
|
| 274 |
+
0.481,
|
| 275 |
+
0.724
|
| 276 |
+
],
|
| 277 |
+
"angle": 0,
|
| 278 |
+
"content": "To address these risks, gaming companies must invest in advanced security measures, including identity verification systems, content monitoring and moderation tools, and anti-malware software. Additionally, players should be educated about the dangers of deepfakes and encouraged to report any suspicious activity encountered in the metaverse. Parents and guardians should take an active role in educating minors on the risks of deepfakes, monitoring their online activities, and encouraging them to report any questionable behavior. Online safety education, privacy settings, and parental controls can also help safeguard minors from the potential harms of deepfakes."
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "title",
|
| 282 |
+
"bbox": [
|
| 283 |
+
0.086,
|
| 284 |
+
0.739,
|
| 285 |
+
0.452,
|
| 286 |
+
0.754
|
| 287 |
+
],
|
| 288 |
+
"angle": 0,
|
| 289 |
+
"content": "4 ONLINE MEETINGS IN THE METVERSE"
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "text",
|
| 293 |
+
"bbox": [
|
| 294 |
+
0.087,
|
| 295 |
+
0.758,
|
| 296 |
+
0.481,
|
| 297 |
+
0.867
|
| 298 |
+
],
|
| 299 |
+
"angle": 0,
|
| 300 |
+
"content": "Online meetings in the metaverse offer a virtual space for individuals to communicate and collaborate within a shared immersive digital environment, spanning from basic text-based chat rooms to fully immersive 3D environments, and utilizing voice chat, instant messaging, or other means of communication. These virtual meetings are versatile and can serve various purposes, such as team collaboration, networking, socializing, or attending virtual events, such as conferences."
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "text",
|
| 304 |
+
"bbox": [
|
| 305 |
+
0.086,
|
| 306 |
+
0.868,
|
| 307 |
+
0.481,
|
| 308 |
+
0.896
|
| 309 |
+
],
|
| 310 |
+
"angle": 0,
|
| 311 |
+
"content": "However, the potential risks to privacy and reputation are significant due to the metaverse's ability to create an opportunity for"
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"bbox": [
|
| 316 |
+
0.519,
|
| 317 |
+
0.107,
|
| 318 |
+
0.913,
|
| 319 |
+
0.203
|
| 320 |
+
],
|
| 321 |
+
"angle": 0,
|
| 322 |
+
"content": "attackers to impersonate others. Deepfake technology can be utilized to deceive others through impersonation, leading to potential fraud or espionage, as demonstrated in a recent example of Elon Musk's deepfake zoom-bombing online meetings [1]. In addition, the authenticity and trust of participants may be compromised by the creation of convincing deepfakes, which could undermine trust and collaboration."
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "text",
|
| 326 |
+
"bbox": [
|
| 327 |
+
0.519,
|
| 328 |
+
0.204,
|
| 329 |
+
0.913,
|
| 330 |
+
0.329
|
| 331 |
+
],
|
| 332 |
+
"angle": 0,
|
| 333 |
+
"content": "To address these issues, it is essential to implement measures such as identity verification, digital signatures, or other security measures to ensure the authenticity of participants. Moreover, the development of tools and technologies that can detect various forms of deepfakes in the metaverse and prevent their use during online meetings may be necessary. Overall, while deepfake technology poses a disruptive risk to online meetings in the metaverse, it is also possible to mitigate these risks effectively by utilizing appropriate security measures and technology."
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "title",
|
| 337 |
+
"bbox": [
|
| 338 |
+
0.519,
|
| 339 |
+
0.345,
|
| 340 |
+
0.877,
|
| 341 |
+
0.359
|
| 342 |
+
],
|
| 343 |
+
"angle": 0,
|
| 344 |
+
"content": "5 VIRTUAL OFFICES IN THE METVERSE"
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
"type": "text",
|
| 348 |
+
"bbox": [
|
| 349 |
+
0.519,
|
| 350 |
+
0.363,
|
| 351 |
+
0.913,
|
| 352 |
+
0.487
|
| 353 |
+
],
|
| 354 |
+
"angle": 0,
|
| 355 |
+
"content": "The emergence of virtual offices or workplaces in the metaverse is a recent but promising development that has the potential to revolutionize collaboration and work practices. By leveraging the metaverse's virtual environment, colleagues can work together in a customizable, shared digital workspace that offers numerous benefits, such as reduced overhead cost, increased flexibility, and access to a global talent pool. In addition, virtual offices in the metaverse can enable more dynamic and immersive meetings, greater collaboration, and increased creativity."
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"type": "text",
|
| 359 |
+
"bbox": [
|
| 360 |
+
0.519,
|
| 361 |
+
0.488,
|
| 362 |
+
0.913,
|
| 363 |
+
0.626
|
| 364 |
+
],
|
| 365 |
+
"angle": 0,
|
| 366 |
+
"content": "However, the virtual nature of the metaverse presents a security challenge, as attackers can impersonate team members through the use of deepfakes, leading to data breaches and financial loss. For instance, deepfakes can be employed to create fake identities or to impersonate colleagues, which could lead to trust issues and confusion within the team. For example, an employee could use a deepfake to create a fake version of their boss or co-worker to make it appear as if they are giving instructions. Furthermore, deepfakes can be used to spread false information or propaganda, potentially impacting important decisions."
|
| 367 |
+
},
|
| 368 |
+
{
|
| 369 |
+
"type": "text",
|
| 370 |
+
"bbox": [
|
| 371 |
+
0.519,
|
| 372 |
+
0.627,
|
| 373 |
+
0.913,
|
| 374 |
+
0.737
|
| 375 |
+
],
|
| 376 |
+
"angle": 0,
|
| 377 |
+
"content": "To address the potential threats posed by deepfakes in virtual offices or workplaces in the metaverse, clear guidelines and protocols are required to verify team members' identities and the authenticity of content shared in the virtual environment. This can be critical in establishing one's innocence in the case of a crime. It is also important to remain abreast of the latest deepfake technology developments and to leverage tools and software that can aid in identifying and detecting deepfakes."
|
| 378 |
+
},
|
| 379 |
+
{
|
| 380 |
+
"type": "title",
|
| 381 |
+
"bbox": [
|
| 382 |
+
0.519,
|
| 383 |
+
0.753,
|
| 384 |
+
0.651,
|
| 385 |
+
0.767
|
| 386 |
+
],
|
| 387 |
+
"angle": 0,
|
| 388 |
+
"content": "6 DISCUSSION"
|
| 389 |
+
},
|
| 390 |
+
{
|
| 391 |
+
"type": "text",
|
| 392 |
+
"bbox": [
|
| 393 |
+
0.519,
|
| 394 |
+
0.771,
|
| 395 |
+
0.913,
|
| 396 |
+
0.896
|
| 397 |
+
],
|
| 398 |
+
"angle": 0,
|
| 399 |
+
"content": "Fake Digital Identity and Cloning in the digital world. One of the central ideas underlying the concept of the metaverse is the ability for individuals to create digital replicas of themselves, known as avatars, in the virtual world. These avatars are designed to mimic the physical appearance and behavior of their real-life counterparts, allowing individuals to interact with one another in the digital realm. However, the ability to clone oneself in the metaverse also raises concerns about the potential for impersonation. Unlike the physical world, where impersonating someone"
|
| 400 |
+
}
|
| 401 |
+
],
|
| 402 |
+
[
|
| 403 |
+
{
|
| 404 |
+
"type": "header",
|
| 405 |
+
"bbox": [
|
| 406 |
+
0.085,
|
| 407 |
+
0.075,
|
| 408 |
+
0.521,
|
| 409 |
+
0.087
|
| 410 |
+
],
|
| 411 |
+
"angle": 0,
|
| 412 |
+
"content": "Deepfake in the Metaverse: Security Implications for Virtual Gaming, Meetings, and Offices"
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"type": "text",
|
| 416 |
+
"bbox": [
|
| 417 |
+
0.082,
|
| 418 |
+
0.108,
|
| 419 |
+
0.482,
|
| 420 |
+
0.287
|
| 421 |
+
],
|
| 422 |
+
"angle": 0,
|
| 423 |
+
"content": "convincingly is challenging, it is much easier to create a convincing digital clone of a person in the metaverse due to the abundance of personal information available on the internet that can be used to create deepfakes. The possibility of an attacker using deepfakes to impersonate someone in the metaverse is a significant concern, as it could be used to commit various illicit activities. One potential solution to this problem is the implementation of digital identity verification systems. Such systems could use biometric data, such as facial recognition, to verify an individual's identity before allowing them to create a digital avatar. By doing so, attackers would be prevented from creating digital clones of other people without their consent, thereby ensuring a higher level of security in the metaverse."
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"type": "text",
|
| 427 |
+
"bbox": [
|
| 428 |
+
0.082,
|
| 429 |
+
0.288,
|
| 430 |
+
0.483,
|
| 431 |
+
0.536
|
| 432 |
+
],
|
| 433 |
+
"angle": 0,
|
| 434 |
+
"content": "Deepfakes impact on CIA Triad in the Metaverse. The CIA Triad, established by the National Institute of Standards and Technology (NIST), comprises confidentiality, integrity, and availability, which are the three primary objectives of information security. Confidentiality safeguards sensitive information by ensuring that only authorized parties can access it. Integrity ensures that information remains accurate and unaltered, while availability guarantees that authorized parties can access information when necessary. In the context of the metaverse, deepfakes pose a potential threat to the CIA Triad's objectives of confidentiality, integrity, and availability. Specifically, deepfakes have the potential to compromise confidentiality by enabling the impersonation of authorized individuals, thereby permitting unauthorized access to sensitive areas. Furthermore, deepfakes can undermine the integrity of information, images, or videos, by spreading false or misleading information about individuals or organizations, causing reputational harm. Finally, deepfakes can also disrupt availability by disseminating propaganda or fake news, leading to confusion and chaos."
|
| 435 |
+
},
|
| 436 |
+
{
|
| 437 |
+
"type": "text",
|
| 438 |
+
"bbox": [
|
| 439 |
+
0.082,
|
| 440 |
+
0.537,
|
| 441 |
+
0.483,
|
| 442 |
+
0.897
|
| 443 |
+
],
|
| 444 |
+
"angle": 0,
|
| 445 |
+
"content": "Legal and Regulatory Challenges. The lack of regulations regarding the application of laws from the physical world in the metaverse presents a significant challenge. For instance, identifying and prosecuting an offender who has committed a crime in the virtual world using deepfake impersonation can be difficult. Additionally, jurisdictional issues arise due to the existence of varying laws in different countries. In the event that an attacker is located in a country where there are no legal consequences for their actions in the metaverse, holding them accountable can become problematic. Consequently, a universal set of rules and regulations for the metaverse becomes difficult to establish, given that different countries may have different interpretations of what constitutes criminal behavior. To address security concerns in the metaverse, a coordinated effort between governments, regulatory bodies, and technology companies is necessary [13]. This entails the development of universally applicable standards and regulations that can transcend geographical and jurisdictional barriers. Also, continuous efforts toward the development of new technologies capable of preventing and detecting criminal activities in the metaverse are also required. Privacy issues. Although digital identity verification systems can serve as a potential measure for mitigating deepfake-based impersonation in the metaverse, the utilization of these systems raises concerns. The apprehension stems from the possibility of digital identity verification systems being utilized to track and monitor individuals' virtual activities, consequently, potentially infringing on their privacy and freedom. It is argued that any digital identity"
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"type": "text",
|
| 449 |
+
"bbox": [
|
| 450 |
+
0.513,
|
| 451 |
+
0.108,
|
| 452 |
+
0.912,
|
| 453 |
+
0.135
|
| 454 |
+
],
|
| 455 |
+
"angle": 0,
|
| 456 |
+
"content": "verification system deployed in the metaverse must maintain a balance between security and the need for privacy and freedom."
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"type": "text",
|
| 460 |
+
"bbox": [
|
| 461 |
+
0.513,
|
| 462 |
+
0.136,
|
| 463 |
+
0.915,
|
| 464 |
+
0.37
|
| 465 |
+
],
|
| 466 |
+
"angle": 0,
|
| 467 |
+
"content": "Darkverse. The rise of metaverse technology has created new opportunities for both legitimate users and malicious actors. One of the primary concerns is the creation of private spaces that enable illegal activities and communication among criminals, which Trend Micro refers to as the darkverse [14]. This space operates similarly to the dark web, but it exists within the metaverse and is unindexed, making it challenging to locate via standard search engines. The darkverse's pseudo-physical user presence makes it more dangerous than the dark web, as criminals can use proximity-based messaging or other methods to conceal their communications, rendering them difficult for law enforcement agencies to intercept. Darkverse could be used to facilitate illegal activities such as deepfake-based revenge pornography and misinformation campaigns. Despite the possibility of the darkverse being a space for free speech, the primary objective of these spaces is to facilitate illegal activities, and it may become a safe haven for criminals seeking to engage in such activities with minimal risk of detection."
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"type": "title",
|
| 471 |
+
"bbox": [
|
| 472 |
+
0.514,
|
| 473 |
+
0.384,
|
| 474 |
+
0.665,
|
| 475 |
+
0.398
|
| 476 |
+
],
|
| 477 |
+
"angle": 0,
|
| 478 |
+
"content": "7 CONCLUSION"
|
| 479 |
+
},
|
| 480 |
+
{
|
| 481 |
+
"type": "text",
|
| 482 |
+
"bbox": [
|
| 483 |
+
0.513,
|
| 484 |
+
0.403,
|
| 485 |
+
0.915,
|
| 486 |
+
0.568
|
| 487 |
+
],
|
| 488 |
+
"angle": 0,
|
| 489 |
+
"content": "In conclusion, deepfakes in the metaverse present significant security implications, particularly around impersonation. The three scenarios of gaming, online meetings, and virtual offices serve as examples of how these security implications can play out in practice. The lack of physical authentication in the metaverse makes it easier for attackers to impersonate others and commit crimes without being held accountable. Mitigating these security implications will require a combination of technological solutions and legal frameworks that balance security and privacy concerns. As the metaverse continues to evolve, it is important to address these issues proactively to ensure a safe and secure virtual environment for all users."
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"type": "title",
|
| 493 |
+
"bbox": [
|
| 494 |
+
0.515,
|
| 495 |
+
0.582,
|
| 496 |
+
0.714,
|
| 497 |
+
0.597
|
| 498 |
+
],
|
| 499 |
+
"angle": 0,
|
| 500 |
+
"content": "ACKNOWLEDGMENTS"
|
| 501 |
+
},
|
| 502 |
+
{
|
| 503 |
+
"type": "text",
|
| 504 |
+
"bbox": [
|
| 505 |
+
0.513,
|
| 506 |
+
0.601,
|
| 507 |
+
0.915,
|
| 508 |
+
0.643
|
| 509 |
+
],
|
| 510 |
+
"angle": 0,
|
| 511 |
+
"content": "This research was financially supported by CSIRO's Collaborative Intelligence Future Science Platform. The diagram has been designed using images from Flaticon.com"
|
| 512 |
+
},
|
| 513 |
+
{
|
| 514 |
+
"type": "title",
|
| 515 |
+
"bbox": [
|
| 516 |
+
0.517,
|
| 517 |
+
0.656,
|
| 518 |
+
0.634,
|
| 519 |
+
0.67
|
| 520 |
+
],
|
| 521 |
+
"angle": 0,
|
| 522 |
+
"content": "REFERENCES"
|
| 523 |
+
},
|
| 524 |
+
{
|
| 525 |
+
"type": "ref_text",
|
| 526 |
+
"bbox": [
|
| 527 |
+
0.522,
|
| 528 |
+
0.673,
|
| 529 |
+
0.914,
|
| 530 |
+
0.713
|
| 531 |
+
],
|
| 532 |
+
"angle": 0,
|
| 533 |
+
"content": "[1] Samantha Cole. 2020. This Open-Source Program Deepfakes You During Zoom Meetings, in Real Time. https://www.vice.com/en/article/g5xagy/this-open-source-program-deepfakes-you-during-zoom-meetings-in-real-time Accessed: 13-March-2023."
|
| 534 |
+
},
|
| 535 |
+
{
|
| 536 |
+
"type": "ref_text",
|
| 537 |
+
"bbox": [
|
| 538 |
+
0.522,
|
| 539 |
+
0.714,
|
| 540 |
+
0.915,
|
| 541 |
+
0.763
|
| 542 |
+
],
|
| 543 |
+
"angle": 0,
|
| 544 |
+
"content": "[2] Michael del Castillo. 2022. Facebook's Metaverse Could Be Overrun By Deep Fakes And Other Misinformation If These Non-Profits Don't Succeed. https://www.forbes.com/sites/michaeldelcastillo/2022/08/29/facebookmetaverse-could-be-overrun-by-deep-fakes-and-other-misinformation-if-these-non-profits-dont-succeed/?sh=185318742737 Accessed: 13-March-2023."
|
| 545 |
+
},
|
| 546 |
+
{
|
| 547 |
+
"type": "ref_text",
|
| 548 |
+
"bbox": [
|
| 549 |
+
0.522,
|
| 550 |
+
0.764,
|
| 551 |
+
0.915,
|
| 552 |
+
0.804
|
| 553 |
+
],
|
| 554 |
+
"angle": 0,
|
| 555 |
+
"content": "[3] Hasam Khalid, Minha Kim, Shahroz Tariq, and Simon S Woo. 2021. Evaluation of an Audio-Video Multimodal Deepfake Dataset using Unimodal and Multimodal Detectors. In Proceedings of the 1st Workshop on Synthetic Multimedia-Audiovisual Deepfake Generation and Detection. 7-15."
|
| 556 |
+
},
|
| 557 |
+
{
|
| 558 |
+
"type": "ref_text",
|
| 559 |
+
"bbox": [
|
| 560 |
+
0.522,
|
| 561 |
+
0.805,
|
| 562 |
+
0.915,
|
| 563 |
+
0.834
|
| 564 |
+
],
|
| 565 |
+
"angle": 0,
|
| 566 |
+
"content": "[4] Hasam Khalid, Shahroz Tariq, Minha Kim, and Simon S Woo. 2021. FakeAVCeleb: A Novel Audio-Video Multimodal Deepfake Dataset. arXiv preprint arXiv:2108.05080 (2021)."
|
| 567 |
+
},
|
| 568 |
+
{
|
| 569 |
+
"type": "ref_text",
|
| 570 |
+
"bbox": [
|
| 571 |
+
0.522,
|
| 572 |
+
0.835,
|
| 573 |
+
0.915,
|
| 574 |
+
0.875
|
| 575 |
+
],
|
| 576 |
+
"angle": 0,
|
| 577 |
+
"content": "[5] Jeongho Kim, Shahroz Tariq, and Simon S Woo. 2022. PTD: Privacy-Preserving Human Face Processing Framework using Tensor Decomposition. In Proceedings of the 37th ACM/SIGAPP Symposium on Applied Computing. 1296-1303. https://doi.org/10.1145/3477314.3507036"
|
| 578 |
+
},
|
| 579 |
+
{
|
| 580 |
+
"type": "ref_text",
|
| 581 |
+
"bbox": [
|
| 582 |
+
0.522,
|
| 583 |
+
0.875,
|
| 584 |
+
0.915,
|
| 585 |
+
0.896
|
| 586 |
+
],
|
| 587 |
+
"angle": 0,
|
| 588 |
+
"content": "[6] Minha Kim, Shahroz Tariq, and Simon S Woo. 2021. Cored: Generalizing fake media detection with continual representation using distillation. In Proceedings"
|
| 589 |
+
},
|
| 590 |
+
{
|
| 591 |
+
"type": "list",
|
| 592 |
+
"bbox": [
|
| 593 |
+
0.522,
|
| 594 |
+
0.673,
|
| 595 |
+
0.915,
|
| 596 |
+
0.896
|
| 597 |
+
],
|
| 598 |
+
"angle": 0,
|
| 599 |
+
"content": null
|
| 600 |
+
}
|
| 601 |
+
],
|
| 602 |
+
[
|
| 603 |
+
{
|
| 604 |
+
"type": "header",
|
| 605 |
+
"bbox": [
|
| 606 |
+
0.659,
|
| 607 |
+
0.076,
|
| 608 |
+
0.913,
|
| 609 |
+
0.086
|
| 610 |
+
],
|
| 611 |
+
"angle": 0,
|
| 612 |
+
"content": "Shahroz Tariq, Alsharif Abuadbba, and Kristen Moore"
|
| 613 |
+
},
|
| 614 |
+
{
|
| 615 |
+
"type": "ref_text",
|
| 616 |
+
"bbox": [
|
| 617 |
+
0.114,
|
| 618 |
+
0.11,
|
| 619 |
+
0.419,
|
| 620 |
+
0.12
|
| 621 |
+
],
|
| 622 |
+
"angle": 0,
|
| 623 |
+
"content": "of the 29th ACM International Conference on Multimedia. 337-346."
|
| 624 |
+
},
|
| 625 |
+
{
|
| 626 |
+
"type": "ref_text",
|
| 627 |
+
"bbox": [
|
| 628 |
+
0.092,
|
| 629 |
+
0.12,
|
| 630 |
+
0.482,
|
| 631 |
+
0.161
|
| 632 |
+
],
|
| 633 |
+
"angle": 0,
|
| 634 |
+
"content": "[7] Minha Kim, Shahroz Tariq, and Simon S Woo. 2021. FReTAL: Generalizing Deepfake Detection using Knowledge Distillation and Representation Learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 1001-1012."
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"type": "ref_text",
|
| 638 |
+
"bbox": [
|
| 639 |
+
0.091,
|
| 640 |
+
0.161,
|
| 641 |
+
0.483,
|
| 642 |
+
0.181
|
| 643 |
+
],
|
| 644 |
+
"angle": 0,
|
| 645 |
+
"content": "[8] David Kleeman. 2021. Kids have Kickstarted the Metaverse. https://techonomy.com/kids-have-kickstarted-the-metaverse/ Accessed: 13-March-2023."
|
| 646 |
+
},
|
| 647 |
+
{
|
| 648 |
+
"type": "ref_text",
|
| 649 |
+
"bbox": [
|
| 650 |
+
0.092,
|
| 651 |
+
0.181,
|
| 652 |
+
0.482,
|
| 653 |
+
0.201
|
| 654 |
+
],
|
| 655 |
+
"angle": 0,
|
| 656 |
+
"content": "[9] Binh Le, Shahroz Tariq, Alsharif Abuadbba, Kristen Moore, and Simon Woo. 2023. Why Do Deepfake Detectors Fail? arXiv preprint arXiv:2302.13156 (2023)."
|
| 657 |
+
},
|
| 658 |
+
{
|
| 659 |
+
"type": "ref_text",
|
| 660 |
+
"bbox": [
|
| 661 |
+
0.088,
|
| 662 |
+
0.201,
|
| 663 |
+
0.482,
|
| 664 |
+
0.242
|
| 665 |
+
],
|
| 666 |
+
"angle": 0,
|
| 667 |
+
"content": "[10] Sangyup Lee, Shahroz Tariq, Junyaup Kim, and Simon S Woo. 2021. TAR: Generalized Forensic Framework to Detect Deepfakes Using Weakly Supervised Learning. In IFIP International Conference on ICT Systems Security and Privacy Protection. Springer, 351-366."
|
| 668 |
+
},
|
| 669 |
+
{
|
| 670 |
+
"type": "ref_text",
|
| 671 |
+
"bbox": [
|
| 672 |
+
0.088,
|
| 673 |
+
0.242,
|
| 674 |
+
0.482,
|
| 675 |
+
0.272
|
| 676 |
+
],
|
| 677 |
+
"angle": 0,
|
| 678 |
+
"content": "[11] Sangyup Lee, Shahroz Tariq, Youjin Shin, and Simon S Woo. 2021. Detecting handcrafted facial image manipulations and GAN-generated facial images using Shallow-FakeFaceNet. Applied Soft Computing 105 (2021), 107256."
|
| 679 |
+
},
|
| 680 |
+
{
|
| 681 |
+
"type": "ref_text",
|
| 682 |
+
"bbox": [
|
| 683 |
+
0.088,
|
| 684 |
+
0.271,
|
| 685 |
+
0.482,
|
| 686 |
+
0.3
|
| 687 |
+
],
|
| 688 |
+
"angle": 0,
|
| 689 |
+
"content": "[12] Steven Levy. 2022. What's Deepfake Bruce Willis Doing in My Metaverse? https://www.wired.com/story/plaintext-bruce-willis-deepfake-metaverse Accessed: 13-March-2023."
|
| 690 |
+
},
|
| 691 |
+
{
|
| 692 |
+
"type": "ref_text",
|
| 693 |
+
"bbox": [
|
| 694 |
+
0.087,
|
| 695 |
+
0.301,
|
| 696 |
+
0.481,
|
| 697 |
+
0.322
|
| 698 |
+
],
|
| 699 |
+
"angle": 0,
|
| 700 |
+
"content": "[13] Reed Smith LLP. 2022. Reed Smith Guide to the Metaverse, 2nd Edition. Reed Smith LLP. https://www.reedsmith.com/en/perspectives/metaverse"
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"type": "ref_text",
|
| 704 |
+
"bbox": [
|
| 705 |
+
0.087,
|
| 706 |
+
0.322,
|
| 707 |
+
0.481,
|
| 708 |
+
0.342
|
| 709 |
+
],
|
| 710 |
+
"angle": 0,
|
| 711 |
+
"content": "[14] Trend Micro. 2023. Darkverse. https://www.trendmicro.com/vinfo/us/security/ definition/darkverse Accessed: 13-March-2023."
|
| 712 |
+
},
|
| 713 |
+
{
|
| 714 |
+
"type": "ref_text",
|
| 715 |
+
"bbox": [
|
| 716 |
+
0.087,
|
| 717 |
+
0.342,
|
| 718 |
+
0.481,
|
| 719 |
+
0.362
|
| 720 |
+
],
|
| 721 |
+
"angle": 0,
|
| 722 |
+
"content": "[15] Microsoft. 2023. Microsoft Mesh. https://www.microsoft.com/en-us/mesh Accessed: 13-March-2023."
|
| 723 |
+
},
|
| 724 |
+
{
|
| 725 |
+
"type": "ref_text",
|
| 726 |
+
"bbox": [
|
| 727 |
+
0.087,
|
| 728 |
+
0.362,
|
| 729 |
+
0.481,
|
| 730 |
+
0.383
|
| 731 |
+
],
|
| 732 |
+
"angle": 0,
|
| 733 |
+
"content": "[16] Yisroel Mirsky and Wenke Lee. 2021. The creation and detection of deepfakes: A survey. ACM Computing Surveys (CSUR) 54, 1 (2021), 1-41."
|
| 734 |
+
},
|
| 735 |
+
{
|
| 736 |
+
"type": "ref_text",
|
| 737 |
+
"bbox": [
|
| 738 |
+
0.087,
|
| 739 |
+
0.383,
|
| 740 |
+
0.481,
|
| 741 |
+
0.402
|
| 742 |
+
],
|
| 743 |
+
"angle": 0,
|
| 744 |
+
"content": "[17] Nvidia. 2023. NVIDIA Omniverse. https://www.nvidia.com/en-gb/omniverse/ Accessed: 13-March-2023."
|
| 745 |
+
},
|
| 746 |
+
{
|
| 747 |
+
"type": "ref_text",
|
| 748 |
+
"bbox": [
|
| 749 |
+
0.087,
|
| 750 |
+
0.402,
|
| 751 |
+
0.481,
|
| 752 |
+
0.422
|
| 753 |
+
],
|
| 754 |
+
"angle": 0,
|
| 755 |
+
"content": "[18] Meta Platforms. 2023. Facebook Metaverse. https://about.meta.com/metaverse Accessed: 13-March-2023."
|
| 756 |
+
},
|
| 757 |
+
{
|
| 758 |
+
"type": "list",
|
| 759 |
+
"bbox": [
|
| 760 |
+
0.087,
|
| 761 |
+
0.11,
|
| 762 |
+
0.483,
|
| 763 |
+
0.422
|
| 764 |
+
],
|
| 765 |
+
"angle": 0,
|
| 766 |
+
"content": null
|
| 767 |
+
},
|
| 768 |
+
{
|
| 769 |
+
"type": "ref_text",
|
| 770 |
+
"bbox": [
|
| 771 |
+
0.518,
|
| 772 |
+
0.11,
|
| 773 |
+
0.913,
|
| 774 |
+
0.129
|
| 775 |
+
],
|
| 776 |
+
"angle": 0,
|
| 777 |
+
"content": "[19] Meta Platforms. 2023. Meta Horizon Worlds. https://www.meta.com/gb/en/horizon-worlds/ Accessed: 13-March-2023."
|
| 778 |
+
},
|
| 779 |
+
{
|
| 780 |
+
"type": "ref_text",
|
| 781 |
+
"bbox": [
|
| 782 |
+
0.518,
|
| 783 |
+
0.13,
|
| 784 |
+
0.913,
|
| 785 |
+
0.16
|
| 786 |
+
],
|
| 787 |
+
"angle": 0,
|
| 788 |
+
"content": "[20] Gautam Raturi. 2022. Roblox Metaverse: Everything You Need to Know. https://medium.com/codex/everything-you-need-to-know-about-the-roblox-metaverse-928e9531693 Accessed: 13-March-2023."
|
| 789 |
+
},
|
| 790 |
+
{
|
| 791 |
+
"type": "ref_text",
|
| 792 |
+
"bbox": [
|
| 793 |
+
0.518,
|
| 794 |
+
0.161,
|
| 795 |
+
0.913,
|
| 796 |
+
0.19
|
| 797 |
+
],
|
| 798 |
+
"angle": 0,
|
| 799 |
+
"content": "[21] Darin Stewart. 2021. Maverick Research: Deepfakes Will Kill the Metaverse; Synthetic Media Could Save It. https://www.gartner.com/en/documents/4008295 Accessed: 13-March-2023."
|
| 800 |
+
},
|
| 801 |
+
{
|
| 802 |
+
"type": "ref_text",
|
| 803 |
+
"bbox": [
|
| 804 |
+
0.518,
|
| 805 |
+
0.191,
|
| 806 |
+
0.913,
|
| 807 |
+
0.222
|
| 808 |
+
],
|
| 809 |
+
"angle": 0,
|
| 810 |
+
"content": "[22] Shahroz Tariq, Sowon Jeon, and Simon Woo. 2021. Am I a Real or Fake Celebrity? Measuring Commercial Face Recognition Web APIs under Deepfake Impersonation Attack. arXiv preprint arXiv:2103.00847 (2021)."
|
| 811 |
+
},
|
| 812 |
+
{
|
| 813 |
+
"type": "ref_text",
|
| 814 |
+
"bbox": [
|
| 815 |
+
0.518,
|
| 816 |
+
0.222,
|
| 817 |
+
0.913,
|
| 818 |
+
0.261
|
| 819 |
+
],
|
| 820 |
+
"angle": 0,
|
| 821 |
+
"content": "[23] Shahroz Tariq, Sangyup Lee, Hoyoung Kim, Youjin Shin, and Simon S Woo. 2018. Detecting both machine and human created fake face images in the wild. In Proceedings of the 2nd International Workshop on Multimedia Privacy and Security. ACM, 81-87."
|
| 822 |
+
},
|
| 823 |
+
{
|
| 824 |
+
"type": "ref_text",
|
| 825 |
+
"bbox": [
|
| 826 |
+
0.517,
|
| 827 |
+
0.261,
|
| 828 |
+
0.913,
|
| 829 |
+
0.3
|
| 830 |
+
],
|
| 831 |
+
"angle": 0,
|
| 832 |
+
"content": "[24] Shahroz Tariq, Sangyup Lee, Hoyoung Kim, Youjin Shin, and Simon S Woo. 2019. GAN is a friend or foe?: a framework to detect various fake face images. In Proceedings of the 34th ACM/SIGAPP Symposium on Applied Computing. ACM, 1296-1303."
|
| 833 |
+
},
|
| 834 |
+
{
|
| 835 |
+
"type": "ref_text",
|
| 836 |
+
"bbox": [
|
| 837 |
+
0.517,
|
| 838 |
+
0.301,
|
| 839 |
+
0.913,
|
| 840 |
+
0.332
|
| 841 |
+
],
|
| 842 |
+
"angle": 0,
|
| 843 |
+
"content": "[25] Shahroz Tariq, Sangyup Lee, and Simon Woo. 2021. One detector to rule them all: Towards a general deepfake attack detection framework. In Proceedings of the web conference 2021. 3625-3637."
|
| 844 |
+
},
|
| 845 |
+
{
|
| 846 |
+
"type": "ref_text",
|
| 847 |
+
"bbox": [
|
| 848 |
+
0.517,
|
| 849 |
+
0.332,
|
| 850 |
+
0.913,
|
| 851 |
+
0.362
|
| 852 |
+
],
|
| 853 |
+
"angle": 0,
|
| 854 |
+
"content": "[26] Shahroz Tariq, Sangyup Lee, and Simon S Woo. 2020. A Convolutional LSTM based Residual Network for Deepfake Video Detection. arXiv preprint arXiv:2009.07480 (2020)."
|
| 855 |
+
},
|
| 856 |
+
{
|
| 857 |
+
"type": "ref_text",
|
| 858 |
+
"bbox": [
|
| 859 |
+
0.517,
|
| 860 |
+
0.362,
|
| 861 |
+
0.913,
|
| 862 |
+
0.393
|
| 863 |
+
],
|
| 864 |
+
"angle": 0,
|
| 865 |
+
"content": "[27] Yuntao Wang, Zhou Su, Ning Zhang, Rui Xing, Dongxiao Liu, Tom H Luan, and Xuemin Shen. 2022. A survey on metaverse: Fundamentals, security, and privacy. IEEE Communications Surveys & Tutorials (2022)."
|
| 866 |
+
},
|
| 867 |
+
{
|
| 868 |
+
"type": "ref_text",
|
| 869 |
+
"bbox": [
|
| 870 |
+
0.517,
|
| 871 |
+
0.393,
|
| 872 |
+
0.913,
|
| 873 |
+
0.413
|
| 874 |
+
],
|
| 875 |
+
"angle": 0,
|
| 876 |
+
"content": "[28] Emma Woollacott. 2022. Rise of deepfakes: who can you trust in the metaverse? https://cybernews.com/security/rise-of-deepfakes Accessed: 13-March-2023."
|
| 877 |
+
},
|
| 878 |
+
{
|
| 879 |
+
"type": "list",
|
| 880 |
+
"bbox": [
|
| 881 |
+
0.517,
|
| 882 |
+
0.11,
|
| 883 |
+
0.913,
|
| 884 |
+
0.413
|
| 885 |
+
],
|
| 886 |
+
"angle": 0,
|
| 887 |
+
"content": null
|
| 888 |
+
}
|
| 889 |
+
]
|
| 890 |
+
]
|
2303.14xxx/2303.14612/e1a8feb8-e3e0-42e1-846f-d4b91a7fc621_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:525f9646bf58b9320664249880eeae985c89c27d12a4e02574fe193bbda60fa0
|
| 3 |
+
size 661104
|
2303.14xxx/2303.14612/full.md
ADDED
|
@@ -0,0 +1,124 @@
|
| 1 |
+
# Deepfake in the Metaverse: Security Implications for Virtual Gaming, Meetings, and Offices
|
| 2 |
+
|
| 3 |
+
Shahroz Tariq
|
| 4 |
+
CSIRO's Data61, Australia
|
| 5 |
+
shahroz.tariq@data61.csiro.au
|
| 6 |
+
|
| 7 |
+
Alsharif Abuadbba
|
| 8 |
+
CSIRO's Data61, Australia
|
| 9 |
+
sharif.abuadbba@data61.csiro.au
|
| 10 |
+
|
| 11 |
+
Kristen Moore
|
| 12 |
+
CSIRO's Data61, Australia
|
| 13 |
+
kristen.moore@data61.csiro.au
|
| 14 |
+
|
| 15 |
+
# ABSTRACT
|
| 16 |
+
|
| 17 |
+
The metaverse has gained significant attention from various industries due to its potential to create a fully immersive and interactive virtual world. However, the integration of deepfakes in the metaverse brings serious security implications, particularly with regard to impersonation. This paper examines the security implications of deepfakes in the metaverse, specifically in the context of gaming, online meetings, and virtual offices. The paper discusses how deepfakes can be used to impersonate in gaming scenarios, how online meetings in the metaverse open the door for impersonation, and how virtual offices in the metaverse lack physical authentication, making it easier for attackers to impersonate someone. The implications of these security concerns are discussed in relation to the confidentiality, integrity, and availability (CIA) triad. The paper further explores related issues such as the darkverse, and digital cloning, as well as regulatory and privacy concerns associated with addressing security threats in the virtual world.
|
| 18 |
+
|
| 19 |
+
# CCS CONCEPTS
|
| 20 |
+
|
| 21 |
+
- Human-centered computing $\rightarrow$ Virtual reality; $\cdot$ Security and privacy $\rightarrow$ Social engineering attacks; $\cdot$ Social and professional topics $\rightarrow$ Identity theft.
|
| 22 |
+
|
| 23 |
+
# KEYWORDS
|
| 24 |
+
|
| 25 |
+
Metaverse, Deepfake, Security, Impersonation, Gaming, Online meetings, Virtual offices
|
| 26 |
+
|
| 27 |
+
# 1 INTRODUCTION
|
| 28 |
+
|
| 29 |
+
The emergence of the metaverse [27] has captured the attention of the technology community, giving rise to widespread anticipation and debate. Prominent companies such as Meta (formerly Facebook), Microsoft, and Nvidia have expressed interest in the concept of a fully immersive virtual world, where individuals can interact with one another and their surroundings. To this end, various companies are releasing their own metaverse experiences, including Meta's Horizon Worlds [18, 19], Roblox's gaming metaverse [20], Microsoft's Mesh [15], and Nvidia's Omniverse [17]. While the potential applications of the metaverse are vast, with possibilities ranging from gaming to virtual meetings and offices, each method has its own benefits and limitations.
|
| 30 |
+
|
| 31 |
+
However, the applicability of deepfakes [9, 16] in the metaverse presents significant security implications, particularly with regard to impersonation. Deepfakes are computer-generated images or videos that can be manipulated to look like real people or events. The ability to generate such content has significantly increased with advances in machine learning and artificial intelligence. Recently, deepfakes in the metaverse have become a topic of discussion on different forums and media articles [2, 12, 21, 28].
|
| 32 |
+
|
| 33 |
+

|
| 34 |
+
Figure 1: The three most commonly publicized scenarios in the metaverse: virtual gaming, virtual meetings, and virtual offices. These applications highlight the potential for immersive virtual experiences, but also raise concerns about security and privacy in this emerging technology.
|
| 35 |
+
|
| 36 |
+
The utilisation of metaverse technology has been associated with numerous benefits, including the provision of a fully immersive virtual environment that facilitates interaction with other users and virtual objects. Nevertheless, the potential deployment of deepfake technology to perpetrate malicious activities, such as impersonation, has shed light on the limitations of the technology and the need for effective security measures to be put in place. It is noteworthy that existing state-of-the-art deepfake detection methods [3-7, 10, 11, 22-26] primarily focus on detecting deepfakes in the physical world and do not take into account the possibility of deepfakes in the metaverse. Thus, the current research is challenged with an evident gap in the identification and prevention of deepfakes in the metaverse, making it imperative to shed light on this research gap.
|
| 37 |
+
|
| 38 |
+
In this paper, we explore the security implications of deepfakes in the metaverse. We will start by defining the concept of the metaverse and how it is expected to be used in three scenarios: (i) gaming, (ii) online meetings, and (iii) virtual offices (see Fig. 1). We will also discuss the potential dangers of deepfake in each of the three scenarios by exploring the potential consequences of deepfake misuse in the metaverse, such as the ability to impersonate others, manipulate meetings, and disrupt virtual work environments. We will also discuss potential solutions to mitigate these risks and ensure the safety and security of metaverse users. We also explore the security implications of deepfakes in the metaverse to fake digital identity, the CIA triad, legal and regulatory challenges, privacy issues, and darkverse. Through this work, we aim to explore the potential security implications of deepfakes in the metaverse and to raise awareness of the risks and challenges posed by this technology.
|
| 39 |
+
|
| 40 |
+
# 2 WHAT IS THE METAVERSE?
|
| 41 |
+
|
| 42 |
+
The metaverse is a collective virtual shared space that offers an immersive and interactive 3D environment, facilitating real-time engagement with digital content and other individuals through advanced technologies such as virtual reality (VR) and augmented reality (AR). While originally a concept in science fiction, such as in Neal Stephenson's novel "Snow Crash" and the movie "The Matrix," recent technological advances have made the metaverse increasingly feasible. As a new form of social and economic infrastructure, the metaverse provides opportunities for people to work, play, socialize, learn, and consume content within a shared virtual space. While different visions of the metaverse exist among companies, organizations, and individuals, ranging from a fully autonomous world to a combination of different virtual platforms and experiences, the metaverse is considered a transformative technology that has the potential to impact various aspects of our lives.
|
| 43 |
+
|
| 44 |
+
# 3 GAMING IN THE METAVERSE
|
| 45 |
+
|
| 46 |
+
Gaming in the metaverse encompasses playing video games in a virtual world shared by millions globally, including massively multiplayer online games (MMOs), social games, and casual games such as puzzle and card games. These games offer interactive and detailed virtual worlds, providing players with opportunities to explore and interact with their surroundings.
|
| 47 |
+
|
| 48 |
+
The use of deepfakes in the context of gaming in the metaverse raises significant security concerns. Potential issues include identity theft, cyberbullying, distribution of malware, non-fungible token (NFT) scams, and intellectual property theft. As a significant demographic within metaverse gaming [8], minors are particularly vulnerable to these threats due to their limited experience and knowledge of online safety, which can result in sexual exploitation, social engineering, online grooming, and exposure to misinformation.
|
| 49 |
+
|
| 50 |
+
To address these risks, gaming companies must invest in advanced security measures, including identity verification systems, content monitoring and moderation tools, and anti-malware software. Additionally, players should be educated about the dangers of deepfakes and encouraged to report any suspicious activity encountered in the metaverse. Parents and guardians should take an active role in educating minors on the risks of deepfakes, monitoring their online activities, and encouraging them to report any questionable behavior. Online safety education, privacy settings, and parental controls can also help safeguard minors from the potential harms of deepfakes.
|
| 51 |
+
|
| 52 |
+
# 4 ONLINE MEETINGS IN THE METAVERSE
|
| 53 |
+
|
| 54 |
+
Online meetings in the metaverse offer a virtual space for individuals to communicate and collaborate within a shared immersive digital environment, spanning from basic text-based chat rooms to fully immersive 3D environments, and utilizing voice chat, instant messaging, or other means of communication. These virtual meetings are versatile and can serve various purposes, such as team collaboration, networking, socializing, or attending virtual events, such as conferences.
|
| 55 |
+
|
| 56 |
+
However, the potential risks to privacy and reputation are significant due to the metaverse's ability to create an opportunity for
|
| 57 |
+
|
| 58 |
+
attackers to impersonate others. Deepfake technology can be utilized to deceive others through impersonation, leading to potential fraud or espionage, as demonstrated in a recent example of Elon Musk's deepfake zoom-bombing online meetings [1]. In addition, the authenticity and trust of participants may be compromised by the creation of convincing deepfakes, which could undermine trust and collaboration.
|
| 59 |
+
|
| 60 |
+
To address these issues, it is essential to implement measures such as identity verification, digital signatures, or other security measures to ensure the authenticity of participants. Moreover, the development of tools and technologies that can detect various forms of deepfakes in the metaverse and prevent their use during online meetings may be necessary. Overall, while deepfake technology poses a disruptive risk to online meetings in the metaverse, it is also possible to mitigate these risks effectively by utilizing appropriate security measures and technology.
|
| 61 |
+
|
| 62 |
+
# 5 VIRTUAL OFFICES IN THE METAVERSE
|
| 63 |
+
|
| 64 |
+
The emergence of virtual offices or workplaces in the metaverse is a recent but promising development that has the potential to revolutionize collaboration and work practices. By leveraging the metaverse's virtual environment, colleagues can work together in a customizable, shared digital workspace that offers numerous benefits, such as reduced overhead cost, increased flexibility, and access to a global talent pool. In addition, virtual offices in the metaverse can enable more dynamic and immersive meetings, greater collaboration, and increased creativity.
|
| 65 |
+
|
| 66 |
+
However, the virtual nature of the metaverse presents a security challenge, as attackers can impersonate team members through the use of deepfakes, leading to data breaches and financial loss. For instance, deepfakes can be employed to create fake identities or to impersonate colleagues, which could lead to trust issues and confusion within the team. For example, an employee could use a deepfake to create a fake version of their boss or co-worker to make it appear as if they are giving instructions. Furthermore, deepfakes can be used to spread false information or propaganda, potentially impacting important decisions.
|
| 67 |
+
|
| 68 |
+
To address the potential threats posed by deepfakes in virtual offices or workplaces in the metaverse, clear guidelines and protocols are required to verify team members' identities and the authenticity of content shared in the virtual environment. This can be critical in establishing one's innocence in the case of a crime. It is also important to remain abreast of the latest deepfake technology developments and to leverage tools and software that can aid in identifying and detecting deepfakes.
|
| 69 |
+
|
| 70 |
+
# 6 DISCUSSION
|
| 71 |
+
|
| 72 |
+
Fake Digital Identity and Cloning in the digital world. One of the central ideas underlying the concept of the metaverse is the ability for individuals to create digital replicas of themselves, known as avatars, in the virtual world. These avatars are designed to mimic the physical appearance and behavior of their real-life counterparts, allowing individuals to interact with one another in the digital realm. However, the ability to clone oneself in the metaverse also raises concerns about the potential for impersonation. Unlike the physical world, where impersonating someone
|
| 73 |
+
|
| 74 |
+
convincingly is challenging, it is much easier to create a convincing digital clone of a person in the metaverse due to the abundance of personal information available on the internet that can be used to create deepfakes. The possibility of an attacker using deepfakes to impersonate someone in the metaverse is a significant concern, as it could be used to commit various illicit activities. One potential solution to this problem is the implementation of digital identity verification systems. Such systems could use biometric data, such as facial recognition, to verify an individual's identity before allowing them to create a digital avatar. By doing so, attackers would be prevented from creating digital clones of other people without their consent, thereby ensuring a higher level of security in the metaverse.
|
| 75 |
+
|
| 76 |
+
Deepfakes impact on CIA Triad in the Metaverse. The CIA Triad, established by the National Institute of Standards and Technology (NIST), comprises confidentiality, integrity, and availability, which are the three primary objectives of information security. Confidentiality safeguards sensitive information by ensuring that only authorized parties can access it. Integrity ensures that information remains accurate and unaltered, while availability guarantees that authorized parties can access information when necessary. In the context of the metaverse, deepfakes pose a potential threat to the CIA Triad's objectives of confidentiality, integrity, and availability. Specifically, deepfakes have the potential to compromise confidentiality by enabling the impersonation of authorized individuals, thereby permitting unauthorized access to sensitive areas. Furthermore, deepfakes can undermine the integrity of information, images, or videos, by spreading false or misleading information about individuals or organizations, causing reputational harm. Finally, deepfakes can also disrupt availability by disseminating propaganda or fake news, leading to confusion and chaos.
|
| 77 |
+
|
| 78 |
+
Legal and Regulatory Challenges. The lack of regulations regarding the application of laws from the physical world in the metaverse presents a significant challenge. For instance, identifying and prosecuting an offender who has committed a crime in the virtual world using deepfake impersonation can be difficult. Additionally, jurisdictional issues arise due to the existence of varying laws in different countries. In the event that an attacker is located in a country where there are no legal consequences for their actions in the metaverse, holding them accountable can become problematic. Consequently, a universal set of rules and regulations for the metaverse becomes difficult to establish, given that different countries may have different interpretations of what constitutes criminal behavior. To address security concerns in the metaverse, a coordinated effort between governments, regulatory bodies, and technology companies is necessary [13]. This entails the development of universally applicable standards and regulations that can transcend geographical and jurisdictional barriers. Also, continuous efforts toward the development of new technologies capable of preventing and detecting criminal activities in the metaverse are also required. Privacy issues. Although digital identity verification systems can serve as a potential measure for mitigating deepfake-based impersonation in the metaverse, the utilization of these systems raises concerns. The apprehension stems from the possibility of digital identity verification systems being utilized to track and monitor individuals' virtual activities, consequently, potentially infringing on their privacy and freedom. It is argued that any digital identity
|
| 79 |
+
|
| 80 |
+
verification system deployed in the metaverse must maintain a balance between security and the need for privacy and freedom.
|
| 81 |
+
|
| 82 |
+
Darkverse. The rise of metaverse technology has created new opportunities for both legitimate users and malicious actors. One of the primary concerns is the creation of private spaces that enable illegal activities and communication among criminals, which Trend Micro refers to as the darkverse [14]. This space operates similarly to the dark web, but it exists within the metaverse and is unindexed, making it challenging to locate via standard search engines. The darkverse's pseudo-physical user presence makes it more dangerous than the dark web, as criminals can use proximity-based messaging or other methods to conceal their communications, rendering them difficult for law enforcement agencies to intercept. Darkverse could be used to facilitate illegal activities such as deepfake-based revenge pornography and misinformation campaigns. Despite the possibility of the darkverse being a space for free speech, the primary objective of these spaces is to facilitate illegal activities, and it may become a safe haven for criminals seeking to engage in such activities with minimal risk of detection.
|
| 83 |
+
|
| 84 |
+
# 7 CONCLUSION
|
| 85 |
+
|
| 86 |
+
In conclusion, deepfakes in the metaverse present significant security implications, particularly around impersonation. The three scenarios of gaming, online meetings, and virtual offices serve as examples of how these security implications can play out in practice. The lack of physical authentication in the metaverse makes it easier for attackers to impersonate others and commit crimes without being held accountable. Mitigating these security implications will require a combination of technological solutions and legal frameworks that balance security and privacy concerns. As the metaverse continues to evolve, it is important to address these issues proactively to ensure a safe and secure virtual environment for all users.
|
| 87 |
+
|
| 88 |
+
# ACKNOWLEDGMENTS
|
| 89 |
+
|
| 90 |
+
This research was financially supported by CSIRO's Collaborative Intelligence Future Science Platform. The diagram has been designed using images from Flaticon.com.
|
| 91 |
+
|
| 92 |
+
# REFERENCES
|
| 93 |
+
|
| 94 |
+
[1] Samantha Cole. 2020. This Open-Source Program Deepfakes You During Zoom Meetings, in Real Time. https://www.vice.com/en/article/g5xagy/this-open-source-program-deepfakes-you-during-zoom-meetings-in-real-time Accessed: 13-March-2023.
|
| 95 |
+
[2] Michael del Castillo. 2022. Facebook's Metaverse Could Be Overrun By Deep Fakes And Other Misinformation If These Non-Profits Don't Succeed. https://www.forbes.com/sites/michaeldelcastillo/2022/08/29/facebookmetaverse-could-be-overrun-by-deep-fakes-and-other-misinformation-if-these-non-profits-dont-succeed/?sh=185318742737 Accessed: 13-March-2023.
|
| 96 |
+
[3] Hasam Khalid, Minha Kim, Shahroz Tariq, and Simon S Woo. 2021. Evaluation of an Audio-Video Multimodal Deepfake Dataset using Unimodal and Multimodal Detectors. In Proceedings of the 1st Workshop on Synthetic Multimedia-Audiovisual Deepfake Generation and Detection. 7-15.
|
| 97 |
+
[4] Hasam Khalid, Shahroz Tariq, Minha Kim, and Simon S Woo. 2021. FakeAVCeleb: A Novel Audio-Video Multimodal Deepfake Dataset. arXiv preprint arXiv:2108.05080 (2021).
|
| 98 |
+
[5] Jeongho Kim, Shahroz Tariq, and Simon S Woo. 2022. PTD: Privacy-Preserving Human Face Processing Framework using Tensor Decomposition. In Proceedings of the 37th ACM/SIGAPP Symposium on Applied Computing. 1296-1303. https://doi.org/10.1145/3477314.3507036
|
| 99 |
+
[6] Minha Kim, Shahroz Tariq, and Simon S Woo. 2021. Cored: Generalizing fake media detection with continual representation using distillation. In Proceedings
|
| 100 |
+
|
| 101 |
+
of the 29th ACM International Conference on Multimedia. 337-346.
|
| 102 |
+
[7] Minha Kim, Shahroz Tariq, and Simon S Woo. 2021. FReTAL: Generalizing Deepfake Detection using Knowledge Distillation and Representation Learning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 1001-1012.
|
| 103 |
+
[8] David Kleeman. 2021. Kids have Kickstarted the Metaverse. https://techonomy.com/kids-have-kickstarted-the-metaverse/ Accessed: 13-March-2023.
|
| 104 |
+
[9] Binh Le, Shahroz Tariq, Alsharif Abuadbba, Kristen Moore, and Simon Woo. 2023. Why Do Deepfake Detectors Fail? arXiv preprint arXiv:2302.13156 (2023).
|
| 105 |
+
[10] Sangyup Lee, Shahroz Tariq, Junyaup Kim, and Simon S Woo. 2021. TAR: Generalized Forensic Framework to Detect Deepfakes Using Weakly Supervised Learning. In IFIP International Conference on ICT Systems Security and Privacy Protection. Springer, 351-366.
|
| 106 |
+
[11] Sangyup Lee, Shahroz Tariq, Youjin Shin, and Simon S Woo. 2021. Detecting handcrafted facial image manipulations and GAN-generated facial images using Shallow-FakeFaceNet. Applied Soft Computing 105 (2021), 107256.
|
| 107 |
+
[12] Steven Levy. 2022. What's Deepfake Bruce Willis Doing in My Metaverse? https://www.wired.com/story/plaintext-bruce-willis-deepfake-metaverse Accessed: 13-March-2023.
|
| 108 |
+
[13] Reed Smith LLP. 2022. Reed Smith Guide to the Metaverse, 2nd Edition. Reed Smith LLP. https://www.reedsmith.com/en/perspectives/metaverse
|
| 109 |
+
[14] Trend Micro. 2023. Darkverse. https://www.trendmicro.com/vinfo/us/security/definition/darkverse Accessed: 13-March-2023.
|
| 110 |
+
[15] Microsoft. 2023. Microsoft Mesh. https://www.microsoft.com/en-us/mesh Accessed: 13-March-2023.
|
| 111 |
+
[16] Yisroel Mirsky and Wenke Lee. 2021. The creation and detection of deepfakes: A survey. ACM Computing Surveys (CSUR) 54, 1 (2021), 1-41.
|
| 112 |
+
[17] Nvidia. 2023. NVIDIA Omniverse. https://www.nvidia.com/en-gb/omniverse/ Accessed: 13-March-2023.
|
| 113 |
+
[18] Meta Platforms. 2023. Facebook Metaverse. https://about.meta.com/metaverse Accessed: 13-March-2023.
|
| 114 |
+
|
| 115 |
+
[19] Meta Platforms. 2023. Meta Horizon Worlds. https://www.meta.com/gb/en/horizon-worlds/ Accessed: 13-March-2023.
|
| 116 |
+
[20] Gautam Raturi. 2022. Roblox Metaverse: Everything You Need to Know. https://medium.com/codex/everything-you-need-to-know-about-the-roblox-metaverse-928e9531693 Accessed: 13-March-2023.
|
| 117 |
+
[21] Darin Stewart. 2021. Maverick Research: Deepfakes Will Kill the Metaverse; Synthetic Media Could Save It. https://www.gartner.com/en/documents/4008295 Accessed: 13-March-2023.
|
| 118 |
+
[22] Shahroz Tariq, Sowon Jeon, and Simon Woo. 2021. Am I a Real or Fake Celebrity? Measuring Commercial Face Recognition Web APIs under Deepfake Impersonation Attack. arXiv preprint arXiv:2103.00847 (2021).
|
| 119 |
+
[23] Shahroz Tariq, Sangyup Lee, Hoyoung Kim, Youjin Shin, and Simon S Woo. 2018. Detecting both machine and human created fake face images in the wild. In Proceedings of the 2nd International Workshop on Multimedia Privacy and Security. ACM, 81-87.
|
| 120 |
+
[24] Shahroz Tariq, Sangyup Lee, Hoyoung Kim, Youjin Shin, and Simon S Woo. 2019. GAN is a friend or foe?: a framework to detect various fake face images. In Proceedings of the 34th ACM/SIGAPP Symposium on Applied Computing. ACM, 1296-1303.
|
| 121 |
+
[25] Shahroz Tariq, Sangyup Lee, and Simon Woo. 2021. One detector to rule them all: Towards a general deepfake attack detection framework. In Proceedings of the Web Conference 2021. 3625-3637.
|
| 122 |
+
[26] Shahroz Tariq, Sangyup Lee, and Simon S Woo. 2020. A Convolutional LSTM based Residual Network for Deepfake Video Detection. arXiv preprint arXiv:2009.07480 (2020).
|
| 123 |
+
[27] Yuntao Wang, Zhou Su, Ning Zhang, Rui Xing, Dongxiao Liu, Tom H Luan, and Xuemin Shen. 2022. A survey on metaverse: Fundamentals, security, and privacy. IEEE Communications Surveys & Tutorials (2022).
|
| 124 |
+
[28] Emma Woollacott. 2022. Rise of deepfakes: who can you trust in the metaverse? https://cybernews.com/security/rise-of-deepfakes Accessed: 13-March-2023.
|
2303.14xxx/2303.14612/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:536f6d88c6af14210808e92c79fb8a6b53b9590c688d854a17ed0d9d3a9d823e
|
| 3 |
+
size 33948
|
2303.14xxx/2303.14612/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14613/fa86a159-6fcd-438c-a2b4-6ff1e438b175_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14613/fa86a159-6fcd-438c-a2b4-6ff1e438b175_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14613/fa86a159-6fcd-438c-a2b4-6ff1e438b175_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b1e0fd14208f5aacd9325b4b9e3b30fad36a8fd3582459b4405066388e96683a
|
| 3 |
+
size 9693352
|
2303.14xxx/2303.14613/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14613/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:13deea0272512b7eaa5e55e0e4365dd67e752985a43576ffb2cff274f4114230
|
| 3 |
+
size 917163
|
2303.14xxx/2303.14613/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2303.14xxx/2303.14617/202514b2-218d-4a82-968c-224e4f16dc55_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|