Add Batch a14da98c-7842-445e-b421-a377bc9ebe49
Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +64 -0
- 2403.00xxx/2403.00409/8805edf3-b3ea-467b-92ab-b41e43567d4c_content_list.json +0 -0
- 2403.00xxx/2403.00409/8805edf3-b3ea-467b-92ab-b41e43567d4c_model.json +0 -0
- 2403.00xxx/2403.00409/8805edf3-b3ea-467b-92ab-b41e43567d4c_origin.pdf +3 -0
- 2403.00xxx/2403.00409/full.md +799 -0
- 2403.00xxx/2403.00409/images.zip +3 -0
- 2403.00xxx/2403.00409/layout.json +0 -0
- 2403.00xxx/2403.00425/4bcef5a3-e808-495b-b52d-5b7269aa0844_content_list.json +0 -0
- 2403.00xxx/2403.00425/4bcef5a3-e808-495b-b52d-5b7269aa0844_model.json +0 -0
- 2403.00xxx/2403.00425/4bcef5a3-e808-495b-b52d-5b7269aa0844_origin.pdf +3 -0
- 2403.00xxx/2403.00425/full.md +0 -0
- 2403.00xxx/2403.00425/images.zip +3 -0
- 2403.00xxx/2403.00425/layout.json +0 -0
- 2403.00xxx/2403.00436/8c21a5cc-c292-4537-b2a4-73c65d3149da_content_list.json +0 -0
- 2403.00xxx/2403.00436/8c21a5cc-c292-4537-b2a4-73c65d3149da_model.json +0 -0
- 2403.00xxx/2403.00436/8c21a5cc-c292-4537-b2a4-73c65d3149da_origin.pdf +3 -0
- 2403.00xxx/2403.00436/full.md +510 -0
- 2403.00xxx/2403.00436/images.zip +3 -0
- 2403.00xxx/2403.00436/layout.json +0 -0
- 2403.00xxx/2403.00439/51835c5b-8381-4a5b-996b-aed0066adc56_content_list.json +0 -0
- 2403.00xxx/2403.00439/51835c5b-8381-4a5b-996b-aed0066adc56_model.json +0 -0
- 2403.00xxx/2403.00439/51835c5b-8381-4a5b-996b-aed0066adc56_origin.pdf +3 -0
- 2403.00xxx/2403.00439/full.md +0 -0
- 2403.00xxx/2403.00439/images.zip +3 -0
- 2403.00xxx/2403.00439/layout.json +0 -0
- 2403.00xxx/2403.00448/46c39b63-fc53-45de-bf27-f6a2478aeeb7_content_list.json +0 -0
- 2403.00xxx/2403.00448/46c39b63-fc53-45de-bf27-f6a2478aeeb7_model.json +0 -0
- 2403.00xxx/2403.00448/46c39b63-fc53-45de-bf27-f6a2478aeeb7_origin.pdf +3 -0
- 2403.00xxx/2403.00448/full.md +477 -0
- 2403.00xxx/2403.00448/images.zip +3 -0
- 2403.00xxx/2403.00448/layout.json +0 -0
- 2403.00xxx/2403.00454/32cf1812-4569-4224-b8b1-fbebe2604343_content_list.json +1902 -0
- 2403.00xxx/2403.00454/32cf1812-4569-4224-b8b1-fbebe2604343_model.json +0 -0
- 2403.00xxx/2403.00454/32cf1812-4569-4224-b8b1-fbebe2604343_origin.pdf +3 -0
- 2403.00xxx/2403.00454/full.md +327 -0
- 2403.00xxx/2403.00454/images.zip +3 -0
- 2403.00xxx/2403.00454/layout.json +0 -0
- 2403.00xxx/2403.00476/ac231d9a-78e3-4c77-9bf9-4d860a5d5fd0_content_list.json +0 -0
- 2403.00xxx/2403.00476/ac231d9a-78e3-4c77-9bf9-4d860a5d5fd0_model.json +0 -0
- 2403.00xxx/2403.00476/ac231d9a-78e3-4c77-9bf9-4d860a5d5fd0_origin.pdf +3 -0
- 2403.00xxx/2403.00476/full.md +0 -0
- 2403.00xxx/2403.00476/images.zip +3 -0
- 2403.00xxx/2403.00476/layout.json +0 -0
- 2403.00xxx/2403.00483/619283d9-9e07-40aa-a0f5-3db958d84022_content_list.json +1787 -0
- 2403.00xxx/2403.00483/619283d9-9e07-40aa-a0f5-3db958d84022_model.json +0 -0
- 2403.00xxx/2403.00483/619283d9-9e07-40aa-a0f5-3db958d84022_origin.pdf +3 -0
- 2403.00xxx/2403.00483/full.md +339 -0
- 2403.00xxx/2403.00483/images.zip +3 -0
- 2403.00xxx/2403.00483/layout.json +0 -0
- 2403.00xxx/2403.00485/c4d4f849-2aca-4cb1-9365-a1f1ef3e8602_content_list.json +0 -0
.gitattributes
CHANGED
@@ -8471,3 +8471,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 2403.02xxx/2403.02338/4101729d-b71b-4643-863f-8653a63772be_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2403.02xxx/2403.02370/1d19e467-ce0d-404c-a27f-76f11f8f2022_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2403.03xxx/2403.03230/45b4692c-5b3a-4d25-b0c7-471b0231c670_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00409/8805edf3-b3ea-467b-92ab-b41e43567d4c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00425/4bcef5a3-e808-495b-b52d-5b7269aa0844_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00436/8c21a5cc-c292-4537-b2a4-73c65d3149da_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00439/51835c5b-8381-4a5b-996b-aed0066adc56_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00448/46c39b63-fc53-45de-bf27-f6a2478aeeb7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00454/32cf1812-4569-4224-b8b1-fbebe2604343_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00476/ac231d9a-78e3-4c77-9bf9-4d860a5d5fd0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00483/619283d9-9e07-40aa-a0f5-3db958d84022_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00485/c4d4f849-2aca-4cb1-9365-a1f1ef3e8602_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00486/dbf32b43-11a5-4826-bcb2-59ed9c5b01dc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00504/02f5f8c0-50f0-4af8-bd48-a06ee4084a4e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00514/e9f5c34d-0146-4b13-9d83-eadf1cf1b4f0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00522/7f83ae3c-267e-49b7-a1ad-5246a6f93f55_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00553/694dc1ec-4d7a-4259-a20b-169e359b1903_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00564/365c0225-3cbe-4c2d-8fd0-cdd1572c7bae_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00579/8c25a30e-f977-4865-ae76-881fa5f0f2ea_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00632/c2026416-fe81-408f-9c1c-cc078f5bee0d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00644/fa9a8dda-b0b0-4825-bad8-ce4b65dfb81c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00669/2f774c19-8837-4609-9c90-c9d7a438d313_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00712/9b018d32-b64a-467d-9d52-92d3442cece6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00742/c6bc0797-9e40-4a48-8718-9d35ed167335_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00745/87a0cadf-b43f-4c12-aee0-927d91689621_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00762/81497dbb-b0da-4002-b2ce-fd5137be569b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.00xxx/2403.00894/d51d4750-b881-438b-91db-088a3e3a207f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01031/48692706-7be4-4505-b4f5-e876e5b9e2f0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01038/91674990-1dd3-4e26-ab2c-76a2c4ef4565_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01081/23548557-c003-4399-8afd-2f3c785ada37_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01091/35af6014-94a9-4903-8940-f048ac9a25e4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01105/7fee1347-6edf-40e8-920c-a9e7ed55c509_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01121/c996157b-dbd8-4f69-a500-6cdaaf494ae4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01123/ce22a7ff-c137-49a4-8cbc-7f17fb8442e2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01131/7e9494cc-8c4d-4a80-9a59-31e9d83bc11c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01180/867cc830-c7d8-424c-aca3-a6d1820c6192_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01189/868048e5-74eb-4de4-9588-b30b6a515ced_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01216/f874a52a-cae2-403e-a1dc-1380b998456b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01218/b134147c-7502-4340-9159-1e07ecf80138_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01222/5e8f4d38-254e-4082-beff-79d2ff972101_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01232/fffd4474-4e96-4ad1-871a-825fd6f1cef3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01241/120e7fa2-aace-46a4-b60c-6e6dcf2b0088_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01244/c2ed86e1-efdf-4ce3-88ab-8dbd2fca2968_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01248/4aa7cada-9147-4715-8abf-d4134dae24d5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01251/2ff3f316-654b-4fa8-91b6-d6d15e1ba7de_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01255/89b8f6fb-9965-466b-8f42-a2a48370dc5f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01267/158485fd-bc89-45ee-b434-520abe2bb872_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01289/3fc304b6-6a11-4822-a98b-bbbea5d06135_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01316/ebf53104-6f06-49f9-8109-e386aacc4df5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01342/2eeeeb49-0101-4df6-badd-12d08139062b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01355/24b9110f-b6f8-4150-bc44-4f1df0e3629b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01387/8e0a7ea2-3a0b-4915-a0ef-ca1ac4bf0c5c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01413/2c80f4c0-b5bd-4213-8fd5-f03df78626bd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01427/ce9365e5-b4b4-4c24-a51e-f0e7f1bce1cf_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01432/51113ca1-8b02-4378-a1ea-d415dbe5b11e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01439/d148f759-8fdf-43df-a60a-d44fed7939c2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01444/b0008608-3e1b-4c8e-832e-e1d4ce87122a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01446/9b3ba110-5f02-4dd7-9adf-961ffb6ffe74_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01480/5c8fdf25-6aec-4d86-945b-0e2d8bb4bd1b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01482/1c960fa3-98e2-4c86-b5cf-7d1128bfd0ac_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01493/ffaab572-6e00-4a3d-a449-338b3a23e909_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01497/bebdd51a-9924-41e7-9f4f-8e6467492831_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.01xxx/2403.01501/acccb559-0a34-41ee-bc6c-72c8a47f5575_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.04xxx/2403.04782/fe59d90b-6e54-4a01-a92f-35836da2bbad_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.04xxx/2403.04783/709cf55b-47b8-471d-bcb6-22b8adcdb628_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.04xxx/2403.04786/4845db1a-1541-4d17-907f-c8cda699bdf3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2403.05xxx/2403.05584/e469b1f5-4f81-4617-8a2e-b30130abb578_origin.pdf filter=lfs diff=lfs merge=lfs -text
2403.00xxx/2403.00409/8805edf3-b3ea-467b-92ab-b41e43567d4c_content_list.json
ADDED
The diff for this file is too large to render. See raw diff
2403.00xxx/2403.00409/8805edf3-b3ea-467b-92ab-b41e43567d4c_model.json
ADDED
The diff for this file is too large to render. See raw diff
2403.00xxx/2403.00409/8805edf3-b3ea-467b-92ab-b41e43567d4c_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5237773ad116d0193212993766c7b124cbe60097a7babef5327a0ccfa13e7846
+size 495704
2403.00xxx/2403.00409/full.md
ADDED
@@ -0,0 +1,799 @@
# Provably Robust DPO: Aligning Language Models with Noisy Feedback

Sayak Ray Chowdhury $^{*1}$ Anush Kini $^{*1}$ Nagarajan Natarajan

# Abstract

Learning from preference-based feedback has recently gained traction as a promising approach to align language models with human interests. While these aligned generative models have demonstrated impressive capabilities across various tasks, their dependence on high-quality human preference data poses a bottleneck in practical applications. Specifically, noisy (incorrect and ambiguous) preference pairs in the dataset might restrict the language models from capturing human intent accurately. While practitioners have recently proposed heuristics to mitigate the effect of noisy preferences, a complete theoretical understanding of their workings remains elusive.

In this work, we aim to bridge this gap by introducing a general framework for policy optimization in the presence of random preference flips. We focus on the direct preference optimization (DPO) algorithm in particular since it assumes that preferences adhere to the Bradley-Terry-Luce (BTL) model, raising concerns about the impact of noisy data on the learned policy. We design a novel loss function, which de-biases the effect of noise on average, making a policy trained by minimizing that loss robust to the noise. Under log-linear parameterization of the policy class and assuming good feature coverage of the SFT policy, we prove that the sub-optimality gap of the proposed robust DPO (rDPO) policy compared to the optimal policy is of the order $O\left(\frac{1}{1 - 2\varepsilon}\sqrt{\frac{d}{n}}\right)$, where $\varepsilon < 1/2$ is the flip rate of labels, $d$ is the policy parameter dimension and $n$ is the size of the dataset. Our experiments on IMDb sentiment generation and Anthropic's helpful-harmless dataset show that rDPO is robust to noise in preference labels compared to vanilla DPO and other heuristics proposed by practitioners.

# 1. Introduction

Reinforcement Learning from Human Feedback (RLHF) has proven highly effective in aligning Large Language Models (LLMs) with human preferences (Christiano et al., 2017; Stiennon et al., 2020; Ouyang et al., 2022). In the RLHF pipeline (Kaufmann et al., 2023), an LLM is first pretrained using supervised fine tuning to obtain a reference or SFT policy. A reward model is fit to a dataset of human preferences in the form of a classifier between preferred and rejected responses. Next, an LLM policy is trained using RL algorithms such as proximal policy optimization (PPO) to generate high-reward responses while minimizing a certain notion of divergence from the SFT policy.

While RLHF produces models (e.g. GPT4, Llama, Mistral etc.) with impressive capabilities across diverse tasks ranging from programming to creative writing, it introduces notable complexities into the training process (Zheng et al., 2023). It requires training two language models (one for reward and another for policy) and frequent sampling from the policy in the course of training. This demands significant compute and storage, often limiting the feasible size of a model. To get around these issues, the direct preference optimisation (DPO) method (Rafailov et al. (2023)) optimizes the language model policy directly from human preferences, without learning a reward model explicitly and avoiding the complexities of RL. Given a dataset of human preferences over model responses, DPO defines a certain binary cross-entropy loss, and implicitly optimizes the same objective as RLHF in the form of KL-regularized reward maximization.

A crucial ingredient governing the success of both RLHF and DPO is the quality of preference data. Gathering responses for a vast array of prompts is often inherently noisy (e.g., ambiguous preferences), which could derail policy training, with or without RL (Lambert et al., 2023; Bai et al., 2022b). We find empirical evidence that these algorithms are robust to noise in some scenarios (as also demonstrated by Rafailov et al. (2023); Ouyang et al. (2022)), even though they work under the assumption that the observed preferences adhere to an underlying sampling model (see Section 2). On the other hand, as we show via simple noise injection mechanisms on real-world datasets in Section 6, the performance of DPO drops significantly when the noise rates are high. We are not the first to identify or address this problem — Wang et al. (2024) demonstrate the sensitivity of the reward training step in the RLHF pipeline to noisy preferences in real data, and design heuristics to mitigate the impact (discussed in Section 6). However, little is known about the theory behind these heuristics, which could justify their performance in practice.

In this work, we focus on bridging this gap between theory and practice by introducing a general theoretical framework for learning from noisy preference data. We particularly focus on the DPO algorithm in the presence of random preference noise, where preferences are flipped with some (known) rate. We make the following contributions.

Novel loss function. We design a novel loss function by adapting the binary cross entropy (BCE) loss of DPO with the rate of label flips. We show that this loss is an unbiased estimate of the original BCE loss, which de-biases the effect of preference noise and makes the policy robust. We call it robust DPO (rDPO). Similar to DPO, our rDPO gradients on average increase the log probability of preferred answers relative to the rejected ones. But, unlike DPO, the importance weights in the gradients are tuned to the noise level, which mitigates the effect of noisy preferences. Notably, our approach generalizes to reward training in RLHF and to other preference optimization methods (discussed in Section 5).

First theoretical guarantees. To the best of our knowledge, we are the first to provide theoretical guarantees for practical preference optimization algorithms. Under log-linear parameterization of the policy class, we show that the estimation error of our rDPO policy compared to the optimal policy is at most $O\left(\frac{1}{1 - 2\varepsilon}\sqrt{\frac{d}{n}}\right)$, where $\varepsilon \in [0,1/2)$ is the flip rate, $d$ is the dimension of the policy parameter and $n$ is the number of preference samples. Under good coverage of the SFT policy over the feature space, the estimation error bound translates to a bound on the average reward obtained by our trained policy as compared to the optimal policy. Our results show that the additional cost of preference flips is a (multiplicative) factor of $O\left(\frac{1}{1 - 2\varepsilon}\right)$. Along the way, setting $\varepsilon = 0$ in the above bound, we obtain the first performance bounds for the DPO policy without label noise, which resolves an elusive theoretical gap in the understanding of practical algorithms for learning from human preferences.

Empirical evidence. On noisy preferences generated from sentiment generation on the IMDb dataset (Maas et al., 2011) and on Anthropic's helpful-harmless dataset (Bai et al., 2022a), we provide empirical evidence that the performance of DPO degrades with the introduction of high noise in data. However, rDPO is robust to noise in preference labels compared to other baselines, including DPO with label smoothing (Mitchell, 2023). Additionally, policies optimized using rDPO are consistently better than other methods across different sampling temperatures.

# 1.1. Related Work

Recognizing the storage and computational challenges in RLHF, several alternatives have been proposed. Each of these methods works with a different loss function. While DPO optimizes a BCE loss to learn the policy (Rafailov et al., 2023), SLiC uses a hinge loss plus a regularization loss (Zhao et al., 2023), IPO uses a square loss (Azar et al., 2023), RRHF uses a ranking loss plus an SFT loss (Yuan et al., 2023) and RSO uses a BCE loss plus rejection sampling (Liu et al., 2023). While they have their own intricacies and differences, all are competitive with RLHF on standard language tasks.

A recent line of work provides theoretical guarantees on the performance of policies learned using preference-based RL algorithms (Pacchiano et al., 2021; Chen et al., 2022; Zhu et al., 2023; Zhan et al., 2023). All these works focus on guarantees in terms of regret bounds in the standard bandit or RL setting and do not deal with practical algorithms like RLHF or DPO. Zhu et al. (2024) consider the problem of reward overfitting in RLHF by replacing hard labels with soft ones. They do not consider model overfitting in the presence of noisy data.

There is a line of work in the supervised (deep) learning literature that considers learning in the presence of label noise. Müller et al. (2019) study the effect of label smoothing to mitigate the overfitting problem under noisy data. Natarajan et al. (2013) consider binary classification with noisy labels, while Patrini et al. (2017) work on multi-label classification problems. They focus on bounding the excess population risk of trained classifiers under the clean distribution. In contrast, we aim to bound the estimation error of the trained policy, which brings out additional challenges in the analysis.

# 2. Background and Problem Setup

Learning algorithms for conditional language generation from human feedback take as input a preference dataset $\mathcal{D} = (s_i, a_{w,i}, a_{l,i})_{i=1}^n$ of size $n$ that distinguishes the better answer from the worse given the same prompt. First, a prompt is sampled from a distribution: $s \sim \rho$. Next, a pair of answers is sampled from a supervised fine-tuned (SFT) policy: $a, a' \sim \pi_{\mathrm{sft}}(\cdot|s)$. The response pairs are then presented to human labelers (or, an oracle) who express preferences for answers given prompt $s$, denoted as $a_w \succ a_l \mid s$. The preference distribution is typically expressed using a latent reward model $r^*(s,a)$ as

$$
p_{s, a, a'}^{*} = \mathbb{P}[a \succ a' \mid s] = g\left(r^{*}(s, a) - r^{*}(s, a')\right), \tag{1}
$$

where $g: \mathbb{R} \to [0,1]$ is a monotone non-decreasing function (with $g(z) = 1 - g(-z)$) that converts reward differences into winning probabilities. When $g$ is the sigmoid function $\sigma(z) = \frac{1}{1 + e^{-z}}$, we get the Bradley-Terry-Luce (BTL) model (Bradley & Terry, 1952; Luce, 2012).
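To make (1) concrete, here is a minimal Python sketch of the BTL preference probability; the function name and toy reward values are illustrative only and not taken from the paper's code.

```python
import numpy as np

def btl_preference_prob(r_a: float, r_b: float) -> float:
    """P[a preferred over b | s] under the BTL model (1), i.e. g = sigmoid of the reward gap."""
    return 1.0 / (1.0 + np.exp(-(r_a - r_b)))

# A reward gap of 1.0 gives roughly a 73% chance that a is preferred over b.
print(btl_preference_prob(2.0, 1.0))  # ~0.731
```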
Optimal Policy. Starting with a prompt distribution $\rho$ and an SFT policy $\pi_{\mathrm{sft}}$, the optimal language model policy $\pi^{*}$ corresponding to the latent reward model $r^{*}$ can be computed by maximizing the objective (Schulman et al., 2017)

$$
J(\pi) = \mathbb{E}_{s \sim \rho,\, a \sim \pi(\cdot|s)}\left[r^{*}(s, a) - \beta \log \frac{\pi(a|s)}{\pi_{\mathrm{sft}}(a|s)}\right].
$$

The optimal policy takes the form (Rafailov et al., 2023)

$$
\pi^{*}(a|s) = \frac{1}{Z^{*}(s)}\,\pi_{\mathrm{sft}}(a|s)\exp\left(r^{*}(s, a)/\beta\right), \tag{2}
$$

where $Z^{*}(s) = \sum_{a\in\mathcal{A}}\pi_{\mathrm{sft}}(a|s)\exp(r^{*}(s,a)/\beta)$ denotes the log-partition (normalizing) function. Here $\beta > 0$ is a parameter that governs the balance between exploitation and exploration. When $\beta \rightarrow 0$, all probability mass concentrates on the response with the highest reward (exploitation). At the other extreme, when $\beta \rightarrow \infty$, the optimal policy coincides with $\pi_{\mathrm{sft}}$ (exploration). The goal is to learn a policy from preference data that generates good reward.
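The closed form (2) is easy to verify on a toy discrete answer set. The sketch below (illustrative names and values, assuming a small finite answer set) also shows the role of $\beta$ described above.

```python
import numpy as np

def optimal_policy(pi_sft: np.ndarray, rewards: np.ndarray, beta: float) -> np.ndarray:
    """KL-regularized optimal policy of (2): pi*(a|s) proportional to pi_sft(a|s) * exp(r*(s,a)/beta)."""
    unnormalized = pi_sft * np.exp(rewards / beta)
    return unnormalized / unnormalized.sum()  # division by the normalizer Z*(s)

pi_sft = np.array([0.5, 0.3, 0.2])    # SFT policy over three candidate answers
rewards = np.array([1.0, 2.0, 0.0])   # latent rewards r*(s, a)
print(optimal_policy(pi_sft, rewards, beta=0.5))    # mass shifts to the high-reward answer
print(optimal_policy(pi_sft, rewards, beta=100.0))  # stays close to pi_sft (exploration)
```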
Policy Estimation. Re-arranging (2), we get

$$
r^{*}(s, a) = \beta \log \frac{\pi^{*}(a|s)}{\pi_{\mathrm{sft}}(a|s)} + \beta \log Z^{*}(s). \tag{3}
$$

Then the true preference probabilities under the BTL model (1) can be expressed using the optimal and SFT policies as (Rafailov et al., 2023)

$$
p_{s,a,a'}^{*} = \sigma\left(\beta \log \frac{\pi^{*}(a|s)}{\pi_{\mathrm{sft}}(a|s)} - \beta \log \frac{\pi^{*}(a'|s)}{\pi_{\mathrm{sft}}(a'|s)}\right).
$$

In this work, we consider parameterized policies $\pi_{\theta}$, where $\theta \in \Theta \subset \mathbb{R}^d$ is a vector of dimension $d$. In practice, the most common policy classes are of the form

$$
\Pi = \left\{\pi_{\theta}(a|s) = \frac{\exp\left(f_{\theta}(s, a)\right)}{\sum_{a'\in\mathcal{A}}\exp\left(f_{\theta}(s, a')\right)}\right\}, \tag{4}
$$

where $f_{\theta}$ is a real-valued differentiable function. For example, the tabular softmax policy class is the one where $f_{\theta}(s,a) = \theta_{s,a}$. Typically, $f_{\theta}$ is either a linear function or a neural network. A linear $f_{\theta}$ can be expressed as $f_{\theta}(s,a) = \phi(s,a)^{\top}\theta$ using a feature map $\phi(s,a)\in\mathbb{R}^d$. In this case $\pi_{\theta}$ becomes a log-linear policy, i.e., $\log \pi_{\theta}(a|s)\propto \langle \theta,\phi(s,a)\rangle$. In the case of language model policies, the feature map $\phi$ can be constructed by removing the last layer of the model, and $\theta$ corresponds to the weights of the last layer.

Let $\theta^{*}$ and $\theta_0$ denote the parameters corresponding to the optimal and SFT policies, respectively. Now, define the preference score of an answer $a$ relative to another one $a'$ given prompt $s$ under policy $\pi_{\theta}$ as

$$
h_{\theta}(s, a, a') = \widehat{r}_{\theta}(s, a) - \widehat{r}_{\theta}(s, a'), \tag{5}
$$

where $\widehat{r}_{\theta}(s,a) = \log \frac{\pi_{\theta}(a|s)}{\pi_{\theta_0}(a|s)}$ is an implicit reward defined by the trained and SFT policies $\pi_{\theta}$ and $\pi_{\theta_0}$. This lets us express, for any $\theta \in \Theta$, the predicted preference probabilities (we omit dependence on $\theta, \theta_0$ for brevity) as

$$
p_{s, a, a'} = \mathbb{P}_{\theta}[a \succ a' \mid s] = \sigma\left(\beta h_{\theta}(s, a, a')\right). \tag{6}
$$

In this notation, we have the true preference probabilities $p_{s,a,a'}^* = \sigma(\beta h_{\theta^*}(s,a,a'))$.

With preference probabilities expressed in terms of the optimal policy, the DPO algorithm (Rafailov et al., 2023) finds the maximum likelihood estimate (MLE) by minimizing the empirical BCE loss $\frac{1}{n}\sum_{i=1}^{n}\mathcal{L}(\theta; s_i, a_{w,i}, a_{l,i})$, where

$$
\mathcal{L}(\theta; s, a_{w}, a_{l}) = -\log \sigma\left(\beta h_{\theta}(s, a_{w}, a_{l})\right). \tag{7}
$$

Technically, the minimizer of this loss is not strictly an MLE for the optimal policy parameter $\theta^{*}$, since the preference pairs are sampled from the SFT policy $\pi_{\theta_0}$ and not from the policy to be estimated, $\pi_{\theta^*}$. In reality, however, it is challenging to obtain preference pairs directly sampled from $\pi_{\theta^{*}}$.
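For intuition, a minimal sketch of the per-example loss (7), computed from policy and reference log-probabilities via the implicit-reward margin (5); the helper name and numbers are illustrative, not the authors' implementation.

```python
import math

def dpo_loss(logp_w: float, logp_l: float, ref_logp_w: float, ref_logp_l: float, beta: float) -> float:
    """Per-example DPO loss (7): -log sigmoid(beta * h_theta), where h_theta (5) is the
    difference of implicit rewards log(pi_theta/pi_sft) of the preferred and rejected answers."""
    h = (logp_w - ref_logp_w) - (logp_l - ref_logp_l)
    return -math.log(1.0 / (1.0 + math.exp(-beta * h)))

# The loss shrinks as the trained policy puts more relative mass on the preferred answer.
print(dpo_loss(-1.0, -3.0, -2.0, -2.0, beta=0.1))  # margin h = 2.0
```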
Preference Noise. In this work, we model noise in the preferences via the standard random noise model (Natarajan et al., 2013; Wang et al., 2024; Mitchell, 2023), where the revealed preferences are true preferences flipped with a small probability $\varepsilon \in (0,1/2)$, i.e.

$$
\mathbb{P}_{\varepsilon}\left[(\widetilde{a}_{l,i}, \widetilde{a}_{w,i}) = \left(a_{w,i}, a_{l,i}\right) \mid s_{i}\right] = \varepsilon. \tag{8}
$$

Let $\widetilde{\mathcal{D}} = (s_i, \widetilde{a}_{w,i}, \widetilde{a}_{l,i})_{i=1}^n$ denote the dataset of potentially noisy samples. These noisy samples are what the learning algorithm sees, i.e., $\widetilde{a}_{w,i}$ is seen to be preferred over $\widetilde{a}_{l,i}$. We will assume that the flip rate $\varepsilon$ is known to the learner. In practice, we will tune the flip rate through cross-validation.
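The noise model (8) can be simulated directly. A minimal sketch follows (the function name and toy dataset are illustrative and not the paper's experimental script).

```python
import random

def flip_preferences(dataset, eps, seed=0):
    """Random preference-noise model (8): swap (a_w, a_l) independently with probability eps."""
    rng = random.Random(seed)
    noisy = []
    for s, a_w, a_l in dataset:
        if rng.random() < eps:
            noisy.append((s, a_l, a_w))  # label flipped
        else:
            noisy.append((s, a_w, a_l))  # label kept
    return noisy

clean = [("prompt-1", "answer-A", "answer-B"), ("prompt-2", "answer-C", "answer-D")]
print(flip_preferences(clean, eps=0.4))
```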
Performance Measure. Our goal is to learn a policy $\widehat{\pi}_n(a|s)$ (equivalently, a policy parameter $\widehat{\theta}_n$) from noisy preference data $\widetilde{\mathcal{D}}$ that generates maximum expected reward

$$
r^{*}(\pi) = \mathbb{E}_{s \sim \rho,\, a \sim \pi(\cdot|s)}\left[r^{*}(s, a)\right].
$$

We measure the performance of the learned policy using its suboptimality gap from the optimal policy $\pi^{*}$, namely $r^{*}(\pi^{*}) - r^{*}(\widehat{\pi}_{n})$. Ideally, we want the gap to go down to zero as $n \to \infty$ at a rate at least sub-linear in $n$. This is a standard measure of policy performance in the RL literature (Zhu et al., 2023; Qiao & Wang, 2022; Agarwal et al., 2021).
# 3. Our Approach: Robust DPO

We start with the BCE loss under noisy preferences and then approximate it with a conservative loss that practitioners have explored recently (Mitchell, 2023). Next, we discuss its drawback, which helps us build intuition for a robust loss.

Given the corrupted dataset $\widetilde{\mathcal{D}}$, one can use (7) to compute the MLE under noisy preferences by minimizing the loss

$$
\mathcal{L}_{\varepsilon}(\theta; s, \widetilde{a}_{w}, \widetilde{a}_{l}) = -\log \mathbb{P}_{\theta, \varepsilon}[\widetilde{a}_{w} \succ \widetilde{a}_{l} \mid s], \tag{9}
$$

where, for any $(s,a,a')$ triplet, the predicted probabilities under noisy preferences are computed using (6) and (8):

$$
\begin{aligned}
\mathbb{P}_{\theta, \varepsilon}[a \succ a' \mid s] &= (1 - \varepsilon)\cdot \mathbb{P}_{\theta}[a \succ a' \mid s] + \varepsilon \cdot \mathbb{P}_{\theta}[a' \succ a \mid s] \\
&= (1 - \varepsilon)\cdot \sigma\left(\beta h_{\theta}(s, a, a')\right) + \varepsilon \cdot \sigma\left(\beta h_{\theta}(s, a', a)\right).
\end{aligned} \tag{10}
$$

Now, using Jensen's inequality, one can obtain

$$
\log \mathbb{P}_{\theta, \varepsilon}[\widetilde{a}_{w} \succ \widetilde{a}_{l} \mid s] \geq (1 - \varepsilon)\cdot \log \sigma\left(\beta h_{\theta}(s, \widetilde{a}_{w}, \widetilde{a}_{l})\right) + \varepsilon \cdot \log \sigma\left(\beta h_{\theta}(s, \widetilde{a}_{l}, \widetilde{a}_{w})\right).
$$

Thus, one can upper bound (9) by a conservative loss

$$
\begin{aligned}
\bar{\mathcal{L}}_{\varepsilon}(\theta; s, \widetilde{a}_{w}, \widetilde{a}_{l}) &= -(1 - \varepsilon)\log \sigma\left(\beta h_{\theta}(s, \widetilde{a}_{w}, \widetilde{a}_{l})\right) - \varepsilon \log \sigma\left(\beta h_{\theta}(s, \widetilde{a}_{l}, \widetilde{a}_{w})\right) \\
&= (1 - \varepsilon)\,\mathcal{L}(\theta; s, \widetilde{a}_{w}, \widetilde{a}_{l}) + \varepsilon\,\mathcal{L}(\theta; s, \widetilde{a}_{l}, \widetilde{a}_{w}),
\end{aligned} \tag{11}
$$

which is simply a weighted sum of the DPO loss (7) under noisy preferences. Mitchell (2023) called this method conservative DPO (cDPO). It can also be motivated from the label smoothing technique (Müller et al., 2019) used to mitigate the over-fitting problem under noisy data. Notably, Wang et al. (2024) use exactly the same loss function to train the reward model for RLHF, and empirically show its superior performance over vanilla RLHF in the presence of noisy data. In our experiments, we call this method (when coupled with PPO for policy training) conservative PPO (cPPO).
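A minimal sketch of the conservative loss (11) on a single observed pair, written in terms of the implicit-reward margin $h$ of (5); the names and toy values are illustrative.

```python
import math

def sigmoid(z: float) -> float:
    return 1.0 / (1.0 + math.exp(-z))

def cdpo_loss(h: float, beta: float, eps: float) -> float:
    """Conservative DPO loss (11) on one observed pair with margin h = h_theta(s, a_w~, a_l~):
    a (1-eps, eps) mixture of the DPO losses on the observed and on the swapped orientation
    (label smoothing)."""
    return -(1.0 - eps) * math.log(sigmoid(beta * h)) - eps * math.log(sigmoid(-beta * h))

print(cdpo_loss(h=2.0, beta=0.1, eps=0.2))
```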
# 3.1. An Unbiased Loss Function

The BCE loss (9) and the conservative loss (11) have a common drawback – both introduce bias in the DPO loss (7). This is due to the fact that

$$
\mathbb{E}\left[\ell(\theta; s, \widetilde{a}_{w}, \widetilde{a}_{l})\right] \neq \mathcal{L}(\theta; s, a_{w}, a_{l}), \quad \ell \in \left\{\mathcal{L}_{\varepsilon}, \bar{\mathcal{L}}_{\varepsilon}\right\}.
$$

It also holds that (Chowdhury et al., 2023)

$$
\operatorname{logit}\left(\mathbb{P}_{\theta, \varepsilon}[a \succ a' \mid s]\right) \neq \operatorname{logit}\left(\mathbb{P}_{\theta}[a \succ a' \mid s]\right).
$$

That is, the log-odds of preferring $a$ over $a'$ under noisy preferences differ from those without noise, which introduces a bias in preferences. Ideally, we want the logits to be the same both with and without noise. To this end, we define the (un-normalized) preference probabilities

$$
\widehat{\mathbb{P}}_{\theta, \varepsilon}[a \succ a' \mid s] = \frac{\sigma(\beta h_{\theta}(s, a, a'))^{(1 - \varepsilon)}}{\sigma(\beta h_{\theta}(s, a', a))^{\varepsilon}}.
$$

These have the same logits as those without noise, since

$$
\operatorname{logit}\left(\widehat{\mathbb{P}}_{\theta, \varepsilon}[a \succ a' \mid s]\right) = \log\left(\frac{\sigma\left(\beta h_{\theta}(s, a, a')\right)}{\sigma\left(\beta h_{\theta}(s, a', a)\right)}\right) = \operatorname{logit}\left(\mathbb{P}_{\theta}[a \succ a' \mid s]\right).
$$

This motivates us to define the loss function:

$$
\widehat{\mathcal{L}}_{\varepsilon}(\theta; s, \widetilde{a}_{w}, \widetilde{a}_{l}) = -\frac{1}{1 - 2\varepsilon}\log \widehat{\mathbb{P}}_{\theta, \varepsilon}[\widetilde{a}_{w} \succ \widetilde{a}_{l} \mid s] = \frac{(1 - \varepsilon)\,\mathcal{L}(\theta; s, \widetilde{a}_{w}, \widetilde{a}_{l}) - \varepsilon\,\mathcal{L}(\theta; s, \widetilde{a}_{l}, \widetilde{a}_{w})}{1 - 2\varepsilon}. \tag{12}
$$

This loss is an unbiased estimator of the DPO loss (7) under noisy preferences, as stated in the following lemma.

Lemma 3.1. For any $\theta, \theta_0 \in \mathbb{R}^d$, $\varepsilon \in (0, 1/2)$, we have

$$
\mathbb{E}_{\varepsilon}\left[\widehat{\mathcal{L}}_{\varepsilon}(\theta; s, \widetilde{a}_{w}, \widetilde{a}_{l}) \,\middle|\, a_{w}, a_{l}\right] = \mathcal{L}(\theta; s, a_{w}, a_{l}).
$$

This way, we learn a good estimate of the policy parameter in the presence of label noise by minimizing the sample average of the above robust (w.r.t. preference flips) loss:

$$
\widehat{\theta}_{n} \in \operatorname*{argmin}_{\theta \in \Theta} \frac{1}{n}\sum_{i=1}^{n}\widehat{\mathcal{L}}_{\varepsilon}(\theta; s_i, \widetilde{a}_{w,i}, \widetilde{a}_{l,i}). \tag{13}
$$

We call our method robust DPO (or rDPO in short). Note that when preferences are clean (i.e. flip rate $\varepsilon = 0$), the rDPO loss (12) reduces to the DPO loss (7), and hence our trained rDPO policy (13) coincides with the DPO policy of Rafailov et al. (2023).

Variance of rDPO loss. Along with unbiasedness, it is also desirable to have bounded variance of the estimator. To this end, consider the un-normalized rDPO loss $(1 - 2\varepsilon)\widehat{\mathcal{L}}_{\varepsilon}(\theta; s, \widetilde{a}_w, \widetilde{a}_l)$, which yields the same loss-minimizing policy as in (13). It has variance $\varepsilon(1-\varepsilon)\left[\mathcal{L}(\theta; s, a_w, a_l) - \mathcal{L}(\theta; s, a_l, a_w)\right]^2$. For the neural policy class of the form (4) and for bounded $f_{\theta}$, the variance is bounded by $C\varepsilon(1 - \varepsilon)$ for some constant $C > 0$. Since $\varepsilon \leq 1/2$, the variance is bounded by $C/4$.
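The following sketch implements (12) on a single pair and numerically checks the unbiasedness claim of Lemma 3.1; the toy numbers are illustrative only.

```python
import math

def sigmoid(z: float) -> float:
    return 1.0 / (1.0 + math.exp(-z))

def dpo_loss(h: float, beta: float) -> float:
    """DPO loss (7) as a function of the implicit-reward margin h = h_theta(s, a_w, a_l)."""
    return -math.log(sigmoid(beta * h))

def rdpo_loss(h: float, beta: float, eps: float) -> float:
    """rDPO loss (12) on an observed (possibly flipped) pair with margin h."""
    return ((1.0 - eps) * dpo_loss(h, beta) - eps * dpo_loss(-h, beta)) / (1.0 - 2.0 * eps)

# Numerical check of Lemma 3.1: averaging the rDPO loss over the flip distribution
# (no flip w.p. 1-eps, flip w.p. eps) recovers the clean DPO loss exactly.
h, beta, eps = 1.3, 0.5, 0.3
averaged = (1.0 - eps) * rdpo_loss(h, beta, eps) + eps * rdpo_loss(-h, beta, eps)
print(averaged, dpo_loss(h, beta))  # the two numbers coincide
```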
# 3.2. Gradients of rDPO Loss

To further understand the mechanism of rDPO, let us now look at the gradients of its loss (12) and contrast them with those of the DPO loss (7). The gradient of $\widehat{\mathcal{L}}_{\varepsilon}$ with respect to the parameters $\theta$ can be written as

$$
\begin{aligned}
\nabla_{\theta}\widehat{\mathcal{L}}_{\varepsilon}(\theta; s, \widetilde{a}_{w}, \widetilde{a}_{l}) &= \frac{(1 - \varepsilon)\nabla_{\theta}\mathcal{L}(\theta; s, \widetilde{a}_{w}, \widetilde{a}_{l}) - \varepsilon\nabla_{\theta}\mathcal{L}(\theta; s, \widetilde{a}_{l}, \widetilde{a}_{w})}{1 - 2\varepsilon} \\
&= -\beta\,\widehat{\zeta}_{\theta, \varepsilon}\left(\nabla_{\theta}\log \pi_{\theta}(\widetilde{a}_{w}|s) - \nabla_{\theta}\log \pi_{\theta}(\widetilde{a}_{l}|s)\right).
\end{aligned} \tag{14}
$$

Here the weights in the gradients are given by

$$
\widehat{\zeta}_{\theta, \varepsilon} = \underbrace{\frac{1 - \varepsilon}{1 - 2\varepsilon}\,\sigma\left(\beta h_{\theta}(s, \widetilde{a}_{l}, \widetilde{a}_{w})\right)}_{\text{(I)}} + \underbrace{\frac{\varepsilon}{1 - 2\varepsilon}\,\sigma\left(\beta h_{\theta}(s, \widetilde{a}_{w}, \widetilde{a}_{l})\right)}_{\text{(II)}},
$$

where $h_{\theta}(s, a, a')$ is the difference of implicit rewards $\widehat{r}_{\theta}$ of answers $a$ and $a'$ given prompt $s$; see (5). Term (I) puts higher weight when the implicit reward model $\widehat{r}_{\theta}$ orders the observed preferences incorrectly, and scales it proportionally with the probability of no flip. Term (II) puts higher weight when the implicit reward model $\widehat{r}_{\theta}$ orders the observed preferences correctly, and scales it proportionally with the probability of a flip. Together, the two terms de-bias the effect of noise on average in the observed preferences.

Comparison with DPO and cDPO. The weights in the gradients of the cDPO loss $\bar{\mathcal{L}}_{\varepsilon}$ are

$$
\bar{\zeta}_{\theta, \varepsilon} = (1 - \varepsilon)\,\sigma\left(\beta h_{\theta}(s, \widetilde{a}_{l}, \widetilde{a}_{w})\right) - \varepsilon\,\sigma\left(\beta h_{\theta}(s, \widetilde{a}_{w}, \widetilde{a}_{l})\right).
$$

Meanwhile, the weights for the DPO loss gradients, if run on noisy preferences, are given by

$$
\zeta_{\theta} = \sigma\left(\beta h_{\theta}(s, \widetilde{a}_{l}, \widetilde{a}_{w})\right) = \sigma\left(\beta\widehat{r}_{\theta}(s, \widetilde{a}_{l}) - \beta\widehat{r}_{\theta}(s, \widetilde{a}_{w})\right).
$$

Lemma 3.2 (Gradient weights). For any $\varepsilon \in (0,1/2)$, it holds that $\widehat{\zeta}_{\theta, \varepsilon} = \zeta_{\theta} + \frac{\varepsilon}{1 - 2\varepsilon}$ and $\zeta_{\theta} = \bar{\zeta}_{\theta, \varepsilon} + \varepsilon$.

Consider the case when there is no flip, $(\widetilde{a}_w, \widetilde{a}_l) = (a_w, a_l)$. Observe from (14) that rDPO (also cDPO and DPO) gradients increase the likelihood of preferred answers and decrease that of dis-preferred ones. Since the weights are higher for rDPO compared to DPO and cDPO (Lemma 3.2), the parameter update for rDPO is more aggressive than those of DPO and cDPO in the desired direction.

Now, for the case of preference flips, i.e., $(\widetilde{a}_w, \widetilde{a}_l) = (a_l, a_w)$, the gradients are not in the desired direction (they increase the likelihood of dis-preferred answers). Hence, rDPO updates will be more aggressive in the wrong direction than DPO and cDPO. However, since preferences are flipped with probability less than $1/2$, rDPO gradients push the parameter updates in the correct direction faster than DPO and cDPO on average. This behavior is reflected in our experiments too – latent rewards of the rDPO policy converge to those of the optimal policy much faster than the DPO and cDPO policies; see Section 6.
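A small sketch that evaluates the three gradient weights side by side; it can be used to verify the identities of Lemma 3.2 numerically, and the chosen values are illustrative.

```python
import math

def sigmoid(z: float) -> float:
    return 1.0 / (1.0 + math.exp(-z))

def gradient_weights(h: float, beta: float, eps: float):
    """Gradient weights on one observed pair with implicit-reward margin h = h_theta(s, a_w~, a_l~):
    DPO's zeta, cDPO's zeta-bar and rDPO's zeta-hat, as defined above."""
    zeta_dpo = sigmoid(-beta * h)
    zeta_cdpo = (1.0 - eps) * sigmoid(-beta * h) - eps * sigmoid(beta * h)
    zeta_rdpo = ((1.0 - eps) * sigmoid(-beta * h) + eps * sigmoid(beta * h)) / (1.0 - 2.0 * eps)
    return zeta_dpo, zeta_cdpo, zeta_rdpo

# Lemma 3.2 in numbers: zeta_rdpo = zeta_dpo + eps/(1-2*eps) and zeta_dpo = zeta_cdpo + eps.
print(gradient_weights(h=0.8, beta=0.5, eps=0.2))
```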
# 4. Theoretical Analysis

Our method enjoys certain theoretical properties. By unbiasedness of $\widehat{\mathcal{L}}_{\varepsilon}$ (Lemma 3.1), we know that, for any fixed $\theta \in \Theta$, the empirical rDPO loss (12) converges to the population DPO loss $\mathbb{E}_{s,a_w,a_l}\left[\mathcal{L}(\theta; s, a_w, a_l)\right]$, even though the former is computed using noisy preferences whereas the latter depends on clean preferences. But the rDPO policy $\widehat{\pi}_n = \pi_{\widehat{\theta}_n}$ won't necessarily converge to the optimal policy $\pi^{*}$, as preference pairs are sampled from the SFT policy $\pi_{\mathrm{sft}}$ and not from $\pi^{*}$ – an issue also shared by the DPO policy (Liu et al., 2023). However, our end goal is to bound the suboptimality gap of $\widehat{\pi}_n$. For this, we only need to characterize the estimation error of the learned policy parameter $\widehat{\theta}_n$ as a function of the number of samples $n$ and the flip rate $\varepsilon$.

# 4.1. Estimation Error

Under the BTL model (1), two reward functions from the same equivalence class induce the same preference distribution and the same optimal policy (Rafailov et al., 2023). Due to this model under-specification and reward re-parameterization (3), we need to impose an identifiability constraint on the set of policy parameters $\Theta$, namely $\Theta = \{\theta \in \mathbb{R}^d \mid \sum_{i=1}^{d}\theta_i = 0\}$, to achieve any guarantee on the estimation error. We also assume $\|\theta\| \leq B, \forall \theta \in \Theta$. We give guarantees for the neural policy class of the form (4), i.e., when $f_{\theta}$ is a neural network parameterized by $\theta$. We make a smoothness assumption on the policy class:

Assumption 4.1 (Smoothness). For any $\theta \in \Theta$ and $(s,a)$,

$$
\left|f_{\theta}(s, a)\right| \leq \alpha_{0}, \quad \|\nabla f_{\theta}(s, a)\| \leq \alpha_{1}, \quad \nabla^{2} f_{\theta}(s, a) \preccurlyeq \alpha_{2} I.
$$

The assumption ensures that the implicit reward differences $h_{\theta}(s, a_w, a_l)$ are bounded and Lipschitz, and that their gradients are also Lipschitz. This is quite common for establishing convergence of policy gradient methods (Agarwal et al., 2021). Log-linear policies $(f_{\theta}(s, a) = \theta^\top \phi(s, a))$ satisfy this assumption with $\alpha_0 = LB$, $\alpha_1 = L$, $\alpha_2 = 0$, where $L$ is an upper bound on the $\ell_2$-norm of the features $\phi(s, a)$.

The following result gives a guarantee on the estimation error in the semi-norm $\|\cdot\|_{\widehat{\Sigma}_{\theta}}$, which is expressed in terms of the parameter dimension $d$ and the flip rate $\varepsilon$. Here, for any $\theta \in \mathbb{R}^d$, $\widehat{\Sigma}_{\theta} = \frac{1}{n}\sum_{i=1}^{n}x_{i}x_{i}^{\top}$ is the sample covariance matrix of gradients of implicit reward differences under true preferences, where $x_{i} = \nabla h_{\theta}(s_{i}, a_{w,i}, a_{l,i}) = \nabla f_{\theta}(s_{i}, a_{w,i}) - \nabla f_{\theta}(s_{i}, a_{l,i})$.

The error scales inversely with $\gamma\beta(1 - 2\varepsilon)$, where $\gamma \leq \sigma'(\beta h_{\theta}(s, a_w, a_l))$ for all $\theta \in \Theta$ and for all preference samples $(s, a_w, a_l)$. Here $\gamma$ lower bounds the first derivative of the logistic function $\sigma(z_{\theta}; \beta, z_0) = \frac{1}{1 + e^{-\beta(z_{\theta} - z_0)}}$, where $z_{\theta} = f_{\theta}(s, a_w) - f_{\theta}(s, a_l)$ and $z_0 = z_{\theta_0}$.

Theorem 4.2 (Estimation error of $\widehat{\theta}_n$). Let $\delta \in (0,1]$, $\varepsilon \in [0,1/2)$, $\lambda > 0$. Then, for the neural policy class (4) and under Assumption 4.1, with probability at least $1 - \delta$, we have

$$
\left\|\widehat{\theta}_{n} - \theta^{*}\right\|_{\widehat{\Sigma}_{\theta^{*}} + \lambda I} \leq \frac{C}{\gamma\beta(1 - 2\varepsilon)}\cdot\sqrt{\frac{d + \log(1/\delta)}{n}} + C'\cdot B\sqrt{\lambda + \frac{\alpha_{2}}{\gamma\beta(1 - 2\varepsilon)} + \alpha_{1}\alpha_{2}B},
$$

where $\gamma = \frac{1}{2 + e^{-4\beta\alpha_0} + e^{4\beta\alpha_0}}$, and $C, C'$ are absolute constants.

Several remarks are in order for this result. To keep the presentation simple, we consider log-linear policies in the following, where $\alpha_{2} = 0$ and $x_{i} = \phi(s_{i}, a_{w,i}) - \phi(s_{i}, a_{l,i})$. In this case, $\widehat{\Sigma}_{\theta}$ is the covariance matrix of feature differences and is independent of $\theta$. We denote it by $\widehat{\Sigma}$ and get a high-probability error bound for the log-linear policy class:

$$
\left\|\widehat{\theta}_{n} - \theta^{*}\right\|_{\widehat{\Sigma} + \lambda I} = O\left(\frac{1}{\gamma\beta(1 - 2\varepsilon)}\sqrt{\frac{d}{n}} + B\sqrt{\lambda}\right). \tag{15}
$$

Choice of Regularizer $\lambda$. When the feature covariance matrix $\widehat{\Sigma}$ is invertible, the above result holds for $\lambda = 0$. In this case, we get a vanishing error rate in the $\ell_2$-norm:

$$
\left\|\widehat{\theta}_{n} - \theta^{*}\right\| = O\left(\frac{1}{\sqrt{\lambda_{\min}(\Sigma)}}\,\frac{1}{\gamma\beta(1 - 2\varepsilon)}\sqrt{\frac{d}{n}}\right). \tag{16}
$$

If this is not the case, $\widehat{\theta}_n$ won't necessarily converge to $\theta^*$. But one might set $\lambda = O(d/n)$ to achieve a vanishing error in the semi-norm $\widehat{\Sigma}$ for log-linear policies. However, the error will not vanish for neural policies (as $\alpha_{2} \neq 0$).

Estimation Error of DPO Policy. As already mentioned, our rDPO policy (13) recovers the DPO policy under clean preferences. Thus, setting $\varepsilon = 0$ in Theorem 4.2, we get an error bound of order $O\left(\frac{1}{\gamma}\sqrt{d/n}\right)$ for the DPO policy. Therefore, as a by-product of our approach, we get the first error bound for the trained DPO policy of Rafailov et al. (2023), which could be of independent interest.

Effect of Noisy Preferences. When preferences are noisy (i.e. flip rate $\varepsilon > 0$), our rDPO policy achieves an error bound of order $O\left(\frac{1}{\gamma(1 - 2\varepsilon)}\sqrt{d/n}\right)$. Comparing this with the above error bound for the DPO policy under clean preferences, we see that the cost of preference flips is a multiplicative factor of the order $\frac{1}{1 - 2\varepsilon}$ – the higher the (expected) number of preference flips, the higher the estimation error.

Effect of KL regularizer. Since $\gamma = O(1/e^{\beta})$, the dependence of the estimation error on the KL regularizer $\beta$ is of the order $g(\beta) = O(e^{\beta}/\beta)$. Hence our result no longer holds when $\beta = 0$ (no regularization). In this case preference probabilities are exactly equal to $1/2$ (both actions are equally preferred), making learning impossible. The same is the case when $\beta \to \infty$ (full regularization), since one action will always be preferred over the other with probability 1, making the loss function degenerate. This points out the need for tuning $\beta$ properly.
| 287 |
+
# 4.2. Performance Bounds of Learned Policy
|
| 288 |
+
|
| 289 |
+
In this Section, we discuss how the estimation error of $\widehat{\theta}_n$ relates to the sub-optimality gap of the policy $\widehat{\pi}_n$ . We will consider log-linear policy class for ease of presentation.
|
| 290 |
+
|
| 291 |
+
It is well known that learning a near-optimal policy from an offline batch of data cannot be sample efficient without assuming the behavior policy (SFT in our case) has a good coverage over the feature space (Wang et al., 2020). To begin with, we define the population covariance matrix of centered features under a policy $\pi$ :
|
| 292 |
+
|
| 293 |
+
$$
|
| 294 |
+
\Sigma_ {\pi} = \mathbb {E} \left[ \phi (s, a) \phi (s, a) ^ {\top} \right] - \mathbb {E} [ \phi (s, a) ] \mathbb {E} [ \phi (s, a) ] ^ {\top}, \tag {17}
|
| 295 |
+
$$
|
| 296 |
+
|
| 297 |
+
where the expectation is over random draws from $s \sim \rho, a \sim \pi(\cdot | s)$ . Now, we define the condition number of $\Sigma_{\pi}$ relative to $\Sigma_{\pi_{\mathrm{sft}}}$ (covariance matrix under SFT policy):
|
| 298 |
+
|
| 299 |
+
$$
|
| 300 |
+
\forall \pi \in \Pi : \quad \kappa_ {\pi} = \sup _ {v \in \mathbb {R} ^ {d}} \frac {v ^ {\top} \Sigma_ {\pi} v}{v ^ {\top} \Sigma_ {\pi_ {\mathrm {s f t}}} v} = \frac {\lambda_ {\operatorname* {m a x}} (\Sigma_ {\pi})}{\lambda_ {\operatorname* {m i n}} (\Sigma_ {\pi_ {\mathrm {s f t}}})}.
|
| 301 |
+
$$
|
| 302 |
+
|
| 303 |
+
A small relative condition number helps to keep the ratio of maximum feature coverage of policy to be evaluated and minimum coverage of starting policy in check. Thus, it is important to have a good starting policy $\pi_{\mathrm{sft}}$ to ensure a
|
| 304 |
+
|
| 305 |
+
small condition number. Roughly speaking, we desire an SFT policy which provides good coverage over the features.
|
| 306 |
+
|
| 307 |
+
Assumption 4.3 (Feature coverage). The SFT policy satisfies the minimum eigenvalue condition: $\lambda_{\mathrm{min}}(\Sigma_{\mathrm{sft}}) > 0$
|
| 308 |
+
|
| 309 |
+
Let $\kappa = \max_{\pi \in \Pi} \kappa_{\pi}$ . The assumption ensures $\kappa < \infty$ . The result below shows how estimation error and condition number determine the final performance of our learned policy.
|
| 310 |
+
|
| 311 |
+
Theorem 4.4 (Sub-optimality gap of $\widehat{\pi}_n$ ). Let $\delta \in (0,1]$ and $r^*(s,a) \leq r_{\max}$ for all $(s,a)$ . Then, for log-linear policy class, we have with probability at least $1 - \delta$ :
|
| 312 |
+
|
| 313 |
+
$$
|
| 314 |
+
r ^ {*} (\pi^ {*}) - r ^ {*} (\widehat {\pi} _ {n}) \leq r _ {\max } \sqrt {\kappa / 2} \left\| \widehat {\theta} _ {n} - \theta^ {*} \right\| _ {\widehat {\Sigma} + \lambda I}
|
| 315 |
+
$$
|
| 316 |
+
|
| 317 |
+
for $\lambda \geq C\sqrt{d\log(4d / \delta) / n}$ , where $C$ is a universal constant.
|
| 318 |
+
|
| 319 |
+
Now, plugging the bound on estimation error (15) into Theorem 4.4, we get a sub-optimality gap of order $O\left(\frac{\sqrt{\kappa}}{\gamma\beta(1 - 2\varepsilon)}\sqrt{\frac{d}{n}} + \frac{\sqrt{\kappa}d^{1 / 4}}{n^{1 / 4}}\right)$. However, when the sample feature covariance matrix $\widehat{\Sigma}$ is invertible, i.e., the observed samples from the SFT policy provide good coverage of the feature space, the gap improves to $O\left(\frac{\sqrt{\kappa}}{\gamma\beta(1 - 2\varepsilon)}\sqrt{\frac{d}{n}}\right)$.
|
| 320 |
+
|
| 321 |
+
Data efficiency of rDPO under a given noise level. We can obtain a bound on the sample complexity of rDPO for a given noise level and permissible sub-optimality gap. For instance, if $\hat{\Sigma}$ is invertible, then training rDPO on $n\geq \frac{\kappa d}{\Delta^2\gamma^2\beta^2(1 - 2\varepsilon)^2}$ samples ensures a sub-optimality gap $\leq \Delta$ for the aligned model. In contrast, when the samples are clean $(\varepsilon = 0)$, training vanilla DPO on $n\geq \frac{\kappa d}{\Delta^2\gamma^2\beta^2}$ samples ensures a sub-optimality gap $\leq \Delta$. Thus, in the presence of noise, rDPO needs roughly $\frac{1}{(1 - 2\varepsilon)^2}$ times the samples that DPO needs under clean data: the higher the noise level, the more samples rDPO needs.
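As a quick illustration of this sample-complexity calculation, the following sketch evaluates the order-level bound above with absolute constants suppressed; all numerical inputs are hypothetical.

```python
def rdpo_sample_complexity(kappa: float, d: int, gap: float, gamma: float, beta: float, eps: float) -> float:
    """Order-level sample size n >= kappa * d / (gap^2 * gamma^2 * beta^2 * (1 - 2 eps)^2);
    absolute constants are suppressed."""
    assert 0.0 <= eps < 0.5, "flip rate must be below 1/2"
    return kappa * d / (gap ** 2 * gamma ** 2 * beta ** 2 * (1.0 - 2.0 * eps) ** 2)

n_clean = rdpo_sample_complexity(kappa=10, d=64, gap=0.1, gamma=0.2, beta=0.1, eps=0.0)
n_noisy = rdpo_sample_complexity(kappa=10, d=64, gap=0.1, gamma=0.2, beta=0.1, eps=0.4)
print(n_noisy / n_clean)  # 25.0: noise at eps = 0.4 costs a factor 1 / (1 - 2*0.4)^2
```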
|
| 322 |
+
|
| 323 |
+
Dimension dependence in $\kappa$ . It is reasonable to expect $\kappa$ to be dimension dependent, but it need not depend on the size of the vocabulary. To see this, consider log-linear policies with bounded features $\| \phi (s,a)\| \leq L$ . In this case $\lambda_{\max}(\Sigma_{\pi})\leq L^2$ and thus $\kappa_{\pi}\leq \frac{L^{2}}{\lambda_{\min}(\Sigma_{\pi_{\mathrm{sft}}})}$ . Now, $\lambda_{\min}(\Sigma_{\pi_{\mathrm{sft}}})$ depends implicitly on the dimension $d$ of the features $\phi (s,a)$ , and it is reasonable to assume $\lambda_{\min}(\Sigma_{\pi_{\mathrm{sft}}}) = \Theta (L^2 /d)$ (Wang et al., 2020). Thus it is always possible to have $\kappa = O(d)$ (Agarwal et al., 2021).
|
| 324 |
+
|
| 325 |
+
Margin Gap. A related performance measure is the margin under clean distribution. The margin of a policy $\pi_{\theta}$ is defined to be the average difference of implicit rewards $\widehat{r}_{\theta}(s,a) = \log \frac{\pi_{\theta}(a|s)}{\pi_{\mathrm{sft}}(a|s)}$ of chosen and rejected actions, i.e.,
|
| 326 |
+
|
| 327 |
+
$$
|
| 328 |
+
\mathcal {M} \left(\pi_ {\theta}\right) = \mathbb {E} _ {s \sim \rho , \left(a _ {w}, a _ {l}\right) \sim \pi_ {\mathrm {sft}}} \left[ \widehat {r} _ {\theta} \left(s, a _ {w}\right) - \widehat {r} _ {\theta} \left(s, a _ {l}\right) \right].
|
| 329 |
+
$$
|
| 330 |
+
|
| 331 |
+
Then $\mathcal{M}(\pi^{*}) - \mathcal{M}(\widehat{\pi}_n)$ defines the margin gap of the learned policy $\widehat{\pi}_n$ from the optimal policy $\pi^{*}$. This metric is commonly used by practitioners to demonstrate the performance of a learned policy (von Werra et al., 2020).
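In practice the margin is estimated from sequence log-probabilities. Below is a minimal sketch, assuming the per-response log-probabilities under the trained and SFT policies have already been computed; the function name and arguments are ours, purely for illustration.

```python
import torch

def empirical_margin(logp_w: torch.Tensor, logp_l: torch.Tensor,
                     logp_sft_w: torch.Tensor, logp_sft_l: torch.Tensor) -> torch.Tensor:
    """Average gap of implicit rewards r_hat(s, a) = log pi_theta(a|s) - log pi_sft(a|s)
    between chosen and rejected responses, evaluated on a batch of preference pairs."""
    r_w = logp_w - logp_sft_w   # implicit reward of the chosen response
    r_l = logp_l - logp_sft_l   # implicit reward of the rejected response
    return (r_w - r_l).mean()
```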
|
| 334 |
+
|
| 335 |
+
Lemma 4.5 (Margin gap). Assuming $\widehat{\Sigma}$ to be invertible for log-linear policy class, the margin gap of $\widehat{\pi}_n$ satisfies
|
| 336 |
+
|
| 337 |
+
$$
|
| 338 |
+
\mathcal {M} (\pi^ {*}) - \mathcal {M} (\widehat {\pi} _ {n}) = O \Big (\frac {1}{\lambda_ {\min} (\widehat {\Sigma} ^ {1 / 2})} \frac {1}{\gamma \beta (1 - 2 \varepsilon)} \sqrt {\frac {d}{n}} \Big).
|
| 339 |
+
$$
|
| 340 |
+
|
| 341 |
+
Since $\kappa = O(1 / \lambda_{\mathrm{min}}(\Sigma_{\pi_{\mathrm{sft}}}))$, comparing this result with the sub-optimality bound from the preceding paragraph, we see that the margin gap and the sub-optimality gap are roughly of the same order when $\widehat{\Sigma}$ has good coverage. This is also reflected in our experiments, where we see a strong correlation between evaluation accuracy (on clean data) and average reward performance for any policy; see Section 6.
|
| 342 |
+
|
| 343 |
+
Generalizing to Neural Policy Class. Similar reasoning can also be used to establish a sub-optimality bound for the neural policy class (4). Here the relative condition number needs to be defined using the covariance matrix of the (centered) gradient features $\nabla f_{\theta}(s,a)$, which depend on $\theta$, as opposed to the fixed feature map $\phi(s,a)$ in the log-linear case. The rest follows with an appropriate adaptation of the results above.
|
| 344 |
+
|
| 345 |
+
# 5. Generalizations and Extensions
|
| 346 |
+
|
| 347 |
+
Our approach to mitigating the effect of noisy preferences in data is not limited to the DPO algorithm and the BTL preference model. It is a general framework that can be adapted to other preference optimization methods (e.g., SLiC, IPO) and other preference models (e.g., probit, Plackett-Luce). More importantly, since DPO implicitly learns a reward function $\widehat{r}_{\theta}$ as we have discussed above, our method seamlessly extends to the reward training stage of the RLHF pipeline, showing the versatility of our proposed approach.
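To make the construction concrete, the de-biased loss (12) can be viewed as a small wrapper that combines the loss on the observed pair with the loss on the swapped pair; the rDPO objective is then obtained by plugging in the DPO logistic loss. The sketch below is ours, not from a released implementation, and `margin` stands for $\beta h_\theta(s,\widetilde a_w,\widetilde a_l)$ computed per example.

```python
import torch
import torch.nn.functional as F

def debiased_pairwise_loss(loss_obs: torch.Tensor, loss_swapped: torch.Tensor, eps: float) -> torch.Tensor:
    """Unbiased surrogate in the spirit of (12): combine the loss on the observed
    (possibly flipped) pair with the loss on the swapped pair."""
    assert 0.0 <= eps < 0.5
    return ((1.0 - eps) * loss_obs - eps * loss_swapped) / (1.0 - 2.0 * eps)

def rdpo_loss(margin: torch.Tensor, eps: float) -> torch.Tensor:
    """rDPO objective for a batch, where margin = beta * h_theta(s, a_w~, a_l~)."""
    loss_obs = -F.logsigmoid(margin)       # DPO loss on the observed pair
    loss_swapped = -F.logsigmoid(-margin)  # DPO loss with chosen/rejected roles swapped
    return debiased_pairwise_loss(loss_obs, loss_swapped, eps).mean()
```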
|
| 348 |
+
|
| 349 |
+
Reward training in RLHF. Let us consider parameterized reward models $r_{\xi}(s,a)$ , where $\xi \in \mathbb{R}^d$ is a parameter vector. Let $\xi^{*}$ be the parameter of the latent reward model $r^{*}(s,a)$ . Then, from (1), the true preference probabilities following BTL model are given by
|
| 350 |
+
|
| 351 |
+
$$
|
| 352 |
+
p _ {s, a, a ^ {\prime}} ^ {*} = \mathbb {P} _ {\xi^ {*}} [ a \succ a ^ {\prime} | s ] = \sigma \left(r _ {\xi^ {*}} (s, a) - r _ {\xi^ {*}} (s, a ^ {\prime})\right).
|
| 353 |
+
$$
|
| 354 |
+
|
| 355 |
+
Similar to (7), for any $\xi \in \mathbb{R}^d$ , this yields the BCE loss for a preference pair $(s, a_w, a_l)$ :
|
| 356 |
+
|
| 357 |
+
$$
|
| 358 |
+
\mathcal {L} (\xi ; s, a _ {w}, a _ {l}) = - \log \sigma \left(r _ {\xi} (s, a _ {w}) - r _ {\xi} (s, a _ {l})\right). \tag {18}
|
| 359 |
+
$$
|
| 360 |
+
|
| 361 |
+
Under our random noise model (8) with flip rate $\varepsilon$, for a potentially noisy sample $(s, \widetilde{a}_w, \widetilde{a}_l)$, one can define a loss $\widehat{\mathcal{L}}_{\varepsilon}(\xi; s, \widetilde{a}_w, \widetilde{a}_l)$ using (12), which is an unbiased estimate of (18) by Lemma 3.1. Thus, using a similar argument as in Section 3, a reward model trained by minimizing this loss will be robust to noisy preferences. This trained reward model can then be plugged directly into (2) to train a language model policy. In practice, (2) is solved using the PPO algorithm (Schulman et al., 2017). Thus, we call this entire procedure robust PPO (rPPO in short).
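A sketch of the corresponding robust reward-model objective (the reward-training stage of rPPO), assuming per-example scalar rewards for the chosen and rejected responses are available; again the names are illustrative rather than taken from an existing codebase.

```python
import torch
import torch.nn.functional as F

def robust_reward_bce(r_chosen: torch.Tensor, r_rejected: torch.Tensor, eps: float) -> torch.Tensor:
    """De-biased version of the BCE loss (18) for reward-model training (the rPPO reward stage).
    r_chosen / r_rejected hold r_xi(s, a_w~) and r_xi(s, a_l~) per example."""
    diff = r_chosen - r_rejected
    loss_obs = -F.logsigmoid(diff)       # loss with the observed labels
    loss_swapped = -F.logsigmoid(-diff)  # loss with the labels swapped
    return (((1.0 - eps) * loss_obs - eps * loss_swapped) / (1.0 - 2.0 * eps)).mean()
```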
|
| 364 |
+
|
| 365 |
+
Other Optimization Methods. Instead of the BCE loss (7), SLiC (Zhao et al., 2023) minimizes a hinge loss:
|
| 366 |
+
|
| 367 |
+
$$
|
| 368 |
+
\mathcal {L} _ {\text {h i n g e}} (\theta ; s, a _ {w}, a _ {l}) = \max \{0, 1 - \beta h _ {\theta} (s, a _ {w}, a _ {l}) \}
|
| 369 |
+
$$
|
| 370 |
+
|
| 371 |
+
where $1 / \beta$ acts as the margin (of misclassification). IPO (Azar et al., 2023) minimizes the square loss:
|
| 372 |
+
|
| 373 |
+
$$
|
| 374 |
+
\mathcal {L} _ {\mathrm {I P O}} (\theta ; s, a _ {w}, a _ {l}) = \left(\beta h _ {\theta} (s, a _ {w}, a _ {l}) - 1 / 2\right) ^ {2}.
|
| 375 |
+
$$
|
| 376 |
+
|
| 377 |
+
A potential advantage of IPO and SLiC over DPO is that these methods do not assume any preference model like BTL and can work with general preference probabilities. Under our random noise model (8), one can define robust counterparts of both $\mathcal{L}_{\mathrm{hinge}}$ and $\mathcal{L}_{\mathrm{IPO}}$ using (12). Lemma 3.1 ensures that these losses under noisy data $(\widetilde{a}_w, \widetilde{a}_l)$ are unbiased estimates of those under clean data $(a_w, a_l)$, which helps one learn a robust policy for these loss functions. Thus our approach is also not limited to the BTL preference model.
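The same de-biasing construction yields robust counterparts of the SLiC hinge loss and the IPO square loss. The implementations below are our own sketches under the stated noise model, not code from the respective papers; as before, `margin` denotes $\beta h_\theta(s,\widetilde a_w,\widetilde a_l)$.

```python
import torch

def robust_hinge_loss(margin: torch.Tensor, eps: float) -> torch.Tensor:
    """Robust counterpart of the SLiC hinge loss, margin = beta * h_theta(s, a_w~, a_l~)."""
    loss_obs = torch.clamp(1.0 - margin, min=0.0)      # hinge loss on the observed pair
    loss_swapped = torch.clamp(1.0 + margin, min=0.0)  # hinge loss with roles swapped
    return (((1.0 - eps) * loss_obs - eps * loss_swapped) / (1.0 - 2.0 * eps)).mean()

def robust_ipo_loss(margin: torch.Tensor, eps: float) -> torch.Tensor:
    """Robust counterpart of the IPO square loss."""
    loss_obs = (margin - 0.5) ** 2
    loss_swapped = (-margin - 0.5) ** 2
    return (((1.0 - eps) * loss_obs - eps * loss_swapped) / (1.0 - 2.0 * eps)).mean()
```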
|
| 378 |
+
|
| 379 |
+
Other Preference Models. Our results can be extended to any preference model of the form (1) if $g$ is strongly log-concave, i.e., $-\frac{d^2}{dz^2} \log g(z) \geq \gamma > 0$ in a closed interval around $z = 0$ . For example, in the probit (also known as Thurstone) model (Thurstone, 1927), $g$ is the CDF of standard Gaussian distribution. Thus, for any $\theta$ , the preference probabilities are $\mathbb{P}_{\theta}[a \succ a'|s] = \Phi(\beta h_{\theta}(s,a,a'))$ . Since $\Phi$ is strongly log-concave in $\Theta$ (Tsukida et al., 2011), one can derive similar performance bounds under probit model too.
|
| 380 |
+
|
| 381 |
+
Finally, consider the Plackett-Luce (PL) model (Plackett, 1975; Luce, 2012) for $K$ -wise comparisons between actions. Let $\Pi$ be the set of all permutations $\pi : [K] \to [K]$; a permutation denotes a ranking given by an oracle over all $K$ actions, where $a_{\pi(j)}$ denotes the $j$ -th ranked action. Under the PL model, we define the loss of a permutation $\pi \in \Pi$ for a question $s$ as
|
| 382 |
+
|
| 383 |
+
$$
|
| 384 |
+
\mathcal {L} (\theta ; s, \pi) = - \log \left(\prod_ {j = 1} ^ {K} \frac {\exp (\widehat {r} _ {\theta} (s , a _ {\pi (j)}))}{\sum_ {k ^ {\prime} = j} ^ {K} \exp (\widehat {r} _ {\theta} (s , a _ {\pi (k ^ {\prime})}))}\right).
|
| 385 |
+
$$
|
| 386 |
+
|
| 387 |
+
Noisy preferences are obtained by perturbing the true ranking $\pi$ to some other ranking $\widetilde{\pi}$ with probability $\frac{\varepsilon}{N - 1}$ , where $N$ is the number of possible rankings (can be at most $K!$ ). Then, if we define the robust-loss for noisy ranking $\widetilde{\pi}$ as
|
| 388 |
+
|
| 389 |
+
$$
|
| 390 |
+
\widehat {\mathcal {L}} _ {\varepsilon} (\theta ; s, \widetilde {\pi}) = \frac {(N - 1 - \varepsilon) \mathcal {L} (\theta ; s , \widetilde {\pi}) - \varepsilon \sum_ {\pi^ {\prime} \neq \widetilde {\pi}} \mathcal {L} (\theta ; s , \pi^ {\prime})}{(1 - \varepsilon) N - 1},
|
| 391 |
+
$$
|
| 392 |
+
|
| 393 |
+
it will be an unbiased estimate of $\mathcal{L}(\theta ;s,\pi)$. This helps us learn a robust policy under the PL feedback model.
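For small $K$, the robust PL loss can be written down directly by enumerating permutations. The following is an admittedly brute-force sketch that takes $N = K!$ (i.e., the perturbation may produce any ranking); names and the enumeration strategy are ours.

```python
import itertools
import torch

def pl_loss(rewards: torch.Tensor, ranking: tuple) -> torch.Tensor:
    """Negative log-likelihood of a ranking under the PL model; rewards[k] = r_hat_theta(s, a_k)."""
    loss = rewards.new_zeros(())
    for j in range(len(ranking)):
        remaining = torch.stack([rewards[k] for k in ranking[j:]])
        loss = loss - (rewards[ranking[j]] - torch.logsumexp(remaining, dim=0))
    return loss

def robust_pl_loss(rewards: torch.Tensor, observed_ranking: tuple, eps: float) -> torch.Tensor:
    """De-biased PL loss when the true ranking is replaced by any other ranking with
    probability eps / (N - 1); here N = K!, so this is feasible only for small K."""
    all_rankings = list(itertools.permutations(range(rewards.shape[0])))
    n_rank = len(all_rankings)
    other = sum(pl_loss(rewards, r) for r in all_rankings if r != tuple(observed_ranking))
    num = (n_rank - 1 - eps) * pl_loss(rewards, tuple(observed_ranking)) - eps * other
    return num / ((1.0 - eps) * n_rank - 1.0)
```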
|
| 394 |
+
|
| 395 |
+
# 6. Experiments
|
| 396 |
+
|
| 397 |
+
In this section, we provide details about baselines, datasets, and evaluation results. We empirically evaluate rDPO on two open-ended generation tasks, similar to Rafailov et al. (2023): (i) Controlled Sentiment Generation and (ii) Single-turn Dialogue. We compare rDPO with vanilla DPO and cDPO in both tasks. In the sentiment generation task, we also include SLiC (Zhao et al., 2023) and IPO (Azar et al., 2023) as baselines. Furthermore, we compare rPPO with vanilla PPO (RLHF) and cPPO in this task.
|
| 400 |
+
|
| 401 |
+
Controlled Sentiment Generation. In this experiment, each prompt $s$ is the prefix of a movie review from the IMDb dataset (Maas et al., 2011), and the task is to generate a review (action) $a \sim \pi(\cdot|s)$ with positive sentiment. We extract the first 20 tokens of each review in the IMDb dataset as a prefix. Subsequently, we generate reviews using a gpt2-large model supervised fine-tuned on the IMDb dataset. We generate four reviews per prefix, yielding six preference pairs for each prefix. We employ siebert/sentiment-roberta-large-english $^2$ as the latent (ground-truth) reward model $r^*(s, a)$. To ensure a clean dataset, we only retain preference triplets $(s, a_w, a_l)$ with $r^*(s, a_w) - r^*(s, a_l) > \tau$, where $\tau = 0.1$ is a threshold chosen for this task. This resulted in a dataset of 12000 preference triplets, of which 10000 were used to train the policy and 2000 for evaluation.
|
| 402 |
+
|
| 403 |
+
Table 1. Mean reward ± Standard Deviation of actions generated by different methods after several steps of policy training on the IMDb dataset under noise level 0.4.
|
| 404 |
+
|
| 405 |
+
<table><tr><td>Steps</td><td>DPO (On clean data)</td><td>DPO</td><td>cDPO</td><td>IPO</td><td>SLiC</td><td>rDPO</td></tr><tr><td>200</td><td>0.99 ± 0.03</td><td>0.93 ± 0.26</td><td>0.84 ± 0.36</td><td>0.85 ± 0.35</td><td>0.94 ± 0.22</td><td>0.99 ± 0.00</td></tr><tr><td>400</td><td>0.99 ± 0.02</td><td>0.72 ± 0.43</td><td>0.82 ± 0.37</td><td>0.83 ± 0.37</td><td>0.88 ± 0.31</td><td>0.99 ± 0.00</td></tr><tr><td>600</td><td>0.99 ± 0.00</td><td>0.88 ± 0.32</td><td>0.82 ± 0.38</td><td>0.84 ± 0.36</td><td>0.90 ± 0.29</td><td>0.99 ± 0.00</td></tr><tr><td>800</td><td>0.99 ± 0.00</td><td>0.88 ± 0.32</td><td>0.83 ± 0.36</td><td>0.83 ± 0.37</td><td>0.89 ± 0.30</td><td>0.99 ± 0.00</td></tr><tr><td>1000</td><td>0.99 ± 0.02</td><td>0.88 ± 0.32</td><td>0.83 ± 0.37</td><td>0.82 ± 0.38</td><td>0.90 ± 0.29</td><td>0.99 ± 0.00</td></tr></table>
|
| 406 |
+
|
| 407 |
+
We then introduce noise into this dataset by randomly flipping preferences with a probability of $\varepsilon = 0.4$. For all methods, gpt2-large is employed as the initial policy. For methods in the DPO family (vanilla DPO, rDPO, cDPO), we optimize the policy for 1000 steps with a batch size of 16. We do the same for IPO and SLiC. For methods in the PPO family (vanilla PPO, rPPO, cPPO), we train a reward model on the preference data for 1000 steps with a batch size of 16 and perform policy optimization for 1 epoch over the entire train dataset.
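The noise-injection step itself is straightforward. A minimal sketch of how preferences can be flipped with probability $\varepsilon$ is given below; the exact preprocessing code used in our experiments may differ, and the helper name is illustrative.

```python
import numpy as np

def inject_preference_noise(triplets, eps: float, seed: int = 0):
    """Flip each preference (s, a_w, a_l) -> (s, a_l, a_w) independently with probability eps."""
    rng = np.random.default_rng(seed)
    noisy = []
    for s, a_w, a_l in triplets:
        if rng.random() < eps:
            noisy.append((s, a_l, a_w))  # preference flipped
        else:
            noisy.append((s, a_w, a_l))
    return noisy
```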
|
| 408 |
+
|
| 409 |
+
Table 2. Mean reward $\pm$ Standard Deviation on the IMDb dataset after policy optimization. The reward model is trained for 1000 steps for all baselines, followed by running PPO for 1 epoch.
|
| 410 |
+
|
| 411 |
+
<table><tr><td>Step</td><td>PPO (On clean data)</td><td>PPO</td><td>cPPO</td><td>rPPO</td></tr><tr><td>1000</td><td>0.99 ± 0.00</td><td>0.78 ± 0.41</td><td>0.87 ± 0.33</td><td>0.94 ± 0.23</td></tr></table>
|
| 412 |
+
|
| 413 |
+
For evaluation, we generate reviews using the final policy and compute rewards using the ground-truth reward model $r^{*}$. The results are presented in Table 1 for the DPO family and in Table 2 for the PPO family.
|
| 414 |
+
|
| 415 |
+

|
| 416 |
+
Figure 1. Mean reward on IMDb dataset at different sampling temperatures after 1000 steps.
|
| 417 |
+
|
| 418 |
+
Table 3. Percentage improvement in win rate vs. the chosen response over the initial SFT policy
|
| 419 |
+
|
| 420 |
+
<table><tr><td rowspan="2">Method</td><td colspan="2">Improvement over SFT (%)</td></tr><tr><td>gpt2-large</td><td>Llama-2-7b</td></tr><tr><td>DPO</td><td>22.20</td><td>45.78</td></tr><tr><td>cDPO (ε = 0.1)</td><td>18.34</td><td>39.16</td></tr><tr><td>rDPO (ε = 0.1)</td><td>24.32</td><td>51.20</td></tr></table>
|
| 421 |
+
|
| 422 |
+
For reference, we also train DPO and PPO on clean data without any noise. We observe that the performance of DPO degrades with the introduction of high noise $(\varepsilon = 0.4)$ in the data. IPO and SLiC also suffer significantly due to noisy preferences. However, rDPO maintains its performance across steps, which indicates its robustness to noise. We also observe that cDPO is not able to mitigate the effect of noise, confirming the conclusions of Lemma 3.2. Similar observations hold for the PPO family. In Figure 1, we evaluate the average rewards obtained by generations at different sampling temperatures. We observe that rDPO and rPPO achieve the best reward by a significant margin compared to their peers.
|
| 423 |
+
|
| 424 |
+
Single-turn Dialogue. In this experiment, each prompt $s$ is a human query and each action $a$ is a helpful response to $s$ . We use the Anthropic helpful and harmless dataset (Bai et al., 2022a) as the preference data. We use a supervised fine-tuned gpt2-large model trained on a subset of the chosen preference data as the initial (SFT) policy. We first perform policy optimization using rDPO. As the true noise level in the dataset is unknown, we experiment with different values of $\varepsilon \in \{0.1, 0.2, 0.3, 0.4\}$ . We plot the evaluation accuracy of the policy on a subset of the test set across different training steps. This is given by $\frac{1}{m} \sum_{i \in \mathcal{D}_{\text{test}}} \mathbb{1}(\hat{r}_{\theta}(s_i, a_{w,i}) > \hat{r}_{\theta}(s_i, a_{l,i}))$ , where $\hat{r}_{\theta}$ is the implicit reward defined by policy $\pi_{\theta}$ . We observed the best results with $\varepsilon = 0.1$ . Subsequently, we train DPO and cDPO (with label-smoothing $\varepsilon = 0.1$ ) on the same data.
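A sketch of this evaluation-accuracy computation, assuming the sequence log-probabilities of chosen and rejected responses under the trained and SFT policies are precomputed; the function name and arguments are ours.

```python
import torch

def eval_accuracy(logp_w: torch.Tensor, logp_l: torch.Tensor,
                  logp_sft_w: torch.Tensor, logp_sft_l: torch.Tensor) -> float:
    """Fraction of held-out pairs where the implicit reward ranks the chosen response higher."""
    r_w = logp_w - logp_sft_w
    r_l = logp_l - logp_sft_l
    return (r_w > r_l).float().mean().item()
```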
|
| 425 |
+
|
| 426 |
+
In this experiment, as we do not have access to any latent reward model, we employ meta-llama/Llama-2-13b-chat-hf $^3$ to compute the win rate of policy generations against the chosen preferences on a representative subset of the test dataset. Next, to demonstrate that our method generalizes to bigger models, we repeat this experiment with Llama-2-7b as the policy model and GPT-4 as the evaluation model. The win rates for both experiments are tabulated in Table 3. In both cases, we observe that rDPO performs significantly better than DPO and cDPO.
|
| 429 |
+
|
| 430 |
+
Conclusion. We have studied the effect of noisy preferences on the final performance of language model policies. We have designed a robust loss function, which helps mitigate the effect of noise on the generations of the learned policy. We have proved the first theoretical results bounding the sub-optimality gap of our robust policy. We have shown the robustness of rDPO over a baseline method (DPO) and a label-smoothing-based heuristic (cDPO) used by practitioners. It remains open to see how our method compares to other heuristics proposed in Wang et al. (2024), e.g., flipping some labels or adding an adaptive margin to the loss.
|
| 431 |
+
|
| 432 |
+
# Acknowledgements
|
| 433 |
+
|
| 434 |
+
SRC would like to thank Xingyu Zhou and Gaurav Sinha for initial discussions about this work.
|
| 435 |
+
|
| 436 |
+
# References
|
| 437 |
+
|
| 438 |
+
Agarwal, A., Kakade, S. M., Lee, J. D., and Mahajan, G. On the theory of policy gradient methods: Optimality, approximation, and distribution shift. The Journal of Machine Learning Research, 22(1):4431-4506, 2021.
|
| 439 |
+
Azar, M. G., Rowland, M., Piot, B., Guo, D., Calandriello, D., Valko, M., and Munos, R. A general theoretical paradigm to understand learning from human preferences. arXiv preprint arXiv:2310.12036, 2023.
|
| 440 |
+
Bai, Y., Jones, A., Ndousse, K., Askell, A., Chen, A., Das-Sarma, N., Drain, D., Fort, S., Ganguli, D., Henighan, T., Joseph, N., Kadavath, S., Kernion, J., Conerly, T., El-Showk, S., Elhage, N., Hatfield-Dodds, Z., Hernandez, D., Hume, T., Johnston, S., Kravec, S., Lovitt, L., Nanda, N., Olsson, C., Amodei, D., Brown, T., Clark, J., McCandlish, S., Olah, C., Mann, B., and Kaplan, J. Training a helpful and harmless assistant with reinforcement learning from human feedback, 2022a. URL https://arxiv.org/pdf/2204.05862.pdf.
|
| 441 |
+
Bai, Y., Jones, A., Ndousse, K., Askell, A., Chen, A., Das-Sarma, N., Drain, D., Fort, S., Ganguli, D., Henighan, T., et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022b.
|
| 442 |
+
|
| 443 |
+
Bradley, R. A. and Terry, M. E. Rank analysis of incomplete block designs: I. the method of paired comparisons. Biometrika, 39(3/4):324-345, 1952.
|
| 444 |
+
Chen, X., Zhong, H., Yang, Z., Wang, Z., and Wang, L. Human-in-the-loop: Provably efficient preference-based reinforcement learning with general function approximation. In International Conference on Machine Learning, pp. 3773-3793. PMLR, 2022.
|
| 445 |
+
Chowdhury, S. R., Zhou, X., and Natarajan, N. Differentially private reward estimation with preference feedback. arXiv preprint arXiv:2310.19733, 2023.
|
| 446 |
+
Christiano, P. F., Leike, J., Brown, T., Martic, M., Legg, S., and Amodei, D. Deep reinforcement learning from human preferences. Advances in neural information processing systems, 30, 2017.
|
| 447 |
+
Hsu, D., Kakade, S., and Zhang, T. A tail inequality for quadratic forms of subgaussian random vectors. Electronic Communications in Probability, 17(none):1-6, 2012. doi: 10.1214/ECP.v17-2079. URL https://doi.org/10.1214/ECP.v17-2079.
|
| 448 |
+
Kaufmann, T., Weng, P., Bengs, V., and Hüllermeier, E. A survey of reinforcement learning from human feedback. arXiv preprint arXiv:2312.14925, 2023.
|
| 449 |
+
Lambert, N., Krendl Gilbert, T., and Zick, T. The history and risks of reinforcement learning and human feedback. arXiv e-prints, pp. arXiv-2310, 2023.
|
| 450 |
+
Liu, T., Zhao, Y., Joshi, R., Khalman, M., Saleh, M., Liu, P. J., and Liu, J. Statistical rejection sampling improves preference optimization. arXiv preprint arXiv:2309.06657, 2023.
|
| 451 |
+
Luce, R. D. Individual choice behavior: A theoretical analysis. Courier Corporation, 2012.
|
| 452 |
+
Maas, A. L., Daly, R. E., Pham, P. T., Huang, D., Ng, A. Y., and Potts, C. Learning word vectors for sentiment analysis. In Lin, D., Matsumoto, Y., and Mihalcea, R. (eds.), Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies, pp. 142-150, Portland, Oregon, USA, June 2011. Association for Computational Linguistics. URL https://aclanthology.org/P11-1015.
|
| 453 |
+
Mitchell, E. A note on dpo with noisy preferences and relationship to ipo, 2023. URL https://ericmitchell.ai/cdpo.pdf.
|
| 454 |
+
Müller, R., Kornblith, S., and Hinton, G. E. When does label smoothing help? Advances in neural information processing systems, 32, 2019.
|
| 455 |
+
|
| 456 |
+
Natarajan, N., Dhillon, I. S., Ravikumar, P. K., and Tewari, A. Learning with noisy labels. Advances in neural information processing systems, 26, 2013.
|
| 457 |
+
Ouyang, L., Wu, J., Jiang, X., Almeida, D., Wainwright, C., Mishkin, P., Zhang, C., Agarwal, S., Slama, K., Ray, A., et al. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems, 35:27730-27744, 2022.
|
| 458 |
+
Pacchiano, A., Saha, A., and Lee, J. Dueling rl: reinforcement learning with trajectory preferences. arXiv preprint arXiv:2111.04850, 2021.
|
| 459 |
+
Patrini, G., Rozza, A., Krishna Menon, A., Nock, R., and Qu, L. Making deep neural networks robust to label noise: A loss correction approach. In Proceedings of the IEEE conference on computer vision and pattern recognition, pp. 1944-1952, 2017.
|
| 460 |
+
Plackett, R. L. The analysis of permutations. Journal of the Royal Statistical Society Series C: Applied Statistics, 24 (2):193-202, 1975.
|
| 461 |
+
Qiao, D. and Wang, Y.-X. Offline reinforcement learning with differential privacy. arXiv preprint arXiv:2206.00810, 2022.
|
| 462 |
+
Rafailov, R., Sharma, A., Mitchell, E., Ermon, S., Manning, C. D., and Finn, C. Direct preference optimization: Your language model is secretly a reward model. arXiv preprint arXiv:2305.18290, 2023.
|
| 463 |
+
Schulman, J., Wolski, F., Dhariwal, P., Radford, A., and Klimov, O. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.
|
| 464 |
+
Stiennon, N., Ouyang, L., Wu, J., Ziegler, D., Lowe, R., Voss, C., Radford, A., Amodei, D., and Christiano, P. F. Learning to summarize with human feedback. Advances in Neural Information Processing Systems, 33: 3008-3021, 2020.
|
| 465 |
+
Thurstone, L. L. A law of comparative judgment. Psychological review, 34(4):273, 1927.
|
| 466 |
+
Tropp, J. A. et al. An introduction to matrix concentration inequalities. Foundations and Trends® in Machine Learning, 8(1-2):1-230, 2015.
|
| 467 |
+
Tsukida, K., Gupta, M. R., et al. How to analyze paired comparison data. Department of Electrical Engineering University of Washington, Tech. Rep. UWEETR-2011-0004, 1, 2011.
|
| 468 |
+
von Werra, L., Belkada, Y., Tunstall, L., Beeching, E., Thrush, T., Lambert, N., and Huang, S. Trl: Transformer reinforcement learning. https://github.com/huggingface/trl, 2020.
|
| 469 |
+
|
| 470 |
+
Wang, B., Zheng, R., Chen, L., Liu, Y., Dou, S., Huang, C., Shen, W., Jin, S., Zhou, E., Shi, C., et al. Secrets of rlhf in large language models part ii: Reward modeling. arXiv preprint arXiv:2401.06080, 2024.
|
| 471 |
+
Wang, R., Foster, D. P., and Kakade, S. M. What are the statistical limits of offline rl with linear function approximation? arXiv preprint arXiv:2010.11895, 2020.
|
| 472 |
+
Yuan, Z., Yuan, H., Tan, C., Wang, W., Huang, S., and Huang, F. Rrhf: Rank responses to align language models with human feedback without tears. arXiv preprint arXiv:2304.05302, 2023.
|
| 473 |
+
Zhan, W., Uehara, M., Kallus, N., Lee, J. D., and Sun, W. Provable offline reinforcement learning with human feedback. arXiv preprint arXiv:2305.14816, 2023.
|
| 474 |
+
Zhao, Y., Joshi, R., Liu, T., Khalman, M., Saleh, M., and Liu, P. J. Slic-hf: Sequence likelihood calibration with human feedback. arXiv preprint arXiv:2305.10425, 2023.
|
| 475 |
+
Zheng, R., Dou, S., Gao, S., Hua, Y., Shen, W., Wang, B., Liu, Y., Jin, S., Liu, Q., Zhou, Y., et al. Secrets of rlhf in large language models part i: Ppo. arXiv preprint arXiv:2307.04964, 2023.
|
| 476 |
+
Zhu, B., Jiao, J., and Jordan, M. I. Principled reinforcement learning with human feedback from pairwise or $k$ -wise comparisons. arXiv preprint arXiv:2301.11270, 2023.
|
| 477 |
+
Zhu, B., Jordan, M. I., and Jiao, J. Iterative data smoothing: Mitigating reward overfitting and overoptimization in rlhf. arXiv preprint arXiv:2401.16335, 2024.
|
| 478 |
+
|
| 479 |
+
# Appendix
|
| 480 |
+
|
| 481 |
+
# A. Missing Details
|
| 482 |
+
|
| 483 |
+
# A.1. Proof of Lemma 3.1
|
| 484 |
+
|
| 485 |
+
It is easy to see that
|
| 486 |
+
|
| 487 |
+
$$
|
| 488 |
+
\begin{array}{l} \mathbb{E}_{\varepsilon}\Bigl[\widehat{\mathcal{L}}_{\varepsilon}(\theta ;s,\widetilde{a}_{w},\widetilde{a}_{l})|a_{w},a_{l}\Bigr ] = \frac{(1 - \varepsilon)^{2}\mathcal{L}(\theta;s,a_{w},a_{l}) - \varepsilon(1 - \varepsilon)\mathcal{L}(\theta;s,a_{l},a_{w})}{1 - 2\varepsilon} +\frac{\varepsilon(1 - \varepsilon)\mathcal{L}(\theta;s,a_{l},a_{w}) - \varepsilon^{2}\mathcal{L}(\theta;s,a_{w},a_{l})}{1 - 2\varepsilon} \\ = \mathcal {L} (\theta ; s, a _ {w}, a _ {l}). \\ \end{array}
|
| 489 |
+
$$
|
| 490 |
+
|
| 491 |
+
# A.2. Variance of rDPO loss
|
| 492 |
+
|
| 493 |
+
First, define the un-normalized rDPO loss
|
| 494 |
+
|
| 495 |
+
$$
|
| 496 |
+
\tilde {\mathcal {L}} _ {\varepsilon} (\theta ; s, \widetilde {a} _ {w}, \widetilde {a} _ {l}) := (1 - 2 \varepsilon) \hat {\mathcal {L}} _ {\varepsilon} (\theta ; s, \widetilde {a} _ {w}, \widetilde {a} _ {l}) = (1 - \varepsilon) \mathcal {L} (\theta ; s, \widetilde {a} _ {w}, \widetilde {a} _ {l}) - \varepsilon \mathcal {L} (\theta ; s, \widetilde {a} _ {l}, \widetilde {a} _ {w}).
|
| 497 |
+
$$
|
| 498 |
+
|
| 499 |
+
Its variance is given by
|
| 500 |
+
|
| 501 |
+
$$
|
| 502 |
+
\operatorname {V a r} \left[ \widetilde {\mathcal {L}} _ {\varepsilon} (\theta ; s, \widetilde {a} _ {w}, \widetilde {a} _ {l}) \right] = \mathbb {E} \left[ \widetilde {\mathcal {L}} _ {\varepsilon} (\theta ; s, \widetilde {a} _ {w}, \widetilde {a} _ {l}) ^ {2} \right] - \mathbb {E} \left[ \widetilde {\mathcal {L}} _ {\varepsilon} (\theta ; s, \widetilde {a} _ {w}, \widetilde {a} _ {l}) \right] ^ {2}.
|
| 503 |
+
$$
|
| 504 |
+
|
| 505 |
+
From Lemma 3.1, we have
|
| 506 |
+
|
| 507 |
+
$$
|
| 508 |
+
\mathbb {E} \left[ \widetilde {\mathcal {L}} _ {\varepsilon} (\theta ; s, \widetilde {a} _ {w}, \widetilde {a} _ {l}) \right] = (1 - 2 \varepsilon) \mathcal {L} (\theta ; s, a _ {w}, a _ {l}).
|
| 509 |
+
$$
|
| 510 |
+
|
| 511 |
+
Furthermore, we have
|
| 512 |
+
|
| 513 |
+
$$
|
| 514 |
+
\mathbb {E} \left[ \widetilde {\mathcal {L}} _ {\varepsilon} (\theta ; s, \widetilde {a} _ {w}, \widetilde {a} _ {l}) ^ {2} \right] = (1 - \varepsilon) ^ {2} \mathbb {E} \left[ \mathcal {L} (\theta ; s, \widetilde {a} _ {w}, \widetilde {a} _ {l}) ^ {2} \right] + \varepsilon^ {2} \mathbb {E} \left[ \mathcal {L} (\theta ; s, \widetilde {a} _ {l}, \widetilde {a} _ {w}) ^ {2} \right] - 2 \varepsilon (1 - \varepsilon) \mathbb {E} \left[ \mathcal {L} (\theta ; s, \widetilde {a} _ {w}, \widetilde {a} _ {l}) \mathcal {L} (\theta ; s, \widetilde {a} _ {l}, \widetilde {a} _ {w}) \right].
|
| 515 |
+
$$
|
| 516 |
+
|
| 517 |
+
Now observe that
|
| 518 |
+
|
| 519 |
+
$$
|
| 520 |
+
\begin{array}{l} \mathbb {E} \left[ \mathcal {L} \left(\theta ; s, \widetilde {a} _ {w}, \widetilde {a} _ {l}\right) ^ {2} \right] = (1 - \varepsilon) \mathcal {L} \left(\theta ; s, a _ {w}, a _ {l}\right) ^ {2} + \varepsilon \mathcal {L} \left(\theta ; s, a _ {l}, a _ {w}\right) ^ {2}, \\ \mathbb {E} \left[ \mathcal {L} (\theta ; s, \widetilde {a} _ {l}, \widetilde {a} _ {w}) ^ {2} \right] = (1 - \varepsilon) \mathcal {L} (\theta ; s, a _ {l}, a _ {w}) ^ {2} + \varepsilon \mathcal {L} (\theta ; s, a _ {w}, a _ {l}) ^ {2}, \\ \mathbb {E} \left[ \mathcal {L} (\theta ; s, \widetilde {a} _ {w}, \widetilde {a} _ {l}) \mathcal {L} (\theta ; s, \widetilde {a} _ {l}, \widetilde {a} _ {w}) \right] = \mathcal {L} (\theta ; s, a _ {w}, a _ {l}) \mathcal {L} (\theta ; s, a _ {l}, a _ {w}). \\ \end{array}
|
| 521 |
+
$$
|
| 522 |
+
|
| 523 |
+
Combining all these, we get
|
| 524 |
+
|
| 525 |
+
$$
|
| 526 |
+
\mathbb {E} \left[ \widetilde {\mathcal {L}} _ {\varepsilon} (\theta ; s, \widetilde {a} _ {w}, \widetilde {a} _ {l}) ^ {2} \right] = (1 - 3 \varepsilon + 3 \varepsilon^ {2}) \mathcal {L} (\theta ; s, a _ {w}, a _ {l}) ^ {2} + \varepsilon (1 - \varepsilon) \mathcal {L} (\theta ; s, a _ {l}, a _ {w}) ^ {2} - 2 \varepsilon (1 - \varepsilon) \mathcal {L} (\theta ; s, a _ {w}, a _ {l}) \mathcal {L} (\theta ; s, a _ {l}, a _ {w}).
|
| 527 |
+
$$
|
| 528 |
+
|
| 529 |
+
Therefore, the variance of the un-normalized rDPO loss is given by
|
| 530 |
+
|
| 531 |
+
$\operatorname{Var}\left[\widetilde{\mathcal{L}}_{\varepsilon}(\theta; s, \widetilde{a}_w, \widetilde{a}_l)\right]$
|
| 532 |
+
|
| 533 |
+
$$
|
| 534 |
+
\begin{array}{l} = \left[ (1 - 3 \varepsilon + 3 \varepsilon^ {2}) - (1 - 2 \varepsilon) ^ {2} \right] \mathcal {L} (\theta ; s, a _ {w}, a _ {l}) ^ {2} + \varepsilon (1 - \varepsilon) \mathcal {L} (\theta ; s, a _ {l}, a _ {w}) ^ {2} - 2 \varepsilon (1 - \varepsilon) \mathcal {L} (\theta ; s, a _ {w}, a _ {l}) \mathcal {L} (\theta ; s, a _ {l}, a _ {w}) \\ = \varepsilon (1 - \varepsilon) \left[ \mathcal {L} (\theta ; s, a _ {w}, a _ {l}) ^ {2} + \mathcal {L} (\theta ; s, a _ {l}, a _ {w}) ^ {2} - 2 \mathcal {L} (\theta ; s, a _ {w}, a _ {l}) \mathcal {L} (\theta ; s, a _ {l}, a _ {w}) \right] \\ = \varepsilon (1 - \varepsilon) [ \mathcal {L} (\theta ; s, a _ {w}, a _ {l}) - \mathcal {L} (\theta ; s, a _ {l}, a _ {w}) ] ^ {2}. \\ \end{array}
|
| 535 |
+
$$
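A quick Monte Carlo check of Lemma 3.1 and of the variance expression above, treating the two clean losses as fixed numbers; this is a small numerical sanity check, not part of the proof, and the chosen values are arbitrary.

```python
import numpy as np

rng = np.random.default_rng(0)
a, b, eps = 1.3, 0.4, 0.3             # a = L(theta; s, a_w, a_l), b = L(theta; s, a_l, a_w)
flips = rng.random(1_000_000) < eps
loss_obs = np.where(flips, b, a)      # loss evaluated on the observed (possibly flipped) pair
loss_swapped = np.where(flips, a, b)  # loss with the observed roles exchanged
tilde_loss = (1 - eps) * loss_obs - eps * loss_swapped   # un-normalized rDPO loss
print(tilde_loss.mean() / (1 - 2 * eps))                 # ~ a, confirming Lemma 3.1
print(tilde_loss.var(), eps * (1 - eps) * (a - b) ** 2)  # both ~ 0.1701
```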
|
| 536 |
+
|
| 537 |
+
# A.3. Proof of Lemma 3.2
|
| 538 |
+
|
| 539 |
+
The gradients of the rDPO loss $\widehat{\mathcal{L}}_{\varepsilon}$ with respect to the parameters $\theta$ can be written as
|
| 540 |
+
|
| 541 |
+
$$
|
| 542 |
+
\begin{array}{l} \nabla_ {\theta} \widehat {\mathcal {L}} _ {\varepsilon} (\theta ; s, \widetilde {a} _ {w}, \widetilde {a} _ {l}) = \frac {(1 - \varepsilon) \nabla_ {\theta} \mathcal {L} (\theta ; s , \widetilde {a} _ {w} , \widetilde {a} _ {l}) - \varepsilon \nabla_ {\theta} \mathcal {L} (\theta ; s , \widetilde {a} _ {l} , \widetilde {a} _ {w})}{1 - 2 \varepsilon} \\ = - \beta \cdot \widehat {\zeta} _ {\theta , \varepsilon} \cdot \left(\nabla_ {\theta} \log \pi_ {\theta} (\widetilde {a} _ {w} | s) - \nabla_ {\theta} \log \pi_ {\theta} (\widetilde {a} _ {l} | s)\right), \\ \end{array}
|
| 543 |
+
$$
|
| 544 |
+
|
| 545 |
+
where the weights $\widehat{\zeta}_{\theta ,\varepsilon}$ are given by
|
| 546 |
+
|
| 547 |
+
$$
|
| 548 |
+
\begin{array}{l} \widehat {\zeta} _ {\theta , \varepsilon} = \frac {1 - \varepsilon}{1 - 2 \varepsilon} \sigma \left(\beta h _ {\theta} \left(s, \widetilde {a} _ {l}, \widetilde {a} _ {w}\right)\right) + \frac {\varepsilon}{1 - 2 \varepsilon} \sigma \left(\beta h _ {\theta} \left(s, \widetilde {a} _ {w}, \widetilde {a} _ {l}\right)\right) \\ = \frac {1 - \varepsilon}{1 - 2 \varepsilon} - \sigma (\beta h _ {\theta} (s, \widetilde {a} _ {w}, \widetilde {a} _ {l})) = \frac {\varepsilon}{1 - 2 \varepsilon} + \sigma (\beta h _ {\theta} (s, \widetilde {a} _ {l}, \widetilde {a} _ {w})) = \zeta_ {\theta} + \frac {\varepsilon}{1 - 2 \varepsilon}, \\ \end{array}
|
| 549 |
+
$$
|
| 550 |
+
|
| 551 |
+
where $\zeta_{\theta}$ are the weights of DPO gradients.
|
| 552 |
+
|
| 553 |
+
The gradient of cDPO loss is given by
|
| 554 |
+
|
| 555 |
+
$$
|
| 556 |
+
\begin{array}{l} \nabla_ {\theta} \bar {\mathcal {L}} _ {\varepsilon} (\theta ; s, \tilde {a} _ {w}, \tilde {a} _ {l}) = (1 - \varepsilon) \nabla_ {\theta} \mathcal {L} (\theta ; s, \tilde {a} _ {w}, \tilde {a} _ {l}) + \varepsilon \nabla_ {\theta} \mathcal {L} (\theta ; s, \tilde {a} _ {l}, \tilde {a} _ {w}) \\ = - \beta \cdot \bar {\zeta} _ {\theta , \varepsilon} \cdot \left(\nabla_ {\theta} \log \pi_ {\theta} (\widetilde {a} _ {w} | s) - \nabla_ {\theta} \log \pi_ {\theta} (\widetilde {a} _ {l} | s)\right), \\ \end{array}
|
| 557 |
+
$$
|
| 558 |
+
|
| 559 |
+
where the weights are $\bar{\zeta}_{\theta ,\varepsilon} = (1 - \varepsilon)\sigma (\beta h_{\theta}(s,\widetilde{a}_l,\widetilde{a}_w)) - \varepsilon \sigma (\beta h_{\theta}(s,\widetilde{a}_w,\widetilde{a}_l))$ . It holds that
|
| 560 |
+
|
| 561 |
+
$$
|
| 562 |
+
\bar {\zeta} _ {\theta , \varepsilon} = \sigma (\beta h _ {\theta} (s, \widetilde {a} _ {l}, \widetilde {a} _ {w})) - \varepsilon = \zeta_ {\theta} - \varepsilon = \widehat {\zeta} _ {\theta , \varepsilon} - \frac {2 \varepsilon (1 - \varepsilon)}{1 - 2 \varepsilon}.
|
| 563 |
+
$$
|
| 564 |
+
|
| 565 |
+
# A.4. Proof of Theorem 4.2
|
| 566 |
+
|
| 567 |
+
For the neural policy of the form (4), we have
|
| 568 |
+
|
| 569 |
+
$$
|
| 570 |
+
h _ {\theta} (s, a, a ^ {\prime}) = \left[ f _ {\theta} (s, a) - f _ {\theta} (s, a ^ {\prime}) \right] - \left[ f _ {\theta_ {0}} (s, a) - f _ {\theta_ {0}} (s, a ^ {\prime}) \right].
|
| 571 |
+
$$
|
| 572 |
+
|
| 573 |
+
Then from Assumption 4.1, we have
|
| 574 |
+
|
| 575 |
+
$$
|
| 576 |
+
\begin{array}{l} \left| h _ {\theta} \left(s, a, a ^ {\prime}\right) \right| \leq \left| f _ {\theta} (s, a) - f _ {\theta_ {0}} (s, a) \right| + \left| f _ {\theta} \left(s, a ^ {\prime}\right) - f _ {\theta_ {0}} \left(s, a ^ {\prime}\right) \right| \leq 2 \alpha_ {0}, \\ \left\| \nabla h _ {\theta} \left(s, a, a ^ {\prime}\right) \right\| = \left\| \nabla f _ {\theta} (s, a) - \nabla f _ {\theta} \left(s, a ^ {\prime}\right) \right\| \leq 2 \alpha_ {1}, \tag {19} \\ \left\| \nabla^ {2} h _ {\theta} (s, a, a ^ {\prime}) \right\| _ {\mathrm {o p}} = \left\| \nabla^ {2} f _ {\theta} (s, a) - \nabla^ {2} f _ {\theta} (s, a ^ {\prime}) \right\| _ {\mathrm {o p}} \leq 2 \alpha_ {2}. \\ \end{array}
|
| 577 |
+
$$
|
| 578 |
+
|
| 579 |
+
Now, we express the population DPO loss $\mathbb{E}_{s,a_w,a_l}\left[\mathcal{L}(\theta ;s,a_w,a_l)\right]$ by incorporating preference probabilities $p_{s,a,a'}^*$ as
|
| 580 |
+
|
| 581 |
+
$$
|
| 582 |
+
\mathcal {L} (\theta) = - \mathbb {E} _ {s, a, a ^ {\prime}, y} \Big [ y \log \sigma (\beta h _ {\theta} (s, a, a ^ {\prime})) + (1 - y) \log \left(1 - \sigma (\beta h _ {\theta} (s, a, a ^ {\prime}))\right) \Big ],
|
| 583 |
+
$$
|
| 584 |
+
|
| 585 |
+
where $y$ is a Bernoulli random variable with mean $p_{s,a,a'}^* = \sigma(\beta h_{\theta^*}(s,a,a'))$ .
|
| 586 |
+
|
| 587 |
+
Similarly, under the random noise model (8), let each $\widetilde{y}_i$ be Bernoulli distributed with probability $\mathbb{P}_{\theta^{*},\varepsilon}[\widetilde{a}_{w,i}\succ \widetilde{a}_{l,i}|s_i]$ where $\mathbb{P}_{\theta ,\varepsilon}[a\succ a^{\prime}|s]$ is defined in (10).
|
| 588 |
+
|
| 589 |
+
Denote $z_{i} = (s_{i},\widetilde{a}_{w,i},\widetilde{a}_{l,i})$ . Then, our de-biased loss function (12) can be re-written as<sup>4</sup>
|
| 590 |
+
|
| 591 |
+
$$
|
| 592 |
+
\begin{array}{l} \widehat {\mathcal {L}} _ {\varepsilon} (\theta) = - \frac {1}{n} \sum_ {i = 1} ^ {n} \left[ \mathbb {1} \left(\widetilde {y} _ {i} = 1\right) \left((1 - \varepsilon) \log \sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right) - \varepsilon \log \left(1 - \sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right)\right) \right. \right. \\ \left. + \mathbb {1} (\widetilde {y} _ {i} = 0) \Big ((1 - \varepsilon) \log (1 - \sigma (\beta h _ {\theta} (z _ {i})) - \varepsilon \log \sigma (\beta h _ {\theta} (z _ {i})) \Big) \right]. \\ \end{array}
|
| 593 |
+
$$
|
| 594 |
+
|
| 595 |
+
The gradient of the loss function is given by $\nabla \widehat{\mathcal{L}}_{\varepsilon}(\theta) = -\frac{\beta}{n}\sum_{i=1}^{n} V_{\theta,i} \nabla h_{\theta}(z_i) = -\frac{\beta}{n} Z_{\theta}^{\top} V_{\theta}$ , where
|
| 596 |
+
|
| 597 |
+
$$
|
| 598 |
+
V _ {\theta , i} = \mathbb {1} (\widetilde {y} _ {i} = 1) \left(\frac {\sigma^ {\prime} (\beta h _ {\theta} (z _ {i}))}{\sigma (\beta h _ {\theta} (z _ {i}))} (1 - \varepsilon) + \frac {\sigma^ {\prime} (\beta h _ {\theta} (z _ {i}))}{1 - \sigma (\beta h _ {\theta} (z _ {i}))} \varepsilon\right) - \mathbb {1} (\widetilde {y} _ {i} = 0) \left(\frac {\sigma^ {\prime} (\beta h _ {\theta} (z _ {i}))}{1 - \sigma (\beta h _ {\theta} (z _ {i}))} (1 - \varepsilon) + \frac {\sigma^ {\prime} (\beta h _ {\theta} (z _ {i}))}{\sigma (\beta h _ {\theta} (z _ {i}))} \varepsilon\right).
|
| 599 |
+
$$
|
| 600 |
+
|
| 601 |
+
It holds that for $\theta = \theta^{*}$
|
| 602 |
+
|
| 603 |
+
$$
|
| 604 |
+
\begin{array}{l} \mathbb {E} _ {\theta} \left[ V _ {\theta , i} \mid z _ {i} \right] = \left(\sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right) (1 - \varepsilon) + \left(1 - \sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right)\right) \varepsilon\right) \left(\frac {\sigma^ {\prime} \left(\beta h _ {\theta} \left(z _ {i}\right)\right)}{\sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right)} (1 - \varepsilon) + \frac {\sigma^ {\prime} \left(\beta h _ {\theta} \left(z _ {i}\right)\right)}{1 - \sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right)} \varepsilon\right) \\ \left. - \left((1 - \sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right)) (1 - \varepsilon) + \sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right) \varepsilon\right) \left(\frac {\sigma^ {\prime} \left(\beta h _ {\theta} \left(z _ {i}\right)\right)}{1 - \sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right)} (1 - \varepsilon) + \frac {\sigma^ {\prime} \left(\beta h _ {\theta} \left(z _ {i}\right)\right)}{\sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right)} \varepsilon\right) \right. \\ = 0. \\ \end{array}
|
| 605 |
+
$$
|
| 606 |
+
|
| 607 |
+
Furthermore, we have
|
| 608 |
+
|
| 609 |
+
$$
|
| 610 |
+
\begin{array}{l} \left| V _ {\theta , i} \right| _ {\widetilde {y} _ {i} = 1} = \left(1 - \sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right)\right) (1 - \varepsilon) + \sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right) \varepsilon =: \widetilde {p} _ {i, 0} \leq 1, \\ \left| V _ {\theta , i} \right| _ {\widetilde {y} _ {i} = 0} = \sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right) (1 - \varepsilon) + \left(1 - \sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right)\right) \varepsilon =: \widetilde {p} _ {i, 1} \leq 1. \\ \end{array}
|
| 611 |
+
$$
|
| 612 |
+
|
| 613 |
+
Therefore, it holds that $V_{\theta^{*},i}$ is zero-mean and 1-sub-Gaussian under the conditional distribution $\mathbb{P}_{\theta^{*}}\left[\cdot |z_i\right]$ .
|
| 614 |
+
|
| 615 |
+
Now the Hessian of the loss function is given by
|
| 616 |
+
|
| 617 |
+
$$
|
| 618 |
+
\begin{array}{l} \nabla^ {2} \widehat {\mathcal {L}} _ {\varepsilon} (\theta) = \frac {1}{n} \sum_ {i = 1} ^ {n} \left[ \mathbb {1} (\widetilde {y} _ {i} = 1) \left(\varepsilon \nabla^ {2} \log (1 - \sigma (\beta h _ {\theta} (z _ {i}))) - (1 - \varepsilon) \nabla^ {2} \log \sigma (\beta h _ {\theta} (z _ {i}))\right) \right. \\ \left. + \mathbb {1} (\widetilde {y} _ {i} = 0) \left(\varepsilon \nabla^ {2} \log \sigma (\beta h _ {\theta} (z _ {i})) - (1 - \varepsilon) \nabla^ {2} \log (1 - \sigma (\beta h _ {\theta} (z _ {i})))\right) \right], \\ \end{array}
|
| 619 |
+
$$
|
| 620 |
+
|
| 621 |
+
where
|
| 622 |
+
|
| 623 |
+
$$
|
| 624 |
+
\nabla^ {2} \log \sigma (\beta h _ {\theta} (z _ {i})) = \beta^ {2} \frac {\sigma^ {\prime \prime} (\beta h _ {\theta} (z _ {i})) \sigma (\beta h _ {\theta} (z _ {i})) - \sigma^ {\prime} (\beta h _ {\theta} (z _ {i})) ^ {2}}{\sigma (\beta h _ {\theta} (z _ {i})) ^ {2}} \nabla h _ {\theta} (z _ {i}) \nabla h _ {\theta} (z _ {i}) ^ {\top} + \beta (1 - \sigma (\beta h _ {\theta} (z _ {i}))) \nabla^ {2} h _ {\theta} (z _ {i}),
|
| 625 |
+
$$
|
| 626 |
+
|
| 627 |
+
$$
|
| 628 |
+
\nabla^ {2} \log (1 - \sigma (\beta h _ {\theta} (z _ {i}))) = - \beta^ {2} \frac {\sigma^ {\prime \prime} (\beta h _ {\theta} (z _ {i})) (1 - \sigma (\beta h _ {\theta} (z _ {i}))) + \sigma^ {\prime} (\beta h _ {\theta} (z _ {i})) ^ {2}}{(1 - \sigma (\beta h _ {\theta} (z _ {i}))) ^ {2}} \nabla h _ {\theta} (z _ {i}) \nabla h _ {\theta} (z _ {i}) ^ {\top} - \beta \sigma (\beta h _ {\theta} (z _ {i})) \nabla^ {2} h _ {\theta} (z _ {i}).
|
| 629 |
+
$$
|
| 630 |
+
|
| 631 |
+
Using $\sigma''(z) = \sigma'(z)(1 - 2\sigma(z))$ , we get
|
| 632 |
+
|
| 633 |
+
$$
|
| 634 |
+
\nabla^ {2} \log \sigma (\beta h _ {\theta} (z _ {i})) = - \beta^ {2} \sigma^ {\prime} (\beta h _ {\theta} (z _ {i})) \nabla h _ {\theta} (z _ {i}) \nabla h _ {\theta} (z _ {i}) ^ {\top} + \beta (1 - \sigma (\beta h _ {\theta} (z _ {i}))) \nabla^ {2} h _ {\theta} (z _ {i})
|
| 635 |
+
$$
|
| 636 |
+
|
| 637 |
+
$$
|
| 638 |
+
\nabla^ {2} \log (1 - \sigma (\beta h _ {\theta} (z _ {i}))) = - \beta^ {2} \sigma^ {\prime} (\beta h _ {\theta} (z _ {i})) \nabla h _ {\theta} (z _ {i}) \nabla h _ {\theta} (z _ {i}) ^ {\top} - \beta \sigma (\beta h _ {\theta} (z _ {i})) \nabla^ {2} h _ {\theta} (z _ {i}).
|
| 639 |
+
$$
|
| 640 |
+
|
| 641 |
+
Hence, the Hessian of the loss function takes the form
|
| 642 |
+
|
| 643 |
+
$$
|
| 644 |
+
\begin{array}{l} \nabla^ {2} \widehat {\mathcal {L}} _ {\varepsilon} (\theta) = (1 - 2 \varepsilon) \beta^ {2} \frac {1}{n} \sum_ {i = 1} ^ {n} \sigma^ {\prime} (\beta h _ {\theta} (z _ {i})) \nabla h _ {\theta} (z _ {i}) \nabla h _ {\theta} (z _ {i}) ^ {\top} - \frac {\beta}{n} \sum_ {i = 1} ^ {n} \mathbb {1} (\widetilde {y} _ {i} = 1) \Bigl (\sigma (\beta h _ {\theta} (z _ {i})) \varepsilon + (1 - \sigma (\beta h _ {\theta} (z _ {i}))) (1 - \varepsilon) \Bigr) \nabla^ {2} h _ {\theta} (z _ {i}) \\ + \frac {\beta}{n} \sum_ {i = 1} ^ {n} \mathbb {1} (\widetilde {y _ {i}} = 0) \left(\sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right) (1 - \varepsilon) + \left(1 - \sigma \left(\beta h _ {\theta} \left(z _ {i}\right)\right)\right) \varepsilon\right) \nabla^ {2} h _ {\theta} \left(z _ {i}\right) \\ = \beta^ {2} (1 - 2 \varepsilon) \frac {1}{n} \sum_ {i = 1} ^ {n} \sigma^ {\prime} (\beta h _ {\theta} (z _ {i})) \nabla h _ {\theta} (z _ {i}) \nabla h _ {\theta} (z _ {i}) ^ {\top} - \frac {\beta}{n} \sum_ {i = 1} ^ {n} \mathbb {1} (\widetilde {y} _ {i} = 1) \widetilde {p} _ {i, 0} \nabla^ {2} h _ {\theta} (z _ {i}) + \frac {\beta}{n} \sum_ {i = 1} ^ {n} \mathbb {1} (\widetilde {y} _ {i} = 0) \widetilde {p} _ {i, 1} \nabla^ {2} h _ {\theta} (z _ {i}) \\ \geqslant \gamma \beta^ {2} (1 - 2 \varepsilon) \frac {1}{n} \sum_ {i = 1} ^ {n} \nabla h _ {\theta} (z _ {i}) \nabla h _ {\theta} (z _ {i}) ^ {\top} - 2 \beta \alpha_ {2} I, \\ \end{array}
|
| 645 |
+
$$
|
| 646 |
+
|
| 647 |
+
which holds by (19) and observing that $\sigma'(\beta h_{\theta}(z_i)) \geq \gamma$ for all $\theta \in \Theta$ , where $\gamma = \frac{1}{2 + \exp(-4\beta\alpha_0) + \exp(4\beta\alpha_0)}$ , and due to the fact that $\varepsilon < 1/2$ .
|
| 648 |
+
|
| 649 |
+
Defining $v_{i} = \nabla h_{\theta}(z_{i}) - \nabla h_{\theta^{*}}(z_{i})$ , we have
|
| 650 |
+
|
| 651 |
+
$$
|
| 652 |
+
\begin{array}{l} \nabla h _ {\theta} \left(z _ {i}\right) \nabla h _ {\theta} \left(z _ {i}\right) ^ {\top} = \nabla h _ {\theta^ {*}} \left(z _ {i}\right) \nabla h _ {\theta^ {*}} \left(z _ {i}\right) ^ {\top} + \nabla h _ {\theta^ {*}} \left(z _ {i}\right) v _ {i} ^ {\top} + v _ {i} \nabla h _ {\theta^ {*}} \left(z _ {i}\right) ^ {\top} + v _ {i} v _ {i} ^ {\top} \\ \succeq \nabla h _ {\theta^ {*}} \left(z _ {i}\right) \nabla h _ {\theta^ {*}} \left(z _ {i}\right) ^ {\top} + \nabla h _ {\theta^ {*}} \left(z _ {i}\right) v _ {i} ^ {\top} + v _ {i} \nabla h _ {\theta^ {*}} \left(z _ {i}\right) ^ {\top}. \\ \end{array}
|
| 653 |
+
$$
|
| 654 |
+
|
| 655 |
+
By (19) and noting that $\| \theta \| \leqslant B$ for all $\theta \in \Theta$ , we have $\| \nabla h_{\theta^*}(z_i)\| \leqslant 2\alpha_1$ and $\| v_i\| \leqslant 2\alpha_2\|\theta^* - \theta\| \leqslant 2\alpha_2B$ . Then, using simple algebra, we have for all $u \in \mathbb{R}^d$ :
|
| 656 |
+
|
| 657 |
+
$$
|
| 658 |
+
u ^ {\top} \nabla^ {2} \hat {\mathcal {L}} _ {\varepsilon} (\theta) u \geqslant \frac {\gamma \beta^ {2} (1 - 2 \varepsilon)}{n} \| Z _ {\theta^ {*}} u \| ^ {2} - 2 \alpha_ {2} (\beta + 2 \gamma \beta^ {2} (1 - 2 \varepsilon) \alpha_ {1} B) \| u \| ^ {2}.
|
| 659 |
+
$$
|
| 660 |
+
|
| 661 |
+
Since $\theta^{*}\in \Theta$ , introducing the error vector $\Delta = \widehat{\theta}_n - \theta^*$ , we conclude that
|
| 662 |
+
|
| 663 |
+
$$
|
| 664 |
+
\gamma \beta^ {2} (1 - 2 \varepsilon) \| \Delta \| _ {\widehat {\Sigma} _ {\theta^ {*}}} ^ {2} \leqslant \left\| \nabla \widehat {\mathcal {L}} _ {\varepsilon} (\theta^ {*}) \right\| _ {(\widehat {\Sigma} _ {\theta^ {*}} + \lambda I) ^ {- 1}} \| \Delta \| _ {(\widehat {\Sigma} _ {\theta^ {*}} + \lambda I)} + 2 \alpha_ {2} \beta (1 + 2 \beta \gamma (1 - 2 \varepsilon) \alpha_ {1} B) \| \Delta \| ^ {2}
|
| 665 |
+
$$
|
| 666 |
+
|
| 667 |
+
for some $\lambda > 0$ . Introducing $M_{\theta^*} = \frac{1}{n^2} Z_{\theta^*} (\widehat{\Sigma}_{\theta^*} + \lambda I)^{-1} Z_{\theta^*}^\top$ , we now have $\left\| \nabla \widehat{\mathcal{L}}_{\varepsilon}(\theta^*) \right\|_{(\widehat{\Sigma}_{\theta^*} + \lambda I)^{-1}}^2 = \beta^2 V_{\theta^*}^\top M_{\theta^*} V_{\theta^*}$ . Then, the Bernstein's inequality for sub-Gaussian random variables in quadratic form (see e.g. Hsu et al. (2012, Theorem 2.1)) implies that with probability at least $1 - \delta$
|
| 668 |
+
|
| 669 |
+
$$
|
| 670 |
+
\begin{array}{l} \left\| \nabla \widehat {\mathcal {L}} _ {\varepsilon} \left(\theta^ {*}\right) \right\| _ {\left(\widehat {\Sigma} _ {\theta^ {*}} + \lambda I\right) ^ {- 1}} ^ {2} = \beta^ {2} V _ {\theta^ {*}} ^ {\top} M _ {\theta^ {*}} V _ {\theta^ {*}} \leqslant \beta^ {2} \left(\operatorname {t r} \left(M _ {\theta^ {*}}\right) + 2 \sqrt {\operatorname {t r} \left(M _ {\theta^ {*}} ^ {\top} M _ {\theta^ {*}}\right) \log (1 / \delta)} + 2 \| M _ {\theta^ {*}} \| \log (1 / \delta)\right) \\ \leqslant C _ {1} \cdot \beta^ {2} \cdot \frac {d + \log (1 / \delta)}{n} \\ \end{array}
|
| 671 |
+
$$
|
| 672 |
+
|
| 673 |
+
for some $C_1 > 0$ . Here we have used that $\mathrm{tr}(M_{\theta^*}) \leq d / n$ , $\mathrm{tr}(M_{\theta^*}^\top M_{\theta^*}) \leq d / n^2$ and $\| M_{\theta^*} \| \leq 1 / n$ . Noting that $\|\Delta\| \leqslant B$ , this gives us
|
| 674 |
+
|
| 675 |
+
$$
|
| 676 |
+
\begin{array}{l} \gamma \beta^ {2} (1 - 2 \varepsilon) \| \Delta \| _ {\widehat {\Sigma} _ {\theta^ {*}} + \lambda I} ^ {2} \leqslant \left\| \nabla \widehat {\mathcal {L}} _ {\varepsilon} (\theta^ {*}) \right\| _ {(\widehat {\Sigma} _ {\theta^ {*}} + \lambda I) ^ {- 1}} \| \Delta \| _ {(\widehat {\Sigma} _ {\theta^ {*}} + \lambda I)} + (\lambda \gamma \beta^ {2} (1 - 2 \varepsilon) + 2 \alpha_ {2} \beta (1 + 2 \beta \gamma (1 - 2 \varepsilon) \alpha_ {1} B)) B ^ {2} \\ \leqslant \sqrt {C _ {1} \cdot \beta^ {2} \cdot \frac {d + \log (1 / \delta)}{n}} \| \Delta \| _ {(\widehat {\Sigma} _ {\theta^ {*}} + \lambda I)} + (\lambda \gamma \beta^ {2} (1 - 2 \varepsilon) + 2 \alpha_ {2} \beta (1 + 2 \beta \gamma (1 - 2 \varepsilon) \alpha_ {1} B)) B ^ {2}. \\ \end{array}
|
| 677 |
+
$$
|
| 678 |
+
|
| 679 |
+
Solving for the above inequality, we get
|
| 680 |
+
|
| 681 |
+
$$
|
| 682 |
+
\| \Delta \| _ {(\widehat {\Sigma} _ {\theta^ {*}} + \lambda I)} \leqslant C _ {2} \cdot \sqrt {\frac {1}{\gamma^ {2} \beta^ {2} (1 - 2 \varepsilon) ^ {2}} \cdot \frac {d + \log (1 / \delta)}{n} + (\lambda + \frac {\alpha_ {2}}{\gamma \beta (1 - 2 \varepsilon)} + \alpha_ {1} \alpha_ {2} B) B ^ {2}}
|
| 683 |
+
$$
|
| 684 |
+
|
| 685 |
+
for some constant $C_2 > 0$ . Hence, we get
|
| 686 |
+
|
| 687 |
+
$$
|
| 688 |
+
\left\| \widehat {\theta} _ {n} - \theta^ {*} \right\| _ {(\widehat {\Sigma} _ {\theta^ {*}} + \lambda I)} \leqslant \frac {C}{\gamma \beta (1 - 2 \varepsilon)} \cdot \sqrt {\frac {d + \log (1 / \delta)}{n}} + C ^ {\prime} \cdot B \sqrt {\lambda + \frac {\alpha_ {2}}{\gamma \beta (1 - 2 \varepsilon)} + \alpha_ {1} \alpha_ {2} B},
|
| 689 |
+
$$
|
| 690 |
+
|
| 691 |
+
for some $C, C' > 0$ . This completes our proof.
|
| 692 |
+
|
| 693 |
+
# A.5. Proof of Theorem 4.4
|
| 694 |
+
|
| 695 |
+
Define the population covariance matrix of centered gradients of the function $f_{\theta}(s,a)$ under policy $\pi$ :
|
| 696 |
+
|
| 697 |
+
$$
|
| 698 |
+
\Sigma_ {\pi} = \mathbb {E} _ {s \sim \rho , a \sim \pi (\cdot | s)} \left[ g _ {\theta} (s, a) g _ {\theta} (s, a) ^ {\top} \right], \tag {20}
|
| 699 |
+
$$
|
| 700 |
+
|
| 701 |
+
where $g_{\theta}(s,a) = \nabla f_{\theta}(s,a) - \mathbb{E}_{a^{\prime}\sim \pi (\cdot |s)}[\nabla f_{\theta}(s,a^{\prime})]$ denotes the centered features. For log-linear policies, $\nabla f_{\theta}(s,a) = \phi (s,a)$ and $g_{\theta}(s,a) = \phi (s,a) - \mathbb{E}_{\theta}[\phi (s,a')]$ , which gives
|
| 702 |
+
|
| 703 |
+
$$
|
| 704 |
+
\Sigma_ {\pi} = \mathbb {E} _ {s \sim \rho , a \sim \pi (\cdot | s)} \left[ \phi (s, a) \phi (s, a) ^ {\top} \right] - \mathbb {E} _ {s \sim \rho , a \sim \pi (\cdot | s)} \left[ \phi (s, a) \right] \mathbb {E} _ {s \sim \rho , a \sim \pi (\cdot | s)} \left[ \phi (s, a) \right] ^ {\top}.
|
| 705 |
+
$$
|
| 706 |
+
|
| 707 |
+
Define the sample covariance matrix and the population covariance matrix of feature differences under the clean data $\mathcal{D}$:
|
| 708 |
+
|
| 709 |
+
$$
|
| 710 |
+
\widehat {\Sigma} = \frac {1}{n} \sum_ {i = 1} ^ {n} \left(\phi (s _ {i}, a _ {w, i}) - \phi (s _ {i}, a _ {l, i})\right) \left(\phi (s _ {i}, a _ {w, i}) - \phi (s _ {i}, a _ {l, i})\right) ^ {\top},
|
| 711 |
+
$$
|
| 712 |
+
|
| 713 |
+
$$
|
| 714 |
+
\Sigma_ {\pi , \text {d i f f}} = \mathbb {E} _ {s \sim \rho , a, a ^ {\prime} \sim \pi (\cdot | s)} \left[ \left(\phi (s, a) - \phi (s, a ^ {\prime})\right) \left(\phi (s, a) - \phi (s, a ^ {\prime})\right) ^ {\top} \right].
|
| 715 |
+
$$
|
| 716 |
+
|
| 717 |
+
Since $a, a'$ are independent samples from policy $\pi(\cdot|s)$ , it holds that
|
| 718 |
+
|
| 719 |
+
$$
|
| 720 |
+
\Sigma_ {\pi , \text {d i f f}} = 2 \Sigma_ {\pi}
|
| 721 |
+
$$
|
| 722 |
+
|
| 723 |
+
Since $(a_{w,i}, a_{l,i})$ are independent samples from the SFT policy $\pi_{\mathrm{sft}}(\cdot | s)$ , by a matrix concentration inequality (Tropp et al., 2015), we have the following lemma.
|
| 724 |
+
|
| 725 |
+
Lemma A.1. With probability at least $1 - \delta$ , for some universal constant $C$ , we have
|
| 726 |
+
|
| 727 |
+
$$
|
| 728 |
+
\left\| \widehat {\Sigma} - \Sigma_ {\pi_ {\mathrm {s f t}}, \mathrm {d i f f}} \right\| _ {2} \leq C \sqrt {d \log (4 d / \delta) / n}.
|
| 729 |
+
$$
|
| 730 |
+
|
| 731 |
+
This implies, for $\lambda \geq C\sqrt{d\log(4d / \delta) / n}$ , with probability at least $1 - \delta$ :
|
| 732 |
+
|
| 733 |
+
$$
|
| 734 |
+
\begin{array}{l} \widehat {\Sigma} + \lambda I \succeq \Sigma_ {\pi_ {\mathrm {sft}}, \mathrm {diff}} + \left(\lambda - C \sqrt {d \log (4 d / \delta) / n}\right) I \\ \succeq \Sigma_ {\pi_ {\mathrm {sft}}, \mathrm {diff}} = 2 \Sigma_ {\pi_ {\mathrm {sft}}} \tag {21} \\ \end{array}
|
| 735 |
+
$$
|
| 736 |
+
|
| 737 |
+
Now, we bound the sub-optimality gap conditioned on this high-confidence event. Since $r^*(s, a) \leq r_{\max}$ for all $(s, a)$ , we have the sub-optimality gap:
|
| 738 |
+
|
| 739 |
+
$$
|
| 740 |
+
\begin{array}{l} r ^ {*} \left(\pi^ {*}\right) - r ^ {*} \left(\widehat {\pi} _ {n}\right) = \mathbb {E} _ {s \sim \rho , a \sim \pi^ {*} (\cdot | s)} \left[ r ^ {*} (s, a) \right] - \mathbb {E} _ {s \sim \rho , a \sim \widehat {\pi} _ {n} (\cdot | s)} \left[ r ^ {*} (s, a) \right] \\ \leq r _ {\max } \mathbb {E} _ {s \sim \rho} \left[ \mathrm {T V} \left(\pi^ {*} (\cdot | s), \widehat {\pi} _ {n} (\cdot | s)\right) \right] \\ \leq r _ {\max } \left[ \mathbb {E} _ {s \sim \rho} \sqrt {2 \operatorname {K L} \left(\pi^ {*} (\cdot | s) , \widehat {\pi} _ {n} (\cdot | s)\right)} \right] \\ \leq r _ {\max} \sqrt {2 \mathbb {E} _ {s \sim \rho} \left[ \mathrm {K L} \left(\pi^ {*} (\cdot | s) , \widehat {\pi} _ {n} (\cdot | s)\right) \right]}, \\ \end{array}
|
| 741 |
+
$$
|
| 742 |
+
|
| 743 |
+
where the second step follows from Pinsker's inequality and the last step is due to Jensen's inequality.
|
| 744 |
+
|
| 745 |
+
Since the neural policy class (4) belongs to the exponential family of distributions, it holds that $\mathrm{KL}(\pi_{\theta}(\cdot |s),\pi_{\theta '}(\cdot |s)) = \mathcal{B}_{\mathcal{L}_s}(\theta ',\theta)$, where $\mathcal{B}_{\mathcal{L}_s}$ is the Bregman divergence with potential function $\mathcal{L}_s(\theta) = \log \sum_{a'\in \mathcal{A}}\exp(f_\theta (s,a'))$. It is defined as
|
| 746 |
+
|
| 747 |
+
$$
|
| 748 |
+
\mathcal {B} _ {\mathcal {L} _ {s}} \left(\theta^ {\prime}, \theta\right) \stackrel {{\mathrm {d e f}}} {{=}} \mathcal {L} _ {s} \left(\theta^ {\prime}\right) - \mathcal {L} _ {s} (\theta) - \left\langle \theta^ {\prime} - \theta , \nabla \mathcal {L} _ {s} (\theta) \right\rangle .
|
| 749 |
+
$$
|
| 750 |
+
|
| 751 |
+
Therefore, we get
|
| 752 |
+
|
| 753 |
+
$$
|
| 754 |
+
\mathrm {K L} \left(\pi^ {*} (\cdot | s), \widehat {\pi} _ {n} (\cdot | s)\right) = \mathcal {L} _ {s} (\widehat {\theta} _ {n}) - \mathcal {L} _ {s} (\theta^ {*}) - \langle \widehat {\theta} _ {n} - \theta^ {*}, \nabla \mathcal {L} _ {s} (\theta^ {*}) \rangle = \frac {1}{2} (\widehat {\theta} _ {n} - \theta^ {*}) ^ {\top} \nabla^ {2} \mathcal {L} _ {s} (\theta) (\widehat {\theta} _ {n} - \theta^ {*})
|
| 755 |
+
$$
|
| 756 |
+
|
| 757 |
+
for some $\theta \in \{t\theta^{*} + (1 - t)\widehat{\theta}_{n}:t\in [0,1]\}$ using Taylor's approximation.
|
| 758 |
+
|
| 759 |
+
Now, for log-linear policy, we have $\mathbb{E}_{s\sim \rho}\left[\nabla^2\mathcal{L}_s(\theta)\right] = \Sigma_{\pi_\theta}$ . Then, we can upper bound the sub-optimality gap using relative condition number $\kappa$ as
|
| 760 |
+
|
| 761 |
+
$$
|
| 762 |
+
\begin{array}{l} r ^ {*} (\pi^ {*}) - r ^ {*} (\widehat {\pi} _ {n}) \leq r _ {\max} \left\| \widehat {\theta} _ {n} - \theta^ {*} \right\| _ {\Sigma_ {\pi_ {\theta}}} \\ = r _ {\max} \left\| \widehat {\theta} _ {n} - \theta^ {*} \right\| _ {\widehat {\Sigma} + \lambda I} \sqrt {\frac {(\widehat {\theta} _ {n} - \theta^ {*}) ^ {\top} \Sigma_ {\pi_ {\theta}} (\widehat {\theta} _ {n} - \theta^ {*})}{(\widehat {\theta} _ {n} - \theta^ {*}) ^ {\top} (\widehat {\Sigma} + \lambda I) (\widehat {\theta} _ {n} - \theta^ {*})}} \\ \leq \frac {r _ {\mathrm {m a x}}}{\sqrt {2}} \left\| \widehat {\theta} _ {n} - \theta^ {*} \right\| _ {\widehat {\Sigma} + \lambda I} \sqrt {\frac {(\widehat {\theta} _ {n} - \theta^ {*}) ^ {\top} \Sigma_ {\pi_ {\theta}} (\widehat {\theta} _ {n} - \theta^ {*})}{(\widehat {\theta} _ {n} - \theta^ {*}) ^ {\top} \Sigma_ {\pi_ {\mathrm {s f t}}} (\widehat {\theta} _ {n} - \theta^ {*})}} \\ \leq \frac {r _ {\operatorname* {m a x}}}{\sqrt {2}} \left\| \widehat {\theta} _ {n} - \theta^ {*} \right\| _ {\widehat {\Sigma} + \lambda I} \sqrt {\sup _ {v \in \mathbb {R} ^ {d}} \frac {v ^ {\top} \Sigma_ {\pi_ {\theta}} v}{v ^ {\top} \Sigma_ {\pi_ {\mathrm {s f t}}} v}} \\ = \frac {r _ {\mathrm {m a x}} \sqrt {\kappa_ {\pi_ {\theta}}}}{\sqrt {2}} \left\| \widehat {\theta} _ {n} - \theta^ {*} \right\| _ {\widehat {\Sigma} + \lambda I} \leq \frac {r _ {\mathrm {m a x}} \sqrt {\kappa}}{\sqrt {2}} \left\| \widehat {\theta} _ {n} - \theta^ {*} \right\| _ {\widehat {\Sigma} + \lambda I}. \\ \end{array}
|
| 763 |
+
$$
|
| 764 |
+
|
| 765 |
+
Here, the third step follows from (21), the fifth step holds by the definition of the (relative) condition number, and in the final step we use that $\kappa = \max_{\pi \in \Pi} \kappa_{\pi}$. This completes our proof.
|
| 766 |
+
|
| 767 |
+
# A.6. Proof of Lemma 4.5
|
| 768 |
+
|
| 769 |
+
Recall that $\widehat{r}_{\theta}(s,a) = \log \frac{\pi_{\theta}(a|s)}{\pi_{\mathrm{sft}}(a|s)}$ denotes the implicit reward defined by the trained policy $\pi_{\theta}$ and the SFT policy $\pi_{\mathrm{sft}}$ . Then, the expected margin gap under the clean distribution is
|
| 770 |
+
|
| 771 |
+
$$
|
| 772 |
+
\begin{aligned}
\mathcal{M}(\pi^*) - \mathcal{M}(\widehat{\pi}_n) &= \mathbb{E}_{s \sim \rho, (a_w, a_l) \sim \pi_{\mathrm{sft}}} \left[ [\widehat{r}_{\theta^*}(a_w | s) - \widehat{r}_{\theta^*}(a_l | s)] - [\widehat{r}_{\widehat{\theta}_n}(a_w | s) - \widehat{r}_{\widehat{\theta}_n}(a_l | s)] \right] \\
&= \mathbb{E}_{s \sim \rho, (a_w, a_l) \sim \pi_{\mathrm{sft}}} \left[ \log \frac{\pi_{\theta^*}(a_w | s)}{\pi_{\theta^*}(a_l | s)} - \log \frac{\pi_{\widehat{\theta}_n}(a_w | s)}{\pi_{\widehat{\theta}_n}(a_l | s)} \right] \\
&= \mathbb{E}_{s \sim \rho, (a_w, a_l) \sim \pi_{\mathrm{sft}}} \left[ \left[ f_{\theta^*}(s, a_w) - f_{\theta^*}(s, a_l) \right] - \left[ f_{\widehat{\theta}_n}(s, a_w) - f_{\widehat{\theta}_n}(s, a_l) \right] \right] \\
&= \mathbb{E}_{s \sim \rho, (a_w, a_l) \sim \pi_{\mathrm{sft}}} \left[ \left[ f_{\theta^*}(s, a_w) - f_{\widehat{\theta}_n}(s, a_w) \right] - \left[ f_{\theta^*}(s, a_l) - f_{\widehat{\theta}_n}(s, a_l) \right] \right] \\
&\leq \mathbb{E}_{s \sim \rho, (a_w, a_l) \sim \pi_{\mathrm{sft}}} \left[ \left| f_{\theta^*}(s, a_w) - f_{\widehat{\theta}_n}(s, a_w) \right| + \left| f_{\theta^*}(s, a_l) - f_{\widehat{\theta}_n}(s, a_l) \right| \right] \\
&\leq 2 \alpha_1 \left\| \theta^* - \widehat{\theta}_n \right\|,
\end{aligned}
|
| 773 |
+
$$
|
| 774 |
+
|
| 775 |
+
where the final step follows from Assumption 4.1. Now, assuming $\widehat{\Sigma}$ to be invertible for log-linear policies, we get from (15):
|
| 776 |
+
|
| 777 |
+
$$
|
| 778 |
+
\left\| \widehat{\theta}_n - \theta^* \right\|_{\widehat{\Sigma}} = O\left( \frac{1}{\sqrt{\lambda_{\min}(\widehat{\Sigma})}} \frac{1}{\gamma \beta (1 - 2\varepsilon)} \sqrt{\frac{d}{n}} \right).
|
| 779 |
+
$$
|
| 780 |
+
|
| 781 |
+
Setting $\alpha_{1} = LB$ for log-linear policies, we obtain
|
| 782 |
+
|
| 783 |
+
$$
|
| 784 |
+
\mathcal{M}(\pi^*) - \mathcal{M}(\widehat{\pi}_n) = O\Big( \frac{1}{\sqrt{\lambda_{\min}(\widehat{\Sigma})}} \frac{2LB}{\gamma \beta (1 - 2\varepsilon)} \sqrt{\frac{d}{n}} \Big),
|
| 785 |
+
$$
|
| 786 |
+
|
| 787 |
+
which completes our proof.
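For intuition, the margin $\mathcal{M}(\pi_\theta)$ bounded above is simply the expected gap of implicit rewards between chosen and rejected responses. A small Monte-Carlo sketch with synthetic per-response log-probabilities (all numbers are placeholders, not model outputs):

```python
import numpy as np

def implicit_reward(logp_policy, logp_sft):
    # r_hat_theta(s, a) = log pi_theta(a | s) - log pi_sft(a | s)
    return logp_policy - logp_sft

def expected_margin(logp_w, logp_l, logp_sft_w, logp_sft_l):
    """Monte-Carlo estimate of M(pi_theta) over samples (s, a_w, a_l)."""
    margins = implicit_reward(logp_w, logp_sft_w) - implicit_reward(logp_l, logp_sft_l)
    return margins.mean()

# toy per-response log-probabilities for n preference pairs (stand-ins for model outputs)
rng = np.random.default_rng(2)
n = 1000
logp_w, logp_l = rng.normal(-10.0, 1.0, n), rng.normal(-12.0, 1.0, n)
logp_sft_w, logp_sft_l = rng.normal(-11.0, 1.0, n), rng.normal(-11.0, 1.0, n)
print(expected_margin(logp_w, logp_l, logp_sft_w, logp_sft_l))
```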
|
| 788 |
+
|
| 789 |
+
# B. Hyperparameter Details
|
| 790 |
+
|
| 791 |
+
The hyperparameters for the experiments are outlined in Table 4 and Table 5. Any hyperparameters not explicitly mentioned use the default values in the TRL<sup>5</sup> library.
|
| 792 |
+
|
| 793 |
+
Table 4. Hyperparameters used for methods in the DPO Family
|
| 794 |
+
|
| 795 |
+
<table><tr><td>Parameter</td><td>Value</td></tr><tr><td>beta</td><td>0.1</td></tr><tr><td>learning rate</td><td>0.001</td></tr><tr><td>batch size</td><td>16</td></tr><tr><td>max length</td><td>512</td></tr><tr><td>max prompt length</td><td>128</td></tr></table>
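For reference, one way Table 4's values could be passed to TRL's DPO trainer is sketched below; the model and dataset identifiers are placeholders and the argument names assume a recent TRL release, so this is not necessarily the exact training script used here.

```python
from datasets import load_dataset
from transformers import AutoModelForCausalLM, AutoTokenizer
from trl import DPOConfig, DPOTrainer

model_name = "gpt2"                                  # placeholder for the SFT checkpoint
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

config = DPOConfig(
    output_dir="dpo-out",
    beta=0.1,                                        # Table 4: beta
    learning_rate=1e-3,                              # Table 4: learning rate
    per_device_train_batch_size=16,                  # Table 4: batch size
    max_length=512,                                  # Table 4: max length
    max_prompt_length=128,                           # Table 4: max prompt length
)

# placeholder preference dataset with (prompt, chosen, rejected) columns
train_dataset = load_dataset("trl-lib/ultrafeedback_binarized", split="train")
trainer = DPOTrainer(model=model, args=config, train_dataset=train_dataset, processing_class=tokenizer)
# trainer.train()
```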
|
| 796 |
+
|
| 797 |
+
Table 5. Hyperparameters used for methods in the PPO Family
|
| 798 |
+
|
| 799 |
+
<table><tr><td>Model</td><td>Parameter</td><td>Value</td></tr><tr><td rowspan="2">Reward Model</td><td>learning rate</td><td>1.41 × 10<sup>-5</sup></td></tr><tr><td>batch size</td><td>16</td></tr><tr><td rowspan="2">PPO</td><td>learning rate</td><td>1.41 × 10<sup>-5</sup></td></tr><tr><td>batch size</td><td>16</td></tr></table>
|
2403.00xxx/2403.00409/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7531f941232b1e4d4fd4e1761ce8fad85b95d34b072abb192ceaff30333594e1
|
| 3 |
+
size 1080103
|
2403.00xxx/2403.00409/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00425/4bcef5a3-e808-495b-b52d-5b7269aa0844_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00425/4bcef5a3-e808-495b-b52d-5b7269aa0844_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00425/4bcef5a3-e808-495b-b52d-5b7269aa0844_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:eb29dad22d87bd2bc7c03cad76e8496d0ff62787497e8d320e5ff9cbcc8882c7
|
| 3 |
+
size 3335073
|
2403.00xxx/2403.00425/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00425/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:6fda28121f11e1af86e0179949e3f16a04aee772a32e75664d53745908a830bd
|
| 3 |
+
size 1258737
|
2403.00xxx/2403.00425/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00436/8c21a5cc-c292-4537-b2a4-73c65d3149da_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00436/8c21a5cc-c292-4537-b2a4-73c65d3149da_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00436/8c21a5cc-c292-4537-b2a4-73c65d3149da_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ef4468d777f579e95cc912e140dae280263f5716fdd6aae55e438a677ddc1a82
|
| 3 |
+
size 4186850
|
2403.00xxx/2403.00436/full.md
ADDED
|
@@ -0,0 +1,510 @@
|
| 1 |
+
# Abductive Ego-View Accident Video Understanding for Safe Driving Perception
|
| 2 |
+
|
| 3 |
+
Jianwu Fang $^{1}$ , Lei-lei Li $^{2}$ , Junfei Zhou $^{2}$ , Junbin Xiao $^{3}$ , Hongkai Yu $^{4}$ , Chen Lv $^{5}$ , Jianru Xue $^{1}$ , and Tat-Seng Chua $^{3}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ Xi'an Jiaotong University $^{2}$ Chang'an University $^{3}$ National University of Singapore
|
| 6 |
+
|
| 7 |
+
$^{4}$ Cleveland State University $^{5}$ Nanyang Technological University
|
| 8 |
+
|
| 9 |
+
1.{fangjianwu, jrxue}@mail.xjtu.edu.cn 2.{jeffreychou777,6701605321ileilei}@gmail.com
|
| 10 |
+
|
| 11 |
+
3.{junbin,chuats}@comp.nus.edu.sg 4.h.yu19@csuohio.edu 5.lyuchen@ntu.edu.sg
|
| 12 |
+
|
| 13 |
+
# Abstract
|
| 14 |
+
|
| 15 |
+
We present MM-AU, a novel dataset for Multi-Modal Accident video Understanding. MM-AU contains 11,727 in-the-wild ego-view accident videos, each with temporally aligned text descriptions. We annotate over 2.23 million object boxes and 58,650 pairs of video-based accident reasons, covering 58 accident categories. MM-AU supports various accident understanding tasks, particularly multimodal video diffusion to understand accident cause-effect chains for safe driving. With MM-AU, we present an Abductive accident Video understanding framework for Safe Driving perception (AdVersa-SD). AdVersa-SD performs video diffusion via an Object-Centric Video Diffusion (OAVD) method which is driven by an abductive CLIP model. This model involves a contrastive interaction loss to learn the pair co-occurrence of normal, near-accident, accident frames with the corresponding text descriptions, such as accident reasons, prevention advice, and accident categories. OAVD enforces the causal region learning while fixing the content of the original frame background in video generation, to find the dominant cause-effect chain for certain accidents. Extensive experiments verify the abductive ability of AdVersa-SD and the superiority of OAVD against the state-of-the-art diffusion models. Additionally, we provide careful benchmark evaluations for object detection and accident reason answering since AdVersa-SD relies on precise object and accident reason information. The dataset and code are released at www.lotvsmmau.net.
|
| 16 |
+
|
| 17 |
+
# 1. Introduction
|
| 18 |
+
|
| 19 |
+
Autonomous Vehicles (AVs) are around the corner for practical use [11]. Yet, occasionally occurring traffic accidents remain among the biggest obstacles to overcome. To make a step further, it is urgent to comprehensively understand traffic accidents, such as telling what objects are
|
| 20 |
+
|
| 21 |
+
involved, why an accident occurs and how to prevent it. Techniques that can answer these questions are of crucial importance for safe AV systems. So far, there is a lack of a large-scale dataset to develop such techniques.
|
| 22 |
+
|
| 23 |
+
Therefore, this paper constructs MM-AU, a multi-modal dataset for ego-view accident video understanding. MM-AU contains 11,727 in-the-wild ego-view accident videos. The videos are temporally aligned with the text descriptions of accident reasons, prevention solutions, and accident categories. In total, 58.6K pairs of video-based Accident reason Answers (ArA) are annotated for 58 accident categories. Moreover, to enable object-centric accident video understanding, we annotate over 2.23M object boxes for about 463K video frames. As shown in Fig. 1, MM-AU can facilitate 8 tasks of traffic accident video understanding, and the models are required to infer ① what objects are involved, ② what kinds of accidents, ③ where and ④ when the accident will occur, ⑤ why the accident occurs, ⑥ what are the keys to accident reasons, ⑦ how to prevent it, and ⑧ multimodal accident video diffusion.
|
| 24 |
+
|
| 25 |
+
Different from previous works that concentrate on the first four basic tasks [3, 15, 24, 59], we advocate an Abductive Accident Video Understanding for Safe Driving (AdVersa-SD) perception by incorporating accident reasons into accident video understanding. For tasks $⑤$ - $⑦$ , a few works [37, 65] formulate a Video Question Answering (VQA) problem to discern the accident reasons for given videos. However, understanding the cause-effect chain of an accident is more crucial for preventing collisions. Hence, we present AdVersa-SD, which underscores a diffusion technique to bridge the visual content (effect) with specific text prompts (cause).
|
| 26 |
+
|
| 27 |
+
Leveraging the text-vision CLIP model [44] and video diffusion techniques [12, 57], we propose an abductive CLIP in AdVersa-SD with a contrastive interaction loss for learning the accident-reason-related semantic co-occurrence within text and video clips, such as the pairs of $(\square, t_r)$ and $(\square, t_a)$ . To verify the abductive
|
| 28 |
+
|
| 29 |
+
$t_{ar}$ : the time that the crashing object appears; $t_{ai}$ : the beginning time of the accident; $t_{co}$ : the beginning time of the collision; $t_{ae}$ : the ending time of the accident.

[Figure 1 panels omitted; recoverable panel titles: (2) What kinds of accidents, (3) Where the accident occurs, (4) When the accident will occur.]
|
| 49 |
+
Figure 1. The ego-view multimodality accident video understanding tasks that MM-AU can support, where we highlight the text descriptions for accident reason $(t_r)$ , prevention advice $(t_p)$ , and accident category $(t_a)$ , as well as temporal windows (accident-free $\square$ , near-accident $\square$ , and accident $\square$ windows) for different tasks.
|
| 57 |
+
ability of abductive CLIP, we treat it as an engine to drive an Object-centric Accident Video Diffusion (OAVD) model by enforcing the learning of object locations and restricting the influence of frame background for causal region generation. OAVD takes the stable diffusion model [46] as the baseline and extends it to the high-quality accident video generation by 3D convolution and well-designed spatial-temporal attention modules, as well as an object region masked latent representation reconstruction. This formulation is useful for finding the dominant cause-effect chain for certain accidents irrelevant to the environmental background. Thus, the contributions are threefold.
|
| 58 |
+
|
| 59 |
+
(1) A new large-scale ego-view multimodality accident understanding dataset, i.e., MM-AU, is created, which will facilitate the more promising abductive understanding for safe driving perception. (2) We present AdVersa-SD, an abductive accident video understanding framework, to learn the dominant reason-occurrence elements of accidents within text-video pairs. (3) Within AdVersa-SD, we propose an Object-centric Accident Video Diffusion (OAVD) driven by the abductive CLIP to attempt to explicitly explore the causal-effect chain of accident occurrence, and positive results are obtained.
|
| 60 |
+
|
| 61 |
+
# 2. Related Work
|
| 62 |
+
|
| 63 |
+
# 2.1. Ego-View Accident Video Understanding
|
| 64 |
+
|
| 65 |
+
Accident Detection: Accident detection in ego-view videos aims to localize the spatial regions and temporal
|
| 66 |
+
|
| 67 |
+
frame windows where the accident occurs. Because of the drastic change in the shape, location, and relations of the participants, the key problems of accident detection are to extract robust appearance or motion features for the representation of video frames, spatial-temporal video volumes, or trajectories. Commonly, the frame consistency [18, 43, 49, 73], location consistency [21, 32, 48, 52], and scene context consistency (e.g., the object interactions) [13, 47, 56] are modeled to find the accident window or regions. Up to now, unsupervised location or frame prediction has been a common choice for model designing. For example, DoTA [66, 67], the typical ego-view accident detection method, computes the Standard Deviation (STD) of predicted locations of the pre-detected objects.
|
| 68 |
+
|
| 69 |
+
Accident Anticipation: Accident anticipation aims to forecast the probability and prefers an early warning of future accidents based on the complex scene structure modeling in video frames [2, 25, 40]. The earliness is maintained by taking the exponential loss [3, 5, 22, 51, 59] to penalize the positive occurrence of the accident. Different from accident detection, most accident anticipation works need to provide the accident window annotation to fulfill supervised learning. One groundbreaking work by Chan et al. [5] models a Dynamic-Spatial-Attention Recurrent Neural Network (DSA-RNN) to correlate the temporal consistency of road participants' tracklets, which is extended by the works [26, 27] to compute the riskiness of video frames or objects. To boost the explainability of accident anticipation, Bao and Kong [3] develop a Deep Reinforced accident
|
| 70 |
+
|
| 71 |
+
Table 1. Attribute comparison of ego-view accident video datasets.
|
| 72 |
+
|
| 73 |
+
<table><tr><td>Datasets</td><td>Years</td><td>#Clips</td><td>#Frames</td><td>Bboxes</td><td>Tracklet</td><td>TA.</td><td>TT/S</td><td></td></tr><tr><td>DAD [5]</td><td>2016</td><td>1,750</td><td>175K</td><td></td><td>✓</td><td>✓</td><td></td><td>R</td></tr><tr><td>A3D [67]</td><td>2019</td><td>3,757</td><td>208K</td><td></td><td></td><td>✓</td><td></td><td>R</td></tr><tr><td>GTACrash [28]</td><td>2019</td><td>7,720</td><td>-</td><td></td><td></td><td>✓</td><td></td><td>S</td></tr><tr><td>VIENA2[1]</td><td>2019</td><td>15,000</td><td>2.25M</td><td></td><td></td><td>✓</td><td></td><td>S</td></tr><tr><td>CTA [68]</td><td>2020</td><td>1,935</td><td>-</td><td></td><td></td><td>✓</td><td>✓</td><td>R</td></tr><tr><td>CCD [2]</td><td>2021</td><td>1,381</td><td>75K</td><td></td><td>✓</td><td>✓</td><td></td><td>R</td></tr><tr><td>TRA [36]</td><td>2022</td><td>560</td><td>-</td><td></td><td></td><td>✓</td><td></td><td>R</td></tr><tr><td>DADA-2000 [15]</td><td>2022</td><td>2000</td><td>658k</td><td></td><td></td><td>✓</td><td></td><td>R</td></tr><tr><td>DoTA [66]</td><td>2022</td><td>5,586</td><td>732K</td><td>partial</td><td></td><td>✓</td><td></td><td>R</td></tr><tr><td>ROL [27]</td><td>2023</td><td>1000</td><td>100K</td><td></td><td></td><td>✓</td><td></td><td>R</td></tr><tr><td>DeepAccident [60]</td><td>2023</td><td>-</td><td>57k</td><td></td><td></td><td>✓</td><td></td><td>S</td></tr><tr><td>CTAD [39]</td><td>2023</td><td>1,100</td><td>-</td><td></td><td></td><td>✓</td><td></td><td>S</td></tr><tr><td>MM-AU</td><td>2023</td><td>11,727</td><td>2.19M</td><td>✓</td><td></td><td>✓</td><td>✓</td><td>R</td></tr></table>
|
| 74 |
+
|
| 75 |
+
Bboxes: bounding boxes of objects, TA.: temporal annotation of the accident, TT: text descriptions, R/S: real or synthetic datasets.
|
| 76 |
+
|
| 77 |
+
anticipation with Visual Explanation (DRIVE) assisted by driver attention [15] and obtain a significant improvement.
|
| 78 |
+
|
| 79 |
+
Accident Classification: Because of the video data limitation of different accident categories, there is a paucity of research on ego-view video-based accident classification, and many works concentrate on the surveillance view with limited image set [17, 29, 39]. Kang et al. [24] propose a Vision Transformer-Traffic Accident (ViT-TA) model that classifies the ego-view traffic accident scenes, highlighting key objects through attention maps to increase the objectivity and reliability of functional scenes.
|
| 80 |
+
|
| 81 |
+
The aforementioned works focus on the monocular vision modality, while the spatial or temporal causal part of the accident video is hard to learn effectively owing to the complex evolution of accidents.
|
| 82 |
+
|
| 83 |
+
Accident Reason Answering: Closely related to this work, You and Han [68] investigate the causal-effect recognition of accident scenarios, and build the class taxonomy of traffic accidents. Besides, SUTD-TrafficQA [65] formulates the reason explanation and prevention advice of accidents by the Question-Answering (QA) framework, which involves the reasoning of dynamic and complex traffic scenes. Based on this, Liu et al. [37] reason the cross-modal causal relation to fulfill the traffic accident reason answering. We believe QA frameworks [64, 71] can provide a direct understanding for telling why the accident occurs. However, there is no explicit double-check solution to verify what key elements (e.g., specific actions or objects) are dominant for subsequent accidents.
|
| 84 |
+
|
| 85 |
+
# 2.2. Ego-View Accident Understanding Datasets
|
| 86 |
+
|
| 87 |
+
The community has realized the importance of accident video understanding for safe driving perception, and some benchmarks have been released in recent years. Tab. 1 presents the attribute comparison of available ego-view accident video datasets. DAD [5] is the pioneering dataset, where each video clip is trimmed with 10 accident frames
|
| 88 |
+
|
| 89 |
+
Table 2. Static attributes of ego-view accident video datasets.
|
| 90 |
+
|
| 91 |
+
<table><tr><td rowspan="2">Datasets</td><td colspan="4">weather condition</td><td colspan="5">occasion situations</td></tr><tr><td>sunny</td><td>rainy</td><td>snowy</td><td>foggy</td><td>highway</td><td>urban</td><td>rural</td><td>mountain tunnel</td><td></td></tr><tr><td>CCD [2]</td><td>1,306</td><td>61</td><td>14</td><td>0</td><td>148</td><td>725</td><td>502</td><td>5</td><td>1</td></tr><tr><td>A3D [67]</td><td>2,990</td><td>251</td><td>474</td><td>42</td><td>225</td><td>2,458</td><td>720</td><td>328</td><td>26</td></tr><tr><td>DADA-2000 [15]</td><td>1,860</td><td>130</td><td>10</td><td>-</td><td>1,420</td><td>380</td><td>180</td><td>-</td><td>20</td></tr><tr><td>DoTA [66]</td><td>4,920</td><td>341</td><td>313</td><td>12</td><td>617</td><td>3,656</td><td>1,148</td><td>145</td><td>20</td></tr><tr><td>MM-AU</td><td>10,116</td><td>761</td><td>793</td><td>57</td><td>1082</td><td>7,563</td><td>2,548</td><td>484</td><td>50</td></tr></table>
|
| 92 |
+
|
| 93 |
+

|
| 94 |
+
Figure 2. Some samples of object annotation in MM-AU.
|
| 95 |
+
|
| 96 |
+

|
| 97 |
+
|
| 98 |
+

|
| 99 |
+
|
| 100 |
+
at the end of each clip. This setting is also adopted in the CCD dataset [2] with a total of 50 frames for each clip. A3D [67] and DoTA [66] are used for unsupervised ego-view accident detection [13, 66, 67]. Notably, the DADA-2000 dataset [15] annotates extra driver attention data. Because of the difficulty of collecting enough accident videos in the real world, some works leverage simulation tools to synthesize virtual accident videos or object tracklets, such as GTACrash [28], VIENA² [1], DeepAccident [60], and CTAD [39]. However, the real-synthetic domain gap is a tough nut to crack because it is rather hard to reproduce the natural evolution process of accidents in simulation tools. Except for CTA [68], these datasets concentrate on the vision modality and do not explore meaningful text descriptions.
|
| 101 |
+
|
| 102 |
+
# 3. MM-AU Dataset
|
| 103 |
+
|
| 104 |
+
The videos in MM-AU are collected from publicly available ego-view accident datasets, such as CCD [2], A3D [67], DoTA [66], and DADA-2000 [15], and various video streaming sites, such as Youtube $^{1}$ , Bilibili $^{2}$ , and Tencent $^{3}$ , etc. As presented in Tab. 2, the weather conditions and occasion situations are varied, and our MM-AU has the largest sample scale. In total, 11,727 videos with 2,195,613 frames are collected and annotated. All videos are annotated with text descriptions, accident windows, and accident time stamps. To the best of our knowledge, MM-AU is the largest and most fine-grained ego-view multi-modal accident dataset. The annotation process of MM-AU is depicted as follows.
|
| 105 |
+
|
| 106 |
+
Accident Window Annotation: Leveraging the annotation criteria of DoTA [66], the accident window is labeled by 5 volunteers, and the final frame indexes are determined by average operation. The temporal annotation contains the beginning time of the accident $t_{ai}$ , the end time of the accident $t_{ae}$ , and the beginning time of the collision $t_{co}$ . The
|
| 107 |
+
|
| 108 |
+
1 https://www.youtube.com
|
| 109 |
+
$^{2}$ https://www.bilibili.com
|
| 110 |
+
3https://v.qq.com
|
| 111 |
+
|
| 112 |
+

|
| 113 |
+
[Figure 3 panels omitted: (a) frame ratio of different windows (e.g., accident-free or accident windows); (b) bounding boxes (#bboxes) of MM-AU and BDD-100K; (c) #frames and #bboxes in coarse and fine annotation sets; (d) accident category distribution of 58 kinds of accidents; (e) accident reason distributions w.r.t. different road semantics.]
|
| 126 |
+
Figure 3. The annotation attribute statistics in MM-AU for the temporal, object, and text annotations. Better viewed in zoomed-in mode.
|
| 127 |
+
|
| 128 |
+
frame ratio distributions within different windows of $[0, t_{ai}]$ , $[t_{ai}, t_{co}]$ , $[t_{ai}, t_{ae}]$ , $[t_{co}, t_{ae}]$ , and $[t_{ae}, \text{end}]$ are shown in Fig. 3(a). It can be seen that, in many videos, the accident ends in the last frame. The accident window $[t_{ai}, t_{ae}]$ of most videos occupies half of the video length, which is useful for model training in accident video understanding.
|
| 129 |
+
|
| 130 |
+
Object Detection Annotation: To facilitate object-centric accident video understanding, we annotate 7 classes of road participants (i.e., cars, traffic lights, pedestrians, trucks, buses, and cyclists) in MM-AU. To fulfill an efficient annotation, we first employ the YOLOX [16] detector (pre-trained on the COCO dataset [35]) to initially detect the objects in the raw MM-AU videos. Second, MM-AU has two sets of bounding-box (#bboxes) annotations, which we name the fine annotation set and the coarse annotation set. For the fine annotation set, ten volunteers took three months to manually correct the wrong detections with LabelImg every five frames, and 2,233,683 bounding boxes within 463,207 frames are obtained. Each bounding box is double-checked for the final confirmation. Fig. 2 presents some samples of object annotations in MM-AU. As for the coarse annotation set, we utilize the state-of-the-art (SOTA) DiffusionDet [7] to obtain the object bounding boxes for the remainder of frames in MM-AU. Fig. 3(b) presents the #bboxes on different road participants with a comparison to BDD-100K [69], and Fig. 3(c) shows the #frames and #bboxes on the fine and coarse annotation sets.
|
| 131 |
+
|
| 132 |
+
Text Description Annotation: Different from previous ego-view accident video datasets, MM-AU annotates three kinds of text descriptions: accident reason, prevention advice, and accident category descriptions. The accident category description is aligned to the accident window $[t_{ai}, t_{ae}]$ , while the reason and prevention advice descriptions are aligned to the near-accident window $[t_{ai} - 40, t_{ai}]$ . The descriptions and the video sequences do not show a unique correlation, and each description sentence usually correlates with many videos because of the co-occurrence. Similar to the work [14], based on the road layout, road user categories, and their movement actions, we annotate 58 description sentences for accident categories, and their sample distribution is shown in Fig. 3(d).
|
| 135 |
+
|
| 136 |
+
We annotate 110 pairs of sentences for accident reason and prevention advice descriptions correlating to four kinds of road semantics, i.e., pedestrian-centric, vehicle-centric, road-centric, and others (environmental issue). Fig. 3(e) shows the accident reason distribution concerning different road semantics. It is clear that the "failure to notice other participants" is dominant for pedestrian-centric accident reasons, and "speeding", "sudden braking", and "illegal overtaking" are the main kinds of vehicle-centric accident reasons. Following the form of Video Question Answering (VideoQA) task [64], we provide an Accident reason Answering (ArA) task while there is only one question "What is the reason for the accident in this video?" For each accident reason of a video, we further provide four reasonable distractors to form a multi-choice ArA task, and the distractor reasons are all unrelated to the target accident video. We obtain 58,650 ArA pairs in MM-AU.
|
| 137 |
+
|
| 138 |
+
# 4. AdVersa-SD
|
| 139 |
+
|
| 140 |
+
This section presents our AdVersa-SD with the abductive text-video coherent learning for ego-view accident video understanding. As aforementioned, we partition each accident video into three video segments, i.e., the normal video segment $V_{o}$ , the near-accident segment $V_{r}$ , and the
|
| 141 |
+
|
| 142 |
+

|
| 143 |
+
Figure 4. The structure of Abductive CLIP contains four interaction groups with one positive Co-CP and two negative Co-CPs for each interaction group, where $t_{ar} = t_{ai} - 40$ .
|
| 144 |
+
|
| 145 |
+
accident segment $V_{a}$ . Correspondingly, we annotate the text descriptions of the accident reason $t_{r}$ , the prevention advice $t_{p}$ , and the accident category $t_{a}$ . To be clear, we define a denotation of text-video Co-oCurrence Pair (Co-CP) to represent the natural co-occurrence of video clip and text description, e.g., $(V_{r}, t_{r})$ , and $(V_{a}, t_{a})$ .
|
| 146 |
+
|
| 147 |
+
# 4.1. Abductive CLIP
|
| 148 |
+
|
| 149 |
+
For abductive video understanding, we propose an Abductive CLIP for AdVersa-SD to fulfill the coherent semantic learning within different Co-CPs. The structure of Abductive CLIP is illustrated in Fig. 4. We create two virtual Co-CPs for training Abductive CLIP, i.e., the $(V_{o},\tilde{t}_{a})$ and $(\tilde{V}_r,t_p)$ . $(V_o,\tilde{t}_a)$ represents the Co-CP of anonymous accident category description $\tilde{t}_a$ and normal video clip $V_{o}$ while $(\tilde{V}_r,t_p)$ denotes the Co-CP of $\tilde{V}_r$ and the prevention advice description $t_p$ . Notably, $\tilde{t}_a$ is obtained by adding the antonym of verbs to the accident category description, such as "does/do not", "are not", etc. In addition, to make a dissipation process from the near-accident state to the normal state, $\tilde{V}_r$ is obtained by reverse frame rearrangement of $V_{r}$ .
|
| 150 |
+
|
| 151 |
+
Certainly, because each video segment may have different numbers of video frames, we create Co-CPs by coupling the text description with the randomly selected 16 successive frames within the certain video segment. Consequently, the training for Abductive CLIP is straightforward by enhanced difference learning of the embeddings of Co-CPs.
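A minimal sketch of how the Co-CPs could be assembled for one annotated video, following the segment definitions above (an accident-free prefix $V_o$, the near-accident window $[t_{ai}-40, t_{ai}]$ for $V_r$, the accident window $[t_{ai}, t_{ae}]$ for $V_a$, and the reversed clip $\tilde{V}_r$); the frame indices, text strings, and helper names are illustrative, not taken from MM-AU.

```python
import random

def sample_clip(frames, clip_len=16):
    """Randomly pick `clip_len` successive frames from a segment (repeat the last frame if short)."""
    if len(frames) < clip_len:
        frames = frames + [frames[-1]] * (clip_len - len(frames))
    start = random.randint(0, len(frames) - clip_len)
    return frames[start:start + clip_len]

def build_cocps(frames, t_ai, t_ae, t_r, t_p, t_a, t_a_neg):
    """Assemble one positive Co-CP per interaction group for a single annotated video."""
    t_ar = max(0, t_ai - 40)                         # near-accident window start
    V_o, V_r, V_a = frames[:t_ar], frames[t_ar:t_ai], frames[t_ai:t_ae]
    V_r_rev = list(reversed(V_r))                    # reversed near-accident clip, i.e. ~V_r
    return [
        (sample_clip(V_o), t_a_neg),                 # (V_o, ~t_a): normal clip, negated category text
        (sample_clip(V_r), t_r),                     # (V_r, t_r): near-accident clip, accident reason
        (sample_clip(V_r_rev), t_p),                 # (~V_r, t_p): reversed clip, prevention advice
        (sample_clip(V_a), t_a),                     # (V_a, t_a): accident clip, accident category
    ]

frames = list(range(300))                            # toy video given as 300 frame indices
cocps = build_cocps(frames, t_ai=180, t_ae=260,
                    t_r="the pedestrian crosses without noticing traffic",
                    t_p="slow down and yield to the crossing pedestrian",
                    t_a="the ego-car hits a crossing pedestrian",
                    t_a_neg="the ego-car does not hit a crossing pedestrian")
for clip, text in cocps:
    print(len(clip), text)
```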
|
| 152 |
+
|
| 153 |
+
Contrastive Interaction Loss: Abductive CLIP takes the XCLIP model [42] as the backbone. Subsequently, we input each Co-CP into XCLIP to obtain the video clip feature $\mathbf{z}_v$ and the text feature $\mathbf{z}_t$ . To achieve the purpose stated in Fig. 4, we provide a Contrastive Interaction Loss (CILoss) to enable interactive Co-CP learning. The CILoss for different interaction groups of Co-CPs is the same, and consistently defined as:
|
| 154 |
+
|
| 155 |
+
$$
|
| 156 |
+
\mathcal{L}_{\mathrm{CILoss}} = - \sum_{i=1}^{B} \log \frac{E\left(\mathbf{z}_{v_i}^{p}, \mathbf{z}_{t_i}^{p}\right)}{\mathcal{K}} \tag{1}
|
| 157 |
+
$$
|
| 158 |
+
|
| 159 |
+
$$
|
| 160 |
+
\mathcal{K} = \sum_{j=1}^{B} \left[ E(\mathbf{z}_{v_i}^{p}, \mathbf{z}_{t_j}^{p}) + E(\mathbf{z}_{v_i}^{p}, \mathbf{z}_{t_{j \neq i}}^{n_1}) + E(\mathbf{z}_{v_i}^{p}, \mathbf{z}_{t_{j \neq i}}^{n_2}) \right],
|
| 161 |
+
$$
|
| 162 |
+
|
| 163 |
+
where $E(\mathbf{z}_v, \mathbf{z}_t) = e^{\mathbf{z}_v^\top \mathbf{z}_t / \tau}$ computes the coherence degree of the video clip feature $\mathbf{z}_v$ and the text feature $\mathbf{z}_t$ . $B$ denotes the batch size, the superscripts $p$ and $n_1 / n_2$ refer to the positive Co-CP and the negative Co-CPs, respectively, $\tau$ is a learnable hyperparameter, and $i$ and $j$ are the sample indexes within each batch.
|
| 166 |
+
|
| 167 |
+
CILoss aims to enhance the coherence between the text description and video frames in Co-CP by enlarging the distance of text descriptions or video frames with the ones in negative Co-CPs. Abductive CLIP is optimized by minimizing the summation of four kinds of $\mathcal{L}_{\mathrm{CILoss}}^{o,r,p,a}$ .
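To make Eq. (1) concrete, a small sketch of the CILoss for one interaction group, given batched video embeddings of the positive Co-CPs and text embeddings of the positive and two negative Co-CPs; the shapes and temperature value are illustrative.

```python
import numpy as np

def ciloss(z_v_p, z_t_p, z_t_n1, z_t_n2, tau=0.07):
    """Contrastive interaction loss of Eq. (1) for a single interaction group.

    z_v_p:  (B, D) video embeddings of the positive Co-CPs
    z_t_p:  (B, D) text embeddings of the positive Co-CPs
    z_t_n1, z_t_n2: (B, D) text embeddings of the two negative Co-CPs
    """
    E_pp = np.exp(z_v_p @ z_t_p.T / tau)        # E(z_{v_i}^p, z_{t_j}^p), shape (B, B)
    E_n1 = np.exp(z_v_p @ z_t_n1.T / tau)
    E_n2 = np.exp(z_v_p @ z_t_n2.T / tau)

    off_diag = 1.0 - np.eye(len(z_v_p))         # the negative terms run over j != i
    K = E_pp.sum(axis=1) + (E_n1 * off_diag).sum(axis=1) + (E_n2 * off_diag).sum(axis=1)
    return -np.log(np.diag(E_pp) / K).sum()

rng = np.random.default_rng(0)
B, D = 4, 32
embed = lambda: rng.normal(size=(B, D)) / np.sqrt(D)
print(ciloss(embed(), embed(), embed(), embed()))
```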
|
| 168 |
+
|
| 169 |
+
# 4.2. Extension to Accident Video Diffusion
|
| 170 |
+
|
| 171 |
+
To verify Abductive CLIP, this work treats it as an engine to drive the accident video diffusion task for explicitly exploring the causal-effect relation of accident occurrence. Because traffic accidents are commonly caused by the irregular or sudden movement of road participants, the video diffusion model should have the ability for object-level representation. As shown in Fig. 5, we propose an Object-centric Accident Video Diffusion model (OAVD) which takes the Latent Diffusion Model (LDM) [46] as the baseline and extends it to the video diffusion with the input of Co-CPs and $K$ steps of forward and reverse diffusion process.
|
| 172 |
+
|
| 173 |
+
The structure of OAVD is similar to that of Tune-A-Video [61]; differently, the 3D-CAB block of the 3D U-net module in Fig. 5 is redesigned and contains 3D-Conv layers (four layers with kernel size $(3,1,1)$ ), a text-video Cross-Attention (CA) layer, a Spatial Attention (SA) layer, a Temporal Attention (TA) layer, and a Gated self-attention (GA) layer [34] for frame correlation and object location consideration. To be capable of object-level video diffusion, we further add a masked representation reconstruction path on top of the frame-level reconstruction. The inference phase has the same input form as OAVD training and generates the new frame clip $V_{g}$ .
|
| 174 |
+
|
| 175 |
+
Masked Video Frame Diffusion: Our aim is to learn the 3D U-net by forward adding noise to the clean latent representation of raw video frames and inverse denoising the noise $\mathbf{z}_v$ over $K$ time steps, conditioned on the text prompt $t$ and the bounding boxes. This formulation enables the derivation of the masked video representation $\mathbf{z}_{mask}$ of $\overline{\overline{V}}$ to fix the frame background details in the diffusion process and fulfill object-centric video generation. The optimization of the 3D U-net is achieved by minimizing:
|
| 176 |
+
|
| 177 |
+
$$
|
| 178 |
+
\mathbb{E}_{V, \mathbf{e} \sim \mathcal{N}(0, I), k, \mathbf{z}_t, \mathbf{z}_b, \overline{\overline{V}}} \Big\{ \| \mathbf{e} - \phi_\theta\left(\mathbf{z}_v, k, \mathbf{z}_t, \mathbf{z}_b\right) \|_2^2 \; + \tag{2}
|
| 179 |
+
$$
|
| 180 |
+
|
| 181 |
+
$$
|
| 182 |
+
\lambda \| \mathbf{e} (\mathbf{1} - \mathbf{z}_{mask}) - \phi_\theta(\mathbf{z}_v, k, \mathbf{z}_t, \mathbf{z}_b) (\mathbf{1} - \mathbf{z}_{mask}) \|_1 \Big\},
|
| 183 |
+
$$
|
| 184 |
+
|
| 185 |
+
where the first term is Mean Square Error $(\mathcal{L}_{MSE})$ and the second term denotes the reconstruction loss of the Masked Latent Representation $(\mathcal{L}_{MASK})$ . $k \in [1, \dots, K]$ denotes the diffusion step ( $K = 1000$ ), $\mathbf{e}$ is the ground-truth noise representation in each diffusion step. $\phi_{\theta}$ is the 3D U-net to be optimized which contains the parameters of 3D
|
| 186 |
+
|
| 187 |
+

|
| 188 |
+
Figure 5. The structure of the object-centric Accident Video Diffusion model (OAVD), where the $V$ and $t$ are the video frame clip and the corresponding text prompt. Object bounding boxes $Bbox$ in each video frame that can be obtained by object detectors. $\overline{\overline{V}}$ is the masked frame clip where the pixels in object regions are set to 0. Different attention modules, i.e., SA, CA, and TA, follow the ones of [61] while with a dense multi-head attention.
|
| 189 |
+
|
| 190 |
+
Cross-Attention Blocks (3D-CAB) with down-sample and up-sample layers (Sample). $\lambda = 0.5$ is a parameter for balancing the weights of $\mathcal{L}_{MSE}$ and $\mathcal{L}_{MASK}$ . $\mathbf{1}$ is an identity tensor with the same size of $\mathbf{e}$ , and the masked noise representation $\mathbf{z}_{mask}$ is obtained by Denoising Diffusion Probabilistic Model (DDPM) Scheduler [20] on the binarization of a latent representation $\mathbf{z}_l$ ( $\mathbf{z}_l = \mathrm{VAE}(\mathbf{z}_v)$ ) through the Variational Autoencoder (VAE) in LDM [46] as:
|
| 191 |
+
|
| 192 |
+
$$
|
| 193 |
+
\mathbf{z}_{mask} = \mathrm{DDPMScheduler}\left(\mathbf{m}_{(z)}, k, \mathbf{e}\right),
|
| 194 |
+
$$
|
| 195 |
+
|
| 196 |
+
$$
|
| 197 |
+
\mathbf{m}_{(z)} = \begin{cases} 0 & \text{if } \mathbf{z}_l < 0.5, \\ 1 & \text{if } \mathbf{z}_l \geq 0.5. \end{cases} \tag{3}
|
| 198 |
+
$$
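A compact sketch of the objective in Eq. (2), assuming the predicted noise $\phi_\theta(\mathbf{z}_v, k, \mathbf{z}_t, \mathbf{z}_b)$ and the re-noised mask $\mathbf{z}_{mask}$ are already computed; the tensor shapes, the use of means instead of raw norms, and the mask orientation are illustrative simplifications rather than the paper's exact implementation.

```python
import numpy as np

def binarize_latent(z_l):
    """m_(z) of Eq. (3): threshold the latent representation at 0.5."""
    return (z_l >= 0.5).astype(np.float32)

def oavd_loss(eps_true, eps_pred, z_mask, lam=0.5):
    """Combined objective of Eq. (2): noise-prediction MSE plus a masked L1 reconstruction term.

    eps_true, eps_pred: noise tensors of shape (T, C, H, W) in latent space
    z_mask: tensor of the same shape; (1 - z_mask) selects the entries whose
            reconstruction is additionally penalized to keep the frame background fixed
    """
    l_mse = np.mean((eps_true - eps_pred) ** 2)
    keep = 1.0 - z_mask
    l_mask = np.mean(np.abs(eps_true * keep - eps_pred * keep))
    return l_mse + lam * l_mask

rng = np.random.default_rng(3)
shape = (16, 4, 8, 8)                               # 16 latent frames, illustrative resolution
eps_true, eps_pred = rng.normal(size=shape), rng.normal(size=shape)
z_mask = binarize_latent(rng.uniform(size=shape))   # stand-in for the DDPM-noised mask
print(oavd_loss(eps_true, eps_pred, z_mask))
```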
|
| 199 |
+
|
| 200 |
+
Gated Bbox Representation: The key insight is to involve object bounding boxes to enhance the causal object region learning concerning the related text words, which is useful for eliminating the influence of the frame background and explicitly checking the role of certain text words for subsequent accident situations. Inspired by the Gated self-Attention (GA) [34], the location embedding $\mathbf{z}_b$ , collaborating with the output of CA in 3D-CAB, is obtained by:
|
| 201 |
+
|
| 202 |
+
$$
|
| 203 |
+
\mathbf{z}_b = \mathrm{MLP}(\mathrm{Fourier}(Bbox)), \tag{4}
|
| 204 |
+
$$
|
| 205 |
+
|
| 206 |
+
where $\mathbf{z}_b$ is obtained from Bbox by MLP layers with the Fourier embedding [41].
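Eq. (4) can be realized with a standard sinusoidal (Fourier) embedding of normalized box coordinates followed by a small MLP; the frequency count and layer sizes below are illustrative rather than the paper's actual configuration.

```python
import numpy as np

def fourier_embed(boxes, num_freqs=8):
    """Map normalized boxes (N, 4) to sinusoidal features (N, 4 * 2 * num_freqs)."""
    freqs = 2.0 ** np.arange(num_freqs)               # 1, 2, 4, ...
    angles = boxes[..., None] * freqs * np.pi          # (N, 4, num_freqs)
    feats = np.concatenate([np.sin(angles), np.cos(angles)], axis=-1)
    return feats.reshape(len(boxes), -1)

def mlp(x, w1, b1, w2, b2):
    """Two-layer MLP with ReLU producing the location embedding z_b."""
    return np.maximum(x @ w1 + b1, 0.0) @ w2 + b2

rng = np.random.default_rng(4)
boxes = rng.uniform(size=(5, 4))                       # 5 boxes as (x1, y1, x2, y2) in [0, 1]
feats = fourier_embed(boxes)                           # (5, 64)
d_in, d_hid, d_out = feats.shape[1], 128, 256
z_b = mlp(feats,
          rng.normal(size=(d_in, d_hid)) * 0.02, np.zeros(d_hid),
          rng.normal(size=(d_hid, d_out)) * 0.02, np.zeros(d_out))
print(z_b.shape)                                       # (5, 256), fed to the gated self-attention
```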
|
| 207 |
+
|
| 208 |
+
OAVD Inference: The OAVD inference stage inputs the Co-CPs, while the Denoising Diffusion Implicit Model (DDIM) scheduler [50] is applied to the trained 3D U-net $\phi_{\theta}$ conditioned on the text prompt $t$ and Bbox. The frame clips $V$ within the Co-CPs are fed into the inference stage with the same dimensions as the generated $V_{g}$ .
|
| 209 |
+
|
| 210 |
+
# 5. Experiments
|
| 211 |
+
|
| 212 |
+
# 5.1. Experimental Details
|
| 213 |
+
|
| 214 |
+
AdVersa-SD takes as input the object bounding boxes and accident reasons. As a prerequisite, we first carry out a benchmark evaluation for the Object Detection (OD) and Accident reason Answering (ArA) tasks, which is of crucial importance to video diffusion. Second, we evaluate our AdVersa-SD with extensive video diffusion experiments.
|
| 215 |
+
|
| 216 |
+
(1) OD Task: We select 11 state-of-the-art detectors to be presented in Tab. 3 in the OD benchmark evaluation. All detectors used the corresponding architectures provided in MMDetection [6] and MMYOLO while keeping important hyperparameters equal, such as batch size, initial learning rate, and epochs. All training and inference are implemented on three GeForce RTX 3090s. All the detectors are pre-trained on the BDD-100K dataset [69], and fine-tuned with the training set of fine object annotations. Notably, to check the OD performance in accident window, we provide two versions of detectors fine-tuned on the training set coming from whole frame windows (abbrev., V1-Train [□, □, □]) and the ones fine-tuned on the training set of accident-free windows (abbrev., V2-Train [□, □]).
|
| 217 |
+
|
| 218 |
+
We use Average Precision (AP50) and Average Recall (AR) [35] to evaluate the detection results with the threshold of $50\%$ detection score.
|
| 219 |
+
|
| 220 |
+
(2) ArA Task: We follow the task of multi-choice Video Question Answering (VQA) to formulate the ArA task while the question is "What is the reason for the accident in this video?" The performance is measured by the accuracy, i.e., the percentage of questions that are correctly answered.
|
| 221 |
+
(3) Abductive Video Diffusion Task: In AdVersa-SD, we aim to explore the cause-effect evolution of accident videos conditioned by the descriptions of accident reasons or prevention advice. Hence, based on the input form of OAVD in AdVersa-SD, we input the object bboxes and Co-CPs of $(V_r, t_r)$ and $(V_r, t_p)$ in the evaluations. Two state-of-the-art video diffusion models including the DDIM inversion-based Tune-A-Video (TAV) [61] and the training-free ControlVideo (CVideo) [72] are selected. We generate 1500 clips (with 16 frames/clip) for all diffusion experiments, in which the object boxes are pre-detected by DiffusionDet [7].
|
| 222 |
+
|
| 223 |
+
Similar to previous video diffusion models, Fréchet Video Distance (FVD) [55] is taken for quality evaluation of the synthetic video clips. We also use the CLIP score $(\mathbf{CLIP}_S)$ [61] to measure the alignment degree between text prompts and video frames.
|
| 224 |
+
|
| 225 |
+
In the evaluation, 6000 pairs of Co-CPs in MM-AU are adopted to train the AdVersa-SD and the Tune-A-Video
|
| 226 |
+
|
| 227 |
+
Table 3. The results of V1-Train [☐, ☐, ☐]) and V2-Train [☐, ☐]) for 11 state-of-the-art detectors on the MM-AU.
|
| 228 |
+
|
| 229 |
+
<table><tr><td rowspan="3">Detectors</td><td rowspan="3">Years</td><td colspan="6">V1-Train [□□□]</td><td colspan="4">V2-Train [□□□]</td><td rowspan="3">Anchor</td><td rowspan="3">GFlops</td><td rowspan="3">#Params.</td></tr><tr><td>val. □□□</td><td>test. □□□</td><td>AR</td><td>mAP50</td><td>AR</td><td>test. □</td><td>mAP50</td><td>AR</td><td>mAP50</td><td>AR</td></tr><tr><td>mAP50</td><td>AR</td><td>mAP50</td><td>AR</td><td>mAP50</td><td>AR</td><td>mAP50</td><td>AR</td><td>mAP50</td><td>AR</td></tr><tr><td>FasterRCNN [45]</td><td>2015</td><td>0.674</td><td>0.634</td><td>0.666</td><td>0.623</td><td>0.664</td><td>0.620</td><td>0.544</td><td>0.524</td><td>0.497</td><td>0.509</td><td>✓</td><td>0.19T</td><td>41.38M</td></tr><tr><td>CornerNet [30]</td><td>2018</td><td>0.495</td><td>0.625</td><td>0.485</td><td>0.619</td><td>0.483</td><td>0.624</td><td>0.436</td><td>0.563</td><td>0.456</td><td>0.598</td><td></td><td>0.71T</td><td>201M</td></tr><tr><td>CascadeRPN [58]</td><td>2019</td><td>0.662</td><td>0.699</td><td>0.664</td><td>0.696</td><td>0.649</td><td>0.689</td><td>0.579</td><td>0.663</td><td>0.532</td><td>0.624</td><td>✓</td><td>0.18T</td><td>41.97M</td></tr><tr><td>CenterNet [10]</td><td>2019</td><td>0.054</td><td>0.238</td><td>0.051</td><td>0.233</td><td>0.047</td><td>0.224</td><td>0.161</td><td>0.260</td><td>0.155</td><td>0.257</td><td></td><td>20.38G</td><td>14.21M</td></tr><tr><td>DeTR [4]</td><td>2020</td><td>0.367</td><td>0.407</td><td>0.377</td><td>0.403</td><td>0.363</td><td>0.403</td><td>0.275</td><td>0.329</td><td>0.254</td><td>0.318</td><td></td><td>44.55G</td><td>28.83M</td></tr><tr><td>EfficientNet [53]</td><td>2020</td><td>0.310</td><td>0.417</td><td>0.310</td><td>0.412</td><td>0.293</td><td>0.404</td><td>0.075</td><td>0.128</td><td>0.073</td><td>0.133</td><td></td><td>57.28G</td><td>18.46M</td></tr><tr><td>Deformable-DeTR [74]</td><td>2021</td><td>0.660</td><td>0.671</td><td>0.661</td><td>0.668</td><td>0.652</td><td>0.663</td><td>0.626</td><td>0.631</td><td>0.587</td><td>0.626</td><td></td><td>0.18T</td><td>40.1M</td></tr><tr><td>YOLOx [16]</td><td>2021</td><td>0.673</td><td>0.709</td><td>0.672</td><td>0.698</td><td>0.670</td><td>0.698</td><td>0.563</td><td>0.627</td><td>0.540</td><td>0.626</td><td></td><td>13.33G</td><td>8.94M</td></tr><tr><td>YOLOv5s [23]</td><td>2021</td><td>0.757</td><td>0.766</td><td>0.748</td><td>0.764</td><td>0.743</td><td>0.761</td><td>0.660</td><td>0.716</td><td>0.636</td><td>0.712</td><td>✓</td><td>8.13G</td><td>12.35M</td></tr><tr><td>DiffusionDet [7]</td><td>2023</td><td>0.731</td><td>0.749</td><td>0.733</td><td>0.745</td><td>0.718</td><td>0.738</td><td>0.701</td><td>0.729</td><td>0.660</td><td>0.716</td><td></td><td>-</td><td>26.82M</td></tr><tr><td>YOLOv8 [54]</td><td>2023</td><td>0.716</td><td>0.754</td><td>0.715</td><td>0.753</td><td>0.717</td><td>0.755</td><td>0.606</td><td>0.702</td><td>0.597</td><td>0.703</td><td></td><td>14.28G</td><td>11.14M</td></tr></table>
|
| 230 |
+
|
| 231 |
+
Notes: To ensure that the distribution of accident windows is essentially the same on the training, validation, and testing sets, we divide the bounding box annotations in the ratio of 7:1.5:1.5. The results of GFlops and #Params. are reported by the tools in MMDetection and MMYOLO of the testing phase.
|
| 232 |
+
|
| 233 |
+
model. The learning rate of Abductive CLIP in AdVersa-SD is $1 \times 10^{-6}$ with a batch size of 2, trained for 30,000 iteration steps. The learning rate of OAVD is $5 \times 10^{-6}$ with the same batch size, trained for 8,000 iteration steps. The Adam optimizer is adopted with the default $\beta_{1} = 0.9$ and $\beta_{2} = 0.999$ on a platform with 2 GeForce RTX 3090s.
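For reference, a minimal sketch of the optimizer setup described above (the module objects are placeholders standing in for the actual networks):

```python
import torch

# Placeholder modules standing in for the Abductive CLIP and the OAVD 3D U-net.
abductive_clip = torch.nn.Linear(512, 512)
oavd_unet = torch.nn.Linear(512, 512)

clip_optim = torch.optim.Adam(abductive_clip.parameters(), lr=1e-6, betas=(0.9, 0.999))
oavd_optim = torch.optim.Adam(oavd_unet.parameters(), lr=5e-6, betas=(0.9, 0.999))
```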
|
| 234 |
+
|
| 235 |
+
# 5.2. Result Analysis
|
| 236 |
+
|
| 237 |
+
(1) OD Evaluations: Tab. 3 presents the detection results of 11 state-of-the-art detectors. From the results, we can see all the detectors generate a degradation for the accident window test. CenterNet [10] and EfficientNet [53] show limited ability for the OD task in the accident scenarios and the metric values decrease significantly for V2-Train mode. As claimed by previous research, pure Transformer-based detectors, such as DeTR [4], demonstrate limited performance. Deformable-DeTR has improved performance but is still not better than the CNN-based ones for traffic accident cases. YOLOv5s [23] and DiffusionDet [7] are the two leading approaches for the OD task. However, from the results difference obtained by V1-Train [□, □] and V2-Train [□, □], DiffusionDet [7] shows superior performance to the testing set of accident window in V2-Train. It indicates that diffusion-based object detection may be more robust with better generalization ability. More qualitative results can be viewed in the supplemental file.
|
| 238 |
+
|
| 239 |
+
(2) ArA Evaluations: We present in Tab. 4 and Fig. 6 the performances of the state-of-the-art on the ArA task. We carefully select the baseline methods to include temporal relation network (HCRN [31]), graph transformer network (VGT [63], CoVGT [64]), cross-modal pre-trained transformers (ClipBERT [33]) and those using large language models (LMMs) (FrozenGQA [62] and SeViLA [70]). The methods also include frame-centric and more fine-grained object-centric video representations. Our key observations are: LLM-based methods, such as SeViLA which uses Flan T5-XL (3B) [8], show absolute advantage in this task, surpassing the second-ranked method CoVGT by $7\%$ to $9\%$ .
|
| 240 |
+
|
| 241 |
+
Table 4. The Accident Reason Answering (ArA) Accuracy (Acc. %) on the validation (val.) and testing (test.) set of MM-AU by 6 SOTA methods whose size of learnable parameters is provided.
|
| 242 |
+
|
| 243 |
+
<table><tr><td>Methods</td><td>Years</td><td>Acc (val.)</td><td>Acc (test.)</td><td>V.</td><td>T.</td><td>Params.(M)</td></tr><tr><td>HCRN [31]</td><td>2020</td><td>65.81</td><td>64.65</td><td>F</td><td>G</td><td>42</td></tr><tr><td>ClipBERT [33]</td><td>2021</td><td>72.09</td><td>72.71</td><td>F</td><td>B</td><td>137</td></tr><tr><td>VGT [63]</td><td>2022</td><td>68.40</td><td>68.66</td><td>O</td><td>B</td><td>143</td></tr><tr><td>FrozenGQA [62]</td><td>2023</td><td>77.10</td><td>77.01</td><td>F</td><td>D</td><td>30</td></tr><tr><td>CoVGT [64]</td><td>2023</td><td>81.70</td><td>79.97</td><td>O</td><td>R</td><td>159</td></tr><tr><td>SeViLA [70]</td><td>2023</td><td>89.26</td><td>89.02</td><td>O</td><td>F</td><td>108</td></tr></table>
|
| 244 |
+
|
| 245 |
+
F: frame-centric representations; O: object-centric representations; V.: Vision; T: Text; G: GloVe; B: BERT [9]; D: DeBERTa [19]; R: RoBERTa [38]; F: Flan T5 [8]. The ratio of training, validation, and testing set is 7:1:2.
|
| 246 |
+
|
| 247 |
+

|
| 248 |
+
Figure 6. Accident Reason Answering (ArA) Accuracy (Acc. %), w.r.t., different accident participants on the testing set of MM-AU.
|
| 249 |
+
|
| 250 |
+
Furthermore, fine-grained visual representations, e.g., region or object level, are key for higher performances. We speculate that the videos are all taken on the road about traffic accidents and thus a coarse frame-level representation is insufficient to discern various accident reasons.
|
| 251 |
+
|
| 252 |
+
An in-depth analysis in Fig. 6 shows that the methods' performances vary a lot across different accident participant types. Generally, SeViLA outperforms other methods in all scenarios. Yet, all methods perform closely when the accident reason is car-related. Curiously, we find that most of the methods fail to identify the cyclists in the accidents except for SeViLA. The reason could be that cyclists are the least frequent participants (as shown in Fig. 3(b)) in accidents. Thus, it calls for commonsense knowledge (carried
|
| 253 |
+
|
| 254 |
+

|
| 255 |
+
Figure 7. Some results $(V_{g})$ inputting the Co-CPs of $(V_{r}, t_{r})$ or $(V_{r}, t_{p})$ . From the generation of OAVD, the participants to be involved in accidents appear in advance when giving the accident reason prompt $t_{r}$ , while the accident objects disappear when providing the prevention advice prompt $t_{p}$ . ControlVideo and Tune-A-Video are given the same $t_{r}$ and $t_{p}$ prompt with OAVD, respectively. However, the artifacts and unrelated content are generated, and the phenomena of "appear in advance" or "disappear" do not occur.
|
| 256 |
+
|
| 257 |
+
in LLMs) to find the related reasons.
|
| 258 |
+
|
| 259 |
+
(3) Diffusion Evaluations: Here, we evaluate the AdVersa-SD, and the importance of Abductive-CLIP and Bboxes. To verify Abductive-CLIP in AdVersa-SD, we take two baselines: 1) the original CLIP model [44] and 2) a "Sequential-CLIP (S-CLIP)" that only maintains the positive Co-CPs of the Abductive-CLIP (abbrev., A-CLIP) structure (see Fig. 4).
|
| 260 |
+
|
| 261 |
+
Abductive Ability Check of AdVersa-SD. Fig. 7 visually presents video diffusion results of accident scenarios given the descriptions of accident reason $t_r$ and prevention advice $t_p$ , respectively. Interestingly, OAVD can make the accident participant (i.e., the pedestrian or the black car) appear in advance given $t_r$ , and eliminate the accident participants when provided $t_p$ . It indicates that our AdVersa-SD captures the dominant object representation for the cause-effect chain of the accident occurrence. In contrast, ControlVideo and Tune-A-Video generate irrelevant styles with worse performance than OAVD, as listed in Tab. 5, which shows that accident knowledge is scarce in this field but rather crucial for accident video diffusion models. Tab. 5 shows the results of different diffusion models.
|
| 262 |
+
|
| 263 |
+
Roles of different CLIP Models. Tab. 5 also presents the results of OAVD with varying CLIP models. The results show that our Abductive-CLIP can generate better text-video semantic alignment than the original CLIP model and the Sequential-CLIP trained on our MM-AU. It indicates that the contrastive interaction loss of the text-video pairs, i.e., Co-CPs, is important to discern the key semantic information within text and videos.
|
| 264 |
+
|
| 265 |
+
Role of Bboxes. From Tab. 5 and Fig. 8, the advantages of Bboxes are demonstrated with clearer and more detailed content in the generated frames. In addition, object-involved video diffusion can facilitate the key object region learning and maintain the details of the frames better than the version without Bbox input. More ablation studies on the role of bboxes can be viewed in the supplemental file.
|
| 266 |
+
|
| 267 |
+
Table 5. Results with SOTA diffusion models and our OAVD driven by varying CLIP models, where $\downarrow$ and $\uparrow$ prefer a lower and larger value, respectively. FPS: frames/second (tested on a single GeForce RTX 3090).
|
| 268 |
+
|
| 269 |
+
<table><tr><td>Method</td><td>TAV [61]</td><td>CVideo [72]</td><td>OAVD (CLIP [44])</td><td>OAVD (S-CLIP)*</td><td>OAVD (A-CLIP)*</td></tr><tr><td>CLIP_S ↑</td><td>21.77</td><td>22.51</td><td>21.9</td><td>27.14</td><td>27.24</td></tr><tr><td>FVD ↓</td><td>9545.6</td><td>12275.2</td><td>10122.5</td><td>5372.3</td><td>5238.1</td></tr><tr><td>FPS ↑</td><td>1.7</td><td>0.5</td><td>1.7</td><td>1.2</td><td>1.2</td></tr></table>
|
| 270 |
+
|
| 271 |
+
*: with the input of bounding boxes.
|
| 272 |
+
|
| 273 |
+
Figure 8. A visualization for the importance of bounding box.
|
| 274 |
+

|
| 275 |
+
$t_r$ : The truck do not give way to normal driving vehicles when turning or changing lanes.
|
| 276 |
+
|
| 277 |
+
Besides, our OAVD also can flexibly generate any accident videos with the input of only object boxes or the accident category descriptions (see the supplemental file).
|
| 278 |
+
|
| 279 |
+
# 6. Conclusion
|
| 280 |
+
|
| 281 |
+
This work presents a precious large-scale ego-view multi-modal accident dataset (MM-AU) for safe driving perception which provides the temporal, object, and text annotations for fine-grained accident video understanding. Within MM-AU, the evaluations of the state-of-the-art methods on object detection and accident reason answering tasks are carefully conducted. Based on MM-AU, we present AdVersa-SD to fulfill an abductive accident video understanding, where an Object-centric Accident Video Diffusion (OAVD) driven by an Abductive-CLIP model is proposed. Extensive experiments verify that AdVersa-SD shows promising ability for the cause-effect understanding of accident situations and generates superior video diffusion performance to two state-of-the-art diffusion models.
|
| 282 |
+
|
| 283 |
+
# References
|
| 284 |
+
|
| 285 |
+
[1] Mohammad Sadegh Aliakbarian, Fatemeh Sadat Saleh, Mathieu Salzmann, Basura Fernando, Lars Petersson, and Lars Andersson. VIENA: A driving anticipation dataset. In ACCV, pages 449-466, 2019. 3
|
| 286 |
+
[2] Wentao Bao, Qi Yu, and Yu Kong. Uncertainty-based traffic accident anticipation with spatio-temporal relational learning. In ACM MM, pages 2682-2690, 2020. 2, 3
|
| 287 |
+
[3] Wentao Bao, Qi Yu, and Yu Kong. DRIVE: deep reinforced accident anticipation with visual explanation. In ICCV, pages 7599-7608, 2021. 1, 2
|
| 288 |
+
[4] Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov, and Sergey Zagoruyko. End-to-end object detection with transformers. In ECCV, pages 213-229, 2020. 7, 2, 3, 4
|
| 289 |
+
[5] Fu-Hsiang Chan, Yu-Ting Chen, Yu Xiang, and Min Sun. Anticipating accidents in dashcam videos. In ACCV, volume 10114, pages 136-153, 2016. 2, 3
|
| 290 |
+
[6] Kai Chen et al. MMDetection: Open mmlab detection toolbox and benchmark. arXiv preprint arXiv:1906.07155, 2019. 6
|
| 291 |
+
[7] Shoufa Chen, Peize Sun, Yibing Song, and Ping Luo. Diffusiondet: Diffusion model for object detection. In ICCV, pages 19830-19843, 2023. 4, 6, 7, 2, 3
|
| 292 |
+
[8] Hyung Won Chung, Le Hou, Shayne Longpre, Barret Zoph, Yi Tay, William Fedus, Yunxuan Li, Xuezhi Wang, Mostafa Dehghani, Siddhartha Brahma, et al. Scaling instruction-finetuned language models. arXiv preprint arXiv:2210.11416, 2022.7
|
| 293 |
+
[9] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805, 2018. 7
|
| 294 |
+
[10] Kaiwen Duan, Song Bai, Lingxi Xie, Honggang Qi, Qingming Huang, and Qi Tian. Centernet: Keypoint triplets for object detection. In ICCV, pages 6569-6578, 2019. 7, 2, 3
|
| 295 |
+
[11] Editorial. Safe driving cars. Nat. Mach. Intell., 4:95-96, 2022. 1
|
| 296 |
+
[12] Patrick Esser, Johnathan Chiu, Parmida Atighehchian, Jonathan Granskog, and Anastasis Germanidis. Structure and content-guided video synthesis with diffusion models. In ICCV, pages 7346-7356, 2023. 1
|
| 297 |
+
[13] Jianwu Fang, Jiahuan Qiao, Jie Bai, Hongkai Yu, and Jianru Xue. Traffic accident detection via self-supervised consistency learning in driving scenarios. IEEE Trans. Intell. Transp. Syst., 23(7):9601-9614, 2022. 2, 3
|
| 298 |
+
[14] Jianwu Fang, Dingxin Yan, Jiahuan Qiao, Jianru Xue, He Wang, and Sen Li. DADA-2000: can driving accident be predicted by driver attention? Analyzed by A benchmark. In ITSC, pages 4303–4309, 2019. 4
|
| 299 |
+
[15] Jianwu Fang, Dingxin Yan, Jiahuan Qiao, Jianru Xue, and Hongkai Yu. DADA: driver attention prediction in driving accident scenarios. IEEE Trans. Intell. Transp. Syst., 23(6):4959-4971, 2022. 1, 3
|
| 300 |
+
[16] Zheng Ge, Songtao Liu, Feng Wang, Zeming Li, and Jian Sun. YOLOX: exceeding YOLO series in 2021. CoRR, abs/2107.08430, 2021. 4, 7, 2, 3
|
| 301 |
+
|
| 302 |
+
[17] Sreyan Ghosh, Sherwin Joseph Sunny, and Rohan Roney. Accident detection using convolutional neural networks. In IconDSC, pages 1-6, 2019. 3
|
| 303 |
+
[18] Feten Hajri and Hajer Fradi. Vision transformers for road accident detection from dashboard cameras. In AVSS, pages 1-8, 2022. 2
|
| 304 |
+
[19] Pengcheng He, Xiaodong Liu, Jianfeng Gao, and Weizhu Chen. Deberta: Decoding-enhanced bert with disentangled attention. arXiv preprint arXiv:2006.03654, 2020. 7
|
| 305 |
+
[20] Jonathan Ho, Ajay Jain, and Pieter Abbeel. Denoising diffusion probabilistic models. NeurIPS, 33:6840-6851, 2020. 6
|
| 306 |
+
[21] Hongyu Hu, Qi Wang, Ming Cheng, and Zhenhai Gao. Cost-sensitive semi-supervised deep learning to assess driving risk by application of naturalistic vehicle trajectories. Expert Syst. Appl., 178:115041, 2021. 2
|
| 307 |
+
[22] Ashesh Jain, Avi Singh, Hema Swetha Koppula, Shane Soh, and Ashutosh Saxena. Recurrent neural networks for driver activity anticipation via sensory-fusion architecture. In ICRA, pages 3118-3125, 2016. 2
|
| 308 |
+
[23] Glenn Jocher et al. ultralytics/yolov5: v6.2 - YOLOv5 Classification Models, Apple M1, Reproducibility, ClearML and Deci.ai integrations, 2022. 7, 2, 3, 4
|
| 309 |
+
[24] Minhee Kang, Wooseop Lee, Keeyeon Hwang, and Young Yoon. Vision transformer for detecting critical situations and extracting functional scenario for automated vehicle safety assessment. Sustainability, 14(15):9680, 2022. 1, 3
|
| 310 |
+
[25] Muhammad Monjurul Karim, Yu Li, Ruwen Qin, and Zhaozheng Yin. A dynamic spatial-temporal attention network for early anticipation of traffic accidents. IEEE Trans. Intell. Transp. Syst., 23(7):9590-9600, 2022. 2
|
| 311 |
+
[26] Muhammad Monjurul Karim, Yu Li, Ruwen Qin, and Zhaozheng Yin. A dynamic spatial-temporal attention network for early anticipation of traffic accidents. IEEE Trans. Intell. Transp. Syst., 23(7):9590-9600, 2022. 2
|
| 312 |
+
[27] Muhammad Monjurul Karim, Zhaozheng Yin, and Ruwen Qin. An attention-guided multistream feature fusion network for early localization of risky traffic agents in driving videos. IEEE Trans. Intell. Veh. in Press, 2023. 2, 3
|
| 313 |
+
[28] Hoon Kim, Kangwook Lee, Gyeongjo Hwang, and Changho Suh. Crash to not crash: Learn to identify dangerous vehicles using a simulator. In AAAI, pages 978-985, 2019. 3
|
| 314 |
+
[29] Bulbula Kameda et al. Vehicle accident and traffic classification using deep convolutional neural networks. In International Computer Conference on Wavelet Active Media Technology and Information Processing, pages 323-328, 2019. 3
|
| 315 |
+
[30] Hei Law and Jia Deng. Cornernet: Detecting objects as paired keypoints. In ECCV, pages 765-781, 2018. 7, 2, 3, 4
|
| 316 |
+
[31] Thao Minh Le, Vuong Le, Svetha Venkatesh, and Truyen Tran. Hierarchical conditional relation networks for video question answering. In CVPR, pages 9972-9981, 2020. 7, 4
|
| 317 |
+
[32] Trung-Nghia Le, Shintaro Ono, Akihiro Sugimoto, and Hiroshi Kawasaki. Attention R-CNN for accident detection. In IV, pages 313-320, 2020. 2
|
| 318 |
+
[33] Jie Lei, Linjie Li, Luowei Zhou, Zhe Gan, Tamara L Berg, Mohit Bansal, and Jingjing Liu. Less is more: Clipbert for
|
| 319 |
+
|
| 320 |
+
video-and-language learning via sparse sampling. In CVPR, pages 7331-7341, 2021. 7
|
| 321 |
+
[34] Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, and Yong Jae Lee. Gligen: Open-set grounded text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22511-22521, 2023. 5, 6, 1
|
| 322 |
+
[35] Tsung-Yi Lin, Michael Maire, Serge Belongie, James Hays, Pietro Perona, Deva Ramanan, Piotr Dollár, and C Lawrence Zitnick. Microsoft COCO: Common objects in context. In ECCV, pages 740-755, 2014. 4, 6
|
| 323 |
+
[36] Chunsheng Liu, Zijian Li, Faliang Chang, Shuang Li, and Jincan Xie. Temporal shift and spatial attention-based two-stream network for traffic risk assessment. IEEE Trans. Intell. Transp. Syst., 23(8):12518-12530, 2022. 3
|
| 324 |
+
[37] Yang Liu, Guanbin Li, and Liang Lin. Cross-modal causal relational reasoning for event-level visual question answering. IEEE Trans. Pattern Anal. Mach. Intell., 45(10):11624-11641, 2023. 1, 3
|
| 325 |
+
[38] Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692, 2019. 7
|
| 326 |
+
[39] Haohan Luo and Feng Wang. A simulation-based framework for urban traffic accident detection. In ICASSP, pages 1-5, 2023. 3
|
| 327 |
+
[40] Arnav Vaibhav Malawade, Shih-Yuan Yu, Brandon Hsu, Deepan Muthirayan, Pramod P. Khargonekar, and Mohammad Abdullah Al Faruque. Spatiotemporal scene-graph embedding for autonomous vehicle collision prediction. IEEE Internet Things J., 9(12):9379-9388, 2022. 2
|
| 328 |
+
[41] Ben Mildenhall, Pratul P Srinivasan, Matthew Tancik, Jonathan T Barron, Ravi Ramamoorthi, and Ren Ng. Nerf: Representing scenes as neural radiance fields for view synthesis. Communications of the ACM, 65(1):99-106, 2021. 6
|
| 329 |
+
[42] Bolin Ni, Houwen Peng, Minghao Chen, Songyang Zhang, Gaofeng Meng, Jianlong Fu, Shiming Xiang, and Haibin Ling. Expanding language-image pretrained models for general video recognition. In European Conference on Computer Vision, pages 1-18. Springer, 2022. 5
|
| 330 |
+
[43] Karishma Pawar and Vahida Attar. Deep learning based detection and localization of road accidents from traffic surveillance videos. ICT Express, 8(3):379-387, 2022. 2
|
| 331 |
+
[44] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763, 2021. 1, 8
|
| 332 |
+
[45] Shaoqing Ren, Kaiming He, Ross Girshick, and Jian Sun. Faster R-CNN: Towards real-time object detection with region proposal networks. NeurIPS, 28, 2015. 7, 2, 3
|
| 333 |
+
[46] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In CVPR, pages 10684-10695, 2022. 2, 5, 6, 1
|
| 334 |
+
|
| 335 |
+
[47] Debaditya Roy, Tetsuhiro Ishizaka, C Krishna Mohan, and Atsushi Fukuda. Detection of collision-prone vehicle behavior at intersections using siamese interaction lstm. IEEE Trans. Intell. Transp. Syst., 23(4):3137-3147, 2020. 2
|
| 336 |
+
[48] Kelathodi Kumaran Santhosh, Debi Prosad Dogra, Partha Pratim Roy, and Adway Mitra. Vehicular trajectory classification and traffic anomaly detection in videos using a hybrid cnn-vae architecture. IEEE Trans. Intell. Transp. Syst., 23(8):11891-11902, 2021. 2
|
| 337 |
+
[49] Dinesh Singh and Chalavadi Krishna Mohan. Deep spatiotemporal representation for detection of road accidents using stacked autoencoder. IEEE Trans. Intell. Transp. Syst., 20(3):879-887, 2019. 2
|
| 338 |
+
[50] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. In ICLR, 2021. 6
|
| 339 |
+
[51] Tomoyuki Suzuki, Hirokatsu Kataoka, Yoshimitsu Aoki, and Yutaka Satoh. Anticipating traffic accidents with adaptive loss and large-scale incident DB. In CVPR, pages 3521-3529, 2018. 2
|
| 340 |
+
[52] Leonardo Taccari, Francesco Sambo, Luca Bravi, Samuele Sarti, Leonardo Sarti, Matteo Simoncini, and Alessandro Lori. Classification of crash and near-crash events from dashcam videos and telematics. In ITSC, pages 2460-2465, 2018. 2
|
| 341 |
+
[53] Mingxing Tan and Quoc V. Le. Efficientnet: Rethinking model scaling for convolutional neural networks. In Kamalika Chaudhuri and Ruslan Salakhutdinov, editors, ICML, volume 97, pages 6105-6114, 2019. 7, 2, 3
|
| 342 |
+
[54] Ultralytics. Ultralytics github repository. https://github.com/ultralytics/ultralytics, November 2023. 7, 2, 3
|
| 343 |
+
[55] Thomas Unterthiner, Sjoerd Van Steenkiste, Karol Kurach, Raphael Marinier, Marcin Michalski, and Sylvain Gelly. Towards accurate generative models of video: A new metric & challenges. arXiv preprint arXiv:1812.01717, 2018. 6, 8
|
| 344 |
+
[56] Thakare Kamalakar Vijay, Debi Prosad Dogra, Heeseung Choi, Gi Pyo Nam, and Ig-Jae Kim. Detection of road accidents using synthetically generated multi-perspective accident videos. IEEE Trans. Intell. Transp. Syst., 24(2):1926-1935, 2023. 2
|
| 345 |
+
[57] Vikram Voleti, Alexia Jolicoeur-Martineau, and Chris Pal. Mcvd-masked conditional video diffusion for prediction, generation, and interpolation. NeurIPS, 35:23371-23385, 2022. 1
|
| 346 |
+
[58] Thang Vu, Hyunjun Jang, Trung X. Pham, and Chang Dong Yoo. Cascade RPN: delving into high-quality region proposal network with adaptive convolution. In NeurIPS, pages 1430-1440, 2019. 7, 2, 3
|
| 347 |
+
[59] Tianhang Wang, Kai Chen, Guang Chen, Bin Li, Zhijun Li, Zhengfa Liu, and Changjun Jiang. GSC: A graph and spatiotemporal continuity based framework for accident anticipation. IEEE Trans. Intell. Veh. in Press, 2023. 1, 2
|
| 348 |
+
[60] Tianqi Wang, Sukmin Kim, Wenxuan Ji, Enze Xie, Chongjian Ge, Junsong Chen, Zhenguo Li, and Ping Luo. Deepaccident: A motion and accident prediction benchmark for V2X autonomous driving. CoRR, abs/2304.01168, 2023. 3
|
| 349 |
+
|
| 350 |
+
[61] Jay Zhangjie Wu, Yixiao Ge, Xintao Wang, Stan Weixian Lei, Yuchao Gu, Yufei Shi, Wynne Hsu, Ying Shan, Xiaohu Qie, and Mike Zheng Shou. Tune-a-Video: One-shot tuning of image diffusion models for text-to-video generation. In ICCV, pages 7623–7633, 2023. 5, 6, 8, 1, 7
|
| 351 |
+
[62] Junbin Xiao, Angela Yao, Yicong Li, and Tat Seng Chua. Can i trust your answer? visually grounded video question answering. arXiv preprint arXiv:2309.01327, 2023. 7
|
| 352 |
+
[63] Junbin Xiao, Pan Zhou, Tat-Seng Chua, and Shuicheng Yan. Video graph transformer for video question answering. In ECCV, pages 39-58, 2022. 7
|
| 353 |
+
[64] Junbin Xiao, Pan Zhou, Angela Yao, Yicong Li, Richang Hong, Shuicheng Yan, and Tat-Seng Chua. Contrastive video question answering via video graph transformer. IEEE T-PAMI, 45(11):13265-13280, 2023. 3, 4, 7
|
| 354 |
+
[65] Li Xu, He Huang, and Jun Liu. SUTD-TrafficQA: A question answering benchmark and an efficient network for video reasoning over traffic events. In CVPR, pages 9878-9888, 2021. 1, 3
|
| 355 |
+
[66] Yu Yao, Xizi Wang, Mingze Xu, Zelin Pu, Yuchen Wang, Ella M. Atkins, and David J. Crandall. Dota: Unsupervised detection of traffic anomaly in driving videos. IEEE Trans. Pattern Anal. Mach. Intell., 45(1):444-459, 2023. 2, 3
|
| 356 |
+
[67] Yu Yao, Mingze Xu, Yuchen Wang, David J. Crandall, and Ella M. Atkins. Unsupervised traffic accident detection in first-person videos. In IROS, pages 273-280, 2019. 2, 3
|
| 357 |
+
[68] Tackgeun You and Bohyung Han. Traffic accident benchmark for causality recognition. In ECCV, volume 12352, pages 540-556, 2020. 3
|
| 358 |
+
[69] Fisher Yu, Haofeng Chen, Xin Wang, Wenqi Xian, Yingying Chen, Fangchen Liu, Vashisht Madhavan, and Trevor Darrell. BDD100K: A diverse driving dataset for heterogeneous multitask learning. In CVPR, pages 2633-2642, 2020. 4, 6
|
| 359 |
+
[70] Shoubin Yu, Jaemin Cho, Prateek Yadav, and Mohit Bansal. Self-chained image-language model for video localization and question answering. NeurIPS, 2023. 7, 4
|
| 360 |
+
[71] Chuanqi Zang, Hanqing Wang, Mingtao Pei, and Wei Liang. Discovering the real association: Multimodal causal reasoning in video question answering. In CVPR, pages 19027-19036, 2023. 3
|
| 361 |
+
[72] Yabo Zhang, Yuxiang Wei, Dongsheng Jiang, Xiaopeng Zhang, Wangmeng Zuo, and Qi Tian. Controlvideo: Training-free controllable text-to-video generation. arXiv preprint arXiv:2305.13077, 2023. 6, 8, 7
|
| 362 |
+
[73] Zhili Zhou, Xiaohua Dong, Zhetao Li, Keping Yu, Chun Ding, and Yimin Yang. Spatio-temporal feature encoding for traffic accident detection in vanet environment. IEEE Trans. Intell. Transp. Syst., 23(10):19772-19781, 2022. 2
|
| 363 |
+
[74] Xizhou Zhu, Weijie Su, Lewei Lu, Bin Li, Xiaogang Wang, and Jifeng Dai. Deformable detr: Deformable transformers for end-to-end object detection. In ICLR, 2021. 7, 2, 3
|
| 364 |
+
|
| 365 |
+
# Supplementary Material of Abductive Ego-View Accident Video Understanding for Safe Driving Perception
|
| 366 |
+
|
| 367 |
+

|
| 368 |
+
Figure 1. The detailed workflow of 3D-CAB in OAVD. 3D-CAB is one layer of the 3D U-net $\phi$ in Fig. 5 of the main paper body. For clarity, we denote the input representation of 3D-CAB as $\mathbf{z}_{v_{in}}$ and the output representation as $\mathbf{z}_{v_{out}}$ . Within 3D-CAB, the feature representations of the bounding boxes $\mathbf{z}_b$ and the text descriptions $\mathbf{z}_t$ are fused successively via the Gated Self-Attention (GA) and Cross-Attention (CA) modules, where $\mathbf{z}_b$ is obtained by MLP(Fourier(Bbox)) in Eq. (4) and $\mathbf{z}_t$ is generated by our Abductive CLIP. Within Fourier(Bbox), a Token Selection (TS) module [34] finds the important tokens for object representation learning. Notably, different from [61], the query ( $\mathbf{q}$ ), key ( $\mathbf{k}$ ), and value ( $\mathbf{v}$ ) are all updated in the OAVD training phase.
|
| 369 |
+
|
| 370 |
+

|
| 371 |
+
|
| 372 |
+
# 1. The Architecture of 3D-CAB
|
| 373 |
+
|
| 374 |
+
For reproducibility, we detail the workflow of 3D-CAB in OAVD, as shown in Fig. 1. $B$ denotes the batch size, and the maximum text prompt length $L$ is set to 77. In each layer of 3D-CAB, $c$ , $h$ , and $w$ represent the channels, height, and width of the input video feature $\mathbf{z}_{v_{in}}$ , while $C$ , $H$ , and $W$ represent the channels, height, and width of the video clip representation after ResNet Block encoding. Notably, the channels, height, and width change at each step of 3D-CAB for dimension adaptation. Furthermore, we inject the attention modules, i.e., SA, CA, and TA, into a Low-rank Adaptation (LoRA) trainer<sup>7</sup> for fast fine-tuning on LDM [46].
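As a rough illustration of the fusion order described above (a minimal PyTorch-style sketch under stated assumptions, not the released implementation; the hidden sizes, frequency count, and module layout are assumptions), the box tokens obtained by MLP(Fourier(Bbox)) can be injected through a zero-initialized gated self-attention before the text condition enters cross-attention:

```python
import math
import torch
import torch.nn as nn

def fourier_embed(boxes: torch.Tensor, num_freqs: int = 8) -> torch.Tensor:
    """Fourier features of normalized boxes: (B, N, 4) -> (B, N, 8 * num_freqs)."""
    freqs = (2.0 ** torch.arange(num_freqs, device=boxes.device)) * math.pi
    angles = boxes.unsqueeze(-1) * freqs                     # (B, N, 4, F)
    return torch.cat([angles.sin(), angles.cos()], dim=-1).flatten(-2)

class GatedBoxTextBlock(nn.Module):
    """One attention block: gated self-attention (GA) over [visual, box] tokens,
    then cross-attention (CA) to the text condition."""
    def __init__(self, dim: int, text_dim: int, heads: int = 8, num_freqs: int = 8):
        super().__init__()
        self.box_mlp = nn.Sequential(                        # MLP(Fourier(Bbox))
            nn.Linear(8 * num_freqs, dim), nn.SiLU(), nn.Linear(dim, dim))
        self.self_attn = nn.MultiheadAttention(dim, heads, batch_first=True)
        self.cross_attn = nn.MultiheadAttention(dim, heads, kdim=text_dim,
                                                vdim=text_dim, batch_first=True)
        self.gate = nn.Parameter(torch.zeros(1))             # gated residual, starts closed
        self.norm1, self.norm2 = nn.LayerNorm(dim), nn.LayerNorm(dim)

    def forward(self, z_v, boxes, z_t):
        # z_v: (B, HW, dim) visual tokens; boxes: (B, N, 4); z_t: (B, L, text_dim)
        z_b = self.box_mlp(fourier_embed(boxes))             # box tokens
        tokens = torch.cat([self.norm1(z_v), z_b], dim=1)
        attended, _ = self.self_attn(tokens, tokens, tokens)
        z_v = z_v + self.gate.tanh() * attended[:, : z_v.shape[1]]   # GA, gated
        cross, _ = self.cross_attn(self.norm2(z_v), z_t, z_t)        # CA with text
        return z_v + cross
```

In the actual model these blocks sit inside the 3D U-net together with the spatial/temporal attention (SA/TA) and ResNet stages shown in Fig. 1; only the GA/CA fusion step is sketched here.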
|
| 375 |
+
|
| 376 |
+
# 2. OD Analysis for Different Kinds of Objects
|
| 377 |
+
|
| 378 |
+
For an adequate benchmark, we offer a more detailed Object Detection (OD) analysis for distinct object types. Likewise, our evaluation utilizes the Average Precision (AP) metrics. In this context, we consider the original AP (average precision with IoU thresholds ranging from 0.5 to 0.95), AP50 (with an IoU threshold of 0.5), and AP75 (with an IoU threshold of 0.75) for our assessment.
|
| 379 |
+
|
| 380 |
+
Additionally, due to the varying scales of the objects involved in collisions in accident scenarios, we evaluate the proficiency of each model in detecting objects at small ( $<32\times32$ ), medium ( $\geq 32\times32$ and $<96\times96$ ), and large ( $\geq 96\times96$ ) scales, as measured by AP_S, AP_M, and AP_L.
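For concreteness, the size buckets above follow the COCO convention, and the AP thresholds rely on IoU matching; a minimal helper (illustrative only; the benchmark itself uses the standard COCO evaluation tooling) looks like:

```python
def size_bucket(width: float, height: float) -> str:
    """COCO-style size buckets that feed AP_S / AP_M / AP_L."""
    area = width * height
    if area < 32 * 32:
        return "small"
    if area < 96 * 96:
        return "medium"
    return "large"

def iou(box_a, box_b) -> float:
    """IoU of two (x1, y1, x2, y2) boxes; AP averages over IoU thresholds 0.5:0.95."""
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b
    iw = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    ih = max(0.0, min(ay2, by2) - max(ay1, by1))
    inter = iw * ih
    union = (ax2 - ax1) * (ay2 - ay1) + (bx2 - bx1) * (by2 - by1) - inter
    return inter / union if union > 0 else 0.0
```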
|
| 381 |
+
|
| 382 |
+
We present the fine-grained quantitative object analysis for 11 state-of-the-art detectors in Tab. 1 and Tab. 2. According to the results, YOLOv5s and DiffusionDet achieve the best accuracy in almost all object categories. YOLOv5s outperforms DiffusionDet when training on V1-Train [□, □, □] and testing on [□, □, □], while DiffusionDet benefits from excellent generalization (V2-Train [□, □], test. [□]), which allows DiffusionDet to detect important objects in accident scenarios even if these objects are not present in the training data.
|
| 383 |
+
|
| 384 |
+
Sensitivity to Different Kinds of Objects: According to the results in Tab. 1 and Tab. 2, all object detectors perform best when detecting cars, as cars are the most commonly occurring objects in MM-AU. YOLOv5s obtains an AP50 of 0.936 in the V1-Train mode, and DiffusionDet reaches an AP50 of 0.908 under the V2-Train mode. For cars, pedestrians, trucks, buses, and traffic lights, the AP values of the best detector are larger than 0.5. Yet, motorcycles and cyclists are hard to detect, especially under the V2-Train
|
| 385 |
+
|
| 386 |
+
Table 1. The object detection results of V1-Train [□, □, □] and V2-Train [□, □] for 11 state-of-the-art detectors on MM-AU, w.r.t., pedestrians, cars, motorcycles, and trucks.
|
| 387 |
+
pedestrian
|
| 388 |
+
|
| 389 |
+
<table><tr><td></td><td colspan="6">V1-Train [□, □, □], test. [□, □, □]</td><td colspan="6">V2-Train [□, □], test. [□]</td></tr><tr><td>method</td><td>AP</td><td>AP50</td><td>mAP75</td><td>AP_S</td><td>AP_M</td><td>AP_L</td><td>AP</td><td>AP50</td><td>AP75</td><td>AP_S</td><td>AP_M</td><td>AP_L</td></tr><tr><td>FasterRCNN [45]</td><td>0.454</td><td>0.715</td><td>0.522</td><td>0.491</td><td>0.473</td><td>0.376</td><td>0.294</td><td>0.535</td><td>0.302</td><td>0.33</td><td>0.340</td><td>0.149</td></tr><tr><td>CornerNet [30]</td><td>0.378</td><td>0.549</td><td>0.439</td><td>0.317</td><td>0.436</td><td>0.252</td><td>0.335</td><td>0.511</td><td>0.384</td><td>0.203</td><td>0.406</td><td>0.217</td></tr><tr><td>CascadeRPN [58]</td><td>0.448</td><td>0.699</td><td>0.513</td><td>0.443</td><td>0.46</td><td>0.424</td><td>0.365</td><td>0.593</td><td>0.407</td><td>0.331</td><td>0.405</td><td>0.266</td></tr><tr><td>CenterNet [10]</td><td>0.011</td><td>0.040</td><td>0.002</td><td>0.037</td><td>0.014</td><td>0.011</td><td>0.047</td><td>0.135</td><td>0.019</td><td>0.034</td><td>0.062</td><td>0.021</td></tr><tr><td>DETR [4]</td><td>0.099</td><td>0.294</td><td>0.038</td><td>0.096</td><td>0.088</td><td>0.15</td><td>0.058</td><td>0.175</td><td>0.022</td><td>0.029</td><td>0.064</td><td>0.067</td></tr><tr><td>EfficientNet [53]</td><td>0.114</td><td>0.299</td><td>0.055</td><td>0.096</td><td>0.127</td><td>0.106</td><td>0.000</td><td>0.002</td><td>0.000</td><td>0.004</td><td>0.000</td><td>0.001</td></tr><tr><td>Deformable-DeTR [74]</td><td>0.404</td><td>0.686</td><td>0.441</td><td>0.396</td><td>0.421</td><td>0.361</td><td>0.369</td><td>0.64</td><td>0.404</td><td>0.317</td><td>0.414</td><td>0.279</td></tr><tr><td>YOLOx [16]</td><td>0.424</td><td>0.695</td><td>0.471</td><td>0.387</td><td>0.440</td><td>0.406</td><td>0.293</td><td>0.531</td><td>0.297</td><td>0.206</td><td>0.344</td><td>0.213</td></tr><tr><td>YOLOv5s [23]</td><td>0.529</td><td>0.784</td><td>0.632</td><td>0.459</td><td>0.544</td><td>0.521</td><td>0.370</td><td>0.632</td><td>0.412</td><td>0.295</td><td>0.419</td><td>0.265</td></tr><tr><td>DiffusionDet [7]</td><td>0.527</td><td>0.767</td><td>0.607</td><td>0.463</td><td>0.544</td><td>0.516</td><td>0.480</td><td>0.699</td><td>0.557</td><td>0.423</td><td>0.531</td><td>0.376</td></tr><tr><td>YOLOv8 [54]</td><td>0.506</td><td>0.748</td><td>0.590</td><td>0.455</td><td>0.516</td><td>0.512</td><td>0.415</td><td>0.650</td><td>0.481</td><td>0.322</td><td>0.463</td><td>0.337</td></tr></table>
|
| 390 |
+
|
| 391 |
+
car
|
| 392 |
+
|
| 393 |
+
<table><tr><td></td><td colspan="6">V1-Train [□, □, □], test. [□, □, □]</td><td colspan="6">V2-Train [□, □, □], test. [□]</td></tr><tr><td>Detectors</td><td>AP</td><td>AP50</td><td>AP75</td><td>AP_S</td><td>AP_M</td><td>AP_L</td><td>AP</td><td>AP50</td><td>AP75</td><td>AP_S</td><td>AP_M</td><td>AP_L</td></tr><tr><td>FasterRCNN [45]</td><td>0.677</td><td>0.910</td><td>0.788</td><td>0.532</td><td>0.672</td><td>0.771</td><td>0.608</td><td>0.851</td><td>0.694</td><td>0.501</td><td>0.629</td><td>0.639</td></tr><tr><td>CornerNet [30]</td><td>0.493</td><td>0.628</td><td>0.537</td><td>0.259</td><td>0.561</td><td>0.532</td><td>0.481</td><td>0.639</td><td>0.522</td><td>0.259</td><td>0.563</td><td>0.479</td></tr><tr><td>CascadeRPN [58]</td><td>0.714</td><td>0.908</td><td>0.805</td><td>0.567</td><td>0.701</td><td>0.819</td><td>0.644</td><td>0.866</td><td>0.733</td><td>0.531</td><td>0.646</td><td>0.706</td></tr><tr><td>CenterNet [10]</td><td>0.073</td><td>0.135</td><td>0.071</td><td>0.100</td><td>0.094</td><td>0.062</td><td>0.264</td><td>0.515</td><td>0.242</td><td>0.194</td><td>0.328</td><td>0.256</td></tr><tr><td>DETR [4]</td><td>0.402</td><td>0.746</td><td>0.381</td><td>0.133</td><td>0.349</td><td>0.638</td><td>0.346</td><td>0.676</td><td>0.312</td><td>0.135</td><td>0.308</td><td>0.524</td></tr><tr><td>EfficientNet [53]</td><td>0.409</td><td>0.745</td><td>0.426</td><td>0.140</td><td>0.423</td><td>0.547</td><td>0.146</td><td>0.359</td><td>0.086</td><td>0.050</td><td>0.151</td><td>0.191</td></tr><tr><td>Deformable-DeTR [74]</td><td>0.657</td><td>0.906</td><td>0.763</td><td>0.466</td><td>0.636</td><td>0.801</td><td>0.607</td><td>0.882</td><td>0.684</td><td>0.393</td><td>0.599</td><td>0.736</td></tr><tr><td>YOLOx [16]</td><td>0.713</td><td>0.913</td><td>0.799</td><td>0.529</td><td>0.706</td><td>0.840</td><td>0.619</td><td>0.844</td><td>0.692</td><td>0.431</td><td>0.622</td><td>0.720</td></tr><tr><td>YOLOv5s [23]</td><td>0.769</td><td>0.936</td><td>0.862</td><td>0.585</td><td>0.762</td><td>0.882</td><td>0.682</td><td>0.902</td><td>0.787</td><td>0.495</td><td>0.684</td><td>0.773</td></tr><tr><td>DiffusionDet [7]</td><td>0.754</td><td>0.932</td><td>0.836</td><td>0.586</td><td>0.747</td><td>0.867</td><td>0.720</td><td>0.908</td><td>0.801</td><td>0.575</td><td>0.721</td><td>0.808</td></tr><tr><td>YOLOv8 [54]</td><td>0.755</td><td>0.926</td><td>0.836</td><td>0.576</td><td>0.748</td><td>0.867</td><td>0.707</td><td>0.896</td><td>0.791</td><td>0.532</td><td>0.706</td><td>0.801</td></tr></table>
|
| 394 |
+
|
| 395 |
+
motorcycle
|
| 396 |
+
|
| 397 |
+
<table><tr><td></td><td colspan="6">V1-Train [□, □, □], test. [□, □, □]</td><td colspan="6">V2-Train [□, □, □], test. [□]</td></tr><tr><td>Detectors</td><td>AP</td><td>AP50</td><td>AP75</td><td>AP_S</td><td>AP_M</td><td>AP_L</td><td>AP</td><td>AP50</td><td>AP75</td><td>AP_S</td><td>AP_M</td><td>AP_L</td></tr><tr><td>FasterRCNN [45]</td><td>0.316</td><td>0.554</td><td>0.330</td><td>0.268</td><td>0.341</td><td>0.291</td><td>0.165</td><td>0.342</td><td>0.139</td><td>0.208</td><td>0.200</td><td>0.081</td></tr><tr><td>CornerNet [30]</td><td>0.232</td><td>0.393</td><td>0.250</td><td>0.200</td><td>0.284</td><td>0.147</td><td>0.176</td><td>0.334</td><td>0.175</td><td>0.160</td><td>0.222</td><td>0.108</td></tr><tr><td>CascadeRPN [58]</td><td>0.320</td><td>0.511</td><td>0.340</td><td>0.272</td><td>0.336</td><td>0.313</td><td>0.175</td><td>0.357</td><td>0.150</td><td>0.186</td><td>0.200</td><td>0.0153</td></tr><tr><td>CenterNet [10]</td><td>0.002</td><td>0.008</td><td>0.001</td><td>0.021</td><td>0.003</td><td>0.001</td><td>0.016</td><td>0.052</td><td>0.005</td><td>0.053</td><td>0.019</td><td>0.005</td></tr><tr><td>DETR [4]</td><td>0.115</td><td>0.306</td><td>0.059</td><td>0.057</td><td>0.123</td><td>0.128</td><td>0.038</td><td>0.121</td><td>0.010</td><td>0.029</td><td>0.044</td><td>0.035</td></tr><tr><td>EfficientNet [53]</td><td>0.133</td><td>0.312</td><td>0.085</td><td>0.074</td><td>0.151</td><td>0.127</td><td>0.002</td><td>0.006</td><td>0.000</td><td>0.014</td><td>0.002</td><td>0.001</td></tr><tr><td>Deformable-DeTR [74]</td><td>0.276</td><td>0.506</td><td>0.276</td><td>0.231</td><td>0.305</td><td>0.266</td><td>0.201</td><td>0.417</td><td>0.173</td><td>0.115</td><td>0.223</td><td>0.176</td></tr><tr><td>YOLOx [16]</td><td>0.332</td><td>0.560</td><td>0.356</td><td>0.253</td><td>0.365</td><td>0.312</td><td>0.148</td><td>0.318</td><td>0.120</td><td>0.183</td><td>0.189</td><td>0.125</td></tr><tr><td>YOLOv5s [23]</td><td>0.388</td><td>0.615</td><td>0.429</td><td>0.301</td><td>0.406</td><td>0.391</td><td>0.061</td><td>0.146</td><td>0.040</td><td>0.017</td><td>0.044</td><td>0.105</td></tr><tr><td>DiffusionDet [7]</td><td>0.375</td><td>0.599</td><td>0.403</td><td>0.300</td><td>0.398</td><td>0.365</td><td>0.286</td><td>0.493</td><td>0.297</td><td>0.256</td><td>0.325</td><td>0.219</td></tr><tr><td>YOLOv8 [54]</td><td>0.370</td><td>0.578</td><td>0.412</td><td>0.296</td><td>0.390</td><td>0.368</td><td>0.241</td><td>0.440</td><td>0.237</td><td>0.241</td><td>0.271</td><td>0.215</td></tr></table>
|
| 398 |
+
|
| 399 |
+
truck
|
| 400 |
+
|
| 401 |
+
<table><tr><td></td><td colspan="6">V1-Train [□, □, □], test. [□, □, □]</td><td colspan="6">V2-Train [□, □, □], test. [□]</td></tr><tr><td>Detectors</td><td>AP</td><td>AP50</td><td>AP75</td><td>AP_S</td><td>AP_M</td><td>AP_L</td><td>AP</td><td>AP50</td><td>AP75</td><td>AP_S</td><td>AP_M</td><td>AP_L</td></tr><tr><td>FasterRCNN [45]</td><td>0.505</td><td>0.715</td><td>0.594</td><td>0.389</td><td>0.467</td><td>0.539</td><td>0.338</td><td>0.516</td><td>0.390</td><td>0.286</td><td>0.384</td><td>0.314</td></tr><tr><td>CornerNet [30]</td><td>0.410</td><td>0.521</td><td>0.439</td><td>0.203</td><td>0.473</td><td>0.390</td><td>0.398</td><td>0.517</td><td>0.422</td><td>0.181</td><td>0.419</td><td>0.404</td></tr><tr><td>CascadeRPN [58]</td><td>0.545</td><td>0.715</td><td>0.620</td><td>0.385</td><td>0.493</td><td>0.591</td><td>0.412</td><td>0.574</td><td>0.471</td><td>0.316</td><td>0.379</td><td>0.441</td></tr><tr><td>CenterNet [10]</td><td>0.021</td><td>0.040</td><td>0.021</td><td>0.017</td><td>0.018</td><td>0.036</td><td>0.076</td><td>0.161</td><td>0.060</td><td>0.048</td><td>0.102</td><td>0.076</td></tr><tr><td>DETR [4]</td><td>0.287</td><td>0.506</td><td>0.292</td><td>0.098</td><td>0.201</td><td>0.373</td><td>0.18</td><td>0.341</td><td>0.173</td><td>0.053</td><td>0.129</td><td>0.220</td></tr><tr><td>EfficientNet [53]</td><td>0.201</td><td>0.345</td><td>0.225</td><td>0.119</td><td>0.193</td><td>0.218</td><td>0.015</td><td>0.045</td><td>0.004</td><td>0.005</td><td>0.016</td><td>0.014</td></tr><tr><td>Deformable-DeTR [74]</td><td>0.550</td><td>0.741</td><td>0.645</td><td>0.362</td><td>0.476</td><td>0.612</td><td>0.463</td><td>0.649</td><td>0.538</td><td>0.266</td><td>0.42</td><td>0.509</td></tr><tr><td>YOLOx [16]</td><td>0.332</td><td>0.560</td><td>0.356</td><td>0.253</td><td>0.365</td><td>0.312</td><td>0.410</td><td>0.595</td><td>0.462</td><td>0.253</td><td>0.371</td><td>0.449</td></tr><tr><td>YOLOv5s [23]</td><td>0.388</td><td>0.615</td><td>0.429</td><td>0.301</td><td>0.406</td><td>0.391</td><td>0.510</td><td>0.686</td><td>0.600</td><td>0.285</td><td>0.418</td><td>0.575</td></tr><tr><td>DiffusionDet [7]</td><td>0.652</td><td>0.792</td><td>0.708</td><td>0.488</td><td>0.580</td><td>0.714</td><td>0.549</td><td>0.681</td><td>0.599</td><td>0.405</td><td>0.510</td><td>0.582</td></tr><tr><td>YOLOv8 [54]</td><td>0.370</td><td>0.578</td><td>0.412</td><td>0.296</td><td>0.390</td><td>0.368</td><td>0.556</td><td>0.692</td><td>0.615</td><td>0.344</td><td>0.470</td><td>0.615</td></tr></table>
|
| 402 |
+
|
| 403 |
+
Table 2. The object detection results of V1-Train [□, □, □] and V2-Train [□, □] for 11 state-of-the-art detectors on MM-AU, w.r.t., buses, traffic lights, and cyclists.
|
| 404 |
+
bus
|
| 405 |
+
|
| 406 |
+
<table><tr><td></td><td colspan="6">V1-Train [□, □, □], test. [□, □, □]</td><td colspan="6">V2-Train [□, □], test. [□]</td></tr><tr><td>Detectors</td><td>AP</td><td>AP50</td><td>AP75</td><td>AP_S</td><td>AP_M</td><td>AP_L</td><td>AP</td><td>AP50</td><td>AP75</td><td>AP_S</td><td>AP_M</td><td>AP_L</td></tr><tr><td>FasterRCNN [45]</td><td>0.521</td><td>0.690</td><td>0.615</td><td>0.304</td><td>0.431</td><td>0.580</td><td>0.312</td><td>0.455</td><td>0.356</td><td>0.263</td><td>0.298</td><td>0.328</td></tr><tr><td>CornerNet [30]</td><td>0.380</td><td>0.465</td><td>0.408</td><td>0.174</td><td>0.404</td><td>0.376</td><td>0.412</td><td>0.507</td><td>0.443</td><td>0.154</td><td>0.359</td><td>0.461</td></tr><tr><td>CascadeRPN [58]</td><td>0.522</td><td>0.658</td><td>0.604</td><td>0.263</td><td>0.449</td><td>0.579</td><td>0.395</td><td>0.529</td><td>0.464</td><td>0.214</td><td>0.342</td><td>0.441</td></tr><tr><td>CenterNet [10]</td><td>0.003</td><td>0.005</td><td>0.003</td><td>0.001</td><td>0.002</td><td>0.003</td><td>0.027</td><td>0.052</td><td>0.025</td><td>0.028</td><td>0.036</td><td>0.026</td></tr><tr><td>DETR [4]</td><td>0.201</td><td>0.321</td><td>0.219</td><td>0.042</td><td>0.118</td><td>0.258</td><td>0.131</td><td>0.212</td><td>0.141</td><td>0.001</td><td>0.076</td><td>0.167</td></tr><tr><td>EfficientNet [53]</td><td>0.106</td><td>0.169</td><td>0.123</td><td>0.028</td><td>0.108</td><td>0.109</td><td>0.003</td><td>0.008</td><td>0.001</td><td>0.001</td><td>0.002</td><td>0.003</td></tr><tr><td>Deformable-DeTR [74]</td><td>0.511</td><td>0.670</td><td>0.603</td><td>0.266</td><td>0.401</td><td>0.591</td><td>0.484</td><td>0.625</td><td>0.575</td><td>0.282</td><td>0.396</td><td>0.541</td></tr><tr><td>YOLOx [16]</td><td>0.595</td><td>0.730</td><td>0.678</td><td>0.336</td><td>0.479</td><td>0.670</td><td>0.417</td><td>0.556</td><td>0.483</td><td>0.136</td><td>0.33</td><td>0.475</td></tr><tr><td>YOLOv5s [23]</td><td>0.685</td><td>0.794</td><td>0.757</td><td>0.400</td><td>0.541</td><td>0.767</td><td>0.418</td><td>0.553</td><td>0.503</td><td>0.006</td><td>0.238</td><td>0.541</td></tr><tr><td>DiffusionDet [7]</td><td>0.650</td><td>0.759</td><td>0.707</td><td>0.360</td><td>0.531</td><td>0.721</td><td>0.574</td><td>0.674</td><td>0.632</td><td>0.315</td><td>0.492</td><td>0.631</td></tr><tr><td>YOLOv8 [54]</td><td>0.668</td><td>0.779</td><td>0.734</td><td>0.371</td><td>0.526</td><td>0.753</td><td>0.533</td><td>0.637</td><td>0.592</td><td>0.123</td><td>0.409</td><td>0.616</td></tr></table>
|
| 407 |
+
|
| 408 |
+
traffic light
|
| 409 |
+
|
| 410 |
+
<table><tr><td></td><td colspan="6">V1-Train [□, □, □], test. [□, □, □]</td><td colspan="6">V2-Train [□, □, □], test. [□]</td></tr><tr><td>Detectors</td><td>AP</td><td>AP50</td><td>AP75</td><td>AP_S</td><td>AP_M</td><td>AP_L</td><td>AP</td><td>AP50</td><td>AP75</td><td>AP_S</td><td>AP_M</td><td>AP_L</td></tr><tr><td>FasterRCNN [45]</td><td>0.487</td><td>0.689</td><td>0.583</td><td>0.434</td><td>0.515</td><td>0.208</td><td>0.371</td><td>0.528</td><td>0.451</td><td>0.325</td><td>0.402</td><td>0.039</td></tr><tr><td>CornerNet [30]</td><td>0.412</td><td>0.543</td><td>0.482</td><td>0.306</td><td>0.506</td><td>0.024</td><td>0.248</td><td>0.317</td><td>0.280</td><td>0.275</td><td>0.286</td><td>0.018</td></tr><tr><td>CascadeRPN [58]</td><td>0.495</td><td>0.675</td><td>0.585</td><td>0.417</td><td>0.532</td><td>0.226</td><td>0.409</td><td>0.531</td><td>0.480</td><td>0.368</td><td>0.437</td><td>0.103</td></tr><tr><td>CenterNet [10]</td><td>0.061</td><td>0.127</td><td>0.048</td><td>0.040</td><td>0.085</td><td>0.000</td><td>0.076</td><td>0.167</td><td>0.057</td><td>0.070</td><td>0.094</td><td>0.000</td></tr><tr><td>DETR [4]</td><td>0.132</td><td>0.359</td><td>0.069</td><td>0.062</td><td>0.163</td><td>0.068</td><td>0.079</td><td>0.243</td><td>0.024</td><td>0.048</td><td>0.095</td><td>0.046</td></tr><tr><td>EfficientNet [53]</td><td>0.164</td><td>0.260</td><td>0.169</td><td>0.090</td><td>0.207</td><td>0.011</td><td>0.024</td><td>0.089</td><td>0.000</td><td>0.006</td><td>0.033</td><td>0.000</td></tr><tr><td>Deformable-DeTR [74]</td><td>0.394</td><td>0.669</td><td>0.450</td><td>0.345</td><td>0.420</td><td>0.304</td><td>0.320</td><td>0.585</td><td>0.323</td><td>0.264</td><td>0.35</td><td>0.155</td></tr><tr><td>YOLOx [16]</td><td>0.480</td><td>0.667</td><td>0.570</td><td>0.384</td><td>0.546</td><td>0.328</td><td>0.310</td><td>0.458</td><td>0.359</td><td>0.248</td><td>0.346</td><td>0.151</td></tr><tr><td>YOLOv5s [23]</td><td>0.542</td><td>0.743</td><td>0.653</td><td>0.428</td><td>0.590</td><td>0.423</td><td>0.356</td><td>0.548</td><td>0.420</td><td>0.297</td><td>0.391</td><td>0.207</td></tr><tr><td>DiffusionDet [7]</td><td>0.522</td><td>0.703</td><td>0.605</td><td>0.440</td><td>0.570</td><td>0.344</td><td>0.511</td><td>0.680</td><td>0.589</td><td>0.441</td><td>0.559</td><td>0.248</td></tr><tr><td>YOLOv8 [54]</td><td>0.526</td><td>0.703</td><td>0.622</td><td>0.413</td><td>0.575</td><td>0.429</td><td>0.417</td><td>0.570</td><td>0.492</td><td>0.317</td><td>0.465</td><td>0.247</td></tr></table>
|
| 411 |
+
|
| 412 |
+
cyclist
|
| 413 |
+
|
| 414 |
+
<table><tr><td></td><td colspan="6">V1-Train [□, □, □], test. [□, □, □]</td><td colspan="6">V2-Train [□, □, □], test. [□, □]</td></tr><tr><td>Detectors</td><td>AP</td><td>AP50</td><td>AP75</td><td>AP_S</td><td>AP_M</td><td>AP_L</td><td>AP</td><td>AP50</td><td>AP75</td><td>AP_S</td><td>AP_M</td><td>AP_L</td></tr><tr><td>FasterRCNN [45]</td><td>0.218</td><td>0.391</td><td>0.246</td><td>0.015</td><td>0.248</td><td>0.196</td><td>0.122</td><td>0.255</td><td>0.105</td><td>0.086</td><td>0.144</td><td>0.072</td></tr><tr><td>CornerNet [30]</td><td>0.179</td><td>0.297</td><td>0.184</td><td>0.020</td><td>0.196</td><td>0.191</td><td>0.227</td><td>0.370</td><td>0.242</td><td>0.034</td><td>0.257</td><td>0.194</td></tr><tr><td>CascadeRPN [58]</td><td>0.255</td><td>0.446</td><td>0.276</td><td>0.017</td><td>0.253</td><td>0.318</td><td>0.150</td><td>0.275</td><td>0.166</td><td>0.066</td><td>0.159</td><td>0.171</td></tr><tr><td>CenterNet [10]</td><td>0.000</td><td>0.000</td><td>0.000</td><td>0.000</td><td>0.000</td><td>0.000</td><td>0.000</td><td>0.000</td><td>0.000</td><td>0.001</td><td>0.000</td><td>0.000</td></tr><tr><td>DETR [4]</td><td>0.035</td><td>0.106</td><td>0.011</td><td>0.012</td><td>0.033</td><td>0.051</td><td>0.003</td><td>0.012</td><td>0.002</td><td>0.000</td><td>0.003</td><td>0.007</td></tr><tr><td>EfficientNet [53]</td><td>0.016</td><td>0.039</td><td>0.010</td><td>0.002</td><td>0.018</td><td>0.019</td><td>0.000</td><td>0.000</td><td>0.000</td><td>0.000</td><td>0.000</td><td>0.000</td></tr><tr><td>Deformable-DeTR [74]</td><td>0.231</td><td>0.446</td><td>0.221</td><td>0.038</td><td>0.244</td><td>0.255</td><td>0.166</td><td>0.313</td><td>0.158</td><td>0.088</td><td>0.177</td><td>0.161</td></tr><tr><td>YOLOx [16]</td><td>0.235</td><td>0.439</td><td>0.223</td><td>0.048</td><td>0.228</td><td>0.309</td><td>0.137</td><td>0.271</td><td>0.121</td><td>0.081</td><td>0.174</td><td>0.108</td></tr><tr><td>YOLOv5s [23]</td><td>0.368</td><td>0.601</td><td>0.423</td><td>0.058</td><td>0.362</td><td>0.477</td><td>0.005</td><td>0.012</td><td>0.004</td><td>0.000</td><td>0.006</td><td>0.004</td></tr><tr><td>DiffusionDet [7]</td><td>0.360</td><td>0.577</td><td>0.391</td><td>0.036</td><td>0.373</td><td>0.418</td><td>0.302</td><td>0.474</td><td>0.319</td><td>0.095</td><td>0.318</td><td>0.291</td></tr><tr><td>YOLOv8 [54]</td><td>0.314</td><td>0.508</td><td>0.351</td><td>0.057</td><td>0.302</td><td>0.416</td><td>0.198</td><td>0.332</td><td>0.211</td><td>0.070</td><td>0.236</td><td>0.221</td></tr></table>
|
| 415 |
+
|
| 416 |
+
mode, where all AP values are below 0.5. Compared with DiffusionDet, YOLOv5s fails on motorcycles and cyclists in the V2-Train mode.
|
| 417 |
+
|
| 418 |
+
Adaptability to Small Objects: Small object detection is a difficult problem because there are not enough details to obtain a strong feature representation. In accident scenarios, this problem may be aggravated by their unusual nature. Accordingly, most detectors produce their lowest AP_S values within their AP value set. For motorcycles, traffic lights, and pedestrians, very large objects are uncommon, and the AP_L values are the smallest in the V2-Train mode. In contrast, for these kinds of objects,
|
| 419 |
+
|
| 420 |
+
the AP_L values in the V1-Train mode are not the smallest, which indicates that large objects frequently appear in the accident window due to the severe scale change, e.g., the ego-car-involved cases in Fig. 2 (1)-(2) and (5)-(6).
|
| 421 |
+
|
| 422 |
+
Scalability to Corner Objects: The objects in the road accident window are typical corner cases for object detection. Fig. 2 demonstrates some detection results of CenterNet, DETR, DiffusionDet, and YOLOv5s. It is clear that these corner cases are hard to address because of the dramatic scale change (Fig. 2 (1)-(2) and (5)-(6)) and severe pose distortion (Fig. 2 (1) and (3)-(4)). Many objects are wrongly detected, such as the wrong detections of
|
| 423 |
+
|
| 424 |
+

|
| 425 |
+
Figure 2. The object detection snapshots in accident frames by CenterNet [10], DETR [4], DiffusionDet [7], and YOLOv5s [23]. We can see that all detectors fail to detect the cyclist (column (2)) and the pedestrian with distorted posture (column (1)). DETR is more aggressive in covering all possible objects but generates many false detections.
|
| 426 |
+
|
| 427 |
+
car $\rightarrow$ truck and bus $\rightarrow$ truck. DETR is more aggressive in covering all possible objects but generates many false detections.
|
| 428 |
+
|
| 429 |
+
In summary, due to the corner cases, object detection in ego-view accident videos still has many unresolved issues.
|
| 430 |
+
|
| 431 |
+
# 3. ArA Case Analysis, w.r.t., Different Objects
|
| 432 |
+
|
| 433 |
+
Continuing the aforementioned analysis of the ArA task in the main body, we show some cases with respect to different objects in Fig. 3, taken from the results of the state-of-the-art methods. Because many pedestrian-involved accidents are caused by distracted walking or aggressive movement, such as sudden crossing, all methods except HCRN [31] provide an accurate accident reason for the shown cases. For the surrounding-car-involved cases, the irregular behaviors of cars are the common reason for the accidents, which implies a traffic-rule reasoning problem. Therefore, methods with better commonsense knowledge learning, such as SeViLA [70] (the only method with an accurate ArA for the $4th$ case), have an advantage. As for the ego-car-involved cases, the severe scale change favors object-centric methods with better region context learning.
|
| 434 |
+
|
| 435 |
+
# 4. More Evaluations of OAVD
|
| 436 |
+
|
| 437 |
+
More evaluations are provided here for a sufficient understanding of our Object-centric Accident Video Diffusion
|
| 438 |
+
|
| 439 |
+
(OAVD). We provide further example analysis to check the abductive ability of our OAVD in comparison with other state-of-the-art video diffusion methods. Notably, we further include ModelScope T2V (preprint)<sup>8</sup> and Text2Video-Zero (published in ICCV 2023)<sup>9</sup> in the evaluation. ModelScope T2V is re-trained with the same number of samples as our OAVD (i.e., 6000 Co-CPs), and Text2Video-Zero is another training-free video diffusion method.
|
| 440 |
+
|
| 441 |
+
More Visualizations of OAVD Against SOTAs: Fig. 4 and Fig. 5 present qualitative comparisons of different video diffusion models. The inference flow is $(Bboxes\to V_r) + t_r / t_p\to V_g$ , i.e., we input the detected bounding boxes $Bboxes$ , the video clip in the near-accident window $V_{r}$ , and the accident reason or prevention advice description $t_r / t_p$ . From the demonstrated snapshots, we can see that our OAVD shows an "in advance" phenomenon for the accident reason prompt and eliminates the crashing object when the prevention advice description is given. ModelScope T2V also generates promising video frames with clear details, and can even eliminate the objects to be involved in accidents after inputting the prevention advice description, as shown by the second example in Fig. 4 and the first case in Fig. 5. Yet, this ability is not
|
| 442 |
+
|
| 443 |
+

|
| 444 |
+
Figure 3. The case visualization of Accident reason Answering (ArA) by 8 state-of-the-art Video Question Answering (VQA) methods.
|
| 445 |
+
|
| 446 |
+
stably verified in Fig. 4 (the 1st example) and Fig. 5 (the 2nd example). As for the other methods, including the training-free ones, the style and content of the generated video frames are not relevant to the given text prompts.
|
| 447 |
+
|
| 448 |
+
More Analysis on the Impact of Bboxes: To clarify the impact of bounding boxes (Bboxes) on our OAVD model, we re-train OAVD without the input of Bboxes, driven by the Sequential CLIP (S-CLIP) and Abductive CLIP (A-CLIP) models, respectively.
|
| 449 |
+
|
| 450 |
+

|
| 451 |
+
|
| 452 |
+

|
| 453 |
+
Figure 4. The visualization of generated frames by our OAVD, ModelScope T2V, Tune-A-Video [61], ControlVideo [72], and Text2Video-Zero.
|
| 454 |
+
|
| 455 |
+

|
| 456 |
+
$t_p$ : The black vehicle should comply with the traffic rules during driving, and should not exceed the speed limit.
|
| 457 |
+
|
| 458 |
+

|
| 459 |
+
$t_p$ : The ego-car should slow down or honk their horns when they stop at intersections or trunk roads where their vision is blocked to prevent other vehicles or pedestrians from rushing out suddenly.
|
| 460 |
+
|
| 461 |
+
$t_r$ : The ego-car's vision is blocked or blurred, and a cyclist appears suddenly.
|
| 462 |
+
|
| 463 |
+

|
| 464 |
+
|
| 465 |
+
$t_p$ : The ego-car should slow down or honk their horns when they stop at trunk roads where their vision is blocked to prevent other pedestrians from rushing out suddenly.
|
| 466 |
+
|
| 467 |
+

|
| 468 |
+
|
| 469 |
+
$t_r$ : The vehicles drive too fast with short braking distance.
|
| 470 |
+
|
| 471 |
+

|
| 472 |
+
Figure 5. The visualization of video diffusion by our OAVD, ModelScope T2V, Tune-A-Video [61], ControlVideo [72], and Text2Video-Zero.
|
| 473 |
+
|
| 474 |
+
$t_p$ : Vehicles should slow down when passing intersections or crosswalks, and observe the traffic carefully.
|
| 475 |
+
|
| 476 |
+

|
| 477 |
+
|
| 478 |
+

|
| 479 |
+
|
| 480 |
+

|
| 481 |
+
|
| 482 |
+

|
| 483 |
+
Figure 6. The visualization of accident video generation of OAVD with the inference path of $(Bboxes\to V_o) + t_a\to V_g$
|
| 484 |
+
|
| 485 |
+

|
| 486 |
+
|
| 487 |
+

|
| 488 |
+
|
| 489 |
+

|
| 490 |
+
|
| 491 |
+

|
| 492 |
+
|
| 493 |
+

|
| 494 |
+
|
| 495 |
+

|
| 496 |
+
Figure 7. The visualization of video-free accident video generation of OAVD with the inference path of $Bboxes + t_a \rightarrow V_g$ .
|
| 497 |
+
|
| 498 |
+

|
| 499 |
+
|
| 500 |
+
The video-level Fréchet Video Distance (FVD) [55] is adopted here. The results in Tab. 3 show that the bounding boxes help to enhance the video quality, yielding lower FVD values. Based on this evaluation, object-centric video diffusion is promising for generating detailed frame content.
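FVD [55] fits a Gaussian to deep video features (commonly I3D activations) of real and generated clips and measures the Fréchet distance between the two fits, $\mathrm{FVD} = \lVert \mu_r - \mu_g \rVert^2 + \mathrm{Tr}\big(\Sigma_r + \Sigma_g - 2(\Sigma_r \Sigma_g)^{1/2}\big)$ . A small sketch, assuming the per-video features have already been extracted:

```python
import numpy as np
from scipy.linalg import sqrtm

def frechet_video_distance(feats_real: np.ndarray, feats_gen: np.ndarray) -> float:
    """FVD between two feature matrices of shape (num_videos, feat_dim); lower is better."""
    mu_r, mu_g = feats_real.mean(axis=0), feats_gen.mean(axis=0)
    cov_r = np.cov(feats_real, rowvar=False)
    cov_g = np.cov(feats_gen, rowvar=False)
    covmean = sqrtm(cov_r @ cov_g)
    if np.iscomplexobj(covmean):            # numerical noise can produce tiny imaginary parts
        covmean = covmean.real
    diff = mu_r - mu_g
    return float(diff @ diff + np.trace(cov_r + cov_g - 2.0 * covmean))
```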
|
| 501 |
+
|
| 502 |
+
Table 3. FVD value comparison of our OAVD with or without the input of bounding boxes. *: with the input of bounding boxes.
|
| 503 |
+
|
| 504 |
+
<table><tr><td>Method</td><td>OAVD (S-CLIP) *</td><td>OAVD (S-CLIP)</td><td>OAVD (A-CLIP) *</td><td>OAVD (A-CLIP)</td></tr><tr><td>FVD ↓</td><td>5372.3</td><td>5384.6</td><td>5238.1</td><td>5358.8</td></tr></table>
|
| 505 |
+
|
| 506 |
+
Visualizations of Accident Video Generation: Besides the abductive check of our video diffusion model OAVD, we also show its ability for flexible accident video generation. To be clear, the inference stage here takes the video clip of the normal video segment $V_{o}$ and the accident category description $t_a$ as input.
|
| 507 |
+
|
| 508 |
+
This configuration verifies the ability to change reality from normal situations to accidents. Fig. 6 shows some examples of accident video generation. Interestingly, our OAVD can create the objects to be involved in accidents with a clear pose and appearance. This ability may alleviate the few-shot sample issue of accident videos for future tasks.
|
| 509 |
+
|
| 510 |
+
In addition, we also check video-free accident video generation by inputting only the bounding boxes to our OAVD. Here, four $^{10}$ bounding boxes are randomly set for each example. The results in Fig. 7 clearly verify the guidance of the accident category description, and the generated accident videos are more realistic without the restriction of the original video frames. These visualizations show that OAVD can flexibly augment the video sample scale of ego-view accidents for safe driving.
|
2403.00xxx/2403.00436/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:e942c367d2da7d09ac73fabaa7b0b1812f5161cdcda786642fe27a21c4515b58
|
| 3 |
+
size 3226856
|
2403.00xxx/2403.00436/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00439/51835c5b-8381-4a5b-996b-aed0066adc56_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00439/51835c5b-8381-4a5b-996b-aed0066adc56_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00439/51835c5b-8381-4a5b-996b-aed0066adc56_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:50f1643313c39f79d8f53ffcadace39ac01d8800074bcba946e7f043b4f73b9e
|
| 3 |
+
size 2009000
|
2403.00xxx/2403.00439/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00439/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:74eda05c288871da5cea9bc895ef12dcc9f11dd2a0b8d6baafa509b479fc0ba8
|
| 3 |
+
size 282487
|
2403.00xxx/2403.00439/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00448/46c39b63-fc53-45de-bf27-f6a2478aeeb7_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00448/46c39b63-fc53-45de-bf27-f6a2478aeeb7_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00448/46c39b63-fc53-45de-bf27-f6a2478aeeb7_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:809639beb1c3e8528252c3081dce58f8f83d4445cee44c6073da43b193e87b5c
|
| 3 |
+
size 2633338
|
2403.00xxx/2403.00448/full.md
ADDED
|
@@ -0,0 +1,477 @@
|
| 1 |
+
# When Large Language Models Confront Repository-Level Automatic Program Repair: How Well They Done?
|
| 2 |
+
|
| 3 |
+
Yuxiao Chen (chenyuxiao2021@iscas.ac.cn), Institute of Software, Chinese Academy of Sciences, China; University of Chinese Academy of Sciences, China

Changjiang Li (meet.cjli@gmail.com), Stony Brook University, USA

Jingzheng Wu (jingzheng08@iscas.ac.cn), Institute of Software, Chinese Academy of Sciences, China

Zhiqing Rui (zhiqing@iscas.ac.cn), Institute of Software, Chinese Academy of Sciences, China; University of Chinese Academy of Sciences, China

Yanjun Wu (yanjun@iscas.ac.cn), Institute of Software, Chinese Academy of Sciences, China

Xiang Ling (lingxiang@iscas.ac.cn), Institute of Software, Chinese Academy of Sciences, China

Tianyue Luo (tianyue@iscas.ac.cn), Institute of Software, Chinese Academy of Sciences, China
|
| 64 |
+
|
| 65 |
+
# ABSTRACT
|
| 66 |
+
|
| 67 |
+
In recent years, large language models (LLMs) have demonstrated substantial potential in addressing automatic program repair (APR) tasks. However, the current evaluation of these models for APR tasks focuses solely on the limited context of the single function or file where the bug is located, overlooking the valuable information in the repository-level context. This paper investigates the performance of popular LLMs in handling repository-level repair tasks. We introduce RepoBugs, a new benchmark comprising 124 typical repository-level bugs from open-source repositories. Preliminary experiments using GPT3.5, based only on the function where the error is located, reveal that the repair rate on RepoBugs is only $22.58\%$ , significantly diverging from the performance of GPT3.5 on function-level bugs in related studies. This underscores the importance of providing repository-level context when addressing bugs at this level. However, the repository-level context offered by the preliminary method often proves redundant and imprecise, and easily exceeds the prompt length limit of LLMs. To solve this problem, we propose a simple and universal repository-level context extraction method (RLCE) designed to provide more precise context for repository-level code repair tasks. Evaluations of three
|
| 68 |
+
|
| 69 |
+
Jingzheng Wu and Xiang Ling are the corresponding authors.
|
| 70 |
+
|
| 71 |
+
Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.
|
| 72 |
+
|
| 73 |
+
ICSE'46, April 2024, Lisbon, POR
|
| 74 |
+
|
| 75 |
+
© 2024 Copyright held by the owner/author(s). Publication rights licensed to ACM.
|
| 76 |
+
|
| 77 |
+
ACM ISBN 978-x-xxxxx-xxxxx-x/YY/MM
|
| 78 |
+
|
| 79 |
+
https://doi.org/10.1145/3639478.3647633
|
| 80 |
+
|
| 81 |
+
mainstream LLMs show that RLCE significantly enhances the ability to repair repository-level bugs. The improvement reaches a maximum of $160\%$ compared to the preliminary method. Additionally, we conduct a comprehensive analysis of the effectiveness and limitations of RLCE, along with the capacity of LLMs to address repository-level bugs, offering valuable insights for future research.
|
| 82 |
+
|
| 83 |
+
# ACM Reference Format:
|
| 84 |
+
|
| 85 |
+
Yuxiao Chen, Jingzheng Wu, Xiang Ling, Changjiang Li, Zhiqing Rui, Tianyue Luo, and Yanjun Wu. 2024. When Large Language Models Confront Repository-Level Automatic Program Repair: How Well They Done?. In Proceedings of 2024 IEEE/ACM 46th International Conference on Software Engineering (ICSE'46). ACM, Lisbon, POR, 13 pages. https://doi.org/10.1145/3639478.3647633
|
| 86 |
+
|
| 87 |
+
# 1 INTRODUCTION
|
| 88 |
+
|
| 89 |
+
Automatic program repair (APR) is an important challenge in software engineering, helping programmers significantly reduce debugging costs. Many researchers have explored various methods for APR, including pattern-based methods like TBar [17], SketchFix [9] and ErrDoc [29], and deep learning-based methods like CoCoNuT [21] and CURE [11]. Recently, the excellent generation ability of LLMs has brought new potential solutions for APR tasks. Many related studies have shown that LLMs are highly competitive in processing APR tasks [4, 10, 26, 27], even surpassing previously optimal methods.
|
| 90 |
+
|
| 91 |
+
However, the current evaluation of APR tasks using LLMs relies solely on the limited context of the single function or file where the bug is located. Bugs in programs can be categorized into two groups based on the size of the context they depend on for repair: function-level and repository-level [?]. For function-level bugs, the correct repair requires only providing the function where the
|
| 92 |
+
|
| 93 |
+
error is located. However, due to the widespread use of modular programming in software engineering [25, 28], there are often complex interactions or dependencies between multiple code files. These relationships can easily result in repository-level bugs, such as interface inconsistency, incorrect error handling, global variable abuse, race conditions [22], and more. Such repair tasks often require providing a broader repository context to repair tools. The performance of LLMs on repository-level repair tasks remains underexplored.
|
| 94 |
+
|
| 95 |
+
Our work in this paper aims to explore the performance of current popular LLMs in addressing this issue. However, the most pressing challenge is the lack of a suitable dataset. Existing datasets are either not built at the repository level, such as QuixBugs [16], or cannot accurately restore scenarios of repository-level bugs, such as Defects4J [12]. Furthermore, datasets created too early pose a potential risk of data leakage, since they may have been used as training data for LLMs. To address this challenge, we propose a new benchmark called RepoBugs, specifically designed for evaluating repository-level APR tasks. It is built on popular open-source repositories from GitHub and contains 124 typical repository-level bugs.
|
| 96 |
+
|
| 97 |
+
We adopt ChatGPT, from the current popular LLMs, for preliminary experiments. The experimental results show that if the preliminary method only uses the function where the error is located as context, the repair rate on RepoBugs is only $22.58\%$ . This result differs significantly from the repair rate of ChatGPT on function-level bugs evaluated in other related studies [4, 10, 26, 27]. Figure 1 shows a simple example. When the preliminary method only provides function-level context, ChatGPT cannot perform the repair correctly. However, when we provide the complete repository as context, ChatGPT produces the correct repair result. This indicates that providing repository-level context is helpful when LLMs deal with repository-level bugs.

When the repository is small, we can employ a straightforward approach and use the entire repository as the context. However, repositories can be very large, and the input prompt for LLMs has an upper limit, such as the maximum of 4,096 tokens for ChatGPT. In addition, not all code in the repository is useful for the current repair task; most of it may be redundant information that interferes with the attention of the model. Therefore, it is necessary to provide precise context for the repair task of LLMs.

In the field of code generation, extracting accurate context from code repositories is also an important challenge, and many studies have provided solutions to this problem [6, 20, 33]. The methods used in these studies are roughly similar: they first segment the repository into slices and then obtain the most relevant fragments as context based on similarity. We call this the slice-similarity method. However, the code segments obtained by relying on similarity differ from the code segments that programmers refer to when fixing errors. Therefore, we believe that the code segments acquired through the slice-similarity method may not be well-suited for APR tasks, which we also demonstrate in subsequent experiments.
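The slice-similarity approach described above can be summarized in a few lines. The sketch below is a simplified reading of that family of methods (the slice granularity, the token estimate, and the `embed` function are assumptions; the cited studies each use their own variants): the repository is cut into fixed-size slices, every slice is embedded, and the slices most similar to the buggy code are packed into the prompt until the token budget is reached.

```python
import numpy as np

def slice_similarity_context(repo_files, buggy_code, embed, max_tokens=3000, slice_lines=20):
    """Rank fixed-size repository slices by cosine similarity to the buggy code.

    repo_files: {path: file content}; embed: callable mapping text to a 1-D vector.
    """
    slices = []
    for path, text in repo_files.items():
        lines = text.splitlines()
        for i in range(0, len(lines), slice_lines):
            slices.append((path, "\n".join(lines[i:i + slice_lines])))

    def cosine(a, b):
        return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b) + 1e-8))

    query = embed(buggy_code)
    ranked = sorted(slices, key=lambda s: cosine(query, embed(s[1])), reverse=True)

    context, used = [], 0
    for path, chunk in ranked:
        cost = len(chunk.split())                 # rough token estimate
        if used + cost > max_tokens:
            break
        context.append(f"# from {path}\n{chunk}")
        used += cost
    return "\n\n".join(context)
```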
|
| 98 |
+
|
| 99 |
+
In this paper, we propose a simple and universal repository-level context extraction method (RLCE) that can extract more precise context for repository-level code repair tasks. RLCE starts with the bug location and constructs specialized prompts for handling
|
| 100 |
+
|
| 101 |
+

|
| 102 |
+
Figure 1: A simple example of using ChatGPT to handle repository-level bugs. The bug type is interface inconsistency: the number of arguments in the call to the power function is inconsistent with the function definition in utils.py. The left figure shows the reply of ChatGPT when only the function where the bug is located is provided as context. The right figure shows the reply after the repository context is added.
|
| 103 |
+
|
| 104 |
+

|
| 105 |
+
|
| 106 |
+
repository-level repair tasks for LLMs by parsing repository structures, filtering code fragments, and adding auxiliary information. We evaluate three mainstream LLMs separately, and the experimental results show that, compared to the preliminary method, the context provided by RLCE significantly enhances the ability of LLMs to handle repository-level bugs, with a maximum improvement of $160\%$ . In addition, we conduct a comprehensive analysis of the effectiveness and limitations of RLCE, as well as of the ability of LLMs to handle repository-level bugs, providing insights for future research. Our main contributions are summarized as follows:
|
| 107 |
+
|
| 108 |
+
- We conduct an initial investigation of the performance of popular LLMs on repository-level APR tasks. The repair success rates of the preliminary method for GPT3.5 and GPT4 are only $22.58\%$ and $41.13\%$ , respectively.
|
| 109 |
+
- We introduce a new benchmark, RepoBugs, which is built on popular open-source repositories from GitHub and contains 124 typical repository-level bugs. To the best of our knowledge, this is the first benchmark specifically designed for repository-level program repair.
|
| 110 |
+
- The repair rate of the preliminary method is unsatisfactory, and the length of the repository often exceeds the prompt length limit of LLMs. To address these problems, we propose a simple and universal method RLCE, which provides more precise context for APR tasks and achieves over $100\%$ improvement in repair rates on all experimental models compared to the preliminary method.
|
| 111 |
+
|
| 112 |
+
# 2 BENCHMARK CONSTRUCTION
|
| 113 |
+
|
| 114 |
+
To effectively evaluate the performance of LLMs on repository-level APR tasks, the required dataset should meet the following requirements:
|
| 115 |
+
|
| 116 |
+
- Each bug needs to be based on a repository context environment;
|
| 117 |
+
- Bug fixing requires utilizing repository-level context;
|
| 118 |
+
- The creation time of the repository must be later than the training-data cutoff of current popular LLMs.
|
| 119 |
+
|
| 120 |
+
Moreover, conventional approaches to dataset construction, such as automatic code disruption or crawling open-source repositories, struggle to accurately control bug types and to meet stringent dataset quality requirements. In light of these challenges, we present a novel benchmark dataset named RepoBugs. It is derived from crawled open-source Python repositories and crafted through expert manual disruption, and it comprises 124 repository-level bugs of the interface inconsistency type. Although the selection of error types is narrow, it is highly representative, given that interface inconsistency errors constitute the most prevalent repository-level issues [35]. In the following, we describe in detail the methods employed for open-source repository collection and code disruption.
|
| 121 |
+
|
| 122 |
+
# 2.1 Dataset Collection
|
| 123 |
+
|
| 124 |
+
We collect repositories from open-source projects on GitHub. To minimize the risk of leakage due to the repositories having been used as training data for LLMs, we set the search date condition to later than October 1, 2021, and increase the number of crawled repositories. In this paper, we primarily focus on conceptual validation using the Python programming language, so we constrain the repositories considered in our work to those written in Python. We also require that each repository is no larger than 1MB and has at least 2,000 stars, and we filter out repositories with fewer than 4 Python files so that the cross-file characteristics of repository-level bugs are adequately captured. In the end, we obtain 11 repositories that meet these criteria; detailed information is presented in Table 1.
|
| 125 |
+
|
| 126 |
+
# 2.2 Dataset Generation
|
| 127 |
+
|
| 128 |
+
The bugs in RepoBugs are manually injected by experts with extensive programming experience. Based on the characteristics of interface inconsistency errors, we designate the function where a call occurs as the main function and the called function as the context function. Drawing on existing research on repository-scale program bugs [7, 15, 34], we design six typical disruption rules for main and context functions, as follows:
|
| 129 |
+
|
| 130 |
+
- NRV: Inconsistency in the number of return values between the context function and the main function.
|
| 131 |
+
- NP: Inconsistency in the number of input parameters between the main function and the context function.
|
| 132 |
+
- ORV: Inconsistency in the order of return parameters between the main function and the context function.
|
| 133 |
+
- OP: Inconsistency in the order of input parameters between the main function and the context function.
|
| 134 |
+
|
| 135 |
+
<table><tr><td>Index</td><td>Repo</td><td>Date</td><td>File</td><td>Sample</td></tr><tr><td>1</td><td>developer</td><td>2023/5/13</td><td>13</td><td>10</td></tr><tr><td>2</td><td>tiktoken</td><td>2022/12/1</td><td>16</td><td>11</td></tr><tr><td>3</td><td>gpt-migrate</td><td>2023/6/24</td><td>18</td><td>10</td></tr><tr><td>4</td><td>starcoder</td><td>2023/4/24</td><td>7</td><td>12</td></tr><tr><td>5</td><td>shell_gpt</td><td>2023/1/18</td><td>16</td><td>10</td></tr><tr><td>6</td><td>consistency_models</td><td>2023/2/26</td><td>23</td><td>12</td></tr><tr><td>7</td><td>musiclm-pytorch</td><td>2023/1/30</td><td>4</td><td>13</td></tr><tr><td>8</td><td>MAE-pytorch</td><td>2021/11/1</td><td>13</td><td>12</td></tr><tr><td>9</td><td>poe-api</td><td>2023/5/10</td><td>12</td><td>12</td></tr><tr><td>10</td><td>ijepa</td><td>2023/6/13</td><td>15</td><td>12</td></tr><tr><td>11</td><td>CommandlineConfig</td><td>2022/9/19</td><td>4</td><td>10</td></tr></table>
|
| 136 |
+
|
| 137 |
+
Table 1: Repository information in the RepoBugs dataset. Index represents the repository number. Repo represents the name of the repository. Date represents the date when the repository was created. File represents the total number of Python source files. Sample represents the number of test samples extracted from each repository.
|
| 138 |
+
|
| 139 |
+
- CRV: Inconsistency between the return values of the context function and what the main function requires.
|
| 140 |
+
- CP: Inconsistency between the input parameters passed by the main function and what the context function requires.
|
| 141 |
+
|
| 142 |
+
During the disruption process, we ensure that all disruptions involve interactions between two or more functions in the repository and that they do not introduce syntax errors that would fail the Python interpreter. To simplify the process, all disruptions are confined to a single line. As a result, we obtain a total of 124 bugs in RepoBugs.
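
To make the disruption rules concrete, the following is a minimal, hypothetical illustration (not taken from RepoBugs) of an NP-type disruption: a single call-site line is edited so that the argument count no longer matches the definition of the context function, while the file still parses correctly.

```python
# utils.py -- the context function (hypothetical example)
def power(base, exponent):
    return base ** exponent

# main.py -- the main function; only one line is changed by the disruption
from utils import power

def compute():
    # disrupted line: one argument was dropped, so the call no longer matches
    # the definition above (NP: inconsistency in the number of input parameters)
    result = power(2)          # original line: result = power(2, 3)
    print(result)
```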
|
| 143 |
+
|
| 144 |
+
# 3 PROPOSED FRAMEWORK
|
| 145 |
+
|
| 146 |
+
# 3.1 Overall Framework
|
| 147 |
+
|
| 148 |
+
Using LLMs to complete APR tasks at the repository level can be seen as a generation problem: the repaired code $F'$ for $F$ is generated based on the function $F$ where the error is located and the context $C$ at the repository level, which can be described as $F' = M(C, F)$ , where $M$ represents the large language model used. In this step, it is crucial to obtain the repository context $C$ , and our goal is to provide more accurate context $C$ for repair tasks and generate prompts for LLMs. Figure 2 shows the overview of our repository-level context extraction method (RLCE). Specifically, we first use a context retriever to retrieve repository code fragments related to repair tasks (Section 3.2). Subsequently, a comprehensive large language model prompt is generated by incorporating additional information, such as appended summaries and slices (Section 3.3).
|
| 149 |
+
|
| 150 |
+
# 3.2 Context Retriever
|
| 151 |
+
|
| 152 |
+
The core of RLCE lies in addressing the questions of where to retrieve from the repository and what kind of context to obtain. To achieve this objective, we design and implement a context retriever, which is a static code analysis tool capable of automatically parsing the repository into code segments based on its structure. It retrieves segments relevant to the repair task based on error localization information. Given that repository projects often have
|
| 153 |
+
|
| 154 |
+

|
| 155 |
+
Figure 2: The overview of our repository-level context extraction method (RLCE).
|
| 156 |
+
|
| 157 |
+
complex structures due to dependencies among files, our tool must be able to analyze the structure of the repository to obtain a clear picture of the relationships between its components. The overall structure of the context retriever is illustrated in Figure 2 and consists of two key steps: (1) parsing repository files and constructing the project structure tree, and (2) searching the project structure tree based on the error location to obtain the required code segments.
|
| 158 |
+
|
| 159 |
+
Build project structure tree: During the construction of the project structure tree, our primary focus revolves around five types of entity nodes, namely: directories, files, classes, functions, and global variables. The connections between these nodes adhere to the original structural relationships within the repository. For example, the partial project structure tree of the developer repository is illustrated in Figure 2. Specifically, a project structure tree originates from a root node, with its child nodes encompassing subdirectories and files under the root directory of the repository. The child nodes of file entities include globally defined variables, classes, and functions. The leaf nodes of the project structure tree are restricted to function nodes or variable nodes, encompassing the code where functions or variables are defined. In addition to structural information, for the sake of facilitating subsequent retrieval processes, if a file calls functions, variables, or classes defined in another file, markers need to be placed on the file node for reference.
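
As a rough sketch of this step, the snippet below uses Python's standard ast module to collect, for every source file, its top-level functions, classes, and global variables. It is our simplified illustration (a flat file list, without the cross-file call markers), not the authors' implementation.

```python
import ast
from pathlib import Path

def build_project_structure_tree(repo_root: str) -> dict:
    """Simplified sketch: one root node, one node per .py file, and one entity
    node per top-level function, class, or global variable definition."""
    root = {"type": "directory", "name": repo_root, "files": []}
    for path in sorted(Path(repo_root).rglob("*.py")):
        source = path.read_text(encoding="utf-8")
        file_node = {"type": "file", "path": str(path), "entities": []}
        for node in ast.parse(source).body:
            if isinstance(node, (ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef)):
                kind = "class" if isinstance(node, ast.ClassDef) else "function"
                file_node["entities"].append(
                    {"type": kind, "name": node.name,
                     "code": ast.get_source_segment(source, node)})
            elif isinstance(node, ast.Assign):  # globally defined variables
                file_node["entities"].append(
                    {"type": "global_variable", "name": None,
                     "code": ast.get_source_segment(source, node)})
        root["files"].append(file_node)
    return root
```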
|
| 160 |
+
|
| 161 |
+
Retrieve code segments: As illustrated in Figure 2, the errors for the targeted code fix task are localized within one or several lines of code, referred to as the "error location". Before retrieval, the context retriever tool needs to analyze and extract the functions and global variables called within the error location, which we term Error-Invoking Functions (EIF). Subsequently, we define four types of context sources to determine where the retriever should extract code segments from the project structure tree as part of the context:
|
| 162 |
+
|
| 163 |
+
- Definitions of EIF: Retrieve code segments containing the definitions of the extracted Error-Invoking Functions within the repository scope.
|
| 164 |
+
- Callers of EIF: Search other occurrences of the Error-Invoking Function within the repository (excluding the error location) to obtain code segments containing their calling locations.
|
| 165 |
+
- Error Function (EF): The function containing the error location.
|
| 166 |
+
- Callers of EF: Examine if the Error Function is called elsewhere in the repository, and if so, retrieve code segments containing the calling locations.
|
| 167 |
+
|
| 168 |
+
If a particular context source is absent, a null value is returned during the retrieval process. Formally, the context retriever constructs a project structure tree $T$ for the repository and gathers a collection of code segments $C_{repo} = R(EL,T)$ , where $EL$ denotes the error location and $C_{repo}$ encompasses all code segments from the four context sources.
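
A minimal sketch of this retrieval step, operating on the structure produced above, might look as follows; the name matching is deliberately crude string matching and only approximates the static analysis performed by the actual context retriever.

```python
import ast

def extract_called_names(error_line: str) -> set:
    """Names invoked in the error line (the Error-Invoking Functions, EIF)."""
    return {node.func.id for node in ast.walk(ast.parse(error_line.strip()))
            if isinstance(node, ast.Call) and isinstance(node.func, ast.Name)}

def retrieve_code_segments(error_line: str, error_function_name: str, tree: dict) -> dict:
    """Sketch of C_repo = R(EL, T): collect the four context sources; sources
    that do not exist in the repository simply stay empty."""
    eif_names = extract_called_names(error_line)
    segments = {"definitions_of_eif": [], "callers_of_eif": [],
                "error_function": None, "callers_of_ef": []}
    for file_node in tree["files"]:
        for entity in file_node["entities"]:
            name, code = entity.get("name"), entity.get("code") or ""
            if name in eif_names:
                segments["definitions_of_eif"].append(code)    # Definitions of EIF
            if name == error_function_name:
                segments["error_function"] = code               # Error Function (EF)
                continue                                        # its own body is not a caller
            if any(f"{eif}(" in code for eif in eif_names):
                segments["callers_of_eif"].append(code)         # Callers of EIF
            if f"{error_function_name}(" in code:
                segments["callers_of_ef"].append(code)          # Callers of EF
    return segments
```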
|
| 169 |
+
|
| 170 |
+
# 3.3 Prompt Composer
|
| 171 |
+
|
| 172 |
+
The primary function of the prompt composer is to further process the code segments obtained by the context retriever and merge them with templates from different prompt strategies to generate the final prompt for the large language model. This can be represented as $C = P(C_{repo})$ , where $C$ represents the repository-level context in the final prompt, and $P$ denotes the process of handling the collection of code segments. As depicted in Figure 2, for each context source, our processing approach is as follows:
|
| 173 |
+
|
| 174 |
+
Definitions of EIF: For this part, we attach extra semantic information to each EIF to enhance the model's understanding of the function's purpose and parameter meanings. The semantic information comprises two components: the function signature and a function summary. The function signature includes the function name, the type of each parameter in the parameter list, and the type of the return value. The function summary gives an overview of the main functionality. Any model capable of generating code summaries and signatures can be used; for simplicity, and leveraging the strong performance of LLMs in code summarization, our experiments use the LLM itself as the generation model.
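
For instance, the extra semantic information could be produced by querying the same LLM once per retrieved definition, along the following lines; the prompt wording, the helper name, and the llm callable are our own placeholders, not the paper's exact prompt.

```python
def annotate_eif_definition(definition_code: str, llm) -> str:
    """Prepend an LLM-generated signature and one-sentence summary to an EIF definition."""
    prompt = (
        "For the following Python function, give its signature (name, the type of "
        "each parameter, and the return type) and a one-sentence summary of what it does:\n"
        + definition_code
    )
    semantic_info = llm(prompt)   # llm: any callable wrapping a chat/completion API
    commented = "\n".join("# " + line for line in semantic_info.splitlines())
    return commented + "\n" + definition_code
```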
|
| 175 |
+
|
| 176 |
+
Callers of EIF: For this part, we employ a slicing approach for processing. Since the error location contains calls to EIF, calls to EIF in other locations within the repository are likely to have valuable references for error correction. Therefore, the most useful information in code segments from Callers of EIF for the repair task is primarily concentrated around the statements where EIF are invoked. To minimize the introduction of excessive redundant information, we adopt a slicing approach, preserving the content of the statements before and after the invocation, each with a context window of five lines.
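
A sketch of this slicing step (with a hypothetical helper name) is shown below: for each call site of an EIF, only five lines of context on either side of the invoking statement are kept.

```python
def slice_around_calls(segment_code: str, callee_name: str, window: int = 5) -> list:
    """Return one slice per call to `callee_name`, keeping `window` lines of
    context before and after the invoking statement."""
    lines = segment_code.splitlines()
    slices = []
    for i, line in enumerate(lines):
        if f"{callee_name}(" in line:
            start, end = max(0, i - window), min(len(lines), i + window + 1)
            slices.append("\n".join(lines[start:end]))
    return slices
```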
|
| 177 |
+
|
| 178 |
+
Callers of EF: Providing contextual information about how the Error Function is used elsewhere may help LLMs better understand it. Therefore, similar to Callers of EIF, this part employs the same slicing method.
|
| 179 |
+
|
| 180 |
+
Error Function (EF): No additional processing is applied to the Error Function; it is incorporated directly into the prompt context.
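
Putting these pieces together, the prompt composer can be sketched as a simple template filler that follows the prompt structure shown in Figure 6 of the appendix; the section titles and dictionary layout below are our own simplification, not the released implementation.

```python
def compose_prompt(instruction: str, context_sections: dict, error_function: str,
                   example: str = "") -> str:
    """Sketch of C = P(C_repo) plus template filling (cf. Figure 6).
    context_sections maps a source name (e.g. 'Definitions of EIF') to a list of
    processed code snippets; empty sources are silently skipped."""
    context = "\n".join(
        f"# {title}\n" + "\n".join(chunks)
        for title, chunks in context_sections.items() if chunks
    )
    prompt = instruction + "\n"
    if example:                                   # only used by the one-shot strategy
        prompt += "#####Example\n" + example + "\n"
    prompt += "########Context\n" + context + "\n"
    prompt += "#########Function fragment\n" + error_function + "\n"
    prompt += "#####Response:"
    return prompt
```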
|
| 181 |
+
|
| 182 |
+
It is worth noting that, in this paper, we primarily focus on conducting our experiments using programs written in the Python language. However, the design principles of the context retriever can be extended to other programming languages. Examples of our method of repairing bugs in the Java programming language can be found in Appendix A.2.2.
|
| 183 |
+
|
| 184 |
+
# 4 EVALUATION SETUP
|
| 185 |
+
|
| 186 |
+
# 4.1 Models
|
| 187 |
+
|
| 188 |
+
We select three representative models from the currently most popular LLMs for our experiments.
|
| 189 |
+
|
| 190 |
+
GPT3.5 [24] is developed by OpenAI. It has strong language comprehension and generation capabilities through large-scale data training. The model we select in our experiment is GPT-3.5 Turbo, which supports a maximum token sequence length of 4K. The training data is up to September 2021.
|
| 191 |
+
|
| 192 |
+
PaLM2 [1] is a new-generation large language model launched by Google, with advanced reasoning and natural language generation abilities. In our experiment, we select the text-bison-001
|
| 193 |
+
|
| 194 |
+
model, which supports a maximum of 8K input tokens and 1K output tokens, with a knowledge cutoff of mid-2021.
|
| 195 |
+
|
| 196 |
+
GPT4 [23] stands as one of the most powerful LLMs currently released by OpenAI. In handling intricate tasks, GPT-4 exhibits greater reliability and creativity. For our experiments, we select the gpt-4-0613 model, which supports a maximum token length of 8K and was trained with data up to September 2021.
|
| 197 |
+
|
| 198 |
+
As these models are not open-source, we obtain responses through their APIs.
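
The temperature parameter is fixed to 0 in our experiments (see Section 6). The snippet below is a minimal sketch of such an API call using the legacy openai Python client; the client version and the wrapper function are assumptions for illustration, not the paper's actual experiment harness.

```python
import openai  # legacy (pre-1.0) OpenAI Python client; assumes openai.api_key is configured

def query_model(prompt: str, model: str = "gpt-3.5-turbo") -> str:
    """Send one repair prompt and return the model's reply, with deterministic decoding."""
    response = openai.ChatCompletion.create(
        model=model,
        messages=[{"role": "user", "content": prompt}],
        temperature=0,   # temperature fixed to 0 so repeated runs give stable outputs
    )
    return response["choices"][0]["message"]["content"]
```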
|
| 199 |
+
|
| 200 |
+
# 4.2 Prompt Generation
|
| 201 |
+
|
| 202 |
+
Due to the significant impact of prompt engineering on the performance of LLMs [19, 32], to fully evaluate their performance, we adopt the following three most commonly used prompt strategies in various tasks (for simplicity, specific prompt designs can be found in the Appendix A.1), including zero-shot, one-shot, and chain of thought (CoT).
|
| 203 |
+
|
| 204 |
+
Zero-shot [3]: This strategy provides the model with no examples during inference, only natural language instructions describing the task. It minimizes prompt length and leaves more capacity for the repair context, but the model may have difficulty understanding the task format. We use two types of instructions, Simple and Detail, in the experiment. The Simple instruction describes the task in extremely concise language, while the Detail instruction is more specific, asking the large language model to assume it is a programmer fixing a bug from the repository using the provided context.
|
| 205 |
+
|
| 206 |
+
One-shot [3]: This strategy is similar to zero-shot but additionally provides an example alongside the natural language instructions that describe the task. In the experiment, we use the same instruction as the Detail variant of the zero-shot strategy and add a complete repair example.
|
| 207 |
+
|
| 208 |
+
CoT [30]: Previous studies have shown that the CoT strategy can significantly enhance the reasoning ability of LLMs. The process of automatically fixing bugs can be seen as an inference process. To investigate the efficacy of the CoT strategy in the field of repository-level APR, we propose a straightforward zero-shot-CoT [14] approach in this paper. This approach decomposes the repair task into three distinct logical steps: first, identifying the root cause of errors by integrating contextual information; second, devising targeted solutions based on the identified error causes; and finally, generating the comprehensive repaired code. Our instructional prompt guides the model to systematically engage in these three steps, providing explicit error explanations, repair strategies, and the resultant fixed code, respectively.
|
| 209 |
+
|
| 210 |
+
# 4.3 Compared Repair Baselines
|
| 211 |
+
|
| 212 |
+
Preliminary method: A key contribution of our RLCE is the extraction of more precise repository-level context for repair tasks. To demonstrate the effectiveness of our method, we implement a preliminary method as the baseline that only provides the function itself where the error is located as the context. This simulates the existing preliminary method that leverages LLMs to address function-level APR tasks.
|
| 213 |
+
|
| 214 |
+
Slice-similarity method: To explore whether the slice-similarity method applies to the field of APR, our experiment reproduces the Retrieval Model used in the RepoCoder [33] method. Specifically, we set the slicing window to 10, use a sparse bag-of-words model as the vector representation, and use the Jaccard index to calculate the similarity between the segment where the error is located and the other segment vectors in the repository. Finally, the 5 segments with the highest similarity are selected as the context.
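
A compact sketch of this baseline, with hypothetical helper names, is given below: files are cut into 10-line windows, each window is represented as a sparse bag of whitespace-separated tokens, and the five windows with the highest Jaccard similarity to the error segment are returned as context.

```python
def jaccard(a: set, b: set) -> float:
    return len(a & b) / len(a | b) if (a | b) else 0.0

def slice_similarity_context(repo_files: dict, error_segment: str,
                             window: int = 10, top_k: int = 5) -> list:
    """repo_files maps file paths to file contents; returns the top_k slices
    most similar to the segment containing the error."""
    query = set(error_segment.split())
    scored = []
    for _path, source in repo_files.items():
        lines = source.splitlines()
        for start in range(0, len(lines), window):
            chunk = "\n".join(lines[start:start + window])
            scored.append((jaccard(set(chunk.split()), query), chunk))
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return [chunk for _, chunk in scored[:top_k]]
```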
|
| 215 |
+
|
| 216 |
+
# 4.4 Evaluation Metrics
|
| 217 |
+
|
| 218 |
+
Due to the high cost of running the repositories and designing test cases, as well as the diversity of valid bug-fixing solutions, repair correctness cannot be assessed through exact matching (an example can be found in Appendix A.2.1). Therefore, to ensure the accuracy of the evaluation results, we ultimately adopt a manual evaluation method, with results provided by two experts who each have more than 5 years of experience in Python programming. We divide the evaluation into four criteria to fully assess the responses of the large model. The specific criteria are as follows; during evaluation, a response is marked as 1 if it meets a criterion and 0 otherwise:
|
| 219 |
+
|
| 220 |
+
- Related reply: The return result is not empty and is related to the repair task in the instruction.
|
| 221 |
+
- Correct format: The returned result is in the expected format and the content is complete without any duplicate or redundant content.
|
| 222 |
+
- Correct repair: The returned result contains the correct fix for the error.
|
| 223 |
+
- Correct explanation: This item is specialized for the CoT prompt strategy, and the criteria are that the returned result includes a correct explanation of the cause of the error.
|
| 224 |
+
|
| 225 |
+
In the specific evaluation process, the two experts first discuss the evaluation criteria in detail to reach a consistent understanding. They then independently evaluate all the experimental results. Their evaluations are subsequently compared, and any discrepancies are resolved through further discussion and reassessment by both experts. This iterative process ensures a unanimous final result.
|
| 226 |
+
|
| 227 |
+
# 5 RESULTS AND ANALYSIS
|
| 228 |
+
|
| 229 |
+
# 5.1 RLCE Outperforms the Baselines
|
| 230 |
+
|
| 231 |
+
We present the results of our experiments in Table 2, where each cell represents the proportion of samples passing the corresponding evaluation metric among all samples. Our RLCE method exhibits a significant improvement over the other two baselines. Regarding the relevance of model responses and the correctness of format, all methods and models generally generate responses that are relevant to the questions and conform to the expected format. In terms of repair rate, we observe that all models perform poorly when employing the preliminary method. For instance, even the superior GPT4 achieves only a $41.13\%$ success rate. This indicates that even state-of-the-art LLMs struggle to accomplish repository-level repair tasks with only limited function-level context, as it fails to provide sufficient information for the repair task. After
|
| 232 |
+
|
| 233 |
+
using the RLCE method to provide repository-level context, the repair rate improves by over $100\%$ compared to the preliminary method, with GPT3.5 showing the largest gain of $160\%$ . This underscores the necessity of repository-level context for such code repair tasks.
|
| 234 |
+
|
| 235 |
+
Additionally, across the experiments with the three models, the repair rate of the slice-similarity method does not surpass our RLCE method. For example, in the GPT4 model, the respective improvement rates compared to the preliminary method are $20\%$ and $100\%$ . This suggests that the enhancement provided by the context of this method is limited. The slice-similarity method exploits code repetition in the repository, retrieving code segments similar to the error location to aid in error repair. However, relying solely on similar code makes it challenging to reconstruct the actual execution-time context before and after the error location, leading LLMs to struggle in correctly inferring the reasons for errors.
|
| 236 |
+
|
| 237 |
+
# 5.2 Impact of Prompt Strategies on RLCE
|
| 238 |
+
|
| 239 |
+
It is worth noting that the performance of the RLCE method is influenced by the prompt strategy used. As shown in Table 2, a comparison across models reveals that GPT3.5 exhibits the most pronounced performance fluctuation due to the prompting strategy. Specifically, the one-shot strategy yields a repair rate improvement of over $37\%$ compared to the Simple variant of the zero-shot strategy. In practical repair tasks, it is desirable for the model to exhibit low sensitivity to prompting strategies, because stable responses under similar contexts reduce the uncertainty of repair outcomes and mitigate the cost of searching for an appropriate prompting strategy. In summary, considering both response stability and accuracy, GPT4 demonstrates the best performance.
|
| 240 |
+
|
| 241 |
+
# 5.3 Correct Explanation is Important for CoT
|
| 242 |
+
|
| 243 |
+
Analyzing the data in Table 2, we find that the repair rate of all models employing the CoT strategy does not meet our expectations, with a notable decrease in performance on PaLM2 in particular. GPT4, which exhibits the best overall performance, also experiences a slight decline in repair accuracy after adopting the CoT strategy. To investigate the reasons behind this, we categorize and statistically analyze the repair outcomes of the three models under both the Detail and CoT methods, along with the explanations generated by CoT. The results are presented in Figure 3. The data show a significant correlation between repair correctness and explanation correctness for all models. In cases where the CoT method repairs successfully (clusters whose second uppercase letter is T), the proportion of correct explanations is exceptionally high, and this trend becomes more pronounced as the inference capability of the model improves. The statistics for GPT4 indicate that all successfully repaired cases also have accurate explanations. In the three TF clusters, corresponding to cases where the Detail method repairs successfully while the CoT method does not, the proportion of incorrect explanations is significantly higher than that of correct ones. From this analysis, we hypothesize that in the CoT method, incorrect explanations can introduce significant
|
| 244 |
+
|
| 245 |
+
<table><tr><td rowspan="3">Model</td><td rowspan="3">Metric</td><td colspan="6">Method</td></tr><tr><td rowspan="2">Preliminary</td><td rowspan="2">Slice-similarity</td><td colspan="4">RLCE</td></tr><tr><td>Simple</td><td>Detail</td><td>One-shot</td><td>CoT</td></tr><tr><td rowspan="4">GPT3.5</td><td>Related reply</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>0.9919</td></tr><tr><td>Correct format</td><td>0.9597</td><td>0.9516</td><td>0.9274</td><td>0.9516</td><td>0.9597</td><td>0.9597</td></tr><tr><td>Correct repair</td><td>0.2258</td><td>0.3387</td><td>0.4113</td><td>0.5645</td><td>0.5968</td><td>0.5161</td></tr><tr><td>Correct explanation</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.5284</td></tr><tr><td rowspan="4">PaLM2</td><td>Related reply</td><td>0.8871</td><td>0.8468</td><td>0.8387</td><td>0.8629</td><td>0.871</td><td>0.879</td></tr><tr><td>Correct format</td><td>0.8548</td><td>0.7903</td><td>0.7984</td><td>0.8064</td><td>0.8387</td><td>0.5242</td></tr><tr><td>Correct repair</td><td>0.2177</td><td>0.2419</td><td>0.4272</td><td>0.3952</td><td>0.4032</td><td>0.2742</td></tr><tr><td>Correct explanation</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.1774</td></tr><tr><td rowspan="4">GPT4</td><td>Related reply</td><td>0.9677</td><td>1</td><td>0.9919</td><td>0.9919</td><td>0.9839</td><td>1</td></tr><tr><td>Correct format</td><td>0.9677</td><td>0.9758</td><td>0.9839</td><td>0.9758</td><td>0.9839</td><td>1</td></tr><tr><td>Correct repair</td><td>0.4113</td><td>0.4919</td><td>0.7742</td><td>0.7581</td><td>0.8145</td><td>0.75</td></tr><tr><td>Correct explanation</td><td>-</td><td>-</td><td>-</td><td>-</td><td>-</td><td>0.7742</td></tr></table>
|
| 246 |
+
|
| 247 |
+
interference, leading to erroneous outcomes. Using models with enhanced inference capabilities or guiding models to generate more accurate explanations may potentially improve the repair rate.
|
| 248 |
+
|
| 249 |
+

|
| 250 |
+
Figure 3: Statistical categorization of repair outcomes using the detail and CoT methods for three models, along with CoT-generated explanations. The horizontal axis comprises four categories, each composed of two uppercase letters, T or F, representing whether the repair results using the Detail and CoT methods are correct (e.g., FT denotes all cases where Detail repair is incorrect and CoT repair is correct). The vertical axis represents the number of cases for each category. In each category, red indicates instances where the CoT method provided incorrect explanations, while green signifies correct explanations.
|
| 251 |
+
|
| 252 |
+
# 5.4 Validity of Context Sources
|
| 253 |
+
|
| 254 |
+
Our results indicate that the RLCE method can significantly enhance repository-level program repair. To explore the correlation between the four context sources described in Section 3.2, as well as the extra semantic information, and code repair performance, we conduct a set of ablation experiments. In these experiments, we remove each of the three context sources and the extra semantic information in turn, excluding EF because it contains the error location. The results are shown in Table 3. The models selected for the experiments are GPT3.5 and GPT4, which show the best overall performance, and all runs adopt the one-shot prompting strategy.
|
| 255 |
+
|
| 256 |
+
Table 2: Evaluation results of different models and methods on RepoBugs. The values in the cells represent the proportion of samples that passed the corresponding evaluation metrics out of the total number of samples. The cell data corresponding to the best-performing method in each row is bolded. Detailed descriptions of the Preliminary and Slice-similarity methods can be found in Section 4.3, both employing the one-shot prompting strategy.
|
| 257 |
+
|
| 258 |
+
<table><tr><td rowspan="2">Model</td><td colspan="4">Context Source</td><td colspan="3">Evaluation</td></tr><tr><td>Summa- rize</td><td>Callers of EF</td><td>Definitions of EIF</td><td>Callers of EIF</td><td>Related reply</td><td>Correct format</td><td>Correct repair</td></tr><tr><td rowspan="5">GPT3.5</td><td>✓</td><td>✓</td><td>✓</td><td>✘</td><td>1</td><td>0.9597</td><td>0.5806</td></tr><tr><td>-</td><td>✓</td><td>✘</td><td>✓</td><td>1</td><td>0.9355</td><td>0.2742</td></tr><tr><td>✓</td><td>✘</td><td>✓</td><td>✓</td><td>1</td><td>0.9597</td><td>0.5565</td></tr><tr><td>✘</td><td>✓</td><td>✓</td><td>✓</td><td>1</td><td>0.9597</td><td>0.5403</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>1</td><td>0.9597</td><td>0.5968</td></tr><tr><td rowspan="5">GPT4</td><td>✓</td><td>✓</td><td>✓</td><td>✘</td><td>0.9839</td><td>0.9839</td><td>0.7742</td></tr><tr><td>-</td><td>✓</td><td>✘</td><td>✓</td><td>0.9919</td><td>0.9677</td><td>0.5000</td></tr><tr><td>✓</td><td>✘</td><td>✓</td><td>✓</td><td>0.9839</td><td>0.9839</td><td>0.7661</td></tr><tr><td>✘</td><td>✓</td><td>✓</td><td>✓</td><td>0.9839</td><td>0.9839</td><td>0.7823</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>0.9839</td><td>0.9839</td><td>0.8145</td></tr></table>
|
| 259 |
+
|
| 260 |
+
Table 3: Comparison of the effects of different context sources in the provided context.
|
| 261 |
+
|
| 262 |
+
From the results in Table 3, it is evident that both models perform well in terms of response format and relevance. In terms of accuracy, the experimental groups utilizing the complete context
|
| 263 |
+
|
| 264 |
+
sources achieve the highest repair accuracy. However, the impact of different context sources on the repair results varies. When the Definitions of EIF context source is removed, both models exhibit a significant decrease in repair rates, with GPT3.5 dropping by more than half. This indicates that the Definitions of EIF source provides the most important information for the repair task. Moreover, the extra semantic information does not noticeably improve the success rate, suggesting that the useful information LLMs can extract from it is limited.
|
| 265 |
+
|
| 266 |
+
# 5.5 Error Types Affect Repair Effectiveness
|
| 267 |
+
|
| 268 |
+
In Section 2.2, we present six distinct disruption rules. Errors arising from these disruption rules often require different analytical approaches from LLMs to repair. To investigate how the repair rate of LLMs differs across error types, we categorize the results of the different prompt strategies by error category and compute the correction accuracy of the GPT-3.5 and GPT-4 models, as depicted in Figure 4.
|
| 269 |
+
|
| 270 |
+
Analysis of Figure 4 reveals a consistent trend in the performance of both models across error types. Both models perform worse on the ORV and CRV error types. Examining instances of these two types, we observe that, compared to the other categories, these errors are more subtle and challenging to repair, mainly for two reasons. First, ORV and CRV errors involve mismatched or incorrectly ordered values passed between interfaces, so identifying them requires a deep understanding of the specific meaning of each parameter and of the functionality of the function; by contrast, a mismatch in the number of parameters can often be identified through a simple syntactic check. Second, in real-world repositories, variable names in most cases align between parameters and arguments (we use 'parameter' for a variable named in the parenthesized list of a function definition, and 'argument' for the value used in a call of the function [13]), enabling the large language model to identify some obvious errors by comparing the parameter list at the definition with the argument list at the call site. In contrast, inconsistencies in return values, as in the ORV and CRV types, require the large language model to comprehend the functionality of the function and even the meaning of each variable passed between the two functions.
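
The contrast can be illustrated with a small hypothetical snippet (not drawn from RepoBugs): the NP-style call below is already exposed by comparing argument and parameter counts, whereas the ORV-style call is syntactically consistent and only the meaning of the returned values reveals the swap.

```python
# data_utils.py (hypothetical)
def load_dataset(path):
    rows = open(path).read().splitlines()
    split = int(0.8 * len(rows))
    return rows[:split], rows[split:]        # returns (train, test), in this order

# NP-type bug: the argument count differs from the definition, so a purely
# syntactic comparison of the call with the signature already flags it.
train, test = load_dataset()                  # missing the `path` argument

# ORV/CRV-type bug: the call matches the signature, so only understanding what
# each returned value means reveals that they are unpacked in the wrong order.
test, train = load_dataset("data.csv")        # return values swapped at the call site
```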
|
| 271 |
+
|
| 272 |
+
# 5.6 Long Prompt vs. Short Prompt
|
| 273 |
+
|
| 274 |
+
The prompt length of LLMs is subject to an upper limit. Does this imply that one should pack as much information from the repository as possible within that limit? We classify the experimental results of the GPT-3.5 and GPT-4 models under the one-shot method according to prompt length. The statistical results are illustrated in Figure 5. As the prompt length increases, both GPT3.5 and GPT4 exhibit a noticeable decline in repair accuracy, which reflects that the performance of LLMs varies when handling contexts of different scales. We posit that, for program repair tasks, a longer prompt may provide more information to the large language model, but it may also distract the model and increase the difficulty of repair. Therefore, a longer provided context does not lead to better
|
| 275 |
+
|
| 276 |
+
results; instead, the goal should be to provide precise context that increases the density of useful information for the APR task.
|
| 277 |
+
|
| 278 |
+
# 6 THREATS TO VALIDITY
|
| 279 |
+
|
| 280 |
+
Internal Validity: In our experiments, we select three major LLMs (GPT3.5, GPT4, and PaLM2). However, different models usually differ in various aspects such as parameter count, training data, and fine-tuning methods. These differences may impact the repair rate of our method. For instance, if a large language model is inadequately trained on a specific programming language, its performance for that language may be suboptimal. Additionally, in our experiments, we standardize the temperature parameter across all LLMs to 0, ensuring stable outputs when facing the same prompt. Increasing the temperature parameter leads to unstable model outputs, potentially influencing the final results.
|
| 281 |
+
|
| 282 |
+
External Validity: The most significant factor influencing external validity is the choice of datasets. Firstly, if the creation time of repositories in the dataset predates the cutoff time of the training data, there is a high probability that the repository has already been learned as part of the training data, potentially impacting the final results. Secondly, an increase in the scale and complexity of repositories may escalate the cost and difficulty of context retrieval, affecting the reliability of the results. Additionally, in our experiments, we primarily focus on the error type of interface inconsistency, as it is common at the repository level and can typify the language model's ability to handle repository-level errors. However, given that RepoBugs is the only dataset known to us specifically designed for repository-level error repair, the generalization of our approach to datasets involving other error types may vary.
|
| 283 |
+
|
| 284 |
+
# 7 RELATED WORK
|
| 285 |
+
|
| 286 |
+
Automatic program repair (APR) is a crucial research problem in the field of software engineering, representing a significant research direction to reduce the cost of software maintenance and enhance software reliability. Since the introduction of an APR framework by Arcuri and Yao in 2008 [2], the field of APR has undergone rapid development. Early approaches to APR predominantly relied on traditional methods; for instance, GenProg [31] employed an extended form of genetic programming to evolve program variants and validated the effectiveness of repairs through test cases. TBar [17] explored template-based APR methods, assessing the effectiveness of different repair patterns through experiments. LSRepair [18] addressed program errors by conducting real-time searches for repair components within existing code repositories.
|
| 287 |
+
|
| 288 |
+
In recent years, a substantial amount of research has shifted towards leveraging machine learning techniques, particularly deep learning, for program repair. Researchers generate repair solutions by learning from extensive repair data in code repositories. For example, DeepFix [8] utilized a multi-layer sequence-to-sequence neural network to fix common programming errors without relying on external tools for locating or repairing. SequenceR [5] combined the encoder/decoder architecture with a copying mechanism to overcome the challenge of large vocabulary in source code. CURE [11] integrated pre-trained GPT models for programming languages with translation models, introducing a Context-aware Targeted Search strategy. SGEPR [?] uses a novel intermediate
|
| 289 |
+
|
| 290 |
+

|
| 291 |
+
Figure 4: The results of various prompt strategies employed by GPT-3.5 and GPT-4 are statistically compiled in terms of correction accuracy based on error categories. The horizontal axis denotes the six categories of errors, with specific definitions provided in Section 2.2. The total number of samples corresponding to each category is enclosed in parentheses. Different colors of bars represent distinct methods, while the vertical axis represents the repair accuracy.
|
| 292 |
+
|
| 293 |
+

|
| 294 |
+
Figure 5: The relationship between different prompt lengths and repair accuracy is depicted through six subfigures. Each subfigure represents a distinct prompt strategy. All cases within each prompt strategy are evenly divided into four subsets based on prompt length (with a total dataset size of 124, resulting in 31 cases per subset). In each subfigure, bars represent the average length of tokens in the prompt, while the line graph illustrates the repair accuracy.
|
| 295 |
+
|
| 296 |
+
representation named sequence code property graph (SCPG) to model program semantic information. Recently, the remarkable comprehension and generation capabilities of LLMs have attracted widespread attention. Jiang et al. [10] compared ten code
|
| 297 |
+
|
| 298 |
+
language models and four deep learning APR techniques across four APR benchmarks. The experimental results demonstrated the competitive repair abilities of code language models.
|
| 299 |
+
|
| 300 |
+
# 8 CONCLUSION
|
| 301 |
+
|
| 302 |
+
In summary, we conduct a pioneering evaluation of the capabilities of major existing LLMs in handling repository-level repair tasks. We introduce a benchmark dataset, RepoBugs, along with a straightforward and versatile repository-level context extraction method, RLCE. RLCE, leveraging repository structure parsing and relevant context retrieval, offers more precise context for repository-level repair tasks. Experiments on the RepoBugs benchmark indicate that RLCE significantly enhances the performance of LLMs in repository-level program repair tasks. Furthermore, we conduct a detailed analysis of the experimental results from aspects such as context sources, error types, and prompt length, providing valuable insights for future research. RLCE has the potential to empower LLMs to offer efficient and accurate guidance for addressing errors encountered in actual development processes.
|
| 303 |
+
|
| 304 |
+
# ACKNOWLEDGMENTS
|
| 305 |
+
|
| 306 |
+
This paper is supported by the Strategic Priority Research Program of the Chinese Academy of Sciences under Grant No. XDA0320401 and the National Natural Science Foundation of China under No. 62202457. This paper is supported by YuanTu Large Research Infrastructure.
|
| 307 |
+
|
| 308 |
+
# A APPENDIX
|
| 309 |
+
|
| 310 |
+
# A.1 Prompt design
|
| 311 |
+
|
| 312 |
+
We prepare Figure 6, illustrating the design structure of prompts in our experiments. From top to bottom, it includes the task instruction, example (one-shot method), context retrieved by the RLCE method, and the Error Function.
|
| 313 |
+
|
| 314 |
+
Prompt Structure
|
| 315 |
+
```txt
|
| 316 |
+
{instruction}
|
| 317 |
+
#####Example
|
| 318 |
+
{example}
|
| 319 |
+
########Context
|
| 320 |
+
{context}
|
| 321 |
+
#########Function fragment
|
| 322 |
+
{error function}
|
| 323 |
+
#####Response:
|
| 324 |
+
```
|
| 325 |
+
|
| 326 |
+
Figure 6: Prompt structure of our experiment
|
| 327 |
+
|
| 328 |
+
For each prompt strategy, the designed instructions are depicted in Figure 7.
|
| 329 |
+
|
| 330 |
+
# A.2 Case Study
|
| 331 |
+
|
| 332 |
+
A.2.1 Diverse repair methods. In APR tasks, one bug often corresponds to multiple distinct valid repairs, which increases the difficulty of assessing the accuracy of model-generated fixes. Figure 8 illustrates an example: the error involves a missing value among those returned by the called function. The original correct line in the repository uses "_" to receive this value, while both the manual correction and the repair generated by the GPT3.5 model use "mask_C", effectively repairing the error.
|
| 333 |
+
|
| 334 |
+
A.2.2 RLCE for other program languages. As described in Section 3.3, RLCE is a simple and versatile method that can be applied to different programming languages. As illustrated in Figure 9, in this example, the bug arises from an incorrect parameter order during a function call. GPT-4 successfully repairs the bug based on the repository-level context provided by our RLCE method.
|
| 335 |
+
|
| 336 |
+
Simple
|
| 337 |
+
```txt
|
| 338 |
+
Please use context to fix the bug in the '{bug_line}' line in the function fragment:
|
| 339 |
+
```
|
| 340 |
+
|
| 341 |
+
Detail
|
| 342 |
+
```txt
|
| 343 |
+
You are a code programmer, your task is to fix a bug in the '{bug_line}' line of a function fragment from a code repository. Context is potentially useful contextual information provided to you, please make full use of it:
|
| 344 |
+
```
|
| 345 |
+
|
| 346 |
+
One-shot
|
| 347 |
+
```txt
You are a code programmer, your task is to fix a bug in a function fragment from a code repository. Context is potentially useful contextual information provided to you, please make full use of it:
# Here is an example of using context to repair a function fragment in another repository:
Context:
# utils.py
def square(a): return a ** 2
def add_numbers(a, b): return a + b
Function fragment:
# main.py
def main():
    result1 = add_numbers(3, 5)
    print("Result1:", result1)
    result2 = square(3, 2)
    print("Result2:", result2)
Response:
'Fixed code': # main.py
def main():
    result1 = add_numbers(3, 5)
    print("Result1:", result1)
    result2 = square(3)
    print("Result2:", result2)
Here are the context and function fragment you need to complete your task. The bug is located in the {bug_line} line of the function fragment:
```
|
| 359 |
+
|
| 360 |
+
Chain of Thought
|
| 361 |
+
```txt
|
| 362 |
+
You are a code programmer, your task is to fix a bug in the '{bug_line}' line of a function fragment from a code repository. You need to use the information provided in the context as a reference. Let's gradually solve this problem, first start with the error line and analyze the possible cause of the bug based on the function fragment and context, then provide a solution to the bug, and finally provide the fixed code:
|
| 363 |
+
```
|
| 364 |
+
|
| 365 |
+
Figure 7: Instructions for different prompt strategies
|
| 366 |
+
|
| 367 |
+
Repository Context
|
| 368 |
+
```python
|
| 369 |
+
def _sample_block_mask(self, b_size: tuple, acceptable_regions: list = None) -> tuple:
    ...
    return mask, mask_complement
|
| 371 |
+
```
|
| 372 |
+
|
| 373 |
+
Error Function
|
| 374 |
+
```python
|
| 375 |
+
def __call__(self, batch):
    ...
    for _ in range(self.nenc):
        mask = self._sample_block_mask(e_size, acceptable_regions=acceptable_regions)
        masks_e.append(mu)
        min_keep_enc = min(min_keep_enc, len(mu))
|
| 381 |
+
```
|
| 382 |
+
|
| 383 |
+
Correct Line
|
| 384 |
+
```python
|
| 385 |
+
mask, _ = self._sample_block_mask(e_size, acceptable_regions=acceptable_regions)
|
| 386 |
+
```
|
| 387 |
+
|
| 388 |
+
LLM Return
|
| 389 |
+
```python
|
| 390 |
+
mask, mask_C = self._sample_block_mask(e_size, acceptable_regions=acceptable_regions)
|
| 391 |
+
```
|
| 392 |
+
|
| 393 |
+
Figure 8: Diversification of repair methods
|
| 394 |
+
|
| 395 |
+
Prompt
|
| 396 |
+
```txt
You are a code programmer, your task ....
///Context
/* The call of the function called in the error line
   in other locations of the repository: */
......
Partial newPartial = new Partial(iChronology, newTypes, newValues);
...
/* Functions called in the error line and their definitions: */
public Partial(DateTimeFieldType[] types, int[] values, Chronology chronology) {
    ...
}
///Function fragment
public Partial without(DateTimeFieldType fieldType) {
    int index = indexOf(fieldType);
    ...
    System.arraycopy(iValues, index + 1, newValues, index, newValues.length - index);
    Partial newPartial = new Partial(iChronology, newTypes, newValues);
    iChronology.validate(newPartial, newValues);
    ...
}
///Response:
'Fixed code':
```
|
| 426 |
+
|
| 427 |
+
GPT4 Repair
|
| 428 |
+
```txt
|
| 429 |
+
public Partial without(DateTimeFieldType fieldType) {
    ...
    Partial newPartial = new Partial(newTypes, newValues, iChronology);
|
| 432 |
+
```
|
| 433 |
+
|
| 434 |
+
Figure 9: RLCE method for Java language program repair
|
| 435 |
+
|
| 436 |
+
# REFERENCES
|
| 437 |
+
|
| 438 |
+
[1] Rohan Anil, Andrew M Dai, Orhan Firat, Melvin Johnson, Dmitry Lepikhin, Alexandre Passos, Siamak Shakeri, Emanuel Taropa, Paige Bailey, Zhifeng Chen, et al. 2023. Palm 2 technical report. arXiv preprint arXiv:2305.10403 (2023).
|
| 439 |
+
[2] Andrea Arcuri and Xin Yao. 2008. A novel co-evolutionary approach to automatic software bug fixing. In 2008 IEEE Congress on Evolutionary Computation (IEEE World Congress on Computational Intelligence). https://doi.org/10.1109/cec.2008.4630793
|
| 440 |
+
[3] Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in neural information processing systems 33 (2020), 1877-1901.
|
| 441 |
+
[4] Jialun Cao, Meiziniu Li, Ming Wen, and Shing-chi Cheung. 2023. A study on prompt design, advantages and limitations of chatgpt for deep learning program repair. arXiv preprint arXiv:2304.08191 (2023).
|
| 442 |
+
[5] Zimin Chen, Steve James Kommrusch, Michele Tufano, Louis-Noel Pouchet, Denys Poshyvanyk, and Martin Monperrus. 2021. SequenceR: Sequence-to-Sequence Learning for End-to-End Program Repair. IEEE Transactions on Software Engineering (Jan 2021), 1-1. https://doi.org/10.1109/tse.2019.2940179
|
| 443 |
+
[6] Yangruibo Ding, Zijian Wang, Wasi Uddin Ahmad, Hantian Ding, Ming Tan, Nihal Jain, Murali Krishna Ramanathan, Ramesh Nallapati, Parminder Bhatia, Dan Roth, et al. 2023. *CrossCodeEval: A Diverse and Multilingual Benchmark for Cross-File Code Completion*. arXiv preprint arXiv:2310.11248 (2023).
|
| 444 |
+
[7] Zuxing Gu, Jiecheng Wu, Jiaxiang Liu, Min Zhou, and Ming Gu. 2019. An empirical study on api-misuse bugs in open-source c programs. In 2019 IEEE 43rd annual computer software and applications conference (COMPSAC), Vol. 1. IEEE, 11-20.
|
| 445 |
+
[8] Rahul Gupta, Soham Pal, Aditya Kanade, and Shirish Shevade. 2022. DeepFix: Fixing Common C Language Errors by Deep Learning. Proceedings of the AAAI Conference on Artificial Intelligence (Jun 2022). https://doi.org/10.1609/aaai.v31i1.10742
|
| 446 |
+
[9] Jinru Hua, Mengshi Zhang, Kaiyuan Wang, and Sarfraz Khurshid. 2018. Sketchfix: a tool for automated program repair approach using lazy candidate generation. In Proceedings of the 2018 26th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering. 888-891.
|
| 447 |
+
[10] Nan Jiang, Kevin Liu, Thibaud Lutellier, and Lin Tan. 2023. Impact of code language models on automated program repair. arXiv preprint arXiv:2302.05020 (2023).
|
| 448 |
+
[11] Nan Jiang, Thibaud Lutellier, and Lin Tan. 2021. Cure: Code-aware neural machine translation for automatic program repair. In 2021 IEEE/ACM 43rd International Conference on Software Engineering (ICSE), IEEE, 1161-1173.
|
| 449 |
+
[12] René Just, Darioush Jalali, and Michael D Ernst. 2014. Defects4J: A database of existing faults to enable controlled testing studies for Java programs. In Proceedings of the 2014 international symposium on software testing and analysis. 437-440.
|
| 450 |
+
[13] Brian W Kernighan and Dennis M Ritchie. 1988. The C programming language. Prentice-Hall (1988).
|
| 451 |
+
[14] Takeshi Kojima, Shixiang Shane Gu, Machel Reid, Yutaka Matsuo, and Yusuke Iwasawa. 2022. Large language models are zero-shot reasoners. Advances in neural information processing systems 35 (2022), 22199-22213.
|
| 452 |
+
[15] Xia Li, Jiajun Jiang, Samuel Benton, Yingfei Xiong, and Lingming Zhang. 2021. A Large-scale Study on API Misuses in the Wild. In 2021 14th IEEE Conference on Software Testing, Verification and Validation (ICST). IEEE, 241-252.
|
| 453 |
+
[16] Derrick Lin, James Koppel, Angela Chen, and Armando Solar-Lezama. 2017. QuixBugs: A multi-lingual program repair benchmark set based on the Quixey Challenge. In Proceedings Companion of the 2017 ACM SIGPLAN international conference on systems, programming, languages, and applications: software for humanity. 55-56.
|
| 454 |
+
[17] Kui Liu, Anil Koyuncu, Dongsun Kim, and Tegawende F Bissyandé. 2019. TBar: Revisiting template-based automated program repair. In Proceedings of the 28th ACM SIGSOFT International Symposium on Software Testing and Analysis. 31-42.
|
| 455 |
+
[18] Kui Liu, Anil Koyuncu, Kisub Kim, Dongsun Kim, and Tegawende F. Bissyande. 2018. LSRepair: Live Search of Fix Ingredients for Automated Program Repair. In 2018 25th Asia-Pacific Software Engineering Conference (APSEC). https://doi.org/10.1109/apsec.2018.00085
|
| 456 |
+
[19] Pengfei Liu, Weizhe Yuan, Jinlan Fu, Zhengbao Jiang, Hiroaki Hayashi, and Graham Neubig. 2023. Pre-train, prompt, and predict: A systematic survey of prompting methods in natural language processing. Comput. Surveys 55, 9 (2023), 1-35.
|
| 457 |
+
[20] Shuai Lu, Nan Duan, Hojae Han, Daya Guo, Seung-won Hwang, and Alexey Svyatkovskiy. 2022. ReACC: A retrieval-augmented code completion framework. arXiv preprint arXiv:2203.07722 (2022).
|
| 458 |
+
[21] Thibaud Lutellier, Hung Viet Pham, Lawrence Pang, Yitong Li, Moshi Wei, and Lin Tan. 2020. Coconut: combining context-aware neural translation models using ensemble for program repair. In Proceedings of the 29th ACM SIGSOFT international symposium on software testing and analysis. 101-114.
|
| 459 |
+
[22] Robert HB Netzer and Barton P Miller. 1992. What are race conditions? Some issues and formalizations. ACM Letters on Programming Languages and Systems
|
| 460 |
+
|
| 461 |
+
(LOPLAS) 1, 1 (1992), 74-88.
|
| 462 |
+
[23] OpenAI, Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, et al. 2023. GPT-4 Technical Report. arXiv:2303.08774 [cs.CL]
|
| 463 |
+
[24] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. 2022. Training language models to follow instructions with human feedback. Advances in Neural Information Processing Systems 35 (2022), 27730-27744.
|
| 464 |
+
[25] David Lorge Parnas. 1972. On the criteria to be used in decomposing systems into modules. Commun. ACM 15, 12 (1972), 1053-1058.
|
| 465 |
+
[26] Julian Aron Prenner and Romain Robbes. 2021. Automatic Program Repair with OpenAI's Codex: Evaluating QuixBugs. arXiv preprint arXiv:2111.03922 (2021).
|
| 466 |
+
[27] Dominik Sobania, Martin Briesch, Carol Hanna, and Justyna Petke. 2023. An analysis of the automatic bug fixing performance of ChatGPT. arXiv preprint arXiv:2301.08653 (2023).
|
| 467 |
+
[28] Kevin J Sullivan, William G Griswold, Yuanfang Cai, and Ben Hallen. 2001. The structure and value of modularity in software design. ACM SIGSOFT Software Engineering Notes 26, 5 (2001), 99-108.
|
| 468 |
+
[29] Yuchi Tian and Baishakhi Ray. 2017. Automatically diagnosing and repairing error handling bugs in C. In Proceedings of the 2017 11th Joint Meeting on Foundations of Software Engineering. 752-762.
|
| 469 |
+
[30] Jason Wei, Xuezhi Wang, Dale Schuurmans, Maarten Bosma, Fei Xia, Ed Chi, Quoc V Le, Denny Zhou, et al. 2022. Chain-of-thought prompting elicits reasoning in large language models. Advances in Neural Information Processing Systems 35 (2022), 24824-24837.
|
| 470 |
+
|
| 471 |
+
[31] Westley Weimer, ThanhVu Nguyen, Claire Le Goues, and Stephanie Forrest. 2009. Automatically finding patches using genetic programming. In 2009 IEEE 31st International Conference on Software Engineering. https://doi.org/10.1109/icse.2009.5070536
|
| 472 |
+
[32] Jules White, Quchen Fu, Sam Hays, Michael Sandborn, Carlos Olea, Henry Gilbert, Ashraf Elnashar, Jesse Spencer-Smith, and Douglas C Schmidt. 2023. A prompt pattern catalog to enhance prompt engineering with ChatGPT. arXiv preprint arXiv:2302.11382 (2023).
|
| 473 |
+
[33] Fengji Zhang, Bei Chen, Yue Zhang, Jin Liu, Daoguang Zan, Yi Mao, Jian-Guang Lou, and Weizhu Chen. 2023. RepoCoder: Repository-level code completion
|
| 474 |
+
|
| 475 |
+
through iterative retrieval and generation. arXiv preprint arXiv:2303.12570 (2023).
|
| 476 |
+
[34] Xin Zhang, Rongjie Yan, Jiwei Yan, Baoquan Cui, Jun Yan, and Jian Zhang. 2022. ExcePy: A Python Benchmark for Bugs with Python Built-in Types. In 2022 IEEE International Conference on Software Analysis, Evolution and Reengineering (SANER). IEEE, 856-866.
|
| 477 |
+
[35] Yangyang Zhao, Hareton Leung, Yibiao Yang, Yuming Zhou, and Baowen Xu. 2017. Towards an understanding of change types in bug fixing code. Information and software technology 86 (2017), 37-53.
|
2403.00xxx/2403.00448/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7a2d1f5985c4f66294e00ad948438b92b4a4e0f91719b6d4f6e3f1fd21857436
|
| 3 |
+
size 502348
|
2403.00xxx/2403.00448/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00454/32cf1812-4569-4224-b8b1-fbebe2604343_content_list.json
ADDED
|
@@ -0,0 +1,1902 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Shorts vs. Regular Videos on YouTube: A Comparative Analysis of User Engagement and Content Creation Trends",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
84,
|
| 8 |
+
99,
|
| 9 |
+
913,
|
| 10 |
+
151
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Caroline Violot caroline.violot@unil.ch University of Lausanne",
|
| 17 |
+
"bbox": [
|
| 18 |
+
212,
|
| 19 |
+
162,
|
| 20 |
+
377,
|
| 21 |
+
209
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Igor Bilogrevic \nibilogrevic@gmail.com \nGoogle",
|
| 28 |
+
"bbox": [
|
| 29 |
+
212,
|
| 30 |
+
220,
|
| 31 |
+
379,
|
| 32 |
+
267
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Tugrulcan Elmas telmas@iu.edu Indiana University Bloomington",
|
| 39 |
+
"bbox": [
|
| 40 |
+
589,
|
| 41 |
+
162,
|
| 42 |
+
810,
|
| 43 |
+
209
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "Mathias Humbert \nmathias.humbert@unil.ch \nUniversity of Lausanne",
|
| 50 |
+
"bbox": [
|
| 51 |
+
611,
|
| 52 |
+
220,
|
| 53 |
+
787,
|
| 54 |
+
267
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "ABSTRACT",
|
| 61 |
+
"text_level": 1,
|
| 62 |
+
"bbox": [
|
| 63 |
+
84,
|
| 64 |
+
277,
|
| 65 |
+
184,
|
| 66 |
+
291
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "YouTube introduced the Shorts video format in 2021, allowing users to upload short videos that are prominently displayed on its website and app. Despite having such a large visual footprint, there are no studies to date that have looked at the impact Shorts introduction had on the production and consumption of content on YouTube. This paper presents the first comparative analysis of YouTube Shorts versus regular videos with respect to user engagement (i.e., views, likes, and comments), content creation frequency and video categories. We collected a dataset containing information about 70k channels that posted at least one Short, and we analyzed the metadata of all the videos (9.9M Shorts and 6.9M regular videos) they uploaded between January 2021 and December 2022, spanning a two-year period including the introduction of Shorts. Our longitudinal analysis shows that content creators consistently increased the frequency of Shorts production over this period, especially for newly-created channels, which surpassed that of regular videos. We also observe that Shorts target mostly entertainment categories, while regular videos cover a wide variety of categories. In general, Shorts attract more views and likes per view than regular videos, but attract less comments per view. However, Shorts do not outperform regular videos in the education and political categories as much as they do in other categories. Our study contributes to understanding social media dynamics, to quantifying the spread of short-form content, and to motivating future research on its impact on society.",
|
| 73 |
+
"bbox": [
|
| 74 |
+
84,
|
| 75 |
+
296,
|
| 76 |
+
482,
|
| 77 |
+
641
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "text",
|
| 83 |
+
"text": "CCS CONCEPTS",
|
| 84 |
+
"text_level": 1,
|
| 85 |
+
"bbox": [
|
| 86 |
+
84,
|
| 87 |
+
654,
|
| 88 |
+
220,
|
| 89 |
+
667
|
| 90 |
+
],
|
| 91 |
+
"page_idx": 0
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"type": "text",
|
| 95 |
+
"text": "- Information systems $\\rightarrow$ Social networks; Web log analysis.",
|
| 96 |
+
"bbox": [
|
| 97 |
+
83,
|
| 98 |
+
672,
|
| 99 |
+
482,
|
| 100 |
+
686
|
| 101 |
+
],
|
| 102 |
+
"page_idx": 0
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"type": "text",
|
| 106 |
+
"text": "KEYWORDS",
|
| 107 |
+
"text_level": 1,
|
| 108 |
+
"bbox": [
|
| 109 |
+
84,
|
| 110 |
+
699,
|
| 111 |
+
189,
|
| 112 |
+
713
|
| 113 |
+
],
|
| 114 |
+
"page_idx": 0
|
| 115 |
+
},
|
| 116 |
+
{
|
| 117 |
+
"type": "text",
|
| 118 |
+
"text": "YouTube, Short-Form Video Content, Engagement, Popularity, Upload Behavior, Social Media Dynamics, Content Production Patterns",
|
| 119 |
+
"bbox": [
|
| 120 |
+
83,
|
| 121 |
+
717,
|
| 122 |
+
482,
|
| 123 |
+
744
|
| 124 |
+
],
|
| 125 |
+
"page_idx": 0
|
| 126 |
+
},
|
| 127 |
+
{
|
| 128 |
+
"type": "text",
|
| 129 |
+
"text": "ACM Reference Format:",
|
| 130 |
+
"text_level": 1,
|
| 131 |
+
"bbox": [
|
| 132 |
+
84,
|
| 133 |
+
751,
|
| 134 |
+
228,
|
| 135 |
+
762
|
| 136 |
+
],
|
| 137 |
+
"page_idx": 0
|
| 138 |
+
},
|
| 139 |
+
{
|
| 140 |
+
"type": "text",
|
| 141 |
+
"text": "Caroline Violot, Tugrulcan Elmas, Igor Bilogrevic, and Mathias Humbert. 2024. Shorts vs. Regular Videos on YouTube: A Comparative Analysis of User",
|
| 142 |
+
"bbox": [
|
| 143 |
+
83,
|
| 144 |
+
763,
|
| 145 |
+
482,
|
| 146 |
+
789
|
| 147 |
+
],
|
| 148 |
+
"page_idx": 0
|
| 149 |
+
},
|
| 150 |
+
{
|
| 151 |
+
"type": "text",
|
| 152 |
+
"text": "Permission to make digital or hard copies of part or all of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for third-party components of this work must be honored. For all other uses, contact the owner/author(s).",
|
| 153 |
+
"bbox": [
|
| 154 |
+
81,
|
| 155 |
+
801,
|
| 156 |
+
482,
|
| 157 |
+
852
|
| 158 |
+
],
|
| 159 |
+
"page_idx": 0
|
| 160 |
+
},
|
| 161 |
+
{
|
| 162 |
+
"type": "text",
|
| 163 |
+
"text": "Websci '24, May 21-24, 2024, Stuttgart, Germany",
|
| 164 |
+
"bbox": [
|
| 165 |
+
84,
|
| 166 |
+
853,
|
| 167 |
+
308,
|
| 168 |
+
863
|
| 169 |
+
],
|
| 170 |
+
"page_idx": 0
|
| 171 |
+
},
|
| 172 |
+
{
|
| 173 |
+
"type": "text",
|
| 174 |
+
"text": "© 2024 Copyright held by the owner/author(s).",
|
| 175 |
+
"bbox": [
|
| 176 |
+
84,
|
| 177 |
+
864,
|
| 178 |
+
303,
|
| 179 |
+
875
|
| 180 |
+
],
|
| 181 |
+
"page_idx": 0
|
| 182 |
+
},
|
| 183 |
+
{
|
| 184 |
+
"type": "text",
|
| 185 |
+
"text": "ACM ISBN 979-8-4007-0334-8/24/05",
|
| 186 |
+
"bbox": [
|
| 187 |
+
84,
|
| 188 |
+
875,
|
| 189 |
+
253,
|
| 190 |
+
883
|
| 191 |
+
],
|
| 192 |
+
"page_idx": 0
|
| 193 |
+
},
|
| 194 |
+
{
|
| 195 |
+
"type": "text",
|
| 196 |
+
"text": "https://doi.org/10.1145/3614419.3644023",
|
| 197 |
+
"bbox": [
|
| 198 |
+
84,
|
| 199 |
+
883,
|
| 200 |
+
272,
|
| 201 |
+
893
|
| 202 |
+
],
|
| 203 |
+
"page_idx": 0
|
| 204 |
+
},
|
| 205 |
+
{
|
| 206 |
+
"type": "text",
|
| 207 |
+
"text": "Engagement and Content Creation Trends. In ACM Web Science Conference (Websci '24), May 21-24, 2024, Stuttgart, Germany. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/3614419.3644023",
|
| 208 |
+
"bbox": [
|
| 209 |
+
513,
|
| 210 |
+
279,
|
| 211 |
+
913,
|
| 212 |
+
316
|
| 213 |
+
],
|
| 214 |
+
"page_idx": 0
|
| 215 |
+
},
|
| 216 |
+
{
|
| 217 |
+
"type": "text",
|
| 218 |
+
"text": "1 INTRODUCTION",
|
| 219 |
+
"text_level": 1,
|
| 220 |
+
"bbox": [
|
| 221 |
+
514,
|
| 222 |
+
364,
|
| 223 |
+
687,
|
| 224 |
+
378
|
| 225 |
+
],
|
| 226 |
+
"page_idx": 0
|
| 227 |
+
},
|
| 228 |
+
{
|
| 229 |
+
"type": "text",
|
| 230 |
+
"text": "During the last few years, short-form video content has gained widespread popularity [29, 32]. TikTok, a social media platform launched in 2016 that focuses on short videos, quickly became a commercial success, with 3 billion downloads and 1 billion active monthly users in 2023 [8]. Shortly after that, platforms such as YouTube, Instagram, and Facebook introduced their own short-form video content features, with a similar format across all platforms. YouTube, in particular, introduced its so-called Shorts format as a beta version in the US on March 18, 2021 [31], and worldwide a few months later. Since its introduction on YouTube, more and more content creators have started to produce content in this format [29].",
|
| 231 |
+
"bbox": [
|
| 232 |
+
511,
|
| 233 |
+
383,
|
| 234 |
+
913,
|
| 235 |
+
535
|
| 236 |
+
],
|
| 237 |
+
"page_idx": 0
|
| 238 |
+
},
|
| 239 |
+
{
|
| 240 |
+
"type": "text",
|
| 241 |
+
"text": "The world of short-form video content is currently a topic of lively discussion, with a spectrum of perspectives. On the one hand, it provides an opportunity for creators to engage and entertain their audience with concise and attractive content. Moreover, short-form videos can be effective at disseminating information about social issues [42] and create new professional perspectives for content creators [2]. On the other hand, recent studies exploring TikTok highlighted some potentially concerning aspects for its users, such as a form of dependency [30], increase of daytime fatigue [33] and decrease of prospective memory [7]. Moreover, its impact on informative content, for example educational videos that thrive on depth and detail, should be explored. To investigate this, we focus on a fundamental research question: \"Are short videos replacing longer videos on YouTube, the most popular online video-sharing platform?\"",
|
| 242 |
+
"bbox": [
|
| 243 |
+
511,
|
| 244 |
+
536,
|
| 245 |
+
913,
|
| 246 |
+
743
|
| 247 |
+
],
|
| 248 |
+
"page_idx": 0
|
| 249 |
+
},
|
| 250 |
+
{
|
| 251 |
+
"type": "text",
|
| 252 |
+
"text": "To address this question, we undertake the first comprehensive study of YouTube Shorts, comparing them with regular videos (RVs), in terms of their effect on overall channel behavior and user engagement. Using data from the public YouTube Data API [36], we are able to control for platform, creator, and video features, offering insights into the evolving YouTube ecosystem. Our longitudinal comparative analysis quantifies changes in video content creation and user engagement across Shorts and RVs over the two years following the introduction of YouTube Shorts in March 2021. This study aims to provide valuable insights to content creators, advertisers, and researchers, allowing them to better understand the",
|
| 253 |
+
"bbox": [
|
| 254 |
+
511,
|
| 255 |
+
743,
|
| 256 |
+
913,
|
| 257 |
+
895
|
| 258 |
+
],
|
| 259 |
+
"page_idx": 0
|
| 260 |
+
},
|
| 261 |
+
{
|
| 262 |
+
"type": "aside_text",
|
| 263 |
+
"text": "arXiv:2403.00454v1 [cs.SI] 1 Mar 2024",
|
| 264 |
+
"bbox": [
|
| 265 |
+
22,
|
| 266 |
+
277,
|
| 267 |
+
57,
|
| 268 |
+
699
|
| 269 |
+
],
|
| 270 |
+
"page_idx": 0
|
| 271 |
+
},
|
| 272 |
+
{
|
| 273 |
+
"type": "text",
|
| 274 |
+
"text": "perspectives of an era marked by the rise of short-form video content. To delve deeper into the impact of Shorts on platform content and user engagement, we address the following research questions:",
|
| 275 |
+
"bbox": [
|
| 276 |
+
86,
|
| 277 |
+
107,
|
| 278 |
+
480,
|
| 279 |
+
148
|
| 280 |
+
],
|
| 281 |
+
"page_idx": 1
|
| 282 |
+
},
|
| 283 |
+
{
|
| 284 |
+
"type": "text",
|
| 285 |
+
"text": "RQ1 How did the introduction of Shorts affect preexisting channels in terms of content creation behavior, and how do channels created after Shorts introduction differ from older channels?",
|
| 286 |
+
"bbox": [
|
| 287 |
+
84,
|
| 288 |
+
152,
|
| 289 |
+
480,
|
| 290 |
+
193
|
| 291 |
+
],
|
| 292 |
+
"page_idx": 1
|
| 293 |
+
},
|
| 294 |
+
{
|
| 295 |
+
"type": "text",
|
| 296 |
+
"text": "RQ2 How do Shorts compare to RVs in terms of views and content creation frequency across video categories?",
|
| 297 |
+
"bbox": [
|
| 298 |
+
84,
|
| 299 |
+
194,
|
| 300 |
+
480,
|
| 301 |
+
222
|
| 302 |
+
],
|
| 303 |
+
"page_idx": 1
|
| 304 |
+
},
|
| 305 |
+
{
|
| 306 |
+
"type": "text",
|
| 307 |
+
"text": "RQ3 What differences between Shorts and RVs can be observed in terms of user engagement (views, likes, and comments), and does the duration of RVs have an influence?",
|
| 308 |
+
"bbox": [
|
| 309 |
+
84,
|
| 310 |
+
222,
|
| 311 |
+
480,
|
| 312 |
+
263
|
| 313 |
+
],
|
| 314 |
+
"page_idx": 1
|
| 315 |
+
},
|
| 316 |
+
{
|
| 317 |
+
"type": "text",
|
| 318 |
+
"text": "Our dataset contains data about 70k channels that have created at least one video in the Shorts format since March 2021. We collected metadata of 9.9M Shorts and 6.9M RVs posted by those channels between January 2021 and December 2022, which allowed us to analyze both the time window around Shorts introduction and the long-lasting impact it had on those channels and their videos. For each video we also retrieved its category and its number of views, likes, and comments.",
|
| 319 |
+
"bbox": [
|
| 320 |
+
86,
|
| 321 |
+
268,
|
| 322 |
+
480,
|
| 323 |
+
377
|
| 324 |
+
],
|
| 325 |
+
"page_idx": 1
|
| 326 |
+
},
|
| 327 |
+
{
|
| 328 |
+
"type": "text",
|
| 329 |
+
"text": "Our results highlight the emergence of three main trends. First, we observed that channels that posted at least one Shorts tended to adopt the format, eventually uploading more Shorts than RVs. Second, we found that categories are not distributed evenly between Shorts and RVs: Shorts were mainly uploaded in entertainment-related categories while RVs encompassed a wide variety of content, including political or educational. This indicates that the two types of videos are not created to cover the same themes, but rather coexist on the platform for different purposes. Additionally, political, educational and artistic Shorts videos generate fewer views, suggesting that for some categories, viewers prefer RVs. Overall, we found that Shorts outperformed RVs in terms of views and likes per view, but generated less comments per view, although this gap is narrowing. This trend is even more pronounced when comparing the engagement metrics of Shorts and RVs uploaded by the same channel, with Shorts getting 110 times more views (on average) than their RVs counterparts. However, when differentiating RVs between different duration groups we found that the median number of views of videos from 10 to 30 minutes long was higher than the median number of Shorts views.",
|
| 330 |
+
"bbox": [
|
| 331 |
+
86,
|
| 332 |
+
378,
|
| 333 |
+
480,
|
| 334 |
+
654
|
| 335 |
+
],
|
| 336 |
+
"page_idx": 1
|
| 337 |
+
},
|
| 338 |
+
{
|
| 339 |
+
"type": "text",
|
| 340 |
+
"text": "The rest of the paper is organized as follows. Section 2 describes how we collected and processed the data used in this article. Section 3 shows how the video publishing behavior evolved over the period of observation. Section 4 reports the differences in content between Shorts and RVs. Section 5 describes how users engaged with both types of videos in terms of views, likes, and comments. Section 6 discusses the impact of Shorts' introduction on overall content publishing behavior and user reaction. Section 7 provides the prior literature on the topic. Finally, Section 8 concludes the paper with its main findings, discusses its limitations, and provides future directions of research.",
|
| 341 |
+
"bbox": [
|
| 342 |
+
86,
|
| 343 |
+
656,
|
| 344 |
+
480,
|
| 345 |
+
806
|
| 346 |
+
],
|
| 347 |
+
"page_idx": 1
|
| 348 |
+
},
|
| 349 |
+
{
|
| 350 |
+
"type": "text",
|
| 351 |
+
"text": "2 DATA COLLECTION",
|
| 352 |
+
"text_level": 1,
|
| 353 |
+
"bbox": [
|
| 354 |
+
86,
|
| 355 |
+
821,
|
| 356 |
+
279,
|
| 357 |
+
835
|
| 358 |
+
],
|
| 359 |
+
"page_idx": 1
|
| 360 |
+
},
|
| 361 |
+
{
|
| 362 |
+
"type": "text",
|
| 363 |
+
"text": "Before diving into the details of our data collection, hereafter we briefly describe YouTube's RVs and Shorts. RVs can last from a few seconds to several hours. They are recorded and usually edited outside of YouTube, and they can be in a horizontal, vertical, or",
|
| 364 |
+
"bbox": [
|
| 365 |
+
86,
|
| 366 |
+
840,
|
| 367 |
+
480,
|
| 368 |
+
895
|
| 369 |
+
],
|
| 370 |
+
"page_idx": 1
|
| 371 |
+
},
|
| 372 |
+
{
|
| 373 |
+
"type": "text",
|
| 374 |
+
"text": "square format [38]. Shorts is a newer format that can last up to 60 seconds and must be in a vertical or square shape, optimized for viewing on mobile devices. They can be created outside of YouTube and then posted, but they can also be created directly from the app, by filming one or several clips that are combined on the spot, adding music, adjusting recording speed, adding filters, etc. [37], making Shorts particularly easy and quick to shoot, edit, and upload. Shorts have their own dedicated tab on the platform website or app, and users can move from one video to another by swiping on an endless scroll, without actively clicking on or searching for the videos.",
|
| 375 |
+
"bbox": [
|
| 376 |
+
517,
|
| 377 |
+
107,
|
| 378 |
+
911,
|
| 379 |
+
244
|
| 380 |
+
],
|
| 381 |
+
"page_idx": 1
|
| 382 |
+
},
|
| 383 |
+
{
|
| 384 |
+
"type": "text",
|
| 385 |
+
"text": "In order to efficiently collect relevant data, we leverage the YouTube Data API [36]. It provides methods to search video metadata by keywords or by channel identifiers, and to collect channel metadata, among other functionalities. Unfortunately, the YouTube Data API does not currently support random sampling of videos, hence we had to define a methodology to collect videos while acknowledging that the resulting dataset could contain some biases. We discuss them at the end of this section.",
|
| 386 |
+
"bbox": [
|
| 387 |
+
517,
|
| 388 |
+
246,
|
| 389 |
+
911,
|
| 390 |
+
354
|
| 391 |
+
],
|
| 392 |
+
"page_idx": 1
|
| 393 |
+
},
|
| 394 |
+
{
|
| 395 |
+
"type": "text",
|
| 396 |
+
"text": "2.1 Collection Process",
|
| 397 |
+
"text_level": 1,
|
| 398 |
+
"bbox": [
|
| 399 |
+
517,
|
| 400 |
+
369,
|
| 401 |
+
707,
|
| 402 |
+
383
|
| 403 |
+
],
|
| 404 |
+
"page_idx": 1
|
| 405 |
+
},
|
| 406 |
+
{
|
| 407 |
+
"type": "text",
|
| 408 |
+
"text": "The data collection consisted of three steps: (i) collecting an initial set of short videos (i.e., seeds), (ii) identifying which of them are Shorts, and (iii) growing the dataset.",
|
| 409 |
+
"bbox": [
|
| 410 |
+
517,
|
| 411 |
+
388,
|
| 412 |
+
911,
|
| 413 |
+
429
|
| 414 |
+
],
|
| 415 |
+
"page_idx": 1
|
| 416 |
+
},
|
| 417 |
+
{
|
| 418 |
+
"type": "text",
|
| 419 |
+
"text": "2.1.1 Collecting seed Shorts. Our primary objective is to collect seed Shorts so that we can identify channels that include both Shorts and RVs for our comparative analysis. As the YouTube API requires keywords to provide videos, we first come up with a comprehensive set of keywords that represent video categories for which the YouTubers create Shorts. To this end, we used common video categories of TikTok as search queries, assuming they would also be common in YouTube Shorts. We collected the categories from a digital marketing website.<sup>1</sup> We separated the terms that contained an \"&\" (for example \"food & cooking\" was split into a \"food\" keyword and a \"cooking\" keyword), for a total of 50 keywords.",
|
| 420 |
+
"bbox": [
|
| 421 |
+
517,
|
| 422 |
+
454,
|
| 423 |
+
911,
|
| 424 |
+
604
|
| 425 |
+
],
|
| 426 |
+
"page_idx": 1
|
| 427 |
+
},
|
| 428 |
+
{
|
| 429 |
+
"type": "text",
|
| 430 |
+
"text": "The queries returned both Shorts and RVs as the YouTube API did not provide an option to collect only Shorts. However, it was possible to restrain the \"Search\" results to \"short\" (less than 4 minutes), \"medium\" (between 4 and 20 minutes), and \"long\" videos (more than 20 minutes). Thus, to maximize the amount of Shorts we collected in this seed phase, we collected only videos from the \"short\" category.",
|
| 431 |
+
"bbox": [
|
| 432 |
+
517,
|
| 433 |
+
606,
|
| 434 |
+
911,
|
| 435 |
+
702
|
| 436 |
+
],
|
| 437 |
+
"page_idx": 1
|
| 438 |
+
},
|
| 439 |
+
{
|
| 440 |
+
"type": "text",
|
| 441 |
+
"text": "We first collected videos posted between March 18, 2021, (the date of the US beta launch of YouTube's Shorts [31]) and July 26, 2022. The API returns around 500 videos per query, so to further increase the number of results and ensure that the search results were not biased towards a specific period, we divided the time period into weeks, e.g., we first collected videos that were published between March 18, 2021, and March 21, 2021, then collected videos between March 22, 2021, and March 28, 2021, and so on, each time using all the aforementioned keywords. In total, we collected around 300k videos from 150k channels.",
|
| 442 |
+
"bbox": [
|
| 443 |
+
517,
|
| 444 |
+
703,
|
| 445 |
+
911,
|
| 446 |
+
839
|
| 447 |
+
],
|
| 448 |
+
"page_idx": 1
|
| 449 |
+
},
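The week-by-week keyword search described in the entry above can be reproduced against the public YouTube Data API v3. The sketch below is illustrative only and is not the authors' collection code: the API key, the two-keyword list, and the helper name are assumptions, while `search().list` with `q`, `videoDuration="short"`, `publishedAfter`, and `publishedBefore` are documented parameters of that API.

```python
# Illustrative sketch (not the authors' code): weekly keyword search for short
# videos via the YouTube Data API v3. API_KEY and KEYWORDS are placeholders.
from datetime import datetime, timedelta
from googleapiclient.discovery import build

API_KEY = "YOUR_API_KEY"          # assumption: caller supplies a valid key
KEYWORDS = ["food", "cooking"]    # assumption: subset of the 50 keywords used in the paper

youtube = build("youtube", "v3", developerKey=API_KEY)

def weekly_short_search(start: datetime, end: datetime):
    """Yield search results, one week and one keyword at a time."""
    week_start = start
    while week_start < end:
        week_end = min(week_start + timedelta(days=7), end)
        for keyword in KEYWORDS:
            request = youtube.search().list(
                part="snippet",
                q=keyword,
                type="video",
                videoDuration="short",  # "short" = under 4 minutes, as in the paper
                publishedAfter=week_start.isoformat("T") + "Z",
                publishedBefore=week_end.isoformat("T") + "Z",
                maxResults=50,
            )
            yield from request.execute().get("items", [])
        week_start = week_end

# Example: the first collection window starts at the US beta launch of Shorts.
for item in weekly_short_search(datetime(2021, 3, 18), datetime(2021, 3, 25)):
    print(item["id"].get("videoId"), item["snippet"]["channelId"])
```

Pagination (`pageToken`) and quota handling are omitted here to keep the sketch short.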
|
| 450 |
+
{
|
| 451 |
+
"type": "header",
|
| 452 |
+
"text": "Websci '24, May 21-24, 2024, Stuttgart, Germany",
|
| 453 |
+
"bbox": [
|
| 454 |
+
84,
|
| 455 |
+
75,
|
| 456 |
+
316,
|
| 457 |
+
87
|
| 458 |
+
],
|
| 459 |
+
"page_idx": 1
|
| 460 |
+
},
|
| 461 |
+
{
|
| 462 |
+
"type": "header",
|
| 463 |
+
"text": "Caroline Violot, Tugrulcan Elmas, Igor Bilogrevic, and Mathias Humbert",
|
| 464 |
+
"bbox": [
|
| 465 |
+
571,
|
| 466 |
+
75,
|
| 467 |
+
911,
|
| 468 |
+
87
|
| 469 |
+
],
|
| 470 |
+
"page_idx": 1
|
| 471 |
+
},
|
| 472 |
+
{
|
| 473 |
+
"type": "page_footnote",
|
| 474 |
+
"text": "<sup>1</sup>https://marketsplash.com/tiktok-hashtags/#link3",
|
| 475 |
+
"bbox": [
|
| 476 |
+
517,
|
| 477 |
+
872,
|
| 478 |
+
751,
|
| 479 |
+
883
|
| 480 |
+
],
|
| 481 |
+
"page_idx": 1
|
| 482 |
+
},
|
| 483 |
+
{
|
| 484 |
+
"type": "page_footnote",
|
| 485 |
+
"text": "2First week is incomplete as March 18, 2021 was a Thursday.",
|
| 486 |
+
"bbox": [
|
| 487 |
+
517,
|
| 488 |
+
883,
|
| 489 |
+
799,
|
| 490 |
+
895
|
| 491 |
+
],
|
| 492 |
+
"page_idx": 1
|
| 493 |
+
},
|
| 494 |
+
{
|
| 495 |
+
"type": "image",
|
| 496 |
+
"img_path": "images/e83ccc64008b9064504930a1387ba972cbcb5aec74ee63133bafc989d76879f7.jpg",
|
| 497 |
+
"image_caption": [
|
| 498 |
+
"Figure 1: Data collection process summary."
|
| 499 |
+
],
|
| 500 |
+
"image_footnote": [],
|
| 501 |
+
"bbox": [
|
| 502 |
+
86,
|
| 503 |
+
103,
|
| 504 |
+
911,
|
| 505 |
+
247
|
| 506 |
+
],
|
| 507 |
+
"page_idx": 2
|
| 508 |
+
},
|
| 509 |
+
{
|
| 510 |
+
"type": "text",
|
| 511 |
+
"text": "2.1.2 Labelling Shorts. The YouTube Data API does not currently return information on whether a video is a Short or a RV. Hence, we use the following methodology to identify which videos are Shorts: we send a GET request to www.youtube.com/shorts/<videoId> for each videoId and check in the redirection link if the URL stayed the same or if it was modified to the regular www.youtube.com/watch?v=<videoId>. YouTube allows older videos to be seen in the Shorts tab, as long as they are up to 60 seconds and have a square or vertical aspect ratio, so this method can classify videos uploaded before Shorts introduction as Shorts. Nevertheless, this method allows us to determine whether creators turned to short-form, square/vertical video content. We classified 144k videos as Shorts, and 159k as RVs using this method.",
|
| 512 |
+
"bbox": [
|
| 513 |
+
81,
|
| 514 |
+
296,
|
| 515 |
+
482,
|
| 516 |
+
477
|
| 517 |
+
],
|
| 518 |
+
"page_idx": 2
|
| 519 |
+
},
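The redirect heuristic in the entry above fits in a few lines. This is a minimal sketch that assumes the behaviour reported in the paper still holds (a request to youtube.com/shorts/&lt;videoId&gt; is redirected to the watch page only for non-Shorts); the function name and the use of the requests library are not from the paper.

```python
# Minimal sketch of the redirect-based Shorts check described above.
# Assumes the redirect behaviour reported in the paper; `is_short` is a made-up name.
import requests

def is_short(video_id: str) -> bool:
    """Return True if youtube.com/shorts/<id> does NOT redirect to the watch page."""
    resp = requests.get(
        f"https://www.youtube.com/shorts/{video_id}",
        allow_redirects=True,
        timeout=10,
    )
    # Regular videos are bounced to the standard watch URL; Shorts keep the /shorts/ path.
    return "/shorts/" in resp.url

# Example usage (hypothetical video id):
# print(is_short("dQw4w9WgXcQ"))  # expected False for a regular video
```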
|
| 520 |
+
{
|
| 521 |
+
"type": "text",
|
| 522 |
+
"text": "2.1.3 Growing the dataset and collecting additional metadata. Using the aforementioned method to identify Shorts, we identified all the channels that contained at least one Short during our period of interest. Among the 150k channels collected in the first step, there were 70,712 channels with at least one Short video. We collected all the videos posted by these channels between January 1, 2021, and December 31, 2022, totalling 16,746,091 videos, among which 6,862,321 RVs and 9,883,770 Shorts. Using the YouTube Data API, we collected the videos' metadata (title, description, posting date and time, duration, channel) and engagement statistics (number of views, likes, and comments). We also collected the YouTube categories of each video. YouTube categories (listed in Table 1 in Section 4) consists of 15 categories that creators or YouTube assign such as Music or Gaming. Finally, we collected the channels' metadata, mainly their title, description, creation date, and origin country. We further collected the channels' engagement metrics, i.e., the total view count, subscriber count, and number of uploaded videos.",
|
| 523 |
+
"bbox": [
|
| 524 |
+
81,
|
| 525 |
+
501,
|
| 526 |
+
482,
|
| 527 |
+
737
|
| 528 |
+
],
|
| 529 |
+
"page_idx": 2
|
| 530 |
+
},
|
| 531 |
+
{
|
| 532 |
+
"type": "text",
|
| 533 |
+
"text": "2.2 YouTube Terms & Conditions Compliance",
|
| 534 |
+
"text_level": 1,
|
| 535 |
+
"bbox": [
|
| 536 |
+
81,
|
| 537 |
+
752,
|
| 538 |
+
468,
|
| 539 |
+
768
|
| 540 |
+
],
|
| 541 |
+
"page_idx": 2
|
| 542 |
+
},
|
| 543 |
+
{
|
| 544 |
+
"type": "text",
|
| 545 |
+
"text": "The YouTube Data API limits the daily number of queries with a quotas system, where costs vary between different methods, e.g., a video search query costs 100 quotas and a channel metadata query costs 1 quota. The default number of quotas per day is $10\\mathrm{k}$ . As this limit is too restrictive to collect a large-scale dataset, we applied to and joined the YouTube Research Program [40] and obtained a research quota extension of 1M queries per day. We made sure to comply with the specific data policies and terms of use that come with being part of the YouTube Research Program [39]. In",
|
| 546 |
+
"bbox": [
|
| 547 |
+
81,
|
| 548 |
+
770,
|
| 549 |
+
482,
|
| 550 |
+
896
|
| 551 |
+
],
|
| 552 |
+
"page_idx": 2
|
| 553 |
+
},
|
| 554 |
+
{
|
| 555 |
+
"type": "text",
|
| 556 |
+
"text": "particular, we are not able to share our data due to the no-data disclosure, which forbids us to \"disclose, reproduce, sell, license or otherwise transfer to any third party, in part or in whole, any Program Data\".",
|
| 557 |
+
"bbox": [
|
| 558 |
+
511,
|
| 559 |
+
296,
|
| 560 |
+
913,
|
| 561 |
+
351
|
| 562 |
+
],
|
| 563 |
+
"page_idx": 2
|
| 564 |
+
},
|
| 565 |
+
{
|
| 566 |
+
"type": "text",
|
| 567 |
+
"text": "2.3 Bias Discussion",
|
| 568 |
+
"text_level": 1,
|
| 569 |
+
"bbox": [
|
| 570 |
+
513,
|
| 571 |
+
376,
|
| 572 |
+
686,
|
| 573 |
+
390
|
| 574 |
+
],
|
| 575 |
+
"page_idx": 2
|
| 576 |
+
},
|
| 577 |
+
{
|
| 578 |
+
"type": "text",
|
| 579 |
+
"text": "Ideally, we would prefer a random sample of videos for our collection of seed Shorts, for an unbiased analysis. However, as previously mentioned, YouTube does not currently support such random sampling. Furthermore, we are not able to use an empty query or queries with very general keywords to approximate random sampling. This is because we observe that regardless of the keyword, YouTube limits the search results amount, e.g., we could only collect 597 videos with the query \"cats\" and 251 videos with an empty query. Past research instead focused on videos that were the most influential using popularity as a proxy. For instance, Riberio et al. crawled channels with at least 10k subscribers and then collected their videos to provide a comprehensive dataset of YouTube [27]. Although this approach may facilitate studying popular channels, it may prevent us from analyzing YouTube Shorts that went viral and were viewed many times despite the low popularity of the channel. Our approach mitigates such a bias, but we acknowledge that it creates a bias towards the videos related to the keywords we used.",
|
| 580 |
+
"bbox": [
|
| 581 |
+
511,
|
| 582 |
+
393,
|
| 583 |
+
913,
|
| 584 |
+
630
|
| 585 |
+
],
|
| 586 |
+
"page_idx": 2
|
| 587 |
+
},
|
| 588 |
+
{
|
| 589 |
+
"type": "text",
|
| 590 |
+
"text": "3 POSTING BEHAVIOR EVOLUTION",
|
| 591 |
+
"text_level": 1,
|
| 592 |
+
"bbox": [
|
| 593 |
+
513,
|
| 594 |
+
654,
|
| 595 |
+
831,
|
| 596 |
+
667
|
| 597 |
+
],
|
| 598 |
+
"page_idx": 2
|
| 599 |
+
},
|
| 600 |
+
{
|
| 601 |
+
"type": "text",
|
| 602 |
+
"text": "We present here the evolution of the channels posting behavior, from January 2021 to December 2022.",
|
| 603 |
+
"bbox": [
|
| 604 |
+
511,
|
| 605 |
+
672,
|
| 606 |
+
913,
|
| 607 |
+
700
|
| 608 |
+
],
|
| 609 |
+
"page_idx": 2
|
| 610 |
+
},
|
| 611 |
+
{
|
| 612 |
+
"type": "text",
|
| 613 |
+
"text": "3.1 Evolution of Global Video Uploads",
|
| 614 |
+
"text_level": 1,
|
| 615 |
+
"bbox": [
|
| 616 |
+
513,
|
| 617 |
+
724,
|
| 618 |
+
841,
|
| 619 |
+
739
|
| 620 |
+
],
|
| 621 |
+
"page_idx": 2
|
| 622 |
+
},
|
| 623 |
+
{
|
| 624 |
+
"type": "text",
|
| 625 |
+
"text": "We first focus on the overall posting evolution, without considering individual channels behavior and analyze the total number of Shorts and RVs uploaded per week. YouTube allows any videos shorter than 60 seconds, with a square/vertical format, to be displayed in the Shorts tab (and therefore categorized as such), hence videos from before Shorts introduction can be labeled as such. As shown in Figure 2, we observe a constant rise in the number of new Shorts until mid-2022, followed by a slight decrease, and a constant number of created RVs. This shows that, while collectively continuing to produce RVs, video creators have also produced an increasing number of Shorts since March 2021.",
|
| 626 |
+
"bbox": [
|
| 627 |
+
511,
|
| 628 |
+
743,
|
| 629 |
+
913,
|
| 630 |
+
896
|
| 631 |
+
],
|
| 632 |
+
"page_idx": 2
|
| 633 |
+
},
|
| 634 |
+
{
|
| 635 |
+
"type": "header",
|
| 636 |
+
"text": "Shorts vs. Regular Videos on YouTube: A Comparative Analysis of User Engagement and Content Creation Trends",
|
| 637 |
+
"bbox": [
|
| 638 |
+
83,
|
| 639 |
+
75,
|
| 640 |
+
624,
|
| 641 |
+
87
|
| 642 |
+
],
|
| 643 |
+
"page_idx": 2
|
| 644 |
+
},
|
| 645 |
+
{
|
| 646 |
+
"type": "header",
|
| 647 |
+
"text": "Websci '24, May 21-24, 2024, Stuttgart, Germany",
|
| 648 |
+
"bbox": [
|
| 649 |
+
679,
|
| 650 |
+
75,
|
| 651 |
+
913,
|
| 652 |
+
87
|
| 653 |
+
],
|
| 654 |
+
"page_idx": 2
|
| 655 |
+
},
|
| 656 |
+
{
|
| 657 |
+
"type": "image",
|
| 658 |
+
"img_path": "images/8d13616c35af5968d669d85d362496e78b48e4e7169d208d89fa29c7e2484e89.jpg",
|
| 659 |
+
"image_caption": [
|
| 660 |
+
"Figure 2: Weekly video uploads, categorized into Shorts and RVs, with the number of channels older than the respective week shown in light grey. Some videos created before the introduction of Shorts were retrospectively classified as Shorts."
|
| 661 |
+
],
|
| 662 |
+
"image_footnote": [],
|
| 663 |
+
"bbox": [
|
| 664 |
+
94,
|
| 665 |
+
111,
|
| 666 |
+
472,
|
| 667 |
+
281
|
| 668 |
+
],
|
| 669 |
+
"page_idx": 3
|
| 670 |
+
},
|
| 671 |
+
{
|
| 672 |
+
"type": "text",
|
| 673 |
+
"text": "3.2 Evolution of Shorts Prevalence",
|
| 674 |
+
"text_level": 1,
|
| 675 |
+
"bbox": [
|
| 676 |
+
83,
|
| 677 |
+
386,
|
| 678 |
+
377,
|
| 679 |
+
400
|
| 680 |
+
],
|
| 681 |
+
"page_idx": 3
|
| 682 |
+
},
|
| 683 |
+
{
|
| 684 |
+
"type": "text",
|
| 685 |
+
"text": "Next, we focus on the individual posting behavior of channels, aiming to show results that equally reflect the behavior of all the channels in our dataset, without being biased towards the highly prolific channels. For most of the analysis, we first split the channels between newer channels and older channels, based on whether they were created after or before Shorts introduction.",
|
| 686 |
+
"bbox": [
|
| 687 |
+
81,
|
| 688 |
+
404,
|
| 689 |
+
482,
|
| 690 |
+
487
|
| 691 |
+
],
|
| 692 |
+
"page_idx": 3
|
| 693 |
+
},
|
| 694 |
+
{
|
| 695 |
+
"type": "text",
|
| 696 |
+
"text": "For each week between January 2021 and December 2022, we categorize active channels based on the percentage of Shorts they posted each week into the following categories: $[0\\%, 1 - 50\\%, 51 - 99\\%, 100\\%]$ . A given channel can change category from one week to another, if its posting behavior evolves. Then, we compute the fraction of channels belonging to each category and show the evolution of the posting behavior in Figure 3. Channels are separated between older channels and newer channels.",
|
| 697 |
+
"bbox": [
|
| 698 |
+
81,
|
| 699 |
+
487,
|
| 700 |
+
482,
|
| 701 |
+
598
|
| 702 |
+
],
|
| 703 |
+
"page_idx": 3
|
| 704 |
+
},
|
| 705 |
+
{
|
| 706 |
+
"type": "text",
|
| 707 |
+
"text": "In March 2021, the fraction of channels posting exclusively Shorts was 2.2 times higher among newly created channels than for older channels, with more than $60\\%$ of the latter opting to posting exclusively RVs. From there, we see a similar evolution for older and newer channels, with a marked increase in the fraction of channels posting only Shorts and a decrease in the fraction of the channels posting only RVs. However, while the intermediate categories exhibit a constant fraction for newer channels, we observe that, for older channels, the intermediate categories increased since January 2021. This indicates that a substantial number of the older channels which started posting Shorts continued to create RVs as well.",
|
| 708 |
+
"bbox": [
|
| 709 |
+
81,
|
| 710 |
+
598,
|
| 711 |
+
482,
|
| 712 |
+
752
|
| 713 |
+
],
|
| 714 |
+
"page_idx": 3
|
| 715 |
+
},
|
| 716 |
+
{
|
| 717 |
+
"type": "text",
|
| 718 |
+
"text": "3.3 Evolution of Posting Frequency",
|
| 719 |
+
"text_level": 1,
|
| 720 |
+
"bbox": [
|
| 721 |
+
83,
|
| 722 |
+
763,
|
| 723 |
+
383,
|
| 724 |
+
781
|
| 725 |
+
],
|
| 726 |
+
"page_idx": 3
|
| 727 |
+
},
|
| 728 |
+
{
|
| 729 |
+
"type": "text",
|
| 730 |
+
"text": "Having observed that channels created an increasing number of Shorts, collectively and individually, we analyze the impact it had on the production of RVs. Again, separating channels between older and newer channels, for each channel $c$ and each week $w$ , we compute $n_{cw}^{\\text{regular}}$ (resp. $n_{cw}^{\\text{Shorts}}$ ), the number of RVs (resp. Shorts) uploaded that week by that channel. We divide each $n_{cw}^{*}$ by $n_c$ , the total number of videos posted by channel $c$ , and obtain $f_{cw}^{*}$ , the normalized frequency of posting for each channel. We finally compute",
|
| 731 |
+
"bbox": [
|
| 732 |
+
81,
|
| 733 |
+
782,
|
| 734 |
+
482,
|
| 735 |
+
896
|
| 736 |
+
],
|
| 737 |
+
"page_idx": 3
|
| 738 |
+
},
|
| 739 |
+
{
|
| 740 |
+
"type": "image",
|
| 741 |
+
"img_path": "images/dffe21236e4c0812068759c6407db1ace2f22048f7a849ccc1dec48e012fb568.jpg",
|
| 742 |
+
"image_caption": [
|
| 743 |
+
"Figure 3: Analysis of channels' posting activity from January 2021 to December 2022. Channels were divided into groups based on the percentage of Shorts in the videos uploaded each week. The evolution of the fractions of channels in each group is shown."
|
| 744 |
+
],
|
| 745 |
+
"image_footnote": [],
|
| 746 |
+
"bbox": [
|
| 747 |
+
526,
|
| 748 |
+
111,
|
| 749 |
+
903,
|
| 750 |
+
367
|
| 751 |
+
],
|
| 752 |
+
"page_idx": 3
|
| 753 |
+
},
|
| 754 |
+
{
|
| 755 |
+
"type": "text",
|
| 756 |
+
"text": "$f_{w}^{*}$ , by averaging the normalized frequencies across all channels for each week, shown in Figure 4. This approach enables us to discern the weeks during which channels collectively uploaded more or fewer videos of each type, each channel contributing equally to the outcome, regardless of their total uploads count.",
|
| 757 |
+
"bbox": [
|
| 758 |
+
511,
|
| 759 |
+
497,
|
| 760 |
+
911,
|
| 761 |
+
566
|
| 762 |
+
],
|
| 763 |
+
"page_idx": 3
|
| 764 |
+
},
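The normalized posting frequency defined across the two entries above reduces to a short groupby computation. The sketch below only illustrates that definition; the DataFrame layout (one row per video with `channel_id`, `week`, and `is_short` columns) is an assumption, not the authors' data schema, and the per-week mean here is taken over channels that uploaded that type in that week (treating inactive channels as zero is the other possible reading of the text).

```python
# Illustrative computation of the normalized posting frequency f*_{cw} and its
# per-week average f*_w, following the definition in the text above.
# Assumed input: one row per video with columns channel_id, week, is_short.
import pandas as pd

def weekly_normalized_frequency(videos: pd.DataFrame) -> pd.DataFrame:
    # n*_{cw}: number of Shorts / regular videos channel c uploaded in week w.
    counts = (
        videos.groupby(["channel_id", "week", "is_short"])
        .size()
        .rename("n_cw")
        .reset_index()
    )
    # n_c: total number of videos (both types) posted by channel c.
    totals = videos.groupby("channel_id").size().rename("n_c")
    counts = counts.join(totals, on="channel_id")
    # f*_{cw} = n*_{cw} / n_c, then average over channels for each week.
    counts["f_cw"] = counts["n_cw"] / counts["n_c"]
    return (
        counts.groupby(["week", "is_short"])["f_cw"]
        .mean()
        .rename("f_w")
        .reset_index()
    )

# Toy example:
# videos = pd.DataFrame({"channel_id": ["a", "a", "b"],
#                        "week": ["2021-W12", "2021-W13", "2021-W12"],
#                        "is_short": [True, False, True]})
# print(weekly_normalized_frequency(videos))
```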
|
| 765 |
+
{
|
| 766 |
+
"type": "text",
|
| 767 |
+
"text": "We observe that both newer and older channels progressively reduced the frequency of RVs uploads over time. However, while the frequency of Shorts uploads increased for older channels, it decreased for newer channels. This is surprising at first, considering that some of the channels were created after and therefore their highest posting frequency should increase the value of the average frequency on later weeks. One possible explanation is that many channels which appeared around Shorts introduction were created in order to try the Shorts format, but lots of them stopped posting videos after a few weeks (out of the 445 channels created between 18 March 2021 and 25 March 2021, $4\\%$ had posted half of their videos a week after their creation and $9\\%$ had posted half of their videos a month after their creation) and therefore have a high normalized frequency of posting during their first few weeks of activity.",
|
| 768 |
+
"bbox": [
|
| 769 |
+
511,
|
| 770 |
+
566,
|
| 771 |
+
913,
|
| 772 |
+
762
|
| 773 |
+
],
|
| 774 |
+
"page_idx": 3
|
| 775 |
+
},
|
| 776 |
+
{
|
| 777 |
+
"type": "text",
|
| 778 |
+
"text": "3.4 Evolution of Weekly Content Volume",
|
| 779 |
+
"text_level": 1,
|
| 780 |
+
"bbox": [
|
| 781 |
+
513,
|
| 782 |
+
777,
|
| 783 |
+
864,
|
| 784 |
+
792
|
| 785 |
+
],
|
| 786 |
+
"page_idx": 3
|
| 787 |
+
},
|
| 788 |
+
{
|
| 789 |
+
"type": "text",
|
| 790 |
+
"text": "To complement the evolution of content uploading, we also analyzed the sum of the durations of the videos uploaded each week, called the weekly content volume. This allows us to get a sense of the amount of Shorts and RV content produced each week. As before, we separate our results between older and newer channels. Following the same logic as for the normalized frequency of posting, for each channel $c$ and each week $w$ , we compute $d_{cw}^{regular}$ and $d_{cw}^{Shorts}$ ,",
|
| 791 |
+
"bbox": [
|
| 792 |
+
511,
|
| 793 |
+
796,
|
| 794 |
+
913,
|
| 795 |
+
896
|
| 796 |
+
],
|
| 797 |
+
"page_idx": 3
|
| 798 |
+
},
|
| 799 |
+
{
|
| 800 |
+
"type": "header",
|
| 801 |
+
"text": "Websci '24, May 21-24, 2024, Stuttgart, Germany",
|
| 802 |
+
"bbox": [
|
| 803 |
+
83,
|
| 804 |
+
75,
|
| 805 |
+
318,
|
| 806 |
+
87
|
| 807 |
+
],
|
| 808 |
+
"page_idx": 3
|
| 809 |
+
},
|
| 810 |
+
{
|
| 811 |
+
"type": "header",
|
| 812 |
+
"text": "Caroline Violot, Tugrulcan Elmas, Igor Bilogrevic, and Mathias Humbert",
|
| 813 |
+
"bbox": [
|
| 814 |
+
570,
|
| 815 |
+
75,
|
| 816 |
+
913,
|
| 817 |
+
87
|
| 818 |
+
],
|
| 819 |
+
"page_idx": 3
|
| 820 |
+
},
|
| 821 |
+
{
|
| 822 |
+
"type": "image",
|
| 823 |
+
"img_path": "images/84f6afb1a92e7c8a48600954b040049d3f5cbfb2c55897fb950d45d32cb7d633.jpg",
|
| 824 |
+
"image_caption": [
|
| 825 |
+
"Figure 4: Evolution of normalized uploads frequency. Average weekly uploads of Shorts and RVs of each channel normalized by the total number videos (of both types) posted by that channel."
|
| 826 |
+
],
|
| 827 |
+
"image_footnote": [],
|
| 828 |
+
"bbox": [
|
| 829 |
+
96,
|
| 830 |
+
111,
|
| 831 |
+
472,
|
| 832 |
+
358
|
| 833 |
+
],
|
| 834 |
+
"page_idx": 4
|
| 835 |
+
},
|
| 836 |
+
{
|
| 837 |
+
"type": "text",
|
| 838 |
+
"text": "respectively the RVs' content volume and the Shorts' content volume uploaded that week by that channel. For $* \\in \\{ \\text{regular, Shorts} \\}$ , we divide each $d_{cw}^{*}$ by $d_{c}$ , the total content volume of channel $c$ , and obtain $v_{cw}^{*}$ , the normalized weekly content volume for each channel. The resulting quantities, shown in Figure 5, allow us to see on which week did channels invest the more time on average. Similarly to posting frequencies, newer channels had a peak of content creation around the introduction of Shorts and rapidly decreased from there, for both Shorts and RVs.",
|
| 839 |
+
"bbox": [
|
| 840 |
+
81,
|
| 841 |
+
462,
|
| 842 |
+
482,
|
| 843 |
+
585
|
| 844 |
+
],
|
| 845 |
+
"page_idx": 4
|
| 846 |
+
},
|
| 847 |
+
{
|
| 848 |
+
"type": "text",
|
| 849 |
+
"text": "As for older channels, since the introduction of Shorts, on average channels have increased their amount of Shorts content until reaching a plateau around June 2021, but the amount of RVs content is declining.",
|
| 850 |
+
"bbox": [
|
| 851 |
+
81,
|
| 852 |
+
585,
|
| 853 |
+
482,
|
| 854 |
+
642
|
| 855 |
+
],
|
| 856 |
+
"page_idx": 4
|
| 857 |
+
},
|
| 858 |
+
{
|
| 859 |
+
"type": "text",
|
| 860 |
+
"text": "4 CONTENT ANALYSIS",
|
| 861 |
+
"text_level": 1,
|
| 862 |
+
"bbox": [
|
| 863 |
+
83,
|
| 864 |
+
655,
|
| 865 |
+
292,
|
| 866 |
+
669
|
| 867 |
+
],
|
| 868 |
+
"page_idx": 4
|
| 869 |
+
},
|
| 870 |
+
{
|
| 871 |
+
"type": "text",
|
| 872 |
+
"text": "Our content analysis relies on video categories. The category is unique to a video and is either chosen by the creator or assigned by YouTube. Public videos can be assigned 15 categories, the others being movie genres for paid content.",
|
| 873 |
+
"bbox": [
|
| 874 |
+
81,
|
| 875 |
+
674,
|
| 876 |
+
482,
|
| 877 |
+
729
|
| 878 |
+
],
|
| 879 |
+
"page_idx": 4
|
| 880 |
+
},
|
| 881 |
+
{
|
| 882 |
+
"type": "text",
|
| 883 |
+
"text": "We first examine the distribution of categories for Shorts and RVs. Table 1 shows the categories, and the number of videos we collected for each category. People & Blogs being the default category it is overrepresented. In the second place, we find the Entertainment category. Nonprofits & Activism and Pets & Animals were the categories for which we collected the fewest videos.",
|
| 884 |
+
"bbox": [
|
| 885 |
+
81,
|
| 886 |
+
729,
|
| 887 |
+
482,
|
| 888 |
+
811
|
| 889 |
+
],
|
| 890 |
+
"page_idx": 4
|
| 891 |
+
},
|
| 892 |
+
{
|
| 893 |
+
"type": "text",
|
| 894 |
+
"text": "In general, the fraction of Shorts differs widely between categories. Categories with the highest fractions of Shorts are Comedy and People & Blogs whereas News & Politics and Nonprofits & Activism exhibit the lowest fractions. This observation may suggest that Shorts are predominantly used for generating lighthearted content, while RVs are the preferred format for delivering more serious",
|
| 895 |
+
"bbox": [
|
| 896 |
+
81,
|
| 897 |
+
811,
|
| 898 |
+
482,
|
| 899 |
+
896
|
| 900 |
+
],
|
| 901 |
+
"page_idx": 4
|
| 902 |
+
},
|
| 903 |
+
{
|
| 904 |
+
"type": "image",
|
| 905 |
+
"img_path": "images/50331a6c32f9734e8ef2cbdbae7ac4cd373532d60faa6c29dcc8c22d768ab9f0.jpg",
|
| 906 |
+
"image_caption": [
|
| 907 |
+
"Figure 5: Evolution of normalized weekly content volume. Average over all channels of the weekly content volume (sum of the durations of each video posted that week), separated between Shorts and RVs, normalized by the combined weekly content volume of both types."
|
| 908 |
+
],
|
| 909 |
+
"image_footnote": [],
|
| 910 |
+
"bbox": [
|
| 911 |
+
526,
|
| 912 |
+
111,
|
| 913 |
+
903,
|
| 914 |
+
357
|
| 915 |
+
],
|
| 916 |
+
"page_idx": 4
|
| 917 |
+
},
|
| 918 |
+
{
|
| 919 |
+
"type": "table",
|
| 920 |
+
"img_path": "images/1b008d09397db9dd077bb191833073985d5a8832cf4b51e973b3b8012dad3fe2.jpg",
|
| 921 |
+
"table_caption": [],
|
| 922 |
+
"table_footnote": [],
|
| 923 |
+
"table_body": "<table><tr><td>YouTube categories</td><td>Collected videos count</td><td>% of Shorts</td></tr><tr><td>People & Blogs</td><td>5.7M</td><td>74.3</td></tr><tr><td>Entertainment</td><td>3.2M</td><td>55.5</td></tr><tr><td>Howto & Style</td><td>1.9M</td><td>57.3</td></tr><tr><td>Education</td><td>1.8M</td><td>43.8</td></tr><tr><td>Gaming</td><td>1.0M</td><td>54.4</td></tr><tr><td>News & Politics</td><td>766.4k</td><td>14.4</td></tr><tr><td>Sports</td><td>544.8k</td><td>39.3</td></tr><tr><td>Comedy</td><td>385.3k</td><td>76.5</td></tr><tr><td>Science & Technology</td><td>340.3k</td><td>51.2</td></tr><tr><td>Music</td><td>333.0k</td><td>63.2</td></tr><tr><td>Film & Animation</td><td>288.9k</td><td>57.3</td></tr><tr><td>Travel & Events</td><td>208.0k</td><td>57.3</td></tr><tr><td>Autos & Vehicles</td><td>200.3k</td><td>57.8</td></tr><tr><td>Pets & Animals</td><td>118.4k</td><td>68.5</td></tr><tr><td>Nonprofits & Activism</td><td>42.5k</td><td>34.1</td></tr></table>",
|
| 924 |
+
"bbox": [
|
| 925 |
+
575,
|
| 926 |
+
465,
|
| 927 |
+
856,
|
| 928 |
+
685
|
| 929 |
+
],
|
| 930 |
+
"page_idx": 4
|
| 931 |
+
},
|
| 932 |
+
{
|
| 933 |
+
"type": "text",
|
| 934 |
+
"text": "Table 1: YouTube categories and the corresponding number of videos from our dataset. The percentage of Shorts out of all the videos collected in each category is also shown.",
|
| 935 |
+
"bbox": [
|
| 936 |
+
513,
|
| 937 |
+
686,
|
| 938 |
+
915,
|
| 939 |
+
727
|
| 940 |
+
],
|
| 941 |
+
"page_idx": 4
|
| 942 |
+
},
|
| 943 |
+
{
|
| 944 |
+
"type": "text",
|
| 945 |
+
"text": "information. People & Blogs having the largest number of Shorts could imply that creators do not specify the category when posting Shorts as often as they do when posting RVs, or that YouTube takes longer to classify them into the relevant categories.",
|
| 946 |
+
"bbox": [
|
| 947 |
+
511,
|
| 948 |
+
770,
|
| 949 |
+
913,
|
| 950 |
+
825
|
| 951 |
+
],
|
| 952 |
+
"page_idx": 4
|
| 953 |
+
},
|
| 954 |
+
{
|
| 955 |
+
"type": "text",
|
| 956 |
+
"text": "We then observe the evolution of the categories over our two years period. We selected the nine most common categories attributed to the videos in our dataset and labelled the rest of them under \"Other\". Next, for each category, we compute the percentage of videos to which the category was attributed out of all the videos",
|
| 957 |
+
"bbox": [
|
| 958 |
+
511,
|
| 959 |
+
827,
|
| 960 |
+
913,
|
| 961 |
+
896
|
| 962 |
+
],
|
| 963 |
+
"page_idx": 4
|
| 964 |
+
},
|
| 965 |
+
{
|
| 966 |
+
"type": "header",
|
| 967 |
+
"text": "Shorts vs. Regular Videos on YouTube: A Comparative Analysis of User Engagement and Content Creation Trends",
|
| 968 |
+
"bbox": [
|
| 969 |
+
84,
|
| 970 |
+
75,
|
| 971 |
+
624,
|
| 972 |
+
87
|
| 973 |
+
],
|
| 974 |
+
"page_idx": 4
|
| 975 |
+
},
|
| 976 |
+
{
|
| 977 |
+
"type": "header",
|
| 978 |
+
"text": "Websci '24, May 21-24, 2024, Stuttgart, Germany",
|
| 979 |
+
"bbox": [
|
| 980 |
+
679,
|
| 981 |
+
75,
|
| 982 |
+
911,
|
| 983 |
+
87
|
| 984 |
+
],
|
| 985 |
+
"page_idx": 4
|
| 986 |
+
},
|
| 987 |
+
{
|
| 988 |
+
"type": "image",
|
| 989 |
+
"img_path": "images/da66a7323317c0567c8350e44e305c9fe727a67ace7de37d1b9ad5e8ca9bfb59.jpg",
|
| 990 |
+
"image_caption": [
|
| 991 |
+
"Figure 6: Evolution of the percentage of categories attributed to Shorts and RVs, showing the changes of categories popularity among creators over our time period."
|
| 992 |
+
],
|
| 993 |
+
"image_footnote": [],
|
| 994 |
+
"bbox": [
|
| 995 |
+
109,
|
| 996 |
+
109,
|
| 997 |
+
408,
|
| 998 |
+
306
|
| 999 |
+
],
|
| 1000 |
+
"page_idx": 5
|
| 1001 |
+
},
|
| 1002 |
+
{
|
| 1003 |
+
"type": "image",
|
| 1004 |
+
"img_path": "images/01510d8a285eadd90ccf0163993448abaa563a7a0a4788bf64e993ff6ed1dc39.jpg",
|
| 1005 |
+
"image_caption": [],
|
| 1006 |
+
"image_footnote": [],
|
| 1007 |
+
"bbox": [
|
| 1008 |
+
413,
|
| 1009 |
+
109,
|
| 1010 |
+
691,
|
| 1011 |
+
306
|
| 1012 |
+
],
|
| 1013 |
+
"page_idx": 5
|
| 1014 |
+
},
|
| 1015 |
+
{
|
| 1016 |
+
"type": "image",
|
| 1017 |
+
"img_path": "images/3d8da28a4cee7733b452024857324b4bd0c86e84ee670f52e2265ad298a4528e.jpg",
|
| 1018 |
+
"image_caption": [],
|
| 1019 |
+
"image_footnote": [],
|
| 1020 |
+
"bbox": [
|
| 1021 |
+
707,
|
| 1022 |
+
130,
|
| 1023 |
+
885,
|
| 1024 |
+
258
|
| 1025 |
+
],
|
| 1026 |
+
"page_idx": 5
|
| 1027 |
+
},
|
| 1028 |
+
{
|
| 1029 |
+
"type": "text",
|
| 1030 |
+
"text": "posted in a given week, and repeat that for every week of our time period. Results are shown in Figure 6. We see that Shorts videos are consistently dominated by the People & Blogs category, although it is slowly declining in favor of the Entertainment category.",
|
| 1031 |
+
"bbox": [
|
| 1032 |
+
81,
|
| 1033 |
+
378,
|
| 1034 |
+
480,
|
| 1035 |
+
434
|
| 1036 |
+
],
|
| 1037 |
+
"page_idx": 5
|
| 1038 |
+
},
|
| 1039 |
+
{
|
| 1040 |
+
"type": "text",
|
| 1041 |
+
"text": "Categories of RVs are way more diverse and evenly distributed, and we can observe large trends of categories rising, declining or maintaining a constant percentage. The People & Blogs and Entertainment categories are also the most commonly attributed, but not as dramatically above as for Shorts. We notice that the Education category and the Science & Technology category maintained a constant percentage of uploads, and that the News & Politics category steadily increased after the beginning of 2022.",
|
| 1042 |
+
"bbox": [
|
| 1043 |
+
81,
|
| 1044 |
+
434,
|
| 1045 |
+
482,
|
| 1046 |
+
544
|
| 1047 |
+
],
|
| 1048 |
+
"page_idx": 5
|
| 1049 |
+
},
|
| 1050 |
+
{
|
| 1051 |
+
"type": "text",
|
| 1052 |
+
"text": "5 ENGAGEMENT ANALYSIS",
|
| 1053 |
+
"text_level": 1,
|
| 1054 |
+
"bbox": [
|
| 1055 |
+
83,
|
| 1056 |
+
556,
|
| 1057 |
+
331,
|
| 1058 |
+
569
|
| 1059 |
+
],
|
| 1060 |
+
"page_idx": 5
|
| 1061 |
+
},
|
| 1062 |
+
{
|
| 1063 |
+
"type": "text",
|
| 1064 |
+
"text": "In this section, we examine the evolution of the number of views, likes and comments, collectively referred to as the engagement metrics, received by Shorts and RVs. Our caveat is that we only have access to the engagement metrics as of the query time, which limits the analysis of the metrics evolution. However, most videos experience their peak of attention a few days after their release [6], nine months passed between the latest video's publication date (December 31, 2022) and the collection of engagement statistics (September 1, 2023). Therefore, besides a few exceptions where videos become viral a long time after their publication date, engagement metrics should not drastically fluctuate, and rather grow at a seemingly constant rate. This allows us to draw comparative results between the popularity of Shorts and RVs.",
|
| 1065 |
+
"bbox": [
|
| 1066 |
+
81,
|
| 1067 |
+
574,
|
| 1068 |
+
482,
|
| 1069 |
+
755
|
| 1070 |
+
],
|
| 1071 |
+
"page_idx": 5
|
| 1072 |
+
},
|
| 1073 |
+
{
|
| 1074 |
+
"type": "text",
|
| 1075 |
+
"text": "5.1 Engagement at the Video Level",
|
| 1076 |
+
"text_level": 1,
|
| 1077 |
+
"bbox": [
|
| 1078 |
+
83,
|
| 1079 |
+
766,
|
| 1080 |
+
377,
|
| 1081 |
+
781
|
| 1082 |
+
],
|
| 1083 |
+
"page_idx": 5
|
| 1084 |
+
},
|
| 1085 |
+
{
|
| 1086 |
+
"type": "text",
|
| 1087 |
+
"text": "We first analyzed the engagement at the video level, aggregating the results without considering channels or categories. Coherently with previous work [1], we found that $1\\%$ of the Shorts (resp. RVs) attracted $63\\%$ (resp. $61\\%$ ) of the Shorts (resp. RVs) views. We also computed that views and likes are highly correlated, with a Pearson correlation coefficient (PCC) of 0.848, but that the number of comments is not necessarily correlated to the others (with a PCC of 0.273 with views and a PCC of 0.360 with likes).",
|
| 1088 |
+
"bbox": [
|
| 1089 |
+
81,
|
| 1090 |
+
785,
|
| 1091 |
+
482,
|
| 1092 |
+
895
|
| 1093 |
+
],
|
| 1094 |
+
"page_idx": 5
|
| 1095 |
+
},
|
| 1096 |
+
{
|
| 1097 |
+
"type": "image",
|
| 1098 |
+
"img_path": "images/4fd91b0c7e6b12e1233dc42930044dc44f6f7ebf2899e9fa57da657eee945a07.jpg",
|
| 1099 |
+
"image_caption": [
|
| 1100 |
+
"Figure 7: Evolution of engagement metrics for Shorts and RVs, including the mean views, median views, mean of the likes count divided by views count and mean of the comments count divided by views count. The gray line indicates Shorts introduction."
|
| 1101 |
+
],
|
| 1102 |
+
"image_footnote": [],
|
| 1103 |
+
"bbox": [
|
| 1104 |
+
527,
|
| 1105 |
+
382,
|
| 1106 |
+
903,
|
| 1107 |
+
633
|
| 1108 |
+
],
|
| 1109 |
+
"page_idx": 5
|
| 1110 |
+
},
|
| 1111 |
+
{
|
| 1112 |
+
"type": "text",
|
| 1113 |
+
"text": "In Figure 7, we present an overview of the engagement metrics evolution. Specifically, we tracked the mean number of views, the median number of views, the mean likes per view and the mean comments per view.",
|
| 1114 |
+
"bbox": [
|
| 1115 |
+
511,
|
| 1116 |
+
757,
|
| 1117 |
+
913,
|
| 1118 |
+
811
|
| 1119 |
+
],
|
| 1120 |
+
"page_idx": 5
|
| 1121 |
+
},
|
| 1122 |
+
{
|
| 1123 |
+
"type": "text",
|
| 1124 |
+
"text": "We first observe that, over the two years, Shorts received four times as many views as RVs, and by the end of 2022, this difference had increased to six times. This is not surprising given that each user may watch far more Shorts than long videos in the same amount of time. We also note that mean views of RVs declined slowly and consistently with time, whereas mean views of Shorts",
|
| 1125 |
+
"bbox": [
|
| 1126 |
+
511,
|
| 1127 |
+
811,
|
| 1128 |
+
913,
|
| 1129 |
+
895
|
| 1130 |
+
],
|
| 1131 |
+
"page_idx": 5
|
| 1132 |
+
},
|
| 1133 |
+
{
|
| 1134 |
+
"type": "header",
|
| 1135 |
+
"text": "Websci '24, May 21-24, 2024, Stuttgart, Germany",
|
| 1136 |
+
"bbox": [
|
| 1137 |
+
83,
|
| 1138 |
+
75,
|
| 1139 |
+
318,
|
| 1140 |
+
87
|
| 1141 |
+
],
|
| 1142 |
+
"page_idx": 5
|
| 1143 |
+
},
|
| 1144 |
+
{
|
| 1145 |
+
"type": "header",
|
| 1146 |
+
"text": "Caroline Violot, Tugrulcan Elmas, Igor Bilogrevic, and Mathias Humbert",
|
| 1147 |
+
"bbox": [
|
| 1148 |
+
570,
|
| 1149 |
+
75,
|
| 1150 |
+
913,
|
| 1151 |
+
87
|
| 1152 |
+
],
|
| 1153 |
+
"page_idx": 5
|
| 1154 |
+
},
|
| 1155 |
+
{
|
| 1156 |
+
"type": "text",
|
| 1157 |
+
"text": "fluctuated around its introduction and then mostly increased from there, before slightly declining near the end of 2022.",
|
| 1158 |
+
"bbox": [
|
| 1159 |
+
81,
|
| 1160 |
+
107,
|
| 1161 |
+
480,
|
| 1162 |
+
133
|
| 1163 |
+
],
|
| 1164 |
+
"page_idx": 6
|
| 1165 |
+
},
|
| 1166 |
+
{
|
| 1167 |
+
"type": "text",
|
| 1168 |
+
"text": "As previously mentioned, the vast majority of views are harvested by a handful of videos. The extreme engagement values obtained by the top $1\\%$ of videos skew the means towards higher values and prevent from grasping the engagement evolution of more low-ranking videos which constitute the majority of videos. Looking at the median allows to better understand the dynamics of the $99\\%$ majority. Since the introduction of Shorts in March 2021 and until the end of the year, we observe a consistent and similar increase in the median number of views, with a slope of increase of $11.9 (r^2 = 0.74)$ for RVs and a slope of increase of $13.5 (r^2 = 0.81)$ for Shorts. But while RVs reached a plateau around March 2022, the median number of views that Shorts attracted increased drastically during 2022, with a slope of increase of $53.0 (r^2 = 0.78)$ . This shows that even less popular Shorts still obtain a substantial number of views which is not the case of RVs. The Shorts format would then allow not yet popular creators to reach a wider audience than RVs.",
|
| 1169 |
+
"bbox": [
|
| 1170 |
+
81,
|
| 1171 |
+
135,
|
| 1172 |
+
482,
|
| 1173 |
+
354
|
| 1174 |
+
],
|
| 1175 |
+
"page_idx": 6
|
| 1176 |
+
},
|
| 1177 |
+
{
|
| 1178 |
+
"type": "text",
|
| 1179 |
+
"text": "Regarding the other engagement metrics, we see that around Shorts introduction, Shorts and RVs have the same likes per view rate, but as from August 2021, Shorts started to convert views into likes more effectively than RVs and by the end of 2022, Shorts' likes per view rate was 1.4 times higher than RVs' likes per view rate. Comments per view, a more active form of engagement [6], have a higher rate for RVs than Shorts, but the gap seems to be narrowing over time.",
|
| 1180 |
+
"bbox": [
|
| 1181 |
+
81,
|
| 1182 |
+
356,
|
| 1183 |
+
482,
|
| 1184 |
+
465
|
| 1185 |
+
],
|
| 1186 |
+
"page_idx": 6
|
| 1187 |
+
},
|
| 1188 |
+
{
|
| 1189 |
+
"type": "text",
|
| 1190 |
+
"text": "5.2 Engagement at the Channel Level",
|
| 1191 |
+
"text_level": 1,
|
| 1192 |
+
"bbox": [
|
| 1193 |
+
83,
|
| 1194 |
+
517,
|
| 1195 |
+
401,
|
| 1196 |
+
532
|
| 1197 |
+
],
|
| 1198 |
+
"page_idx": 6
|
| 1199 |
+
},
|
| 1200 |
+
{
|
| 1201 |
+
"type": "text",
|
| 1202 |
+
"text": "We established that Shorts are generally more viewed and liked than RVs. We now explore if that is the case for videos originating from the same channel and if the trends that were observed globally also apply on a channel basis. We first split our channels between the $1\\%$ with the most subscribers (referred to as top 1 channels) and the rest (referred to as bottom 99 channels). The top $1\\%$ channels views accounted for $46\\%$ of the total views.",
|
| 1203 |
+
"bbox": [
|
| 1204 |
+
81,
|
| 1205 |
+
535,
|
| 1206 |
+
480,
|
| 1207 |
+
631
|
| 1208 |
+
],
|
| 1209 |
+
"page_idx": 6
|
| 1210 |
+
},
|
| 1211 |
+
{
|
| 1212 |
+
"type": "text",
|
| 1213 |
+
"text": "We divide our two-year period into four semesters referred to as \"2021-S1\" for the $1^{\\text{st}}$ semester of 2021, \"2021-S2\" for the $2^{\\text{nd}}$ semester of 2021, and so on. We then compute the mean views per channel and per semester, distinguishing between Shorts and RVs. This process is repeated for each semester, including only channels that posted both Shorts and RVs that semester. The ratio of mean views for Shorts to RVs is computed for each channel, and the average ratio is obtained by averaging across all eligible channels for each semester. This yields the evolution of the average ratio between Shorts' and RVs' views, on a channel basis, and a closer intuition to the difference between Shorts and RVs engagement that creators can expect for their channel. Results are shown in Figure 8.",
|
| 1214 |
+
"bbox": [
|
| 1215 |
+
81,
|
| 1216 |
+
632,
|
| 1217 |
+
480,
|
| 1218 |
+
797
|
| 1219 |
+
],
|
| 1220 |
+
"page_idx": 6
|
| 1221 |
+
},
|
| 1222 |
+
{
|
| 1223 |
+
"type": "text",
|
| 1224 |
+
"text": "For both popularity classes, Shorts consistently draw significantly more views than RVs from the same channel $-80$ times more for top 1 channels and 111 times more for bottom 99 channels, on average over the two-year period. However, while the ratio between Shorts' and RVs' views increases for top 1 channels, it declines for bottom 99 channels. Nonetheless, both groups can still reach a broader audience using Shorts than RVs.",
|
| 1225 |
+
"bbox": [
|
| 1226 |
+
81,
|
| 1227 |
+
799,
|
| 1228 |
+
480,
|
| 1229 |
+
896
|
| 1230 |
+
],
|
| 1231 |
+
"page_idx": 6
|
| 1232 |
+
},
|
| 1233 |
+
{
|
| 1234 |
+
"type": "image",
|
| 1235 |
+
"img_path": "images/dcdc6eda42bb89a720d6f90afc310cfa782a569a4de5cf4a0b529aa0fbc19fb1.jpg",
|
| 1236 |
+
"image_caption": [
|
| 1237 |
+
"Figure 8: Evolution of the average ratio between channels' Shorts' and RVs' views, divided between the top $1\\%$ of channels with regards to subscribers count and the rest."
|
| 1238 |
+
],
|
| 1239 |
+
"image_footnote": [],
|
| 1240 |
+
"bbox": [
|
| 1241 |
+
526,
|
| 1242 |
+
113,
|
| 1243 |
+
736,
|
| 1244 |
+
265
|
| 1245 |
+
],
|
| 1246 |
+
"page_idx": 6
|
| 1247 |
+
},
|
| 1248 |
+
{
|
| 1249 |
+
"type": "image",
|
| 1250 |
+
"img_path": "images/65f6c8e6576a2392dce97e45a78de270886f53e456fc8e8ed0db910f095db666.jpg",
|
| 1251 |
+
"image_caption": [],
|
| 1252 |
+
"image_footnote": [],
|
| 1253 |
+
"bbox": [
|
| 1254 |
+
743,
|
| 1255 |
+
113,
|
| 1256 |
+
903,
|
| 1257 |
+
265
|
| 1258 |
+
],
|
| 1259 |
+
"page_idx": 6
|
| 1260 |
+
},
|
| 1261 |
+
{
|
| 1262 |
+
"type": "image",
|
| 1263 |
+
"img_path": "images/a1b1db6ba894029cefd989e7c1d5fb8efe505e58bb3fb8aebb4bd7bbfe1cde36.jpg",
|
| 1264 |
+
"image_caption": [
|
| 1265 |
+
"Figure 9: Mean number of views and median number of views for Shorts and RVs of different durations."
|
| 1266 |
+
],
|
| 1267 |
+
"image_footnote": [],
|
| 1268 |
+
"bbox": [
|
| 1269 |
+
524,
|
| 1270 |
+
373,
|
| 1271 |
+
727,
|
| 1272 |
+
526
|
| 1273 |
+
],
|
| 1274 |
+
"page_idx": 6
|
| 1275 |
+
},
|
| 1276 |
+
{
|
| 1277 |
+
"type": "image",
|
| 1278 |
+
"img_path": "images/953bdb4a789266d3081d388160cafa4128911d3bf34cbc784b715ec0abd0d019.jpg",
|
| 1279 |
+
"image_caption": [],
|
| 1280 |
+
"image_footnote": [],
|
| 1281 |
+
"bbox": [
|
| 1282 |
+
733,
|
| 1283 |
+
373,
|
| 1284 |
+
903,
|
| 1285 |
+
525
|
| 1286 |
+
],
|
| 1287 |
+
"page_idx": 6
|
| 1288 |
+
},
|
| 1289 |
+
{
|
| 1290 |
+
"type": "text",
|
| 1291 |
+
"text": "5.3 Engagement Based on Duration",
|
| 1292 |
+
"text_level": 1,
|
| 1293 |
+
"bbox": [
|
| 1294 |
+
514,
|
| 1295 |
+
628,
|
| 1296 |
+
815,
|
| 1297 |
+
643
|
| 1298 |
+
],
|
| 1299 |
+
"page_idx": 6
|
| 1300 |
+
},
|
| 1301 |
+
{
|
| 1302 |
+
"type": "text",
|
| 1303 |
+
"text": "We previously compared the mean and median views of Shorts and RVs but, while Shorts are restricted to 60 seconds, RVs display a wide range of durations, each format requiring a different engagement from viewers and creators. We classified the RVs based on their duration into the following time intervals: less than 1 minute, 1 to 5 minutes, 5 to 10 minutes, 10 to 30 minutes, 30 minutes to 1 hour, and longer than 1 hour. For each group we computed the mean and the median number of views. Results are shown in Figure 9. The means of the views confirm previous results on the superiority of Shorts on RVs in terms of engagement, although at various degree, the least popular group being the 1-5 minutes group and the most popular being the under 1 minute group, followed by the 10-30 minutes group. The popularity of the later is confirmed when looking at the medians, where it appears that half of the videos between 10-30 minutes obtained around 5'800 views or more which is way above the other groups and three times as much as the median views for Shorts. This is quite surprising to observe that, median-wise, longer RVs are surpassing Shorts in attracting views.",
|
| 1304 |
+
"bbox": [
|
| 1305 |
+
511,
|
| 1306 |
+
646,
|
| 1307 |
+
913,
|
| 1308 |
+
896
|
| 1309 |
+
],
|
| 1310 |
+
"page_idx": 6
|
| 1311 |
+
},
|
| 1312 |
+
{
|
| 1313 |
+
"type": "header",
|
| 1314 |
+
"text": "Shorts vs. Regular Videos on YouTube: A Comparative Analysis of User Engagement and Content Creation Trends",
|
| 1315 |
+
"bbox": [
|
| 1316 |
+
83,
|
| 1317 |
+
75,
|
| 1318 |
+
622,
|
| 1319 |
+
87
|
| 1320 |
+
],
|
| 1321 |
+
"page_idx": 6
|
| 1322 |
+
},
|
| 1323 |
+
{
|
| 1324 |
+
"type": "header",
|
| 1325 |
+
"text": "Websci '24, May 21-24, 2024, Stuttgart, Germany",
|
| 1326 |
+
"bbox": [
|
| 1327 |
+
679,
|
| 1328 |
+
75,
|
| 1329 |
+
913,
|
| 1330 |
+
87
|
| 1331 |
+
],
|
| 1332 |
+
"page_idx": 6
|
| 1333 |
+
},
|
| 1334 |
+
{
|
| 1335 |
+
"type": "table",
|
| 1336 |
+
"img_path": "images/1d4f8497d63581ac7509690a96026d1fa4059a687d374020f3fda1ec5312a47f.jpg",
|
| 1337 |
+
"table_caption": [],
|
| 1338 |
+
"table_footnote": [],
|
| 1339 |
+
"table_body": "<table><tr><td rowspan=\"2\">category</td><td colspan=\"2\">2021</td><td colspan=\"2\">2022</td></tr><tr><td>S1</td><td>S2</td><td>S1</td><td>S2</td></tr><tr><td>Autos & Vehicles</td><td>1.42</td><td>2.27</td><td>2.49</td><td>2.20</td></tr><tr><td>Comedy</td><td>7.28</td><td>4.61</td><td>2.77</td><td>3.84</td></tr><tr><td>Education</td><td>5.28</td><td>3.28</td><td>3.88</td><td>4.26</td></tr><tr><td>Entertainment</td><td>6.39</td><td>4.33</td><td>4.53</td><td>5.70</td></tr><tr><td>Film & Animation</td><td>1.53</td><td>1.20</td><td>1.88</td><td>2.41</td></tr><tr><td>Gaming</td><td>2.16</td><td>2.45</td><td>2.38</td><td>3.97</td></tr><tr><td>Howto & Style</td><td>2.93</td><td>3.55</td><td>4.23</td><td>4.68</td></tr><tr><td>Music</td><td>0.42</td><td>0.46</td><td>0.79</td><td>1.10</td></tr><tr><td>News & Politics</td><td>0.72</td><td>4.63</td><td>3.20</td><td>2.80</td></tr><tr><td>Nonprofits & Activism</td><td>1.18</td><td>4.39</td><td>8.59</td><td>12.88</td></tr><tr><td>People & Blogs</td><td>5.64</td><td>4.32</td><td>5.59</td><td>7.05</td></tr><tr><td>Pets & Animals</td><td>2.47</td><td>2.84</td><td>4.73</td><td>8.16</td></tr><tr><td>Science & Technology</td><td>2.30</td><td>2.37</td><td>3.20</td><td>4.15</td></tr><tr><td>Sports</td><td>3.55</td><td>3.46</td><td>5.96</td><td>6.59</td></tr><tr><td>Travel & Events</td><td>2.51</td><td>2.31</td><td>4.30</td><td>7.03</td></tr><tr><td>All</td><td>4.19</td><td>3.40</td><td>4.04</td><td>5.2</td></tr></table>",
|
| 1340 |
+
"bbox": [
|
| 1341 |
+
116,
|
| 1342 |
+
103,
|
| 1343 |
+
447,
|
| 1344 |
+
371
|
| 1345 |
+
],
|
| 1346 |
+
"page_idx": 7
|
| 1347 |
+
},
|
| 1348 |
+
{
|
| 1349 |
+
"type": "text",
|
| 1350 |
+
"text": "Table 2: Evolution of the ratio between the mean number of views per Short and the mean number of views per RV, categorized by different YouTube categories, over the two years' semesters. The overall ratio trend for all categories combined is given. Values exceeding the overall ratio for each semester and category are highlighted in bold.",
|
| 1351 |
+
"bbox": [
|
| 1352 |
+
84,
|
| 1353 |
+
373,
|
| 1354 |
+
480,
|
| 1355 |
+
455
|
| 1356 |
+
],
|
| 1357 |
+
"page_idx": 7
|
| 1358 |
+
},
|
| 1359 |
+
{
|
| 1360 |
+
"type": "text",
|
| 1361 |
+
"text": "5.4 Engagement Based on Categories",
|
| 1362 |
+
"text_level": 1,
|
| 1363 |
+
"bbox": [
|
| 1364 |
+
84,
|
| 1365 |
+
493,
|
| 1366 |
+
393,
|
| 1367 |
+
508
|
| 1368 |
+
],
|
| 1369 |
+
"page_idx": 7
|
| 1370 |
+
},
|
| 1371 |
+
{
|
| 1372 |
+
"type": "text",
|
| 1373 |
+
"text": "Finally, we compare the levels of engagement generated by Shorts and RVs within the different categories, in order to see if, for some categories, RVs attracted more views than Shorts. For each semester and each category we compute the ratio between the mean number of views per Short and the mean number of views per RV. We also compute this ratio for all categories combined, as a reference. Results are given in Table 2.",
|
| 1374 |
+
"bbox": [
|
| 1375 |
+
84,
|
| 1376 |
+
511,
|
| 1377 |
+
480,
|
| 1378 |
+
608
|
| 1379 |
+
],
|
| 1380 |
+
"page_idx": 7
|
| 1381 |
+
},
|
| 1382 |
+
{
|
| 1383 |
+
"type": "text",
|
| 1384 |
+
"text": "We observe that the popularity of Shorts varies between categories and time periods. It appears that for some categories, such as Music, Film & Animation, Gaming, and, to a lesser extent, Science & Technology, users prefer to watch RVs than Shorts. Some categories, such as Comedy and Education, initially generated a high engagement for Shorts before slowly loosing this advantage at the relative benefit of RVs. Conversely, for Nonprofits & Activism, which initially generated a limited engagement for Shorts, there was a progressive increase, eventually reaching 12 times more views for Shorts than RVs by the second semester of 2022. Finally, the Entertainment and the People & Blogs categories exhibit a ratio systematically high and above the reference ratio.",
|
| 1385 |
+
"bbox": [
|
| 1386 |
+
86,
|
| 1387 |
+
609,
|
| 1388 |
+
480,
|
| 1389 |
+
773
|
| 1390 |
+
],
|
| 1391 |
+
"page_idx": 7
|
| 1392 |
+
},
|
| 1393 |
+
{
|
| 1394 |
+
"type": "text",
|
| 1395 |
+
"text": "6 DISCUSSION",
|
| 1396 |
+
"text_level": 1,
|
| 1397 |
+
"bbox": [
|
| 1398 |
+
84,
|
| 1399 |
+
787,
|
| 1400 |
+
217,
|
| 1401 |
+
800
|
| 1402 |
+
],
|
| 1403 |
+
"page_idx": 7
|
| 1404 |
+
},
|
| 1405 |
+
{
|
| 1406 |
+
"type": "text",
|
| 1407 |
+
"text": "Our analysis sheds light on the significant impact of Shorts on the content created and consumed on the YouTube platform.",
|
| 1408 |
+
"bbox": [
|
| 1409 |
+
84,
|
| 1410 |
+
806,
|
| 1411 |
+
480,
|
| 1412 |
+
833
|
| 1413 |
+
],
|
| 1414 |
+
"page_idx": 7
|
| 1415 |
+
},
|
| 1416 |
+
{
|
| 1417 |
+
"type": "text",
|
| 1418 |
+
"text": "$RQ1$ . We focus on channels that have uploaded at least one Short to see how trying this new content affected their overall behavior. For these channels, Shorts production has grown impressively, and eventually surpassed RVs production. A notable trend is that a",
|
| 1419 |
+
"bbox": [
|
| 1420 |
+
84,
|
| 1421 |
+
840,
|
| 1422 |
+
480,
|
| 1423 |
+
895
|
| 1424 |
+
],
|
| 1425 |
+
"page_idx": 7
|
| 1426 |
+
},
|
| 1427 |
+
{
|
| 1428 |
+
"type": "text",
|
| 1429 |
+
"text": "large proportion of channels created after March 2021 posted only Shorts from the beginning, implying that new channels were mostly created with the goal of uploading Shorts. Older channels, while initially more inclined to upload only RVs, also gradually turned to Shorts. Both older and newer channels reduced their production of RVs, and older channels persistently maintained high uploads of Shorts, further indicating that Shorts are well implanted in the YouTube landscape, and that channels have a confirmed interest in uploading Shorts, beyond the first curiosity. This growth in Shorts production may also be caused by the platform's efforts to popularize Shorts, for example by providing incentives for creators, with a new type of monetization [13], and frequent updates [14].",
|
| 1430 |
+
"bbox": [
|
| 1431 |
+
517,
|
| 1432 |
+
107,
|
| 1433 |
+
911,
|
| 1434 |
+
273
|
| 1435 |
+
],
|
| 1436 |
+
"page_idx": 7
|
| 1437 |
+
},
|
| 1438 |
+
{
|
| 1439 |
+
"type": "text",
|
| 1440 |
+
"text": "$RQ$ 2. Regarding the types of content produced, our analysis revealed that the distributions of Shorts and RVs vary widely across YouTube categories. Shorts are primarily employed for creating entertaining content, while RVs remain the preferred format for conveying more serious information, for example on politics or social activism issues. Furthermore, we observe a synchronicity between creators and viewers, as the same entertainment-related categories exhibit a ratio of Shorts views to RV views consistently higher than the reference ratio which includes all the categories. Viewers are mainly consuming Shorts for entertainment purposes and creators may have understood that from the beginning.",
|
| 1441 |
+
"bbox": [
|
| 1442 |
+
517,
|
| 1443 |
+
280,
|
| 1444 |
+
911,
|
| 1445 |
+
431
|
| 1446 |
+
],
|
| 1447 |
+
"page_idx": 7
|
| 1448 |
+
},
|
| 1449 |
+
{
|
| 1450 |
+
"type": "text",
|
| 1451 |
+
"text": "On the other hand, education-related ratio of Shorts views to RV views remains consistently below the reference ratio, indicating that, while users consume Shorts in this category, they stayed faithful to RVs for learning new things. Similarly, the exceptionally low ratio of the art-related categories suggests that users are willing to consume longer forms of content when it is videos in which artists invested time and energy. One surprising exception was the impressive engagement generated by Shorts in the Nonprofits & Activism category, despite the low percentage of Shorts in this category. Creators uploading videos in this category might benefit from using Shorts to reach a wider audience about social issues.",
|
| 1452 |
+
"bbox": [
|
| 1453 |
+
517,
|
| 1454 |
+
431,
|
| 1455 |
+
911,
|
| 1456 |
+
583
|
| 1457 |
+
],
|
| 1458 |
+
"page_idx": 7
|
| 1459 |
+
},
|
| 1460 |
+
{
|
| 1461 |
+
"type": "text",
|
| 1462 |
+
"text": "$RQ 3$ . Analyzing engagement metrics showed that Shorts are particularly effective at capturing the attention and engagement of viewers. This advantage is even more striking when we consider videos from the same channel, in which Shorts attract 110 times more views than their RV counterparts. Moreover, the gap in views between Shorts and RVs is progressively growing, both due to Shorts being increasingly watched and RVs' views declining. One nuance to this observation is that there might be a different delay between Shorts and RVs upload and their consumption by viewers. A two-year period separates the first and last videos of our dataset. Usually, after the peak of attention following their upload, videos continue to slowly accumulate views [6]. This would partly explains why RVs posted during December 2022 have fewer views than videos from January 2021. However, newer Shorts have as many views as older Shorts, suggesting that old Shorts are rarely shown to users.",
|
| 1463 |
+
"bbox": [
|
| 1464 |
+
517,
|
| 1465 |
+
590,
|
| 1466 |
+
911,
|
| 1467 |
+
811
|
| 1468 |
+
],
|
| 1469 |
+
"page_idx": 7
|
| 1470 |
+
},
|
| 1471 |
+
{
|
| 1472 |
+
"type": "text",
|
| 1473 |
+
"text": "RVs are currently having a higher comments per view rate than Shorts. Users watching RVs spend more time on the same video allowing them to engage more into comments than viewers of Shorts, rapidly swiping to the next video. Additionally, some categories prone to generate more debate and comments, like News & Politics, are less covered by Shorts.",
|
| 1474 |
+
"bbox": [
|
| 1475 |
+
517,
|
| 1476 |
+
813,
|
| 1477 |
+
911,
|
| 1478 |
+
895
|
| 1479 |
+
],
|
| 1480 |
+
"page_idx": 7
|
| 1481 |
+
},
|
| 1482 |
+
{
|
| 1483 |
+
"type": "header",
|
| 1484 |
+
"text": "Websci '24, May 21-24, 2024, Stuttgart, Germany",
|
| 1485 |
+
"bbox": [
|
| 1486 |
+
84,
|
| 1487 |
+
75,
|
| 1488 |
+
316,
|
| 1489 |
+
87
|
| 1490 |
+
],
|
| 1491 |
+
"page_idx": 7
|
| 1492 |
+
},
|
| 1493 |
+
{
|
| 1494 |
+
"type": "header",
|
| 1495 |
+
"text": "Caroline Violot, Tugrulcan Elmas, Igor Bilogrevic, and Mathias Humbert",
|
| 1496 |
+
"bbox": [
|
| 1497 |
+
571,
|
| 1498 |
+
75,
|
| 1499 |
+
911,
|
| 1500 |
+
87
|
| 1501 |
+
],
|
| 1502 |
+
"page_idx": 7
|
| 1503 |
+
},
|
| 1504 |
+
{
|
| 1505 |
+
"type": "text",
|
| 1506 |
+
"text": "On YouTube, a few videos collect the vast majority of views, and mean values are not representative of the majority of videos. Hence, to explore whether less popular channels also benefit from Shorts, we examine the median views evolution. We observe an increase of the median number of views for Shorts but also for RVs, which is contrary to the mean evolution of RVs views. The majority of Shorts are increasingly viewed but so are the majority of RVs.",
|
| 1507 |
+
"bbox": [
|
| 1508 |
+
81,
|
| 1509 |
+
107,
|
| 1510 |
+
480,
|
| 1511 |
+
204
|
| 1512 |
+
],
|
| 1513 |
+
"page_idx": 8
|
| 1514 |
+
},
|
| 1515 |
+
{
|
| 1516 |
+
"type": "text",
|
| 1517 |
+
"text": "The evolution of the ratio between Shorts and RV views from the same channel confirms this particular dynamic. For exceptionally popular channels, Shorts are increasingly getting more views than RVs but for the remaining majority of channels the trend is the opposite. Different reasons could explain this observation. The first one comes from the decreasing frequency of RV uploads. As most channels tend to upload fewer RVs, those that are uploaded attract more views, benefiting from some scarcity effect. The second one relates to the successive updates of YouTube Shorts delivery system. Initially accessible only from a tab on the YouTube home page, without the user being able to choose, Shorts are now recommended on the home page with thumbnails, or suggested in the \"watch next\" banner of RVs. These different entry points allow users to click on Shorts they want to watch out of a few suggestions, which might favor popular Shorts.",
|
| 1518 |
+
"bbox": [
|
| 1519 |
+
81,
|
| 1520 |
+
205,
|
| 1521 |
+
480,
|
| 1522 |
+
411
|
| 1523 |
+
],
|
| 1524 |
+
"page_idx": 8
|
| 1525 |
+
},
|
| 1526 |
+
{
|
| 1527 |
+
"type": "text",
|
| 1528 |
+
"text": "We will end this discussion with an additional nuance on the Shorts superiority over RVs. In most of our analysis we made the binary distinction between Shorts and RVs. However, RVs can widely vary in duration, and videos of 5-10 minutes constitute a type of content arguably very different than 1 hour long video essays. On average, Shorts dominate RVs of all duration groups, but at different degrees. Moreover, the analysis of the medians challenges Shorts superiority. Indeed, half of the videos in the 10-30 minutes group garnered at least 5,800 views, contrasting with a median of 1,986 views for Shorts. This duration range also ranks as the third most viewed, with a mean number of views surpassing 160k, confirming its sustained popularity among YouTube video consumers.",
|
| 1529 |
+
"bbox": [
|
| 1530 |
+
81,
|
| 1531 |
+
411,
|
| 1532 |
+
480,
|
| 1533 |
+
578
|
| 1534 |
+
],
|
| 1535 |
+
"page_idx": 8
|
| 1536 |
+
},
|
| 1537 |
+
{
|
| 1538 |
+
"type": "text",
|
| 1539 |
+
"text": "Ethical Considerations. As our research solely relies on publicly available data and does not involve interactions with human participants, it does not classify as human subjects research. We follow common ethical standards; we do not attempt to de-anonymize users, we do not disclose any personal information, and our study does not report any offensive content.",
|
| 1540 |
+
"bbox": [
|
| 1541 |
+
81,
|
| 1542 |
+
589,
|
| 1543 |
+
480,
|
| 1544 |
+
674
|
| 1545 |
+
],
|
| 1546 |
+
"page_idx": 8
|
| 1547 |
+
},
|
| 1548 |
+
{
|
| 1549 |
+
"type": "text",
|
| 1550 |
+
"text": "7 RELATED WORK",
|
| 1551 |
+
"text_level": 1,
|
| 1552 |
+
"bbox": [
|
| 1553 |
+
83,
|
| 1554 |
+
689,
|
| 1555 |
+
259,
|
| 1556 |
+
704
|
| 1557 |
+
],
|
| 1558 |
+
"page_idx": 8
|
| 1559 |
+
},
|
| 1560 |
+
{
|
| 1561 |
+
"type": "text",
|
| 1562 |
+
"text": "Our research delves into the dynamic shifts in content creation and user behavior resulting from evolving platform policies, with a special focus on YouTube Shorts, which is a popular instance of short-form video concept. We now provide a brief survey on existing research related to short-form videos and the effect of platform policies on user behavior.",
|
| 1563 |
+
"bbox": [
|
| 1564 |
+
81,
|
| 1565 |
+
708,
|
| 1566 |
+
480,
|
| 1567 |
+
792
|
| 1568 |
+
],
|
| 1569 |
+
"page_idx": 8
|
| 1570 |
+
},
|
| 1571 |
+
{
|
| 1572 |
+
"type": "text",
|
| 1573 |
+
"text": "7.1 Short-Form Videos",
|
| 1574 |
+
"text_level": 1,
|
| 1575 |
+
"bbox": [
|
| 1576 |
+
83,
|
| 1577 |
+
808,
|
| 1578 |
+
282,
|
| 1579 |
+
821
|
| 1580 |
+
],
|
| 1581 |
+
"page_idx": 8
|
| 1582 |
+
},
|
| 1583 |
+
{
|
| 1584 |
+
"type": "text",
|
| 1585 |
+
"text": "YouTube is the primary video-oriented social media platform. Past studies on YouTube focused on its role on propagating disinformation [15, 16, 25], on video popularity [4, 34], on attention dynamics [20], and on user interactions [35]. Nonetheless, it is unclear if the results of these analyses focusing on regular video content hold",
|
| 1586 |
+
"bbox": [
|
| 1587 |
+
81,
|
| 1588 |
+
825,
|
| 1589 |
+
480,
|
| 1590 |
+
896
|
| 1591 |
+
],
|
| 1592 |
+
"page_idx": 8
|
| 1593 |
+
},
|
| 1594 |
+
{
|
| 1595 |
+
"type": "text",
|
| 1596 |
+
"text": "for short-form videos as the way people interact with these two types of videos are different.",
|
| 1597 |
+
"bbox": [
|
| 1598 |
+
511,
|
| 1599 |
+
107,
|
| 1600 |
+
911,
|
| 1601 |
+
133
|
| 1602 |
+
],
|
| 1603 |
+
"page_idx": 8
|
| 1604 |
+
},
|
| 1605 |
+
{
|
| 1606 |
+
"type": "text",
|
| 1607 |
+
"text": "Short-form videos are currently understudied and large-scale analyses of such data are relatively uncommon. This is first because the short-form video concept has gained widespread popularity and became the primary feature of social media platforms only recently, e.g., YouTube introduced Shorts and their section in 2021. Second, the platforms have been restrictive in their data-sharing policies. YouTube announced its research API in 2022, TikTok announced it in 2023, and Instagram still does not allow large-scale analysis of Reels. As such, our work is one of the few that analyze short-form videos and the first to analyze YouTube Shorts to the best of our knowledge. We now survey the existing works on short-form videos, which mostly rely on TikTok as data source.",
|
| 1608 |
+
"bbox": [
|
| 1609 |
+
511,
|
| 1610 |
+
135,
|
| 1611 |
+
913,
|
| 1612 |
+
300
|
| 1613 |
+
],
|
| 1614 |
+
"page_idx": 8
|
| 1615 |
+
},
|
| 1616 |
+
{
|
| 1617 |
+
"type": "text",
|
| 1618 |
+
"text": "Past analyses of TikTok mainly focused on its trending and recommendation algorithm. Klug et al. studied the characteristics of trending videos on TikTok and found that they have high video engagements and are more likely to be posted at certain times [17]. They also report that using trending hashtags do not necessarily contribute to a video being trending. Simpson et al. investigated the impact of TikTok's algorithm on $\\mathrm{LGBTQ+}$ users and highlighted algorithmic exclusion and resilience in identity work [28]. Boeker and Urman studied TikTok's recommendation algorithm and reported that it is influenced by users' location, language, and engagement with the content [3]. Lee et al. employed a qualitative method in which they interview TikTok users to understand how they perceive and interact with TikTok's algorithms [19]. Other studies focused on how the platform shapes political communication by analyzing users' sentiment against the politicians [41], hyperpartisan activity [23], and the platforms' policies against misinformation [21].",
|
| 1619 |
+
"bbox": [
|
| 1620 |
+
511,
|
| 1621 |
+
301,
|
| 1622 |
+
913,
|
| 1623 |
+
523
|
| 1624 |
+
],
|
| 1625 |
+
"page_idx": 8
|
| 1626 |
+
},
|
| 1627 |
+
{
|
| 1628 |
+
"type": "text",
|
| 1629 |
+
"text": "7.2 Impact of Platform Policies on Users",
|
| 1630 |
+
"text_level": 1,
|
| 1631 |
+
"bbox": [
|
| 1632 |
+
513,
|
| 1633 |
+
585,
|
| 1634 |
+
856,
|
| 1635 |
+
602
|
| 1636 |
+
],
|
| 1637 |
+
"page_idx": 8
|
| 1638 |
+
},
|
| 1639 |
+
{
|
| 1640 |
+
"type": "text",
|
| 1641 |
+
"text": "Popular social media platforms introduce new policies from time to time. This may be due to external actors (e.g., governments) enforcing platforms to adopt new policies such as censorship [10], privacy policies [22], and moderate hate speech [18] and disinformation [26]. In other cases, platforms themselves may introduce changes to the platforms to enhance user experience. In all cases, such changes encourage users to change their behavior and adapt, which eventually shapes public communication. Such behavioral change may manifest itself in user content. For instance, users tend to use more abbreviations and contracted forms when they are constrained by the length of their content, but they also tend to create content with better quality [12]. They may also game platforms' policies to manipulate social media, such as purchasing popular accounts instead of growing new accounts [11] or maintaining backup accounts to recover from platform suspensions [24]. Other actors such as researchers may also be affected by platforms' data collection policies such as denying access to removed content [9] or limiting access to the API [5]. In this work, we study the impact of the introduction of Shorts on channel behavior by comparing the channels' regular videos and Shorts, which have not been studied to date.",
|
| 1642 |
+
"bbox": [
|
| 1643 |
+
511,
|
| 1644 |
+
604,
|
| 1645 |
+
913,
|
| 1646 |
+
896
|
| 1647 |
+
],
|
| 1648 |
+
"page_idx": 8
|
| 1649 |
+
},
|
| 1650 |
+
{
|
| 1651 |
+
"type": "header",
|
| 1652 |
+
"text": "Shorts vs. Regular Videos on YouTube: A Comparative Analysis of User Engagement and Content Creation Trends",
|
| 1653 |
+
"bbox": [
|
| 1654 |
+
84,
|
| 1655 |
+
75,
|
| 1656 |
+
624,
|
| 1657 |
+
87
|
| 1658 |
+
],
|
| 1659 |
+
"page_idx": 8
|
| 1660 |
+
},
|
| 1661 |
+
{
|
| 1662 |
+
"type": "header",
|
| 1663 |
+
"text": "Websci '24, May 21-24, 2024, Stuttgart, Germany",
|
| 1664 |
+
"bbox": [
|
| 1665 |
+
679,
|
| 1666 |
+
75,
|
| 1667 |
+
911,
|
| 1668 |
+
87
|
| 1669 |
+
],
|
| 1670 |
+
"page_idx": 8
|
| 1671 |
+
},
|
| 1672 |
+
{
|
| 1673 |
+
"type": "text",
|
| 1674 |
+
"text": "8 CONCLUSION",
|
| 1675 |
+
"text_level": 1,
|
| 1676 |
+
"bbox": [
|
| 1677 |
+
83,
|
| 1678 |
+
104,
|
| 1679 |
+
232,
|
| 1680 |
+
119
|
| 1681 |
+
],
|
| 1682 |
+
"page_idx": 9
|
| 1683 |
+
},
|
| 1684 |
+
{
|
| 1685 |
+
"type": "text",
|
| 1686 |
+
"text": "In conclusion, this article sheds light on the substantial impact of Shorts on the YouTube platform. We showed that channels which took an interest in Shorts, and uploaded at least one Short, largely adopted the format, eventually surpassing RV uploads. We also showed that, while the collective amount of RV uploads stayed mostly constant over time, when looking at individual behaviors, most channels reduced their production of RVs, while increasing and then maintaining a constant frequency of Short uploads.",
|
| 1687 |
+
"bbox": [
|
| 1688 |
+
81,
|
| 1689 |
+
125,
|
| 1690 |
+
482,
|
| 1691 |
+
234
|
| 1692 |
+
],
|
| 1693 |
+
"page_idx": 9
|
| 1694 |
+
},
|
| 1695 |
+
{
|
| 1696 |
+
"type": "text",
|
| 1697 |
+
"text": "We also observed that Shorts and RVs are not evenly distributed between content categories. Shorts mainly belong to lighthearted, entertainment categories, while RVs touch more diverse content, including news, politics, and education. This disparity in content production is reflected in content consumption. Indeed, the supremacy of Shorts in terms of views is less striking for entertainment-related categories than for the others. In art-related categories, Shorts barely attracted more views than RVs.",
|
| 1698 |
+
"bbox": [
|
| 1699 |
+
81,
|
| 1700 |
+
234,
|
| 1701 |
+
482,
|
| 1702 |
+
345
|
| 1703 |
+
],
|
| 1704 |
+
"page_idx": 9
|
| 1705 |
+
},
|
| 1706 |
+
{
|
| 1707 |
+
"type": "text",
|
| 1708 |
+
"text": "Finally, we showed that Shorts progressively generated more views per video until getting five times more views on average than RVs, by the end of 2022. When looking at videos from the same channel, Shorts generated 110 times more views than their RVs counterparts. This effect is more pronounced for popular channels than for the rest.",
|
| 1709 |
+
"bbox": [
|
| 1710 |
+
81,
|
| 1711 |
+
345,
|
| 1712 |
+
482,
|
| 1713 |
+
428
|
| 1714 |
+
],
|
| 1715 |
+
"page_idx": 9
|
| 1716 |
+
},
|
| 1717 |
+
{
|
| 1718 |
+
"type": "text",
|
| 1719 |
+
"text": "Limitations & Future Work. We acknowledge that our findings are not free from limitations. As mentioned in the data collection, our dataset is biased towards the keywords we selected to collect our data. Additionally, we focused on channels who took an interest in Shorts and posted at least one Short. Opening the lens of focus to all types of channels to analyze the prevalence of channels that adopted Shorts, and overall channels behavior would contribute to ground this work into the current YouTube landscape. As such, further investigations comparing channels that adopted Shorts, and channels that did not, would provide interesting and complementary insights to this study. During our analysis of the engagement metrics we did not use comments' text. By collecting comments, we could observe the adaptation of the (commenting) user-base by analyzing the evolution of the network of commenters of a channel and the nature of the comments themselves.",
|
| 1720 |
+
"bbox": [
|
| 1721 |
+
81,
|
| 1722 |
+
443,
|
| 1723 |
+
482,
|
| 1724 |
+
650
|
| 1725 |
+
],
|
| 1726 |
+
"page_idx": 9
|
| 1727 |
+
},
|
| 1728 |
+
{
|
| 1729 |
+
"type": "text",
|
| 1730 |
+
"text": "ACKNOWLEDGMENTS",
|
| 1731 |
+
"text_level": 1,
|
| 1732 |
+
"bbox": [
|
| 1733 |
+
83,
|
| 1734 |
+
670,
|
| 1735 |
+
281,
|
| 1736 |
+
684
|
| 1737 |
+
],
|
| 1738 |
+
"page_idx": 9
|
| 1739 |
+
},
|
| 1740 |
+
{
|
| 1741 |
+
"type": "text",
|
| 1742 |
+
"text": "We thank Kévin Huguenin for his valuable insights and feedback on an earlier version of this paper.",
|
| 1743 |
+
"bbox": [
|
| 1744 |
+
81,
|
| 1745 |
+
688,
|
| 1746 |
+
482,
|
| 1747 |
+
717
|
| 1748 |
+
],
|
| 1749 |
+
"page_idx": 9
|
| 1750 |
+
},
|
| 1751 |
+
{
|
| 1752 |
+
"type": "text",
|
| 1753 |
+
"text": "REFERENCES",
|
| 1754 |
+
"text_level": 1,
|
| 1755 |
+
"bbox": [
|
| 1756 |
+
84,
|
| 1757 |
+
734,
|
| 1758 |
+
202,
|
| 1759 |
+
750
|
| 1760 |
+
],
|
| 1761 |
+
"page_idx": 9
|
| 1762 |
+
},
|
| 1763 |
+
{
|
| 1764 |
+
"type": "list",
|
| 1765 |
+
"sub_type": "ref_text",
|
| 1766 |
+
"list_items": [
|
| 1767 |
+
"[1] Jane Arthurs, Sophia Drakopoulou, and Alessandro Gandini. 2018. Researching YouTube. Convergence 24, 1 (Feb. 2018), 3-15. https://doi.org/10.1177/1354856517737222",
|
| 1768 |
+
"[2] Xiang Bi and Cunchen Tang. 2020. Research on the Motives Affecting the Behavior of Short Video's Creators. IEEE Access 8 (2020), 188415-188428. https://doi.org/10.1109/ACCESS.2020.3028392",
|
| 1769 |
+
"[3] Maximilian Boeker and Aleksandra Urman. 2022. An empirical investigation of personalization factors on tiktok. In Proceedings of the ACM Web Conference 2022. 2298-2309.",
|
| 1770 |
+
"[4] Youmna Borghol, Siddharth Mitra, Sebastien Ardon, Niklas Carlsson, Derek Eager, and Anirban Mahanti. 2011. Characterizing and modelling popularity of user-generated videos. In Performance Evaluation.",
|
| 1771 |
+
"[5] Joshua Braun. 2023. Journalism, Media Research, and Mastodon: Notes on the Future. Digital Journalism (2023), 1-8."
|
| 1772 |
+
],
|
| 1773 |
+
"bbox": [
|
| 1774 |
+
89,
|
| 1775 |
+
752,
|
| 1776 |
+
482,
|
| 1777 |
+
895
|
| 1778 |
+
],
|
| 1779 |
+
"page_idx": 9
|
| 1780 |
+
},
|
| 1781 |
+
{
|
| 1782 |
+
"type": "list",
|
| 1783 |
+
"sub_type": "ref_text",
|
| 1784 |
+
"list_items": [
|
| 1785 |
+
"[6] M. Cha, H. Kwak, P. Rodriguez, Y. Ahn, and S. Moon. 2009. Analyzing the Video Popularity Characteristics of Large-Scale User Generated Content Systems. In IEEE/ACM Transactions on Networking.",
|
| 1786 |
+
"[7] Francesco Chiossi, Luke Haliburton, Changkun Ou, Andreas Martin Butz, and Albrecht Schmidt. 2023. Short-Form Videos Degrade Our Capacity to Retain Intentions: Effect of Context Switching On Prospective Memory. In Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems (CHI '23). Association for Computing Machinery, New York, NY, USA, 1-15. https://doi.org/10.1145/3544548.3580778",
|
| 1787 |
+
"[8] Brian Dean. 2023. TikTok User Statistics. https://backlinko.com/tiktok-users.",
|
| 1788 |
+
"[9] Tugrulcan Elmas. 2023. The Impact of Data Persistence Bias on Social Media Studies. In Proceedings of the 15th ACM Web Science Conference 2023. 196-207.",
|
| 1789 |
+
"[10] Tugrulcan Elmas, Rebekah Overdorf, and Karl Aberer. 2021. A Dataset of State-Censored Tweets.. In ICWSM. 1009-1015.",
|
| 1790 |
+
"[11] Tugrulcan Elmas, Rebekah Overdorf, and Karl Aberer. 2023. Misleading repurposing on twitter. In Proceedings of the International AAAAI Conference on Web and Social Media, Vol. 17. 209-220.",
|
| 1791 |
+
"[12] Kristina Gligoric, Ashton Anderson, and Robert West. 2018. How constraints affect content: The case of Twitter's switch from 140 to 280 characters. In Proceedings of the International AAAI Conference on Web and Social Media, Vol. 12.",
|
| 1792 |
+
"[13] James Hale. 2021. YouTube Shorts Launches $100 Million Fund To Pay Creators Of Top Videos. https://www.tubefilter.com/2021/05/11/youtube-shorts-100-million-creator-fund/.",
|
| 1793 |
+
"[14] YouTube Help. 2021. New Features and Updates for Shorts Viewers & Creators. https://support.google.com/youtube/thread/139221507.",
|
| 1794 |
+
"[15] Muhammad Nihal Hussain, Seripil Tokdemir, Nitin Agarwal, and Samer Al-Khateeb. 2018. Analyzing disinformation and crowd manipulation tactics on YouTube. In 2018 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM). IEEE, 1092-1095.",
|
| 1795 |
+
"[16] Eslam Hussein, Prerna Juneja, and Tanushree Mitra. 2020. Measuring Misinformation in Video Search Platforms: An Audit Study on YouTube. In Proceedings of the ACM on Human-Computer Interaction.",
|
| 1796 |
+
"[17] Daniel Klug, Yiluo Qin, Morgan Evans, and Geoff Kaufman. 2021. Trick and please. A mixed-method study on user assumptions about the TikTok algorithm. In Proceedings of the 13th ACM Web Science Conference 2021. 84-92.",
|
| 1797 |
+
"[18] Anastasia Kozyreva, Stefan M Herzog, Stephan Lewandowsky, Ralph Hertwig, Philipp Lorenz-Spreen, Mark Leiser, and Jason Reifler. 2023. Resolving content moderation dilemmas between free speech and harmful misinformation. Proceedings of the National Academy of Sciences 120, 7 (2023), e2210666120.",
|
| 1798 |
+
"[19] Angela Y Lee, Hannah Mieczkowski, Nicole B Ellison, and Jeffrey T Hancock. 2022. The algorithmic crystal: Conceptualizing the self through algorithmic personalization on TikTok. Proceedings of the ACM on Human-computer Interaction 6, CSCW2 (2022), 1-22.",
|
| 1799 |
+
"[20] JooYoung Lee, Siqi Wu, Ali Mert Ertugrul, Yu-Ru Lin, and Lexing Xie. 2022. Whose Advantage? Measuring Attention Dynamics across YouTube and Twitter on Controversial Topics. 16 (2022), 573-583. https://doi.org/10.1609/icwsm.v16i1.19316",
|
| 1800 |
+
"[21] Chen Ling, Krishna P Gummadi, and Savvas Zannettou. 2023. \" Learn the Facts About COVID-19\": Analyzing the Use of Warning Labels on TikTok Videos. In Proceedings of the International AAAI Conference on Web and Social Media, Vol. 17. 554-565.",
|
| 1801 |
+
"[22] Shuang Liu, Baiyang Zhao, Renjie Guo, Guozhu Meng, Fan Zhang, and Meishan Zhang. 2021. Have you been properly notified? automatic compliance analysis of privacy policy text with GDPR article 13. In Proceedings of the Web Conference 2021. 2154-2164.",
|
| 1802 |
+
"[23] Juan Carlos Medina Serrano, Orestis Papakyriakopoulos, and Simon Hegelich. 2020. Dancing to the partisan beat: A first analysis of political communication on TikTok. In Proceedings of the 12th ACM Conference on Web Science, 257-266.",
|
| 1803 |
+
"[24] Maya Merhi, Sarah Rajtmajer, and Dongwon Lee. 2023. Information operations in turkey: Manufacturing resilience with free twitter accounts. In Proceedings of the International AAAI Conference on Web and Social Media, Vol. 17, 638-649.",
|
| 1804 |
+
"[25] Kostantinos Papadamou, Savvas Zannettou, Jeremy Blackburn, Emiliano De Cristofaro, Gianluca Stringhini, and Michael Sirivianos. 2022. \"It Is Just a Flu\": Assessing the Effect of Watch History on YouTube's Pseudoscientific Video Recommendations. 16 (2022), 723-734. https://ojs.aaii.org/index.php/ICWSM/article/view/19329",
|
| 1805 |
+
"[26] Orestis Papakyriakopoulos and Ellen Goodman. 2022. The impact of Twitter labels on misinformation spread and user engagement: Lessons from Trump's election tweets. In Proceedings of the ACM web conference 2022. 2541-2551.",
|
| 1806 |
+
"[27] Manoel Horta Ribeiro and Robert West. 2021. YouNiverse: Large-Scale Channel and Video Metadata from English-Speaking YouTube. In ICWSM. 1016-1024.",
|
| 1807 |
+
"[28] Ellen Simpson and Bryan Semaan. 2021. For You, or For \"You\"? Everyday LGBTQ+ Encounters with TikTok. Proceedings of the ACM on human-computer interaction 4, CSCW3 (2021), 1-34.",
|
| 1808 |
+
"[29] Matt G. Southern. 2022. YouTube Shorts Hits 30 Billion Views Per Day. https://www.searchenginejournal.com/youtube-shorts-hits-30-billion-views-per-day/447785/."
|
| 1809 |
+
],
|
| 1810 |
+
"bbox": [
|
| 1811 |
+
517,
|
| 1812 |
+
108,
|
| 1813 |
+
913,
|
| 1814 |
+
885
|
| 1815 |
+
],
|
| 1816 |
+
"page_idx": 9
|
| 1817 |
+
},
|
| 1818 |
+
{
|
| 1819 |
+
"type": "header",
|
| 1820 |
+
"text": "Websci '24, May 21-24, 2024, Stuttgart, Germany",
|
| 1821 |
+
"bbox": [
|
| 1822 |
+
83,
|
| 1823 |
+
75,
|
| 1824 |
+
318,
|
| 1825 |
+
87
|
| 1826 |
+
],
|
| 1827 |
+
"page_idx": 9
|
| 1828 |
+
},
|
| 1829 |
+
{
|
| 1830 |
+
"type": "header",
|
| 1831 |
+
"text": "Caroline Violot, Tugrulcan Elmas, Igor Bilogrevic, and Mathias Humbert",
|
| 1832 |
+
"bbox": [
|
| 1833 |
+
570,
|
| 1834 |
+
75,
|
| 1835 |
+
913,
|
| 1836 |
+
87
|
| 1837 |
+
],
|
| 1838 |
+
"page_idx": 9
|
| 1839 |
+
},
|
| 1840 |
+
{
|
| 1841 |
+
"type": "list",
|
| 1842 |
+
"sub_type": "ref_text",
|
| 1843 |
+
"list_items": [
|
| 1844 |
+
"[30] Conghui Su, Hui Zhou, Liangyu Gong, Binyu Teng, Fengji Geng, and Yuzheng Hu. 2021. Viewing personalized video clips recommended by TikTok activates default mode network and ventral tegmental area. NeuroImage 237 (2021), 118136.",
|
| 1845 |
+
"[31] Todd Sherman. 2021. Bringing YouTube Shorts to the U.S. https://blog.youtube/news-and-events/youtube-shorts-united-states/.",
|
| 1846 |
+
"[32] Victor Potrel. 2022. Five Insights Into The Popularity Of Short-Form Video Content. https://www.forbes.com/sites/forbescommunicationscouncil/2022/09/06/five-insights-into-the-popularity-of-short-form-video-content/.",
|
| 1847 |
+
"[33] Kexin Wang and Sebastian Scherr. 2022. Dance the Night Away: How Automatic TikTok Use Creates Pre-Sleep Cognitive Arousal and Daytime Fatigue. Mobile Media & Communication 10, 2 (May 2022), 316-336. https://doi.org/10.1177/20501579211056116",
|
| 1848 |
+
"[34] Mirjam Wattenhofer, Roger Wattenhofer, and Zack Zhu. 2012. The YouTube Social Network. https://ods(aaai.org/index.php/ICWSM/article/view/14243.6.1 (2012), 354-361. Number: 1.",
|
| 1849 |
+
"[35] Siqi Wu and Paul Resnick. 2021. Cross-Partisan Discussions on YouTube: Conservatives Talk to Liberals but Liberals Don't Talk to Conservatives. In Proceedings of the Fifteenth International AAAI Conference on Web and Social Media, ICWSM 2021, held virtually, June 7-10, 2021, Ceren Budak, Meeyoung"
|
| 1850 |
+
],
|
| 1851 |
+
"bbox": [
|
| 1852 |
+
84,
|
| 1853 |
+
108,
|
| 1854 |
+
480,
|
| 1855 |
+
301
|
| 1856 |
+
],
|
| 1857 |
+
"page_idx": 10
|
| 1858 |
+
},
|
| 1859 |
+
{
|
| 1860 |
+
"type": "list",
|
| 1861 |
+
"sub_type": "ref_text",
|
| 1862 |
+
"list_items": [
|
| 1863 |
+
"Cha, Daniele Quercia, and Lexing Xie (Eds.). AAAI Press, 808-819. https://ojs.aaii.org/index.php/ICWSM/article/view/18105",
|
| 1864 |
+
"[36] YouTube Developers. 2022. YouTube Data API | Google for Developers. https://developers.google.com/youtube/v3/.",
|
| 1865 |
+
"[37] YouTube Help. 2023. Create Shorts. https://support.google.com/youtube/topic/10343432.",
|
| 1866 |
+
"[38] YouTube Help. 2023. Upload Videos. https://support.google.com/youtube/topic/16547.",
|
| 1867 |
+
"[39] YouTube Research. 2022. Program Terms & Conditions. https://research.youtube/policies/terms/.",
|
| 1868 |
+
"[40] YouTube Research. 2022. YouTube Researcher Program. https://research.youtube/.",
|
| 1869 |
+
"[41] Jing Zeng and Crystal Abidin. 2021. # OkBoomer, time to meet the Zoomers: Studying the memefication of intergenerational politics on TikTok. Information, Communication & Society 24, 16 (2021), 2459-2481.",
|
| 1870 |
+
"[42] Cevin Zhang, Hemingxi Zheng, and Qing Wang. 2022. Driving Factors and Moderating Effects Behind Citizen Engagement With Mobile Short-Form Videos. IEEE Access 10 (2022), 40999–41009. https://doi.org/10.1109/ACCESS.2022.3167687"
|
| 1871 |
+
],
|
| 1872 |
+
"bbox": [
|
| 1873 |
+
516,
|
| 1874 |
+
109,
|
| 1875 |
+
913,
|
| 1876 |
+
291
|
| 1877 |
+
],
|
| 1878 |
+
"page_idx": 10
|
| 1879 |
+
},
|
| 1880 |
+
{
|
| 1881 |
+
"type": "header",
|
| 1882 |
+
"text": "Shorts vs. Regular Videos on YouTube: A Comparative Analysis of User Engagement and Content Creation Trends",
|
| 1883 |
+
"bbox": [
|
| 1884 |
+
83,
|
| 1885 |
+
75,
|
| 1886 |
+
622,
|
| 1887 |
+
87
|
| 1888 |
+
],
|
| 1889 |
+
"page_idx": 10
|
| 1890 |
+
},
|
| 1891 |
+
{
|
| 1892 |
+
"type": "header",
|
| 1893 |
+
"text": "Websci '24, May 21-24, 2024, Stuttgart, Germany",
|
| 1894 |
+
"bbox": [
|
| 1895 |
+
679,
|
| 1896 |
+
75,
|
| 1897 |
+
911,
|
| 1898 |
+
87
|
| 1899 |
+
],
|
| 1900 |
+
"page_idx": 10
|
| 1901 |
+
}
|
| 1902 |
+
]
|
2403.00xxx/2403.00454/32cf1812-4569-4224-b8b1-fbebe2604343_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00454/32cf1812-4569-4224-b8b1-fbebe2604343_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b6323a5eeb9fd7d475f5fb5a3ec2b280e03ca576b3d4283017162ba9756cf071
|
| 3 |
+
size 982933
|
2403.00xxx/2403.00454/full.md
ADDED
|
@@ -0,0 +1,327 @@
|
| 1 |
+
# Shorts vs. Regular Videos on YouTube: A Comparative Analysis of User Engagement and Content Creation Trends
|
| 2 |
+
|
| 3 |
+
Caroline Violot caroline.violot@unil.ch University of Lausanne
|
| 4 |
+
|
| 5 |
+
Igor Bilogrevic
|
| 6 |
+
ibilogrevic@gmail.com
|
| 7 |
+
Google
|
| 8 |
+
|
| 9 |
+
Tugrulcan Elmas telmas@iu.edu Indiana University Bloomington
|
| 10 |
+
|
| 11 |
+
Mathias Humbert
|
| 12 |
+
mathias.humbert@unil.ch
|
| 13 |
+
University of Lausanne
|
| 14 |
+
|
| 15 |
+
# ABSTRACT
|
| 16 |
+
|
| 17 |
+
YouTube introduced the Shorts video format in 2021, allowing users to upload short videos that are prominently displayed on its website and app. Despite having such a large visual footprint, there are no studies to date that have looked at the impact the introduction of Shorts had on the production and consumption of content on YouTube. This paper presents the first comparative analysis of YouTube Shorts versus regular videos with respect to user engagement (i.e., views, likes, and comments), content creation frequency, and video categories. We collected a dataset containing information about 70k channels that posted at least one Short, and we analyzed the metadata of all the videos (9.9M Shorts and 6.9M regular videos) they uploaded between January 2021 and December 2022, spanning a two-year period including the introduction of Shorts. Our longitudinal analysis shows that content creators consistently increased the frequency of Shorts production over this period, especially on newly created channels, with Shorts production eventually surpassing that of regular videos. We also observe that Shorts target mostly entertainment categories, while regular videos cover a wide variety of categories. In general, Shorts attract more views and likes per view than regular videos, but attract fewer comments per view. However, Shorts do not outperform regular videos in the education and political categories as much as they do in other categories. Our study contributes to understanding social media dynamics, to quantifying the spread of short-form content, and to motivating future research on its impact on society.
|
| 18 |
+
|
| 19 |
+
# CCS CONCEPTS
|
| 20 |
+
|
| 21 |
+
- Information systems $\rightarrow$ Social networks; Web log analysis.
|
| 22 |
+
|
| 23 |
+
# KEYWORDS
|
| 24 |
+
|
| 25 |
+
YouTube, Short-Form Video Content, Engagement, Popularity, Upload Behavior, Social Media Dynamics, Content Production Patterns
|
| 26 |
+
|
| 27 |
+
# ACM Reference Format:
|
| 28 |
+
|
| 29 |
+
Caroline Violot, Tugrulcan Elmas, Igor Bilogrevic, and Mathias Humbert. 2024. Shorts vs. Regular Videos on YouTube: A Comparative Analysis of User
|
| 30 |
+
|
| 31 |
+
Permission to make digital or hard copies of part or all of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for third-party components of this work must be honored. For all other uses, contact the owner/author(s).
|
| 32 |
+
|
| 33 |
+
Websci '24, May 21-24, 2024, Stuttgart, Germany
|
| 34 |
+
|
| 35 |
+
© 2024 Copyright held by the owner/author(s).
|
| 36 |
+
|
| 37 |
+
ACM ISBN 979-8-4007-0334-8/24/05
|
| 38 |
+
|
| 39 |
+
https://doi.org/10.1145/3614419.3644023
|
| 40 |
+
|
| 41 |
+
Engagement and Content Creation Trends. In ACM Web Science Conference (Websci '24), May 21-24, 2024, Stuttgart, Germany. ACM, New York, NY, USA, 11 pages. https://doi.org/10.1145/3614419.3644023
|
| 42 |
+
|
| 43 |
+
# 1 INTRODUCTION
|
| 44 |
+
|
| 45 |
+
During the last few years, short-form video content has gained widespread popularity [29, 32]. TikTok, a social media platform launched in 2016 that focuses on short videos, quickly became a commercial success, with 3 billion downloads and 1 billion active monthly users in 2023 [8]. Shortly after that, platforms such as YouTube, Instagram, and Facebook introduced their own short-form video content features, with a similar format across all platforms. YouTube, in particular, introduced its so-called Shorts format as a beta version in the US on March 18, 2021 [31], and worldwide a few months later. Since its introduction on YouTube, more and more content creators have started to produce content in this format [29].
|
| 46 |
+
|
| 47 |
+
The world of short-form video content is currently a topic of lively discussion, with a spectrum of perspectives. On the one hand, it provides an opportunity for creators to engage and entertain their audience with concise and attractive content. Moreover, short-form videos can be effective at disseminating information about social issues [42] and create new professional perspectives for content creators [2]. On the other hand, recent studies exploring TikTok highlighted some potentially concerning aspects for its users, such as a form of dependency [30], increase of daytime fatigue [33] and decrease of prospective memory [7]. Moreover, its impact on informative content, for example educational videos that thrive on depth and detail, should be explored. To investigate this, we focus on a fundamental research question: "Are short videos replacing longer videos on YouTube, the most popular online video-sharing platform?"
|
| 48 |
+
|
| 49 |
+
To address this question, we undertake the first comprehensive study of YouTube Shorts, comparing them with regular videos (RVs), in terms of their effect on overall channel behavior and user engagement. Using data from the public YouTube Data API [36], we are able to control for platform, creator, and video features, offering insights into the evolving YouTube ecosystem. Our longitudinal comparative analysis quantifies changes in video content creation and user engagement across Shorts and RVs over the two years following the introduction of YouTube Shorts in March 2021. This study aims to provide valuable insights to content creators, advertisers, and researchers, allowing them to better understand the
|
| 50 |
+
|
| 51 |
+
perspectives of an era marked by the rise of short-form video content. To delve deeper into the impact of Shorts on platform content and user engagement, we address the following research questions:
|
| 52 |
+
|
| 53 |
+
RQ1 How did the introduction of Shorts affect preexisting channels in terms of content creation behavior, and how do channels created after Shorts introduction differ from older channels?
|
| 54 |
+
|
| 55 |
+
RQ2 How do Shorts compare to RVs in terms of views and content creation frequency across video categories?
|
| 56 |
+
|
| 57 |
+
RQ3 What differences between Shorts and RVs can be observed in terms of user engagement (views, likes, and comments), and does the duration of RVs have an influence?
|
| 58 |
+
|
| 59 |
+
Our dataset contains data about 70k channels that have created at least one video in the Shorts format since March 2021. We collected metadata of 9.9M Shorts and 6.9M RVs posted by those channels between January 2021 and December 2022, which allowed us to analyze both the time window around Shorts introduction and the long-lasting impact it had on those channels and their videos. For each video we also retrieved its category and its number of views, likes, and comments.
|
| 60 |
+
|
| 61 |
+
Our results highlight the emergence of three main trends. First, we observed that channels that posted at least one Short tended to adopt the format, eventually uploading more Shorts than RVs. Second, we found that categories are not distributed evenly between Shorts and RVs: Shorts were mainly uploaded in entertainment-related categories while RVs encompassed a wide variety of content, including political and educational material. This indicates that the two types of videos are not created to cover the same themes, but rather coexist on the platform for different purposes. Additionally, political, educational, and artistic Shorts generate fewer views, suggesting that for some categories, viewers prefer RVs. Overall, we found that Shorts outperformed RVs in terms of views and likes per view, but generated fewer comments per view, although this gap is narrowing. This trend is even more pronounced when comparing the engagement metrics of Shorts and RVs uploaded by the same channel, with Shorts getting 110 times more views (on average) than their RV counterparts. However, when splitting RVs into different duration groups, we found that the median number of views of videos from 10 to 30 minutes long was higher than the median number of Shorts views.
|
| 62 |
+
|
| 63 |
+
The rest of the paper is organized as follows. Section 2 describes how we collected and processed the data used in this article. Section 3 shows how the video publishing behavior evolved over the period of observation. Section 4 reports the differences in content between Shorts and RVs. Section 5 describes how users engaged with both types of videos in terms of views, likes, and comments. Section 6 discusses the impact of Shorts' introduction on overall content publishing behavior and user reaction. Section 7 provides the prior literature on the topic. Finally, Section 8 concludes the paper with its main findings, discusses its limitations, and provides future directions of research.
|
| 64 |
+
|
| 65 |
+
# 2 DATA COLLECTION
|
| 66 |
+
|
| 67 |
+
Before diving into the details of our data collection, hereafter we briefly describe YouTube's RVs and Shorts. RVs can last from a few seconds to several hours. They are recorded and usually edited outside of YouTube, and they can be in a horizontal, vertical, or
|
| 68 |
+
|
| 69 |
+
square format [38]. Shorts is a newer format that can last up to 60 seconds and must be in a vertical or square shape, optimized for viewing on mobile devices. They can be created outside of YouTube and then posted, but they can also be created directly from the app, by filming one or several clips that are combined on the spot, adding music, adjusting recording speed, adding filters, etc. [37], making Shorts particularly easy and quick to shoot, edit, and upload. Shorts have their own dedicated tab on the platform website or app, and users can move from one video to another by swiping on an endless scroll, without actively clicking on or searching for the videos.
|
| 70 |
+
|
| 71 |
+
In order to efficiently collect relevant data, we leverage the YouTube Data API [36]. It provides methods to search video metadata by keywords or by channel identifiers, and to collect channel metadata, among other functionalities. Unfortunately, the YouTube Data API does not currently support random sampling of videos, hence we had to define a methodology to collect videos while acknowledging that the resulting dataset could contain some biases. We discuss them at the end of this section.
|
| 72 |
+
|
| 73 |
+
# 2.1 Collection Process
|
| 74 |
+
|
| 75 |
+
The data collection consisted of three steps: (i) collecting an initial set of short videos (i.e., seeds), (ii) identifying which of them are Shorts, and (iii) growing the dataset.
|
| 76 |
+
|
| 77 |
+
2.1.1 Collecting seed Shorts. Our primary objective is to collect seed Shorts so that we can identify channels that include both Shorts and RVs for our comparative analysis. As the YouTube API requires keywords to provide videos, we first come up with a comprehensive set of keywords that represent video categories for which the YouTubers create Shorts. To this end, we used common video categories of TikTok as search queries, assuming they would also be common in YouTube Shorts. We collected the categories from a digital marketing website.<sup>1</sup> We separated the terms that contained an "&" (for example "food & cooking" was split into a "food" keyword and a "cooking" keyword), for a total of 50 keywords.
|
| 78 |
+
|
| 79 |
+
The queries returned both Shorts and RVs as the YouTube API did not provide an option to collect only Shorts. However, it was possible to restrict the "Search" results to "short" (less than 4 minutes), "medium" (between 4 and 20 minutes), and "long" videos (more than 20 minutes). Thus, to maximize the number of Shorts we collected in this seed phase, we collected only videos from the "short" category.
|
| 80 |
+
|
| 81 |
+
We first collected videos posted between March 18, 2021, (the date of the US beta launch of YouTube's Shorts [31]) and July 26, 2022. The API returns around 500 videos per query, so to further increase the number of results and ensure that the search results were not biased towards a specific period, we divided the time period into weeks, e.g., we first collected videos that were published between March 18, 2021, and March 21, 2021, then collected videos between March 22, 2021, and March 28, 2021, and so on, each time using all the aforementioned keywords. In total, we collected around 300k videos from 150k channels.
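To make this windowed collection concrete, the sketch below shows one way to page through the search endpoint week by week with the google-api-python-client library. It is a minimal sketch rather than the exact collection script: the API key and the two example keywords are placeholders, and quota handling and retries are omitted.

```python
# Sketch of the weekly, keyword-based seed collection (placeholder API key and keywords).
from datetime import datetime, timedelta
from googleapiclient.discovery import build

youtube = build("youtube", "v3", developerKey="YOUR_API_KEY")

def search_week(keyword, start, end):
    """Collect video IDs for one keyword within one week, restricted to 'short' videos."""
    ids, token = [], None
    while True:
        resp = youtube.search().list(
            part="id", q=keyword, type="video", videoDuration="short",
            publishedAfter=start.isoformat("T") + "Z",
            publishedBefore=end.isoformat("T") + "Z",
            maxResults=50, pageToken=token,
        ).execute()
        ids += [item["id"]["videoId"] for item in resp.get("items", [])]
        token = resp.get("nextPageToken")
        if token is None:
            return ids

keywords = ["food", "cooking"]  # stand-ins for the 50 keywords
seed_ids, week_start = set(), datetime(2021, 3, 18)
while week_start < datetime(2022, 7, 26):
    week_end = week_start + timedelta(days=7)
    for kw in keywords:
        seed_ids.update(search_week(kw, week_start, week_end))
    week_start = week_end
```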
|
| 82 |
+
|
| 83 |
+

|
| 84 |
+
Figure 1: Data collection process summary.
|
| 85 |
+
|
| 86 |
+
2.1.2 Labelling Shorts. The YouTube Data API does not currently return information on whether a video is a Short or a RV. Hence, we use the following methodology to identify which videos are Shorts: we send a GET request to www.youtube.com/shorts/<videoId> for each videoId and check in the redirection link if the URL stayed the same or if it was modified to the regular www.youtube.com/watch?v=<videoId>. YouTube allows older videos to be seen in the Shorts tab, as long as they are up to 60 seconds and have a square or vertical aspect ratio, so this method can classify videos uploaded before Shorts introduction as Shorts. Nevertheless, this method allows us to determine whether creators turned to short-form, square/vertical video content. We classified 144k videos as Shorts, and 159k as RVs using this method.
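A minimal sketch of this redirect-based labelling is shown below, using the requests library. The video ID is a placeholder, and batching, rate limiting, and error handling are omitted.

```python
# Sketch of the redirect check used to label a video as a Short or a regular video.
import requests

def is_short(video_id: str) -> bool:
    """True if youtube.com/shorts/<id> keeps the /shorts/ path instead of redirecting to /watch."""
    resp = requests.get(f"https://www.youtube.com/shorts/{video_id}",
                        allow_redirects=True, timeout=10)
    # Regular videos are redirected to www.youtube.com/watch?v=<id>;
    # Shorts keep the /shorts/ path in the final URL.
    return "/shorts/" in resp.url

print(is_short("dQw4w9WgXcQ"))  # placeholder video ID
```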
|
| 87 |
+
|
| 88 |
+
2.1.3 Growing the dataset and collecting additional metadata. Using the aforementioned method to identify Shorts, we identified all the channels that contained at least one Short during our period of interest. Among the 150k channels collected in the first step, there were 70,712 channels with at least one Short video. We collected all the videos posted by these channels between January 1, 2021, and December 31, 2022, totalling 16,746,091 videos, of which 6,862,321 were RVs and 9,883,770 were Shorts. Using the YouTube Data API, we collected the videos' metadata (title, description, posting date and time, duration, channel) and engagement statistics (number of views, likes, and comments). We also collected the YouTube category of each video. The YouTube categories (listed in Table 1 in Section 4) consist of 15 categories, such as Music or Gaming, that creators or YouTube assign. Finally, we collected the channels' metadata, mainly their title, description, creation date, and origin country. We further collected the channels' engagement metrics, i.e., the total view count, subscriber count, and number of uploaded videos.
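The metadata retrieval can be sketched as follows, assuming the `youtube` client built as in the earlier snippet; the field names follow the public API documentation, and the record layout is illustrative.

```python
# Sketch of batched metadata collection (assumes the `youtube` client from the earlier sketch).
def fetch_video_metadata(youtube, video_ids):
    """Retrieve title, posting date, category, duration and engagement statistics."""
    records = []
    for i in range(0, len(video_ids), 50):  # videos.list accepts up to 50 IDs per call
        resp = youtube.videos().list(
            part="snippet,contentDetails,statistics",
            id=",".join(video_ids[i:i + 50]),
        ).execute()
        for item in resp.get("items", []):
            records.append({
                "videoId": item["id"],
                "title": item["snippet"]["title"],
                "publishedAt": item["snippet"]["publishedAt"],
                "categoryId": item["snippet"]["categoryId"],
                "duration": item["contentDetails"]["duration"],  # ISO 8601, e.g. "PT58S"
                "viewCount": int(item["statistics"].get("viewCount", 0)),
                "likeCount": int(item["statistics"].get("likeCount", 0)),
                "commentCount": int(item["statistics"].get("commentCount", 0)),
            })
    return records
```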
|
| 89 |
+
|
| 90 |
+
# 2.2 YouTube Terms & Conditions Compliance
|
| 91 |
+
|
| 92 |
+
The YouTube Data API limits the daily number of queries with a quotas system, where costs vary between different methods, e.g., a video search query costs 100 quotas and a channel metadata query costs 1 quota. The default number of quotas per day is $10\mathrm{k}$ . As this limit is too restrictive to collect a large-scale dataset, we applied to and joined the YouTube Research Program [40] and obtained a research quota extension of 1M queries per day. We made sure to comply with the specific data policies and terms of use that come with being part of the YouTube Research Program [39]. In
|
| 93 |
+
|
| 94 |
+
particular, we are not able to share our data due to the no-data disclosure, which forbids us to "disclose, reproduce, sell, license or otherwise transfer to any third party, in part or in whole, any Program Data".
|
| 95 |
+
|
| 96 |
+
# 2.3 Bias Discussion
|
| 97 |
+
|
| 98 |
+
Ideally, we would prefer a random sample of videos for our collection of seed Shorts, for an unbiased analysis. However, as previously mentioned, YouTube does not currently support such random sampling. Furthermore, we are not able to use an empty query or queries with very general keywords to approximate random sampling. This is because we observe that regardless of the keyword, YouTube limits the number of search results, e.g., we could only collect 597 videos with the query "cats" and 251 videos with an empty query. Past research instead focused on the most influential videos, using popularity as a proxy. For instance, Ribeiro et al. crawled channels with at least 10k subscribers and then collected their videos to provide a comprehensive dataset of YouTube [27]. Although this approach may facilitate studying popular channels, it may prevent us from analyzing YouTube Shorts that went viral and were viewed many times despite the low popularity of the channel. Our approach mitigates such a bias, but we acknowledge that it creates a bias towards the videos related to the keywords we used.
|
| 99 |
+
|
| 100 |
+
# 3 POSTING BEHAVIOR EVOLUTION
|
| 101 |
+
|
| 102 |
+
We present here the evolution of the channels' posting behavior, from January 2021 to December 2022.
|
| 103 |
+
|
| 104 |
+
# 3.1 Evolution of Global Video Uploads
|
| 105 |
+
|
| 106 |
+
We first focus on the overall posting evolution, without considering individual channels' behavior, and analyze the total number of Shorts and RVs uploaded per week. YouTube allows any video shorter than 60 seconds, with a square/vertical format, to be displayed in the Shorts tab (and therefore categorized as such), hence videos from before the introduction of Shorts can be labeled as Shorts. As shown in Figure 2, we observe a constant rise in the number of new Shorts until mid-2022, followed by a slight decrease, and a constant number of created RVs. This shows that, while collectively continuing to produce RVs, video creators have also produced an increasing number of Shorts since March 2021.
|
| 107 |
+
|
| 108 |
+

|
| 109 |
+
Figure 2: Weekly video uploads, categorized into Shorts and RVs, with the number of channels older than the respective week shown in light grey. Some videos created before the introduction of Shorts were retrospectively classified as Shorts.
|
| 110 |
+
|
| 111 |
+
# 3.2 Evolution of Shorts Prevalence
|
| 112 |
+
|
| 113 |
+
Next, we focus on the individual posting behavior of channels, aiming to show results that equally reflect the behavior of all the channels in our dataset, without being biased towards the highly prolific channels. For most of the analysis, we first split the channels between newer channels and older channels, based on whether they were created after or before Shorts introduction.
|
| 114 |
+
|
| 115 |
+
For each week between January 2021 and December 2022, we categorize active channels based on the percentage of Shorts they posted each week into the following categories: $[0\%, 1 - 50\%, 51 - 99\%, 100\%]$ . A given channel can change category from one week to another, if its posting behavior evolves. Then, we compute the fraction of channels belonging to each category and show the evolution of the posting behavior in Figure 3. Channels are separated between older channels and newer channels.
|
| 116 |
+
|
| 117 |
+
In March 2021, the fraction of channels posting exclusively Shorts was 2.2 times higher among newly created channels than among older channels, with more than $60\%$ of the latter opting to post exclusively RVs. From there, we see a similar evolution for older and newer channels, with a marked increase in the fraction of channels posting only Shorts and a decrease in the fraction of channels posting only RVs. However, while the intermediate categories exhibit a constant fraction for newer channels, we observe that, for older channels, the intermediate categories have increased since January 2021. This indicates that a substantial number of the older channels which started posting Shorts continued to create RVs as well.
|
| 118 |
+
|
| 119 |
+
# 3.3 Evolution of Posting Frequency
|
| 120 |
+
|
| 121 |
+
Having observed that channels created an increasing number of Shorts, collectively and individually, we analyze the impact it had on the production of RVs. Again, separating channels between older and newer channels, for each channel $c$ and each week $w$ , we compute $n_{cw}^{\text{regular}}$ (resp. $n_{cw}^{\text{Shorts}}$ ), the number of RVs (resp. Shorts) uploaded that week by that channel. We divide each $n_{cw}^{*}$ by $n_c$ , the total number of videos posted by channel $c$ , and obtain $f_{cw}^{*}$ , the normalized frequency of posting for each channel. We finally compute
|
| 122 |
+
|
| 123 |
+

|
| 124 |
+
Figure 3: Analysis of channels' posting activity from January 2021 to December 2022. Channels were divided into groups based on the percentage of Shorts in the videos uploaded each week. The evolution of the fractions of channels in each group is shown.
|
| 125 |
+
|
| 126 |
+
$f_{w}^{*}$ , by averaging the normalized frequencies across all channels for each week, shown in Figure 4. This approach enables us to discern the weeks during which channels collectively uploaded more or fewer videos of each type, each channel contributing equally to the outcome, regardless of their total uploads count.
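A possible pandas implementation of these normalized frequencies is sketched below. It assumes a DataFrame `videos` with one row per video and columns `channel`, `week`, and `is_short`; the column names are illustrative, and channel creation dates are ignored for simplicity.

```python
# Sketch of the normalized weekly posting frequency f*_w (column names are illustrative).
import pandas as pd

def normalized_frequency(videos: pd.DataFrame) -> pd.DataFrame:
    # n*_{cw}: per-channel, per-week upload counts, split into Shorts and regular videos
    counts = (videos.groupby(["channel", "week", "is_short"])
                    .size().unstack(fill_value=0)
                    .rename(columns={True: "shorts", False: "regular"}))
    # Fill in zero rows for channel-weeks without any upload
    full_index = pd.MultiIndex.from_product(
        [counts.index.get_level_values("channel").unique(),
         counts.index.get_level_values("week").unique()],
        names=["channel", "week"])
    counts = counts.reindex(full_index, fill_value=0)
    # n_c: total number of uploads (both types) per channel
    totals = counts.groupby("channel").sum().sum(axis=1)
    # f*_{cw}: per-channel normalization, then f*_w: average across channels per week
    f_cw = counts.div(totals, level="channel", axis=0)
    return f_cw.groupby("week").mean()
```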
|
| 127 |
+
|
| 128 |
+
We observe that both newer and older channels progressively reduced the frequency of RV uploads over time. However, while the frequency of Shorts uploads increased for older channels, it decreased for newer channels. This is surprising at first, considering that some of the channels were created later, and their peak posting frequency should therefore push up the average frequency in later weeks. One possible explanation is that many channels which appeared around Shorts introduction were created in order to try the Shorts format, but many of them stopped posting videos after a few weeks (out of the 445 channels created between 18 March 2021 and 25 March 2021, $4\%$ had posted half of their videos a week after their creation and $9\%$ had posted half of their videos a month after their creation) and therefore have a high normalized frequency of posting during their first few weeks of activity.
|
| 129 |
+
|
| 130 |
+
# 3.4 Evolution of Weekly Content Volume
|
| 131 |
+
|
| 132 |
+
To complement the evolution of content uploading, we also analyzed the sum of the durations of the videos uploaded each week, called the weekly content volume. This allows us to get a sense of the amount of Shorts and RV content produced each week. As before, we separate our results between older and newer channels. Following the same logic as for the normalized frequency of posting, for each channel $c$ and each week $w$, we compute $d_{cw}^{\text{regular}}$ and $d_{cw}^{\text{Shorts}}$,
|
| 133 |
+
|
| 134 |
+

|
| 135 |
+
Figure 4: Evolution of normalized uploads frequency. Average weekly uploads of Shorts and RVs of each channel normalized by the total number videos (of both types) posted by that channel.
|
| 136 |
+
|
| 137 |
+
respectively the RVs' content volume and the Shorts' content volume uploaded that week by that channel. For $* \in \{ \text{regular, Shorts} \}$, we divide each $d_{cw}^{*}$ by $d_{c}$, the total content volume of channel $c$, and obtain $v_{cw}^{*}$, the normalized weekly content volume for each channel. The resulting quantities, shown in Figure 5, allow us to see in which weeks channels invested the most time on average. As with the posting frequencies, newer channels had a peak of content creation around the introduction of Shorts, which rapidly decreased from there, for both Shorts and RVs.
|
| 138 |
+
|
| 139 |
+
As for older channels, since the introduction of Shorts, they have on average increased their volume of Shorts content until reaching a plateau around June 2021, while their volume of RV content has been declining.
|
| 140 |
+
|
| 141 |
+
# 4 CONTENT ANALYSIS
|
| 142 |
+
|
| 143 |
+
Our content analysis relies on video categories. Each video has a single category, either chosen by the creator or assigned by YouTube. Public videos can be assigned one of 15 categories; the remaining categories are movie genres reserved for paid content.
|
| 144 |
+
|
| 145 |
+
We first examine the distribution of categories for Shorts and RVs. Table 1 shows the categories and the number of videos we collected for each category. People & Blogs, being the default category, is overrepresented. It is followed by the Entertainment category. Nonprofits & Activism and Pets & Animals were the categories for which we collected the fewest videos.
|
| 146 |
+
|
| 147 |
+
In general, the fraction of Shorts differs widely between categories. Categories with the highest fractions of Shorts are Comedy and People & Blogs whereas News & Politics and Nonprofits & Activism exhibit the lowest fractions. This observation may suggest that Shorts are predominantly used for generating lighthearted content, while RVs are the preferred format for delivering more serious
|
| 148 |
+
|
| 149 |
+

|
| 150 |
+
Figure 5: Evolution of normalized weekly content volume. Average over all channels of the weekly content volume (sum of the durations of each video posted that week), separated between Shorts and RVs, normalized by the combined weekly content volume of both types.
|
| 151 |
+
|
| 152 |
+
<table><tr><td>YouTube categories</td><td>Collected videos count</td><td>% of Shorts</td></tr><tr><td>People & Blogs</td><td>5.7M</td><td>74.3</td></tr><tr><td>Entertainment</td><td>3.2M</td><td>55.5</td></tr><tr><td>Howto & Style</td><td>1.9M</td><td>57.3</td></tr><tr><td>Education</td><td>1.8M</td><td>43.8</td></tr><tr><td>Gaming</td><td>1.0M</td><td>54.4</td></tr><tr><td>News & Politics</td><td>766.4k</td><td>14.4</td></tr><tr><td>Sports</td><td>544.8k</td><td>39.3</td></tr><tr><td>Comedy</td><td>385.3k</td><td>76.5</td></tr><tr><td>Science & Technology</td><td>340.3k</td><td>51.2</td></tr><tr><td>Music</td><td>333.0k</td><td>63.2</td></tr><tr><td>Film & Animation</td><td>288.9k</td><td>57.3</td></tr><tr><td>Travel & Events</td><td>208.0k</td><td>57.3</td></tr><tr><td>Autos & Vehicles</td><td>200.3k</td><td>57.8</td></tr><tr><td>Pets & Animals</td><td>118.4k</td><td>68.5</td></tr><tr><td>Nonprofits & Activism</td><td>42.5k</td><td>34.1</td></tr></table>
|
| 153 |
+
|
| 154 |
+
Table 1: YouTube categories and the corresponding number of videos from our dataset. The percentage of Shorts out of all the videos collected in each category is also shown.
|
| 155 |
+
|
| 156 |
+
information. People & Blogs having the largest number of Shorts could imply that creators do not specify the category when posting Shorts as often as they do when posting RVs, or that YouTube takes longer to classify them into the relevant categories.
|
| 157 |
+
|
| 158 |
+
We then observe the evolution of the categories over our two-year period. We selected the nine most common categories attributed to the videos in our dataset and labelled the rest as "Other". Next, for each category, we compute the percentage of videos to which the category was attributed out of all the videos
|
| 159 |
+
|
| 160 |
+

|
| 161 |
+
Figure 6: Evolution of the percentage of categories attributed to Shorts and RVs, showing the changes of categories popularity among creators over our time period.
|
| 162 |
+
|
| 163 |
+

|
| 164 |
+
|
| 165 |
+

|
| 166 |
+
|
| 167 |
+
posted in a given week, and repeat that for every week of our time period. Results are shown in Figure 6. We see that Shorts videos are consistently dominated by the People & Blogs category, although it is slowly declining in favor of the Entertainment category.
|
| 168 |
+
|
| 169 |
+
The categories of RVs are far more diverse and evenly distributed, and we can observe broad trends of categories rising, declining, or maintaining a constant percentage. The People & Blogs and Entertainment categories are also the most commonly attributed, but not by as wide a margin as for Shorts. We notice that the Education and Science & Technology categories maintained a constant percentage of uploads, and that the News & Politics category steadily increased after the beginning of 2022.
|
| 170 |
+
|
| 171 |
+
# 5 ENGAGEMENT ANALYSIS
|
| 172 |
+
|
| 173 |
+
In this section, we examine the evolution of the number of views, likes, and comments, collectively referred to as the engagement metrics, received by Shorts and RVs. Our caveat is that we only have access to the engagement metrics as of the query time, which limits the analysis of their evolution. However, most videos experience their peak of attention a few days after their release [6], and nine months passed between the latest video's publication date (December 31, 2022) and the collection of engagement statistics (September 1, 2023). Therefore, aside from a few exceptions where videos go viral a long time after their publication date, the engagement metrics should not fluctuate drastically, but rather grow at a roughly constant rate. This allows us to draw comparative results between the popularity of Shorts and RVs.
|
| 174 |
+
|
| 175 |
+
# 5.1 Engagement at the Video Level
|
| 176 |
+
|
| 177 |
+
We first analyzed the engagement at the video level, aggregating the results without considering channels or categories. Consistent with previous work [1], we found that $1\%$ of the Shorts (resp. RVs) attracted $63\%$ (resp. $61\%$) of the Shorts (resp. RVs) views. We also computed that views and likes are highly correlated, with a Pearson correlation coefficient (PCC) of 0.848, but that the number of comments is not necessarily correlated with the others (with a PCC of 0.273 with views and a PCC of 0.360 with likes).
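The concentration and correlation figures can be reproduced with a short script of the following form; the arrays below are synthetic placeholders standing in for the per-video view, like, and comment counts.

```python
# Sketch of the video-level concentration and correlation analysis (synthetic placeholder data).
import numpy as np
from scipy.stats import pearsonr

def top_share(views: np.ndarray, fraction: float = 0.01) -> float:
    """Share of all views captured by the top `fraction` of videos."""
    k = max(1, int(len(views) * fraction))
    return np.sort(views)[::-1][:k].sum() / views.sum()

rng = np.random.default_rng(0)
views = rng.pareto(1.2, size=10_000) * 1_000          # heavy-tailed stand-in for view counts
likes = np.clip(0.03 * views + rng.normal(0, 50, views.size), 0, None)
comments = rng.poisson(2, size=views.size).astype(float)

print("top 1% view share:", top_share(views))
r_vl, _ = pearsonr(views, likes)
r_vc, _ = pearsonr(views, comments)
print("PCC(views, likes):", round(r_vl, 3), "PCC(views, comments):", round(r_vc, 3))
```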
|
| 178 |
+
|
| 179 |
+

|
| 180 |
+
Figure 7: Evolution of engagement metrics for Shorts and RVs, including the mean views, median views, mean of the likes count divided by views count and mean of the comments count divided by views count. The gray line indicates Shorts introduction.
|
| 181 |
+
|
| 182 |
+
In Figure 7, we present an overview of the engagement metrics evolution. Specifically, we tracked the mean number of views, the median number of views, the mean likes per view and the mean comments per view.
|
| 183 |
+
|
| 184 |
+
We first observe that, over the two years, Shorts received four times as many views as RVs, and by the end of 2022, this difference had increased to six times. This is not surprising given that each user may watch far more Shorts than long videos in the same amount of time. We also note that mean views of RVs declined slowly and consistently with time, whereas mean views of Shorts
|
| 185 |
+
|
| 186 |
+
fluctuated around its introduction and then mostly increased from there, before slightly declining near the end of 2022.
|
| 187 |
+
|
| 188 |
+
As previously mentioned, the vast majority of views are harvested by a handful of videos. The extreme engagement values obtained by the top $1\%$ of videos skew the means towards higher values and prevent us from grasping the engagement evolution of lower-ranking videos, which constitute the majority. Looking at the median allows us to better understand the dynamics of the $99\%$ majority. Since the introduction of Shorts in March 2021 and until the end of the year, we observe a consistent and similar increase in the median number of views, with a slope of $11.9$ ($r^2 = 0.74$) for RVs and a slope of $13.5$ ($r^2 = 0.81$) for Shorts. But while RVs reached a plateau around March 2022, the median number of views that Shorts attracted increased drastically during 2022, with a slope of $53.0$ ($r^2 = 0.78$). This shows that even less popular Shorts still obtain a substantial number of views, which is not the case for RVs. The Shorts format thus allows not-yet-popular creators to reach a wider audience than RVs would.
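The reported slopes can be obtained by a simple linear fit of the weekly median views against the week index, for instance with scipy's linregress; the series below is a synthetic placeholder.

```python
# Sketch of the slope estimation for the weekly median views (synthetic placeholder series).
import numpy as np
from scipy.stats import linregress

weeks = np.arange(40)  # week index within the fitted window
median_views = 500 + 13.5 * weeks + np.random.default_rng(1).normal(0, 40, weeks.size)

fit = linregress(weeks, median_views)
print(f"slope = {fit.slope:.1f}, r^2 = {fit.rvalue ** 2:.2f}")
```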
|
| 189 |
+
|
| 190 |
+
Regarding the other engagement metrics, we see that around Shorts introduction, Shorts and RVs have the same likes per view rate, but as from August 2021, Shorts started to convert views into likes more effectively than RVs and by the end of 2022, Shorts' likes per view rate was 1.4 times higher than RVs' likes per view rate. Comments per view, a more active form of engagement [6], have a higher rate for RVs than Shorts, but the gap seems to be narrowing over time.
|
| 191 |
+
|
| 192 |
+
# 5.2 Engagement at the Channel Level
|
| 193 |
+
|
| 194 |
+
We established that Shorts are generally more viewed and liked than RVs. We now explore whether that is the case for videos originating from the same channel and whether the trends that were observed globally also apply on a channel basis. We first split our channels between the $1\%$ with the most subscribers (referred to as top 1 channels) and the rest (referred to as bottom 99 channels). The views of the top $1\%$ of channels accounted for $46\%$ of the total views.
|
| 195 |
+
|
| 196 |
+
We divide our two-year period into four semesters, referred to as "2021-S1" for the $1^{\text{st}}$ semester of 2021, "2021-S2" for the $2^{\text{nd}}$ semester of 2021, and so on. We then compute the mean views per channel and per semester, distinguishing between Shorts and RVs. This process is repeated for each semester, including only channels that posted both Shorts and RVs that semester. The ratio of mean views for Shorts to RVs is computed for each channel, and the average ratio is obtained by averaging across all eligible channels for each semester. This yields the evolution of the average ratio between Shorts' and RVs' views on a per-channel basis, and gives a closer intuition of the difference in engagement between Shorts and RVs that creators can expect for their channel. Results are shown in Figure 8.
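A sketch of this per-channel ratio computation with pandas is given below, assuming a DataFrame with columns `channel`, `semester`, `is_short`, and `views`; the column names are illustrative.

```python
# Sketch of the per-channel Shorts-to-RV views ratio, averaged per semester (illustrative columns).
import pandas as pd

def mean_views_ratio(videos: pd.DataFrame) -> pd.Series:
    # Mean views per channel, semester, and video type
    means = (videos.groupby(["channel", "semester", "is_short"])["views"]
                   .mean().unstack())
    # Keep only channel-semesters with both Shorts and regular videos
    means = means.dropna()
    ratio = means[True] / means[False]       # Shorts views / RV views per channel-semester
    return ratio.groupby("semester").mean()  # average ratio across eligible channels
```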
|
| 197 |
+
|
| 198 |
+
For both popularity classes, Shorts consistently draw significantly more views than RVs from the same channel: 80 times more for top 1 channels and 111 times more for bottom 99 channels, on average over the two-year period. However, while the ratio between Shorts' and RVs' views increases for top 1 channels, it declines for bottom 99 channels. Nonetheless, both groups can still reach a broader audience using Shorts than RVs.
|
| 199 |
+
|
| 200 |
+

|
| 201 |
+
Figure 8: Evolution of the average ratio between channels' Shorts' and RVs' views, divided between the top $1\%$ of channels with regards to subscribers count and the rest.
|
| 202 |
+
|
| 203 |
+

|
| 204 |
+
|
| 205 |
+

|
| 206 |
+
Figure 9: Mean number of views and median number of views for Shorts and RVs of different durations.
|
| 207 |
+
|
| 208 |
+

|
| 209 |
+
|
| 210 |
+
# 5.3 Engagement Based on Duration
|
| 211 |
+
|
| 212 |
+
We previously compared the mean and median views of Shorts and RVs but, while Shorts are restricted to 60 seconds, RVs display a wide range of durations, each format requiring a different engagement from viewers and creators. We classified the RVs based on their duration into the following time intervals: less than 1 minute, 1 to 5 minutes, 5 to 10 minutes, 10 to 30 minutes, 30 minutes to 1 hour, and longer than 1 hour. For each group we computed the mean and the median number of views. Results are shown in Figure 9. The means of the views confirm previous results on the superiority of Shorts over RVs in terms of engagement, although to varying degrees, the least popular group being the 1-5 minutes group and the most popular being the under 1 minute group, followed by the 10-30 minutes group. The popularity of the latter is confirmed when looking at the medians, where it appears that half of the videos between 10 and 30 minutes obtained around 5,800 views or more, which is well above the other groups and three times the median number of views for Shorts. It is quite surprising to observe that, in terms of medians, longer RVs surpass Shorts in attracting views.
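A sketch of this duration-based grouping with pandas is shown below; the column names and the duration unit (seconds) are illustrative assumptions.

```python
# Sketch of the duration-based grouping of regular videos (illustrative column names, seconds).
import pandas as pd

BINS = [0, 60, 300, 600, 1800, 3600, float("inf")]
LABELS = ["<1 min", "1-5 min", "5-10 min", "10-30 min", "30-60 min", ">1 h"]

def views_by_duration(regular_videos: pd.DataFrame) -> pd.DataFrame:
    """Mean and median views per duration group, for regular videos only."""
    groups = pd.cut(regular_videos["duration_s"], bins=BINS, labels=LABELS)
    return regular_videos.groupby(groups)["views"].agg(["mean", "median"])
```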
|
| 213 |
+
|
| 214 |
+
<table><tr><td rowspan="2">category</td><td colspan="2">2021</td><td colspan="2">2022</td></tr><tr><td>S1</td><td>S2</td><td>S1</td><td>S2</td></tr><tr><td>Autos & Vehicles</td><td>1.42</td><td>2.27</td><td>2.49</td><td>2.20</td></tr><tr><td>Comedy</td><td>7.28</td><td>4.61</td><td>2.77</td><td>3.84</td></tr><tr><td>Education</td><td>5.28</td><td>3.28</td><td>3.88</td><td>4.26</td></tr><tr><td>Entertainment</td><td>6.39</td><td>4.33</td><td>4.53</td><td>5.70</td></tr><tr><td>Film & Animation</td><td>1.53</td><td>1.20</td><td>1.88</td><td>2.41</td></tr><tr><td>Gaming</td><td>2.16</td><td>2.45</td><td>2.38</td><td>3.97</td></tr><tr><td>Howto & Style</td><td>2.93</td><td>3.55</td><td>4.23</td><td>4.68</td></tr><tr><td>Music</td><td>0.42</td><td>0.46</td><td>0.79</td><td>1.10</td></tr><tr><td>News & Politics</td><td>0.72</td><td>4.63</td><td>3.20</td><td>2.80</td></tr><tr><td>Nonprofits & Activism</td><td>1.18</td><td>4.39</td><td>8.59</td><td>12.88</td></tr><tr><td>People & Blogs</td><td>5.64</td><td>4.32</td><td>5.59</td><td>7.05</td></tr><tr><td>Pets & Animals</td><td>2.47</td><td>2.84</td><td>4.73</td><td>8.16</td></tr><tr><td>Science & Technology</td><td>2.30</td><td>2.37</td><td>3.20</td><td>4.15</td></tr><tr><td>Sports</td><td>3.55</td><td>3.46</td><td>5.96</td><td>6.59</td></tr><tr><td>Travel & Events</td><td>2.51</td><td>2.31</td><td>4.30</td><td>7.03</td></tr><tr><td>All</td><td>4.19</td><td>3.40</td><td>4.04</td><td>5.2</td></tr></table>
|
| 215 |
+
|
| 216 |
+
Table 2: Evolution of the ratio between the mean number of views per Short and the mean number of views per RV, categorized by different YouTube categories, over the two years' semesters. The overall ratio trend for all categories combined is given. Values exceeding the overall ratio for each semester and category are highlighted in bold.
|
| 217 |
+
|
| 218 |
+
# 5.4 Engagement Based on Categories
|
| 219 |
+
|
| 220 |
+
Finally, we compare the levels of engagement generated by Shorts and RVs within the different categories, in order to see if, for some categories, RVs attracted more views than Shorts. For each semester and each category we compute the ratio between the mean number of views per Short and the mean number of views per RV. We also compute this ratio for all categories combined, as a reference. Results are given in Table 2.
|
| 221 |
+
|
| 222 |
+
We observe that the popularity of Shorts varies between categories and time periods. It appears that for some categories, such as Music, Film & Animation, Gaming, and, to a lesser extent, Science & Technology, users prefer to watch RVs rather than Shorts. Some categories, such as Comedy and Education, initially generated a high engagement for Shorts before slowly losing this advantage to the relative benefit of RVs. Conversely, for Nonprofits & Activism, which initially generated a limited engagement for Shorts, there was a progressive increase, eventually reaching 12 times more views for Shorts than RVs by the second semester of 2022. Finally, the Entertainment and the People & Blogs categories exhibit a ratio that is systematically high and above the reference ratio.
|
| 223 |
+
|
| 224 |
+
# 6 DISCUSSION
|
| 225 |
+
|
| 226 |
+
Our analysis sheds light on the significant impact of Shorts on the content created and consumed on the YouTube platform.
|
| 227 |
+
|
| 228 |
+
$RQ1$ . We focus on channels that have uploaded at least one Short to see how trying this new content affected their overall behavior. For these channels, Shorts production has grown impressively, and eventually surpassed RVs production. A notable trend is that a
|
| 229 |
+
|
| 230 |
+
large proportion of channels created after March 2021 posted only Shorts from the beginning, implying that new channels were mostly created with the goal of uploading Shorts. Older channels, while initially more inclined to upload only RVs, also gradually turned to Shorts. Both older and newer channels reduced their production of RVs, and older channels persistently maintained high uploads of Shorts, further indicating that Shorts are well established in the YouTube landscape, and that channels have a confirmed interest in uploading Shorts beyond initial curiosity. This growth in Shorts production may also be driven by the platform's efforts to popularize Shorts, for example by providing incentives for creators, with a new type of monetization [13], and frequent updates [14].
|
| 231 |
+
|
| 232 |
+
$RQ$ 2. Regarding the types of content produced, our analysis revealed that the distributions of Shorts and RVs vary widely across YouTube categories. Shorts are primarily employed for creating entertaining content, while RVs remain the preferred format for conveying more serious information, for example on politics or social activism issues. Furthermore, we observe a synchronicity between creators and viewers, as the same entertainment-related categories exhibit a ratio of Shorts views to RV views consistently higher than the reference ratio which includes all the categories. Viewers are mainly consuming Shorts for entertainment purposes and creators may have understood that from the beginning.
|
| 233 |
+
|
| 234 |
+
On the other hand, the education-related ratio of Shorts views to RV views remains consistently below the reference ratio, indicating that, while users consume Shorts in this category, they stay faithful to RVs for learning new things. Similarly, the exceptionally low ratio of the art-related categories suggests that users are willing to consume longer forms of content when it comes to videos in which artists have invested time and energy. One surprising exception was the impressive engagement generated by Shorts in the Nonprofits & Activism category, despite the low percentage of Shorts in this category. Creators uploading videos in this category might benefit from using Shorts to reach a wider audience about social issues.
|
| 235 |
+
|
| 236 |
+
$RQ3$. Analyzing engagement metrics showed that Shorts are particularly effective at capturing the attention and engagement of viewers. This advantage is even more striking when we consider videos from the same channel, where Shorts attract 110 times more views than their RV counterparts. Moreover, the gap in views between Shorts and RVs is progressively growing, both due to Shorts being increasingly watched and to RVs' views declining. One nuance to this observation is that there might be a different delay between the upload of Shorts and RVs and their consumption by viewers. A two-year period separates the first and last videos of our dataset. Usually, after the peak of attention following their upload, videos continue to slowly accumulate views [6]. This would partly explain why RVs posted during December 2022 have fewer views than videos from January 2021. However, newer Shorts have as many views as older Shorts, suggesting that old Shorts are rarely shown to users.
|
| 237 |
+
|
| 238 |
+
RVs currently have a higher comments-per-view rate than Shorts. Users watching RVs spend more time on the same video, allowing them to engage more in the comments than viewers of Shorts, who rapidly swipe to the next video. Additionally, some categories prone to generating more debate and comments, like News & Politics, are less covered by Shorts.
|
| 239 |
+
|
| 240 |
+
On YouTube, a few videos collect the vast majority of views, and mean values are not representative of the majority of videos. Hence, to explore whether less popular channels also benefit from Shorts, we examine the median views evolution. We observe an increase of the median number of views for Shorts but also for RVs, which is contrary to the mean evolution of RVs views. The majority of Shorts are increasingly viewed but so are the majority of RVs.
|
| 241 |
+
|
| 242 |
+
The evolution of the ratio between Shorts and RV views from the same channel confirms this particular dynamic. For exceptionally popular channels, Shorts are increasingly getting more views than RVs but for the remaining majority of channels the trend is the opposite. Different reasons could explain this observation. The first one comes from the decreasing frequency of RV uploads. As most channels tend to upload fewer RVs, those that are uploaded attract more views, benefiting from some scarcity effect. The second one relates to the successive updates of YouTube Shorts delivery system. Initially accessible only from a tab on the YouTube home page, without the user being able to choose, Shorts are now recommended on the home page with thumbnails, or suggested in the "watch next" banner of RVs. These different entry points allow users to click on Shorts they want to watch out of a few suggestions, which might favor popular Shorts.
|
| 243 |
+
|
| 244 |
+
We will end this discussion with an additional nuance on the Shorts superiority over RVs. In most of our analysis we made the binary distinction between Shorts and RVs. However, RVs can widely vary in duration, and videos of 5-10 minutes constitute a type of content arguably very different than 1 hour long video essays. On average, Shorts dominate RVs of all duration groups, but at different degrees. Moreover, the analysis of the medians challenges Shorts superiority. Indeed, half of the videos in the 10-30 minutes group garnered at least 5,800 views, contrasting with a median of 1,986 views for Shorts. This duration range also ranks as the third most viewed, with a mean number of views surpassing 160k, confirming its sustained popularity among YouTube video consumers.
|
| 245 |
+
|
| 246 |
+
Ethical Considerations. As our research solely relies on publicly available data and does not involve interactions with human participants, it does not classify as human subjects research. We follow common ethical standards; we do not attempt to de-anonymize users, we do not disclose any personal information, and our study does not report any offensive content.
|
| 247 |
+
|
| 248 |
+
# 7 RELATED WORK
|
| 249 |
+
|
| 250 |
+
Our research delves into the dynamic shifts in content creation and user behavior resulting from evolving platform policies, with a special focus on YouTube Shorts, which is a popular instance of short-form video concept. We now provide a brief survey on existing research related to short-form videos and the effect of platform policies on user behavior.
|
| 251 |
+
|
| 252 |
+
# 7.1 Short-Form Videos
|
| 253 |
+
|
| 254 |
+
YouTube is the primary video-oriented social media platform. Past studies on YouTube focused on its role in propagating disinformation [15, 16, 25], on video popularity [4, 34], on attention dynamics [20], and on user interactions [35]. Nonetheless, it is unclear whether the results of these analyses focusing on regular video content hold
|
| 255 |
+
|
| 256 |
+
for short-form videos, as the way people interact with these two types of videos is different.
|
| 257 |
+
|
| 258 |
+
Short-form videos are currently understudied and large-scale analyses of such data are relatively uncommon. This is first because the short-form video concept has gained widespread popularity and became the primary feature of social media platforms only recently, e.g., YouTube introduced Shorts and their section in 2021. Second, the platforms have been restrictive in their data-sharing policies. YouTube announced its research API in 2022, TikTok announced it in 2023, and Instagram still does not allow large-scale analysis of Reels. As such, our work is one of the few that analyze short-form videos and the first to analyze YouTube Shorts to the best of our knowledge. We now survey the existing works on short-form videos, which mostly rely on TikTok as data source.
|
| 259 |
+
|
| 260 |
+
Past analyses of TikTok mainly focused on its trending and recommendation algorithm. Klug et al. studied the characteristics of trending videos on TikTok and found that they have high video engagement and are more likely to be posted at certain times [17]. They also report that using trending hashtags does not necessarily contribute to a video being trending. Simpson et al. investigated the impact of TikTok's algorithm on $\mathrm{LGBTQ+}$ users and highlighted algorithmic exclusion and resilience in identity work [28]. Boeker and Urman studied TikTok's recommendation algorithm and reported that it is influenced by users' location, language, and engagement with the content [3]. Lee et al. employed a qualitative method in which they interviewed TikTok users to understand how they perceive and interact with TikTok's algorithms [19]. Other studies focused on how the platform shapes political communication by analyzing users' sentiment against politicians [41], hyperpartisan activity [23], and the platform's policies against misinformation [21].
|
| 261 |
+
|
| 262 |
+
# 7.2 Impact of Platform Policies on Users
|
| 263 |
+
|
| 264 |
+
Popular social media platforms introduce new policies from time to time. This may be due to external actors (e.g., governments) forcing platforms to adopt new policies on censorship [10], privacy [22], and the moderation of hate speech [18] and disinformation [26]. In other cases, platforms themselves may introduce changes to enhance the user experience. In all cases, such changes encourage users to change their behavior and adapt, which eventually shapes public communication. Such behavioral change may manifest itself in user content. For instance, users tend to use more abbreviations and contracted forms when they are constrained by the length of their content, but they also tend to create content of better quality [12]. They may also game platforms' policies to manipulate social media, such as purchasing popular accounts instead of growing new accounts [11] or maintaining backup accounts to recover from platform suspensions [24]. Other actors such as researchers may also be affected by platforms' data collection policies, such as denying access to removed content [9] or limiting access to the API [5]. In this work, we study the impact of the introduction of Shorts on channel behavior by comparing the channels' regular videos and Shorts, which have not been studied to date.
|
| 265 |
+
|
| 266 |
+
# 8 CONCLUSION
|
| 267 |
+
|
| 268 |
+
In conclusion, this article sheds light on the substantial impact of Shorts on the YouTube platform. We showed that channels which took an interest in Shorts, and uploaded at least one Short, largely adopted the format, eventually surpassing RV uploads. We also showed that, while the collective amount of RV uploads stayed mostly constant over time, when looking at individual behaviors, most channels reduced their production of RVs, while increasing and then maintaining a constant frequency of Short uploads.
|
| 269 |
+
|
| 270 |
+
We also observed that Shorts and RVs are not evenly distributed between content categories. Shorts mainly belong to lighthearted, entertainment categories, while RVs touch more diverse content, including news, politics, and education. This disparity in content production is reflected in content consumption. Indeed, the supremacy of Shorts in terms of views is less striking for entertainment-related categories than for the others. In art-related categories, Shorts barely attracted more views than RVs.
|
| 271 |
+
|
| 272 |
+
Finally, we showed that Shorts progressively generated more views per video until getting five times more views on average than RVs, by the end of 2022. When looking at videos from the same channel, Shorts generated 110 times more views than their RVs counterparts. This effect is more pronounced for popular channels than for the rest.
|
| 273 |
+
|
| 274 |
+
Limitations & Future Work. We acknowledge that our findings are not free from limitations. As mentioned in the data collection, our dataset is biased towards the keywords we selected to collect our data. Additionally, we focused on channels that took an interest in Shorts and posted at least one Short. Widening the focus to all types of channels, to analyze the prevalence of channels that adopted Shorts and overall channel behavior, would help ground this work in the current YouTube landscape. As such, further investigations comparing channels that adopted Shorts with channels that did not would provide interesting and complementary insights to this study. During our analysis of the engagement metrics, we did not use the text of comments. By collecting comments, we could observe the adaptation of the (commenting) user base by analyzing the evolution of the network of commenters of a channel and the nature of the comments themselves.
|
| 275 |
+
|
| 276 |
+
# ACKNOWLEDGMENTS
|
| 277 |
+
|
| 278 |
+
We thank Kévin Huguenin for his valuable insights and feedback on an earlier version of this paper.
|
| 279 |
+
|
| 280 |
+
# REFERENCES
|
| 281 |
+
|
| 282 |
+
[1] Jane Arthurs, Sophia Drakopoulou, and Alessandro Gandini. 2018. Researching YouTube. Convergence 24, 1 (Feb. 2018), 3-15. https://doi.org/10.1177/1354856517737222
|
| 283 |
+
[2] Xiang Bi and Cunchen Tang. 2020. Research on the Motives Affecting the Behavior of Short Video's Creators. IEEE Access 8 (2020), 188415-188428. https://doi.org/10.1109/ACCESS.2020.3028392
|
| 284 |
+
[3] Maximilian Boeker and Aleksandra Urman. 2022. An empirical investigation of personalization factors on tiktok. In Proceedings of the ACM Web Conference 2022. 2298-2309.
|
| 285 |
+
[4] Youmna Borghol, Siddharth Mitra, Sebastien Ardon, Niklas Carlsson, Derek Eager, and Anirban Mahanti. 2011. Characterizing and modelling popularity of user-generated videos. In Performance Evaluation.
|
| 286 |
+
[5] Joshua Braun. 2023. Journalism, Media Research, and Mastodon: Notes on the Future. Digital Journalism (2023), 1-8.
|
| 287 |
+
|
| 288 |
+
[6] M. Cha, H. Kwak, P. Rodriguez, Y. Ahn, and S. Moon. 2009. Analyzing the Video Popularity Characteristics of Large-Scale User Generated Content Systems. In IEEE/ACM Transactions on Networking.
|
| 289 |
+
[7] Francesco Chiossi, Luke Haliburton, Changkun Ou, Andreas Martin Butz, and Albrecht Schmidt. 2023. Short-Form Videos Degrade Our Capacity to Retain Intentions: Effect of Context Switching On Prospective Memory. In Proceedings of the 2023 CHI Conference on Human Factors in Computing Systems (CHI '23). Association for Computing Machinery, New York, NY, USA, 1-15. https://doi.org/10.1145/3544548.3580778
|
| 290 |
+
[8] Brian Dean. 2023. TikTok User Statistics. https://backlinko.com/tiktok-users.
|
| 291 |
+
[9] Tugrulcan Elmas. 2023. The Impact of Data Persistence Bias on Social Media Studies. In Proceedings of the 15th ACM Web Science Conference 2023. 196-207.
|
| 292 |
+
[10] Tugrulcan Elmas, Rebekah Overdorf, and Karl Aberer. 2021. A Dataset of State-Censored Tweets.. In ICWSM. 1009-1015.
|
| 293 |
+
[11] Tugrulcan Elmas, Rebekah Overdorf, and Karl Aberer. 2023. Misleading repurposing on Twitter. In Proceedings of the International AAAI Conference on Web and Social Media, Vol. 17. 209-220.
|
| 294 |
+
[12] Kristina Gligoric, Ashton Anderson, and Robert West. 2018. How constraints affect content: The case of Twitter's switch from 140 to 280 characters. In Proceedings of the International AAAI Conference on Web and Social Media, Vol. 12.
|
| 295 |
+
[13] James Hale. 2021. YouTube Shorts Launches $100 Million Fund To Pay Creators Of Top Videos. https://www.tubefilter.com/2021/05/11/youtube-shorts-100-million-creator-fund/.
|
| 296 |
+
[14] YouTube Help. 2021. New Features and Updates for Shorts Viewers & Creators. https://support.google.com/youtube/thread/139221507.
|
| 297 |
+
[15] Muhammad Nihal Hussain, Serpil Tokdemir, Nitin Agarwal, and Samer Al-Khateeb. 2018. Analyzing disinformation and crowd manipulation tactics on YouTube. In 2018 IEEE/ACM International Conference on Advances in Social Networks Analysis and Mining (ASONAM). IEEE, 1092-1095.
|
| 298 |
+
[16] Eslam Hussein, Prerna Juneja, and Tanushree Mitra. 2020. Measuring Misinformation in Video Search Platforms: An Audit Study on YouTube. In Proceedings of the ACM on Human-Computer Interaction.
|
| 299 |
+
[17] Daniel Klug, Yiluo Qin, Morgan Evans, and Geoff Kaufman. 2021. Trick and please. A mixed-method study on user assumptions about the TikTok algorithm. In Proceedings of the 13th ACM Web Science Conference 2021. 84-92.
|
| 300 |
+
[18] Anastasia Kozyreva, Stefan M Herzog, Stephan Lewandowsky, Ralph Hertwig, Philipp Lorenz-Spreen, Mark Leiser, and Jason Reifler. 2023. Resolving content moderation dilemmas between free speech and harmful misinformation. Proceedings of the National Academy of Sciences 120, 7 (2023), e2210666120.
|
| 301 |
+
[19] Angela Y Lee, Hannah Mieczkowski, Nicole B Ellison, and Jeffrey T Hancock. 2022. The algorithmic crystal: Conceptualizing the self through algorithmic personalization on TikTok. Proceedings of the ACM on Human-computer Interaction 6, CSCW2 (2022), 1-22.
|
| 302 |
+
[20] JooYoung Lee, Siqi Wu, Ali Mert Ertugrul, Yu-Ru Lin, and Lexing Xie. 2022. Whose Advantage? Measuring Attention Dynamics across YouTube and Twitter on Controversial Topics. 16 (2022), 573-583. https://doi.org/10.1609/icwsm.v16i1.19316
|
| 303 |
+
[21] Chen Ling, Krishna P Gummadi, and Savvas Zannettou. 2023. "Learn the Facts About COVID-19": Analyzing the Use of Warning Labels on TikTok Videos. In Proceedings of the International AAAI Conference on Web and Social Media, Vol. 17. 554-565.
|
| 304 |
+
[22] Shuang Liu, Baiyang Zhao, Renjie Guo, Guozhu Meng, Fan Zhang, and Meishan Zhang. 2021. Have you been properly notified? automatic compliance analysis of privacy policy text with GDPR article 13. In Proceedings of the Web Conference 2021. 2154-2164.
|
| 305 |
+
[23] Juan Carlos Medina Serrano, Orestis Papakyriakopoulos, and Simon Hegelich. 2020. Dancing to the partisan beat: A first analysis of political communication on TikTok. In Proceedings of the 12th ACM Conference on Web Science. 257-266.
|
| 306 |
+
[24] Maya Merhi, Sarah Rajtmajer, and Dongwon Lee. 2023. Information operations in Turkey: Manufacturing resilience with free Twitter accounts. In Proceedings of the International AAAI Conference on Web and Social Media, Vol. 17. 638-649.
|
| 307 |
+
[25] Kostantinos Papadamou, Savvas Zannettou, Jeremy Blackburn, Emiliano De Cristofaro, Gianluca Stringhini, and Michael Sirivianos. 2022. "It Is Just a Flu": Assessing the Effect of Watch History on YouTube's Pseudoscientific Video Recommendations. 16 (2022), 723-734. https://ojs.aaai.org/index.php/ICWSM/article/view/19329
|
| 308 |
+
[26] Orestis Papakyriakopoulos and Ellen Goodman. 2022. The impact of Twitter labels on misinformation spread and user engagement: Lessons from Trump's election tweets. In Proceedings of the ACM web conference 2022. 2541-2551.
|
| 309 |
+
[27] Manoel Horta Ribeiro and Robert West. 2021. YouNiverse: Large-Scale Channel and Video Metadata from English-Speaking YouTube. In ICWSM. 1016-1024.
|
| 310 |
+
[28] Ellen Simpson and Bryan Semaan. 2021. For You, or For "You"? Everyday LGBTQ+ Encounters with TikTok. Proceedings of the ACM on human-computer interaction 4, CSCW3 (2021), 1-34.
|
| 311 |
+
[29] Matt G. Southern. 2022. YouTube Shorts Hits 30 Billion Views Per Day. https://www.searchenginejournal.com/youtube-shorts-hits-30-billion-views-per-day/447785/.
|
| 312 |
+
|
| 313 |
+
[30] Conghui Su, Hui Zhou, Liangyu Gong, Binyu Teng, Fengji Geng, and Yuzheng Hu. 2021. Viewing personalized video clips recommended by TikTok activates default mode network and ventral tegmental area. NeuroImage 237 (2021), 118136.
|
| 314 |
+
[31] Todd Sherman. 2021. Bringing YouTube Shorts to the U.S. https://blog.youtube/news-and-events/youtube-shorts-united-states/.
|
| 315 |
+
[32] Victor Potrel. 2022. Five Insights Into The Popularity Of Short-Form Video Content. https://www.forbes.com/sites/forbescommunicationscouncil/2022/09/06/five-insights-into-the-popularity-of-short-form-video-content/.
|
| 316 |
+
[33] Kexin Wang and Sebastian Scherr. 2022. Dance the Night Away: How Automatic TikTok Use Creates Pre-Sleep Cognitive Arousal and Daytime Fatigue. Mobile Media & Communication 10, 2 (May 2022), 316-336. https://doi.org/10.1177/20501579211056116
|
| 317 |
+
[34] Mirjam Wattenhofer, Roger Wattenhofer, and Zack Zhu. 2012. The YouTube Social Network. 6, 1 (2012), 354-361. https://ojs.aaai.org/index.php/ICWSM/article/view/14243
|
| 318 |
+
[35] Siqi Wu and Paul Resnick. 2021. Cross-Partisan Discussions on YouTube: Conservatives Talk to Liberals but Liberals Don't Talk to Conservatives. In Proceedings of the Fifteenth International AAAI Conference on Web and Social Media, ICWSM 2021, held virtually, June 7-10, 2021, Ceren Budak, Meeyoung
|
| 319 |
+
|
| 320 |
+
Cha, Daniele Quercia, and Lexing Xie (Eds.). AAAI Press, 808-819. https://ojs.aaai.org/index.php/ICWSM/article/view/18105
|
| 321 |
+
[36] YouTube Developers. 2022. YouTube Data API | Google for Developers. https://developers.google.com/youtube/v3/.
|
| 322 |
+
[37] YouTube Help. 2023. Create Shorts. https://support.google.com/youtube/topic/10343432.
|
| 323 |
+
[38] YouTube Help. 2023. Upload Videos. https://support.google.com/youtube/topic/16547.
|
| 324 |
+
[39] YouTube Research. 2022. Program Terms & Conditions. https://research.youtube/policies/terms/.
|
| 325 |
+
[40] YouTube Research. 2022. YouTube Researcher Program. https://research.youtube/.
|
| 326 |
+
[41] Jing Zeng and Crystal Abidin. 2021. #OkBoomer, time to meet the Zoomers: Studying the memefication of intergenerational politics on TikTok. Information, Communication & Society 24, 16 (2021), 2459-2481.
|
| 327 |
+
[42] Cevin Zhang, Hemingxi Zheng, and Qing Wang. 2022. Driving Factors and Moderating Effects Behind Citizen Engagement With Mobile Short-Form Videos. IEEE Access 10 (2022), 40999–41009. https://doi.org/10.1109/ACCESS.2022.3167687
|
2403.00xxx/2403.00454/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:64f0a8e4e3dffdabade0e44eedb608136e371d4caf605c93dd24cf7881e96d60
|
| 3 |
+
size 498921
|
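The three-line blocks recorded for the binary files in this batch (version / oid / size) are Git LFS pointer files: the repository keeps only the pointer, while the actual `images.zip` or PDF payload is stored via LFS (see the `filter=lfs` rules in `.gitattributes`). As a minimal sketch for illustration only (the helper name is the editor's own; the sample values are copied from the pointer above), such a pointer can be parsed like this:

```python
# Editor's sketch: parse a Git LFS pointer file of the form
#   version https://git-lfs.github.com/spec/v1
#   oid sha256:<hex digest>
#   size <bytes>
def parse_lfs_pointer(text: str) -> dict:
    """Return the version, hash algorithm, digest and size of an LFS pointer."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")  # split on the first space only
        fields[key] = value
    algo, _, digest = fields.get("oid", "").partition(":")  # e.g. "sha256:64f0..."
    return {
        "version": fields.get("version", ""),
        "oid_algo": algo,
        "oid": digest,
        "size_bytes": int(fields.get("size", "0")),
    }

if __name__ == "__main__":
    # Values taken verbatim from the images.zip pointer above.
    pointer = (
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:64f0a8e4e3dffdabade0e44eedb608136e371d4caf605c93dd24cf7881e96d60\n"
        "size 498921\n"
    )
    print(parse_lfs_pointer(pointer))
```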
2403.00xxx/2403.00454/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00476/ac231d9a-78e3-4c77-9bf9-4d860a5d5fd0_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00476/ac231d9a-78e3-4c77-9bf9-4d860a5d5fd0_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00476/ac231d9a-78e3-4c77-9bf9-4d860a5d5fd0_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4a6c6b850bce3f6afdf92a62cddb521906aacbd2f2e08a1c5be960632af7ab50
|
| 3 |
+
size 3327560
|
2403.00xxx/2403.00476/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00476/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f608ec27ecfc8e047599f4d14455bdcf59dfd97471e72b5f1c8d82bcb3379743
|
| 3 |
+
size 1783855
|
2403.00xxx/2403.00476/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00483/619283d9-9e07-40aa-a0f5-3db958d84022_content_list.json
ADDED
|
@@ -0,0 +1,1787 @@
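Each entry in the `*_content_list.json` files added below is a flat JSON object describing one layout block of the source PDF: a `type` (text, image, equation, page_footnote, page_number, ...), the extracted `text` or `img_path`, a `bbox` that appears to be [x0, y0, x1, y1] in page layout coordinates, and a `page_idx`. As a hedged illustration only (the file path reuses the name listed above, and the function name is the editor's own, not part of any dataset tooling), such a file can be consumed as follows:

```python
# Editor's sketch: collect the text blocks of one page from a content_list.json,
# assuming the file is a flat JSON array of {"type", "text", "bbox", "page_idx"} objects
# as shown in the entries below.
import json

def page_text_blocks(path: str, page_idx: int) -> list[str]:
    """Return the 'text' of every text-type block on the given page, in file order."""
    with open(path, encoding="utf-8") as f:
        entries = json.load(f)
    return [
        e["text"]
        for e in entries
        if e.get("type") == "text" and e.get("page_idx") == page_idx
    ]

if __name__ == "__main__":
    blocks = page_text_blocks(
        "2403.00xxx/2403.00483/619283d9-9e07-40aa-a0f5-3db958d84022_content_list.json",
        page_idx=0,
    )
    print(len(blocks), "text blocks on page 0")
```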
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "RealCustom: Narrowing Real Text Word for Real-Time Open-Domain Text-to-Image Customization",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
133,
|
| 8 |
+
128,
|
| 9 |
+
836,
|
| 10 |
+
174
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Mengqi Huang $^{1*}$ , Zhendong Mao $^{1\\dagger}$ , Mingcong Liu $^{2}$ , Qian He $^{2}$ , Yongdong Zhang $^{1}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
176,
|
| 19 |
+
202,
|
| 20 |
+
807,
|
| 21 |
+
220
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "<sup>1</sup> University of Science and Technology of China; <sup>2</sup>ByteDance Inc.",
|
| 28 |
+
"bbox": [
|
| 29 |
+
228,
|
| 30 |
+
220,
|
| 31 |
+
754,
|
| 32 |
+
239
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "{huangmq}@mail.ustc.edu.cn, {zdmao, zhyd73}@ustc.edu.cn, {liumingcong, heqian}@bytedance.com",
|
| 39 |
+
"bbox": [
|
| 40 |
+
86,
|
| 41 |
+
241,
|
| 42 |
+
893,
|
| 43 |
+
257
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "Abstract",
|
| 50 |
+
"text_level": 1,
|
| 51 |
+
"bbox": [
|
| 52 |
+
233,
|
| 53 |
+
291,
|
| 54 |
+
312,
|
| 55 |
+
306
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "Text-to-image customization, which aims to synthesize text-driven images for the given subjects, has recently revolutionized content creation. Existing works follow the pseudo-word paradigm, i.e., represent the given subjects as pseudo-words and then compose them with the given text. However, the inherent entangled influence scope of pseudowords with the given text results in a dual-optimum paradox, i.e., the similarity of the given subjects and the controllability of the given text could not be optimal simultaneously. We present RealCustom that, for the first time, disentangles similarity from controllability by precisely limiting subject influence to relevant parts only, achieved by gradually narrowing real text word from its general connotation to the specific subject and using its cross-attention to distinguish relevance. Specifically, RealCustom introduces a novel \"train-inference\" decoupled framework: (1) during training, RealCustom learns general alignment between visual conditions to original textual conditions by a novel adaptive scoring module to adaptively modulate influence quantity; (2) during inference, a novel adaptive mask guidance strategy is proposed to iteratively update the influence scope and influence quantity of the given subjects to gradually narrow the generation of the real text word. Comprehensive experiments demonstrate the superior real-time customization ability of RealCustom in the open domain, achieving both unprecedented similarity of the given subjects and controllability of the given text for the first time. The project page is https://corleone-huang.github.io/realcustom/.",
|
| 62 |
+
"bbox": [
|
| 63 |
+
75,
|
| 64 |
+
323,
|
| 65 |
+
473,
|
| 66 |
+
763
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "1. Introduction",
|
| 73 |
+
"text_level": 1,
|
| 74 |
+
"bbox": [
|
| 75 |
+
76,
|
| 76 |
+
791,
|
| 77 |
+
209,
|
| 78 |
+
806
|
| 79 |
+
],
|
| 80 |
+
"page_idx": 0
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"type": "text",
|
| 84 |
+
"text": "Recent significant advances in the customization of pretrained large-scale text-to-image models [6, 24, 25, 28] (i.e., text-to-image customization) has revolutionized content cre",
|
| 85 |
+
"bbox": [
|
| 86 |
+
75,
|
| 87 |
+
816,
|
| 88 |
+
468,
|
| 89 |
+
863
|
| 90 |
+
],
|
| 91 |
+
"page_idx": 0
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"type": "image",
|
| 95 |
+
"img_path": "images/68768c87b9c63674af20f2379d0a450b4a5d023b2b5ab96d49319039a897797a.jpg",
|
| 96 |
+
"image_caption": [],
|
| 97 |
+
"image_footnote": [],
|
| 98 |
+
"bbox": [
|
| 99 |
+
506,
|
| 100 |
+
290,
|
| 101 |
+
725,
|
| 102 |
+
378
|
| 103 |
+
],
|
| 104 |
+
"page_idx": 0
|
| 105 |
+
},
|
| 106 |
+
{
|
| 107 |
+
"type": "image",
|
| 108 |
+
"img_path": "images/ccefafe753fbb53c13a7e5013c88baf8ebb578e7b5fd152634d1ec9b3c44ad77.jpg",
|
| 109 |
+
"image_caption": [
|
| 110 |
+
"Figure 1. Comparison between the existing paradigm and ours. (a) The existing paradigm represents the given subject as pseudowords $(e.g., S^{*})$ , which has entangled the same entire influence scope with the given text, resulting in the dual-optimum paradox, i.e., the similarity for the given subject and the controllability for the given text could not achieve optimum simultaneously. (b) We propose RealCustom, a novel paradigm that, for the first time disentangles similarity from controllability by precisely limiting the given subjects to influence only the relevant parts while the rest parts are purely controlled by the given text. This is achieved by iteratively updating the influence scope and influence quantity of the given subjects. (c) The quantitative comparison shows that our paradigm achieves both superior similarity and controllability than the state-of-the-arts of the existing paradigm. CLIP-image score (CLIP-I) and CLIP-text score (CLIP-T) are used to evaluate similarity and controllability. Refer to the experiments for details."
|
| 111 |
+
],
|
| 112 |
+
"image_footnote": [],
|
| 113 |
+
"bbox": [
|
| 114 |
+
508,
|
| 115 |
+
378,
|
| 116 |
+
725,
|
| 117 |
+
473
|
| 118 |
+
],
|
| 119 |
+
"page_idx": 0
|
| 120 |
+
},
|
| 121 |
+
{
|
| 122 |
+
"type": "image",
|
| 123 |
+
"img_path": "images/5c6fa30c41a7bbefab544c9f82f7c72216e271064ae72e0f2e347458837b3026.jpg",
|
| 124 |
+
"image_caption": [],
|
| 125 |
+
"image_footnote": [],
|
| 126 |
+
"bbox": [
|
| 127 |
+
725,
|
| 128 |
+
290,
|
| 129 |
+
882,
|
| 130 |
+
473
|
| 131 |
+
],
|
| 132 |
+
"page_idx": 0
|
| 133 |
+
},
|
| 134 |
+
{
|
| 135 |
+
"type": "text",
|
| 136 |
+
"text": "ation, receiving rapidly growing research interest from both academia and industry. This task empowers pre-trained models with the ability to generate imaginative text-driven scenes for subjects specified by users (e.g., a person's closest friends or favorite paintings), which is a foundation for AI-generated content (AIGC) and real-world applications such as personal image&video creation [7]. The primary goal of customization is dual-faceted: (1) high-quality similarity, i.e., the target subjects in the generated images should closely mirror the given subjects; (2) high-quality control-",
|
| 137 |
+
"bbox": [
|
| 138 |
+
496,
|
| 139 |
+
750,
|
| 140 |
+
893,
|
| 141 |
+
902
|
| 142 |
+
],
|
| 143 |
+
"page_idx": 0
|
| 144 |
+
},
|
| 145 |
+
{
|
| 146 |
+
"type": "aside_text",
|
| 147 |
+
"text": "arXiv:2403.00483v1 [cs.CV] 1 Mar 2024",
|
| 148 |
+
"bbox": [
|
| 149 |
+
22,
|
| 150 |
+
265,
|
| 151 |
+
57,
|
| 152 |
+
700
|
| 153 |
+
],
|
| 154 |
+
"page_idx": 0
|
| 155 |
+
},
|
| 156 |
+
{
|
| 157 |
+
"type": "page_footnote",
|
| 158 |
+
"text": "*Works done during the internship at ByteDance.",
|
| 159 |
+
"bbox": [
|
| 160 |
+
94,
|
| 161 |
+
875,
|
| 162 |
+
351,
|
| 163 |
+
887
|
| 164 |
+
],
|
| 165 |
+
"page_idx": 0
|
| 166 |
+
},
|
| 167 |
+
{
|
| 168 |
+
"type": "page_footnote",
|
| 169 |
+
"text": "$\\dagger$ Zhendong Mao is the corresponding author.",
|
| 170 |
+
"bbox": [
|
| 171 |
+
96,
|
| 172 |
+
887,
|
| 173 |
+
334,
|
| 174 |
+
898
|
| 175 |
+
],
|
| 176 |
+
"page_idx": 0
|
| 177 |
+
},
|
| 178 |
+
{
|
| 179 |
+
"type": "page_number",
|
| 180 |
+
"text": "1",
|
| 181 |
+
"bbox": [
|
| 182 |
+
480,
|
| 183 |
+
924,
|
| 184 |
+
488,
|
| 185 |
+
936
|
| 186 |
+
],
|
| 187 |
+
"page_idx": 0
|
| 188 |
+
},
|
| 189 |
+
{
|
| 190 |
+
"type": "image",
|
| 191 |
+
"img_path": "images/2ec6f26da3ab9bb56bfed52be31f25772c9f7a95295769d939c1c2dec69bf038.jpg",
|
| 192 |
+
"image_caption": [
|
| 193 |
+
"Figure 2. Generated customization results of our proposed novel paradigm RealCustom. Given a single image representing the given subject in the open domain (any subjects, portrait painting, favorite toys, etc.), RealCustom could generate realistic images that consistently adhere to the given text for the given subjects in real-time (without any test-time optimization steps)."
|
| 194 |
+
],
|
| 195 |
+
"image_footnote": [],
|
| 196 |
+
"bbox": [
|
| 197 |
+
86,
|
| 198 |
+
89,
|
| 199 |
+
467,
|
| 200 |
+
281
|
| 201 |
+
],
|
| 202 |
+
"page_idx": 1
|
| 203 |
+
},
|
| 204 |
+
{
|
| 205 |
+
"type": "text",
|
| 206 |
+
"text": "lability, i.e., the remaining subject-irrelevant parts should consistently adhere to the control of the given text.",
|
| 207 |
+
"bbox": [
|
| 208 |
+
75,
|
| 209 |
+
400,
|
| 210 |
+
468,
|
| 211 |
+
430
|
| 212 |
+
],
|
| 213 |
+
"page_idx": 1
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"type": "text",
|
| 217 |
+
"text": "Existing literature follows the pseudo-word paradigm, i.e., (1) learning pseudo-words $(e.g., S^{*}[10]$ or rare-tokens [27]) to represent the given subjects; (2) composing these pseudo-words with the given text for the customized generation. Recent studies have focused on learning more comprehensive pseudo-words [1, 8, 22, 32, 38] to capture more subject information, e.g., different pseudo-words for different diffusion timesteps [1, 38] or layers [32]. Meanwhile, others propose to speed up pseudo-word learning by training an encoder [11, 18, 30, 34] on object-datasets [17]. In parallel, based on the learned pseudo-words, many works further finetune the pre-trained models [16, 18, 27, 34] or add additional adapters [30] for higher similarity. As more information of the given subjects is introduced into pretrained models, the risk of overfitting increases, leading to the degradation of controllability. Therefore, various regularizations $(e.g., l_{1}$ penalty [10, 16, 34], prior-preservation loss [27]) are used to maintain controllability, which in turn sacrifices similarity. Essentially, existing methods are trapped in a dual-optimum paradox, i.e., the similarity and controllability can not be optimal simultaneously.",
|
| 218 |
+
"bbox": [
|
| 219 |
+
75,
|
| 220 |
+
431,
|
| 221 |
+
468,
|
| 222 |
+
748
|
| 223 |
+
],
|
| 224 |
+
"page_idx": 1
|
| 225 |
+
},
|
| 226 |
+
{
|
| 227 |
+
"type": "text",
|
| 228 |
+
"text": "We argue that the fundamental cause of this dual-optimum paradox is rooted in the existing pseudo-word paradigm, where the similarity component (i.e., the pseudowords) to generate the given subjects is intrinsically entangled with the controllability component (i.e., the given text) to generate subject-irrelevant parts, causing an overall conflict in the generation, as illustrated in Fig. 1(a). Specifically, this entanglement is manifested in the same entire influence scope of these two components. i.e., both the pseudo-words and the given text affect all generation",
|
| 229 |
+
"bbox": [
|
| 230 |
+
75,
|
| 231 |
+
750,
|
| 232 |
+
470,
|
| 233 |
+
901
|
| 234 |
+
],
|
| 235 |
+
"page_idx": 1
|
| 236 |
+
},
|
| 237 |
+
{
|
| 238 |
+
"type": "text",
|
| 239 |
+
"text": "regions. This is because each region is updated as a weighted sum of all word features through built-in textual cross-attention in pre-trained text-to-image diffusion models. Therefore, increasing the influence of the similarity component will simultaneously strengthen the similarity in the subject-relevant parts and weaken the influence of the given text in other irrelevant ones, causing the degradation of controllability, and vice versa. Moreover, the necessary correspondence between pseudo-words and subjects confines existing methods to either lengthy test-time optimization [10, 16, 27] or training [18, 34] on object-datasets [17] that have limited categories. As a result, the existing paradigm inherently has poor generalization capability for real-time open-domain scenarios in the real world.",
|
| 240 |
+
"bbox": [
|
| 241 |
+
496,
|
| 242 |
+
90,
|
| 243 |
+
890,
|
| 244 |
+
303
|
| 245 |
+
],
|
| 246 |
+
"page_idx": 1
|
| 247 |
+
},
|
| 248 |
+
{
|
| 249 |
+
"type": "text",
|
| 250 |
+
"text": "In this paper, we present RealCustom, a novel customization paradigm that, for the first time, disentangles the similarity component from the controllability component by precisely limiting the given subjects to influence only the relevant parts while maintaining other irreverent ones purely controlled by the given texts, achieving both high-quality similarity and controllability in a real-time open-domain scenario, as shown in Fig. 2. The core idea of RealCustom is that, instead of representing subjects as pseudowords, we could progressively narrow down the real text words (e.g., \"toy\") from their initial general connotation (e.g., various kinds o toys) to the specific subjects (e.g., the unique sloth toy), wherein the superior text-image alignment in pre-trained models' cross-attention can be leveraged to distinguish subject relevance, as illustrated in Fig. 1(b). Specifically, at each generation step, (1) the influence scope of the given subject is identified by the target real word's cross-attention, with a higher attention score indicating greater relevance; (2) this influence scope then determines the influence quantity of the given subject at the current step, i.e., the amount of subject information to be infused into this scope; (3) this influence quantity, in turn, shapes a more accurate influence scope for the next step, as each step's generation result is based on the output of the previous. Through this iterative updating, the generation result of the real word is smoothly and accurately transformed into the given subject, while other irrelevant parts are completely controlled by the given text.",
|
| 251 |
+
"bbox": [
|
| 252 |
+
496,
|
| 253 |
+
306,
|
| 254 |
+
892,
|
| 255 |
+
731
|
| 256 |
+
],
|
| 257 |
+
"page_idx": 1
|
| 258 |
+
},
|
| 259 |
+
{
|
| 260 |
+
"type": "text",
|
| 261 |
+
"text": "Technically, RealCustom introduces an innovative \"train-inference\" decoupled framework: (1) During training, RealCustom only learns the generalized alignment capabilities between visual conditions and pre-trained models' original text conditions on large-scale text-image datasets through a novel adaptive scoring module, which modulates the influence quantity based on text and currently generated features. (2) During inference, real-time customization is achieved by a novel adaptive mask guidance strategy, which gradually narrows down a real text word based on the learned alignment capabilities. Specif",
|
| 262 |
+
"bbox": [
|
| 263 |
+
496,
|
| 264 |
+
734,
|
| 265 |
+
893,
|
| 266 |
+
902
|
| 267 |
+
],
|
| 268 |
+
"page_idx": 1
|
| 269 |
+
},
|
| 270 |
+
{
|
| 271 |
+
"type": "page_number",
|
| 272 |
+
"text": "2",
|
| 273 |
+
"bbox": [
|
| 274 |
+
478,
|
| 275 |
+
924,
|
| 276 |
+
491,
|
| 277 |
+
936
|
| 278 |
+
],
|
| 279 |
+
"page_idx": 1
|
| 280 |
+
},
|
| 281 |
+
{
|
| 282 |
+
"type": "text",
|
| 283 |
+
"text": "ically, (1) the adaptive scoring module first estimates the visual features' correlation scores with the text features and currently generated features, respectively. Then a timestep-aware schedule is applied to fuse these two scores. A subset of key visual features, chosen based on the fused score, is incorporated into pre-trained diffusion models by extending its textual cross-attention with another visual cross-attention. (2) The adaptive mask guidance strategy consists of a text-to-image (T2I) branch (with the visual condition set to 0) and a text&image-to-image (TI2I) branch (with the visual condition set to the given subject). Firstly, all layers' cross-attention maps of the target real word in the T2I branch are aggregated into a single one, selecting only high-attention regions as the influence scope. Secondly, in the TI2I branch, the influence scope is multiplied by currently generated features to produce the influence quantity and concurrently multiplied by the outputs of the visual cross-attention to avoid influencing subject-irrelevant parts.",
|
| 284 |
+
"bbox": [
|
| 285 |
+
75,
|
| 286 |
+
90,
|
| 287 |
+
472,
|
| 288 |
+
363
|
| 289 |
+
],
|
| 290 |
+
"page_idx": 2
|
| 291 |
+
},
|
| 292 |
+
{
|
| 293 |
+
"type": "text",
|
| 294 |
+
"text": "Our contributions are summarized as follows:",
|
| 295 |
+
"bbox": [
|
| 296 |
+
96,
|
| 297 |
+
364,
|
| 298 |
+
398,
|
| 299 |
+
378
|
| 300 |
+
],
|
| 301 |
+
"page_idx": 2
|
| 302 |
+
},
|
| 303 |
+
{
|
| 304 |
+
"type": "text",
|
| 305 |
+
"text": "Concepts. For the first time, we (1) point out the dual-optimum paradox is rooted in the existing pseudo-word paradigm's entangled influence scope between the similarity (i.e., pseudo-words representing the given subjects) and controllability (i.e., the given texts); (2) present RealCustom, a novel paradigm that achieves disentanglement by gradually narrowing down real words into the given subjects, wherein the given subjects' influence scope is limited based on the cross-attention of the real words.",
|
| 306 |
+
"bbox": [
|
| 307 |
+
75,
|
| 308 |
+
380,
|
| 309 |
+
468,
|
| 310 |
+
515
|
| 311 |
+
],
|
| 312 |
+
"page_idx": 2
|
| 313 |
+
},
|
| 314 |
+
{
|
| 315 |
+
"type": "text",
|
| 316 |
+
"text": "Technology. The proposed RealCustom introduces a novel \"train-inference\" decoupled framework: (1) during training, learning generalized alignment between visual conditions to original text conditions by the adaptive scoring module to modulate influence quantity; (2) during inference, the adaptive mask guidance strategy is proposed to narrow down a real word by iterative updating the given subject's influence scope and quantity.",
|
| 317 |
+
"bbox": [
|
| 318 |
+
75,
|
| 319 |
+
516,
|
| 320 |
+
468,
|
| 321 |
+
637
|
| 322 |
+
],
|
| 323 |
+
"page_idx": 2
|
| 324 |
+
},
|
| 325 |
+
{
|
| 326 |
+
"type": "text",
|
| 327 |
+
"text": "Significance. For the first time, we achieve (1) superior similarity and controllability simultaneously, as shown in Fig. 1(c); (2) real-time open-domain customization ability.",
|
| 328 |
+
"bbox": [
|
| 329 |
+
75,
|
| 330 |
+
638,
|
| 331 |
+
468,
|
| 332 |
+
683
|
| 333 |
+
],
|
| 334 |
+
"page_idx": 2
|
| 335 |
+
},
|
| 336 |
+
{
|
| 337 |
+
"type": "text",
|
| 338 |
+
"text": "2. Related Works",
|
| 339 |
+
"text_level": 1,
|
| 340 |
+
"bbox": [
|
| 341 |
+
76,
|
| 342 |
+
699,
|
| 343 |
+
227,
|
| 344 |
+
715
|
| 345 |
+
],
|
| 346 |
+
"page_idx": 2
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"type": "text",
|
| 350 |
+
"text": "2.1. Text-to-Image Customization",
|
| 351 |
+
"text_level": 1,
|
| 352 |
+
"bbox": [
|
| 353 |
+
76,
|
| 354 |
+
724,
|
| 355 |
+
336,
|
| 356 |
+
739
|
| 357 |
+
],
|
| 358 |
+
"page_idx": 2
|
| 359 |
+
},
|
| 360 |
+
{
|
| 361 |
+
"type": "text",
|
| 362 |
+
"text": "Existing customization methods follow the pseudo-words paradigm, i.e., representing the given subjects as pseudowords and then composing them with the given text for customization. Since the necessary correspondence between the pseudo-words and the given subjects, existing works are confined to either cumbersome test-time optimization-based [1, 8-10, 16, 22, 27, 32] or encoder-based [7, 11, 14, 18, 30, 34] that trained on object-datasets with limited categories. For example, in the optimization-based stream, DreamBooth [27] uses a rare-token as the pseudo-word and",
|
| 363 |
+
"bbox": [
|
| 364 |
+
75,
|
| 365 |
+
750,
|
| 366 |
+
468,
|
| 367 |
+
900
|
| 368 |
+
],
|
| 369 |
+
"page_idx": 2
|
| 370 |
+
},
|
| 371 |
+
{
|
| 372 |
+
"type": "text",
|
| 373 |
+
"text": "further fine-tunes the entire pre-trained diffusion model for better similarity. Custom Diffusion [16] instead finds a subset of key parameters and only optimizes them. The main drawback of this stream is that it requires lengthy optimization times for each new subject. As for the encoder-based stream, the recent ELITE [34] uses a local mapping network to improve similarity, while BLIP-Diffusion [18] introduces a multimodal encoder for better subject representation. These encoder-based works usually show less similarity than optimization-based works and generalize poorly to unseen categories in training. In summary, the entangled influence scope of pseudo-words and the given text naturally limits the current works from achieving both optimal similarity and controllability, as well as hindering real-time open-domain customization.",
|
| 374 |
+
"bbox": [
|
| 375 |
+
496,
|
| 376 |
+
90,
|
| 377 |
+
890,
|
| 378 |
+
316
|
| 379 |
+
],
|
| 380 |
+
"page_idx": 2
|
| 381 |
+
},
|
| 382 |
+
{
|
| 383 |
+
"type": "text",
|
| 384 |
+
"text": "2.2. Cross-Attention in Diffusion Models",
|
| 385 |
+
"text_level": 1,
|
| 386 |
+
"bbox": [
|
| 387 |
+
500,
|
| 388 |
+
325,
|
| 389 |
+
815,
|
| 390 |
+
340
|
| 391 |
+
],
|
| 392 |
+
"page_idx": 2
|
| 393 |
+
},
|
| 394 |
+
{
|
| 395 |
+
"type": "text",
|
| 396 |
+
"text": "Text guidance in modern large-scale text-to-image diffusion models [2, 6, 24, 25, 28] is generally performed using the cross-attention mechanism. Therefore, many works propose to manipulate the cross-attention map for text-driven editing [3, 12] on generated images or real images via inversion [31], e.g., Prompt-to-Prompt [12] proposes to reassign the cross-attention weight to edit the generated image. Another branch of work focuses on improving cross-attention either by adding additional spatial control [20, 21] or post-processing to improve semantic alignment [5, 19]. Meanwhile, a number of works [33, 35, 36] propose using cross-attention in diffusion models for discriminative tasks such as segmentation. However, different from the existing literature, the core idea of RealCustom is to gradually narrow a real text word from its initial general connotation (e.g., whose cross-attention could represent any toy with various types of shapes and details) to the unique given subject (e.g., whose cross-attention accurately represents the unique toy), which is completely unexplored.",
|
| 397 |
+
"bbox": [
|
| 398 |
+
496,
|
| 399 |
+
349,
|
| 400 |
+
890,
|
| 401 |
+
635
|
| 402 |
+
],
|
| 403 |
+
"page_idx": 2
|
| 404 |
+
},
|
| 405 |
+
{
|
| 406 |
+
"type": "text",
|
| 407 |
+
"text": "3. Methodology",
|
| 408 |
+
"text_level": 1,
|
| 409 |
+
"bbox": [
|
| 410 |
+
500,
|
| 411 |
+
648,
|
| 412 |
+
635,
|
| 413 |
+
666
|
| 414 |
+
],
|
| 415 |
+
"page_idx": 2
|
| 416 |
+
},
|
| 417 |
+
{
|
| 418 |
+
"type": "text",
|
| 419 |
+
"text": "In this study, we focus on the most general customization scenario: with only a single image representing the given subject, generating new high-quality images for that subject from the given text. The generated subject may vary in location, pose, style, etc., yet it should maintain high similarity with the given one. The remaining parts should consistently adhere to the given text, thus ensuring controllability.",
|
| 420 |
+
"bbox": [
|
| 421 |
+
496,
|
| 422 |
+
674,
|
| 423 |
+
890,
|
| 424 |
+
779
|
| 425 |
+
],
|
| 426 |
+
"page_idx": 2
|
| 427 |
+
},
|
| 428 |
+
{
|
| 429 |
+
"type": "text",
|
| 430 |
+
"text": "The proposed RealCustom introduces a novel \"train-inference\" decoupled paradigm as illustrated in Fig. 3. Specifically, during training, RealCustom learns general alignment between visual conditions and the original text conditions of pre-trained models. During inference, based on the learned alignment capability, RealCustom gradually narrow down the generation of the real text words (e.g., \"toy\") into the given subject (e.g., the unique brown sloth",
|
| 431 |
+
"bbox": [
|
| 432 |
+
496,
|
| 433 |
+
780,
|
| 434 |
+
890,
|
| 435 |
+
900
|
| 436 |
+
],
|
| 437 |
+
"page_idx": 2
|
| 438 |
+
},
|
| 439 |
+
{
|
| 440 |
+
"type": "page_number",
|
| 441 |
+
"text": "3",
|
| 442 |
+
"bbox": [
|
| 443 |
+
478,
|
| 444 |
+
924,
|
| 445 |
+
491,
|
| 446 |
+
936
|
| 447 |
+
],
|
| 448 |
+
"page_idx": 2
|
| 449 |
+
},
|
| 450 |
+
{
|
| 451 |
+
"type": "image",
|
| 452 |
+
"img_path": "images/e43b521d57291e924133114d3ecb7c07405062e149c840daabcb796186d6e576.jpg",
|
| 453 |
+
"image_caption": [
|
| 454 |
+
"(a)ours training paradigm"
|
| 455 |
+
],
|
| 456 |
+
"image_footnote": [],
|
| 457 |
+
"bbox": [
|
| 458 |
+
98,
|
| 459 |
+
94,
|
| 460 |
+
890,
|
| 461 |
+
237
|
| 462 |
+
],
|
| 463 |
+
"page_idx": 3
|
| 464 |
+
},
|
| 465 |
+
{
|
| 466 |
+
"type": "image",
|
| 467 |
+
"img_path": "images/0ab04480f88c4df1fa06a88d71d9ba1025e063bab0b7ec932c3fa6ec84737098.jpg",
|
| 468 |
+
"image_caption": [
|
| 469 |
+
"(b)oursinferenceparadigm (illustratedat timestep $t$ 1",
|
| 470 |
+
"Figure 3. Illustration of our proposed RealCustom, which employs a novel \"train-inference\" decoupled framework: (a) During training, general alignment between visual and original text conditions is learned by the proposed adaptive scoring module, which accurately derives visual conditions based on text and currently generated features. (b) During inference, progressively narrowing down a real word (e.g., \"toy\") from its initial general connotation to the given subject (e.g., the unique brown sloth toy) by the proposed adaptive mask guidance strategy, which consists of two branches, i.e., a text-to-image (T2I) branch where the visual condition is set to 0, and a text&image-to-image (TI2I) branch where the visual condition is set to the given subject. The T2I branch aims to calculate the influence scope by aggregating the target real word's (e.g., \"toy\") cross-attention, while the TI2I branch aims to inject the influence quantity into this scope."
|
| 471 |
+
],
|
| 472 |
+
"image_footnote": [],
|
| 473 |
+
"bbox": [
|
| 474 |
+
91,
|
| 475 |
+
253,
|
| 476 |
+
890,
|
| 477 |
+
507
|
| 478 |
+
],
|
| 479 |
+
"page_idx": 3
|
| 480 |
+
},
|
| 481 |
+
{
|
| 482 |
+
"type": "text",
|
| 483 |
+
"text": "toy) by iterative updating each step's influence scope and influence quantity of the given subject.",
|
| 484 |
+
"bbox": [
|
| 485 |
+
75,
|
| 486 |
+
667,
|
| 487 |
+
468,
|
| 488 |
+
698
|
| 489 |
+
],
|
| 490 |
+
"page_idx": 3
|
| 491 |
+
},
|
| 492 |
+
{
|
| 493 |
+
"type": "text",
|
| 494 |
+
"text": "We first briefly introduce the preliminaries in Sec. 3.1. The training and inference paradigm of RealCustom will be elaborated in detail in Sec. 3.2 and Sec. 3.3, respectively.",
|
| 495 |
+
"bbox": [
|
| 496 |
+
75,
|
| 497 |
+
699,
|
| 498 |
+
468,
|
| 499 |
+
744
|
| 500 |
+
],
|
| 501 |
+
"page_idx": 3
|
| 502 |
+
},
|
| 503 |
+
{
|
| 504 |
+
"type": "text",
|
| 505 |
+
"text": "3.1. Preliminaries",
|
| 506 |
+
"text_level": 1,
|
| 507 |
+
"bbox": [
|
| 508 |
+
76,
|
| 509 |
+
755,
|
| 510 |
+
217,
|
| 511 |
+
768
|
| 512 |
+
],
|
| 513 |
+
"page_idx": 3
|
| 514 |
+
},
|
| 515 |
+
{
|
| 516 |
+
"type": "text",
|
| 517 |
+
"text": "Our paradigm is implemented over Stable Diffusion [25], which consists of two components, i.e., an autoencoder and a conditional UNet [26] denoiser. Firstly, given an image $\\pmb{x} \\in \\mathbb{R}^{H \\times W \\times 3}$ , the encoder $\\mathcal{E}(\\cdot)$ of the autoencoder maps it into a lower dimensional latent space as $\\pmb{z} = \\mathcal{E}(\\pmb{x}) \\in \\mathbb{R}^{h \\times w \\times c}$ , where $f = \\frac{H_0}{h} = \\frac{W_0}{w}$ is the downsampling factor and $c$ stands for the latent channel dimension. The corresponding decoder $\\mathcal{D}(\\cdot)$ maps the latent vectors back to the",
|
| 518 |
+
"bbox": [
|
| 519 |
+
75,
|
| 520 |
+
779,
|
| 521 |
+
468,
|
| 522 |
+
901
|
| 523 |
+
],
|
| 524 |
+
"page_idx": 3
|
| 525 |
+
},
|
| 526 |
+
{
|
| 527 |
+
"type": "text",
|
| 528 |
+
"text": "image as $\\mathcal{D}(\\mathcal{E}(\\pmb{x}))\\approx \\pmb{x}$ . Secondly, the conditional denoiser $\\epsilon_{\\theta}(\\cdot)$ is trained on this latent space to generate latent vectors based on the text condition $y$ . The pre-trained CLIP text encoder [23] $\\tau_{\\mathrm{text}}(\\cdot)$ is used to encode the text condition $y$ into text features $\\pmb{f}_{ct} = \\tau_{\\mathrm{text}}(y)$ . Then, the denoiser is trained with mean-squared loss:",
|
| 529 |
+
"bbox": [
|
| 530 |
+
496,
|
| 531 |
+
667,
|
| 532 |
+
890,
|
| 533 |
+
758
|
| 534 |
+
],
|
| 535 |
+
"page_idx": 3
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"type": "equation",
|
| 539 |
+
"text": "\n$$\nL := \\mathbb {E} _ {\\boldsymbol {z} \\sim \\mathcal {E} (\\boldsymbol {x}), \\boldsymbol {f} _ {\\boldsymbol {y}}, \\epsilon \\sim \\mathcal {N} (\\boldsymbol {0}, \\mathrm {I}), t} \\left[ \\| \\epsilon - \\epsilon_ {\\theta} \\left(\\boldsymbol {z} _ {\\boldsymbol {t}}, t, \\boldsymbol {f} _ {\\boldsymbol {c} \\boldsymbol {t}}\\right) \\| _ {2} ^ {2} \\right], \\tag {1}\n$$\n",
|
| 540 |
+
"text_format": "latex",
|
| 541 |
+
"bbox": [
|
| 542 |
+
511,
|
| 543 |
+
767,
|
| 544 |
+
890,
|
| 545 |
+
792
|
| 546 |
+
],
|
| 547 |
+
"page_idx": 3
|
| 548 |
+
},
|
| 549 |
+
{
|
| 550 |
+
"type": "text",
|
| 551 |
+
"text": "where $\\epsilon$ denotes for the unscaled noise and $t$ is the timestep. $z_{t}$ is the latent vector that noised according to $t$ :",
|
| 552 |
+
"bbox": [
|
| 553 |
+
500,
|
| 554 |
+
801,
|
| 555 |
+
890,
|
| 556 |
+
832
|
| 557 |
+
],
|
| 558 |
+
"page_idx": 3
|
| 559 |
+
},
|
| 560 |
+
{
|
| 561 |
+
"type": "equation",
|
| 562 |
+
"text": "\n$$\n\\boldsymbol {z} _ {t} = \\sqrt {\\hat {\\alpha} _ {t}} \\boldsymbol {z} _ {0} + \\sqrt {1 - \\hat {\\alpha} _ {t}} \\epsilon , \\tag {2}\n$$\n",
|
| 563 |
+
"text_format": "latex",
|
| 564 |
+
"bbox": [
|
| 565 |
+
602,
|
| 566 |
+
840,
|
| 567 |
+
890,
|
| 568 |
+
859
|
| 569 |
+
],
|
| 570 |
+
"page_idx": 3
|
| 571 |
+
},
|
| 572 |
+
{
|
| 573 |
+
"type": "text",
|
| 574 |
+
"text": "where $\\hat{\\alpha}_t\\in [0,1]$ is the hyper-parameter that modulates the quantity of noise added. Larger $t$ means smaller $\\hat{\\alpha}_t$ and",
|
| 575 |
+
"bbox": [
|
| 576 |
+
496,
|
| 577 |
+
869,
|
| 578 |
+
890,
|
| 579 |
+
901
|
| 580 |
+
],
|
| 581 |
+
"page_idx": 3
|
| 582 |
+
},
|
| 583 |
+
{
|
| 584 |
+
"type": "page_number",
|
| 585 |
+
"text": "4",
|
| 586 |
+
"bbox": [
|
| 587 |
+
478,
|
| 588 |
+
924,
|
| 589 |
+
490,
|
| 590 |
+
935
|
| 591 |
+
],
|
| 592 |
+
"page_idx": 3
|
| 593 |
+
},
|
| 594 |
+
{
|
| 595 |
+
"type": "text",
|
| 596 |
+
"text": "thereby a more noised latent vector $z_{t}$ . During inference, a random Gaussian noise $z_{T}$ is iteratively denoised to $z_{0}$ , and the final generated image is obtained through $x' = \\mathcal{D}(z_0)$ .",
|
| 597 |
+
"bbox": [
|
| 598 |
+
76,
|
| 599 |
+
90,
|
| 600 |
+
468,
|
| 601 |
+
136
|
| 602 |
+
],
|
| 603 |
+
"page_idx": 4
|
| 604 |
+
},
|
| 605 |
+
{
|
| 606 |
+
"type": "text",
|
| 607 |
+
"text": "The incorporation of text condition in Stable Diffusion is implemented as textual cross-attention:",
|
| 608 |
+
"bbox": [
|
| 609 |
+
76,
|
| 610 |
+
136,
|
| 611 |
+
468,
|
| 612 |
+
166
|
| 613 |
+
],
|
| 614 |
+
"page_idx": 4
|
| 615 |
+
},
|
| 616 |
+
{
|
| 617 |
+
"type": "equation",
|
| 618 |
+
"text": "\n$$\n\\operatorname {A t t e n t i o n} (Q, K, V) = \\operatorname {S o f t m a x} \\left(\\frac {Q K ^ {\\top}}{\\sqrt {d}}\\right) V, \\tag {3}\n$$\n",
|
| 619 |
+
"text_format": "latex",
|
| 620 |
+
"bbox": [
|
| 621 |
+
125,
|
| 622 |
+
174,
|
| 623 |
+
468,
|
| 624 |
+
210
|
| 625 |
+
],
|
| 626 |
+
"page_idx": 4
|
| 627 |
+
},
|
| 628 |
+
{
|
| 629 |
+
"type": "text",
|
| 630 |
+
"text": "where the query $Q = W_{Q} \\cdot f_{i}$ , key $K = W_{K} \\cdot f_{ct}$ and value $V = W_{V} \\cdot f_{ct}$ . $W_{Q}, W_{K}, W_{V}$ are weight parameters of query, key and value projection layers. $f_{i}, f_{ct}$ are the latent image features and text features, and $d$ is the channel dimension of key and query features. The latent image feature is then updated with the attention block output.",
|
| 631 |
+
"bbox": [
|
| 632 |
+
76,
|
| 633 |
+
215,
|
| 634 |
+
468,
|
| 635 |
+
306
|
| 636 |
+
],
|
| 637 |
+
"page_idx": 4
|
| 638 |
+
},
|
| 639 |
+
{
|
| 640 |
+
"type": "text",
|
| 641 |
+
"text": "3.2. Training Paradigm",
|
| 642 |
+
"text_level": 1,
|
| 643 |
+
"bbox": [
|
| 644 |
+
76,
|
| 645 |
+
316,
|
| 646 |
+
261,
|
| 647 |
+
330
|
| 648 |
+
],
|
| 649 |
+
"page_idx": 4
|
| 650 |
+
},
|
| 651 |
+
{
|
| 652 |
+
"type": "text",
|
| 653 |
+
"text": "As depicted in Fig. 3(a), the text $y$ and image $x$ are first encoded into text features $\\pmb{f}_{ct} \\in \\mathbb{R}^{n_t \\times c_t}$ and image features $\\pmb{f}_{ci} \\in \\mathbb{R}^{n_i \\times c_i}$ by the pre-trained CLIP text/image encoders [23] respectively. Here, $n_t, c_t, n_i, c_i$ are text feature number/dimension and image feature number/dimension, respectively. Afterward, the adaptive scoring module takes the text features $\\pmb{f}_{ct}$ , currently generated features $\\pmb{z}_t \\in \\mathbb{R}^{h \\times w \\times c}$ , and timestep $t$ as inputs to estimate the score for each features in $\\pmb{f}_{ci}$ , selecting a subset of key ones as the visual condition $\\hat{\\pmb{f}}_{ci} \\in \\mathbb{R}^{\\hat{n}_i \\times c_i}$ , where $\\hat{n}_i < n_i$ is the selected image feature number. Next, we extend textual cross-attention with another visual cross-attention to incorporate the visual condition $\\hat{\\pmb{f}}_{yi}$ . Specifically, Eq. 3 is rewritten as:",
|
| 654 |
+
"bbox": [
|
| 655 |
+
76,
|
| 656 |
+
339,
|
| 657 |
+
468,
|
| 658 |
+
536
|
| 659 |
+
],
|
| 660 |
+
"page_idx": 4
|
| 661 |
+
},
|
| 662 |
+
{
|
| 663 |
+
"type": "equation",
|
| 664 |
+
"text": "\n$$\n\\begin{array}{l} \\operatorname {A t t e n t i o n} (Q, K, V, K _ {i}, V _ {i}) = \\\\ \\operatorname {S o f t m a x} \\left(\\frac {Q K ^ {\\top}}{\\sqrt {d}}\\right) V + \\operatorname {S o f t m a x} \\left(\\frac {Q K _ {i} ^ {\\top}}{\\sqrt {d}}\\right) V _ {i}, \\tag {4} \\\\ \\end{array}\n$$\n",
|
| 665 |
+
"text_format": "latex",
|
| 666 |
+
"bbox": [
|
| 667 |
+
93,
|
| 668 |
+
547,
|
| 669 |
+
468,
|
| 670 |
+
603
|
| 671 |
+
],
|
| 672 |
+
"page_idx": 4
|
| 673 |
+
},
|
| 674 |
+
{
|
| 675 |
+
"type": "text",
|
| 676 |
+
"text": "where the new key $K_{i} = W_{Ki} \\cdot \\hat{f}_{ci}$ , value $V_{i} = W_{Vi} \\cdot \\hat{f}_{ci}$ are added. $W_{Ki}$ and $W_{Vi}$ are weight parameters. During training, only the adaptive scoring module and projection layers $W_{Ki}, W_{Vi}$ in each attention block are trainable, while other pre-trained models' weight remains frozen.",
|
| 677 |
+
"bbox": [
|
| 678 |
+
76,
|
| 679 |
+
612,
|
| 680 |
+
468,
|
| 681 |
+
688
|
| 682 |
+
],
|
| 683 |
+
"page_idx": 4
|
| 684 |
+
},
|
| 685 |
+
{
|
| 686 |
+
"type": "text",
|
| 687 |
+
"text": "Adaptive Scoring Module. On the one hand, the generation of the diffusion model itself, by nature, is a coarse-to-fine process with noise removed and details added step by step. In this process, different steps focus on different degrees of subject detail [2], spanning from global structures in the early to local textures in the latter. Accordingly, the importance of each image feature also dynamically changes. To smoothly narrow the real text word, the image condition of the subject should also adapt synchronously, providing guidance from coarse to fine grain. This requires equipping RealCustom with the ability to estimate the importance score of different image features. On the other hand, utilizing all image features as visual conditions results in a \"train-inference\" gap. This arises because,",
|
| 688 |
+
"bbox": [
|
| 689 |
+
75,
|
| 690 |
+
688,
|
| 691 |
+
468,
|
| 692 |
+
900
|
| 693 |
+
],
|
| 694 |
+
"page_idx": 4
|
| 695 |
+
},
|
| 696 |
+
{
|
| 697 |
+
"type": "text",
|
| 698 |
+
"text": "unlike the training stage, where the same images as the visual conditions and inputs to the denoiser $\\epsilon_{\\theta}$ , the given subjects, and the inference generation results should maintain similarity only in the subject part. Therefore, this gap can degrade both similarity and controllability in inference.",
|
| 699 |
+
"bbox": [
|
| 700 |
+
496,
|
| 701 |
+
90,
|
| 702 |
+
890,
|
| 703 |
+
166
|
| 704 |
+
],
|
| 705 |
+
"page_idx": 4
|
| 706 |
+
},
|
| 707 |
+
{
|
| 708 |
+
"type": "text",
|
| 709 |
+
"text": "The above rationale motivates the adaptive scoring module, which provides smooth and accurate visual conditions for customization. As illustrated in Fig. 4, the text $\\pmb{f}_{ct} \\in \\mathbb{R}^{n_t \\times c_t}$ and currently generated features $\\pmb{z}_t \\in \\mathbb{R}^{h \\times w \\times c} = \\mathbb{R}^{n_z \\times c}$ are first aggregated into the textual context $C_{\\mathrm{textual}}$ and visual context $C_{\\mathrm{visual}}$ through weighted pooling:",
|
| 710 |
+
"bbox": [
|
| 711 |
+
496,
|
| 712 |
+
167,
|
| 713 |
+
890,
|
| 714 |
+
257
|
| 715 |
+
],
|
| 716 |
+
"page_idx": 4
|
| 717 |
+
},
|
| 718 |
+
{
|
| 719 |
+
"type": "equation",
|
| 720 |
+
"text": "\n$$\n\\boldsymbol {A} _ {\\text {t e x t u a l}} = \\operatorname {S o f t m a x} \\left(\\boldsymbol {f} _ {\\boldsymbol {c t}} \\boldsymbol {W} _ {\\boldsymbol {a}} ^ {t}\\right) \\in \\mathbb {R} ^ {n _ {t} \\times 1} \\tag {5}\n$$\n",
|
| 721 |
+
"text_format": "latex",
|
| 722 |
+
"bbox": [
|
| 723 |
+
629,
|
| 724 |
+
268,
|
| 725 |
+
890,
|
| 726 |
+
286
|
| 727 |
+
],
|
| 728 |
+
"page_idx": 4
|
| 729 |
+
},
|
| 730 |
+
{
|
| 731 |
+
"type": "equation",
|
| 732 |
+
"text": "\n$$\n\\boldsymbol {A} _ {\\text {v i s u a l}} = \\operatorname {S o f t m a x} \\left(\\boldsymbol {z} _ {t} \\boldsymbol {W} _ {\\boldsymbol {a}} ^ {v}\\right) \\in \\mathbb {R} ^ {n _ {z} \\times 1} \\tag {6}\n$$\n",
|
| 733 |
+
"text_format": "latex",
|
| 734 |
+
"bbox": [
|
| 735 |
+
637,
|
| 736 |
+
287,
|
| 737 |
+
890,
|
| 738 |
+
304
|
| 739 |
+
],
|
| 740 |
+
"page_idx": 4
|
| 741 |
+
},
|
| 742 |
+
{
|
| 743 |
+
"type": "equation",
|
| 744 |
+
"text": "\n$$\n\\boldsymbol {C} _ {\\text {t e x t u a l}} = \\boldsymbol {A} _ {\\text {t e x t u a l}} ^ {\\top} \\boldsymbol {f} _ {\\boldsymbol {y}} \\in \\mathbb {R} ^ {1 \\times c _ {t}}, \\boldsymbol {C} _ {\\text {v i s u a l}} = \\boldsymbol {A} _ {\\text {v i s u a l}} ^ {\\top} \\boldsymbol {z} _ {\\boldsymbol {t}} \\in \\mathbb {R} ^ {1 \\times c}, \\tag {7}\n$$\n",
|
| 745 |
+
"text_format": "latex",
|
| 746 |
+
"bbox": [
|
| 747 |
+
511,
|
| 748 |
+
306,
|
| 749 |
+
890,
|
| 750 |
+
325
|
| 751 |
+
],
|
| 752 |
+
"page_idx": 4
|
| 753 |
+
},
|
| 754 |
+
{
|
| 755 |
+
"type": "text",
|
| 756 |
+
"text": "where $W_{a}^{t}\\in \\mathbb{R}^{c_{t}\\times 1},W_{a}^{v}\\in \\mathbb{R}^{c\\times 1}$ are weight parameters, and \"Softmax\" is operated in the number dimension. These contexts are then spatially replicated and concatenated with image features $f_{ci}\\in \\mathbb{R}^{n_i\\times c_i}$ to estimate the textual score $S_{\\mathrm{textual}}\\in \\mathbb{R}^{n_i\\times 1}$ and visual score $S_{\\mathrm{visual}}\\in \\mathbb{R}^{n_i\\times 1}$ respectively. These two scores are predicted by two lightweight score-net, which are implemented as two-layer MLPs.",
|
| 757 |
+
"bbox": [
|
| 758 |
+
496,
|
| 759 |
+
335,
|
| 760 |
+
890,
|
| 761 |
+
441
|
| 762 |
+
],
|
| 763 |
+
"page_idx": 4
|
| 764 |
+
},
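To make the weighted pooling (Eqs. 5-7) and the two-layer score-nets concrete, here is a minimal PyTorch sketch; the module layout, hidden size, and activation are our own assumptions rather than the authors' released implementation.

```python
import torch
import torch.nn as nn

class AdaptiveScoring(nn.Module):
    """Sketch of the adaptive scoring module: weighted pooling of text/visual
    features into contexts, then two lightweight two-layer MLP score-nets."""
    def __init__(self, c_t, c, c_i, hidden=256):
        super().__init__()
        self.w_a_t = nn.Linear(c_t, 1, bias=False)   # W_a^t in Eq. 5
        self.w_a_v = nn.Linear(c, 1, bias=False)     # W_a^v in Eq. 6
        self.textual_score_net = nn.Sequential(
            nn.Linear(c_i + c_t, hidden), nn.SiLU(), nn.Linear(hidden, 1))
        self.visual_score_net = nn.Sequential(
            nn.Linear(c_i + c, hidden), nn.SiLU(), nn.Linear(hidden, 1))

    def forward(self, f_ct, z_t, f_ci):
        # f_ct: (B, n_t, c_t) text features; z_t: (B, n_z, c) generated features
        # f_ci: (B, n_i, c_i) image (subject) features
        a_textual = torch.softmax(self.w_a_t(f_ct), dim=1)     # (B, n_t, 1), Eq. 5
        a_visual = torch.softmax(self.w_a_v(z_t), dim=1)       # (B, n_z, 1), Eq. 6
        c_textual = a_textual.transpose(1, 2) @ f_ct           # (B, 1, c_t), Eq. 7
        c_visual = a_visual.transpose(1, 2) @ z_t              # (B, 1, c),   Eq. 7
        n_i = f_ci.shape[1]
        s_textual = self.textual_score_net(
            torch.cat([f_ci, c_textual.expand(-1, n_i, -1)], dim=-1))  # (B, n_i, 1)
        s_visual = self.visual_score_net(
            torch.cat([f_ci, c_visual.expand(-1, n_i, -1)], dim=-1))   # (B, n_i, 1)
        return s_textual, s_visual
```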
|
| 765 |
+
{
|
| 766 |
+
"type": "text",
|
| 767 |
+
"text": "Considering that the textual features are roughly accurate and the generated features are gradually refined, a timestep-aware schedule is proposed to fuse these two scores:",
|
| 768 |
+
"bbox": [
|
| 769 |
+
496,
|
| 770 |
+
443,
|
| 771 |
+
890,
|
| 772 |
+
487
|
| 773 |
+
],
|
| 774 |
+
"page_idx": 4
|
| 775 |
+
},
|
| 776 |
+
{
|
| 777 |
+
"type": "equation",
|
| 778 |
+
"text": "\n$$\n\\boldsymbol {S} = \\left(1 - \\sqrt {\\hat {\\alpha} _ {t}}\\right) \\boldsymbol {S} _ {\\text {t e x t u a l}} + \\sqrt {\\hat {\\alpha} _ {t}} \\boldsymbol {S} _ {\\text {v i s u a l}}, \\tag {8}\n$$\n",
|
| 779 |
+
"text_format": "latex",
|
| 780 |
+
"bbox": [
|
| 781 |
+
568,
|
| 782 |
+
500,
|
| 783 |
+
890,
|
| 784 |
+
518
|
| 785 |
+
],
|
| 786 |
+
"page_idx": 4
|
| 787 |
+
},
|
| 788 |
+
{
|
| 789 |
+
"type": "text",
|
| 790 |
+
"text": "where $\\sqrt{\\hat{\\alpha}_t}$ is the hyperparameter of pre-trained diffusion models that modulate the amount of noise added to generated features. Then a softmax activation is applied to the fused score since our focus is on highlighting the comparative significance of each image feature vis-à-vis its counterparts: $S = \\operatorname{Softmax}(S)$ . The fused scores are multiplied with the image features to enable the learning of score-nets:",
|
| 791 |
+
"bbox": [
|
| 792 |
+
496,
|
| 793 |
+
529,
|
| 794 |
+
890,
|
| 795 |
+
635
|
| 796 |
+
],
|
| 797 |
+
"page_idx": 4
|
| 798 |
+
},
|
| 799 |
+
{
|
| 800 |
+
"type": "equation",
|
| 801 |
+
"text": "\n$$\n\\boldsymbol {f} _ {c i} = \\boldsymbol {f} _ {c i} \\circ (1 + S), \\tag {9}\n$$\n",
|
| 802 |
+
"text_format": "latex",
|
| 803 |
+
"bbox": [
|
| 804 |
+
622,
|
| 805 |
+
651,
|
| 806 |
+
890,
|
| 807 |
+
667
|
| 808 |
+
],
|
| 809 |
+
"page_idx": 4
|
| 810 |
+
},
|
| 811 |
+
{
|
| 812 |
+
"type": "text",
|
| 813 |
+
"text": "where $\\circ$ denotes the element-wise multiply. Finally, given a Top-K ratio $\\gamma_{\\mathrm{num}} \\in [0,1]$ , a sub-set of key features with highest scores are selected as the output $\\hat{\\pmb{f}}_{\\pmb{y}\\pmb{i}} \\in \\mathbb{R}^{\\hat{n}_i \\times c_i}$ where $\\hat{n}_i = \\gamma_{\\mathrm{num}} n_i$ . To enable flexible inference with different $\\gamma_{\\mathrm{num}}$ without performance degradation, we propose to use a uniformly random ratio during training:",
|
| 814 |
+
"bbox": [
|
| 815 |
+
496,
|
| 816 |
+
674,
|
| 817 |
+
890,
|
| 818 |
+
765
|
| 819 |
+
],
|
| 820 |
+
"page_idx": 4
|
| 821 |
+
},
|
| 822 |
+
{
|
| 823 |
+
"type": "equation",
|
| 824 |
+
"text": "\n$$\n\\gamma_ {\\text {n u m}} = \\text {u n i f o r m} \\left[ \\gamma_ {\\text {n u m}} ^ {\\text {l o w}}, \\gamma_ {\\text {n u m}} ^ {\\text {h i g h}} \\right], \\tag {10}\n$$\n",
|
| 825 |
+
"text_format": "latex",
|
| 826 |
+
"bbox": [
|
| 827 |
+
599,
|
| 828 |
+
775,
|
| 829 |
+
890,
|
| 830 |
+
794
|
| 831 |
+
],
|
| 832 |
+
"page_idx": 4
|
| 833 |
+
},
|
| 834 |
+
{
|
| 835 |
+
"type": "text",
|
| 836 |
+
"text": "where $\\gamma_{\\mathrm{num}}^{\\mathrm{low}},\\gamma_{\\mathrm{num}}^{\\mathrm{high}}$ are set to 0.3, 1.0, respectively.",
|
| 837 |
+
"bbox": [
|
| 838 |
+
500,
|
| 839 |
+
804,
|
| 840 |
+
821,
|
| 841 |
+
823
|
| 842 |
+
],
|
| 843 |
+
"page_idx": 4
|
| 844 |
+
},
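The timestep-aware fusion (Eq. 8), the feature modulation (Eq. 9), and the Top-K selection with a random training ratio (Eq. 10) could then be sketched as follows; `alpha_bar_t` stands for the pre-trained scheduler's cumulative $\hat{\alpha}_t$ and all names are illustrative assumptions.

```python
import torch

def select_image_features(f_ci, s_textual, s_visual, alpha_bar_t,
                          gamma_num=0.8, training=True,
                          gamma_low=0.3, gamma_high=1.0):
    """Fuse the two scores (Eq. 8), modulate the image features (Eq. 9),
    and keep a Top-K subset with a (random at training time) ratio (Eq. 10)."""
    w = alpha_bar_t ** 0.5                                    # sqrt(alpha_bar_t) in [0, 1]
    s = (1.0 - w) * s_textual + w * s_visual                  # Eq. 8, (B, n_i, 1)
    s = torch.softmax(s, dim=1)                               # relative importance
    f_ci = f_ci * (1.0 + s)                                   # Eq. 9
    if training:                                              # Eq. 10: random ratio
        gamma_num = float(torch.empty(1).uniform_(gamma_low, gamma_high))
    k = max(1, int(gamma_num * f_ci.shape[1]))
    idx = s.squeeze(-1).topk(k, dim=1).indices                # (B, k)
    return torch.gather(f_ci, 1, idx.unsqueeze(-1).expand(-1, -1, f_ci.shape[-1]))
```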
|
| 845 |
+
{
|
| 846 |
+
"type": "text",
|
| 847 |
+
"text": "3.3. Inference Paradigm",
|
| 848 |
+
"text_level": 1,
|
| 849 |
+
"bbox": [
|
| 850 |
+
500,
|
| 851 |
+
832,
|
| 852 |
+
689,
|
| 853 |
+
848
|
| 854 |
+
],
|
| 855 |
+
"page_idx": 4
|
| 856 |
+
},
|
| 857 |
+
{
|
| 858 |
+
"type": "text",
|
| 859 |
+
"text": "The inference paradigm of RealCustom consists of two branches, i.e., a text-to-image (T2I) branch where the visual input is set to 0 and a text&image-to-image (TI2I) branch",
|
| 860 |
+
"bbox": [
|
| 861 |
+
496,
|
| 862 |
+
854,
|
| 863 |
+
890,
|
| 864 |
+
900
|
| 865 |
+
],
|
| 866 |
+
"page_idx": 4
|
| 867 |
+
},
|
| 868 |
+
{
|
| 869 |
+
"type": "page_number",
|
| 870 |
+
"text": "5",
|
| 871 |
+
"bbox": [
|
| 872 |
+
480,
|
| 873 |
+
924,
|
| 874 |
+
488,
|
| 875 |
+
936
|
| 876 |
+
],
|
| 877 |
+
"page_idx": 4
|
| 878 |
+
},
|
| 879 |
+
{
|
| 880 |
+
"type": "image",
|
| 881 |
+
"img_path": "images/dae84ae76d8c7af0cdc41bd5d8875f3fddd750f48d1914e1b507a3be5e84350c.jpg",
|
| 882 |
+
"image_caption": [
|
| 883 |
+
"Figure 4. Illustration of adaptive scoring module. Text features and currently generated features are first aggregated into the textual and visual context, which are then spatially concatenated with image features to predict textual and visual scores. These scores are then fused based on the current timestep. Ultimately, only a subset of the key features is selected based on the fused score."
|
| 884 |
+
],
|
| 885 |
+
"image_footnote": [],
|
| 886 |
+
"bbox": [
|
| 887 |
+
89,
|
| 888 |
+
89,
|
| 889 |
+
467,
|
| 890 |
+
229
|
| 891 |
+
],
|
| 892 |
+
"page_idx": 5
|
| 893 |
+
},
|
| 894 |
+
{
|
| 895 |
+
"type": "text",
|
| 896 |
+
"text": "where the visual input is set to given subjects, as illustrated in Fig. 3(b). These two branches are connected by our proposed adaptive mask guidance strategy. Specifically, given previous step's output $z_{t}$ , a pure text conditional denoising process is performed in T2I branch to get the output $z_{t-1}^{T}$ , where all layers cross-attention map of the target real word (e.g., \"toy\") is extracted and resized to the same resolution (the same as the largest map size, i.e., $64 \\times 64$ in Stable Diffusion). The aggregated attention map is denoted as $M \\in \\mathbb{R}^{64 \\times 64}$ . Next, a Top-K selection is applied, i.e., given the target ratio $\\gamma_{\\mathrm{scope}} \\in [0,1]$ , only $\\gamma_{\\mathrm{scope}} \\times 64 \\times 64$ regions with the highest cross-attention score will remain, while the rest will be set to 0. The selected cross-attention map $\\bar{M}$ is normalized by its maximum value as:",
|
| 897 |
+
"bbox": [
|
| 898 |
+
75,
|
| 899 |
+
354,
|
| 900 |
+
468,
|
| 901 |
+
565
|
| 902 |
+
],
|
| 903 |
+
"page_idx": 5
|
| 904 |
+
},
|
| 905 |
+
{
|
| 906 |
+
"type": "equation",
|
| 907 |
+
"text": "\n$$\n\\hat {M} = \\frac {\\bar {M}}{\\max (\\bar {M})}, \\tag {11}\n$$\n",
|
| 908 |
+
"text_format": "latex",
|
| 909 |
+
"bbox": [
|
| 910 |
+
214,
|
| 911 |
+
577,
|
| 912 |
+
468,
|
| 913 |
+
611
|
| 914 |
+
],
|
| 915 |
+
"page_idx": 5
|
| 916 |
+
},
|
| 917 |
+
{
|
| 918 |
+
"type": "text",
|
| 919 |
+
"text": "where $\\max (\\cdot)$ represents the maximum value. The rationale behind this is that even in these selected parts, the subject relevance of different regions is also different.",
|
| 920 |
+
"bbox": [
|
| 921 |
+
75,
|
| 922 |
+
623,
|
| 923 |
+
468,
|
| 924 |
+
666
|
| 925 |
+
],
|
| 926 |
+
"page_idx": 5
|
| 927 |
+
},
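A minimal sketch of how the influence scope could be built from the aggregated cross-attention map of the real word, keeping the Top-$\gamma_{\mathrm{scope}}$ fraction of regions and max-normalizing as in Eq. 11 (an assumed implementation, not the released code):

```python
import torch

def influence_scope(attn_map, gamma_scope=0.25):
    """attn_map: (64, 64) cross-attention of the target real word, aggregated
    over layers and resized. Keep the Top-K regions, zero the rest, max-normalize."""
    flat = attn_map.flatten()
    k = max(1, int(gamma_scope * flat.numel()))
    thresh = flat.topk(k).values.min()
    m_bar = torch.where(attn_map >= thresh, attn_map, torch.zeros_like(attn_map))
    return m_bar / m_bar.max().clamp_min(1e-8)                # Eq. 11
```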
|
| 928 |
+
{
|
| 929 |
+
"type": "text",
|
| 930 |
+
"text": "In the TI2I branch, the influence scope $\\hat{M}$ is first multiplied by currently generated feature $z_{t}$ to provide accurate visual conditions for current generation step. The reason is that only subject-relevant parts should be considered for the calculation of influence quantity. Secondly, $\\hat{M}$ is multiplied by the visual cross-attention results to prevent negative impacts on the controllability of the given texts in other subject-irrelevant parts. Specifically, Eq. 4 is rewritten as:",
|
| 931 |
+
"bbox": [
|
| 932 |
+
75,
|
| 933 |
+
667,
|
| 934 |
+
468,
|
| 935 |
+
789
|
| 936 |
+
],
|
| 937 |
+
"page_idx": 5
|
| 938 |
+
},
|
| 939 |
+
{
|
| 940 |
+
"type": "equation",
|
| 941 |
+
"text": "\n$$\n\\operatorname {A t t e n t i o n} (Q, K, V, K _ {i}, V _ {i}) =\n$$\n",
|
| 942 |
+
"text_format": "latex",
|
| 943 |
+
"bbox": [
|
| 944 |
+
93,
|
| 945 |
+
805,
|
| 946 |
+
303,
|
| 947 |
+
821
|
| 948 |
+
],
|
| 949 |
+
"page_idx": 5
|
| 950 |
+
},
|
| 951 |
+
{
|
| 952 |
+
"type": "equation",
|
| 953 |
+
"text": "\n$$\n\\operatorname {S o f t m a x} \\left(\\frac {Q K ^ {\\top}}{\\sqrt {d}}\\right) V + \\left(\\operatorname {S o f t m a x} \\left(\\frac {Q K _ {i} ^ {\\top}}{\\sqrt {d}}\\right) V _ {i}\\right) \\hat {M}, \\tag {12}\n$$\n",
|
| 954 |
+
"text_format": "latex",
|
| 955 |
+
"bbox": [
|
| 956 |
+
96,
|
| 957 |
+
824,
|
| 958 |
+
468,
|
| 959 |
+
859
|
| 960 |
+
],
|
| 961 |
+
"page_idx": 5
|
| 962 |
+
},
|
| 963 |
+
{
|
| 964 |
+
"type": "text",
|
| 965 |
+
"text": "where the necessary resize operation is applied to match the size of $\\hat{M}$ with the resolution of each cross-attention",
|
| 966 |
+
"bbox": [
|
| 967 |
+
76,
|
| 968 |
+
869,
|
| 969 |
+
468,
|
| 970 |
+
898
|
| 971 |
+
],
|
| 972 |
+
"page_idx": 5
|
| 973 |
+
},
|
| 974 |
+
{
|
| 975 |
+
"type": "text",
|
| 976 |
+
"text": "block. The denoised output of TI2I branch is denoted as $z_{t-1}^{TI}$ . The classifier-free guidance [13] is extended to produce next step's denoised latent feature $z_{t-1}$ as:",
|
| 977 |
+
"bbox": [
|
| 978 |
+
498,
|
| 979 |
+
90,
|
| 980 |
+
890,
|
| 981 |
+
136
|
| 982 |
+
],
|
| 983 |
+
"page_idx": 5
|
| 984 |
+
},
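The rewritten cross-attention of Eq. 12 amounts to adding a visual attention term gated by the resized influence scope; a hedged sketch (tensor shapes and argument names are our assumptions):

```python
import math
import torch

def masked_dual_cross_attention(q, k_text, v_text, k_img, v_img, m_hat):
    """Eq. 12 sketch: textual cross-attention plus a visual cross-attention term
    whose output is gated by the (resized, flattened) influence scope m_hat."""
    d = q.shape[-1]
    text_out = torch.softmax(q @ k_text.transpose(-2, -1) / math.sqrt(d), dim=-1) @ v_text
    img_out = torch.softmax(q @ k_img.transpose(-2, -1) / math.sqrt(d), dim=-1) @ v_img
    # m_hat: (B, n_q, 1), already resized to this attention block's resolution
    return text_out + img_out * m_hat
```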
|
| 985 |
+
{
|
| 986 |
+
"type": "equation",
|
| 987 |
+
"text": "\n$$\n\\boldsymbol {z} _ {t - 1} = \\epsilon_ {\\theta} (\\emptyset) + \\omega_ {t} \\left(\\boldsymbol {z} _ {t - 1} ^ {T} - \\epsilon_ {\\theta} (\\emptyset)\\right) + \\omega_ {i} \\left(\\boldsymbol {z} _ {t - 1} ^ {T I} - \\boldsymbol {z} _ {t - 1} ^ {T}\\right), \\tag {13}\n$$\n",
|
| 988 |
+
"text_format": "latex",
|
| 989 |
+
"bbox": [
|
| 990 |
+
506,
|
| 991 |
+
143,
|
| 992 |
+
890,
|
| 993 |
+
165
|
| 994 |
+
],
|
| 995 |
+
"page_idx": 5
|
| 996 |
+
},
|
| 997 |
+
{
|
| 998 |
+
"type": "text",
|
| 999 |
+
"text": "where $\\epsilon_{\\theta}(\\emptyset)$ is the unconditional denoised output.",
|
| 1000 |
+
"bbox": [
|
| 1001 |
+
500,
|
| 1002 |
+
172,
|
| 1003 |
+
826,
|
| 1004 |
+
188
|
| 1005 |
+
],
|
| 1006 |
+
"page_idx": 5
|
| 1007 |
+
},
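Eq. 13 is a straightforward extension of classifier-free guidance with a separate image-guidance term; a minimal sketch using the default scales reported in the implementation details below (argument names are ours, with `z_uncond` standing for the unconditional output $\epsilon_{\theta}(\emptyset)$):

```python
def extended_cfg(z_uncond, z_text, z_text_image, omega_t=7.5, omega_i=12.5):
    """Eq. 13 sketch: text guidance plus an extra image guidance term."""
    return z_uncond + omega_t * (z_text - z_uncond) + omega_i * (z_text_image - z_text)
```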
|
| 1008 |
+
{
|
| 1009 |
+
"type": "text",
|
| 1010 |
+
"text": "With the smooth and accurate influence quantity of the given subject injected into the current step, the generation of the real word will gradually be narrowed from its initial general connotation to the specific subject, which will shape a more precise influence scope for the generation of the next step. Through this iterative updating and generation, we achieve real-time customization where the similarity for the given subject is disentangled with the controllability for the given text, leading to an optimal of both. More importantly, since both the adaptive scoring module as well as visual cross-attention layers are trained on general text-image datasets, the inference could be generally applied to any categories by using any target real words, enabling excellent open-domain customization capability.",
|
| 1011 |
+
"bbox": [
|
| 1012 |
+
496,
|
| 1013 |
+
188,
|
| 1014 |
+
890,
|
| 1015 |
+
400
|
| 1016 |
+
],
|
| 1017 |
+
"page_idx": 5
|
| 1018 |
+
},
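Putting the pieces together, one denoising step of the two-branch inference could be organized roughly as below, reusing the `influence_scope` and `extended_cfg` sketches above; `denoise_t2i` and `denoise_ti2i` are hypothetical helpers standing in for the T2I branch (which also exposes the real word's cross-attention) and the TI2I branch with masked visual cross-attention.

```python
def realcustom_inference_step(z_t, t, text_cond, subject_feats,
                              denoise_t2i, denoise_ti2i,
                              gamma_scope=0.25, omega_t=7.5, omega_i=12.5):
    # T2I branch: pure text conditioning, plus the aggregated cross-attention
    # map of the target real word (e.g., "toy") at 64x64 resolution.
    z_text, attn = denoise_t2i(z_t, t, text_cond)
    m_hat = influence_scope(attn, gamma_scope)                 # Eq. 11
    # TI2I branch: subject features injected only inside the influence scope.
    z_text_image = denoise_ti2i(z_t, t, text_cond, subject_feats, m_hat)  # Eq. 12
    z_uncond = denoise_t2i(z_t, t, None)[0]                    # unconditional pass
    return extended_cfg(z_uncond, z_text, z_text_image, omega_t, omega_i)  # Eq. 13
```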
|
| 1019 |
+
{
|
| 1020 |
+
"type": "text",
|
| 1021 |
+
"text": "4. Experiments",
|
| 1022 |
+
"text_level": 1,
|
| 1023 |
+
"bbox": [
|
| 1024 |
+
500,
|
| 1025 |
+
412,
|
| 1026 |
+
632,
|
| 1027 |
+
429
|
| 1028 |
+
],
|
| 1029 |
+
"page_idx": 5
|
| 1030 |
+
},
|
| 1031 |
+
{
|
| 1032 |
+
"type": "text",
|
| 1033 |
+
"text": "4.1. Experimental Setups",
|
| 1034 |
+
"text_level": 1,
|
| 1035 |
+
"bbox": [
|
| 1036 |
+
500,
|
| 1037 |
+
436,
|
| 1038 |
+
696,
|
| 1039 |
+
454
|
| 1040 |
+
],
|
| 1041 |
+
"page_idx": 5
|
| 1042 |
+
},
|
| 1043 |
+
{
|
| 1044 |
+
"type": "text",
|
| 1045 |
+
"text": "Implementation. RealCustom is implemented on Stable Diffusion and trained on the filtered subset of Laion-5B [29] based on aesthetic score, using 16 A100 GPUs for 16w iterations with 1e-5 learning rate. Unless otherwise specified, DDIM sampler [31] with 50 sample steps is used for sampling and the classifier-free guidance $\\omega_{t},\\omega_{i}$ is 7.5 and 12.5. Top-K ratios $\\gamma_{\\mathrm{num}} = 0.8$ $\\gamma_{\\mathrm{scope}} = 0.25$",
|
| 1046 |
+
"bbox": [
|
| 1047 |
+
496,
|
| 1048 |
+
460,
|
| 1049 |
+
890,
|
| 1050 |
+
566
|
| 1051 |
+
],
|
| 1052 |
+
"page_idx": 5
|
| 1053 |
+
},
|
| 1054 |
+
{
|
| 1055 |
+
"type": "text",
|
| 1056 |
+
"text": "Evaluation. Similarity. We use the state-of-the-art segmentation model (i.e., SAM [15]) to segment the subject, and then evaluate with both CLIP-I and DINO [4] scores, which are average pairwise cosine similarity CLIP ViT-B/32 or DINO embeddings of the segmented subjects in generated and real images. Controllability. We calculate the cosine similarity between prompt and image CLIP ViT-B/32 embeddings (CLIP-T). In addition, ImageReward [37] is used to evaluate controllability and aesthetics (quality).",
|
| 1057 |
+
"bbox": [
|
| 1058 |
+
496,
|
| 1059 |
+
566,
|
| 1060 |
+
890,
|
| 1061 |
+
700
|
| 1062 |
+
],
|
| 1063 |
+
"page_idx": 5
|
| 1064 |
+
},
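For reference, these similarity metrics reduce to cosine similarities between embeddings; a minimal sketch assuming the CLIP/DINO embeddings of prompts, images, and SAM-segmented subjects have already been extracted:

```python
import torch
import torch.nn.functional as F

def clip_t(prompt_emb, image_emb):
    """CLIP-T: cosine similarity between prompt and image CLIP embeddings."""
    return F.cosine_similarity(prompt_emb, image_emb, dim=-1).mean()

def pairwise_subject_similarity(gen_subject_embs, real_subject_embs):
    """CLIP-I / DINO: average pairwise cosine similarity between embeddings of
    segmented subjects in generated and real images."""
    gen = F.normalize(gen_subject_embs, dim=-1)
    real = F.normalize(real_subject_embs, dim=-1)
    return (gen @ real.t()).mean()
```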
|
| 1065 |
+
{
|
| 1066 |
+
"type": "text",
|
| 1067 |
+
"text": "Prior SOTAs. We compare with existing paradigm of both optimization-based (i.e., Textual Inversion[10], DreamBooth [27], CustomDiffusion [16]) and encoder-based (ELITE[34], BLIP-Diffusion[18]) state-of-the-arts.",
|
| 1068 |
+
"bbox": [
|
| 1069 |
+
496,
|
| 1070 |
+
702,
|
| 1071 |
+
890,
|
| 1072 |
+
762
|
| 1073 |
+
],
|
| 1074 |
+
"page_idx": 5
|
| 1075 |
+
},
|
| 1076 |
+
{
|
| 1077 |
+
"type": "text",
|
| 1078 |
+
"text": "4.2. Main Results",
|
| 1079 |
+
"text_level": 1,
|
| 1080 |
+
"bbox": [
|
| 1081 |
+
500,
|
| 1082 |
+
771,
|
| 1083 |
+
637,
|
| 1084 |
+
786
|
| 1085 |
+
],
|
| 1086 |
+
"page_idx": 5
|
| 1087 |
+
},
|
| 1088 |
+
{
|
| 1089 |
+
"type": "text",
|
| 1090 |
+
"text": "Quantitative results. As shown in Tab. 1, RealCustom outperforms existing methods in all metrics: (1) for controllability, we improve CLIP-T and ImageReward by $8.1\\%$ and $223.5\\%$ , respectively. The significant improvement in ImageReward shows that our paradigm generates much higher quality customization; (2) for similarity, we also achieve state-of-the-art performance on both CLIP-I and DINO-I.",
|
| 1091 |
+
"bbox": [
|
| 1092 |
+
496,
|
| 1093 |
+
794,
|
| 1094 |
+
890,
|
| 1095 |
+
898
|
| 1096 |
+
],
|
| 1097 |
+
"page_idx": 5
|
| 1098 |
+
},
|
| 1099 |
+
{
|
| 1100 |
+
"type": "page_number",
|
| 1101 |
+
"text": "6",
|
| 1102 |
+
"bbox": [
|
| 1103 |
+
478,
|
| 1104 |
+
925,
|
| 1105 |
+
490,
|
| 1106 |
+
936
|
| 1107 |
+
],
|
| 1108 |
+
"page_idx": 5
|
| 1109 |
+
},
|
| 1110 |
+
{
|
| 1111 |
+
"type": "table",
|
| 1112 |
+
"img_path": "images/930521eba7eebe22c8c51ed0617cf4c41c344d1cf47097eeee7cd0d9ea4d2b72.jpg",
|
| 1113 |
+
"table_caption": [],
|
| 1114 |
+
"table_footnote": [],
|
| 1115 |
+
"table_body": "<table><tr><td rowspan=\"2\">Methods</td><td colspan=\"2\">controllability</td><td colspan=\"2\">similarity</td><td>efficiency</td></tr><tr><td>CLIP-T ↑</td><td>ImageReward ↑</td><td>CLIP-I ↑</td><td>DINO-I ↑</td><td>test-time optimize steps</td></tr><tr><td>Textual Inversion [10]</td><td>0.2546</td><td>-0.9168</td><td>0.7603</td><td>0.5956</td><td>5000</td></tr><tr><td>DreamBooth [27]</td><td>0.2783</td><td>0.2393</td><td>0.8466</td><td>0.7851</td><td>800</td></tr><tr><td>Custom Diffusion [16]</td><td>0.2884</td><td>0.2558</td><td>0.8257</td><td>0.7093</td><td>500</td></tr><tr><td>ELITE [34]</td><td>0.2920</td><td>0.2690</td><td>0.8022</td><td>0.6489</td><td>0 (real-time)</td></tr><tr><td>BLIP-Diffusion [18]</td><td>0.2967</td><td>0.2172</td><td>0.8145</td><td>0.6486</td><td>0 (real-time)</td></tr><tr><td>RealCustom(ours)</td><td>0.3204</td><td>0.8703</td><td>0.8552</td><td>0.7865</td><td>0 (real-time)</td></tr></table>",
|
| 1116 |
+
"bbox": [
|
| 1117 |
+
78,
|
| 1118 |
+
89,
|
| 1119 |
+
645,
|
| 1120 |
+
208
|
| 1121 |
+
],
|
| 1122 |
+
"page_idx": 6
|
| 1123 |
+
},
|
| 1124 |
+
{
|
| 1125 |
+
"type": "image",
|
| 1126 |
+
"img_path": "images/f89a2160b881c27d56d975d4ecfbb5e7d697377cc0c04ce4127116c702ae10dc.jpg",
|
| 1127 |
+
"image_caption": [],
|
| 1128 |
+
"image_footnote": [],
|
| 1129 |
+
"bbox": [
|
| 1130 |
+
645,
|
| 1131 |
+
90,
|
| 1132 |
+
883,
|
| 1133 |
+
205
|
| 1134 |
+
],
|
| 1135 |
+
"page_idx": 6
|
| 1136 |
+
},
|
| 1137 |
+
{
|
| 1138 |
+
"type": "text",
|
| 1139 |
+
"text": "Table 1. Quantitative comparisons with existing methods. Left: Our proposed RealCustom outperforms existing methods in all metrics, i.e., (1) for controllability, achieving $8.1\\%$ and $223.5\\%$ improvements on CLIP-T and ImageReward, respectively. The significant improvement on ImageReward also validates that RealCustom could generate customized images with much higher quality (higher aesthetic score); (2) for similarity, we also achieve state-of-the-art performance on both CLIP-I and DINO-I. Right: We plot the \"CLIP-T verse DINO\", showing that the existing methods are trapped into the dual-optimum paradox, while RealCustom completely get rid of it and achieve both high-quality similarity and controllability. The same conclusion in \"CLIP-T verse CLIP-I\" can be found in Fig. 1(c).",
|
| 1140 |
+
"bbox": [
|
| 1141 |
+
75,
|
| 1142 |
+
219,
|
| 1143 |
+
895,
|
| 1144 |
+
305
|
| 1145 |
+
],
|
| 1146 |
+
"page_idx": 6
|
| 1147 |
+
},
|
| 1148 |
+
{
|
| 1149 |
+
"type": "image",
|
| 1150 |
+
"img_path": "images/675cfc5839ceb3e5d21ff64450593f8bc08346404913aaf88d565660d8654cd2.jpg",
|
| 1151 |
+
"image_caption": [
|
| 1152 |
+
"Figure 5. Qualitative comparison with existing methods. RealCustom could produce much higher quality customization results that have better similarity with the given subject and better controllability with the given text compared to existing works. Moreover, RealCustom shows superior diversity (different subject poses, locations, etc.) and generation quality (e.g., the \"autumn leaves\" scene in the third row)."
|
| 1153 |
+
],
|
| 1154 |
+
"image_footnote": [],
|
| 1155 |
+
"bbox": [
|
| 1156 |
+
84,
|
| 1157 |
+
321,
|
| 1158 |
+
888,
|
| 1159 |
+
679
|
| 1160 |
+
],
|
| 1161 |
+
"page_idx": 6
|
| 1162 |
+
},
|
| 1163 |
+
{
|
| 1164 |
+
"type": "text",
|
| 1165 |
+
"text": "The figure of \"CLIP-T verse DINO\" validates that the existing paradigm is trapped into the dual-optimum paradox, while RealCustom effectively eradicates it.",
|
| 1166 |
+
"bbox": [
|
| 1167 |
+
75,
|
| 1168 |
+
756,
|
| 1169 |
+
470,
|
| 1170 |
+
801
|
| 1171 |
+
],
|
| 1172 |
+
"page_idx": 6
|
| 1173 |
+
},
|
| 1174 |
+
{
|
| 1175 |
+
"type": "text",
|
| 1176 |
+
"text": "Qualitative results. As shown in Fig. 5, RealCustom demonstrates superior zero-shot open-domain customization capability (e.g., the rare shaped toy in the first row), generating higher-quality custom images that have better similarity with the given subject and better controllability with the given text compared to existing works.",
|
| 1177 |
+
"bbox": [
|
| 1178 |
+
75,
|
| 1179 |
+
809,
|
| 1180 |
+
470,
|
| 1181 |
+
902
|
| 1182 |
+
],
|
| 1183 |
+
"page_idx": 6
|
| 1184 |
+
},
|
| 1185 |
+
{
|
| 1186 |
+
"type": "text",
|
| 1187 |
+
"text": "4.3. Ablations",
|
| 1188 |
+
"text_level": 1,
|
| 1189 |
+
"bbox": [
|
| 1190 |
+
500,
|
| 1191 |
+
756,
|
| 1192 |
+
611,
|
| 1193 |
+
770
|
| 1194 |
+
],
|
| 1195 |
+
"page_idx": 6
|
| 1196 |
+
},
|
| 1197 |
+
{
|
| 1198 |
+
"type": "text",
|
| 1199 |
+
"text": "Effectiveness of adaptive mask guidance strategy. We first visualize the narrowing down process of the real word by the proposed adaptive mask guidance strategy in Fig. 6. We could observe that starting from the same state (the same mask since there's no information of the given subject is introduced at the first step), RealCustom gradually forms the structure and details of the given subject, achieving the open-domain zero-shot customization while remain-",
|
| 1200 |
+
"bbox": [
|
| 1201 |
+
496,
|
| 1202 |
+
779,
|
| 1203 |
+
893,
|
| 1204 |
+
902
|
| 1205 |
+
],
|
| 1206 |
+
"page_idx": 6
|
| 1207 |
+
},
|
| 1208 |
+
{
|
| 1209 |
+
"type": "page_number",
|
| 1210 |
+
"text": "7",
|
| 1211 |
+
"bbox": [
|
| 1212 |
+
478,
|
| 1213 |
+
924,
|
| 1214 |
+
491,
|
| 1215 |
+
936
|
| 1216 |
+
],
|
| 1217 |
+
"page_idx": 6
|
| 1218 |
+
},
|
| 1219 |
+
{
|
| 1220 |
+
"type": "image",
|
| 1221 |
+
"img_path": "images/f2a545cc9ef79d7217c2ac261afa1448737d920b64c61182712c05e1f5124e7d.jpg",
|
| 1222 |
+
"image_caption": [
|
| 1223 |
+
"Figure 6. Illustration of gradually narrowing the real words into the given subjects. Upper: RealCustom generated results (first row) and the original text-to-image generated result (second row) by pre-trained models with the same seed. The mask is visualized by the Top-25% highest attention score regions of the real word \"toy\". We could observe that starting from the same state (the same mask since there's no information of the given subject is introduced at the beginning), RealCustom gradually forms the structure and details of the given subject by our proposed adaptive mask strategy, achieving the open-domain zero-shot customization. Lower: More visualization cases."
|
| 1224 |
+
],
|
| 1225 |
+
"image_footnote": [],
|
| 1226 |
+
"bbox": [
|
| 1227 |
+
94,
|
| 1228 |
+
109,
|
| 1229 |
+
883,
|
| 1230 |
+
281
|
| 1231 |
+
],
|
| 1232 |
+
"page_idx": 7
|
| 1233 |
+
},
|
| 1234 |
+
{
|
| 1235 |
+
"type": "table",
|
| 1236 |
+
"img_path": "images/ba90ec03fb9e0724959b059921bcc65b64f03d1e23872acda27d90cbbebd48d5.jpg",
|
| 1237 |
+
"table_caption": [],
|
| 1238 |
+
"table_footnote": [],
|
| 1239 |
+
"table_body": "<table><tr><td>inference setting</td><td>CLIP-T ↑</td><td>CLIP-I ↑</td></tr><tr><td>γscope = 0.1</td><td>0.32</td><td>0.8085</td></tr><tr><td>γscope = 0.2</td><td>0.3195</td><td>0.8431</td></tr><tr><td>γscope = 0.25</td><td>0.3204</td><td>0.8552</td></tr><tr><td>γscope = 0.25, binary</td><td>0.294</td><td>0.8567</td></tr><tr><td>γscope = 0.3</td><td>0.3129</td><td>0.8578</td></tr><tr><td>γscope = 0.4</td><td>0.3023</td><td>0.8623</td></tr><tr><td>γscope = 0.5</td><td>0.285</td><td>0.8654</td></tr></table>",
|
| 1240 |
+
"bbox": [
|
| 1241 |
+
137,
|
| 1242 |
+
386,
|
| 1243 |
+
410,
|
| 1244 |
+
493
|
| 1245 |
+
],
|
| 1246 |
+
"page_idx": 7
|
| 1247 |
+
},
|
| 1248 |
+
{
|
| 1249 |
+
"type": "image",
|
| 1250 |
+
"img_path": "images/8d12bd738895585261fa086e821c7370221a5e801fae21c628a080ee2772d88b.jpg",
|
| 1251 |
+
"image_caption": [
|
| 1252 |
+
"Figure 7. Visualization of different influence scope."
|
| 1253 |
+
],
|
| 1254 |
+
"image_footnote": [],
|
| 1255 |
+
"bbox": [
|
| 1256 |
+
81,
|
| 1257 |
+
565,
|
| 1258 |
+
468,
|
| 1259 |
+
651
|
| 1260 |
+
],
|
| 1261 |
+
"page_idx": 7
|
| 1262 |
+
},
|
| 1263 |
+
{
|
| 1264 |
+
"type": "table",
|
| 1265 |
+
"img_path": "images/d4bc566218dc66effd8fe17d3c2dcf629fef6cb614ee180594b85fc4a7a79bdc.jpg",
|
| 1266 |
+
"table_caption": [
|
| 1267 |
+
"Table 2. Ablation of different $\\gamma_{\\mathrm{scope}}$ , which denotes the influence scope of the given subject in RealCustom during inference. \"binary\" means using binary masks instead of max norm in Eq. 11."
|
| 1268 |
+
],
|
| 1269 |
+
"table_footnote": [],
|
| 1270 |
+
"table_body": "<table><tr><td>ID</td><td>settings</td><td>CLIP-T ↑</td><td>CLIP-I ↑</td></tr><tr><td>1</td><td>full model, γnum = 0.8</td><td>0.3204</td><td>0.8552</td></tr><tr><td>2</td><td>w/o adaptive scoring module</td><td>0.3002</td><td>0.8221</td></tr><tr><td>3</td><td>textual score only, γnum = 0.8</td><td>0.313</td><td>0.8335</td></tr><tr><td>4</td><td>visual score only, γnum = 0.8</td><td>0.2898</td><td>0.802</td></tr><tr><td>5</td><td>(textual + visual) / 2, γnum = 0.8</td><td>0.3156</td><td>0.8302</td></tr><tr><td>6</td><td>full model, γnum = 0.9</td><td>0.315</td><td>0.8541</td></tr><tr><td>7</td><td>full model, γnum = 0.7</td><td>0.3202</td><td>0.8307</td></tr></table>",
|
| 1271 |
+
"bbox": [
|
| 1272 |
+
89,
|
| 1273 |
+
693,
|
| 1274 |
+
457,
|
| 1275 |
+
801
|
| 1276 |
+
],
|
| 1277 |
+
"page_idx": 7
|
| 1278 |
+
},
|
| 1279 |
+
{
|
| 1280 |
+
"type": "text",
|
| 1281 |
+
"text": "Table 3. Ablation of the adaptive scoring module, where $\\gamma_{\\mathrm{num}}$ means the influence quantity of the given subject during inference.",
|
| 1282 |
+
"bbox": [
|
| 1283 |
+
76,
|
| 1284 |
+
811,
|
| 1285 |
+
467,
|
| 1286 |
+
839
|
| 1287 |
+
],
|
| 1288 |
+
"page_idx": 7
|
| 1289 |
+
},
|
| 1290 |
+
{
|
| 1291 |
+
"type": "text",
|
| 1292 |
+
"text": "ing other subject-irrelevant parts (e.g., the city background) completely controlled by the given text.",
|
| 1293 |
+
"bbox": [
|
| 1294 |
+
76,
|
| 1295 |
+
869,
|
| 1296 |
+
467,
|
| 1297 |
+
900
|
| 1298 |
+
],
|
| 1299 |
+
"page_idx": 7
|
| 1300 |
+
},
|
| 1301 |
+
{
|
| 1302 |
+
"type": "text",
|
| 1303 |
+
"text": "We then ablate on the Top-K raito $\\gamma_{\\mathrm{scope}}$ in Tab. 2: (1) within a proper range (experimentally, $\\gamma_{\\mathrm{scope}} \\in [0.2, 0.4]$ ) the results are quite robust; (2) the maximum normalization in Eq. 11 is important for the unity of high similarity and controllability, since different regions in the selected parts have different subject relevance and should be set to different weights. (3) Too small or too large influence scope will degrade similarity or controllability, respectively. These conclusions are validated by the visualization in Fig. 7.",
|
| 1304 |
+
"bbox": [
|
| 1305 |
+
498,
|
| 1306 |
+
388,
|
| 1307 |
+
890,
|
| 1308 |
+
523
|
| 1309 |
+
],
|
| 1310 |
+
"page_idx": 7
|
| 1311 |
+
},
|
| 1312 |
+
{
|
| 1313 |
+
"type": "text",
|
| 1314 |
+
"text": "Effectiveness of adaptive scoring module. As shown in Tab. 3, (1) We first compare with the simple use of all image features (ID-2), which results in degradation of both similarity and controllability, proving the importance of providing accurate and smooth influence quantity along with the coarse-to-fine diffusion generation process; (2) We then ablate on the module design (ID-3,4,5, ID-5), finding that using image score only results in worse performance. The reason is that the generation features are noisy at the beginning, resulting in an inaccurate score prediction. Therefore, we propose a step-scheduler to adaptively fuse text and image scores, leading to the best performance; (3) Finally, the choice of influence quantity $\\gamma_{\\mathrm{num}}$ is ablated in ID-6 & 7.",
|
| 1315 |
+
"bbox": [
|
| 1316 |
+
496,
|
| 1317 |
+
526,
|
| 1318 |
+
892,
|
| 1319 |
+
722
|
| 1320 |
+
],
|
| 1321 |
+
"page_idx": 7
|
| 1322 |
+
},
|
| 1323 |
+
{
|
| 1324 |
+
"type": "text",
|
| 1325 |
+
"text": "5. Conclusion",
|
| 1326 |
+
"text_level": 1,
|
| 1327 |
+
"bbox": [
|
| 1328 |
+
500,
|
| 1329 |
+
738,
|
| 1330 |
+
617,
|
| 1331 |
+
753
|
| 1332 |
+
],
|
| 1333 |
+
"page_idx": 7
|
| 1334 |
+
},
|
| 1335 |
+
{
|
| 1336 |
+
"type": "text",
|
| 1337 |
+
"text": "In this paper, we present a novel customization paradigm RealCustom that, for the first time, disentangles similarity of given subjects from controllability of given text by precisely limiting subject influence to relevant parts, which gradually narrowing the real word from its general connotation to the specific subject in a novel \"train-inference\" framework: the adaptive scoring module learns to adaptively modulate influence quantity during training; (2) the adaptive mask guidance strategy iteratively updates the in",
|
| 1338 |
+
"bbox": [
|
| 1339 |
+
496,
|
| 1340 |
+
763,
|
| 1341 |
+
890,
|
| 1342 |
+
900
|
| 1343 |
+
],
|
| 1344 |
+
"page_idx": 7
|
| 1345 |
+
},
|
| 1346 |
+
{
|
| 1347 |
+
"type": "header",
|
| 1348 |
+
"text": "Top-25% highest attention score regions of \"toy\"",
|
| 1349 |
+
"bbox": [
|
| 1350 |
+
401,
|
| 1351 |
+
93,
|
| 1352 |
+
643,
|
| 1353 |
+
106
|
| 1354 |
+
],
|
| 1355 |
+
"page_idx": 7
|
| 1356 |
+
},
|
| 1357 |
+
{
|
| 1358 |
+
"type": "page_number",
|
| 1359 |
+
"text": "8",
|
| 1360 |
+
"bbox": [
|
| 1361 |
+
478,
|
| 1362 |
+
924,
|
| 1363 |
+
488,
|
| 1364 |
+
936
|
| 1365 |
+
],
|
| 1366 |
+
"page_idx": 7
|
| 1367 |
+
},
|
| 1368 |
+
{
|
| 1369 |
+
"type": "text",
|
| 1370 |
+
"text": "fluence scope and influence quantity of given subjects during inference. Extensive experiments demonstrate that RealCustom achieves the unity of high-quality similarity and controllability in the real-time open-domain scenario.",
|
| 1371 |
+
"bbox": [
|
| 1372 |
+
76,
|
| 1373 |
+
90,
|
| 1374 |
+
472,
|
| 1375 |
+
151
|
| 1376 |
+
],
|
| 1377 |
+
"page_idx": 8
|
| 1378 |
+
},
|
| 1379 |
+
{
|
| 1380 |
+
"type": "text",
|
| 1381 |
+
"text": "References",
|
| 1382 |
+
"text_level": 1,
|
| 1383 |
+
"bbox": [
|
| 1384 |
+
78,
|
| 1385 |
+
166,
|
| 1386 |
+
171,
|
| 1387 |
+
181
|
| 1388 |
+
],
|
| 1389 |
+
"page_idx": 8
|
| 1390 |
+
},
|
| 1391 |
+
{
|
| 1392 |
+
"type": "list",
|
| 1393 |
+
"sub_type": "ref_text",
|
| 1394 |
+
"list_items": [
|
| 1395 |
+
"[1] Yuval Alaluf, Elad Richardson, Gal Metzer, and Daniel Cohen-Or. A neural space-time representation for text-to-image personalization. arXiv preprint arXiv:2305.15391, 2023. 2, 3",
|
| 1396 |
+
"[2] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Jiaming Song, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, Bryan Catanzaro, et al. edifi: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324, 2022. 3, 5",
|
| 1397 |
+
"[3] Mingdeng Cao, Xintao Wang, Zhongang Qi, Ying Shan, Xiaohu Qie, and Yinqiang Zheng. Masactrl: Tuning-free mutual self-attention control for consistent image synthesis and editing. arXiv preprint arXiv:2304.08465, 2023. 3",
|
| 1398 |
+
"[4] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9650-9660, 2021. 6",
|
| 1399 |
+
"[5] Hila Chefer, Yuval Alaluf, Yael Vinker, Lior Wolf, and Daniel Cohen-Or. Attend-and-excite: Attention-based semantic guidance for text-to-image diffusion models. ACM Transactions on Graphics (TOG), 42(4):1-10, 2023. 3",
|
| 1400 |
+
"[6] Wenhu Chen, Hexiang Hu, Chitwan Sahara, and William W Cohen. Re-imagen: Retrieval-augmented text-to-image generator. arXiv preprint arXiv:2209.14491, 2022. 1, 3",
|
| 1401 |
+
"[7] Zhuowei Chen, Shancheng Fang, Wei Liu, Qian He, Mengqi Huang, Yongdong Zhang, and Zhendong Mao. Dreamidentity: Improved editability for efficient face-identity preserved image generation. arXiv preprint arXiv:2307.00300, 2023. 1, 3",
|
| 1402 |
+
"[8] Giannis Daras and Alexandros G Dimakis. Multiresolution textual inversion. arXiv preprint arXiv:2211.17115, 2022. 2, 3",
|
| 1403 |
+
"[9] Ziyi Dong, Pengxu Wei, and Liang Lin. Dreamartist: Towards controllable one-shot text-to-image generation via contrastive prompt-tuning. arXiv preprint arXiv:2211.11337, 2022.",
|
| 1404 |
+
"[10] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022. 2, 3, 6, 7",
|
| 1405 |
+
"[11] Rinon Gal, Moab Arar, Yuval Atzmon, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Designing an encoder for fast personalization of text-to-image models. arXiv preprint arXiv:2302.12228, 2023. 2, 3",
|
| 1406 |
+
"[12] Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022. 3"
|
| 1407 |
+
],
|
| 1408 |
+
"bbox": [
|
| 1409 |
+
78,
|
| 1410 |
+
191,
|
| 1411 |
+
470,
|
| 1412 |
+
898
|
| 1413 |
+
],
|
| 1414 |
+
"page_idx": 8
|
| 1415 |
+
},
|
| 1416 |
+
{
|
| 1417 |
+
"type": "list",
|
| 1418 |
+
"sub_type": "ref_text",
|
| 1419 |
+
"list_items": [
|
| 1420 |
+
"[13] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 6",
|
| 1421 |
+
"[14] Xuhui Jia, Yang Zhao, Kelvin CK Chan, Yandong Li, Han Zhang, Boqing Gong, Tingbo Hou, Huisheng Wang, and Yu-Chuan Su. Taming encoder for zero fine-tuning image customization with text-to-image diffusion models. arXiv preprint arXiv:2304.02642, 2023. 3",
|
| 1422 |
+
"[15] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 6",
|
| 1423 |
+
"[16] Nupur Kumari, Bingliang Zhang, Richard Zhang, Eli Shechtman, and Jun-Yan Zhu. Multi-concept customization of text-to-image diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1931-1941, 2023. 2, 3, 6, 7",
|
| 1424 |
+
"[17] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, et al. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. International Journal of Computer Vision, 128(7):1956-1981, 2020. 2",
|
| 1425 |
+
"[18] Dongxu Li, Junnan Li, and Steven CH Hoi. Blip-diffusion: Pre-trained subject representation for controllable text-to-image generation and editing. arXiv preprint arXiv:2305.14720, 2023. 2, 3, 6, 7",
|
| 1426 |
+
"[19] Yumeng Li, Margret Keuper, Dan Zhang, and Anna Khoreva. Divide & bind your attention for improved generative semantic nursing. arXiv preprint arXiv:2307.10864, 2023. 3",
|
| 1427 |
+
"[20] Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, and Yong Jae Lee. Gligen: Open-set grounded text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22511-22521, 2023. 3",
|
| 1428 |
+
"[21] Ziyi Li, Qinye Zhou, Xiaoyun Zhang, Ya Zhang, Yanfeng Wang, and Weidi Xie. Guiding text-to-image diffusion model towards grounded generation. arXiv preprint arXiv:2301.05221, 2023. 3",
|
| 1429 |
+
"[22] Zhiheng Liu, Yifei Zhang, Yujun Shen, Kecheng Zheng, Kai Zhu, Ruili Feng, Yu Liu, Deli Zhao, Jingren Zhou, and Yang Cao. Cones 2: Customizable image synthesis with multiple subjects. arXiv preprint arXiv:2305.19327, 2023. 2, 3",
|
| 1430 |
+
"[23] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 4, 5",
|
| 1431 |
+
"[24] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 1, 3",
|
| 1432 |
+
"[25] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 1, 3, 4"
|
| 1433 |
+
],
|
| 1434 |
+
"bbox": [
|
| 1435 |
+
501,
|
| 1436 |
+
92,
|
| 1437 |
+
890,
|
| 1438 |
+
898
|
| 1439 |
+
],
|
| 1440 |
+
"page_idx": 8
|
| 1441 |
+
},
|
| 1442 |
+
{
|
| 1443 |
+
"type": "page_number",
|
| 1444 |
+
"text": "9",
|
| 1445 |
+
"bbox": [
|
| 1446 |
+
478,
|
| 1447 |
+
924,
|
| 1448 |
+
491,
|
| 1449 |
+
936
|
| 1450 |
+
],
|
| 1451 |
+
"page_idx": 8
|
| 1452 |
+
},
|
| 1453 |
+
{
|
| 1454 |
+
"type": "list",
|
| 1455 |
+
"sub_type": "ref_text",
|
| 1456 |
+
"list_items": [
|
| 1457 |
+
"[26] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In Medical Image Computing and Computer-Assisted Intervention-MICCAI 2015: 18th International Conference, Munich, Germany, October 5-9, 2015, Proceedings, Part III 18, pages 234-241. Springer, 2015. 4",
|
| 1458 |
+
"[27] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023. 2, 3, 6, 7",
|
| 1459 |
+
"[28] Chitwan Sahara, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 1, 3",
|
| 1460 |
+
"[29] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. Advances in Neural Information Processing Systems, 35:25278-25294, 2022. 6",
|
| 1461 |
+
"[30] Jing Shi, Wei Xiong, Zhe Lin, and Hyun Joon Jung. Instantbooth: Personalized text-to-image generation without test-time finetuning. arXiv preprint arXiv:2304.03411, 2023. 2, 3",
|
| 1462 |
+
"[31] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502, 2020. 3, 6",
|
| 1463 |
+
"[32] Andrey Voynov, Qinghao Chu, Daniel Cohen-Or, and Kfir Aberman. $p+$ : Extended textual conditioning in text-to-image generation. arXiv preprint arXiv:2303.09522, 2023. 2, 3",
|
| 1464 |
+
"[33] Jinglong Wang, Xiawei Li, Jing Zhang, Qingyuan Xu, Qin Zhou, Qian Yu, Lu Sheng, and Dong Xu. Diffusion model is secretly a training-free open vocabulary semantic segmenter. arXiv preprint arXiv:2309.02773, 2023. 3",
|
| 1465 |
+
"[34] Yuxiang Wei, Yabo Zhang, Zhilong Ji, Jinfeng Bai, Lei Zhang, and Wangmeng Zuo. Elite: Encoding visual concepts into textual embeddings for customized text-to-image generation. arXiv preprint arXiv:2302.13848, 2023. 2, 3, 6, 7",
|
| 1466 |
+
"[35] Weijia Wu, Yuzhong Zhao, Mike Zheng Shou, Hong Zhou, and Chunhua Shen. Diffumask: Synthesizing images with pixel-level annotations for semantic segmentation using diffusion models. arXiv preprint arXiv:2303.11681, 2023. 3",
|
| 1467 |
+
"[36] Changming Xiao, Qi Yang, Feng Zhou, and Changshui Zhang. From text to mask: Localizing entities using the attention of text-to-image diffusion models. arXiv preprint arXiv:2309.04109, 2023. 3",
|
| 1468 |
+
"[37] Jiazheng Xu, Xiao Liu, Yuchen Wu, Yuxuan Tong, Qinkai Li, Ming Ding, Jie Tang, and Yuxiao Dong. Imagereward: Learning and evaluating human preferences for text-to-image generation. arXiv preprint arXiv:2304.05977, 2023. 6, 11, 12"
|
| 1469 |
+
],
|
| 1470 |
+
"bbox": [
|
| 1471 |
+
78,
|
| 1472 |
+
90,
|
| 1473 |
+
470,
|
| 1474 |
+
898
|
| 1475 |
+
],
|
| 1476 |
+
"page_idx": 9
|
| 1477 |
+
},
|
| 1478 |
+
{
|
| 1479 |
+
"type": "ref_text",
|
| 1480 |
+
"text": "[38] Yuxin Zhang, Weiming Dong, Fan Tang, Nisha Huang, Haibin Huang, Chongyang Ma, Tong-Yee Lee, Oliver Deussen, and Changsheng Xu. Prospect: Expanded conditioning for the personalization of attribute-aware image generation. arXiv preprint arXiv:2305.16225, 2023. 2",
|
| 1481 |
+
"bbox": [
|
| 1482 |
+
501,
|
| 1483 |
+
90,
|
| 1484 |
+
893,
|
| 1485 |
+
161
|
| 1486 |
+
],
|
| 1487 |
+
"page_idx": 9
|
| 1488 |
+
},
|
| 1489 |
+
{
|
| 1490 |
+
"type": "page_number",
|
| 1491 |
+
"text": "10",
|
| 1492 |
+
"bbox": [
|
| 1493 |
+
477,
|
| 1494 |
+
924,
|
| 1495 |
+
495,
|
| 1496 |
+
936
|
| 1497 |
+
],
|
| 1498 |
+
"page_idx": 9
|
| 1499 |
+
},
|
| 1500 |
+
{
|
| 1501 |
+
"type": "text",
|
| 1502 |
+
"text": "6. Supplementary",
|
| 1503 |
+
"text_level": 1,
|
| 1504 |
+
"bbox": [
|
| 1505 |
+
76,
|
| 1506 |
+
90,
|
| 1507 |
+
230,
|
| 1508 |
+
107
|
| 1509 |
+
],
|
| 1510 |
+
"page_idx": 10
|
| 1511 |
+
},
|
| 1512 |
+
{
|
| 1513 |
+
"type": "text",
|
| 1514 |
+
"text": "6.1. More Qualitative Comparison",
|
| 1515 |
+
"text_level": 1,
|
| 1516 |
+
"bbox": [
|
| 1517 |
+
76,
|
| 1518 |
+
114,
|
| 1519 |
+
344,
|
| 1520 |
+
132
|
| 1521 |
+
],
|
| 1522 |
+
"page_idx": 10
|
| 1523 |
+
},
|
| 1524 |
+
{
|
| 1525 |
+
"type": "text",
|
| 1526 |
+
"text": "As shown in Fig. 8, we provide more qualitative comparison between our proposed RealCustom and recent state-of-the-art methods of previous pseudo-word paradigm in the real-time customization scenario. Compared with existing state-of-the-arts, we could draw the following conclusions: (1) better similarity with the given subjects and better controllability with the given text at the same time, e.g., in the $7^{\\text{th}}$ row, the toy generated by RealCustom exactly on the Great Wall while existing works fail to adhere to the given text. Meanwhile, the toy generated by RealCustom exactly mimics all details of the given one while existing works fail to preserve them. (2) better image quality, i.e., with better aesthetic scores, e.g., the snow scene in the second row, the dirt road scene in the third row, etc. The conclusion adheres to our significant improvement (223.5% improvement) on ImageReward [37] in the main paper since ImageReward evaluates both controllability and image quality. (3) better generalization in open domain, i.e., for any given subjects, RealCustom could generate realistic images that consistently adhere to the given text for the given subjects in real-time, including the common subject like dogs (e.g., $5^{th}$ , $6^{th}$ rows) and rare subjects like the unique backpack (i.e., $1^{st}$ row), while existing state-of-the-arts works poorly on the rare subjects like the backpack in the first row, the special toy in the last row, etc. The reason lies that for the very first time, our proposed RealCustom progressively narrows a real text word from its initial general connotation into the unique subject, which completely get rid of the necessary corresponding between given subjects and learned pseudo-words, and therefore is no longer confined to be trained on object-datasets with limited categories.",
|
| 1527 |
+
"bbox": [
|
| 1528 |
+
76,
|
| 1529 |
+
138,
|
| 1530 |
+
472,
|
| 1531 |
+
609
|
| 1532 |
+
],
|
| 1533 |
+
"page_idx": 10
|
| 1534 |
+
},
|
| 1535 |
+
{
|
| 1536 |
+
"type": "text",
|
| 1537 |
+
"text": "6.2. More Visualization",
|
| 1538 |
+
"text_level": 1,
|
| 1539 |
+
"bbox": [
|
| 1540 |
+
76,
|
| 1541 |
+
619,
|
| 1542 |
+
261,
|
| 1543 |
+
635
|
| 1544 |
+
],
|
| 1545 |
+
"page_idx": 10
|
| 1546 |
+
},
|
| 1547 |
+
{
|
| 1548 |
+
"type": "text",
|
| 1549 |
+
"text": "We provide more comprehensive visualization of the narrowing down process of the real word of our proposed RealCustom in Fig. 9 and Fig. 10. Here, we provide four customization cases that with the same given text \"a toy in the desert\" and four different given subjects. The real text word used for narrowing is \"toy\". The mask is visualized by the Top-25% highest attention score regions of the real text word \"toy\". We visualize all the masks in the total 50 DDIM sampling steps. We could observe that the mask of the \"toy\" gradually being smoothly and accurately narrowed into the specific given subject. Meanwhile, even in these subject-relevant parts (Top-25% highest attention score regions of the real text word \"toy\" in these cases), their relevance is also different, e.g., in Fig. 9, the more important parts like the eyes of the first subject are given higher weight (brighter in the mask), in Fig. 10, the more important parts like the eyes of the second subject are given higher weight.",
|
| 1550 |
+
"bbox": [
|
| 1551 |
+
76,
|
| 1552 |
+
643,
|
| 1553 |
+
472,
|
| 1554 |
+
902
|
| 1555 |
+
],
|
| 1556 |
+
"page_idx": 10
|
| 1557 |
+
},
|
| 1558 |
+
{
|
| 1559 |
+
"type": "text",
|
| 1560 |
+
"text": "6.3. Impact of Different Real Word",
|
| 1561 |
+
"text_level": 1,
|
| 1562 |
+
"bbox": [
|
| 1563 |
+
500,
|
| 1564 |
+
90,
|
| 1565 |
+
772,
|
| 1566 |
+
107
|
| 1567 |
+
],
|
| 1568 |
+
"page_idx": 10
|
| 1569 |
+
},
|
| 1570 |
+
{
|
| 1571 |
+
"type": "text",
|
| 1572 |
+
"text": "The customization results in using different real text words are shown in Fig. 11. The real text word narrowed down for customization is highlighted in red. We could draw the following conclusions: (1) The customization results of our proposed RealCustom are quite robust, i.e., no matter we use how coarse-grained text word to represent the given subject, the generated subject in the customization results are always almost identical to the given subjects. For example, in the upper three rows, when we use \"corgi\", \"dog\" or \"animal\" to customize the given subject, the results all consistently adhere to the given subject. This phenomenon also validates the generalization and robustness of our proposed new paradigm RealCustom. (2) When using completely different word to represent the given subject, e.g., use \"parrot\" to represent a corgi, our proposed RealCustom opens a door for a new application, i.e., novel concept creation. That is, RealCustom will try to combine these two concepts and create a new one, e.g., generating a parrot with the appearance and character of the given brown corgi, as shown in the below three rows. This application will be very valuable for designing new characters in movies or games, etc.",
|
| 1573 |
+
"bbox": [
|
| 1574 |
+
496,
|
| 1575 |
+
113,
|
| 1576 |
+
893,
|
| 1577 |
+
431
|
| 1578 |
+
],
|
| 1579 |
+
"page_idx": 10
|
| 1580 |
+
},
|
| 1581 |
+
{
|
| 1582 |
+
"type": "page_number",
|
| 1583 |
+
"text": "11",
|
| 1584 |
+
"bbox": [
|
| 1585 |
+
475,
|
| 1586 |
+
924,
|
| 1587 |
+
493,
|
| 1588 |
+
936
|
| 1589 |
+
],
|
| 1590 |
+
"page_idx": 10
|
| 1591 |
+
},
|
| 1592 |
+
{
|
| 1593 |
+
"type": "image",
|
| 1594 |
+
"img_path": "images/6e0000e8f3c45e99536c7e1f29cb25845891aa352413f877f858da8cb3961da7.jpg",
|
| 1595 |
+
"image_caption": [
|
| 1596 |
+
"Figure 8. Qualitative comparison between our proposed RealCustom and recent state-of-the-art methods of previous pseudo-word paradigm in the real-time customization scenario. We could conclude that (1) compared with existing state-of-the-arts, RealCustom shows much better similarity with the given subjects and better controllability with the given text at the same time, e.g., in the $7^{\\text{th}}$ row, the toy generated by RealCustom exactly on the Great Wall while existing works fail to adhere to the given text. Meanwhile, the toy generated by RealCustom exactly mimics all details of the given one while existing works fail to preserve them. (2) RealCustom generates customization images with much better quality, i.e., better aesthetic scores, e.g., the snow scene in the second row, the dirt road scene in the third row, etc. The conclusion adheres to our significant improvement (223.5% improvement) on ImageReward [37] in the main paper since ImageReward evaluates both controllability and image quality. (3) RealCustom shows better generalization in open domain, i.e., for any given subjects, RealCustom could generate realistic images that consistently adhere to the given text for the given subjects in real-time, including the common subject like dogs $(e.g., 5^{th}, 6^{th}$ rows) and rare subjects like the unique backpack $(i.e., 1^{st}$ row), while existing state-of-the-arts works poorly on the rare subjects like the backpack in the first row, the special toy in the last row, etc."
|
| 1597 |
+
],
|
| 1598 |
+
"image_footnote": [],
|
| 1599 |
+
"bbox": [
|
| 1600 |
+
84,
|
| 1601 |
+
125,
|
| 1602 |
+
893,
|
| 1603 |
+
696
|
| 1604 |
+
],
|
| 1605 |
+
"page_idx": 11
|
| 1606 |
+
},
|
| 1607 |
+
{
|
| 1608 |
+
"type": "page_number",
|
| 1609 |
+
"text": "12",
|
| 1610 |
+
"bbox": [
|
| 1611 |
+
475,
|
| 1612 |
+
924,
|
| 1613 |
+
493,
|
| 1614 |
+
936
|
| 1615 |
+
],
|
| 1616 |
+
"page_idx": 11
|
| 1617 |
+
},
|
| 1618 |
+
{
|
| 1619 |
+
"type": "image",
|
| 1620 |
+
"img_path": "images/0878095296ba9482e8912170c0fb90e003b63d62a193533b29404d12c6143ed0.jpg",
|
| 1621 |
+
"image_caption": [
|
| 1622 |
+
"Figure 9. Illustration of gradually narrowing the real words into the given subjects. Here we provide two customization cases that with the same given text \"a toy in the desert\" and two different given subjects. The real text word used for narrowing is \"toy\". The mask is visualized by the Top-25% highest attention score regions of the real text word \"toy\". We visualize all the masks in the total 50 DDIM sampling steps, which are shown on the left. We could observe that the mask of the \"toy\" gradually being smoothly and accurately narrowed into the specific given subject. Meanwhile, even in these subject-relevant parts (Top-25% highest attention score regions of the real text word \"toy\" in these cases), their relevance is also different, e.g., the more important parts like the eyes of the first subject are given higher weight (brighter in the mask)."
|
| 1623 |
+
],
|
| 1624 |
+
"image_footnote": [],
|
| 1625 |
+
"bbox": [
|
| 1626 |
+
86,
|
| 1627 |
+
88,
|
| 1628 |
+
890,
|
| 1629 |
+
795
|
| 1630 |
+
],
|
| 1631 |
+
"page_idx": 12
|
| 1632 |
+
},
|
| 1633 |
+
{
|
| 1634 |
+
"type": "page_number",
|
| 1635 |
+
"text": "13",
|
| 1636 |
+
"bbox": [
|
| 1637 |
+
475,
|
| 1638 |
+
924,
|
| 1639 |
+
493,
|
| 1640 |
+
936
|
| 1641 |
+
],
|
| 1642 |
+
"page_idx": 12
|
| 1643 |
+
},
|
| 1644 |
+
{
|
| 1645 |
+
"type": "image",
|
| 1646 |
+
"img_path": "images/ee30e5f8915c2150a4a78f846d8cc145dfdfd4376dd1cc075d1c52ded7b965b4.jpg",
|
| 1647 |
+
"image_caption": [
|
| 1648 |
+
"Figure 10. Illustration of gradually narrowing the real words into the given subjects. Here we provide two customization cases that with the same given text \"a toy in the desert\" and two different given subjects. The real text word used for narrowing is \"toy\". The mask is visualized by the Top-25% highest attention score regions of the real text word \"toy\". We visualize all the masks in the total 50 DDIM sampling steps, which are shown on the left. We could observe that the mask of the \"toy\" gradually being smoothly and accurately narrowed into the specific given subject. Meanwhile, even in these subject-relevant parts (Top-25% highest attention score regions of the real text word \"toy\" in these cases), their relevance is also different, e.g., the more important parts like the eyes of the second subject are given higher weight (brighter in the mask)."
|
| 1649 |
+
],
|
| 1650 |
+
"image_footnote": [],
|
| 1651 |
+
"bbox": [
|
| 1652 |
+
86,
|
| 1653 |
+
88,
|
| 1654 |
+
890,
|
| 1655 |
+
795
|
| 1656 |
+
],
|
| 1657 |
+
"page_idx": 13
|
| 1658 |
+
},
|
| 1659 |
+
{
|
| 1660 |
+
"type": "page_number",
|
| 1661 |
+
"text": "14",
|
| 1662 |
+
"bbox": [
|
| 1663 |
+
475,
|
| 1664 |
+
924,
|
| 1665 |
+
493,
|
| 1666 |
+
935
|
| 1667 |
+
],
|
| 1668 |
+
"page_idx": 13
|
| 1669 |
+
},
|
| 1670 |
+
{
|
| 1671 |
+
"type": "image",
|
| 1672 |
+
"img_path": "images/d6fd9b3bf1248c642bb1893c1e77eee683ada7d6382aec049f31842c2e333550.jpg",
|
| 1673 |
+
"image_caption": [
|
| 1674 |
+
"given subject"
|
| 1675 |
+
],
|
| 1676 |
+
"image_footnote": [],
|
| 1677 |
+
"bbox": [
|
| 1678 |
+
88,
|
| 1679 |
+
383,
|
| 1680 |
+
210,
|
| 1681 |
+
477
|
| 1682 |
+
],
|
| 1683 |
+
"page_idx": 14
|
| 1684 |
+
},
|
| 1685 |
+
{
|
| 1686 |
+
"type": "image",
|
| 1687 |
+
"img_path": "images/2530f0f884082956392f8ddc1535717f0b2f59f8288e281a00464931f5a3e1eb.jpg",
|
| 1688 |
+
"image_caption": [
|
| 1689 |
+
"a corgi is lying in the bed"
|
| 1690 |
+
],
|
| 1691 |
+
"image_footnote": [],
|
| 1692 |
+
"bbox": [
|
| 1693 |
+
250,
|
| 1694 |
+
276,
|
| 1695 |
+
439,
|
| 1696 |
+
422
|
| 1697 |
+
],
|
| 1698 |
+
"page_idx": 14
|
| 1699 |
+
},
|
| 1700 |
+
{
|
| 1701 |
+
"type": "image",
|
| 1702 |
+
"img_path": "images/cfbfc849368cc645af2f7d52ee1fee6f094435f809dd3ada89d2b8df99721aef.jpg",
|
| 1703 |
+
"image_caption": [
|
| 1704 |
+
"a cat is lying in the bed"
|
| 1705 |
+
],
|
| 1706 |
+
"image_footnote": [],
|
| 1707 |
+
"bbox": [
|
| 1708 |
+
250,
|
| 1709 |
+
448,
|
| 1710 |
+
439,
|
| 1711 |
+
594
|
| 1712 |
+
],
|
| 1713 |
+
"page_idx": 14
|
| 1714 |
+
},
|
| 1715 |
+
{
|
| 1716 |
+
"type": "image",
|
| 1717 |
+
"img_path": "images/1bd04e52da4a2659b33ee3b1301c064e5d37bcd58faf83dc3904bc1aa5039444.jpg",
|
| 1718 |
+
"image_caption": [
|
| 1719 |
+
"a dog is lying in the bed."
|
| 1720 |
+
],
|
| 1721 |
+
"image_footnote": [],
|
| 1722 |
+
"bbox": [
|
| 1723 |
+
467,
|
| 1724 |
+
276,
|
| 1725 |
+
656,
|
| 1726 |
+
422
|
| 1727 |
+
],
|
| 1728 |
+
"page_idx": 14
|
| 1729 |
+
},
|
| 1730 |
+
{
|
| 1731 |
+
"type": "image",
|
| 1732 |
+
"img_path": "images/b7ec9cb13e225aa49702de4f29da751bb9ed0a1c492532e948bf51ca3cda0b66.jpg",
|
| 1733 |
+
"image_caption": [
|
| 1734 |
+
"a parrot is lying in the bed.",
|
| 1735 |
+
"Figure 11. The customization results in using different real text words. The real text word narrowed down for customization is highlighted in red. We could draw the following conclusions: (1) The customization results of our proposed RealCustom are quite robust, i.e., no matter we use how coarse-grained text word to represent the given subject, the generated subject in the customization results are always almost identical to the given subjects. For example, in the upper three rows, when we use \"corgi\", \"dog\" or \"animal\" to customize the given subject, the results all consistently adhere to the given subject. This phenomenon also validates the generalization and robustness of our proposed new paradigm RealCustom. (2) When using completely different word to represent the given subject, e.g., use \"parrot\" to represent a corgi, our proposed RealCustom opens a door for a new application, i.e., novel concept creation. That is, RealCustom will try to combine these two concepts and create a new one, e.g., generating a parrot with the appearance and character of the given brown corgi, as shown in the below three rows. This application will be very valuable for designing new characters in movies or games, etc."
|
| 1736 |
+
],
|
| 1737 |
+
"image_footnote": [],
|
| 1738 |
+
"bbox": [
|
| 1739 |
+
467,
|
| 1740 |
+
448,
|
| 1741 |
+
656,
|
| 1742 |
+
594
|
| 1743 |
+
],
|
| 1744 |
+
"page_idx": 14
|
| 1745 |
+
},
|
| 1746 |
+
{
|
| 1747 |
+
"type": "image",
|
| 1748 |
+
"img_path": "images/d0a1e248c03a0f1c22d56dab01da0605a6a5d7cefb8a0fe9b313a0dfb6ce8850.jpg",
|
| 1749 |
+
"image_caption": [
|
| 1750 |
+
"an animal is lying in the bed"
|
| 1751 |
+
],
|
| 1752 |
+
"image_footnote": [],
|
| 1753 |
+
"bbox": [
|
| 1754 |
+
678,
|
| 1755 |
+
275,
|
| 1756 |
+
867,
|
| 1757 |
+
422
|
| 1758 |
+
],
|
| 1759 |
+
"page_idx": 14
|
| 1760 |
+
},
|
| 1761 |
+
{
|
| 1762 |
+
"type": "image",
|
| 1763 |
+
"img_path": "images/5b9b41ae8a6bb9b8d29ee303ddf6554435e0dd56cc6bb6ced8a36416e16e3315.jpg",
|
| 1764 |
+
"image_caption": [
|
| 1765 |
+
"a giraffe is lying in the bed."
|
| 1766 |
+
],
|
| 1767 |
+
"image_footnote": [],
|
| 1768 |
+
"bbox": [
|
| 1769 |
+
678,
|
| 1770 |
+
448,
|
| 1771 |
+
867,
|
| 1772 |
+
594
|
| 1773 |
+
],
|
| 1774 |
+
"page_idx": 14
|
| 1775 |
+
},
|
| 1776 |
+
{
|
| 1777 |
+
"type": "page_number",
|
| 1778 |
+
"text": "15",
|
| 1779 |
+
"bbox": [
|
| 1780 |
+
477,
|
| 1781 |
+
924,
|
| 1782 |
+
493,
|
| 1783 |
+
935
|
| 1784 |
+
],
|
| 1785 |
+
"page_idx": 14
|
| 1786 |
+
}
|
| 1787 |
+
]
|
2403.00xxx/2403.00483/619283d9-9e07-40aa-a0f5-3db958d84022_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2403.00xxx/2403.00483/619283d9-9e07-40aa-a0f5-3db958d84022_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0544a0ea30f31c1a0ade687f083a620abae0b33c6b601e65790bbf3b4a06ad76
|
| 3 |
+
size 14014848
|
2403.00xxx/2403.00483/full.md
ADDED
|
@@ -0,0 +1,339 @@
|
| 1 |
+
# RealCustom: Narrowing Real Text Word for Real-Time Open-Domain Text-to-Image Customization
|
| 2 |
+
|
| 3 |
+
Mengqi Huang $^{1*}$ , Zhendong Mao $^{1\dagger}$ , Mingcong Liu $^{2}$ , Qian He $^{2}$ , Yongdong Zhang $^{1}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup> University of Science and Technology of China; <sup>2</sup>ByteDance Inc.
|
| 6 |
+
|
| 7 |
+
{huangmq}@mail.ustc.edu.cn, {zdmao, zhyd73}@ustc.edu.cn, {liumingcong, heqian}@bytedance.com
|
| 8 |
+
|
| 9 |
+
# Abstract
|
| 10 |
+
|
| 11 |
+
Text-to-image customization, which aims to synthesize text-driven images for the given subjects, has recently revolutionized content creation. Existing works follow the pseudo-word paradigm, i.e., represent the given subjects as pseudo-words and then compose them with the given text. However, the inherent entangled influence scope of pseudo-words with the given text results in a dual-optimum paradox, i.e., the similarity of the given subjects and the controllability of the given text could not be optimal simultaneously. We present RealCustom that, for the first time, disentangles similarity from controllability by precisely limiting subject influence to relevant parts only, achieved by gradually narrowing a real text word from its general connotation to the specific subject and using its cross-attention to distinguish relevance. Specifically, RealCustom introduces a novel "train-inference" decoupled framework: (1) during training, RealCustom learns general alignment between visual conditions and original textual conditions by a novel adaptive scoring module to adaptively modulate influence quantity; (2) during inference, a novel adaptive mask guidance strategy is proposed to iteratively update the influence scope and influence quantity of the given subjects to gradually narrow the generation of the real text word. Comprehensive experiments demonstrate the superior real-time customization ability of RealCustom in the open domain, achieving both unprecedented similarity of the given subjects and controllability of the given text for the first time. The project page is https://corleone-huang.github.io/realcustom/.
|
| 12 |
+
|
| 13 |
+
# 1. Introduction
|
| 14 |
+
|
| 15 |
+
Recent significant advances in the customization of pre-trained large-scale text-to-image models [6, 24, 25, 28] (i.e., text-to-image customization) have revolutionized content cre
|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
|
| 19 |
+

|
| 20 |
+
Figure 1. Comparison between the existing paradigm and ours. (a) The existing paradigm represents the given subject as pseudo-words (e.g., $S^{*}$), which has entangled the same entire influence scope with the given text, resulting in the dual-optimum paradox, i.e., the similarity for the given subject and the controllability for the given text could not achieve optimum simultaneously. (b) We propose RealCustom, a novel paradigm that, for the first time, disentangles similarity from controllability by precisely limiting the given subjects to influence only the relevant parts while the remaining parts are purely controlled by the given text. This is achieved by iteratively updating the influence scope and influence quantity of the given subjects. (c) The quantitative comparison shows that our paradigm achieves superior similarity and controllability compared to the state of the art of the existing paradigm. CLIP-image score (CLIP-I) and CLIP-text score (CLIP-T) are used to evaluate similarity and controllability. Refer to the experiments for details.
|
| 21 |
+
|
| 22 |
+

|
| 23 |
+
|
| 24 |
+
ation, receiving rapidly growing research interest from both academia and industry. This task empowers pre-trained models with the ability to generate imaginative text-driven scenes for subjects specified by users (e.g., a person's closest friends or favorite paintings), which is a foundation for AI-generated content (AIGC) and real-world applications such as personal image&video creation [7]. The primary goal of customization is dual-faceted: (1) high-quality similarity, i.e., the target subjects in the generated images should closely mirror the given subjects; (2) high-quality control-
|
| 25 |
+
|
| 26 |
+

|
| 27 |
+
Figure 2. Generated customization results of our proposed novel paradigm RealCustom. Given a single image representing the given subject in the open domain (any subjects, portrait painting, favorite toys, etc.), RealCustom could generate realistic images that consistently adhere to the given text for the given subjects in real-time (without any test-time optimization steps).
|
| 28 |
+
|
| 29 |
+
lability, i.e., the remaining subject-irrelevant parts should consistently adhere to the control of the given text.
|
| 30 |
+
|
| 31 |
+
Existing literature follows the pseudo-word paradigm, i.e., (1) learning pseudo-words $(e.g., S^{*}[10]$ or rare-tokens [27]) to represent the given subjects; (2) composing these pseudo-words with the given text for the customized generation. Recent studies have focused on learning more comprehensive pseudo-words [1, 8, 22, 32, 38] to capture more subject information, e.g., different pseudo-words for different diffusion timesteps [1, 38] or layers [32]. Meanwhile, others propose to speed up pseudo-word learning by training an encoder [11, 18, 30, 34] on object-datasets [17]. In parallel, based on the learned pseudo-words, many works further finetune the pre-trained models [16, 18, 27, 34] or add additional adapters [30] for higher similarity. As more information of the given subjects is introduced into pretrained models, the risk of overfitting increases, leading to the degradation of controllability. Therefore, various regularizations $(e.g., l_{1}$ penalty [10, 16, 34], prior-preservation loss [27]) are used to maintain controllability, which in turn sacrifices similarity. Essentially, existing methods are trapped in a dual-optimum paradox, i.e., the similarity and controllability can not be optimal simultaneously.
|
| 32 |
+
|
| 33 |
+
We argue that the fundamental cause of this dual-optimum paradox is rooted in the existing pseudo-word paradigm, where the similarity component (i.e., the pseudowords) to generate the given subjects is intrinsically entangled with the controllability component (i.e., the given text) to generate subject-irrelevant parts, causing an overall conflict in the generation, as illustrated in Fig. 1(a). Specifically, this entanglement is manifested in the same entire influence scope of these two components. i.e., both the pseudo-words and the given text affect all generation
|
| 34 |
+
|
| 35 |
+
regions. This is because each region is updated as a weighted sum of all word features through built-in textual cross-attention in pre-trained text-to-image diffusion models. Therefore, increasing the influence of the similarity component will simultaneously strengthen the similarity in the subject-relevant parts and weaken the influence of the given text in other irrelevant ones, causing the degradation of controllability, and vice versa. Moreover, the necessary correspondence between pseudo-words and subjects confines existing methods to either lengthy test-time optimization [10, 16, 27] or training [18, 34] on object-datasets [17] that have limited categories. As a result, the existing paradigm inherently has poor generalization capability for real-time open-domain scenarios in the real world.
|
| 36 |
+
|
| 37 |
+
In this paper, we present RealCustom, a novel customization paradigm that, for the first time, disentangles the similarity component from the controllability component by precisely limiting the given subjects to influence only the relevant parts while maintaining other irrelevant ones purely controlled by the given texts, achieving both high-quality similarity and controllability in a real-time open-domain scenario, as shown in Fig. 2. The core idea of RealCustom is that, instead of representing subjects as pseudo-words, we could progressively narrow down the real text words (e.g., "toy") from their initial general connotation (e.g., various kinds of toys) to the specific subjects (e.g., the unique sloth toy), wherein the superior text-image alignment in pre-trained models' cross-attention can be leveraged to distinguish subject relevance, as illustrated in Fig. 1(b). Specifically, at each generation step, (1) the influence scope of the given subject is identified by the target real word's cross-attention, with a higher attention score indicating greater relevance; (2) this influence scope then determines the influence quantity of the given subject at the current step, i.e., the amount of subject information to be infused into this scope; (3) this influence quantity, in turn, shapes a more accurate influence scope for the next step, as each step's generation result is based on the output of the previous one. Through this iterative updating, the generation result of the real word is smoothly and accurately transformed into the given subject, while other irrelevant parts are completely controlled by the given text.
|
| 38 |
+
|
| 39 |
+
Technically, RealCustom introduces an innovative "train-inference" decoupled framework: (1) During training, RealCustom only learns the generalized alignment capabilities between visual conditions and pre-trained models' original text conditions on large-scale text-image datasets through a novel adaptive scoring module, which modulates the influence quantity based on text and currently generated features. (2) During inference, real-time customization is achieved by a novel adaptive mask guidance strategy, which gradually narrows down a real text word based on the learned alignment capabilities. Specif
|
| 40 |
+
|
| 41 |
+
ically, (1) the adaptive scoring module first estimates the visual features' correlation scores with the text features and currently generated features, respectively. Then a timestep-aware schedule is applied to fuse these two scores. A subset of key visual features, chosen based on the fused score, is incorporated into pre-trained diffusion models by extending its textual cross-attention with another visual cross-attention. (2) The adaptive mask guidance strategy consists of a text-to-image (T2I) branch (with the visual condition set to 0) and a text&image-to-image (TI2I) branch (with the visual condition set to the given subject). Firstly, all layers' cross-attention maps of the target real word in the T2I branch are aggregated into a single one, selecting only high-attention regions as the influence scope. Secondly, in the TI2I branch, the influence scope is multiplied by currently generated features to produce the influence quantity and concurrently multiplied by the outputs of the visual cross-attention to avoid influencing subject-irrelevant parts.
|
| 42 |
+
|
| 43 |
+
Our contributions are summarized as follows:
|
| 44 |
+
|
| 45 |
+
Concepts. For the first time, we (1) point out the dual-optimum paradox is rooted in the existing pseudo-word paradigm's entangled influence scope between the similarity (i.e., pseudo-words representing the given subjects) and controllability (i.e., the given texts); (2) present RealCustom, a novel paradigm that achieves disentanglement by gradually narrowing down real words into the given subjects, wherein the given subjects' influence scope is limited based on the cross-attention of the real words.
|
| 46 |
+
|
| 47 |
+
Technology. The proposed RealCustom introduces a novel "train-inference" decoupled framework: (1) during training, learning generalized alignment between visual conditions and original text conditions by the adaptive scoring module to modulate influence quantity; (2) during inference, the adaptive mask guidance strategy is proposed to narrow down a real word by iteratively updating the given subject's influence scope and quantity.
|
| 48 |
+
|
| 49 |
+
Significance. For the first time, we achieve (1) superior similarity and controllability simultaneously, as shown in Fig. 1(c); (2) real-time open-domain customization ability.
|
| 50 |
+
|
| 51 |
+
# 2. Related Works
|
| 52 |
+
|
| 53 |
+
# 2.1. Text-to-Image Customization
|
| 54 |
+
|
| 55 |
+
Existing customization methods follow the pseudo-words paradigm, i.e., representing the given subjects as pseudo-words and then composing them with the given text for customization. Because of the necessary correspondence between the pseudo-words and the given subjects, existing works are confined to either cumbersome test-time optimization-based methods [1, 8-10, 16, 22, 27, 32] or encoder-based methods [7, 11, 14, 18, 30, 34] trained on object-datasets with limited categories. For example, in the optimization-based stream, DreamBooth [27] uses a rare token as the pseudo-word and
|
| 56 |
+
|
| 57 |
+
further fine-tunes the entire pre-trained diffusion model for better similarity. Custom Diffusion [16] instead finds a subset of key parameters and only optimizes them. The main drawback of this stream is that it requires lengthy optimization times for each new subject. As for the encoder-based stream, the recent ELITE [34] uses a local mapping network to improve similarity, while BLIP-Diffusion [18] introduces a multimodal encoder for better subject representation. These encoder-based works usually show less similarity than optimization-based works and generalize poorly to unseen categories in training. In summary, the entangled influence scope of pseudo-words and the given text naturally limits the current works from achieving both optimal similarity and controllability, as well as hindering real-time open-domain customization.
|
| 58 |
+
|
| 59 |
+
# 2.2. Cross-Attention in Diffusion Models
|
| 60 |
+
|
| 61 |
+
Text guidance in modern large-scale text-to-image diffusion models [2, 6, 24, 25, 28] is generally performed using the cross-attention mechanism. Therefore, many works propose to manipulate the cross-attention map for text-driven editing [3, 12] on generated images or real images via inversion [31], e.g., Prompt-to-Prompt [12] proposes to reassign the cross-attention weight to edit the generated image. Another branch of work focuses on improving cross-attention either by adding additional spatial control [20, 21] or post-processing to improve semantic alignment [5, 19]. Meanwhile, a number of works [33, 35, 36] propose using cross-attention in diffusion models for discriminative tasks such as segmentation. However, different from the existing literature, the core idea of RealCustom is to gradually narrow a real text word from its initial general connotation (e.g., whose cross-attention could represent any toy with various types of shapes and details) to the unique given subject (e.g., whose cross-attention accurately represents the unique toy), which is completely unexplored.
|
| 62 |
+
|
| 63 |
+
# 3. Methodology
|
| 64 |
+
|
| 65 |
+
In this study, we focus on the most general customization scenario: with only a single image representing the given subject, generating new high-quality images for that subject from the given text. The generated subject may vary in location, pose, style, etc., yet it should maintain high similarity with the given one. The remaining parts should consistently adhere to the given text, thus ensuring controllability.
|
| 66 |
+
|
| 67 |
+
The proposed RealCustom introduces a novel "train-inference" decoupled paradigm as illustrated in Fig. 3. Specifically, during training, RealCustom learns general alignment between visual conditions and the original text conditions of pre-trained models. During inference, based on the learned alignment capability, RealCustom gradually narrows down the generation of the real text words (e.g., "toy") into the given subject (e.g., the unique brown sloth
|
| 68 |
+
|
| 69 |
+

|
| 70 |
+
(a) our training paradigm
|
| 71 |
+
|
| 72 |
+

|
| 73 |
+
(b) our inference paradigm (illustrated at timestep $t$)
|
| 74 |
+
Figure 3. Illustration of our proposed RealCustom, which employs a novel "train-inference" decoupled framework: (a) During training, general alignment between visual and original text conditions is learned by the proposed adaptive scoring module, which accurately derives visual conditions based on text and currently generated features. (b) During inference, progressively narrowing down a real word (e.g., "toy") from its initial general connotation to the given subject (e.g., the unique brown sloth toy) by the proposed adaptive mask guidance strategy, which consists of two branches, i.e., a text-to-image (T2I) branch where the visual condition is set to 0, and a text&image-to-image (TI2I) branch where the visual condition is set to the given subject. The T2I branch aims to calculate the influence scope by aggregating the target real word's (e.g., "toy") cross-attention, while the TI2I branch aims to inject the influence quantity into this scope.
|
| 75 |
+
|
| 76 |
+
toy) by iterative updating each step's influence scope and influence quantity of the given subject.
|
| 77 |
+
|
| 78 |
+
We first briefly introduce the preliminaries in Sec. 3.1. The training and inference paradigm of RealCustom will be elaborated in detail in Sec. 3.2 and Sec. 3.3, respectively.
|
| 79 |
+
|
| 80 |
+
# 3.1. Preliminaries
|
| 81 |
+
|
| 82 |
+
Our paradigm is implemented over Stable Diffusion [25], which consists of two components, i.e., an autoencoder and a conditional UNet [26] denoiser. Firstly, given an image $\pmb{x} \in \mathbb{R}^{H \times W \times 3}$ , the encoder $\mathcal{E}(\cdot)$ of the autoencoder maps it into a lower-dimensional latent space as $\pmb{z} = \mathcal{E}(\pmb{x}) \in \mathbb{R}^{h \times w \times c}$ , where $f = \frac{H}{h} = \frac{W}{w}$ is the downsampling factor and $c$ stands for the latent channel dimension. The corresponding decoder $\mathcal{D}(\cdot)$ maps the latent vectors back to the
|
| 83 |
+
|
| 84 |
+
image as $\mathcal{D}(\mathcal{E}(\pmb{x}))\approx \pmb{x}$ . Secondly, the conditional denoiser $\epsilon_{\theta}(\cdot)$ is trained on this latent space to generate latent vectors based on the text condition $y$ . The pre-trained CLIP text encoder [23] $\tau_{\mathrm{text}}(\cdot)$ is used to encode the text condition $y$ into text features $\pmb{f}_{ct} = \tau_{\mathrm{text}}(y)$ . Then, the denoiser is trained with mean-squared loss:
|
| 85 |
+
|
| 86 |
+
$$
|
| 87 |
+
L := \mathbb{E}_{\boldsymbol{z} \sim \mathcal{E}(\boldsymbol{x}),\, \boldsymbol{f}_{ct},\, \epsilon \sim \mathcal{N}(\mathbf{0}, \mathrm{I}),\, t}\left[\left\| \epsilon - \epsilon_{\theta}\left(\boldsymbol{z}_{t}, t, \boldsymbol{f}_{ct}\right)\right\|_{2}^{2}\right], \tag{1}
|
| 88 |
+
$$
|
| 89 |
+
|
| 90 |
+
where $\epsilon$ denotes for the unscaled noise and $t$ is the timestep. $z_{t}$ is the latent vector that noised according to $t$ :
|
| 91 |
+
|
| 92 |
+
$$
|
| 93 |
+
\boldsymbol {z} _ {t} = \sqrt {\hat {\alpha} _ {t}} \boldsymbol {z} _ {0} + \sqrt {1 - \hat {\alpha} _ {t}} \epsilon , \tag {2}
|
| 94 |
+
$$
|
| 95 |
+
|
| 96 |
+
where $\hat{\alpha}_t\in [0,1]$ is the hyper-parameter that modulates the quantity of noise added. Larger $t$ means smaller $\hat{\alpha}_t$ and
|
| 97 |
+
|
| 98 |
+
thereby a more noised latent vector $z_{t}$ . During inference, a random Gaussian noise $z_{T}$ is iteratively denoised to $z_{0}$ , and the final generated image is obtained through $x' = \mathcal{D}(z_0)$ .
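To make the noising of Eq. 2 concrete, here is a minimal PyTorch sketch; the linear $\hat{\alpha}_t$ schedule and the tensor shapes are illustrative assumptions, not the actual Stable Diffusion schedule.

```python
import torch

def noise_latent(z0: torch.Tensor, t: int, alpha_bar: torch.Tensor) -> torch.Tensor:
    """Forward noising of Eq. 2: z_t = sqrt(a_t) * z_0 + sqrt(1 - a_t) * eps."""
    eps = torch.randn_like(z0)        # unscaled Gaussian noise epsilon
    a_t = alpha_bar[t]                # cumulative schedule \hat{alpha}_t
    return a_t.sqrt() * z0 + (1.0 - a_t).sqrt() * eps

# Toy usage: a 4-channel 64x64 latent and a hypothetical 1000-step schedule.
alpha_bar = torch.linspace(0.999, 0.001, 1000)
z0 = torch.randn(1, 4, 64, 64)
z500 = noise_latent(z0, t=500, alpha_bar=alpha_bar)
```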
|
| 99 |
+
|
| 100 |
+
The incorporation of text condition in Stable Diffusion is implemented as textual cross-attention:
|
| 101 |
+
|
| 102 |
+
$$
|
| 103 |
+
\operatorname{Attention}(Q, K, V) = \operatorname{Softmax}\left(\frac{Q K^{\top}}{\sqrt{d}}\right) V, \tag{3}
|
| 104 |
+
$$
|
| 105 |
+
|
| 106 |
+
where the query $Q = W_{Q} \cdot f_{i}$ , key $K = W_{K} \cdot f_{ct}$ and value $V = W_{V} \cdot f_{ct}$ . $W_{Q}, W_{K}, W_{V}$ are weight parameters of query, key and value projection layers. $f_{i}, f_{ct}$ are the latent image features and text features, and $d$ is the channel dimension of key and query features. The latent image feature is then updated with the attention block output.
|
| 107 |
+
|
| 108 |
+
# 3.2. Training Paradigm
|
| 109 |
+
|
| 110 |
+
As depicted in Fig. 3(a), the text $y$ and image $x$ are first encoded into text features $\pmb{f}_{ct} \in \mathbb{R}^{n_t \times c_t}$ and image features $\pmb{f}_{ci} \in \mathbb{R}^{n_i \times c_i}$ by the pre-trained CLIP text/image encoders [23] respectively. Here, $n_t, c_t, n_i, c_i$ are text feature number/dimension and image feature number/dimension, respectively. Afterward, the adaptive scoring module takes the text features $\pmb{f}_{ct}$ , currently generated features $\pmb{z}_t \in \mathbb{R}^{h \times w \times c}$ , and timestep $t$ as inputs to estimate the score for each feature in $\pmb{f}_{ci}$ , selecting a subset of key ones as the visual condition $\hat{\pmb{f}}_{ci} \in \mathbb{R}^{\hat{n}_i \times c_i}$ , where $\hat{n}_i < n_i$ is the selected image feature number. Next, we extend textual cross-attention with another visual cross-attention to incorporate the visual condition $\hat{\pmb{f}}_{ci}$ . Specifically, Eq. 3 is rewritten as:
|
| 111 |
+
|
| 112 |
+
$$
|
| 113 |
+
\operatorname{Attention}(Q, K, V, K_{i}, V_{i}) = \operatorname{Softmax}\left(\frac{Q K^{\top}}{\sqrt{d}}\right) V + \operatorname{Softmax}\left(\frac{Q K_{i}^{\top}}{\sqrt{d}}\right) V_{i}, \tag{4}
|
| 114 |
+
$$
|
| 115 |
+
|
| 116 |
+
where the new key $K_{i} = W_{Ki} \cdot \hat{f}_{ci}$ , value $V_{i} = W_{Vi} \cdot \hat{f}_{ci}$ are added. $W_{Ki}$ and $W_{Vi}$ are weight parameters. During training, only the adaptive scoring module and projection layers $W_{Ki}, W_{Vi}$ in each attention block are trainable, while other pre-trained models' weight remains frozen.
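To make Eq. 4 concrete, here is a minimal PyTorch sketch of the extended cross-attention block, assuming single-head attention; the layer names and dimensions are illustrative, not the authors' code. Only the added visual projections ($W_{Ki}$, $W_{Vi}$) are left trainable, mirroring the training setup described above.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class ExtendedCrossAttention(nn.Module):
    """Sketch of Eq. 4: frozen textual cross-attention plus an added visual branch."""

    def __init__(self, dim: int, text_dim: int, image_dim: int):
        super().__init__()
        self.scale = dim ** -0.5
        # Pre-trained (frozen) textual projections W_Q, W_K, W_V.
        self.to_q = nn.Linear(dim, dim, bias=False)
        self.to_k = nn.Linear(text_dim, dim, bias=False)
        self.to_v = nn.Linear(text_dim, dim, bias=False)
        for p in (*self.to_q.parameters(), *self.to_k.parameters(), *self.to_v.parameters()):
            p.requires_grad_(False)
        # Newly added (trainable) visual projections W_Ki, W_Vi.
        self.to_k_img = nn.Linear(image_dim, dim, bias=False)
        self.to_v_img = nn.Linear(image_dim, dim, bias=False)

    def forward(self, f_i, f_ct, f_ci_hat):
        # f_i: latent image features, f_ct: text features, f_ci_hat: selected visual features.
        q = self.to_q(f_i)
        k_t, v_t = self.to_k(f_ct), self.to_v(f_ct)
        k_i, v_i = self.to_k_img(f_ci_hat), self.to_v_img(f_ci_hat)
        attn_t = F.softmax(q @ k_t.transpose(-2, -1) * self.scale, dim=-1)
        attn_i = F.softmax(q @ k_i.transpose(-2, -1) * self.scale, dim=-1)
        return attn_t @ v_t + attn_i @ v_i  # textual term + visual term of Eq. 4
```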
|
| 117 |
+
|
| 118 |
+
Adaptive Scoring Module. On the one hand, the generation of the diffusion model itself, by nature, is a coarse-to-fine process with noise removed and details added step by step. In this process, different steps focus on different degrees of subject detail [2], spanning from global structures in the early to local textures in the latter. Accordingly, the importance of each image feature also dynamically changes. To smoothly narrow the real text word, the image condition of the subject should also adapt synchronously, providing guidance from coarse to fine grain. This requires equipping RealCustom with the ability to estimate the importance score of different image features. On the other hand, utilizing all image features as visual conditions results in a "train-inference" gap. This arises because,
|
| 119 |
+
|
| 120 |
+
unlike the training stage, where the same image serves as both the visual condition and the input to the denoiser $\epsilon_{\theta}$ , during inference the given subjects and the generation results should maintain similarity only in the subject part. Therefore, this gap can degrade both similarity and controllability in inference.
|
| 121 |
+
|
| 122 |
+
The above rationale motivates the adaptive scoring module, which provides smooth and accurate visual conditions for customization. As illustrated in Fig. 4, the text $\pmb{f}_{ct} \in \mathbb{R}^{n_t \times c_t}$ and currently generated features $\pmb{z}_t \in \mathbb{R}^{h \times w \times c} = \mathbb{R}^{n_z \times c}$ are first aggregated into the textual context $C_{\mathrm{textual}}$ and visual context $C_{\mathrm{visual}}$ through weighted pooling:
|
| 123 |
+
|
| 124 |
+
$$
|
| 125 |
+
\boldsymbol{A}_{\text{textual}} = \operatorname{Softmax}\left(\boldsymbol{f}_{ct} \boldsymbol{W}_{a}^{t}\right) \in \mathbb{R}^{n_{t} \times 1} \tag{5}
|
| 126 |
+
$$
|
| 127 |
+
|
| 128 |
+
$$
|
| 129 |
+
\boldsymbol{A}_{\text{visual}} = \operatorname{Softmax}\left(\boldsymbol{z}_{t} \boldsymbol{W}_{a}^{v}\right) \in \mathbb{R}^{n_{z} \times 1} \tag{6}
|
| 130 |
+
$$
|
| 131 |
+
|
| 132 |
+
$$
|
| 133 |
+
\boldsymbol{C}_{\text{textual}} = \boldsymbol{A}_{\text{textual}}^{\top} \boldsymbol{f}_{ct} \in \mathbb{R}^{1 \times c_{t}}, \quad \boldsymbol{C}_{\text{visual}} = \boldsymbol{A}_{\text{visual}}^{\top} \boldsymbol{z}_{t} \in \mathbb{R}^{1 \times c}, \tag{7}
|
| 134 |
+
$$
|
| 135 |
+
|
| 136 |
+
where $W_{a}^{t}\in \mathbb{R}^{c_{t}\times 1},W_{a}^{v}\in \mathbb{R}^{c\times 1}$ are weight parameters, and "Softmax" is operated in the number dimension. These contexts are then spatially replicated and concatenated with image features $f_{ci}\in \mathbb{R}^{n_i\times c_i}$ to estimate the textual score $S_{\mathrm{textual}}\in \mathbb{R}^{n_i\times 1}$ and visual score $S_{\mathrm{visual}}\in \mathbb{R}^{n_i\times 1}$ respectively. These two scores are predicted by two lightweight score-net, which are implemented as two-layer MLPs.
|
| 137 |
+
|
| 138 |
+
Considering that the textual features are roughly accurate and the generated features are gradually refined, a timestep-aware schedule is proposed to fuse these two scores:
|
| 139 |
+
|
| 140 |
+
$$
|
| 141 |
+
\boldsymbol{S} = \left(1 - \sqrt{\hat{\alpha}_{t}}\right) \boldsymbol{S}_{\text{textual}} + \sqrt{\hat{\alpha}_{t}} \boldsymbol{S}_{\text{visual}}, \tag{8}
|
| 142 |
+
$$
|
| 143 |
+
|
| 144 |
+
where $\sqrt{\hat{\alpha}_t}$ is the hyperparameter of pre-trained diffusion models that modulate the amount of noise added to generated features. Then a softmax activation is applied to the fused score since our focus is on highlighting the comparative significance of each image feature vis-à-vis its counterparts: $S = \operatorname{Softmax}(S)$ . The fused scores are multiplied with the image features to enable the learning of score-nets:
|
| 145 |
+
|
| 146 |
+
$$
|
| 147 |
+
\boldsymbol {f} _ {c i} = \boldsymbol {f} _ {c i} \circ (1 + S), \tag {9}
|
| 148 |
+
$$
|
| 149 |
+
|
| 150 |
+
where $\circ$ denotes the element-wise multiplication. Finally, given a Top-K ratio $\gamma_{\mathrm{num}} \in [0,1]$ , a subset of key features with the highest scores is selected as the output $\hat{\pmb{f}}_{ci} \in \mathbb{R}^{\hat{n}_i \times c_i}$ , where $\hat{n}_i = \gamma_{\mathrm{num}} n_i$ . To enable flexible inference with different $\gamma_{\mathrm{num}}$ without performance degradation, we propose to use a uniformly random ratio during training:
|
| 151 |
+
|
| 152 |
+
$$
|
| 153 |
+
\gamma_{\text{num}} = \operatorname{uniform}\left[\gamma_{\text{num}}^{\text{low}}, \gamma_{\text{num}}^{\text{high}}\right], \tag{10}
|
| 154 |
+
$$
|
| 155 |
+
|
| 156 |
+
where $\gamma_{\mathrm{num}}^{\mathrm{low}},\gamma_{\mathrm{num}}^{\mathrm{high}}$ are set to 0.3, 1.0, respectively.
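The whole adaptive scoring module (Eqs. 5-10) can be sketched as below. The hidden width, the SiLU activation, and the flattened input shapes are assumptions; the paper only specifies that the two score-nets are lightweight two-layer MLPs and that $\gamma_{\mathrm{num}}$ is sampled uniformly in $[0.3, 1.0]$ during training.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class AdaptiveScoring(nn.Module):
    """Sketch of the adaptive scoring module: weighted pooling, two score-nets,
    timestep-aware fusion, feature modulation, and Top-K selection."""

    def __init__(self, c_t: int, c: int, c_i: int, hidden: int = 256):
        super().__init__()
        self.pool_t = nn.Linear(c_t, 1, bias=False)   # W_a^t in Eq. 5
        self.pool_v = nn.Linear(c, 1, bias=False)     # W_a^v in Eq. 6
        # Two lightweight two-layer MLP score-nets (hidden width is an assumption).
        self.score_t = nn.Sequential(nn.Linear(c_i + c_t, hidden), nn.SiLU(), nn.Linear(hidden, 1))
        self.score_v = nn.Sequential(nn.Linear(c_i + c, hidden), nn.SiLU(), nn.Linear(hidden, 1))

    def forward(self, f_ct, z_t, f_ci, alpha_bar_t: float, gamma_num: float):
        # f_ct: (B, n_t, c_t) text features; z_t: (B, n_z, c) generated features
        # flattened over the spatial dims; f_ci: (B, n_i, c_i) image features.
        a_t = F.softmax(self.pool_t(f_ct), dim=1)                 # Eq. 5
        a_v = F.softmax(self.pool_v(z_t), dim=1)                  # Eq. 6
        ctx_t = a_t.transpose(1, 2) @ f_ct                        # Eq. 7, (B, 1, c_t)
        ctx_v = a_v.transpose(1, 2) @ z_t                         # Eq. 7, (B, 1, c)
        n_i = f_ci.shape[1]
        # Spatially replicate contexts and concatenate with each image feature.
        s_t = self.score_t(torch.cat([f_ci, ctx_t.expand(-1, n_i, -1)], dim=-1))
        s_v = self.score_v(torch.cat([f_ci, ctx_v.expand(-1, n_i, -1)], dim=-1))
        # Eq. 8: timestep-aware fusion, then softmax over the feature-number dim.
        s = (1 - alpha_bar_t ** 0.5) * s_t + (alpha_bar_t ** 0.5) * s_v
        s = F.softmax(s, dim=1)
        f_ci = f_ci * (1 + s)                                     # Eq. 9
        # Eq. 10: keep the Top-K features according to gamma_num.
        k = max(1, int(gamma_num * n_i))
        idx = s.squeeze(-1).topk(k, dim=1).indices
        return torch.gather(f_ci, 1, idx.unsqueeze(-1).expand(-1, -1, f_ci.shape[-1]))
```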
|
| 157 |
+
|
| 158 |
+
# 3.3. Inference Paradigm
|
| 159 |
+
|
| 160 |
+
The inference paradigm of RealCustom consists of two branches, i.e., a text-to-image (T2I) branch where the visual input is set to 0 and a text&image-to-image (TI2I) branch
|
| 161 |
+
|
| 162 |
+

|
| 163 |
+
Figure 4. Illustration of adaptive scoring module. Text features and currently generated features are first aggregated into the textual and visual context, which are then spatially concatenated with image features to predict textual and visual scores. These scores are then fused based on the current timestep. Ultimately, only a subset of the key features is selected based on the fused score.
|
| 164 |
+
|
| 165 |
+
where the visual input is set to the given subject, as illustrated in Fig. 3(b). These two branches are connected by our proposed adaptive mask guidance strategy. Specifically, given the previous step's output $z_{t}$ , a pure text conditional denoising process is performed in the T2I branch to get the output $z_{t-1}^{T}$ , where all layers' cross-attention maps of the target real word (e.g., "toy") are extracted and resized to the same resolution (the same as the largest map size, i.e., $64 \times 64$ in Stable Diffusion). The aggregated attention map is denoted as $M \in \mathbb{R}^{64 \times 64}$ . Next, a Top-K selection is applied, i.e., given the target ratio $\gamma_{\mathrm{scope}} \in [0,1]$ , only $\gamma_{\mathrm{scope}} \times 64 \times 64$ regions with the highest cross-attention scores will remain, while the rest will be set to 0. The selected cross-attention map $\bar{M}$ is normalized by its maximum value as:
|
| 166 |
+
|
| 167 |
+
$$
|
| 168 |
+
\hat {M} = \frac {\bar {M}}{\max (\bar {M})}, \tag {11}
|
| 169 |
+
$$
|
| 170 |
+
|
| 171 |
+
where $\max (\cdot)$ represents the maximum value. The rationale behind this is that even in these selected parts, the subject relevance of different regions is also different.
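A short sketch of how this influence scope could be computed from the target real word's cross-attention maps; averaging over layers and bilinear resizing are assumptions about details the text leaves open. The resulting $\hat{M}$ is the soft mask that later multiplies the visual cross-attention output in the TI2I branch (Eq. 12).

```python
import torch
import torch.nn.functional as F

def influence_scope(word_attn_maps, gamma_scope: float = 0.25, size: int = 64):
    """Sketch of Eq. 11: aggregate the target word's per-layer attention maps,
    keep the Top-K regions, and normalize by the maximum value.

    word_attn_maps: list of 2-D tensors (h_l, w_l), one per attention layer."""
    resized = [F.interpolate(m[None, None], size=(size, size), mode="bilinear",
                             align_corners=False)[0, 0] for m in word_attn_maps]
    M = torch.stack(resized).mean(0)                   # aggregated map, (64, 64)
    k = int(gamma_scope * M.numel())
    thresh = M.flatten().topk(k).values.min()          # score of the k-th kept region
    M_bar = torch.where(M >= thresh, M, torch.zeros_like(M))
    return M_bar / M_bar.max()                         # max normalization of Eq. 11
```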
|
| 172 |
+
|
| 173 |
+
In the TI2I branch, the influence scope $\hat{M}$ is first multiplied by currently generated feature $z_{t}$ to provide accurate visual conditions for current generation step. The reason is that only subject-relevant parts should be considered for the calculation of influence quantity. Secondly, $\hat{M}$ is multiplied by the visual cross-attention results to prevent negative impacts on the controllability of the given texts in other subject-irrelevant parts. Specifically, Eq. 4 is rewritten as:
|
| 174 |
+
|
| 175 |
+
$$
|
| 176 |
+
\operatorname{Attention}(Q, K, V, K_{i}, V_{i}) =
|
| 177 |
+
$$
|
| 178 |
+
|
| 179 |
+
$$
|
| 180 |
+
\operatorname{Softmax}\left(\frac{Q K^{\top}}{\sqrt{d}}\right) V + \left(\operatorname{Softmax}\left(\frac{Q K_{i}^{\top}}{\sqrt{d}}\right) V_{i}\right) \hat{M}, \tag{12}
|
| 181 |
+
$$
|
| 182 |
+
|
| 183 |
+
where the necessary resize operation is applied to match the size of $\hat{M}$ with the resolution of each cross-attention
|
| 184 |
+
|
| 185 |
+
block. The denoised output of TI2I branch is denoted as $z_{t-1}^{TI}$ . The classifier-free guidance [13] is extended to produce next step's denoised latent feature $z_{t-1}$ as:
|
| 186 |
+
|
| 187 |
+
$$
|
| 188 |
+
\boldsymbol {z} _ {t - 1} = \epsilon_ {\theta} (\emptyset) + \omega_ {t} \left(\boldsymbol {z} _ {t - 1} ^ {T} - \epsilon_ {\theta} (\emptyset)\right) + \omega_ {i} \left(\boldsymbol {z} _ {t - 1} ^ {T I} - \boldsymbol {z} _ {t - 1} ^ {T}\right), \tag {13}
|
| 189 |
+
$$
|
| 190 |
+
|
| 191 |
+
where $\epsilon_{\theta}(\emptyset)$ is the unconditional denoised output.
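The extended classifier-free guidance of Eq. 13 reduces to a simple combination of the three branch outputs; a minimal sketch, using the default guidance scales reported later in Sec. 4.1 (7.5 and 12.5):

```python
import torch

def extended_cfg(z_uncond: torch.Tensor, z_t2i: torch.Tensor, z_ti2i: torch.Tensor,
                 w_t: float = 7.5, w_i: float = 12.5) -> torch.Tensor:
    """Eq. 13: combine the unconditional output with the T2I and TI2I branch outputs."""
    return z_uncond + w_t * (z_t2i - z_uncond) + w_i * (z_ti2i - z_t2i)
```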
|
| 192 |
+
|
| 193 |
+
With the smooth and accurate influence quantity of the given subject injected into the current step, the generation of the real word will gradually be narrowed from its initial general connotation to the specific subject, which will shape a more precise influence scope for the generation of the next step. Through this iterative updating and generation, we achieve real-time customization where the similarity for the given subject is disentangled from the controllability for the given text, leading to an optimum for both. More importantly, since both the adaptive scoring module and the visual cross-attention layers are trained on general text-image datasets, the inference can be generally applied to any category by using any target real word, enabling excellent open-domain customization capability.
|
| 194 |
+
|
| 195 |
+
# 4. Experiments
|
| 196 |
+
|
| 197 |
+
# 4.1. Experimental Setups
|
| 198 |
+
|
| 199 |
+
Implementation. RealCustom is implemented on Stable Diffusion and trained on a subset of Laion-5B [29] filtered by aesthetic score, using 16 A100 GPUs for 160k iterations with a 1e-5 learning rate. Unless otherwise specified, the DDIM sampler [31] with 50 sampling steps is used, and the classifier-free guidance scales $\omega_{t},\omega_{i}$ are set to 7.5 and 12.5. The Top-K ratios are $\gamma_{\mathrm{num}} = 0.8$ and $\gamma_{\mathrm{scope}} = 0.25$ .
|
| 200 |
+
|
| 201 |
+
Evaluation. Similarity. We use the state-of-the-art segmentation model (i.e., SAM [15]) to segment the subject, and then evaluate with both CLIP-I and DINO [4] scores, which are the average pairwise cosine similarity between CLIP ViT-B/32 or DINO embeddings of the segmented subjects in the generated and real images. Controllability. We calculate the cosine similarity between prompt and image CLIP ViT-B/32 embeddings (CLIP-T). In addition, ImageReward [37] is used to evaluate controllability and aesthetics (quality).
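As a rough illustration of the CLIP-T metric (CLIP-I is computed analogously on pairs of image embeddings), a hedged sketch using the Hugging Face transformers CLIP ViT-B/32 checkpoint; the paper's exact evaluation pipeline (preprocessing, SAM segmentation for CLIP-I, averaging over prompts) is not specified at this level of detail.

```python
import torch
from transformers import CLIPModel, CLIPProcessor
from PIL import Image

model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

def clip_t(prompt: str, image: Image.Image) -> float:
    """Cosine similarity between the prompt embedding and the image embedding."""
    inputs = processor(text=[prompt], images=[image], return_tensors="pt", padding=True)
    with torch.no_grad():
        txt = model.get_text_features(input_ids=inputs["input_ids"],
                                      attention_mask=inputs["attention_mask"])
        img = model.get_image_features(pixel_values=inputs["pixel_values"])
    return torch.nn.functional.cosine_similarity(txt, img).item()
```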
|
| 202 |
+
|
| 203 |
+
Prior SOTAs. We compare with state-of-the-art methods of the existing paradigm, both optimization-based (i.e., Textual Inversion [10], DreamBooth [27], Custom Diffusion [16]) and encoder-based (ELITE [34], BLIP-Diffusion [18]).
|
| 204 |
+
|
| 205 |
+
# 4.2. Main Results
|
| 206 |
+
|
| 207 |
+
Quantitative results. As shown in Tab. 1, RealCustom outperforms existing methods in all metrics: (1) for controllability, we improve CLIP-T and ImageReward by $8.1\%$ and $223.5\%$ , respectively. The significant improvement in ImageReward shows that our paradigm generates much higher quality customization; (2) for similarity, we also achieve state-of-the-art performance on both CLIP-I and DINO-I.
|
| 208 |
+
|
| 209 |
+
<table><tr><td rowspan="2">Methods</td><td colspan="2">controllability</td><td colspan="2">similarity</td><td>efficiency</td></tr><tr><td>CLIP-T ↑</td><td>ImageReward ↑</td><td>CLIP-I ↑</td><td>DINO-I ↑</td><td>test-time optimize steps</td></tr><tr><td>Textual Inversion [10]</td><td>0.2546</td><td>-0.9168</td><td>0.7603</td><td>0.5956</td><td>5000</td></tr><tr><td>DreamBooth [27]</td><td>0.2783</td><td>0.2393</td><td>0.8466</td><td>0.7851</td><td>800</td></tr><tr><td>Custom Diffusion [16]</td><td>0.2884</td><td>0.2558</td><td>0.8257</td><td>0.7093</td><td>500</td></tr><tr><td>ELITE [34]</td><td>0.2920</td><td>0.2690</td><td>0.8022</td><td>0.6489</td><td>0 (real-time)</td></tr><tr><td>BLIP-Diffusion [18]</td><td>0.2967</td><td>0.2172</td><td>0.8145</td><td>0.6486</td><td>0 (real-time)</td></tr><tr><td>RealCustom(ours)</td><td>0.3204</td><td>0.8703</td><td>0.8552</td><td>0.7865</td><td>0 (real-time)</td></tr></table>
|
| 210 |
+
|
| 211 |
+

|
| 212 |
+
|
| 213 |
+
Table 1. Quantitative comparisons with existing methods. Left: Our proposed RealCustom outperforms existing methods in all metrics, i.e., (1) for controllability, achieving $8.1\%$ and $223.5\%$ improvements on CLIP-T and ImageReward, respectively. The significant improvement on ImageReward also validates that RealCustom could generate customized images with much higher quality (higher aesthetic score); (2) for similarity, we also achieve state-of-the-art performance on both CLIP-I and DINO-I. Right: We plot "CLIP-T versus DINO", showing that the existing methods are trapped in the dual-optimum paradox, while RealCustom completely gets rid of it and achieves both high-quality similarity and controllability. The same conclusion in "CLIP-T versus CLIP-I" can be found in Fig. 1(c).
|
| 214 |
+
|
| 215 |
+

|
| 216 |
+
Figure 5. Qualitative comparison with existing methods. RealCustom could produce much higher quality customization results that have better similarity with the given subject and better controllability with the given text compared to existing works. Moreover, RealCustom shows superior diversity (different subject poses, locations, etc.) and generation quality (e.g., the "autumn leaves" scene in the third row).
|
| 217 |
+
|
| 218 |
+
The figure of "CLIP-T verse DINO" validates that the existing paradigm is trapped into the dual-optimum paradox, while RealCustom effectively eradicates it.
|
| 219 |
+
|
| 220 |
+
Qualitative results. As shown in Fig. 5, RealCustom demonstrates superior zero-shot open-domain customization capability (e.g., the rare shaped toy in the first row), generating higher-quality custom images that have better similarity with the given subject and better controllability with the given text compared to existing works.
|
| 221 |
+
|
| 222 |
+
# 4.3. Ablations
|
| 223 |
+
|
| 224 |
+
Effectiveness of adaptive mask guidance strategy. We first visualize the narrowing-down process of the real word by the proposed adaptive mask guidance strategy in Fig. 6. We could observe that starting from the same state (the same mask, since no information about the given subject has been introduced at the first step), RealCustom gradually forms the structure and details of the given subject, achieving the open-domain zero-shot customization while remain-
|
| 225 |
+
|
| 226 |
+

|
| 227 |
+
Figure 6. Illustration of gradually narrowing the real words into the given subjects. Upper: RealCustom's generated results (first row) and the original text-to-image results (second row) generated by the pre-trained model with the same seed. The mask is visualized by the Top-25% highest attention score regions of the real word "toy". We could observe that starting from the same state (the same mask, since no information about the given subject has been introduced at the beginning), RealCustom gradually forms the structure and details of the given subject by our proposed adaptive mask strategy, achieving the open-domain zero-shot customization. Lower: More visualization cases.
|
| 228 |
+
|
| 229 |
+
<table><tr><td>inference setting</td><td>CLIP-T ↑</td><td>CLIP-I ↑</td></tr><tr><td>γscope = 0.1</td><td>0.32</td><td>0.8085</td></tr><tr><td>γscope = 0.2</td><td>0.3195</td><td>0.8431</td></tr><tr><td>γscope = 0.25</td><td>0.3204</td><td>0.8552</td></tr><tr><td>γscope = 0.25, binary</td><td>0.294</td><td>0.8567</td></tr><tr><td>γscope = 0.3</td><td>0.3129</td><td>0.8578</td></tr><tr><td>γscope = 0.4</td><td>0.3023</td><td>0.8623</td></tr><tr><td>γscope = 0.5</td><td>0.285</td><td>0.8654</td></tr></table>
|
| 230 |
+
|
| 231 |
+

|
| 232 |
+
Figure 7. Visualization of different influence scope.
|
| 233 |
+
|
| 234 |
+
Table 2. Ablation of different $\gamma_{\mathrm{scope}}$ , which denotes the influence scope of the given subject in RealCustom during inference. "binary" means using binary masks instead of max norm in Eq. 11.
|
| 235 |
+
|
| 236 |
+
<table><tr><td>ID</td><td>settings</td><td>CLIP-T ↑</td><td>CLIP-I ↑</td></tr><tr><td>1</td><td>full model, γnum = 0.8</td><td>0.3204</td><td>0.8552</td></tr><tr><td>2</td><td>w/o adaptive scoring module</td><td>0.3002</td><td>0.8221</td></tr><tr><td>3</td><td>textual score only, γnum = 0.8</td><td>0.313</td><td>0.8335</td></tr><tr><td>4</td><td>visual score only, γnum = 0.8</td><td>0.2898</td><td>0.802</td></tr><tr><td>5</td><td>(textual + visual) / 2, γnum = 0.8</td><td>0.3156</td><td>0.8302</td></tr><tr><td>6</td><td>full model, γnum = 0.9</td><td>0.315</td><td>0.8541</td></tr><tr><td>7</td><td>full model, γnum = 0.7</td><td>0.3202</td><td>0.8307</td></tr></table>
|
| 237 |
+
|
| 238 |
+
Table 3. Ablation of the adaptive scoring module, where $\gamma_{\mathrm{num}}$ means the influence quantity of the given subject during inference.
|
| 239 |
+
|
| 240 |
+
ing other subject-irrelevant parts (e.g., the city background) completely controlled by the given text.
|
| 241 |
+
|
| 242 |
+
We then ablate on the Top-K ratio $\gamma_{\mathrm{scope}}$ in Tab. 2: (1) within a proper range (experimentally, $\gamma_{\mathrm{scope}} \in [0.2, 0.4]$ ), the results are quite robust; (2) the maximum normalization in Eq. 11 is important for the unity of high similarity and controllability, since different regions in the selected parts have different subject relevance and should be assigned different weights; (3) a too small or too large influence scope will degrade similarity or controllability, respectively. These conclusions are validated by the visualization in Fig. 7.
|
| 243 |
+
|
| 244 |
+
Effectiveness of adaptive scoring module. As shown in Tab. 3, (1) we first compare with the simple use of all image features (ID-2), which results in degradation of both similarity and controllability, proving the importance of providing accurate and smooth influence quantity along with the coarse-to-fine diffusion generation process; (2) we then ablate on the module design (ID-3, 4, 5), finding that using the visual score only results in worse performance. The reason is that the generated features are noisy at the beginning, resulting in an inaccurate score prediction. Therefore, we propose a step-scheduler to adaptively fuse text and image scores, leading to the best performance; (3) finally, the choice of influence quantity $\gamma_{\mathrm{num}}$ is ablated in ID-6 & 7.
|
| 245 |
+
|
| 246 |
+
# 5. Conclusion
|
| 247 |
+
|
| 248 |
+
In this paper, we present a novel customization paradigm RealCustom that, for the first time, disentangles the similarity of given subjects from the controllability of given text by precisely limiting subject influence to relevant parts, gradually narrowing the real word from its general connotation to the specific subject in a novel "train-inference" decoupled framework: (1) the adaptive scoring module learns to adaptively modulate influence quantity during training; (2) the adaptive mask guidance strategy iteratively updates the in
|
| 249 |
+
|
| 250 |
+
fluence scope and influence quantity of given subjects during inference. Extensive experiments demonstrate that RealCustom achieves the unity of high-quality similarity and controllability in the real-time open-domain scenario.
|
| 251 |
+
|
| 252 |
+
# References
|
| 253 |
+
|
| 254 |
+
[1] Yuval Alaluf, Elad Richardson, Gal Metzer, and Daniel Cohen-Or. A neural space-time representation for text-to-image personalization. arXiv preprint arXiv:2305.15391, 2023. 2, 3
|
| 255 |
+
[2] Yogesh Balaji, Seungjun Nah, Xun Huang, Arash Vahdat, Jiaming Song, Karsten Kreis, Miika Aittala, Timo Aila, Samuli Laine, Bryan Catanzaro, et al. eDiff-I: Text-to-image diffusion models with an ensemble of expert denoisers. arXiv preprint arXiv:2211.01324, 2022. 3, 5
|
| 256 |
+
[3] Mingdeng Cao, Xintao Wang, Zhongang Qi, Ying Shan, Xiaohu Qie, and Yinqiang Zheng. Masactrl: Tuning-free mutual self-attention control for consistent image synthesis and editing. arXiv preprint arXiv:2304.08465, 2023. 3
|
| 257 |
+
[4] Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. In Proceedings of the IEEE/CVF international conference on computer vision, pages 9650-9660, 2021. 6
|
| 258 |
+
[5] Hila Chefer, Yuval Alaluf, Yael Vinker, Lior Wolf, and Daniel Cohen-Or. Attend-and-excite: Attention-based semantic guidance for text-to-image diffusion models. ACM Transactions on Graphics (TOG), 42(4):1-10, 2023. 3
|
| 259 |
+
[6] Wenhu Chen, Hexiang Hu, Chitwan Saharia, and William W Cohen. Re-imagen: Retrieval-augmented text-to-image generator. arXiv preprint arXiv:2209.14491, 2022. 1, 3
|
| 260 |
+
[7] Zhuowei Chen, Shancheng Fang, Wei Liu, Qian He, Mengqi Huang, Yongdong Zhang, and Zhendong Mao. Dreamidentity: Improved editability for efficient face-identity preserved image generation. arXiv preprint arXiv:2307.00300, 2023. 1, 3
|
| 261 |
+
[8] Giannis Daras and Alexandros G Dimakis. Multiresolution textual inversion. arXiv preprint arXiv:2211.17115, 2022. 2, 3
|
| 262 |
+
[9] Ziyi Dong, Pengxu Wei, and Liang Lin. Dreamartist: Towards controllable one-shot text-to-image generation via contrastive prompt-tuning. arXiv preprint arXiv:2211.11337, 2022.
|
| 263 |
+
[10] Rinon Gal, Yuval Alaluf, Yuval Atzmon, Or Patashnik, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. An image is worth one word: Personalizing text-to-image generation using textual inversion. arXiv preprint arXiv:2208.01618, 2022. 2, 3, 6, 7
|
| 264 |
+
[11] Rinon Gal, Moab Arar, Yuval Atzmon, Amit H Bermano, Gal Chechik, and Daniel Cohen-Or. Designing an encoder for fast personalization of text-to-image models. arXiv preprint arXiv:2302.12228, 2023. 2, 3
|
| 265 |
+
[12] Amir Hertz, Ron Mokady, Jay Tenenbaum, Kfir Aberman, Yael Pritch, and Daniel Cohen-Or. Prompt-to-prompt image editing with cross attention control. arXiv preprint arXiv:2208.01626, 2022. 3
|
| 266 |
+
|
| 267 |
+
[13] Jonathan Ho and Tim Salimans. Classifier-free diffusion guidance. arXiv preprint arXiv:2207.12598, 2022. 6
|
| 268 |
+
[14] Xuhui Jia, Yang Zhao, Kelvin CK Chan, Yandong Li, Han Zhang, Boqing Gong, Tingbo Hou, Huisheng Wang, and Yu-Chuan Su. Taming encoder for zero fine-tuning image customization with text-to-image diffusion models. arXiv preprint arXiv:2304.02642, 2023. 3
|
| 269 |
+
[15] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. arXiv preprint arXiv:2304.02643, 2023. 6
|
| 270 |
+
[16] Nupur Kumari, Bingliang Zhang, Richard Zhang, Eli Shechtman, and Jun-Yan Zhu. Multi-concept customization of text-to-image diffusion. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1931-1941, 2023. 2, 3, 6, 7
|
| 271 |
+
[17] Alina Kuznetsova, Hassan Rom, Neil Alldrin, Jasper Uijlings, Ivan Krasin, Jordi Pont-Tuset, Shahab Kamali, Stefan Popov, Matteo Malloci, Alexander Kolesnikov, et al. The open images dataset v4: Unified image classification, object detection, and visual relationship detection at scale. International Journal of Computer Vision, 128(7):1956-1981, 2020. 2
|
| 272 |
+
[18] Dongxu Li, Junnan Li, and Steven CH Hoi. Blip-diffusion: Pre-trained subject representation for controllable text-to-image generation and editing. arXiv preprint arXiv:2305.14720, 2023. 2, 3, 6, 7
|
| 273 |
+
[19] Yumeng Li, Margret Keuper, Dan Zhang, and Anna Khoreva. Divide & bind your attention for improved generative semantic nursing. arXiv preprint arXiv:2307.10864, 2023. 3
|
| 274 |
+
[20] Yuheng Li, Haotian Liu, Qingyang Wu, Fangzhou Mu, Jianwei Yang, Jianfeng Gao, Chunyuan Li, and Yong Jae Lee. Gligen: Open-set grounded text-to-image generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22511-22521, 2023. 3
|
| 275 |
+
[21] Ziyi Li, Qinye Zhou, Xiaoyun Zhang, Ya Zhang, Yanfeng Wang, and Weidi Xie. Guiding text-to-image diffusion model towards grounded generation. arXiv preprint arXiv:2301.05221, 2023. 3
|
| 276 |
+
[22] Zhiheng Liu, Yifei Zhang, Yujun Shen, Kecheng Zheng, Kai Zhu, Ruili Feng, Yu Liu, Deli Zhao, Jingren Zhou, and Yang Cao. Cones 2: Customizable image synthesis with multiple subjects. arXiv preprint arXiv:2305.19327, 2023. 2, 3
|
| 277 |
+
[23] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PMLR, 2021. 4, 5
|
| 278 |
+
[24] Aditya Ramesh, Prafulla Dhariwal, Alex Nichol, Casey Chu, and Mark Chen. Hierarchical text-conditional image generation with clip latents. arXiv preprint arXiv:2204.06125, 1 (2):3, 2022. 1, 3
|
| 279 |
+
[25] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. 1, 3, 4
|
| 280 |
+
|
| 281 |
+
[26] Olaf Ronneberger, Philipp Fischer, and Thomas Brox. U-net: Convolutional networks for biomedical image segmentation. In Medical Image Computing and Computer-Assisted Intervention-MICCAI 2015: 18th International Conference, Munich, Germany, October 5-9, 2015, Proceedings, Part III 18, pages 234-241. Springer, 2015. 4
|
| 282 |
+
[27] Nataniel Ruiz, Yuanzhen Li, Varun Jampani, Yael Pritch, Michael Rubinstein, and Kfir Aberman. Dreambooth: Fine tuning text-to-image diffusion models for subject-driven generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22500-22510, 2023. 2, 3, 6, 7
|
| 283 |
+
[28] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily L Denton, Kamyar Ghasemipour, Raphael Gontijo Lopes, Burcu Karagol Ayan, Tim Salimans, et al. Photorealistic text-to-image diffusion models with deep language understanding. Advances in Neural Information Processing Systems, 35:36479-36494, 2022. 1, 3
|
| 284 |
+
[29] Christoph Schuhmann, Romain Beaumont, Richard Vencu, Cade Gordon, Ross Wightman, Mehdi Cherti, Theo Coombes, Aarush Katta, Clayton Mullis, Mitchell Wortsman, et al. Laion-5b: An open large-scale dataset for training next generation image-text models. Advances in Neural Information Processing Systems, 35:25278-25294, 2022. 6
|
| 285 |
+
[30] Jing Shi, Wei Xiong, Zhe Lin, and Hyun Joon Jung. Instantbooth: Personalized text-to-image generation without test-time finetuning. arXiv preprint arXiv:2304.03411, 2023. 2, 3
|
| 286 |
+
[31] Jiaming Song, Chenlin Meng, and Stefano Ermon. Denoising diffusion implicit models. arXiv preprint arXiv:2010.02502, 2020. 3, 6
|
| 287 |
+
[32] Andrey Voynov, Qinghao Chu, Daniel Cohen-Or, and Kfir Aberman. $p+$ : Extended textual conditioning in text-to-image generation. arXiv preprint arXiv:2303.09522, 2023. 2, 3
|
| 288 |
+
[33] Jinglong Wang, Xiawei Li, Jing Zhang, Qingyuan Xu, Qin Zhou, Qian Yu, Lu Sheng, and Dong Xu. Diffusion model is secretly a training-free open vocabulary semantic segmenter. arXiv preprint arXiv:2309.02773, 2023. 3
|
| 289 |
+
[34] Yuxiang Wei, Yabo Zhang, Zhilong Ji, Jinfeng Bai, Lei Zhang, and Wangmeng Zuo. Elite: Encoding visual concepts into textual embeddings for customized text-to-image generation. arXiv preprint arXiv:2302.13848, 2023. 2, 3, 6, 7
|
| 290 |
+
[35] Weijia Wu, Yuzhong Zhao, Mike Zheng Shou, Hong Zhou, and Chunhua Shen. Diffumask: Synthesizing images with pixel-level annotations for semantic segmentation using diffusion models. arXiv preprint arXiv:2303.11681, 2023. 3
|
| 291 |
+
[36] Changming Xiao, Qi Yang, Feng Zhou, and Changshui Zhang. From text to mask: Localizing entities using the attention of text-to-image diffusion models. arXiv preprint arXiv:2309.04109, 2023. 3
|
| 292 |
+
[37] Jiazheng Xu, Xiao Liu, Yuchen Wu, Yuxuan Tong, Qinkai Li, Ming Ding, Jie Tang, and Yuxiao Dong. Imagereward: Learning and evaluating human preferences for text-to-image generation. arXiv preprint arXiv:2304.05977, 2023. 6, 11, 12
|
| 293 |
+
|
| 294 |
+
[38] Yuxin Zhang, Weiming Dong, Fan Tang, Nisha Huang, Haibin Huang, Chongyang Ma, Tong-Yee Lee, Oliver Deussen, and Changsheng Xu. Prospect: Expanded conditioning for the personalization of attribute-aware image generation. arXiv preprint arXiv:2305.16225, 2023. 2
|
| 295 |
+
|
| 296 |
+
# 6. Supplementary
|
| 297 |
+
|
| 298 |
+
# 6.1. More Qualitative Comparison
|
| 299 |
+
|
| 300 |
+
As shown in Fig. 8, we provide more qualitative comparisons between our proposed RealCustom and recent state-of-the-art methods of the previous pseudo-word paradigm in the real-time customization scenario. Compared with the existing state of the art, we could draw the following conclusions: (1) better similarity with the given subjects and better controllability with the given text at the same time, e.g., in the $7^{\text{th}}$ row, the toy generated by RealCustom is placed exactly on the Great Wall while existing works fail to adhere to the given text; meanwhile, the toy generated by RealCustom exactly mimics all details of the given one while existing works fail to preserve them. (2) better image quality, i.e., better aesthetic scores, e.g., the snow scene in the second row, the dirt road scene in the third row, etc. This conclusion is consistent with our significant improvement (223.5%) on ImageReward [37] in the main paper, since ImageReward evaluates both controllability and image quality. (3) better generalization in the open domain, i.e., for any given subject, RealCustom could generate realistic images that consistently adhere to the given text in real-time, including common subjects like dogs (e.g., $5^{th}$ , $6^{th}$ rows) and rare subjects like the unique backpack (i.e., $1^{st}$ row), while existing state-of-the-art methods work poorly on rare subjects such as the backpack in the first row, the special toy in the last row, etc. The reason is that, for the very first time, our proposed RealCustom progressively narrows a real text word from its initial general connotation into the unique subject, which completely gets rid of the necessary correspondence between given subjects and learned pseudo-words, and is therefore no longer confined to training on object-datasets with limited categories.
|
| 301 |
+
|
| 302 |
+
# 6.2. More Visualization
|
| 303 |
+
|
| 304 |
+
We provide a more comprehensive visualization of the narrowing-down process of the real word in our proposed RealCustom in Fig. 9 and Fig. 10. Here, we provide four customization cases with the same given text "a toy in the desert" and four different given subjects. The real text word used for narrowing is "toy". The mask is visualized by the Top-25% highest attention score regions of the real text word "toy". We visualize all the masks over the total 50 DDIM sampling steps. We could observe that the mask of "toy" is gradually, smoothly, and accurately narrowed into the specific given subject. Meanwhile, even within these subject-relevant parts (the Top-25% highest attention score regions of the real text word "toy" in these cases), the relevance of different regions also differs, e.g., in Fig. 9, the more important parts such as the eyes of the first subject are given higher weight (brighter in the mask), and in Fig. 10, the more important parts such as the eyes of the second subject are given higher weight.
|
| 305 |
+
|
| 306 |
+
# 6.3. Impact of Different Real Word
|
| 307 |
+
|
| 308 |
+
The customization results when using different real text words are shown in Fig. 11. The real text word narrowed down for customization is highlighted in red. We could draw the following conclusions: (1) The customization results of our proposed RealCustom are quite robust, i.e., no matter how coarse-grained the text word used to represent the given subject, the generated subject in the customization results is always almost identical to the given subject. For example, in the upper three rows, when we use "corgi", "dog" or "animal" to customize the given subject, the results all consistently adhere to the given subject. This phenomenon also validates the generalization and robustness of our proposed new paradigm RealCustom. (2) When using a completely different word to represent the given subject, e.g., using "parrot" to represent a corgi, our proposed RealCustom opens a door to a new application, i.e., novel concept creation. That is, RealCustom will try to combine the two concepts and create a new one, e.g., generating a parrot with the appearance and character of the given brown corgi, as shown in the lower three rows. This application will be very valuable for designing new characters in movies, games, etc.
|
| 309 |
+
|
| 310 |
+

|
| 311 |
+
Figure 8. Qualitative comparison between our proposed RealCustom and recent state-of-the-art methods of the previous pseudo-word paradigm in the real-time customization scenario. We could conclude that (1) compared with the existing state of the art, RealCustom shows much better similarity with the given subjects and better controllability with the given text at the same time, e.g., in the $7^{\text{th}}$ row, the toy generated by RealCustom is placed exactly on the Great Wall while existing works fail to adhere to the given text; meanwhile, the toy generated by RealCustom exactly mimics all details of the given one while existing works fail to preserve them. (2) RealCustom generates customized images with much better quality, i.e., better aesthetic scores, e.g., the snow scene in the second row, the dirt road scene in the third row, etc. This conclusion is consistent with our significant improvement (223.5%) on ImageReward [37] in the main paper, since ImageReward evaluates both controllability and image quality. (3) RealCustom shows better generalization in the open domain, i.e., for any given subject, RealCustom could generate realistic images that consistently adhere to the given text in real-time, including common subjects like dogs $(e.g., 5^{th}, 6^{th}$ rows) and rare subjects like the unique backpack $(i.e., 1^{st}$ row), while existing state-of-the-art methods work poorly on rare subjects such as the backpack in the first row, the special toy in the last row, etc.

Figure 9. Illustration of gradually narrowing the real word down to the given subjects. Here we provide two customization cases with the same given text "a toy in the desert" and two different given subjects. The real text word used for narrowing is "toy". The mask is visualized by the Top-25% highest attention score regions of the real text word "toy". We visualize all the masks over the total 50 DDIM sampling steps, which are shown on the left. We can observe that the mask of "toy" is gradually, smoothly, and accurately narrowed down to the specific given subject. Meanwhile, even within these subject-relevant parts (the Top-25% highest attention score regions of the real text word "toy" in these cases), the relevance differs, e.g., the more important parts such as the eyes of the first subject are given higher weight (brighter in the mask).

Figure 10. Illustration of gradually narrowing the real word down to the given subjects. Here we provide two customization cases with the same given text "a toy in the desert" and two different given subjects. The real text word used for narrowing is "toy". The mask is visualized by the Top-25% highest attention score regions of the real text word "toy". We visualize all the masks over the total 50 DDIM sampling steps, which are shown on the left. We can observe that the mask of "toy" is gradually, smoothly, and accurately narrowed down to the specific given subject. Meanwhile, even within these subject-relevant parts (the Top-25% highest attention score regions of the real text word "toy" in these cases), the relevance differs, e.g., the more important parts such as the eyes of the second subject are given higher weight (brighter in the mask).

given subject

a corgi is lying in the bed

a cat is lying in the bed

a dog is lying in the bed.

a parrot is lying in the bed.
Figure 11. Customization results using different real text words. The real text word narrowed down for customization is highlighted in red. We can draw the following conclusions: (1) The customization results of our proposed RealCustom are quite robust, i.e., no matter how coarse-grained a text word we use to represent the given subject, the generated subject in the customization results is always almost identical to the given subject. For example, in the upper three rows, whether we use "corgi", "dog", or "animal" to customize the given subject, the results all consistently adhere to the given subject. This phenomenon also validates the generalization and robustness of our proposed new paradigm RealCustom. (2) When using a completely different word to represent the given subject, e.g., using "parrot" to represent a corgi, our proposed RealCustom opens the door to a new application, i.e., novel concept creation. That is, RealCustom will try to combine the two concepts and create a new one, e.g., generating a parrot with the appearance and character of the given brown corgi, as shown in the lower three rows. This application will be very valuable for designing new characters in movies, games, etc.

an animal is lying in the bed

a giraffe is lying in the bed.
2403.00xxx/2403.00483/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc2e6bdd64443a0a3c7e40c8f75d95f11417c9d64fcaad1fa0e37da429b727b8
+size 1680915
2403.00xxx/2403.00483/layout.json
ADDED
The diff for this file is too large to render.
See raw diff
2403.00xxx/2403.00485/c4d4f849-2aca-4cb1-9365-a1f1ef3e8602_content_list.json
ADDED
The diff for this file is too large to render.
See raw diff