Add Batch 94cd4944-fe85-4f52-b0c2-53ed112582d9
This view is limited to 50 files because it contains too many changes. See raw diff.
- .gitattributes +64 -0
- 2203.16xxx/2203.16262/dbde7dba-9c3c-4c02-b3ea-a304e641a5fd_content_list.json +0 -0
- 2203.16xxx/2203.16262/dbde7dba-9c3c-4c02-b3ea-a304e641a5fd_model.json +0 -0
- 2203.16xxx/2203.16262/dbde7dba-9c3c-4c02-b3ea-a304e641a5fd_origin.pdf +3 -0
- 2203.16xxx/2203.16262/full.md +547 -0
- 2203.16xxx/2203.16262/images.zip +3 -0
- 2203.16xxx/2203.16262/layout.json +0 -0
- 2203.16xxx/2203.16263/581c38e5-2dd5-4af0-b295-8d57e76a6f20_content_list.json +921 -0
- 2203.16xxx/2203.16263/581c38e5-2dd5-4af0-b295-8d57e76a6f20_model.json +1299 -0
- 2203.16xxx/2203.16263/581c38e5-2dd5-4af0-b295-8d57e76a6f20_origin.pdf +3 -0
- 2203.16xxx/2203.16263/full.md +183 -0
- 2203.16xxx/2203.16263/images.zip +3 -0
- 2203.16xxx/2203.16263/layout.json +0 -0
- 2203.16xxx/2203.16265/92320dc4-7086-4f19-bc8e-609012aa7b1a_content_list.json +0 -0
- 2203.16xxx/2203.16265/92320dc4-7086-4f19-bc8e-609012aa7b1a_model.json +0 -0
- 2203.16xxx/2203.16265/92320dc4-7086-4f19-bc8e-609012aa7b1a_origin.pdf +3 -0
- 2203.16xxx/2203.16265/full.md +405 -0
- 2203.16xxx/2203.16265/images.zip +3 -0
- 2203.16xxx/2203.16265/layout.json +0 -0
- 2203.16xxx/2203.16317/b564aed0-5bb1-4ebd-a32c-a4a45cb20e3c_content_list.json +1752 -0
- 2203.16xxx/2203.16317/b564aed0-5bb1-4ebd-a32c-a4a45cb20e3c_model.json +2201 -0
- 2203.16xxx/2203.16317/b564aed0-5bb1-4ebd-a32c-a4a45cb20e3c_origin.pdf +3 -0
- 2203.16xxx/2203.16317/full.md +287 -0
- 2203.16xxx/2203.16317/images.zip +3 -0
- 2203.16xxx/2203.16317/layout.json +0 -0
- 2203.16xxx/2203.16318/bdf4203f-60cc-4b35-a342-da2e1d7ac014_content_list.json +1064 -0
- 2203.16xxx/2203.16318/bdf4203f-60cc-4b35-a342-da2e1d7ac014_model.json +1281 -0
- 2203.16xxx/2203.16318/bdf4203f-60cc-4b35-a342-da2e1d7ac014_origin.pdf +3 -0
- 2203.16xxx/2203.16318/full.md +185 -0
- 2203.16xxx/2203.16318/images.zip +3 -0
- 2203.16xxx/2203.16318/layout.json +0 -0
- 2203.16xxx/2203.16365/2f6d35a2-62f5-42ff-899d-7af22007695b_content_list.json +0 -0
- 2203.16xxx/2203.16365/2f6d35a2-62f5-42ff-899d-7af22007695b_model.json +0 -0
- 2203.16xxx/2203.16365/2f6d35a2-62f5-42ff-899d-7af22007695b_origin.pdf +3 -0
- 2203.16xxx/2203.16365/full.md +562 -0
- 2203.16xxx/2203.16365/images.zip +3 -0
- 2203.16xxx/2203.16365/layout.json +0 -0
- 2203.16xxx/2203.16369/b6c3126c-a1ee-4c00-afaf-ac1a4f247180_content_list.json +1757 -0
- 2203.16xxx/2203.16369/b6c3126c-a1ee-4c00-afaf-ac1a4f247180_model.json +0 -0
- 2203.16xxx/2203.16369/b6c3126c-a1ee-4c00-afaf-ac1a4f247180_origin.pdf +3 -0
- 2203.16xxx/2203.16369/full.md +384 -0
- 2203.16xxx/2203.16369/images.zip +3 -0
- 2203.16xxx/2203.16369/layout.json +0 -0
- 2203.16xxx/2203.16421/c0eaf7a9-2c57-463e-87f4-1bf6bb2119e5_content_list.json +0 -0
- 2203.16xxx/2203.16421/c0eaf7a9-2c57-463e-87f4-1bf6bb2119e5_model.json +0 -0
- 2203.16xxx/2203.16421/c0eaf7a9-2c57-463e-87f4-1bf6bb2119e5_origin.pdf +3 -0
- 2203.16xxx/2203.16421/full.md +0 -0
- 2203.16xxx/2203.16421/images.zip +3 -0
- 2203.16xxx/2203.16421/layout.json +0 -0
- 2203.16xxx/2203.16427/7e4731b1-1524-48d4-9792-fa541b30a263_content_list.json +0 -0
.gitattributes
CHANGED
@@ -6524,3 +6524,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 2204.01xxx/2204.01712/ca24a3c2-d6a6-405d-97e4-412c32f06732_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2204.03xxx/2204.03541/6a1d0124-4e29-4eaa-88d2-32d19a491b0c_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2205.01xxx/2205.01042/28d243c0-a822-4cf5-a911-4cd193cf9dde_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16262/dbde7dba-9c3c-4c02-b3ea-a304e641a5fd_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16263/581c38e5-2dd5-4af0-b295-8d57e76a6f20_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16265/92320dc4-7086-4f19-bc8e-609012aa7b1a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16317/b564aed0-5bb1-4ebd-a32c-a4a45cb20e3c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16318/bdf4203f-60cc-4b35-a342-da2e1d7ac014_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16365/2f6d35a2-62f5-42ff-899d-7af22007695b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16369/b6c3126c-a1ee-4c00-afaf-ac1a4f247180_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16421/c0eaf7a9-2c57-463e-87f4-1bf6bb2119e5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16427/7e4731b1-1524-48d4-9792-fa541b30a263_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16434/8564f3b8-9b86-40ce-9096-1df108de4862_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16437/0dc4b2be-c797-45fb-b717-2545c21b8797_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16481/9eb521f9-cc77-4bc7-9edf-4e393bf3e123_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16487/ea0e9265-2a90-488f-8730-14ea573a590c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16502/020e951b-daa7-455a-8747-f6dc0383889a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16507/9b543b50-4cec-4b36-9b11-e7522c16f4ed_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16513/3e43ce02-edf6-406c-a65b-4d12f8c0dd2e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16527/3831be8e-443b-4ab8-a722-9a5503349165_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16529/c662b713-5c38-4e99-b6da-192a4612fd6e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16533/7057b504-b305-40ce-9c60-8d5f78704801_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16586/c61707eb-89e5-4c09-87bf-8b331cd93446_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16588/a1cb4be4-ab30-49d0-be47-ffcb9b2bae3f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16599/b67eaf63-6cb3-4fff-99ba-b3bb3fa078d6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16600/e5550d81-bc24-4458-9bea-d403ddc4b2a4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16618/24be3dca-6223-4b42-bf77-c9e0c81c62d4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16634/59c316ee-af4f-4d34-ae4c-5218bbdad327_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16654/b04c833d-9d05-4341-a6ed-b474fd6cba1b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16670/c16c7c64-d9fe-49a3-8126-c42c6db1e521_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16680/ba66cd95-18c2-42b3-995a-7eda6c31e4bc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16681/d87810a1-19cd-44b0-a954-c6fc8eab93da_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16691/1339bf79-477b-438f-95ce-551e65aba57d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16705/5bf5aef7-6d61-46ea-aec3-4c051e13bd7c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16708/32328b81-84b7-41cc-b07b-731fc9bb6e46_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16711/f1699e74-b73d-445f-bf45-34c0c0d41f4f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16747/137c9528-efdf-4581-bf65-9fdcbfbe1b44_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16749/58843045-98a9-47b8-949b-df0cc819a872_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16754/c032a73e-f867-4d61-9834-0d674b04e920_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16761/efafb862-30c3-47a1-afaf-d78dc48a7a57_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16768/4fa77ea1-b2d8-41c2-845b-f1bf0448726a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16771/c001f2cc-47a7-4968-951f-6d43fc68c0c7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16775/74ab57dc-7a97-4fa0-8b3c-31ebd5c4e53c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16778/524489b0-79f3-4414-8c6d-80bece0ec891_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16797/b1194283-e188-4654-8b13-904c8706a8d6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16800/7643e78c-9cb0-4766-a2f7-b2f8bb211be0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16804/dc070269-29ac-46f5-ba60-060c856a3d40_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16822/e34919e3-4d09-4355-a0e9-f3d7e6ba5764_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16844/fa008048-69dd-49cd-832b-0a937809c29c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16852/7060b9a6-770b-467e-b548-3772aa0ad353_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16863/3a8f3789-9380-46d3-a25f-b3bbba47aff1_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16875/f4b5c114-c68e-4e1f-b0f5-c1540ee49c9a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16896/aeccef6b-ccc1-4f88-ab32-4098fbc750d4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16897/0cb719ee-d8b5-4529-9d03-b4be77c8647e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16910/70d4335f-35cb-4ba7-8593-698e07247c49_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16931/9bebfd63-8940-4836-9ced-9b38901382d3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16952/d4b6f02d-3ab7-43a4-aa0e-05abdf28d198_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.16xxx/2203.16969/8031269b-19ce-434f-aab1-69c7463a635c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.17xxx/2203.17003/fb36b994-b42a-4e93-b29c-53b538ef1461_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.17xxx/2203.17004/b40d3d5c-e3cc-412c-b0b9-d40f7dc222e0_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.17xxx/2203.17005/e3d8d36d-0944-4574-85ff-c8908056120e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.17xxx/2203.17006/ac06ec0b-f020-4c6c-a6cb-a8e4dad8313a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.17xxx/2203.17024/ca8e0741-e181-4b97-b534-aa59393724b2_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.17xxx/2203.17030/f3723c30-ca9d-4cd3-a40e-d5dd8c60fc41_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.17xxx/2203.17248/af155d91-53a2-487f-9f0d-ad06bbf7234e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2204.00xxx/2204.00089/9ceff1ad-4bd9-4856-9677-69a48ccab5c4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2204.02xxx/2204.02360/f1e41759-3df4-4294-8e07-762b9bcb0798_origin.pdf filter=lfs diff=lfs merge=lfs -text
2203.16xxx/2203.16262/dbde7dba-9c3c-4c02-b3ea-a304e641a5fd_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.

2203.16xxx/2203.16262/dbde7dba-9c3c-4c02-b3ea-a304e641a5fd_model.json
ADDED
The diff for this file is too large to render. See raw diff.
2203.16xxx/2203.16262/dbde7dba-9c3c-4c02-b3ea-a304e641a5fd_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f6b288c9e6f171ad6812b748992aee14ea290cc74d93049ea21d30d1619d4554
+size 995880
2203.16xxx/2203.16262/full.md
ADDED
@@ -0,0 +1,547 @@
# HOW DOES SIMSIAM AVOID COLLAPSE WITHOUT NEGATIVE SAMPLES? A UNIFIED UNDERSTANDING WITH SELF-SUPERVISED CONTRASTIVE LEARNING

Chaoning Zhang* & Kang Zhang* & Chenshuang Zhang & Trung X. Pham
Chang D. Yoo & In So Kweon

Korea Advanced Institute of Science and Technology (KAIST), South Korea
chaoningzhang1990@gmail.com & zhangkang@kaist.ac.kr

# ABSTRACT

To avoid collapse in self-supervised learning (SSL), a contrastive loss is widely used but often requires a large number of negative samples. Without negative samples yet achieving competitive performance, a recent work (Chen & He, 2021) has attracted significant attention for providing a minimalist simple Siamese (SimSiam) method to avoid collapse. However, the reason for how it avoids collapse without negative samples remains not fully clear, and our investigation starts by revisiting the explanatory claims in the original SimSiam. After refuting their claims, we introduce vector decomposition for analyzing the collapse based on the gradient analysis of the $l_{2}$-normalized representation vector. This yields a unified perspective on how negative samples and SimSiam alleviate collapse. Such a unified perspective comes timely for understanding the recent progress in SSL.
# 1 INTRODUCTION

Beyond the success of NLP (Lan et al., 2020; Radford et al., 2019; Devlin et al., 2019; Su et al., 2020; Nie et al., 2020), self-supervised learning (SSL) has also shown its potential in the field of vision tasks (Li et al., 2021; Chen et al., 2021; El-Nouby et al., 2021). Without the ground-truth label, the core of most SSL methods lies in learning an encoder with augmentation-invariant representation (Bachman et al., 2019; He et al., 2020; Chen et al., 2020a; Caron et al., 2020; Grill et al., 2020). Specifically, they often minimize the representation distance between two positive samples, i.e. two augmented views of the same image, based on a Siamese network architecture (Bromley et al., 1993). It is widely known that for such Siamese networks there exists a degenerate solution, i.e. all outputs "collapsing" to an undesired constant (Chen et al., 2020a; Chen & He, 2021). Early works have attributed the collapse to lacking a repulsive component in the optimization goal and adopted contrastive learning (CL) with negative samples, i.e. views of different samples, to alleviate this problem. Introducing momentum into the target encoder, BYOL (Grill et al., 2020) shows that Siamese architectures can be trained with only positive pairs. More recently, SimSiam (Chen & He, 2021) has attracted great attention by further simplifying BYOL through removing the momentum encoder, which has been seen as a major milestone in SSL for providing a minimalist method that achieves competitive performance. However, more investigation is required for the following question:

*How does SimSiam avoid collapse without negative samples?*

Our investigation starts with revisiting the explanatory claims in the original SimSiam paper (Chen & He, 2021). Notably, two components, i.e. stop gradient and predictor, are essential for the success of SimSiam (Chen & He, 2021). The reason has been mainly attributed to the stop gradient (Chen & He, 2021) by hypothesizing that it implicitly involves two sets of variables and SimSiam behaves like alternating between optimizing each set. Chen & He argue that the predictor $h$ is helpful in SimSiam because $h$ fills the gap to approximate expectation over augmentations (EOA).

Unfortunately, the above explanatory claims are found to be flawed due to reversing the two paths with and without gradient (see Sec. 2.2). This motivates us to find an alternative explanation, for which we introduce a simple yet intuitive framework for facilitating the analysis of collapse in SSL.

Specifically, we propose to decompose a representation vector into center and residual components. This decomposition facilitates understanding which gradient component is beneficial for avoiding collapse. Under this framework, we show that a basic Siamese architecture cannot prevent collapse, for which an extra gradient component needs to be introduced. With SimSiam interpreted as processing the optimization target with an inverse predictor, the analysis of its extra gradient shows that (a) its center vector helps prevent collapse via the de-centering effect; (b) its residual vector achieves dimensional de-correlation, which also alleviates collapse.

Moreover, under the same gradient decomposition, we find that the extra gradient caused by negative samples in InfoNCE (He et al., 2019; Chen et al., 2020b;a; Tian et al., 2019; Khosla et al., 2020) also achieves de-centering and de-correlation in the same manner. This contributes to a unified understanding of various frameworks in SSL, which also inspires the investigation of hardness-awareness (Wang & Liu, 2021) from the inter-anchor perspective (Zhang et al., 2022) for further bridging the gap between CL and non-CL frameworks in SSL. Finally, simplifying the predictor for a more explainable SimSiam, we show that a single bias layer is sufficient for preventing collapse.

The basic experimental settings for our analysis are detailed in Appendix A.1, with more specific setups discussed in context. Overall, our work is the first attempt at a comprehensive study of how SimSiam avoids collapse without negative samples. Several works, however, have attempted to demystify the success of BYOL (Grill et al., 2020), a close variant of SimSiam. A technical report (Fetterman & Albrecht, 2020) has suggested the importance of batch normalization (BN) in BYOL for its success; however, a recent work (Richemond et al., 2020) refutes their claim by showing that BYOL works without BN, which is discussed in Appendix B.
# 2 REVISITING SIMSIAM AND ITS EXPLANATORY CLAIMS

$l_{2}$-normalized vector and optimization goal. SSL trains an encoder $f$ for learning discriminative representation, and we denote such a representation as a vector $\mathbf{z}$, i.e. $f(x) = \mathbf{z}$, where $x$ is a certain input. For the augmentation-invariant representation, a straightforward goal is to minimize the distance between the representations of two positive samples, i.e. augmented views of the same image, for which mean squared error (MSE) is a default choice. To avoid scale ambiguity, the vectors are often $l_{2}$-normalized, i.e. $\mathbf{Z} = \mathbf{z} / \|\mathbf{z}\|$ (Chen & He, 2021), before calculating the MSE:

$$
\mathcal{L}_{MSE} = \left(\boldsymbol{Z}_{a} - \boldsymbol{Z}_{b}\right)^{2} / 2 - 1 = -\boldsymbol{Z}_{a} \cdot \boldsymbol{Z}_{b} = \mathcal{L}_{\text{cosine}}, \tag{1}
$$

which shows the equivalence of a normalized MSE loss to the cosine loss (Grill et al., 2020).
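As a quick sanity check of Eq 1 (a minimal sketch of ours with randomly drawn vectors, not code from the paper): for unit vectors, $\|Z_a - Z_b\|^2 = 2 - 2\,Z_a \cdot Z_b$, so the shifted MSE coincides with the negative cosine similarity.

```python
import numpy as np

rng = np.random.default_rng(0)
z_a, z_b = rng.normal(size=128), rng.normal(size=128)
Z_a, Z_b = z_a / np.linalg.norm(z_a), z_b / np.linalg.norm(z_b)

mse_loss = np.sum((Z_a - Z_b) ** 2) / 2 - 1  # normalized MSE, shifted by a constant
cosine_loss = -np.dot(Z_a, Z_b)              # negative cosine similarity

assert np.isclose(mse_loss, cosine_loss)     # identical up to floating-point error
```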
Collapse in SSL and solution of SimSiam. Based on a Siamese architecture, the loss in Eq 1 causes the collapse, i.e. $f$ always outputs a constant regardless of the input variance. We refer to this Siamese architecture with the loss in Eq 1 as Naive Siamese in the remainder of the paper. Contrastive loss with negative samples is a widely used solution (Chen et al., 2020a). Without using negative samples, SimSiam solves the collapse problem via predictor and stop gradient, based on which the encoder is optimized with a symmetric loss:

$$
\mathcal{L}_{\text{SimSiam}} = -\left(\boldsymbol{P}_{a} \cdot \operatorname{sg}\left(\boldsymbol{Z}_{b}\right) + \boldsymbol{P}_{b} \cdot \operatorname{sg}\left(\boldsymbol{Z}_{a}\right)\right), \tag{2}
$$

where $\mathrm{sg}(\cdot)$ is stop gradient and $\boldsymbol{P}$ is the output of predictor $h$, i.e. $\boldsymbol{p} = h(\boldsymbol{z})$ and $\boldsymbol{P} = \boldsymbol{p} / \|\boldsymbol{p}\|$.
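The following is a minimal PyTorch-style sketch of the loss in Eq 2, not the authors' released code; `encoder` and `predictor` are assumed `nn.Module`s, and `detach()` plays the role of $\mathrm{sg}(\cdot)$.

```python
import torch.nn.functional as F

def simsiam_loss(encoder, predictor, x_a, x_b):
    """Symmetric negative-cosine loss of Eq 2; detach() implements sg(.)."""
    z_a, z_b = encoder(x_a), encoder(x_b)
    p_a, p_b = predictor(z_a), predictor(z_b)
    # l2-normalize so that a dot product equals the cosine similarity
    Z_a, Z_b = F.normalize(z_a, dim=1), F.normalize(z_b, dim=1)
    P_a, P_b = F.normalize(p_a, dim=1), F.normalize(p_b, dim=1)
    return -((P_a * Z_b.detach()).sum(dim=1).mean()
             + (P_b * Z_a.detach()).sum(dim=1).mean())
```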
# 2.1 REVISITING EXPLANATORY CLAIMS IN SIMSIAM

Interpreting stop gradient as AO. Chen & He hypothesize that the stop gradient in Eq 2 is an implementation of Alternating between the Optimization of two sub-problems, which is denoted as AO. Specifically, with the loss considered as $\mathcal{L}(\theta,\eta) = \mathbb{E}_{x,\mathcal{T}}\big[\|\mathcal{F}_{\theta}(\mathcal{T}(x)) - \eta_{x}\|^{2}\big]$, the optimization objective $\min_{\theta,\eta}\mathcal{L}(\theta,\eta)$ can be solved by alternating $\eta^{t}\gets \arg\min_{\eta}\mathcal{L}(\theta^{t},\eta)$ and $\theta^{t+1}\gets \arg\min_{\theta}\mathcal{L}(\theta,\eta^{t})$. It is acknowledged that this hypothesis does not fully explain why the collapse is prevented (Chen & He, 2021). Nonetheless, they mainly attribute the success of SimSiam to the stop gradient, with the interpretation that AO might make it difficult to approach a constant $\forall x$.

Interpreting predictor as EOA. The AO problem (Chen & He, 2021) is formulated independently of predictor $h$, for which they believe that the usage of predictor $h$ is related to approximating EOA for filling the gap of ignoring $\mathbb{E}_{\mathcal{T}}[\cdot]$ in a sub-problem of AO. The approximation of $\mathbb{E}_{\mathcal{T}}[\cdot]$ is summarized in Appendix A.2. Chen & He support their interpretation with proof-of-concept experiments. Specifically, they show that updating $\eta_{x}$ with a moving average $\eta_x^t\gets m*\eta_x^t + (1 - m)*\mathcal{F}_{\theta^t}(\mathcal{T}'(x))$ can help prevent collapse without the predictor (see Fig. 1 (b)). Given that the training completely fails when the predictor and moving average are both removed, at first sight, their reasoning seems valid.
# 2.2 DOES THE PREDICTOR FILL THE GAP TO APPROXIMATE EOA?

Reasoning flaw. Considering the stop gradient, we divide the framework into two sub-models with different paths and term them Gradient Path (GP) and Stop Gradient Path (SGP). For SimSiam, only the sub-model with GP includes the predictor (see Fig. 1 (a)). We point out that the flaw in their predictor analysis lies in reversing GP and SGP. By default, the moving-average sub-model, as shown in Fig. 1 (b), is on the same side as SGP. Note that Fig. 1 (b) is conceptually similar to Fig. 1 (c) instead of Fig. 1 (a). It is worth mentioning that the Mirror SimSiam in Fig. 1 (c) is exactly what the stop gradient in the original SimSiam avoids. Therefore, it is problematic to perceive $h$ as EOA.

[Figure 1 image: panels (a) SimSiam, (b) Moving Average, (c) Mirror SimSiam]
Figure 1: Reasoning Flaw in SimSiam. (a) Standard SimSiam architecture. (b) Moving-Average Model proposed in the proof-of-concept experiment (Chen & He, 2021). (c) Mirror SimSiam, which has the same model architecture as SimSiam but with the reverse of GP and SGP.
[Figure 2 image: panels (a) Naive Siamese, (b) SimSiam with Symmetric Predictor, (c) SimSiam with Inverse Predictor]
Figure 2: Different architectures of Siamese model. When it is trained experimentally, the inverse predictor in (c) has the same architecture as predictor $h$.

| Method | # aug | Collapse | Std | Top-1 (%) |
| --- | --- | --- | --- | --- |
| Moving average | 2 | × | 0.0108 | 46.57 |
| Same batch | 10 | ✓ | 0 | 1 |
| Same batch | 25 | ✓ | 0 | 1 |

Table 1: Influence of Explicit EOA. Detailed setup is reported in Appendix A.3.
Explicit EOA does not prevent collapse. (Chen & He, 2021) points out that "in practice, it would be unrealistic to actually compute the expectation $\mathbb{E}_{\mathcal{T}}[\cdot]$. But it may be possible for a neural network (e.g., the predictor $h$) to learn to predict the expectation, while the sampling of $\mathcal{T}$ is implicitly distributed across multiple epochs." If implicitly sampling across multiple epochs is beneficial, explicitly sampling a sufficiently large number $N$ of augmentations in a batch with the latest model would be even more beneficial for approximating $\mathbb{E}_{\mathcal{T}}[\cdot]$. However, Table 1 shows that the collapse still occurs, which suggests that the equivalence between predictor and EOA does not hold.
# 2.3 ASYMMETRIC INTERPRETATION OF PREDICTOR WITH STOP GRADIENT IN SIMSIAM

Symmetric predictor does not prevent collapse. The difference between Naive Siamese and SimSiam lies in whether the gradient in backward propagation flows through a predictor; however, we show that this propagation helps avoid collapse only when the predictor is not included in the SGP path. With $h$ trained the same as in Eq 2, we optimize the encoder $f$ by replacing the $Z$ in Eq 2 with $P$. The results in Table 2 show that it still leads to collapse. Actually, this is well expected by perceiving $h$ to be part of a new encoder $F$, i.e. $\boldsymbol{p} = F(x) = h(f(x))$. In other words, the symmetric architectures with and without predictor $h$ both lead to collapse.

Predictor with stop gradient is asymmetric. Clearly, how SimSiam avoids collapse lies in its asymmetric architecture, i.e. one path with $h$ and the other without $h$. Under this asymmetric architecture, the role of stop gradient is to only allow the path with the predictor to be optimized with the encoder output as the target, not vice versa. In other words, SimSiam avoids collapse by excluding Mirror SimSiam (Fig. 1 (c)), which has a mirror-like loss of Eq 2, $\mathcal{L}_{\mathrm{Mirror}} = -(P_a \cdot Z_b + P_b \cdot Z_a)$, where the stop gradient is put on the input of $h$, i.e. $p_a = h(\mathrm{sg}[z_a])$ and $p_b = h(\mathrm{sg}[z_b])$.

Predictor vs. inverse predictor. We interpret $h$ as a function mapping from $\boldsymbol{z}$ to $\boldsymbol{p}$, and introduce a conceptual inverse mapping $h^{-1}$, i.e. $\boldsymbol{z} = h^{-1}(\boldsymbol{p})$. As shown in Table 2, SimSiam with a symmetric predictor (Fig. 2 (b)) leads to collapse, while SimSiam (Fig. 1 (a)) avoids collapse. With the conceptual $h^{-1}$, we interpret Fig. 1 (a) the same as Fig. 2 (c), which differs from Fig. 2 (b) by changing the optimization target from $\boldsymbol{p}_b$ to $\boldsymbol{z}_b$, i.e. $\boldsymbol{z}_b = h^{-1}(\boldsymbol{p}_b)$. This interpretation suggests that the collapse can be avoided by processing the optimization target with $h^{-1}$. By contrast, Fig. 1 (c) and Fig. 2 (a) both lead to collapse, suggesting that processing the optimization target with $h$ is not beneficial for preventing collapse. Overall, asymmetry alone does not guarantee collapse avoidance, which requires the optimization target to be processed by $h^{-1}$, not $h$.

| Method | Collapse | Top-1 (%) |
| --- | --- | --- |
| SimSiam | × | 66.62 |
| Mirror SimSiam | ✓ | 1 |
| Naive Siamese | ✓ | 1 |
| Symmetric Predictor | ✓ | 1 |

Table 2: Results of various Siamese architectures. Detailed trend and setup are reported in Appendix A.4.
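Reusing the names from the sketch after Eq 2 (again our own illustration, not reference code), the contrast between SimSiam and Mirror SimSiam comes down to which side of the predictor the stop gradient sits on:

```python
# SimSiam (Fig. 1 (a)): the target branch is stopped; gradient reaches the
# encoder only through the predictor path.
loss_simsiam = -(P_a * Z_b.detach()).sum(dim=1).mean()

# Mirror SimSiam (Fig. 1 (c)): sg(.) is moved to the input of h, i.e.
# p_a = h(sg[z_a]), so gradient reaches the encoder only through the
# target branch -- this configuration collapses.
P_a_mirror = F.normalize(predictor(z_a.detach()), dim=1)
loss_mirror = -(P_a_mirror * Z_b).sum(dim=1).mean()
```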
Trainable inverse predictor and its implication on EOA. In the above, we proposed a conceptual inverse predictor $h^{-1}$ in Fig. 2 (c); however, it remains unknown whether such an inverse predictor is experimentally trainable. A detailed setup for this investigation is reported in Appendix A.5. The results in Fig. 3 show that a learnable $h^{-1}$ leads to slightly inferior performance, which is expected because $h^{-1}$ cannot make the trainable inverse predictor output $z_{b}^{*}$ completely the same as $z_{b}$. Note that it would be equivalent to SimSiam if $z_{b}^{*} = z_{b}$. Despite a slight performance drop, the results confirm that $h^{-1}$ is trainable. The fact that $h^{-1}$ is trainable provides additional evidence that the role $h$ plays in SimSiam is not EOA, because theoretically $h^{-1}$ cannot restore a random augmentation $\mathcal{T}'$ from an expectation $\boldsymbol{p}$, where $\boldsymbol{p} = h(\boldsymbol{z}) = \mathbb{E}_{\mathcal{T}}\big[\mathcal{F}_{\theta^t}(\mathcal{T}(x))\big]$.

[Figure 3 image]
Figure 3: Comparison of original SimSiam and SimSiam with Inverse Predictor.
# 3 VECTOR DECOMPOSITION FOR UNDERSTANDING COLLAPSE

By default, InfoNCE (Chen et al., 2020a) and SimSiam (Chen & He, 2021) both adopt $l_{2}$-normalization in their loss for avoiding scale ambiguity. We treat the $l_{2}$-normalized vector, i.e. $Z$, as the encoder output, which significantly simplifies the gradient derivation and the following analysis.

Vector decomposition. For the purpose of analysis, we propose to decompose $\mathbf{Z}$ into two parts, $\mathbf{Z} = \mathbf{o} + \mathbf{r}$, where $\mathbf{o}$ and $\mathbf{r}$ denote the center vector and the residual vector, respectively. Specifically, the center vector $\mathbf{o}$ is defined as the average of $\mathbf{Z}$ over the whole representation space, $\mathbf{o}_z = \mathbb{E}[\mathbf{Z}]$; in practice, we approximate it with all vectors in the current mini-batch, i.e. $\mathbf{o}_z = \frac{1}{M}\sum_{m=1}^{M}\mathbf{Z}_m$, where $M$ is the mini-batch size. We define the residual vector $\mathbf{r}$ as the residual part of $\mathbf{Z}$, i.e. $\mathbf{r} = \mathbf{Z} - \mathbf{o}_z$.
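A sketch of the batch approximation described above (our own helper; `z` is assumed to be a mini-batch of encoder outputs of shape `M x d`):

```python
import torch
import torch.nn.functional as F

def decompose(z):
    """Decompose l2-normalized representations into center + residual parts."""
    Z = F.normalize(z, dim=1)          # rows of Z are unit vectors
    o_z = Z.mean(dim=0, keepdim=True)  # center vector: mini-batch mean of Z
    r = Z - o_z                        # residual vectors
    m_o = o_z.norm().item()            # ||o|| / ||Z||, with ||Z|| = 1 per row
    m_r = r.norm(dim=1).mean().item()  # average ||r|| / ||Z||
    return o_z, r, m_o, m_r            # collapse: m_o -> 1 and m_r -> 0
```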
# 3.1 COLLAPSE FROM THE VECTOR PERSPECTIVE

Collapse: from result to cause. A Naive Siamese is well expected to collapse since the loss is designed to minimize the distance between positive samples, for which a constant constitutes an optimal solution. When the collapse occurs, $\forall i, Z_i = \frac{1}{M} \sum_{m=1}^{M} Z_m = o_z$, where $i$ denotes a random sample index, which shows that the constant vector is $o_z$ in this case. This interpretation only suggests the possibility that a dominant $o$ can be one of the viable solutions, while the optimization, such as in SimSiam, might still lead to a non-collapse solution. It merely describes $o$ as the consequence of the collapse; our work investigates the cause of such collapse by analyzing the influence of the individual gradient components, i.e. $o$ and $r$, during training.

Competition between $o$ and $r$. Complementary to the Standard Deviation (Std) (Chen & He, 2021) for indicating collapse, we introduce the ratio of $o$ in $z$, i.e. $m_{o} = \|o\| / \|z\|$, where $\|\cdot\|$ is the $l_{2}$ norm. Similarly, the ratio of $r$ in $z$ is defined as $m_{r} = \|r\| / \|z\|$. When collapse happens, i.e. all vectors $Z$ are close to the center vector $o$, $m_{o}$ approaches 1 and $m_{r}$ approaches 0, which is not desirable for SSL. A desirable case would be a relatively small $m_{o}$ and a relatively large $m_{r}$, suggesting a relatively small (large) contribution of $o$ ($r$) in each $Z$. We interpret the cause of collapse as a competition between $o$ and $r$ in which $o$ dominates over $r$, i.e. $m_{o} \gg m_{r}$. For Eq 1, the derived negative gradient on $Z_{a}$ (ignoring $Z_{b}$ for simplicity due to symmetry) is:

$$
\mathcal{G}_{\text{cosine}} = -\frac{\partial \mathcal{L}_{MSE}}{\partial \mathbf{Z}_{a}} = \mathbf{Z}_{b} - \mathbf{Z}_{a} \Longleftrightarrow -\frac{\partial \mathcal{L}_{\text{cosine}}}{\partial \mathbf{Z}_{a}} = \mathbf{Z}_{b}, \tag{3}
$$

where the gradient component $Z_{a}$ is a dummy term because the loss $-Z_{a} \cdot Z_{a} = -1$ is a constant with zero gradient on the encoder $f$.
Conjecture 1. With $Z_{a} = o_{z} + r_{a}$, we conjecture that the gradient component $o_{z}$ is expected to update the encoder to boost the center vector and thus increase $m_{o}$, while the gradient component $r_{a}$ is expected to behave in the opposite direction and increase $m_{r}$. A random gradient component is expected to have a relatively small influence.

To verify the above conjecture, we revisit the dummy gradient term $Z_{a}$. We design the losses $-Z_{a} \cdot \mathrm{sg}(o_{z})$ and $-Z_{a} \cdot \mathrm{sg}(Z_{a} - o_{z})$ to show the influence of the gradient components $o_{z}$ and $r_{a}$, respectively. The results in Fig. 4 show that the gradient component $o_{z}$ has the effect of increasing $m_{o}$ while decreasing $m_{r}$. On the contrary, $r_{a}$ helps increase $m_{r}$ while decreasing $m_{o}$.

[Figure 4 image]
Figure 4: Influence of various gradient components on $m_r$ and $m_o$.
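The two probe losses above can be written directly against a normalized batch `Z` (a sketch of ours; `detach()` again stands in for $\mathrm{sg}(\cdot)$):

```python
o_z = Z.mean(dim=0, keepdim=True)
# -Z_a . sg(o_z): pulls every vector toward the center, increasing m_o
loss_center = -(Z * o_z.detach()).sum(dim=1).mean()
# -Z_a . sg(Z_a - o_z): pulls each vector toward its own residual, increasing m_r
loss_residual = -(Z * (Z - o_z).detach()).sum(dim=1).mean()
```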
# 3.2 EXTRA GRADIENT COMPONENT FOR ALLEVIATING COLLAPSE

Revisiting collapse in a symmetric architecture. Based on Conjecture 1, we here provide an intuitive interpretation of why a symmetric Siamese architecture, such as Fig. 2 (a) and (b), cannot be trained without collapse. Taking Fig. 2 (a) as an example, the gradient in Eq 3 can be written in two equivalent forms, from which we choose $Z_{b} - Z_{a} = (o_{z} + r_{b}) - (o_{z} + r_{a}) = r_{b} - r_{a}$. Since $r_{b}$ comes from the same positive sample as $r_{a}$, it is expected that $r_{b}$ also increases $m_{r}$; however, this effect is expected to be smaller than that of $r_{a}$, thus causing collapse.

Basic gradient and extra gradient components. The negative gradient on $Z_{a}$ in Fig. 2 (a) is derived as $Z_{b}$, while that on $P_{a}$ in Fig. 2 (b) is derived as $P_{b}$. We perceive $Z_{b}$ and $P_{b}$ in these basic Siamese architectures as the Basic Gradient. Our above interpretation shows that such basic components cannot prevent collapse, for which an Extra Gradient component, denoted as $G_{e}$, needs to be introduced to break the symmetry. As the term suggests, $G_{e}$ is defined as a gradient term relative to the basic gradient in a basic Siamese architecture. For example, negative samples can be introduced into Naive Siamese (Fig. 2 (a)) for preventing collapse, where the extra gradient caused by the negative samples can thus be perceived as $G_{e}$ with $Z_{b}$ as the basic gradient. Similarly, we can also disentangle the negative gradient on $P_{a}$ in SimSiam (Fig. 1 (a)), i.e. $Z_{b}$, into a basic gradient (which is $P_{b}$) and $G_{e}$, which is derived as $Z_{b} - P_{b}$ (note that $Z_{b} = P_{b} + G_{e}$). We analyze how $G_{e}$ prevents collapse by studying the independent roles of its center vector $o_{e}$ and residual vector $r_{e}$.
# 3.3 A TOY EXAMPLE EXPERIMENT WITH A NEGATIVE SAMPLE

Which repulsive component helps avoid collapse? Existing works often attribute the collapse in Naive Siamese to lacking a repulsive part during the optimization. This explanation has motivated previous works to adopt contrastive learning, i.e. attracting the positive samples while repulsing the negative samples. We experiment with a simple triplet loss$^1$, $\mathcal{L}_{tri} = -Z_a \cdot \mathrm{sg}(Z_b - Z_n)$, where $Z_n$ indicates the representation of a negative sample. The derived negative gradient on $Z_a$ is $Z_b - Z_n$, where $Z_b$ is the basic gradient component and thus $G_e = -Z_n$ in this setup. For a sample representation, what determines it as a positive sample for attracting or a negative sample for repulsing is the residual component, so it might be tempting to conclude that $r_e$ is the key component of the repulsive part that avoids the collapse. However, the results in Table 3 show that the component inside $G_e$ beneficial for preventing collapse is $o_e$ instead of $r_e$. Specifically, to explore the individual influence of $o_e$ and $r_e$ in $G_e$, we design two experiments by removing one component while keeping the other. In the first experiment, we remove $r_e$ in $G_e$ while keeping $o_e$; by contrast, $o_e$ is removed while keeping $r_e$ in the second experiment. In contrast to what existing explanations may suggest, we find that it is the center component $o_e$ that prevents collapse. With Conjecture 1, a gradient component alleviates collapse if it has a negative center vector. In this setup, $o_e = -o_z$, thus $o_e$ plays the de-centering role for preventing collapse. On the contrary, $r_e$ does not prevent collapse, and keeping $r_e$ even decreases the performance (36.21% < 47.41%). Since the negative sample is randomly chosen, $r_e$ just behaves like random noise on the optimization and decreases performance.

| Method | $\mathcal{L}_{triplet}$ | Std | $m_o$ | $m_r$ | Collapse | Top-1 (%) |
| --- | --- | --- | --- | --- | --- | --- |
| Baseline | $-Z_a \cdot \mathrm{sg}(Z_b + G_e)$ | 0.020 | 0.026 | 0.99 | × | 36.21 |
| Removing $r_e$ | $-Z_a \cdot \mathrm{sg}(Z_b + o_e)$ | 0.02005 | 0.026 | 0.99 | × | 47.41 |
| Removing $o_e$ | $-Z_a \cdot \mathrm{sg}(Z_b + r_e)$ | 0 | 1 | 0 | ✓ | 1 |

Table 3: Gradient component analysis with a random negative sample.
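A sketch of the toy triplet ablation (our illustration, not the paper's code; `Z_a`, `Z_b`, `Z_n` are assumed to be normalized batches of anchor, positive, and random negative representations):

```python
def triplet_toy_loss(Z_a, Z_b, Z_n, mode="full"):
    """L_tri = -Z_a . sg(Z_b - Z_n), where G_e = -Z_n = o_e + r_e."""
    o_z = Z_n.mean(dim=0, keepdim=True)  # batch approximation of the center
    if mode == "full":                   # keep both o_e and r_e
        target = Z_b - Z_n
    elif mode == "keep_center":          # remove r_e: G_e = o_e = -o_z
        target = Z_b - o_z
    else:                                # "keep_residual": remove o_e, keep r_e
        target = Z_b - (Z_n - o_z)
    return -(Z_a * target.detach()).sum(dim=1).mean()
```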
# 3.4 DECOMPOSED GRADIENT ANALYSIS IN SIMSIAM

It is challenging to derive the gradient on the encoder output in SimSiam due to a nonlinear MLP module in $h$. The negative gradient on $P_{a}$ for $\mathcal{L}_{\text{SimSiam}}$ in Eq 2 can be derived as

$$
\mathcal{G}_{\text{SimSiam}} = -\frac{\partial \mathcal{L}_{\text{SimSiam}}}{\partial \boldsymbol{P}_{a}} = \boldsymbol{Z}_{b} = \boldsymbol{P}_{b} + \left(\boldsymbol{Z}_{b} - \boldsymbol{P}_{b}\right) = \boldsymbol{P}_{b} + \boldsymbol{G}_{e}, \tag{4}
$$

where $G_{e}$ indicates the aforementioned extra gradient component. To investigate the influence of $o_{e}$ and $r_{e}$ on the collapse, similar to the analysis with the toy example experiment in Sec. 3.3, we design the experiment by removing one component while keeping the other. The results are reported in Table 4. As expected, the model collapses when both components in $G_{e}$ are removed, and the best performance is achieved when both components are kept. Interestingly, the model does not collapse when either $o_e$ or $r_e$ is kept. To start, we analyze how $o_e$ affects the collapse based on Conjecture 1.

| $o_e$ | $r_e$ | Collapse | Top-1 (%) |
| --- | --- | --- | --- |
| ✓ | ✓ | × | 66.62 |
| ✓ | × | × | 48.08 |
| × | ✓ | × | 66.15 |
| × | × | ✓ | 1 |

Table 4: Gradient component analysis for SimSiam.
How $o_e$ alleviates collapse in SimSiam. Here, $o_p$ is used to denote the center vector of $P$, to differentiate it from the above-introduced $o_z$ denoting that of $Z$. In this setup $G_e = Z_b - P_b$, thus the center gradient component is derived to be $o_e = o_z - o_p$. With Conjecture 1, it is well expected that $o_e$ helps prevent collapse if $o_e$ contains negative $o_p$, since the analyzed vector is $P_a$. To determine the amount of $o_p$ existing in $o_e$, we measure the cosine similarity between $o_e - \eta_p o_p$ and $o_p$ for a wide range of $\eta_p$. The results in Fig. 5 (a) show that their cosine similarity is zero when $\eta_p$ is around $-0.5$, suggesting $o_e$ contains $\approx -0.5\,o_p$. With Conjecture 1, this negative $\eta_p$ explains why SimSiam avoids collapse from the perspective of de-centering.

How $o_e$ causes collapse in Mirror SimSiam. As mentioned above, the collapse occurs in Mirror SimSiam, which can also be explained by analyzing its $o_e$. Here, $o_e = o_p - o_z$, for which we evaluate the amount of $o_z$ existing in $o_e$ via the cosine similarity between $o_e - \eta_z o_z$ and $o_{z}$. The results in Fig. 5 (a) show that their cosine similarity is zero when $\eta_{z}$ is set to around 0.2. This positive $\eta_{z}$ explains why Fig. 1 (c) causes collapse from the perspective of de-centering.

Overall, we find that processing the optimization target with $h^{-1}$, as in Fig. 2 (c), alleviates collapse ($\eta_{p}\approx -0.5$), while processing it with $h$, as in Fig. 1 (c), actually strengthens the collapse ($\eta_z\approx 0.2$). In other words, via the analysis of $o_e$, our results help explain how SimSiam avoids collapse as well as how Mirror SimSiam causes collapse, from a straightforward de-centering perspective.
[Figure 5 image: panels (a), (b), (c)]
Figure 5: (a) Investigating the amount of $o_p$ existing in $o_z - o_p$ and the amount of $o_z$ existing in $o_p - o_z$. (b) Normally train the model as SimSiam for 5 epochs, then use a collapsing loss for 1 epoch to reduce $m_r$, followed by a correlation regularization loss. (c) Cosine similarity between $r_e$ ($o_e$) and the gradient on $Z_a$ induced by a correlation regularization loss.
+
Relation to prior works. Motivated from preventing the collapse to a constant, multiple prior works, such as W-MSE (Ermolov et al., 2021), Barlow-twins (Zbontar et al., 2021), DINO (Caron et al., 2021), explicitly adopt de-centering to prevent collapse. Despite various motivations, we find that they all implicitly introduce an $o_e$ that contains a negative center vector. The success of their approaches aligns well with our Conjecture1 as well as our above empirical results. Based on our findings, we argue that the effect of de-centering can be perceived as $o_e$ having a negative center vector. With this interpretation, we are the first to demonstrate that how SimSiam with predictor and stop gradient avoids collapse can be explained from the perspective of de-centering.
|
| 180 |
+
|
| 181 |
+
Beyond de-centering for avoiding collapse. In the toy example experiment in Sec. 3.3, $\boldsymbol{r}_e$ is found to be not beneficial for preventing collapse and keeping $\boldsymbol{r}_e$ even decreases the performance. Interestingly, as shown in Table 4, we find that $\boldsymbol{r}_e$ alone is sufficient for preventing collapse and achieves comparable performance as $\boldsymbol{G}_e$ . This can be explained from the perspective of dimensional de-correlation, which will be discussed in Sec. 3.5.
|
| 182 |
+
|
| 183 |
+
# 3.5 DIMENSIONAL DE-CORRELATION HELPS PREVENT COLLAPSE
|
| 184 |
+
|
| 185 |
+
Conjecture2 and motivation. We conjecture that dimensional de-correlation increases $m_r$ for preventing collapse. The motivation is straightforward as follows. The dimensional correlation would be minimum if only a single dimension has a very high value for every individual class and the dimension changes for different classes. In another extreme case, when all the dimensions have the same values, equivalent to having a single dimension, which already collapses by itself in the sense of losing representation capacity. Conceptually, $r_e$ has no direct influence on the center vector, thus we interpret that $r_e$ prevents collapse through increasing $m_r$ .
|
| 186 |
+
|
| 187 |
+
To verify the above conjecture, we train SimSiam normally with the loss in Eq 2 and train for several epochs with the loss in Eq 1 for intentionally decreasing the $m_r$ to close to zero. Then, we train the loss with only a correlation regularization term, which is detailed in Appendix A.6. The results in Fig. 5 (b) show that this regularization term increases $m_r$ at a very fast rate.
|
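The exact regularization term is given in the paper's Appendix A.6, which is not reproduced here; an off-diagonal covariance penalty in the spirit of (Zbontar et al., 2021; Bardes et al., 2021) is one plausible form, sketched below under that assumption:

```python
import torch

def correlation_regularization(Z):
    """Penalize cross-dimension covariance of a batch of representations."""
    Zc = Z - Z.mean(dim=0, keepdim=True)          # de-center each dimension
    cov = (Zc.T @ Zc) / (Z.shape[0] - 1)          # d x d covariance matrix
    off_diag = cov - torch.diag(torch.diag(cov))  # keep cross-dimension terms only
    return (off_diag ** 2).sum()
```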
Dimensional de-correlation in SimSiam. Assuming $h$ has only a single FC layer, to exclude the influence of $o_e$, the weights in the FC are expected to learn the correlation between different dimensions of the encoder output. This interpretation echoes the finding that the eigenspace of the $h$ weight aligns well with that of the correlation matrix (Tian et al., 2021). In essence, $h$ is trained to maximize the cosine similarity between $h(\mathbf{z}_a)$ and $I(\mathbf{z}_b)$, where $I$ is the identity mapping. Thus, $h$ that learns the correlation is optimized close to $I$, which is conceptually equivalent to optimizing with the goal of de-correlation for $Z$. As shown in Table 4, for SimSiam, $r_e$ alone also prevents collapse, which is attributed to the de-correlation effect since $r_e$ has no de-centering effect. We observe from Fig. 6 that, except in the first few epochs, SimSiam decreases the covariance during the whole training. Fig. 6 also reports the results for InfoNCE, which will be discussed in Sec. 4.

[Figure 6 image]
Figure 6: Influence of various gradient components on $m_r$ and $m_o$.
# 4 TOWARDS A UNIFIED UNDERSTANDING OF RECENT PROGRESS IN SSL

De-centering and de-correlation in InfoNCE. The InfoNCE loss is a default choice in multiple seminal contrastive learning frameworks (Sohn, 2016; Wu et al., 2018; Oord et al., 2018; Wang & Liu, 2021). The derived negative gradient of InfoNCE on $Z_{a}$ is proportional to $Z_{b} + \sum_{i=0}^{N} -\lambda_{i}Z_{i}$, where $\lambda_{i} = \frac{\exp(Z_{a}\cdot Z_{i}/\tau)}{\sum_{j=0}^{N}\exp(Z_{a}\cdot Z_{j}/\tau)}$ and $Z_{0} = Z_{b}$ for notation simplicity. See Appendix A.7 for the detailed derivation. The extra gradient component is $G_{e} = \sum_{i=0}^{N} -\lambda_{i}Z_{i} = -o_{z} - \sum_{i=0}^{N}\lambda_{i}r_{i}$, for which $o_{e} = -o_{z}$ and $r_{e} = -\sum_{i=0}^{N}\lambda_{i}r_{i}$. Clearly, $o_{e}$ contains negative $o_{z}$ as de-centering for avoiding collapse, which is equivalent to the toy example in Sec. 3.3 when $r_{e}$ is removed. Regarding $r_{e}$, the main difference between $\mathcal{L}_{tri}$ in the toy example and InfoNCE is that the latter exploits a batch of negative samples instead of a random one. $\lambda_{i}$ is proportional to $\exp(Z_{a}\cdot Z_{i})$, indicating that a large weight is put on a negative sample when it is more similar to the anchor $Z_{a}$, for which, intuitively, its dimensional values tend to have a high correlation with $Z_{a}$. Thus, $r_{e}$, containing such negative representations with high weights, tends to decrease the dimensional correlation. To verify this intuition, we measure the cosine similarity between $r_{e}$ and the gradient on $Z_{a}$ induced by a correlation regularization loss. The results in Fig. 5 (c) show that their gradient similarity is high for a wide range of temperature values, especially when $\tau$ is around 0.1 or 0.2, suggesting that $r_{e}$ plays a similar role to an explicit regularization loss performing de-correlation. Replacing $r_{e}$ with $o_{e}$ leads to a low cosine similarity, which is expected because $o_{e}$ has no de-correlation effect.
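The decomposition of the InfoNCE extra gradient can be made concrete for a single anchor (a sketch of ours; `Z_all` is assumed to stack $Z_0 = Z_b$ and the $N$ negatives row-wise):

```python
import torch
import torch.nn.functional as F

def infonce_extra_gradient(Z_a, Z_all, tau=0.1):
    """Return G_e and its center/residual parts for one anchor."""
    lam = F.softmax(Z_all @ Z_a / tau, dim=0)     # lambda_i weights
    G_e = -(lam.unsqueeze(1) * Z_all).sum(dim=0)  # sum_i -lambda_i Z_i
    o_z = Z_all.mean(dim=0)                       # batch approximation of E[Z]
    o_e = -o_z                                    # de-centering component
    r_e = G_e - o_e                               # = -sum_i lambda_i r_i
    return G_e, o_e, r_e
```

Since $\sum_i \lambda_i = 1$, subtracting $o_e = -o_z$ from $G_e$ leaves exactly $-\sum_i \lambda_i r_i$.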
The results of InfoNCE in Fig. 6 resemble those of SimSiam in terms of the overall trend. For example, InfoNCE also decreases the covariance value during training. Moreover, we also report the results of InfoNCE where $r_e$ is removed, to exclude the de-correlation effect. Removing $r_e$ from the InfoNCE loss leads to a high covariance value during the whole training. Removing $r_e$ also leads to a significant performance drop, which echoes the finding in (Bardes et al., 2021) that dimensional de-correlation is essential for competitive performance. Regarding how $r_e$ in InfoNCE achieves de-correlation, formally, we hypothesize that the de-correlation effect in InfoNCE arises from the biased weights ($\lambda_i$) on negative samples. This hypothesis is corroborated by the temperature analysis in Fig. 7. We find that a higher temperature makes the weight distribution of $\lambda_i$ more balanced, as indicated by a higher entropy of $\lambda_i$, which echoes the finding in (Wang & Liu, 2021). Moreover, we observe that a higher temperature also tends to increase the covariance value. Overall, with temperature as the control variable, we find that more balanced weights among negative samples decrease the de-correlation effect, which constitutes evidence for our hypothesis.

Unifying SimSiam and InfoNCE. At first sight, there is no conceptual similarity between SimSiam and InfoNCE, and this is why the community is intrigued by the success of SimSiam without negative samples. Through decomposing $G_{e}$ into $o_{e}$ and $r_{e}$, we find that for both, their $o_{e}$ plays the role of de-centering and their $r_{e}$ behaves like de-correlation. In this sense, we bring two seemingly irrelevant frameworks into a unified perspective with disentangled de-centering and de-correlation.

Beyond SimSiam and InfoNCE. In SSL, there is a trend of performing explicit manipulation of de-centering and de-correlation, for which W-MSE (Ermolov et al., 2021), Barlow-twins (Zbontar et al., 2021), and DINO (Caron et al., 2021) are three representative works. They often achieve performance comparable to that with InfoNCE or SimSiam. Towards a unified understanding of recent progress in SSL, our work is most similar to a concurrent work (Bardes et al., 2021). Their work is mainly inspired by Barlow-twins (Zbontar et al., 2021) but decomposes its loss into three explicit components. By contrast, our work is motivated to answer the question of how SimSiam prevents collapse without negative samples. Their work claims that the variance component (equivalent to de-centering) is an indispensable component for preventing collapse, while we find that de-correlation itself alleviates collapse. Overall, our work helps understand various frameworks in SSL from a unified perspective, which also inspires an investigation of inter-anchor hardness-awareness (Zhang et al., 2022) for further bridging the gap between CL and non-CL frameworks in SSL.

[Figure 7 image: panels (a), (b), (c)]
Figure 7: Influence of temperature. (a) Entropy of $\lambda_{i}$ with regard to temperature; (b) Top-1 accuracy trend with various temperatures; (c) Covariance trend with various temperatures.
# 5 TOWARDS SIMPLIFYING THE PREDICTOR IN SIMSIAM

Based on our understanding of how SimSiam prevents collapse, we demonstrate that simple components (instead of the non-linear MLP in SimSiam) in the predictor are sufficient for preventing collapse. For example, to achieve dimensional de-correlation, a single FC layer might be sufficient because a single FC layer can realize the interaction among various dimensions. On the other hand, to achieve de-centering, a single bias layer might be sufficient because a bias vector can represent the center vector. Attaching an $l_{2}$-normalization layer at the end of the encoder, i.e. before the predictor, is found to be critical for achieving the above goal.

Predictor with FC layers. To learn the dimensional correlation, an FC layer is theoretically sufficient but can be difficult to train in practice. Inspired by the property that multiple FC layers make the training more stable even though they can be mathematically equivalent to a single FC layer (Bell-Kligler et al., 2019), we adopt two consecutive FC layers, which is equivalent to removing the BN and ReLU in the original predictor. The training can also be made more stable if a Tanh layer is applied to the adopted single FC after every iteration. Table 5 shows that they achieve performance comparable to that with a non-linear MLP.

| Method | Predictor | Top-1 (%) |
| --- | --- | --- |
| SimSiam | Non-linear MLP | 66.9 |
| Two FC | FC+FC+Bias | 66.7 |
| One FC | Tanh(FC) | 64.82 |
| One bias | Bias | 49.82 |

Table 5: Linear evaluation on CIFAR100.
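Both simplified predictors are only a few lines each; the following is a sketch with assumed dimensions, not the authors' code:

```python
import torch
import torch.nn as nn

class BiasPredictor(nn.Module):
    """Single-bias-layer predictor: p = z + b. The learned b can track
    (a scaled version of) the center vector and provide de-centering."""
    def __init__(self, dim=2048):
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(dim))

    def forward(self, z):
        return z + self.bias

def two_fc_predictor(dim=2048, hidden=512):
    """Two consecutive FC layers, i.e. the MLP predictor without BN and ReLU."""
    return nn.Sequential(nn.Linear(dim, hidden, bias=False),
                         nn.Linear(hidden, dim, bias=True))
```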
| 233 |
+
|
| 234 |
+
Predictor with a bias layer. A predictor with a single bias layer can be utilized for preventing collapse (see Table 5) and the trained bias vector is found to have a cosine similarity of 0.99 with the center vector (see Table 6). A bias in the MLP predictor also has a high cosine similarity of 0.89, suggesting that it is not a coincidence. A theoretical derivation for justifying such a
|
| 235 |
+
|
| 236 |
+
Table 5: Linear evaluation on CIFAR100.
|
| 237 |
+
|
| 238 |
+
<table><tr><td>Bias</td><td>(1) single bias</td><td>(2) bias in MLP</td></tr><tr><td>Similarity</td><td>0.99</td><td>0.89</td></tr></table>
|
| 239 |
+
|
| 240 |
+
Table 6: Similarity between center vector and (1) single bias layer $(\mathbf{b}_p)$ , (2) the last bias layer of MLP in the predictor.
|
| 241 |
+
|
| 242 |
+
high similarity as well as how this single bias layer prevents collapse are discussed in Appendix A.8.

# 6 CONCLUSION

We point out a hidden flaw in prior works explaining the success of SimSiam and propose to decompose the representation vector and analyze the decomposed components of the extra gradient. We find that its center vector gradient helps prevent collapse via the de-centering effect, and its residual gradient achieves de-correlation, which also alleviates collapse. Our further analysis reveals that InfoNCE achieves the two effects in a similar manner, which bridges the gap between SimSiam and InfoNCE and contributes to a unified understanding of recent progress in SSL. Towards simplifying the predictor, we have also found that a single bias layer is sufficient for preventing collapse.

# ACKNOWLEDGEMENT

This work was partly supported by Institute for Information & communications Technology Planning & Evaluation (IITP) grants funded by the Korea government (MSIT): No. 2019-0-01396 (Development of framework for analyzing, detecting, mitigating of bias in AI model and training data), No. 2021-0-01381 (Development of Causal AI through Video Understanding and Reinforcement Learning, and Its Applications to Real Environments), and No. 2021-0-02068 (Artificial Intelligence Innovation Hub). During the rebuttal, multiple anonymous reviewers provided valuable advice that significantly improved the quality of this work. Thank you all.

# REFERENCES

Philip Bachman, R Devon Hjelm, and William Buchwalter. Learning representations by maximizing mutual information across views. arXiv preprint arXiv:1906.00910, 2019.

Adrien Bardes, Jean Ponce, and Yann LeCun. VICReg: Variance-invariance-covariance regularization for self-supervised learning. arXiv preprint arXiv:2105.04906, 2021.

Sefi Bell-Kligler, Assaf Shocher, and Michal Irani. Blind super-resolution kernel estimation using an internal-GAN. NeurIPS, 2019.

Jane Bromley, James W Bentz, Léon Bottou, Isabelle Guyon, Yann LeCun, Cliff Moore, Eduard Säckinger, and Roopak Shah. Signature verification using a "siamese" time delay neural network. International Journal of Pattern Recognition and Artificial Intelligence, 1993.

Mathilde Caron, Ishan Misra, Julien Mairal, Priya Goyal, Piotr Bojanowski, and Armand Joulin. Unsupervised learning of visual features by contrasting cluster assignments. arXiv preprint arXiv:2006.09882, 2020.

Mathilde Caron, Hugo Touvron, Ishan Misra, Hervé Jégou, Julien Mairal, Piotr Bojanowski, and Armand Joulin. Emerging properties in self-supervised vision transformers. arXiv preprint arXiv:2104.14294, 2021.

Ting Chen, Simon Kornblith, Mohammad Norouzi, and Geoffrey Hinton. A simple framework for contrastive learning of visual representations. In ICML, 2020a.

Xinlei Chen and Kaiming He. Exploring simple siamese representation learning. In CVPR, 2021.

Xinlei Chen, Haoqi Fan, Ross Girshick, and Kaiming He. Improved baselines with momentum contrastive learning. arXiv preprint arXiv:2003.04297, 2020b.

Xinlei Chen, Saining Xie, and Kaiming He. An empirical study of training self-supervised vision transformers. ICCV, 2021.

Victor G. Turrisi da Costa, Enrico Fini, Moin Nabi, Nicu Sebe, and Elisa Ricci. Solo-learn: A library of self-supervised methods for visual representation learning, 2021.

Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), 2019.

Alaaeldin El-Nouby, Hugo Touvron, Mathilde Caron, Piotr Bojanowski, Matthijs Douze, Armand Joulin, Ivan Laptev, Natalia Neverova, Gabriel Synnaeve, Jakob Verbeek, et al. XCiT: Cross-covariance image transformers. arXiv preprint arXiv:2106.09681, 2021.

Aleksandr Ermolov, Aliaksandr Siarohin, Enver Sangineto, and Nicu Sebe. Whitening for self-supervised representation learning. In ICML. PMLR, 2021.

Abe Fetterman and Josh Albrecht. Understanding self-supervised and contrastive learning with "bootstrap your own latent" (BYOL), 2020. https://untitled-ai.github.io/understanding-self-supervised-contrastive-learning.html.

Jean-Bastien Grill, Florian Strub, Florent Altché, Corentin Tallec, Pierre Richemond, Elena Buchatskaya, Carl Doersch, Bernardo Avila Pires, Zhaohan Guo, Mohammad Gheshlaghi Azar, et al. Bootstrap your own latent: A new approach to self-supervised learning. Advances in Neural Information Processing Systems, 2020.

Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. arXiv preprint arXiv:1911.05722, 2019.

Kaiming He, Haoqi Fan, Yuxin Wu, Saining Xie, and Ross Girshick. Momentum contrast for unsupervised visual representation learning. In CVPR, 2020.

Prannay Khosla, Piotr Teterwak, Chen Wang, Aaron Sarna, Yonglong Tian, Phillip Isola, Aaron Maschinot, Ce Liu, and Dilip Krishnan. Supervised contrastive learning. arXiv preprint arXiv:2004.11362, 2020.

Zhenzhong Lan, Mingda Chen, Sebastian Goodman, Kevin Gimpel, Piyush Sharma, and Radu Soricut. ALBERT: A lite BERT for self-supervised learning of language representations. In ICLR, 2020.

Chunyuan Li, Jianwei Yang, Pengchuan Zhang, Mei Gao, Bin Xiao, Xiyang Dai, Lu Yuan, and Jianfeng Gao. Efficient self-supervised vision transformers for representation learning. arXiv preprint arXiv:2106.09785, 2021.

Ping Nie, Yuyu Zhang, Xiubo Geng, Arun Ramamurthy, Le Song, and Daxin Jiang. DC-BERT: Decoupling question and document for efficient contextual encoding. In Proceedings of the 43rd International ACM SIGIR Conference on Research and Development in Information Retrieval, 2020.

Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv preprint arXiv:1807.03748, 2018.

Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, and Ilya Sutskever. Language models are unsupervised multitask learners. OpenAI blog, 2019.

Pierre H Richemond, Jean-Bastien Grill, Florent Altché, Corentin Tallec, Florian Strub, Andrew Brock, Samuel Smith, Soham De, Razvan Pascanu, Bilal Piot, et al. BYOL works even without batch statistics. arXiv preprint arXiv:2010.10241, 2020.

Florian Schroff, Dmitry Kalenichenko, and James Philbin. FaceNet: A unified embedding for face recognition and clustering. In CVPR, 2015.

Kihyuk Sohn. Improved deep metric learning with multi-class n-pair loss objective. In NeurIPS, 2016.

Weijie Su, Xizhou Zhu, Yue Cao, Bin Li, Lewei Lu, Furu Wei, and Jifeng Dai. VL-BERT: Pre-training of generic visual-linguistic representations. In ICLR, 2020.

Yonglong Tian, Dilip Krishnan, and Phillip Isola. Contrastive multiview coding. arXiv preprint arXiv:1906.05849, 2019.

Yuandong Tian, Xinlei Chen, and Surya Ganguli. Understanding self-supervised learning dynamics without contrastive pairs. arXiv preprint arXiv:2102.06810, 2021.

Feng Wang and Huaping Liu. Understanding the behaviour of contrastive loss. In CVPR, 2021.

Zhirong Wu, Yuanjun Xiong, Stella X Yu, and Dahua Lin. Unsupervised feature learning via non-parametric instance discrimination. In CVPR, 2018.

Jure Zbontar, Li Jing, Ishan Misra, Yann LeCun, and Stéphane Deny. Barlow twins: Self-supervised learning via redundancy reduction. ICML, 2021.

Chaoning Zhang, Kang Zhang, Trung X. Pham, Changdong Yoo, and In-So Kweon. Towards understanding and simplifying MoCo: Dual temperature helps contrastive learning without many negative samples. In CVPR, 2022.

# A APPENDIX

# A.1 EXPERIMENTAL SETTINGS

Self-supervised encoder training: Below are the settings for self-supervised encoder training. For simplicity, we mainly use the default settings of the popular open-source library solo-learn (da Costa et al., 2021).

Data augmentation and normalization: We use a series of transformations, including RandomResizedCrop with scale [0.2, 1.0] and bicubic interpolation. ColorJitter (brightness 0.4, contrast 0.4, saturation 0.4, hue 0.1) is randomly applied with probability 0.8. RandomGrayscale is applied with $p = 0.2$, and horizontal flip with $p = 0.5$. The images are normalized with mean (0.4914, 0.4822, 0.4465) and std (0.247, 0.243, 0.261).
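
For concreteness, the pipeline above can be written with torchvision as follows. This is a minimal sketch rather than the exact solo-learn configuration; the crop size of 32 assumes CIFAR-sized inputs.

```python
from torchvision import transforms

# Augmentation pipeline described above; parameters taken from the text.
train_transform = transforms.Compose([
    transforms.RandomResizedCrop(
        32, scale=(0.2, 1.0),
        interpolation=transforms.InterpolationMode.BICUBIC),
    transforms.RandomApply(
        [transforms.ColorJitter(0.4, 0.4, 0.4, 0.1)], p=0.8),
    transforms.RandomGrayscale(p=0.2),
    transforms.RandomHorizontalFlip(p=0.5),
    transforms.ToTensor(),
    transforms.Normalize(mean=(0.4914, 0.4822, 0.4465),
                         std=(0.247, 0.243, 0.261)),
])
```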

Network architecture and initialization: The backbone architecture is ResNet-18. The projection head contains three fully-connected (FC) layers, each followed by Batch Norm (BN) and ReLU, with the ReLU after the final FC layer removed, i.e. $FC_{1} + BN + ReLU + FC_{2} + BN + ReLU + FC_{3} + BN$. All projection FC layers have 2048 neurons for the input, output, and hidden dimensions. The predictor head includes two FC layers: $FC_{1} + BN + ReLU + FC_{2}$. The input and output of the predictor both have dimension 2048, while the hidden dimension is 512. All layers of the network use the default PyTorch initialization.
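
A minimal sketch of the projector and predictor heads described above. Note that a vanilla ResNet-18 backbone outputs 512-d features, so the first projector layer's input size may need adapting; the sketch follows the 2048-d figure stated in the text.

```python
import torch.nn as nn

d = 2048  # projector input/output/hidden dimension (per the text)

# Projector: FC1 + BN + ReLU + FC2 + BN + ReLU + FC3 + BN
# (the ReLU after the final FC is removed).
projector = nn.Sequential(
    nn.Linear(d, d), nn.BatchNorm1d(d), nn.ReLU(inplace=True),
    nn.Linear(d, d), nn.BatchNorm1d(d), nn.ReLU(inplace=True),
    nn.Linear(d, d), nn.BatchNorm1d(d),
)

# Predictor: FC1 + BN + ReLU + FC2 with a 512-d hidden bottleneck.
predictor = nn.Sequential(
    nn.Linear(d, 512), nn.BatchNorm1d(512), nn.ReLU(inplace=True),
    nn.Linear(512, d),
)
```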

Optimizer: The SGD optimizer is used for encoder training. The batch size $M$ is 256 and the learning rate is linearly scaled by the formula $lr \times M / 256$, with the base learning rate $lr$ set to 0.5. The learning rate follows the cosine decay schedule, as in SimSiam. Momentum 0.9 and weight decay $1.0 \times 10^{-5}$ are used for SGD. We use one GPU for each pre-training experiment. Following the practice of SimSiam, the learning rate of the predictor is fixed during training. We use warmup training for the first 10 epochs. If not specified, by default we train the model for 1000 epochs.
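
The optimizer setup can be sketched as follows; this is a hedged illustration in which `encoder` and `predictor` are the modules above, and the `fix_lr` flag (a custom param-group key, as in SimSiam's reference code) marks the predictor's fixed learning rate.

```python
import math
import torch

M = 256                  # batch size
lr = 0.5 * M / 256       # linearly scaled from the base learning rate 0.5

optimizer = torch.optim.SGD(
    [{"params": predictor.parameters(), "fix_lr": True},  # fixed predictor lr
     {"params": encoder.parameters()}],
    lr=lr, momentum=0.9, weight_decay=1e-5)

def adjust_lr(optimizer, epoch, total_epochs=1000, warmup=10):
    """Linear warmup for 10 epochs, then cosine decay; skips fix_lr groups."""
    if epoch < warmup:
        cur_lr = lr * (epoch + 1) / warmup
    else:
        t = (epoch - warmup) / (total_epochs - warmup)
        cur_lr = lr * 0.5 * (1.0 + math.cos(math.pi * t))
    for g in optimizer.param_groups:
        if not g.get("fix_lr", False):
            g["lr"] = cur_lr
```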

Online linear evaluation: For the online linear evaluation, we also follow the practice of the solo-learn library (da Costa et al., 2021). The frozen features (2048 dimensions) from the training set are extracted (from the self-supervised pre-trained model) and fed into a linear classifier (one FC layer with input 2048 and output 100). The test is performed on the validation set. The learning rate for the linear classifier is 0.1. Overall, we report Top-1 accuracy with the online linear evaluation in this work.
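
A sketch of the linear probe described above, assuming a frozen pre-trained `encoder` and a labeled `train_loader` (both names are illustrative):

```python
import torch
import torch.nn as nn

classifier = nn.Linear(2048, 100)  # 2048-d features, 100 CIFAR100 classes
probe_opt = torch.optim.SGD(classifier.parameters(), lr=0.1)
criterion = nn.CrossEntropyLoss()

for x, y in train_loader:
    with torch.no_grad():          # encoder features stay frozen
        feats = encoder(x)
    loss = criterion(classifier(feats), y)
    probe_opt.zero_grad()
    loss.backward()
    probe_opt.step()
```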

# A.2 TWO SUB-PROBLEMS IN AO OF SIMSIAM

In the sub-problem $\eta^t\gets \arg \min_{\eta}\mathcal{L}(\theta^t,\eta)$, $\eta^t$, indicating the latent representation of images at step $t$, is actually obtained through $\eta_x^t\gets \mathbb{E}_{\mathcal{T}}\big[\mathcal{F}_{\theta^t}(\mathcal{T}(x))\big]$, where in practice $\mathbb{E}_{\mathcal{T}}[\cdot]$ is ignored and only one augmentation $\mathcal{T}'$ is sampled, i.e. $\eta_x^t\gets \mathcal{F}_{\theta^t}(\mathcal{T}'(x))$. Conceptually, Chen & He equate the role of the predictor to EOA (expectation over augmentations).

# A.3 EXPERIMENTAL DETAILS FOR EXPLICIT EOA IN TABLE 1

In the Moving average experiment, we follow the setting in SimSiam (Chen & He, 2021) without the predictor. In the Same batch experiment, multiple augmentations, 10 for instance, are applied to the same image. With these multiple augmentations, we obtain the corresponding encoded representations $z_{i}$, $i \in [1, 10]$. We minimize the cosine distance between the first representation $z_{1}$ and the average of the remaining vectors, i.e. $\bar{z} = \frac{1}{9} \sum_{i=2}^{10} z_{i}$. The gradient stop is put on the averaged vector. We also experimented with letting the gradient flow backward through more augmentations; however, this consistently led to collapse.
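
In the pseudocode style of the algorithms below, the Same batch experiment can be sketched as follows; `f`, `aug`, `loader`, and `update` are assumed as in Algorithm 1, so this is an illustration rather than the exact training script.

```python
import torch
import torch.nn.functional as F

for x in loader:
    zs = [f(aug(x)) for _ in range(10)]         # z_1 ... z_10
    z1 = F.normalize(zs[0], dim=1)
    z_bar = torch.stack(zs[1:]).mean(dim=0)     # average of z_2 ... z_10
    z_bar = F.normalize(z_bar, dim=1).detach()  # gradient stop on the average
    L = -(z1 * z_bar).sum(dim=1).mean()         # negative cosine similarity
    L.backward()
    update(f)                                   # SGD update
```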

# A.4 EXPERIMENTAL SETUP AND RESULT TREND FOR TABLE 2.

Mirror SimSiam. Here we provide the pseudocode for Mirror SimSiam, which relates to Fig. 1 (c). Without taking the symmetric loss into account, the pseudocode is shown in Algorithm 1; taking the symmetric loss into account, it is shown in Algorithm 2.

Algorithm 1 Pytorch-like Pseudocode: Mirror SimSiam

```python
# f: encoder (backbone + projector)
# h: predictor
for x in loader:  # load a minibatch x with n samples
    x_a, x_b = aug(x), aug(x)  # augmentation
    z_a, z_b = f(x_a), f(x_b)  # projections
    p_b = h(z_b.detach())  # detach z_b but still allow gradient through p_b
    L = D_cosine(z_a, p_b)  # loss
    L.backward()  # back-propagate
    update(f, h)  # SGD update

def D_cosine(z, p):  # negative cosine similarity
    z = normalize(z, dim=1)  # l2-normalize
    p = normalize(p, dim=1)  # l2-normalize
    return -(z * p).sum(dim=1).mean()
```

Algorithm 2 Pytorch-like Pseudocode: Mirror SimSiam (symmetric loss)

```python
# f: encoder (backbone + projector)
# h: predictor
for x in loader:  # load a minibatch x with n samples
    x_a, x_b = aug(x), aug(x)  # augmentation
    z_a, z_b = f(x_a), f(x_b)  # projections
    p_b = h(z_b.detach())  # detach z_b but still allow gradient through p_b
    p_a = h(z_a.detach())  # detach z_a but still allow gradient through p_a
    L = D_cosine(z_a, p_b)/2 + D_cosine(z_b, p_a)/2  # symmetric loss
    L.backward()  # back-propagate
    update(f, h)  # SGD update

def D_cosine(z, p):  # negative cosine similarity
    z = normalize(z, dim=1)  # l2-normalize
    p = normalize(p, dim=1)  # l2-normalize
    return -(z * p).sum(dim=1).mean()
```

Symmetric Predictor. To implement SimSiam with a symmetric predictor as in Fig. 2 (b), we can simply treat the predictor as part of the new encoder, for which the pseudocode is provided in Algorithm 3. Alternatively, we can additionally train the predictor as in SimSiam, in which case the training involves two losses, one for training the predictor and another for training the new encoder (the corresponding pseudocode is provided in Algorithm 4). Moreover, for the second implementation, we also experimented with a variant that alternates between fixing the predictor while optimizing the new encoder and training the predictor. All of them lead to collapse with a similar trend, as long as the symmetric predictor is used for training the encoder. To avoid redundancy, in Fig. 8 we only report the result of the second implementation.

Result trend. The result trends of SimSiam, Naive Siamese, Mirror SimSiam, and Symmetric Predictor are shown in Fig. 8. We observe that all architectures lead to collapse except for SimSiam. Mirror SimSiam was stopped in the middle of training because a NaN value was returned from the loss.

# A.5 EXPERIMENTAL DETAILS FOR INVERSE PREDICTOR.

In the inverse predictor experiment, which relates to Fig. 2 (c), we introduce a new predictor that has the same structure as the original predictor. The training loss consists of three parts: the predictor training loss, the inverse predictor training loss, and the new encoder (old encoder + predictor) training loss.

Algorithm 3 Pytorch-like Pseudocode: Symmetric Predictor

```python
# f: encoder (backbone + projector)
# h: predictor
for x in loader:  # load a minibatch x with n samples
    x_a, x_b = aug(x), aug(x)  # augmentation
    z_a, z_b = f(x_a), f(x_b)  # projections
    p_a, p_b = h(z_a), h(z_b)  # predictions
    L = D(p_a, p_b)/2 + D(p_b, p_a)/2  # symmetric loss
    L.backward()  # back-propagate
    update(f, h)  # SGD update

def D(p, z):  # negative cosine similarity with detach on z
    z = z.detach()  # stop gradient
    p = normalize(p, dim=1)  # l2-normalize
    z = normalize(z, dim=1)  # l2-normalize
    return -(p * z).sum(dim=1).mean()
```

Algorithm 4 Pytorch-like Pseudocode: Symmetric Predictor (with additional training on predictor)

```python
# f: encoder (backbone + projector)
# h: predictor
for x in loader:  # load a minibatch x with n samples
    x_a, x_b = aug(x), aug(x)  # augmentation
    z_a, z_b = f(x_a), f(x_b)  # projections
    p_a, p_b = h(z_a), h(z_b)  # predictions
    d_p_a, d_p_b = h(z_a.detach()), h(z_b.detach())  # detached predictor outputs
    # predictor training loss
    L_pred = D(d_p_a, z_b)/2 + D(d_p_b, z_a)/2
    # encoder training loss
    L_enc = D(p_a, d_p_b)/2 + D(p_b, d_p_a)/2
    L = L_pred + L_enc
    L.backward()  # back-propagate
    update(f, h)  # SGD update

def D(p, z):  # negative cosine similarity with detach on z
    z = z.detach()  # stop gradient
    p = normalize(p, dim=1)  # l2-normalize
    z = normalize(z, dim=1)  # l2-normalize
    return -(p * z).sum(dim=1).mean()
```

Figure 8: Result trend of Naive Siamese, Mirror SimSiam, Symmetric Predictor.

The new encoder $F$ consists of the old encoder $f$ plus the predictor $h$. The practice of gradient stop needs to be considered in the implementation; we provide the pseudocode in Algorithm 5.

Algorithm 5 Pytorch-like Pseudocode: Trainable Inverse Predictor

```python
# f: encoder (backbone + projector); h: predictor; h_inv: inverse predictor
for x in loader:  # load a minibatch x with n samples
    x_a, x_b = aug(x), aug(x)  # augmentation
    z_a, z_b = f(x_a), f(x_b)  # projections
    p_a, p_b = h(z_a), h(z_b)  # predictions
    d_p_a, d_p_b = h(z_a.detach()), h(z_b.detach())  # detached predictor outputs
    # predictor training loss
    L_pred = D(d_p_a, z_b)/2 + D(d_p_b, z_a)/2  # to train h
    inv_p_a, inv_p_b = h_inv(p_a.detach()), h_inv(p_b.detach())  # to train h_inv
    # inverse predictor training loss
    L_inv_pred = D(inv_p_a, z_a)/2 + D(inv_p_b, z_b)/2
    # encoder training loss
    L_enc = D(p_a, h_inv(p_b))/2 + D(p_b, h_inv(p_a))/2
    L = L_pred + L_inv_pred + L_enc
    L.backward()  # back-propagate
    update(f, h, h_inv)  # SGD update

def D(p, z):  # negative cosine similarity with detach on z
    z = z.detach()  # stop gradient
    p = normalize(p, dim=1)  # l2-normalize
    z = normalize(z, dim=1)  # l2-normalize
    return -(p * z).sum(dim=1).mean()
```

# A.6 REGULARIZATION LOSS

Following Zbontar et al. (2021), we compute a covariance regularization loss on the encoder output along the mini-batch dimension. The pseudocode for the de-correlation loss calculation is given in Algorithm 6.

Algorithm 6 Pytorch-like Pseudocode: De-correlation loss

```python
# Z_a: representation vectors
# N: batch size
# D: the number of dimensions of the representation vector
Z_a = Z_a - Z_a.mean(dim=0)
cov = Z_a.T @ Z_a / (N - 1)
diag = torch.eye(D)
loss = cov[~diag.bool()].pow_(2).sum() / D  # penalize off-diagonal covariance
```

# A.7 GRADIENT DERIVATION AND TEMPERATURE ANALYSIS FOR INFONCE

With $\cdot$ indicating the cosine similarity between vectors, the InfoNCE loss can be expressed as

$$
\mathcal{L}_{\text{InfoNCE}} = -\log \frac{\exp(\boldsymbol{Z}_a \cdot \boldsymbol{Z}_b / \tau)}{\exp(\boldsymbol{Z}_a \cdot \boldsymbol{Z}_b / \tau) + \sum_{i=1}^{N} \exp(\boldsymbol{Z}_a \cdot \boldsymbol{Z}_i / \tau)} = -\log \frac{\exp(\boldsymbol{Z}_a \cdot \boldsymbol{Z}_b / \tau)}{\sum_{i=0}^{N} \exp(\boldsymbol{Z}_a \cdot \boldsymbol{Z}_i / \tau)}, \tag{5}
$$

where $N$ indicates the number of negative samples and $\boldsymbol{Z}_0 = \boldsymbol{Z}_b$ for simplifying the notation. Treating $\boldsymbol{Z}_a \cdot \boldsymbol{Z}_i$ as the logit in a normal CE loss, the corresponding probability for each sample is $\lambda_{i} = \frac{\exp(\boldsymbol{Z}_{a}\cdot\boldsymbol{Z}_{i} / \tau)}{\sum_{j = 0}^{N}\exp(\boldsymbol{Z}_{a}\cdot\boldsymbol{Z}_{j} / \tau)}$, where $i = 0,1,2,\ldots,N$ and $\sum_{i = 0}^{N}\lambda_{i} = 1$.

The negative gradient of InfoNCE on the representation $\boldsymbol{Z}_{a}$ is

$$
\begin{aligned}
-\frac{\partial \mathcal{L}_{\text{InfoNCE}}}{\partial \boldsymbol{Z}_a} &= \frac{1}{\tau}(1-\lambda_0)\boldsymbol{Z}_b - \frac{1}{\tau}\sum_{i=1}^{N}\lambda_i \boldsymbol{Z}_i \\
&= \frac{1}{\tau}\Big(\boldsymbol{Z}_b - \sum_{i=0}^{N}\lambda_i \boldsymbol{Z}_i\Big) \\
&= \frac{1}{\tau}\Big(\boldsymbol{Z}_b - \sum_{i=0}^{N}\lambda_i (\boldsymbol{o}_z + \boldsymbol{r}_i)\Big) \\
&= \frac{1}{\tau}\Big(\boldsymbol{Z}_b + \big(-\boldsymbol{o}_z - \sum_{i=0}^{N}\lambda_i \boldsymbol{r}_i\big)\Big) \\
&\propto \boldsymbol{Z}_b + \Big(-\boldsymbol{o}_z - \sum_{i=0}^{N}\lambda_i \boldsymbol{r}_i\Big),
\end{aligned} \tag{6}
$$

where the factor $\frac{1}{\tau}$ can be absorbed into the learning rate and is omitted for simplicity of discussion. With $\boldsymbol{Z}_{b}$ as the basic gradient, $\boldsymbol{G}_{e} = -\boldsymbol{o}_{z} - \sum_{i=0}^{N} \lambda_{i} \boldsymbol{r}_{i}$, for which $\boldsymbol{o}_{e} = -\boldsymbol{o}_{z}$ and $\boldsymbol{r}_{e} = -\sum_{i=0}^{N} \lambda_{i} \boldsymbol{r}_{i}$.
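
As a numerical sketch of this decomposition (an assumed helper, not from the paper's code): given an l2-normalized anchor `Z_a` and a matrix `Z` whose rows are $\boldsymbol{Z}_0 = \boldsymbol{Z}_b$ followed by the $N$ negatives, the extra gradient $\boldsymbol{G}_e$ can be computed with the center vector estimated as the mean of the rows.

```python
import torch
import torch.nn.functional as F

def decompose_gradient(Z_a, Z, tau=0.1):
    """Z_a: (d,) anchor; Z: (N+1, d) with row 0 = Z_b; all l2-normalized."""
    lam = F.softmax(Z @ Z_a / tau, dim=0)        # lambda_i, sums to 1
    o_z = Z.mean(dim=0)                          # center vector estimate
    r = Z - o_z                                  # residual vectors r_i
    G_e = -o_z - (lam[:, None] * r).sum(dim=0)   # o_e + r_e
    return Z[0] + G_e                            # basic gradient Z_b plus G_e
```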

When the temperature is set to a large value, $\lambda_{i} = \frac{\exp(\boldsymbol{Z}_{a}\cdot \boldsymbol{Z}_{i} / \tau)}{\sum_{j = 0}^{N}\exp(\boldsymbol{Z}_{a}\cdot \boldsymbol{Z}_{j} / \tau)}$ approaches $\frac{1}{N + 1}$, indicated by a high entropy value (see Fig. 7). InfoNCE then degenerates to a simple contrastive loss, i.e. $\mathcal{L}_{simple} = -\boldsymbol{Z}_{a}\cdot \boldsymbol{Z}_{b} + \frac{1}{N + 1}\sum_{i = 0}^{N}\boldsymbol{Z}_{a}\cdot \boldsymbol{Z}_{i}$, which repulses every negative sample with an equal force. In contrast, a relatively smaller temperature gives relatively more weight, i.e. a larger $\lambda_i$, to negative samples that are more similar to the anchor $\boldsymbol{Z}_{a}$.

The influence of the temperature on the covariance and accuracy is shown in Fig. 7 (b) and (c). We observe that a higher temperature tends to decrease the effect of de-correlation, indicated by a higher covariance value, which also leads to a performance drop. This verifies our hypothesis regarding how $r_e$ in InfoNCE achieves de-correlation, because a large temperature causes more balanced weights $\lambda_i$, which is found to weaken the de-correlation effect. For the setup, we note that the encoder is trained for 200 epochs with the default settings in solo-learn for the SimCLR framework.

# A.8 THEORETICAL DERIVATION FOR A SINGLE BIAS LAYER

The cosine similarity is defined in Eq. 7 and its gradient in Eq. 8:

$$
\operatorname{cossim}(a, b) = \frac{a \cdot b}{\sqrt{a^{2} \cdot b^{2}}}, \tag{7}
$$

for which the derived gradient on the vector $a$ is

$$
\frac{\partial}{\partial a} \operatorname{cossim}(a, b) = \frac{b}{\|a\| \cdot \|b\|} - \operatorname{cossim}(a, b) \cdot \frac{a}{\|a\|^{2}}. \tag{8}
$$

The above equation is used as a prior for the following derivations. As indicated in the main manuscript, the encoder output $\boldsymbol{z}_a$ is $l_2$-normalized before being fed into the predictor; thus $\boldsymbol{p}_a = \boldsymbol{Z}_a + \boldsymbol{b}_p$, where $\boldsymbol{b}_p$ denotes the bias layer in the predictor. The cosine similarity loss (ignoring the symmetry for simplicity) is

$$
\mathcal{L}_{\text{cosine}} = -\boldsymbol{P}_a \cdot \boldsymbol{Z}_b = -\frac{\boldsymbol{p}_a}{\|\boldsymbol{p}_a\|} \cdot \frac{\boldsymbol{z}_b}{\|\boldsymbol{z}_b\|}. \tag{9}
$$

The gradient on $\boldsymbol{p}_a$ is derived as

$$
\begin{aligned}
-\frac{\partial \mathcal{L}_{\text{cosine}}}{\partial \boldsymbol{p}_a}
&= \frac{\boldsymbol{z}_b}{\|\boldsymbol{z}_b\| \cdot \|\boldsymbol{p}_a\|} - \operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b) \cdot \frac{\boldsymbol{p}_a}{\|\boldsymbol{p}_a\|^2} \\
&= \frac{1}{\|\boldsymbol{p}_a\|}\Big(\frac{\boldsymbol{z}_b}{\|\boldsymbol{z}_b\|} - \operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b) \cdot \boldsymbol{P}_a\Big) \\
&= \frac{1}{\|\boldsymbol{p}_a\|}\Big(\boldsymbol{Z}_b - \operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b) \cdot \frac{\boldsymbol{Z}_a + \boldsymbol{b}_p}{\|\boldsymbol{p}_a\|}\Big) \\
&= \frac{1}{\|\boldsymbol{p}_a\|}\Big((\boldsymbol{o}_z + \boldsymbol{r}_b) - \frac{\operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b)}{\|\boldsymbol{p}_a\|} \cdot (\boldsymbol{o}_z + \boldsymbol{r}_a + \boldsymbol{b}_p)\Big) \\
&= \frac{1}{\|\boldsymbol{p}_a\|}\big((\boldsymbol{o}_z + \boldsymbol{r}_b) - m \cdot (\boldsymbol{o}_z + \boldsymbol{r}_a + \boldsymbol{b}_p)\big) \\
&= \frac{1}{\|\boldsymbol{p}_a\|}\big((1-m)\,\boldsymbol{o}_z - m\,\boldsymbol{b}_p + \boldsymbol{r}_b - m\,\boldsymbol{r}_a\big),
\end{aligned} \tag{10}
$$

where $m = \frac{\operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b)}{\|\boldsymbol{p}_a\|}$.

Given that $\boldsymbol{p}_a = \boldsymbol{Z}_a + \boldsymbol{b}_p$, the negative gradient on $\boldsymbol{b}_p$ is the same as that on $\boldsymbol{p}_a$:

$$
-\frac{\partial \mathcal{L}_{\text{cosine}}}{\partial \boldsymbol{b}_p} = -\frac{\partial \mathcal{L}_{\text{cosine}}}{\partial \boldsymbol{p}_a} = \frac{1}{\|\boldsymbol{p}_a\|}\big((1-m)\,\boldsymbol{o}_z - m\,\boldsymbol{b}_p + \boldsymbol{r}_b - m\,\boldsymbol{r}_a\big). \tag{11}
$$

We assume that the training is stable and that the bias layer converges to a certain value when $-\frac{\partial \mathcal{L}_{\text{cosine}}}{\partial \boldsymbol{b}_p} = 0$. Thus, the converged $\boldsymbol{b}_p$ satisfies the following constraint:

$$
\frac{1}{\|\boldsymbol{p}_a\|}\big((1-m)\,\boldsymbol{o}_z - m\,\boldsymbol{b}_p + \boldsymbol{r}_b - m\,\boldsymbol{r}_a\big) = 0, \tag{12}
$$

$$
\boldsymbol{b}_p = \frac{1-m}{m}\,\boldsymbol{o}_z + \frac{1}{m}\,\boldsymbol{r}_b - \boldsymbol{r}_a.
$$

With a batch of samples, the averages of $\frac{1}{m}\,\boldsymbol{r}_b$ and $\boldsymbol{r}_a$ are expected to be close to 0 by the definition of the residual vector. Thus, the bias layer vector is expected to converge to:

$$
\boldsymbol{b}_p = \frac{1-m}{m}\,\boldsymbol{o}_z. \tag{13}
$$

Rationale behind the high similarity between $\boldsymbol{b}_{p}$ and $\boldsymbol{o}_{z}$. The above theoretical derivation shows that the parameters in the bias layer are expected to converge to the vector $\frac{1-m}{m}\,\boldsymbol{o}_{z}$. This justifies why the empirically observed cosine similarity between $\boldsymbol{b}_{p}$ and $\boldsymbol{o}_{z}$ is as high as 0.99. Ideally, it should be 1; such a small deviation is expected once the training dynamics are taken into account.
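
The empirical check behind Table 6 can be sketched as follows, with hypothetical names: `Z` holds the l2-normalized representations over the dataset and `b_p` is the trained bias vector.

```python
import torch.nn.functional as F

o_z = Z.mean(dim=0)                              # center vector estimate
sim = F.cosine_similarity(b_p, o_z, dim=0)
print(f"cossim(b_p, o_z) = {sim.item():.2f}")    # ~0.99 in Table 6
```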

Rationale behind how a single bias layer prevents collapse. Given that $\boldsymbol{p}_a = \boldsymbol{Z}_a + \boldsymbol{b}_p$, the negative gradient on $\boldsymbol{Z}_{a}$ is

$$
\begin{aligned}
-\frac{\partial \mathcal{L}_{\text{cosine}}}{\partial \boldsymbol{Z}_a} &= -\frac{\partial \mathcal{L}_{\text{cosine}}}{\partial \boldsymbol{p}_a} \\
&= \frac{1}{\|\boldsymbol{p}_a\|}\Big(\boldsymbol{Z}_b - \operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b) \cdot \frac{\boldsymbol{Z}_a + \boldsymbol{b}_p}{\|\boldsymbol{p}_a\|}\Big) \\
&= \frac{1}{\|\boldsymbol{p}_a\|}\,\boldsymbol{Z}_b - \frac{\operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b)}{\|\boldsymbol{p}_a\|^2}\,\boldsymbol{Z}_a - \frac{\operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b)}{\|\boldsymbol{p}_a\|^2}\,\boldsymbol{b}_p.
\end{aligned} \tag{14}
$$

Here, we highlight that since the loss $-\boldsymbol{Z}_a \cdot \boldsymbol{Z}_a = -1$ is a constant with zero gradient on the encoder, $-\frac{\operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b)}{\|\boldsymbol{p}_a\|^2}\,\boldsymbol{Z}_a$ can be seen as a dummy term. Considering Eq. 13 and $m = \frac{\operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b)}{\|\boldsymbol{p}_a\|}$, we have $\boldsymbol{b}_p = \big(\frac{\|\boldsymbol{p}_a\|}{\operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b)} - 1\big)\,\boldsymbol{o}_z$. The above equation is then equivalent to

$$
\begin{aligned}
-\frac{\partial \mathcal{L}_{\text{cosine}}}{\partial \boldsymbol{Z}_a} &= \frac{1}{\|\boldsymbol{p}_a\|}\,\boldsymbol{Z}_b - \frac{\operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b)}{\|\boldsymbol{p}_a\|^2}\,\boldsymbol{b}_p \\
&= \frac{1}{\|\boldsymbol{p}_a\|}\,\boldsymbol{Z}_b - \frac{\operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b)}{\|\boldsymbol{p}_a\|^2}\Big(\frac{\|\boldsymbol{p}_a\|}{\operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b)} - 1\Big)\,\boldsymbol{o}_z \\
&= \frac{1}{\|\boldsymbol{p}_a\|}\,\boldsymbol{Z}_b - \frac{1}{\|\boldsymbol{p}_a\|}\Big(1 - \frac{\operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b)}{\|\boldsymbol{p}_a\|}\Big)\,\boldsymbol{o}_z \\
&\propto \boldsymbol{Z}_b - \Big(1 - \frac{\operatorname{cossim}(\boldsymbol{Z}_a, \boldsymbol{Z}_b)}{\|\boldsymbol{p}_a\|}\Big)\,\boldsymbol{o}_z.
\end{aligned} \tag{15}
$$

With $\boldsymbol{Z}_{b}$ as the basic gradient, the extra gradient component is $\boldsymbol{G}_{e} = -\big(1 - \frac{\operatorname{cossim}(\boldsymbol{Z}_{a}, \boldsymbol{Z}_{b})}{\|\boldsymbol{p}_{a}\|}\big)\,\boldsymbol{o}_{z}$. Given that $\boldsymbol{p}_{a} = \boldsymbol{Z}_{a} + \boldsymbol{b}_{p}$ and $\|\boldsymbol{Z}_{a}\| = 1$, $\|\boldsymbol{p}_{a}\| < 1$ only when $\boldsymbol{Z}_{a}$ is negatively correlated with $\boldsymbol{b}_{p}$. In practice, however, $\boldsymbol{Z}_{a}$ and $\boldsymbol{b}_{p}$ are often positively correlated to some extent due to their shared center vector component; in other words, $\|\boldsymbol{p}_{a}\| > 1$. Moreover, $\operatorname{cossim}(\boldsymbol{Z}_{a}, \boldsymbol{Z}_{b})$ is smaller than 1, thus $-\big(1 - \frac{\operatorname{cossim}(\boldsymbol{Z}_{a}, \boldsymbol{Z}_{b})}{\|\boldsymbol{p}_{a}\|}\big) < 0$, suggesting that $\boldsymbol{G}_{e}$ consists of a negative $\boldsymbol{o}_{z}$ with a de-centering effect. This derivation justifies why a single bias layer can help alleviate collapse.

# B DISCUSSION: DOES BN HELP AVOID COLLAPSE?

Figure 9: BN with MSE helps prevent collapse without predictor or stop gradient. Its performance, however, is inferior to the cosine loss-based SimSiam (with predictor and stop gradient).

To our knowledge, our work is the first to revisit and refute the explanatory claims in (Chen & He, 2021). Several works, however, have attempted to demystify the success of BYOL (Grill et al., 2020), a close variant of SimSiam. Its success has been ascribed to BN in (Fetterman & Albrecht, 2020); however, (Richemond et al., 2020) refutes their claim. Since the role of the intermediate BNs has been ascribed to stabilizing training (Richemond et al., 2020; Chen & He, 2021), we only discuss the final BN in the SimSiam encoder. Note that under our Conjecture 1, the final BN, which removes the mean of the representation vector, is supposed to have a de-centering effect. By default, SimSiam has such a BN at the end of its encoder; however, it still collapses without the predictor and stop gradient. Why would such a BN not prevent collapse in this case? Interestingly, we observe that this BN can help alleviate collapse with a simple MSE loss (see Fig. 9); however, its performance is inferior to the cosine loss-based SimSiam (with predictor and stop gradient) due to the lack of the de-correlation effect of SimSiam. Note that the cosine loss is in essence equivalent to an MSE loss on the $l_{2}$-normalized vectors. This phenomenon can be interpreted as the $l_{2}$-normalization introducing another mean after the BN removes it. Thus, with such $l_{2}$-normalization in the MSE loss, i.e. adopting the default cosine loss, it is important to remove $o_e$ from the optimization target. The results with the loss $-Z_a \cdot \mathrm{sg}(Z_b + o_e)$ in Table 3 show that this indeed prevents collapse, verifying the above interpretation.
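
For illustration, the loss $-Z_a \cdot \mathrm{sg}(Z_b + o_e)$ with $o_e = -o_z$ can be sketched as follows, estimating the center vector from the current batch; `f`, `x_a`, and `x_b` are assumed as in the algorithms above, and the per-batch estimate of $o_z$ is an assumption of this sketch.

```python
import torch.nn.functional as F

z_a = F.normalize(f(x_a), dim=1)        # Z_a
z_b = F.normalize(f(x_b), dim=1)        # Z_b
o_z = z_b.mean(dim=0, keepdim=True)     # batch estimate of the center vector
target = (z_b - o_z).detach()           # sg(Z_b + o_e) with o_e = -o_z
L = -(z_a * target).sum(dim=1).mean()
```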

2203.16xxx/2203.16262/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a10e71c9cce50d7834a99adb891f563b475f193fc18ff2416dc3ae16fe449144
size 520539

2203.16xxx/2203.16262/layout.json
ADDED
The diff for this file is too large to render. See raw diff.

2203.16xxx/2203.16263/581c38e5-2dd5-4af0-b295-8d57e76a6f20_content_list.json
ADDED
@@ -0,0 +1,921 @@
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Does Audio Deepfake Detection Generalize?",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
270,
|
| 8 |
+
95,
|
| 9 |
+
727,
|
| 10 |
+
115
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Nicolas M. Müller<sup>1</sup>, Pavel Czempin<sup>2</sup>, Franziska Dieckmann<sup>2</sup>, Adam Froghyar<sup>3</sup>, Konstantin Bötttinger<sup>1</sup>",
|
| 17 |
+
"bbox": [
|
| 18 |
+
250,
|
| 19 |
+
129,
|
| 20 |
+
746,
|
| 21 |
+
162
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "$^{1}$ Fraunhofer AISEC $^{2}$ Technical University Munich $^{3}$ why do birds GmbH",
|
| 28 |
+
"bbox": [
|
| 29 |
+
189,
|
| 30 |
+
173,
|
| 31 |
+
811,
|
| 32 |
+
191
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "nicolas.mueller@aisec.fraunhofer.de",
|
| 39 |
+
"bbox": [
|
| 40 |
+
339,
|
| 41 |
+
193,
|
| 42 |
+
658,
|
| 43 |
+
205
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "Abstract",
|
| 50 |
+
"text_level": 1,
|
| 51 |
+
"bbox": [
|
| 52 |
+
245,
|
| 53 |
+
225,
|
| 54 |
+
324,
|
| 55 |
+
240
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "Current text-to-speech algorithms produce realistic fakes of human voices, making deepfake detection a much-needed area of research. While researchers have presented various deep learning models for audio spoofs detection, it is often unclear exactly why these architectures are successful: Preprocessing steps, hyperparameter settings, and the degree of fine-tuning are not consistent across related work. Which factors contribute to success, and which are accidental?",
|
| 62 |
+
"bbox": [
|
| 63 |
+
90,
|
| 64 |
+
249,
|
| 65 |
+
478,
|
| 66 |
+
348
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "In this work, we address this problem: We systematize audio spoofing detection by re-implementing and uniformly evaluating twelve architectures from related work. We identify overarching features for successful audio deepfake detection, such as using cqtspec or logspec features instead of melspec features, which improves performance by $37\\%$ EER on average, all other factors constant.",
|
| 73 |
+
"bbox": [
|
| 74 |
+
90,
|
| 75 |
+
349,
|
| 76 |
+
478,
|
| 77 |
+
434
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "text",
|
| 83 |
+
"text": "Additionally, we evaluate generalization capabilities: We collect and publish a new dataset consisting of 37.9 hours of found audio recordings of celebrities and politicians, of which 17.2 hours are deepfakes. We find that related work performs poorly on such real-world data (performance degradation of up to one thousand percent). This could suggest that the community has tailored its solutions too closely to the prevailing ASVspoof benchmark and that deepfakes are much harder to detect outside the lab than previously thought.",
|
| 84 |
+
"bbox": [
|
| 85 |
+
90,
|
| 86 |
+
436,
|
| 87 |
+
480,
|
| 88 |
+
548
|
| 89 |
+
],
|
| 90 |
+
"page_idx": 0
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"type": "text",
|
| 94 |
+
"text": "1. Introduction",
|
| 95 |
+
"text_level": 1,
|
| 96 |
+
"bbox": [
|
| 97 |
+
215,
|
| 98 |
+
561,
|
| 99 |
+
356,
|
| 100 |
+
575
|
| 101 |
+
],
|
| 102 |
+
"page_idx": 0
|
| 103 |
+
},
|
| 104 |
+
{
|
| 105 |
+
"type": "text",
|
| 106 |
+
"text": "Modern text-to-speech synthesis (TTS) is capable of realistic fakes of human voices, also known as audio deepfakes or spoofs. While there are many ethical applications of this technology, there is also a serious risk of malicious use. For example, TTS technology enables the cloning of politicians' voices [1, 2], which poses a variety of risks to society, including the spread of misinformation.",
|
| 107 |
+
"bbox": [
|
| 108 |
+
90,
|
| 109 |
+
582,
|
| 110 |
+
478,
|
| 111 |
+
669
|
| 112 |
+
],
|
| 113 |
+
"page_idx": 0
|
| 114 |
+
},
|
| 115 |
+
{
|
| 116 |
+
"type": "text",
|
| 117 |
+
"text": "Reliable detection of speech spoofing can help mitigate such risks and is therefore an active area of research. However, since the technology to create audio deepfakes has only been available for a few years (see Wavenet [3] and Tacotron [4], published in 2016/17), audio spoof detection is still in its infancy. While many approaches have been proposed (cf. Section 2), it is still difficult to understand why some of the models work well: Each work uses different feature extraction techniques, preprocessing steps, hyperparameter settings, and fine-tuning. Which are the main factors and drivers for models to perform well? What can be learned in principle for the development of such systems?",
|
| 118 |
+
"bbox": [
|
| 119 |
+
90,
|
| 120 |
+
670,
|
| 121 |
+
478,
|
| 122 |
+
818
|
| 123 |
+
],
|
| 124 |
+
"page_idx": 0
|
| 125 |
+
},
|
| 126 |
+
{
|
| 127 |
+
"type": "text",
|
| 128 |
+
"text": "Furthermore, the evaluation of spoof detection models has so far been performed exclusively on the ASVspoof dataset [5, 6], which means that the reported performance of these models is based on a limited set of TTS synthesis algorithms. ASVspoof is based on the VCTK dataset [7], which exclusively",
|
| 129 |
+
"bbox": [
|
| 130 |
+
90,
|
| 131 |
+
819,
|
| 132 |
+
478,
|
| 133 |
+
881
|
| 134 |
+
],
|
| 135 |
+
"page_idx": 0
|
| 136 |
+
},
|
| 137 |
+
{
|
| 138 |
+
"type": "text",
|
| 139 |
+
"text": "features professional speakers and has been recorded in a studio environment, using a semi-anechoic chamber. What can we expect from audio spoof detection trained on this dataset? Is it capable of detecting realistic, unseen, 'in-the-wild' audio spoofs like those encountered on social media?",
|
| 140 |
+
"bbox": [
|
| 141 |
+
517,
|
| 142 |
+
227,
|
| 143 |
+
907,
|
| 144 |
+
290
|
| 145 |
+
],
|
| 146 |
+
"page_idx": 0
|
| 147 |
+
},
|
| 148 |
+
{
|
| 149 |
+
"type": "text",
|
| 150 |
+
"text": "To answer these questions, this paper presents the following contributions:",
|
| 151 |
+
"bbox": [
|
| 152 |
+
519,
|
| 153 |
+
290,
|
| 154 |
+
907,
|
| 155 |
+
313
|
| 156 |
+
],
|
| 157 |
+
"page_idx": 0
|
| 158 |
+
},
|
| 159 |
+
{
|
| 160 |
+
"type": "list",
|
| 161 |
+
"sub_type": "text",
|
| 162 |
+
"list_items": [
|
| 163 |
+
"- We reimplement twelve of the most popular architectures from related work and evaluate them according to a common standard. We systematically exchange components to attribute performance reported in related work to either model architecture, feature extraction, or data preprocessing techniques. In this way, we identify fundamental properties for well-performing audio deepfake detection.",
|
| 164 |
+
"- To investigate the applicability of related work in the real world, we introduce a new audio deepfake dataset<sup>1</sup>. We collect 17.2 hours of high-quality audio deepfakes and 20.7 hours of of authentic material from 58 politicians and celebrities.",
|
| 165 |
+
"- We show that established models generally perform poorly on such real-world data. This discrepancy between reported and actual generalization ability suggests that the detection of audio fakes is a far more difficult challenge than previously thought."
|
| 166 |
+
],
|
| 167 |
+
"bbox": [
|
| 168 |
+
547,
|
| 169 |
+
319,
|
| 170 |
+
905,
|
| 171 |
+
548
|
| 172 |
+
],
|
| 173 |
+
"page_idx": 0
|
| 174 |
+
},
|
| 175 |
+
{
|
| 176 |
+
"type": "text",
|
| 177 |
+
"text": "2. Related Work",
|
| 178 |
+
"text_level": 1,
|
| 179 |
+
"bbox": [
|
| 180 |
+
636,
|
| 181 |
+
561,
|
| 182 |
+
791,
|
| 183 |
+
575
|
| 184 |
+
],
|
| 185 |
+
"page_idx": 0
|
| 186 |
+
},
|
| 187 |
+
{
|
| 188 |
+
"type": "text",
|
| 189 |
+
"text": "2.1. Model Architectures",
|
| 190 |
+
"text_level": 1,
|
| 191 |
+
"bbox": [
|
| 192 |
+
519,
|
| 193 |
+
581,
|
| 194 |
+
690,
|
| 195 |
+
593
|
| 196 |
+
],
|
| 197 |
+
"page_idx": 0
|
| 198 |
+
},
|
| 199 |
+
{
|
| 200 |
+
"type": "text",
|
| 201 |
+
"text": "There is a significant body of work on audio spoof detection, driven largely by the ASVspoof challenges and datasets [5, 6]. In this section, we briefly present the architectures and models used in our evaluation in Section 5.",
|
| 202 |
+
"bbox": [
|
| 203 |
+
517,
|
| 204 |
+
600,
|
| 205 |
+
907,
|
| 206 |
+
649
|
| 207 |
+
],
|
| 208 |
+
"page_idx": 0
|
| 209 |
+
},
|
| 210 |
+
{
|
| 211 |
+
"type": "text",
|
| 212 |
+
"text": "LSTM-based models. Recurrent architectures are a natural choice in the area of language processing, with numerous related work utilizing such models [8, 9, 10, 11]. As a baseline for evaluating this approach, we implement a simple LSTM model: it consists of three LSTM layers followed by a single linear layer. The output is averaged over the time dimension to obtain a single embedding vector.",
|
| 213 |
+
"bbox": [
|
| 214 |
+
517,
|
| 215 |
+
650,
|
| 216 |
+
907,
|
| 217 |
+
736
|
| 218 |
+
],
|
| 219 |
+
"page_idx": 0
|
| 220 |
+
},
|
| 221 |
+
{
|
| 222 |
+
"type": "text",
|
| 223 |
+
"text": "LCNN. Another common architecture for audio spoof detection are LCNN-based learning models such as LCNN, LCNN-Attention, and LCNN-LSTM [12, 13, 14]. LCNNs combine convolutional layers with Max-Feature-Map activations to create 'light' convolutional neural networks. LCNN-Attention has an added single-head-attention pooling layer, while LCNN-LSTM uses a Bi-LSTM layer and a skip connection.",
|
| 224 |
+
"bbox": [
|
| 225 |
+
517,
|
| 226 |
+
736,
|
| 227 |
+
907,
|
| 228 |
+
834
|
| 229 |
+
],
|
| 230 |
+
"page_idx": 0
|
| 231 |
+
},
|
| 232 |
+
{
|
| 233 |
+
"type": "text",
|
| 234 |
+
"text": "MesoNet. MesoNet is based on the Meso-4 [15] architecture, which was originally used for detecting facial video",
|
| 235 |
+
"bbox": [
|
| 236 |
+
517,
|
| 237 |
+
835,
|
| 238 |
+
907,
|
| 239 |
+
860
|
| 240 |
+
],
|
| 241 |
+
"page_idx": 0
|
| 242 |
+
},
|
| 243 |
+
{
|
| 244 |
+
"type": "aside_text",
|
| 245 |
+
"text": "arXiv:2203.16263v4 [cs.SD] 27 Aug 2024",
|
| 246 |
+
"bbox": [
|
| 247 |
+
21,
|
| 248 |
+
304,
|
| 249 |
+
60,
|
| 250 |
+
722
|
| 251 |
+
],
|
| 252 |
+
"page_idx": 0
|
| 253 |
+
},
|
| 254 |
+
{
|
| 255 |
+
"type": "page_footnote",
|
| 256 |
+
"text": "$^{1}$ https://deepfake-total.com/in_the_wild",
|
| 257 |
+
"bbox": [
|
| 258 |
+
539,
|
| 259 |
+
868,
|
| 260 |
+
752,
|
| 261 |
+
879
|
| 262 |
+
],
|
| 263 |
+
"page_idx": 0
|
| 264 |
+
},
|
| 265 |
+
{
|
| 266 |
+
"type": "text",
|
| 267 |
+
"text": "deepfakes. It uses 4 convolutional layers in addition to Batch Normalization, Max Pooling, and a fully connected classifier.",
|
| 268 |
+
"bbox": [
|
| 269 |
+
90,
|
| 270 |
+
87,
|
| 271 |
+
478,
|
| 272 |
+
112
|
| 273 |
+
],
|
| 274 |
+
"page_idx": 1
|
| 275 |
+
},
|
| 276 |
+
{
|
| 277 |
+
"type": "text",
|
| 278 |
+
"text": "MesoInception. Based on the facial deepfake detector Meso-Inception-4 [15], MesoInception extends the Meso-4 architecture with Inception blocks [16].",
|
| 279 |
+
"bbox": [
|
| 280 |
+
90,
|
| 281 |
+
114,
|
| 282 |
+
478,
|
| 283 |
+
151
|
| 284 |
+
],
|
| 285 |
+
"page_idx": 1
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"type": "text",
|
| 289 |
+
"text": "ResNet18. Residual Networks were first used for audio deepfake detection by [17], and continue to be employed [18, 19]. This architecture, first introduced in the computer vision domain [20], uses convolutional layers and shortcut connections, which avoids the vanishing gradient problem and allows to design especially deep networks (18 layers for ResNet18).",
|
| 290 |
+
"bbox": [
|
| 291 |
+
90,
|
| 292 |
+
151,
|
| 293 |
+
477,
|
| 294 |
+
225
|
| 295 |
+
],
|
| 296 |
+
"page_idx": 1
|
| 297 |
+
},
|
| 298 |
+
{
|
| 299 |
+
"type": "text",
|
| 300 |
+
"text": "Transformer. The Transformer architecture has also found its way into the field of audio spoof detection [21]. We use four self-attention layers with 256 hidden dimensions and skip-connections, and encode time with positional encodings [22].",
|
| 301 |
+
"bbox": [
|
| 302 |
+
90,
|
| 303 |
+
225,
|
| 304 |
+
477,
|
| 305 |
+
275
|
| 306 |
+
],
|
| 307 |
+
"page_idx": 1
|
| 308 |
+
},
|
| 309 |
+
{
|
| 310 |
+
"type": "text",
|
| 311 |
+
"text": "CRNNProof. This end-to-end architecture combines 1D convolutions with recurrent layers to learn features directly from raw audio samples [9].",
|
| 312 |
+
"bbox": [
|
| 313 |
+
90,
|
| 314 |
+
275,
|
| 315 |
+
477,
|
| 316 |
+
313
|
| 317 |
+
],
|
| 318 |
+
"page_idx": 1
|
| 319 |
+
},
|
| 320 |
+
{
|
| 321 |
+
"type": "text",
|
| 322 |
+
"text": "RawNet2 [23] is another end-to-end model. It employs Sinc-Layers [24], which correspond to rectangular band-pass filters, to extract information directly from raw waveforms.",
|
| 323 |
+
"bbox": [
|
| 324 |
+
90,
|
| 325 |
+
313,
|
| 326 |
+
477,
|
| 327 |
+
350
|
| 328 |
+
],
|
| 329 |
+
"page_idx": 1
|
| 330 |
+
},
|
| 331 |
+
{
|
| 332 |
+
"type": "text",
|
| 333 |
+
"text": "RawPC is an end-to-end model which also uses Sinc-layers to operate directly on raw wavforms. The architecture is found via differentiable architecture search [25].",
|
| 334 |
+
"bbox": [
|
| 335 |
+
90,
|
| 336 |
+
351,
|
| 337 |
+
477,
|
| 338 |
+
388
|
| 339 |
+
],
|
| 340 |
+
"page_idx": 1
|
| 341 |
+
},
|
| 342 |
+
{
|
| 343 |
+
"type": "text",
|
| 344 |
+
"text": "RawGAT-ST, a spectro-temporal graph attention network (GAT), trained in an end-to-end fashion. It introduces spectral and temporal sub-graphs and a graph pooling strategy, and reports state-of-the-art spoof detection capabilities [26], which we can verify experimentally, c.f. Table 1.",
|
| 345 |
+
"bbox": [
|
| 346 |
+
90,
|
| 347 |
+
388,
|
| 348 |
+
477,
|
| 349 |
+
451
|
| 350 |
+
],
|
| 351 |
+
"page_idx": 1
|
| 352 |
+
},
|
| 353 |
+
{
|
| 354 |
+
"type": "text",
|
| 355 |
+
"text": "3. Datasets",
|
| 356 |
+
"text_level": 1,
|
| 357 |
+
"bbox": [
|
| 358 |
+
231,
|
| 359 |
+
464,
|
| 360 |
+
337,
|
| 361 |
+
479
|
| 362 |
+
],
|
| 363 |
+
"page_idx": 1
|
| 364 |
+
},
|
| 365 |
+
{
|
| 366 |
+
"type": "text",
|
| 367 |
+
"text": "To train and evaluate our models, we use the ASVspoof 2019 dataset [5], in particular its Logical Access (LA) part. It consists of audio files that are either real (i.e., authentic recordings of human speech) or fake (i.e., synthesized or faked audio). The spoofed audio files are from 19 different TTS synthesis algorithms. From a spoofing detection point of view, ASVspoof considers synthetic utterances as a threat to the authenticity of the human voice, and therefore labels them as 'attacks'. In total, there are 19 different attackers in the ASVspoof 2019 dataset, labeled A1 - A19. For each attacker, there are 4914 synthetic audio recordings and 7355 real samples. This dataset is arguably the best known audio deefake dataset used by almost all related work.",
|
| 368 |
+
"bbox": [
|
| 369 |
+
90,
|
| 370 |
+
485,
|
| 371 |
+
478,
|
| 372 |
+
646
|
| 373 |
+
],
|
| 374 |
+
"page_idx": 1
|
| 375 |
+
},
|
| 376 |
+
{
|
| 377 |
+
"type": "text",
|
| 378 |
+
"text": "In order to evaluate our models on realistic unseen data in-the-wild, we additionally create and publish a new audio deefake dataset, c.f. Figure 1. It consists of 37.9 hours of audio clips that are either fake (17.2 hours) or real (20.7 hours). We feature English-speaking celebrities and politicians, both from present and past<sup>2</sup>. The fake clips are created by segmenting 219 of publicly available video and audio files that explicitly advertise audio deepfakes. Since the speakers talk absurdly and out-of-character ('Donald Trump reads Star Wars'), it is easy to verify that the audio files are really spoofed. We then manually collect corresponding genuine instances from the same speakers using publicly available material such as podcasts, speeches, etc. We take care to include clips where the type of speaker, style, emotions, etc. are similar to the fake (e.g., for a fake speech by Barack Obama, we include an authentic speech and try to find similar values for background noise, emotions, duration, etc.). The clips have an average length of 4.3 seconds and",
|
| 379 |
+
"bbox": [
|
| 380 |
+
90,
|
| 381 |
+
646,
|
| 382 |
+
478,
|
| 383 |
+
858
|
| 384 |
+
],
|
| 385 |
+
"page_idx": 1
|
| 386 |
+
},
|
| 387 |
+
{
|
| 388 |
+
"type": "image",
|
| 389 |
+
"img_path": "images/9f995660207673496a7c8c4bbf2bc076a41eb222935de538ea31efa68c1b7b7c.jpg",
|
| 390 |
+
"image_caption": [
|
| 391 |
+
"Biden, Joe"
|
| 392 |
+
],
|
| 393 |
+
"image_footnote": [],
|
| 394 |
+
"bbox": [
|
| 395 |
+
539,
|
| 396 |
+
84,
|
| 397 |
+
625,
|
| 398 |
+
149
|
| 399 |
+
],
|
| 400 |
+
"page_idx": 1
|
| 401 |
+
},
|
| 402 |
+
{
|
| 403 |
+
"type": "image",
|
| 404 |
+
"img_path": "images/1865b7015ad4b15dacd7435a06f9a962a0e01ce9ca32cdf063fb41bb920e6b77.jpg",
|
| 405 |
+
"image_caption": [
|
| 406 |
+
"Clinton, Bill"
|
| 407 |
+
],
|
| 408 |
+
"image_footnote": [],
|
| 409 |
+
"bbox": [
|
| 410 |
+
638,
|
| 411 |
+
85,
|
| 412 |
+
721,
|
| 413 |
+
149
|
| 414 |
+
],
|
| 415 |
+
"page_idx": 1
|
| 416 |
+
},
|
| 417 |
+
{
|
| 418 |
+
"type": "image",
|
| 419 |
+
"img_path": "images/154cb328579a2a73ef205e3bca559a8788a58f2c9eda79e1cac217adf36ca447.jpg",
|
| 420 |
+
"image_caption": [],
|
| 421 |
+
"image_footnote": [],
|
| 422 |
+
"bbox": [
|
| 423 |
+
742,
|
| 424 |
+
123,
|
| 425 |
+
776,
|
| 426 |
+
130
|
| 427 |
+
],
|
| 428 |
+
"page_idx": 1
|
| 429 |
+
},
|
| 430 |
+
{
|
| 431 |
+
"type": "image",
|
| 432 |
+
"img_path": "images/915bd11c04f11358a5671bbbd89d131abe55f63866aa86f68ac2de6623cd1cda.jpg",
|
| 433 |
+
"image_caption": [
|
| 434 |
+
"Zuckerberg, Mark",
|
| 435 |
+
"Figure 1: Schematics of our collected dataset. For $n = 58$ celebrities and politicians, we collected both bona-fide and spoofed audio (represented by blue and red boxes per speaker). In total, we collected 20.8 hours of bona-fide and 17.2 hours of spoofed audio. On average, there are 23 minutes of bona-fide and 18 minutes of spoofed audio per speaker."
|
| 436 |
+
],
|
| 437 |
+
"image_footnote": [],
|
| 438 |
+
"bbox": [
|
| 439 |
+
805,
|
| 440 |
+
85,
|
| 441 |
+
890,
|
| 442 |
+
149
|
| 443 |
+
],
|
| 444 |
+
"page_idx": 1
|
| 445 |
+
},
|
| 446 |
+
{
|
| 447 |
+
"type": "text",
|
| 448 |
+
"text": "are converted to 'wav' after downloading. All recordings were downsampled to $16\\mathrm{kHz}$ (the highest common frequency in the original recordings). Clips were collected from publicly available sources such as social networks and popular video sharing platforms. This dataset is intended as evaluation data: it allows evaluation of a model's cross-database capabilities on a realistic use case.",
|
| 449 |
+
"bbox": [
|
| 450 |
+
517,
|
| 451 |
+
275,
|
| 452 |
+
907,
|
| 453 |
+
363
|
| 454 |
+
],
|
| 455 |
+
"page_idx": 1
|
| 456 |
+
},
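The 16 kHz downsampling step described above can be reproduced with librosa, which resamples on load; the file paths below are placeholders, not files from the dataset.

```python
import librosa
import soundfile as sf

# Load an arbitrary clip and resample it to 16 kHz on load.
# "clip.wav" / "clip_16k.wav" are placeholder paths for illustration.
audio, sr = librosa.load("clip.wav", sr=16000, mono=True)
sf.write("clip_16k.wav", audio, sr)
```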
|
| 457 |
+
{
|
| 458 |
+
"type": "text",
|
| 459 |
+
"text": "4. Experimental Setup",
|
| 460 |
+
"text_level": 1,
|
| 461 |
+
"bbox": [
|
| 462 |
+
611,
|
| 463 |
+
376,
|
| 464 |
+
815,
|
| 465 |
+
393
|
| 466 |
+
],
|
| 467 |
+
"page_idx": 1
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"type": "text",
|
| 471 |
+
"text": "4.1. Training and Evaluation",
|
| 472 |
+
"text_level": 1,
|
| 473 |
+
"bbox": [
|
| 474 |
+
519,
|
| 475 |
+
398,
|
| 476 |
+
714,
|
| 477 |
+
412
|
| 478 |
+
],
|
| 479 |
+
"page_idx": 1
|
| 480 |
+
},
|
| 481 |
+
{
|
| 482 |
+
"type": "text",
|
| 483 |
+
"text": "4.1.1. Hyper Parameters",
|
| 484 |
+
"text_level": 1,
|
| 485 |
+
"bbox": [
|
| 486 |
+
519,
|
| 487 |
+
418,
|
| 488 |
+
678,
|
| 489 |
+
432
|
| 490 |
+
],
|
| 491 |
+
"page_idx": 1
|
| 492 |
+
},
|
| 493 |
+
{
|
| 494 |
+
"type": "text",
|
| 495 |
+
"text": "We train all of our models using a cross-entropy loss with a log-Softmax over the output logits. We choose the Adam [27] optimizer. We initialize the learning rate at 0.0001 and use a learning rate scheduler. We train for 100 epochs with early stopping using a patience of five epochs.",
|
| 496 |
+
"bbox": [
|
| 497 |
+
517,
|
| 498 |
+
437,
|
| 499 |
+
907,
|
| 500 |
+
500
|
| 501 |
+
],
|
| 502 |
+
"page_idx": 1
|
| 503 |
+
},
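A rough PyTorch sketch of this training recipe follows. It is not the authors' code: `model`, `train_loader`, and `val_loss` are assumed to exist, and `ReduceLROnPlateau` stands in for the unspecified 'learning rate scheduler'. Cross-entropy over log-softmax outputs is expressed here as log-softmax followed by NLL loss, which is mathematically equivalent.

```python
import torch
import torch.nn.functional as F

optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
# The paper only says "a learning rate scheduler"; ReduceLROnPlateau is
# one plausible choice, used here purely for illustration.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer)

best, patience, bad_epochs = float("inf"), 5, 0
for epoch in range(100):
    model.train()
    for x, y in train_loader:
        logits = model(x)
        loss = F.nll_loss(F.log_softmax(logits, dim=-1), y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    v = val_loss(model)        # assumed helper returning dev-set loss
    scheduler.step(v)
    if v < best:
        best, bad_epochs = v, 0
    else:
        bad_epochs += 1
        if bad_epochs >= patience:   # early stopping, patience of five
            break
```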
|
| 504 |
+
{
|
| 505 |
+
"type": "text",
|
| 506 |
+
"text": "4.1.2. Train and Evaluation Data Splits",
|
| 507 |
+
"text_level": 1,
|
| 508 |
+
"bbox": [
|
| 509 |
+
519,
|
| 510 |
+
512,
|
| 511 |
+
768,
|
| 512 |
+
526
|
| 513 |
+
],
|
| 514 |
+
"page_idx": 1
|
| 515 |
+
},
|
| 516 |
+
{
|
| 517 |
+
"type": "text",
|
| 518 |
+
"text": "We train our models on the 'train' and 'dev' parts of the ASVspoof 2019 Logical Access (LA) dataset part [5]. This is consistent with most related work and also with the evaluation procedure of the ASVspoof 2019 Challenge. We test against two evaluation datasets. As in-domain evaluation data, we use the 'eval' split of ASVspoof 2019. This split contains unseen attacks, i.e., attacks not seen during training. However, the evaluation audios share certain properties with the training data [28], so model generalization cannot be assessed using the 'eval' split of ASVspoof 2019 alone. This motivates the use of our proposed 'in-the-wild' dataset, see Section 3, as unknown out-of-domain evaluation data.",
|
| 519 |
+
"bbox": [
|
| 520 |
+
517,
|
| 521 |
+
531,
|
| 522 |
+
907,
|
| 523 |
+
680
|
| 524 |
+
],
|
| 525 |
+
"page_idx": 1
|
| 526 |
+
},
|
| 527 |
+
{
|
| 528 |
+
"type": "text",
|
| 529 |
+
"text": "4.1.3. Evaluation metrics",
|
| 530 |
+
"text_level": 1,
|
| 531 |
+
"bbox": [
|
| 532 |
+
519,
|
| 533 |
+
693,
|
| 534 |
+
682,
|
| 535 |
+
705
|
| 536 |
+
],
|
| 537 |
+
"page_idx": 1
|
| 538 |
+
},
|
| 539 |
+
{
|
| 540 |
+
"type": "text",
|
| 541 |
+
"text": "We report both the equal-error rate (EER) and the tandem detection cost function (t-DCF) [29] on the ASVspoof 2019 'eval' data. For consistency with the related work, we use the original implementation of the t-DCF as provided for the ASVspoof 2019 challenge [30]. For our proposed dataset, we report only the EER. This is because t-DCF scores require the false alarm and miss costs, which are available only for ASVspoof.",
|
| 542 |
+
"bbox": [
|
| 543 |
+
517,
|
| 544 |
+
712,
|
| 545 |
+
907,
|
| 546 |
+
799
|
| 547 |
+
],
|
| 548 |
+
"page_idx": 1
|
| 549 |
+
},
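For reference, the EER is the operating point at which the false-acceptance rate equals the false-rejection rate. A common way to compute it from detection scores uses scikit-learn's ROC curve; the labels and scores below are dummy values.

```python
import numpy as np
from sklearn.metrics import roc_curve

def compute_eer(labels, scores):
    """EER: the point where the false positive rate equals the
    false negative rate (1 - true positive rate)."""
    fpr, tpr, _ = roc_curve(labels, scores)
    fnr = 1 - tpr
    idx = np.nanargmin(np.abs(fnr - fpr))
    return (fpr[idx] + fnr[idx]) / 2

labels = np.array([0, 0, 1, 1, 1])              # dummy ground truth
scores = np.array([0.1, 0.4, 0.35, 0.8, 0.9])   # dummy detection scores
print(f"EER = {compute_eer(labels, scores):.3f}")
```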
|
| 550 |
+
{
|
| 551 |
+
"type": "text",
|
| 552 |
+
"text": "4.2. Feature Extraction",
|
| 553 |
+
"text_level": 1,
|
| 554 |
+
"bbox": [
|
| 555 |
+
519,
|
| 556 |
+
812,
|
| 557 |
+
678,
|
| 558 |
+
824
|
| 559 |
+
],
|
| 560 |
+
"page_idx": 1
|
| 561 |
+
},
|
| 562 |
+
{
|
| 563 |
+
"type": "text",
|
| 564 |
+
"text": "Several architectures used in this work require preprocessing the audio data with a feature extractor (LCNN, LCNN-Attention, LCNN-LSTM, LSTM, MesoNet, MesoInception, ResNet18, Transformer). We evalu",
|
| 565 |
+
"bbox": [
|
| 566 |
+
517,
|
| 567 |
+
831,
|
| 568 |
+
907,
|
| 569 |
+
881
|
| 570 |
+
],
|
| 571 |
+
"page_idx": 1
|
| 572 |
+
},
|
| 573 |
+
{
|
| 574 |
+
"type": "page_footnote",
|
| 575 |
+
"text": "${}^{2}$ records available at deepfake-total.com/in_the_wild",
|
| 576 |
+
"bbox": [
|
| 577 |
+
109,
|
| 578 |
+
868,
|
| 579 |
+
394,
|
| 580 |
+
881
|
| 581 |
+
],
|
| 582 |
+
"page_idx": 1
|
| 583 |
+
},
|
| 584 |
+
{
|
| 585 |
+
"type": "table",
|
| 586 |
+
"img_path": "images/f50685de2e22b9f391e06b131dbc06fe719ccfae394ba11548ef244902e737db.jpg",
|
| 587 |
+
"table_caption": [],
|
| 588 |
+
"table_footnote": [],
|
| 589 |
+
"table_body": "<table><tr><td rowspan=\"2\">Model Name</td><td rowspan=\"2\">Feature Type</td><td rowspan=\"2\">Input Length</td><td colspan=\"2\">ASVspoof19 eval</td><td rowspan=\"2\">In-the-Wild Data EER%</td></tr><tr><td>EER%</td><td>t-DCF</td></tr><tr><td>LCNN</td><td>cqtspec</td><td>Full</td><td>6.354±0.39</td><td>0.174±0.03</td><td>65.559±11.14</td></tr><tr><td>LCNN</td><td>cqtspec</td><td>4s</td><td>25.534±0.10</td><td>0.512±0.00</td><td>70.015±4.74</td></tr><tr><td>LCNN</td><td>logspec</td><td>Full</td><td>7.537±0.42</td><td>0.141±0.02</td><td>72.515±2.15</td></tr><tr><td>LCNN</td><td>logspec</td><td>4s</td><td>22.271±2.36</td><td>0.377±0.01</td><td>91.110±2.17</td></tr><tr><td>LCNN</td><td>melspec</td><td>Full</td><td>15.093±2.73</td><td>0.428±0.05</td><td>70.311±2.15</td></tr><tr><td>LCNN</td><td>melspec</td><td>4s</td><td>30.258±3.38</td><td>0.503±0.04</td><td>81.942±3.50</td></tr><tr><td>LCNN-Attention</td><td>cqtspec</td><td>Full</td><td>6.762±0.27</td><td>0.178±0.01</td><td>66.684±1.08</td></tr><tr><td>LCNN-Attention</td><td>cqtspec</td><td>4s</td><td>23.228±3.98</td><td>0.468±0.06</td><td>75.317±8.25</td></tr><tr><td>LCNN-Attention</td><td>logspec</td><td>Full</td><td>7.888±0.57</td><td>0.180±0.05</td><td>77.122±4.91</td></tr><tr><td>LCNN-Attention</td><td>logspec</td><td>4s</td><td>14.958±2.37</td><td>0.354±0.03</td><td>80.651±6.14</td></tr><tr><td>LCNN-Attention</td><td>melspec</td><td>Full</td><td>13.487±5.59</td><td>0.374±0.14</td><td>70.986±9.73</td></tr><tr><td>LCNN-Attention</td><td>melspec</td><td>4s</td><td>19.534±2.57</td><td>0.449±0.02</td><td>85.118±1.01</td></tr><tr><td>LCNN-LSTM</td><td>cqtspec</td><td>Full</td><td>6.228±0.50</td><td>0.113±0.01</td><td>61.500±1.37</td></tr><tr><td>LCNN-LSTM</td><td>cqtspec</td><td>4s</td><td>20.857±0.14</td><td>0.478±0.01</td><td>72.251±2.97</td></tr><tr><td>LCNN-LSTM</td><td>logspec</td><td>Full</td><td>9.936±1.74</td><td>0.158±0.01</td><td>79.109±0.84</td></tr><tr><td>LCNN-LSTM</td><td>logspec</td><td>4s</td><td>13.018±3.08</td><td>0.330±0.05</td><td>79.706±15.80</td></tr><tr><td>LCNN-LSTM</td><td>melspec</td><td>Full</td><td>9.260±1.33</td><td>0.240±0.04</td><td>62.304±0.17</td></tr><tr><td>LCNN-LSTM</td><td>melspec</td><td>4s</td><td>27.948±4.64</td><td>0.483±0.03</td><td>82.857±3.49</td></tr><tr><td>LSTM</td><td>cqtspec</td><td>Full</td><td>7.162±0.27</td><td>0.127±0.00</td><td>53.711±11.68</td></tr><tr><td>LSTM</td><td>cqtspec</td><td>4s</td><td>14.409±2.19</td><td>0.382±0.05</td><td>55.880±0.88</td></tr><tr><td>LSTM</td><td>logspec</td><td>Full</td><td>10.314±0.81</td><td>0.160±0.00</td><td>73.111±2.52</td></tr><tr><td>LSTM</td><td>logspec</td><td>4s</td><td>23.232±0.32</td><td>0.512±0.00</td><td>78.071±0.49</td></tr><tr><td>LSTM</td><td>melspec</td><td>Full</td><td>16.216±2.92</td><td>0.358±0.00</td><td>65.957±7.70</td></tr><tr><td>LSTM</td><td>melspec</td><td>4s</td><td>37.463±0.46</td><td>0.553±0.01</td><td>64.297±2.23</td></tr><tr><td>MesoInception</td><td>cqtspec</td><td>Full</td><td>11.353±1.00</td><td>0.326±0.03</td><td>50.007±14.69</td></tr><tr><td>MesoInception</td><td>cqtspec</td><td>4s</td><td>21.973±4.96</td><td>0.453±0.09</td><td>68.192±12.47</td></tr><tr><td>MesoInception</td><td>logspec</td><td>Full</td><td>10.019±0.18</td><td>0.238±0.02</td><td>37.414±9.16</td></tr><tr><td>MesoInception</td><td>logspec</td><td>4s</td><td>16.377±3.72</td><td>0.375±0.09</td><td>72.753±6.62</td></tr><tr><td>MesoInception</td><td>melspec</td><td>Full</td><td>14.058±5.67</td><td>0.331±0.11</td><td>61.996±12.65</td></tr><tr><td>
MesoInception</td><td>melspec</td><td>4s</td><td>21.484±3.51</td><td>0.408±0.03</td><td>51.980±15.32</td></tr><tr><td>MesoNet</td><td>cqtspec</td><td>Full</td><td>7.422±1.61</td><td>0.219±0.07</td><td>54.544±11.50</td></tr><tr><td>MesoNet</td><td>cqtspec</td><td>4s</td><td>20.395±2.03</td><td>0.426±0.06</td><td>65.928±2.57</td></tr><tr><td>MesoNet</td><td>logspec</td><td>Full</td><td>8.369±1.06</td><td>0.170±0.05</td><td>46.939±5.81</td></tr><tr><td>MesoNet</td><td>logspec</td><td>4s</td><td>11.124±0.79</td><td>0.263±0.03</td><td>80.707±12.03</td></tr><tr><td>MesoNet</td><td>melspec</td><td>Full</td><td>11.305±1.80</td><td>0.321±0.06</td><td>58.405±11.28</td></tr><tr><td>MesoNet</td><td>melspec</td><td>4s</td><td>21.761±0.26</td><td>0.467±0.00</td><td>64.415±15.68</td></tr><tr><td>ResNet18</td><td>cqtspec</td><td>Full</td><td>6.552±0.49</td><td>0.140±0.01</td><td>49.759±0.17</td></tr><tr><td>ResNet18</td><td>cqtspec</td><td>4s</td><td>18.378±1.76</td><td>0.432±0.07</td><td>61.827±7.46</td></tr><tr><td>ResNet18</td><td>logspec</td><td>Full</td><td>7.386±0.42</td><td>0.139±0.02</td><td>80.212±0.23</td></tr><tr><td>ResNet18</td><td>logspec</td><td>4s</td><td>15.521±1.83</td><td>0.387±0.02</td><td>88.729±2.88</td></tr><tr><td>ResNet18</td><td>melspec</td><td>Full</td><td>21.658±2.56</td><td>0.551±0.04</td><td>77.614±1.47</td></tr><tr><td>ResNet18</td><td>melspec</td><td>4s</td><td>28.178±0.33</td><td>0.489±0.01</td><td>83.006±7.17</td></tr><tr><td>Transformer</td><td>cqtspec</td><td>Full</td><td>7.498±0.34</td><td>0.129±0.01</td><td>43.775±2.85</td></tr><tr><td>Transformer</td><td>cqtspec</td><td>4s</td><td>11.256±0.07</td><td>0.329±0.00</td><td>48.208±1.49</td></tr><tr><td>Transformer</td><td>logspec</td><td>Full</td><td>9.949±1.77</td><td>0.210±0.06</td><td>64.789±0.88</td></tr><tr><td>Transformer</td><td>logspec</td><td>4s</td><td>13.935±1.70</td><td>0.320±0.03</td><td>44.406±2.17</td></tr><tr><td>Transformer</td><td>melspec</td><td>Full</td><td>20.813±6.44</td><td>0.394±0.10</td><td>73.307±2.81</td></tr><tr><td>Transformer</td><td>melspec</td><td>4s</td><td>26.495±1.76</td><td>0.495±0.00</td><td>68.407±5.53</td></tr><tr><td>CRNNSpoof</td><td>raw</td><td>Full</td><td>15.658±0.35</td><td>0.312±0.01</td><td>44.500±8.13</td></tr><tr><td>CRNNSpoof</td><td>raw</td><td>4s</td><td>19.640±1.62</td><td>0.360±0.04</td><td>41.710±4.86</td></tr><tr><td>RawNet2</td><td>raw</td><td>Full</td><td>3.154±0.87</td><td>0.078±0.02</td><td>37.819±2.23</td></tr><tr><td>RawNet2</td><td>raw</td><td>4s</td><td>4.351±0.29</td><td>0.132±0.01</td><td>33.943±2.59</td></tr><tr><td>RawPC</td><td>raw</td><td>Full</td><td>3.092±0.36</td><td>0.071±0.00</td><td>45.715±12.20</td></tr><tr><td>RawPC</td><td>raw</td><td>4s</td><td>3.067±0.91</td><td>0.097±0.03</td><td>52.884±6.08</td></tr><tr><td>RawGAT-ST</td><td>raw</td><td>Full</td><td>1.229±0.43</td><td>0.036±0.01</td><td>37.154±1.95</td></tr><tr><td>RawGAT-ST</td><td>raw</td><td>4s</td><td>2.297±0.98</td><td>0.074±0.03</td><td>38.767±1.28</td></tr></table>",
|
| 590 |
+
"bbox": [
|
| 591 |
+
201,
|
| 592 |
+
86,
|
| 593 |
+
798,
|
| 594 |
+
790
|
| 595 |
+
],
|
| 596 |
+
"page_idx": 2
|
| 597 |
+
},
|
| 598 |
+
{
|
| 599 |
+
"type": "text",
|
| 600 |
+
"text": "Table 1: Full results of evaluation on the ASVspoof 2019 LA 'eval' data. We compare different model architectures against different feature types and audio input lengths (4s, fixed-sized inputs vs. variable-length inputs). Results are averaged over three independent trials with random initialization, and the standard deviation is reported. Best-performing configurations are highlighted in boldface. When evaluating the models on our proposed 'in-the-wild' dataset, we see an increase in EER by up to $1000\\%$ compared to ASVspoof 2019 (rightmost column).",
|
| 601 |
+
"bbox": [
|
| 602 |
+
92,
|
| 603 |
+
804,
|
| 604 |
+
907,
|
| 605 |
+
866
|
| 606 |
+
],
|
| 607 |
+
"page_idx": 2
|
| 608 |
+
},
|
| 609 |
+
{
|
| 610 |
+
"type": "table",
|
| 611 |
+
"img_path": "images/d9a6bf01f98356925b872addbba3b2776e70f6a6a1226effabd5f9b4b8dc9a98.jpg",
|
| 612 |
+
"table_caption": [],
|
| 613 |
+
"table_footnote": [],
|
| 614 |
+
"table_body": "<table><tr><td rowspan=\"2\">Input Length</td><td colspan=\"2\">ASVspoof19 eval</td><td>In-the-Wild Data</td></tr><tr><td>EER %</td><td>t-DCF</td><td>EER %</td></tr><tr><td>Full</td><td>9.85</td><td>0.22</td><td>60.10</td></tr><tr><td>4s</td><td>18.89</td><td>0.39</td><td>67.25</td></tr></table>",
|
| 615 |
+
"bbox": [
|
| 616 |
+
109,
|
| 617 |
+
86,
|
| 618 |
+
460,
|
| 619 |
+
152
|
| 620 |
+
],
|
| 621 |
+
"page_idx": 3
|
| 622 |
+
},
|
| 623 |
+
{
|
| 624 |
+
"type": "text",
|
| 625 |
+
"text": "Table 2: Model performance averaged by input preprocessing. Fixed-length, 4s inputs perform significantly worse on the ASVspoof data and on the 'in-the-wild' dataset than variable-length inputs. This suggests that related work using fixed-length inputs may (unnecessarily) sacrifice performance.",
|
| 626 |
+
"bbox": [
|
| 627 |
+
90,
|
| 628 |
+
168,
|
| 629 |
+
480,
|
| 630 |
+
231
|
| 631 |
+
],
|
| 632 |
+
"page_idx": 3
|
| 633 |
+
},
|
| 634 |
+
{
|
| 635 |
+
"type": "text",
|
| 636 |
+
"text": "ate these architectures on constant-Q transform (cqtspec [31]), log spectrogram (logspec) and mel-scaled spectrogram (mel-spec [32]) features (all of them 513-dimensional). We use Python, librosa [33] and scipy [34]. The rest of the models does not rely on pre-processed data, but uses raw audio waveforms as inputs.",
|
| 637 |
+
"bbox": [
|
| 638 |
+
90,
|
| 639 |
+
269,
|
| 640 |
+
480,
|
| 641 |
+
347
|
| 642 |
+
],
|
| 643 |
+
"page_idx": 3
|
| 644 |
+
},
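The three feature types can be computed with librosa roughly as follows. The FFT size and bin counts here are assumptions for illustration (e.g., `n_fft=1024` yields 513 STFT bins, matching the stated dimensionality), not parameters taken from the paper.

```python
import numpy as np
import librosa

y, sr = librosa.load("clip.wav", sr=16000)   # placeholder path

# Constant-Q transform (cqtspec), log-magnitude.
cqt = np.abs(librosa.cqt(y, sr=sr))
cqtspec = librosa.amplitude_to_db(cqt)

# Log spectrogram (logspec) from an STFT; n_fft=1024 gives 513 bins.
stft = np.abs(librosa.stft(y, n_fft=1024))
logspec = librosa.amplitude_to_db(stft)

# Mel-scaled spectrogram (melspec); n_mels is left at librosa's default here.
mel = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=1024)
melspec = librosa.power_to_db(mel)
```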
|
| 645 |
+
{
|
| 646 |
+
"type": "text",
|
| 647 |
+
"text": "4.3. Audio Input Length",
|
| 648 |
+
"text_level": 1,
|
| 649 |
+
"bbox": [
|
| 650 |
+
90,
|
| 651 |
+
357,
|
| 652 |
+
257,
|
| 653 |
+
372
|
| 654 |
+
],
|
| 655 |
+
"page_idx": 3
|
| 656 |
+
},
|
| 657 |
+
{
|
| 658 |
+
"type": "text",
|
| 659 |
+
"text": "Audio samples usually vary in length, which is also the case for the data in ASVspoof 2019 and our proposed 'in-the-wild' dataset. While some models can accommodate variable-length input (and thus also fixed-length input), many can not. We extend these by introducing a global averaging layer, which adds such capability.",
|
| 660 |
+
"bbox": [
|
| 661 |
+
90,
|
| 662 |
+
376,
|
| 663 |
+
478,
|
| 664 |
+
451
|
| 665 |
+
],
|
| 666 |
+
"page_idx": 3
|
| 667 |
+
},
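Such a global averaging layer can be as simple as a mean over the time axis, mapping variable-length feature sequences to a fixed-size embedding; a minimal PyTorch sketch with assumed tensor shapes:

```python
import torch

class GlobalTimeAverage(torch.nn.Module):
    """Average over the time dimension so that inputs of any length
    map to a fixed-size embedding."""
    def forward(self, x):          # x: (batch, time, features)
        return x.mean(dim=1)       # -> (batch, features)

pool = GlobalTimeAverage()
short = torch.randn(2, 100, 64)   # 100 frames
long_ = torch.randn(2, 937, 64)   # 937 frames
assert pool(short).shape == pool(long_).shape == (2, 64)
```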
|
| 668 |
+
{
|
| 669 |
+
"type": "text",
|
| 670 |
+
"text": "In our evaluation of fixed-length input, we chose a length of four seconds, following [23]. If an input sample is longer, a random four-second subset of the sample is used. If it is shorter, the sample is repeated. To keep the evaluation fair, these shorter samples are also repeated during the full-length evaluation. This ensures that full-length input is never shorter than truncated input, but always at least 4s.",
|
| 671 |
+
"bbox": [
|
| 672 |
+
90,
|
| 673 |
+
453,
|
| 674 |
+
480,
|
| 675 |
+
541
|
| 676 |
+
],
|
| 677 |
+
"page_idx": 3
|
| 678 |
+
},
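The truncation-and-repetition scheme can be sketched as follows; the function name and the NumPy formulation are ours, and four seconds at 16 kHz corresponds to 64000 samples. For full-length evaluation, clips shorter than the target are likewise tiled up to 4 s.

```python
import numpy as np

def fix_length(wave, target=4 * 16000, rng=np.random.default_rng()):
    """Random 4 s crop of longer clips; tile shorter clips until
    they reach the target length, as described in the text."""
    if len(wave) >= target:
        start = rng.integers(0, len(wave) - target + 1)
        return wave[start:start + target]
    reps = int(np.ceil(target / len(wave)))
    return np.tile(wave, reps)[:target]

clip = np.random.randn(30000)       # a 1.875 s dummy clip at 16 kHz
assert len(fix_length(clip)) == 64000
```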
|
| 679 |
+
{
|
| 680 |
+
"type": "text",
|
| 681 |
+
"text": "5. Results",
|
| 682 |
+
"text_level": 1,
|
| 683 |
+
"bbox": [
|
| 684 |
+
236,
|
| 685 |
+
554,
|
| 686 |
+
332,
|
| 687 |
+
569
|
| 688 |
+
],
|
| 689 |
+
"page_idx": 3
|
| 690 |
+
},
|
| 691 |
+
{
|
| 692 |
+
"type": "text",
|
| 693 |
+
"text": "Table 1 shows the results of our experiments, where we evaluate all models against all configurations of data preprocessing: we train twelve different models, using one of four different feature types, with two different ways of handling variable-length audio. Each experiment is performed three times, using random initialization. We report averaged EER and t-DCF, as well as standard deviation. We observe that on ASVspoof, our implementations perform comparable to related work, with a margin of approximately $2 - 4\\%$ EER and 0.1 t-DCF. This is likely because we do not fine-tune our models' hyper-parameters.",
|
| 694 |
+
"bbox": [
|
| 695 |
+
90,
|
| 696 |
+
575,
|
| 697 |
+
480,
|
| 698 |
+
702
|
| 699 |
+
],
|
| 700 |
+
"page_idx": 3
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"type": "text",
|
| 704 |
+
"text": "5.1. Fixed vs. Variable Input Length",
|
| 705 |
+
"text_level": 1,
|
| 706 |
+
"bbox": [
|
| 707 |
+
90,
|
| 708 |
+
712,
|
| 709 |
+
334,
|
| 710 |
+
726
|
| 711 |
+
],
|
| 712 |
+
"page_idx": 3
|
| 713 |
+
},
|
| 714 |
+
{
|
| 715 |
+
"type": "text",
|
| 716 |
+
"text": "We analyze the effects of truncating the input signal to a fixed length compared to using the full, unabridged audio. For all models, performance decreases when the input is trimmed to $4s$ . Table 2 averages all results based on input length. We see that average EER on ASVspoof drops from $19.89\\%$ to $9.85\\%$ when the full-length input is used. These results show that a four-second clip is insufficient for the model to extract useful information compared to using the full audio file as input. Therefore, we propose not to use fixed-length truncated inputs, but to provide the full audio file to the model. This may seem obvious, but the numerous works that use fixed-length inputs [23, 25, 26] suggest otherwise.",
|
| 717 |
+
"bbox": [
|
| 718 |
+
90,
|
| 719 |
+
732,
|
| 720 |
+
480,
|
| 721 |
+
882
|
| 722 |
+
],
|
| 723 |
+
"page_idx": 3
|
| 724 |
+
},
|
| 725 |
+
{
|
| 726 |
+
"type": "text",
|
| 727 |
+
"text": "5.2. Effects of Feature Extraction Techniques",
|
| 728 |
+
"text_level": 1,
|
| 729 |
+
"bbox": [
|
| 730 |
+
519,
|
| 731 |
+
87,
|
| 732 |
+
821,
|
| 733 |
+
102
|
| 734 |
+
],
|
| 735 |
+
"page_idx": 3
|
| 736 |
+
},
|
| 737 |
+
{
|
| 738 |
+
"type": "text",
|
| 739 |
+
"text": "We discuss the effects of different feature preprocessing techniques, c.f. 1: The 'raw' models outperform the feature-based models, obtaining up to $1.2\\%$ EER on ASVspoof and $33.9\\%$ EER on the 'in-the-wild' dataset (RawGAT-ST and RawNet2). The spectrogram-based models perform slightly worse, achieving up to $6.3\\%$ EER on ASVspoof and $37.4\\%$ on the 'in-the-wild' dataset (LCNN and MesoNet). The superiority of the 'raw' models is assumed to be due to finer feature-extraction resolution than the spectrogram-based models [26]. This has lead recent research to focus largely on such raw-feature, end-to-end models [25, 26].",
|
| 740 |
+
"bbox": [
|
| 741 |
+
517,
|
| 742 |
+
107,
|
| 743 |
+
907,
|
| 744 |
+
243
|
| 745 |
+
],
|
| 746 |
+
"page_idx": 3
|
| 747 |
+
},
|
| 748 |
+
{
|
| 749 |
+
"type": "text",
|
| 750 |
+
"text": "Concerning the spectogram-based models, we observe that melspec features are always outperformed by either cqtspec of logspec. Simply replacing melspec with cqtspec increases the average performance by $37\\%$ , all other factors constant.",
|
| 751 |
+
"bbox": [
|
| 752 |
+
517,
|
| 753 |
+
244,
|
| 754 |
+
907,
|
| 755 |
+
294
|
| 756 |
+
],
|
| 757 |
+
"page_idx": 3
|
| 758 |
+
},
|
| 759 |
+
{
|
| 760 |
+
"type": "text",
|
| 761 |
+
"text": "5.3. Evaluation on 'in-the-wild' data",
|
| 762 |
+
"text_level": 1,
|
| 763 |
+
"bbox": [
|
| 764 |
+
519,
|
| 765 |
+
306,
|
| 766 |
+
764,
|
| 767 |
+
319
|
| 768 |
+
],
|
| 769 |
+
"page_idx": 3
|
| 770 |
+
},
|
| 771 |
+
{
|
| 772 |
+
"type": "text",
|
| 773 |
+
"text": "Especially interesting is the performance of the models on real-world deepfake data. Table 1 shows the performance of our models on the 'in-the-wild' dataset. We see that there is a large performance gap between the ASVSproof 2019 evaluation data and our proposed 'in-the-wild' dataset. In general, the EER values of the models deteriorate by about 200 to 1000 percent. Often, the models do not perform better than random guessing.",
|
| 774 |
+
"bbox": [
|
| 775 |
+
517,
|
| 776 |
+
325,
|
| 777 |
+
907,
|
| 778 |
+
412
|
| 779 |
+
],
|
| 780 |
+
"page_idx": 3
|
| 781 |
+
},
|
| 782 |
+
{
|
| 783 |
+
"type": "text",
|
| 784 |
+
"text": "To investigate this further, we train our best 'in-the-wild' model from Table 1, RawNet2 with 4s input length, on all from ASVspoof 2019, i.e., the 'train', 'dev', and 'eval' splits. We then re-evaluate on the 'in-the-wild' dataset to investigate whether adding more ASVspoof training data improves out-of-domain performance. We achieve $33.1 \\pm 0.2\\%$ EER, i.e., no improvement over training with only the 'train' and 'dev' data.",
|
| 785 |
+
"bbox": [
|
| 786 |
+
517,
|
| 787 |
+
412,
|
| 788 |
+
907,
|
| 789 |
+
499
|
| 790 |
+
],
|
| 791 |
+
"page_idx": 3
|
| 792 |
+
},
|
| 793 |
+
{
|
| 794 |
+
"type": "text",
|
| 795 |
+
"text": "The inclusion of the 'eval' split does not seem to add much information that could be used for real-world generalization. This is plausible in that all splits of ASVspoof are fundamentally based on the same dataset, VCTK, although the synthesis algorithms and speakers differ between splits [5].",
|
| 796 |
+
"bbox": [
|
| 797 |
+
517,
|
| 798 |
+
499,
|
| 799 |
+
907,
|
| 800 |
+
562
|
| 801 |
+
],
|
| 802 |
+
"page_idx": 3
|
| 803 |
+
},
|
| 804 |
+
{
|
| 805 |
+
"type": "text",
|
| 806 |
+
"text": "6. Conclusion",
|
| 807 |
+
"text_level": 1,
|
| 808 |
+
"bbox": [
|
| 809 |
+
648,
|
| 810 |
+
574,
|
| 811 |
+
779,
|
| 812 |
+
589
|
| 813 |
+
],
|
| 814 |
+
"page_idx": 3
|
| 815 |
+
},
|
| 816 |
+
{
|
| 817 |
+
"type": "text",
|
| 818 |
+
"text": "In this paper, we systematically evaluate audio spoof detection models from related work according to common standards. In addition, we present a new audio deefake dataset of 'in-the-wild' audio spools that we use to evaluate the generalization capabilities of related work in a real-world scenario.",
|
| 819 |
+
"bbox": [
|
| 820 |
+
517,
|
| 821 |
+
595,
|
| 822 |
+
907,
|
| 823 |
+
657
|
| 824 |
+
],
|
| 825 |
+
"page_idx": 3
|
| 826 |
+
},
|
| 827 |
+
{
|
| 828 |
+
"type": "text",
|
| 829 |
+
"text": "We find that regardless of the model architecture, some preprocessing steps are more successful than others. It turns out that the use of cqtspec or logspec features consistently outperforms the use of melspec features in our comprehensive analysis. Furthermore, we find that for most models, four seconds of input audio does not saturate performance compared to longer examples. Therefore, we argue that one should consider using cqtspec features and unabridged input audio when designing audio deepfake detection architectures.",
|
| 830 |
+
"bbox": [
|
| 831 |
+
517,
|
| 832 |
+
658,
|
| 833 |
+
907,
|
| 834 |
+
769
|
| 835 |
+
],
|
| 836 |
+
"page_idx": 3
|
| 837 |
+
},
|
| 838 |
+
{
|
| 839 |
+
"type": "text",
|
| 840 |
+
"text": "Most importantly, however, we find that the 'in-the-wild' generalization capabilities of many models may have been overestimated. We demonstrate this by collecting our own audio deepfake dataset and evaluating twelve different model architectures on it. Performance drops sharply, and some models degenerate to random guessing. It may be possible that the community has tailored its detection models too closely to the prevailing benchmark, ASVSpoof, and that deepfakes are much harder to detect outside the lab than previously thought.",
|
| 841 |
+
"bbox": [
|
| 842 |
+
517,
|
| 843 |
+
769,
|
| 844 |
+
907,
|
| 845 |
+
882
|
| 846 |
+
],
|
| 847 |
+
"page_idx": 3
|
| 848 |
+
},
|
| 849 |
+
{
|
| 850 |
+
"type": "text",
|
| 851 |
+
"text": "7. References",
|
| 852 |
+
"text_level": 1,
|
| 853 |
+
"bbox": [
|
| 854 |
+
221,
|
| 855 |
+
86,
|
| 856 |
+
347,
|
| 857 |
+
101
|
| 858 |
+
],
|
| 859 |
+
"page_idx": 4
|
| 860 |
+
},
|
| 861 |
+
{
|
| 862 |
+
"type": "list",
|
| 863 |
+
"sub_type": "ref_text",
|
| 864 |
+
"list_items": [
|
| 865 |
+
"[1] \"Audio deep fake: Demonstrator企业发展 am braunhofer aisec - youtube,\" https://www.youtube.com/watch?v=MZTF0eAALmE, (Accessed on 04/01/2021).",
|
| 866 |
+
"[2] \"Deepfake video of volodymyr zelensky surrendering surfaces on social media - youtube,\" https://www.youtube.com/watch?v=X17yrEV5sl4, (Accessed on 03/23/2022).",
|
| 867 |
+
"[3] A. v. d. Oord, S. Dieleman, H. Zen, K. Simonyan, O. Vinyals, A. Graves, N. Kalchbrenner, A. Senior, and K. Kavukcuoglu, \"Wavenet: A generative model for raw audio,\" arXiv preprint arXiv:1609.03499, 2016.",
|
| 868 |
+
"[4] Y. Wang, R. J. Skerry-Ryan, D. Stanton, Y. Wu, R. J. Weiss, N. Jaitly, Z. Yang, Y. Xiao, Z. Chen, S. Bengio, Q. V. Le, Y. Agiomyrgiannakis, R. Clark, and R. A. Saurous, \"Tacotron: A fully end-to-end text-to-speech synthesis model,\" CoRR, vol. abs/1703.10135, 2017. [Online]. Available: http://arxiv.org/abs/1703.10135",
|
| 869 |
+
"[5] M. Todisco, X. Wang, V. Vestman, M. Sahidullah, H. Delgado, A. Nautsch, J. Yamagishi, N. Evans, T. Kinnunen, and K. A. Lee, \"Asvspoof 2019: Future horizons in spoofed and fake audio detection,\" arXiv preprint arXiv:1904.05441, 2019.",
|
| 870 |
+
"[6] A. Nautsch, X. Wang, N. Evans, T. H. Kinnunen, V. Vestman, M. Todisco, H. Delgado, M. Sahidullah, J. Yamagishi, and K. A. Lee, \"ASVspoof 2019: Spoofing Countermeasures for the Detection of Synthesized, Converted and Replayed Speech,\" vol. 3, no. 2, pp. 252-265.",
|
| 871 |
+
"[7] J. Yamagishi, C. Veaux, and K. MacDonald, \"CSTR VCTK Corpus: English multi-speaker corpus for CSTR voice cloning toolkit (version 0.92),\" 2019.",
|
| 872 |
+
"[8] A. Gomez-Alanis, A. M. Peinado, J. A. Gonzalez, and A. M. Gomez, “A Gated Recurrent Convolutional Neural Network for Robust Spoofing Detection,” vol. 27, no. 12, pp. 1985–1999.",
|
| 873 |
+
"[9] A. Chintha, B. Thai, S. J. Sohrawardi, K. M. Bhatt, A. Hickerson, M. Wright, and R. Ptucha, \"Recurrent Convolutional Structures for Audio Spoof and Video Deepfake Detection,\" pp. 1-1.",
|
| 874 |
+
"[10] L. Zhang, X. Wang, E. Cooper, J. Yamagishi, J. Patino, and N. Evans, \"An initial investigation for detecting partially spoofed audio,\" arXiv preprint arXiv:2104.02518, 2021.",
|
| 875 |
+
"[11] S. Tambe, A. Pawar, and S. Yadav, “Deep fake videos identification using ann and lstm,” Journal of Discrete Mathematical Sciences and Cryptography, vol. 24, no. 8, pp. 2353–2364, 2021.",
|
| 876 |
+
"[12] X. Wang and J. Yamagishi. A Comparative Study on Recent Neural Spoofing Countermeasures for Synthetic Speech Detection. [Online]. Available: http://arxiv.org/abs/2103.11326",
|
| 877 |
+
"[13] G. Lavrentyeva, S. Novoselov, E. Malykh, A. Kozlov, O. Kudashev, and V. Shchemelinin, \"Audio replay attack detection with deep learning frameworks,\" in Interspeech 2017. ISCA, pp. 82-86. [Online]. Available: http://www.isca-speech.org/archive/Interspeech_2017/abstracts/0360.html",
|
| 878 |
+
"[14] G. Lavrentyeva, S. Novoselov, A. Tseren, M. Volkova, A. Gorlanov, and A. Kozlov, \"STC antispoofing systems for the ASVspoof2019 challenge,\" in Interspeech 2019. ISCA, pp. 1033-1037. [Online]. Available: http://www.isca-speech.org/archive/Interspeech_2019/abstracts/1768.html",
|
| 879 |
+
"[15] D. Afchar, V. Nozick, J. Yamagishi, and I. Echizen, \"MesoNet: A Compact Facial Video Forgery Detection Network,\" in 2018 IEEE International Workshop on Information Forensics and Security (WIFS), pp. 1-7.",
|
| 880 |
+
"[16] C. Szegedy, W. Liu, Y. Jia, P. Sermanet, S. Reed, D. Anguelov, D. Erhan, V. Vanhoucke, and A. Rabinovich, \"Going deeper with convolutions,\" in 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015, pp. 1-9.",
|
| 881 |
+
"[17] M. Alzantot, Z. Wang, and M. B. Srivastava, “Deep Residual Neural Networks for Audio Spoofing Detection,” in Interspeech 2019. ISCA, pp. 1078–1082. [Online]. Available: http://www.isca-speech.org/archive/Interspeech_2019/abstracts/3174.html"
|
| 882 |
+
],
|
| 883 |
+
"bbox": [
|
| 884 |
+
94,
|
| 885 |
+
107,
|
| 886 |
+
478,
|
| 887 |
+
879
|
| 888 |
+
],
|
| 889 |
+
"page_idx": 4
|
| 890 |
+
},
|
| 891 |
+
{
|
| 892 |
+
"type": "list",
|
| 893 |
+
"sub_type": "ref_text",
|
| 894 |
+
"list_items": [
|
| 895 |
+
"[18] Y. Zhang, F. Jiang, and Z. Duan, “One-class learning towards synthetic voice spoofing detection,” IEEE Signal Processing Letters, vol. 28, pp. 937–941, 2021.",
|
| 896 |
+
"[19] J. Monteiro, J. Alam, and T. H. Falk, \"Generalized end-to-end detection of spoofing attacks to automatic speaker recognizers,\" Computer Speech & Language, vol. 63, p. 101096, 2020.",
|
| 897 |
+
"[20] K. He, X. Zhang, S. Ren, and J. Sun, \"Deep residual learning for image recognition,\" in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770-778.",
|
| 898 |
+
"[21] Z. Zhang, X. Yi, and X. Zhao, \"Fake speech detection using residual network with transformer encoder,\" in Proceedings of the 2021 ACM Workshop on Information Hiding and Multimedia Security, 2021, pp. 13-22.",
|
| 899 |
+
"[22] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin, \"Attention is all you need,\" Advances in neural information processing systems, vol. 30, 2017.",
|
| 900 |
+
"[23] H. Tak, J. Patino, M. Todisco, A. Nautsch, N. Evans, and A. Larcher, “End-to-End anti-spoofing with RawNet2,” in ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6369-6373.",
|
| 901 |
+
"[24] M. Ravanelli and Y. Bengio, \"Speaker recognition from raw waveform with sincnet,\" in 2018 IEEE Spoken Language Technology Workshop (SLT). IEEE, 2018, pp. 1021-1028.",
|
| 902 |
+
"[25] W. Ge, J. Patino, M. Todisco, and N. Evans, \"Raw differentiable architecture search for speech deepfake and spoofing detection,\" arXiv preprint arXiv:2107.12212, 2021.",
|
| 903 |
+
"[26] H. Tak, J.-w. Jung, J. Patino, M. Kamble, M. Todisco, and N. Evans, \"End-to-end spectro-temporal graph attention networks for speaker verification anti-spoofing and speech deepfake detection,\" arXiv preprint arXiv:2107.12710, 2021.",
|
| 904 |
+
"[27] D. P. Kingma and J. Ba, \"Adam: A method for stochastic optimization,\" in 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, Y. Bengio and Y. LeCun, Eds., 2015. [Online]. Available: http://arxiv.org/abs/1412.6980",
|
| 905 |
+
"[28] N. M. Müller, F. Dieckmann, P. Czempin, R. Canals, J. Williams, and K. Böttinger. Speech is Silver, Silence is Golden: What do ASVspoof-trained Models Really Learn? [Online]. Available: http://arxiv.org/abs/2106.12914",
|
| 906 |
+
"[29] T. Kinnunen, K. A. Lee, H. Delgado, N. Evans, M. Todisco, M. Sahidullah, J. Yamagishi, and D. A. Reynolds, \"t-DCF: a detection cost function for the tandem assessment of spoofing countermeasures and automatic speaker verification,\" in Odyssey 2018 The Speaker and Language Recognition Workshop. ISCA, pp. 312-319.",
|
| 907 |
+
"[30] \"tdcf official implementation,\" https://www.asvspoof.org/ asvsproof2019/tDCF.python_v1.zip, (Accessed on 03/03/2022).",
|
| 908 |
+
"[31] J. C. Brown, “Calculation of a constant q spectral transform,” The Journal of the Acoustical Society of America, vol. 89, no. 1, pp. 425–434, 1991.",
|
| 909 |
+
"[32] S. S. Stevens, J. Volkmann, and E. B. Newman, “A scale for the measurement of the psychological magnitude pitch,” The journal of the acoustical society of america, vol. 8, no. 3, pp. 185–190, 1937.",
|
| 910 |
+
"[33] B. McFee, C. Raffel, D. Liang, D. P. Ellis, M. McVicar, E. Battenberg, and O. Nieto, \"librosa: Audio and music signal analysis in python,\" in Proceedings of the 14th python in science conference, vol. 8. Citeseer, 2015, pp. 18-25.",
|
| 911 |
+
"[34] P. Virtanen, R. Gommers, T. E. Oliphant, M. Haberland, T. Reddy, D. Cournapeau, E. Burovski, P. Peterson, W. Weckesser, J. Bright et al., \"Scipy 1.0: fundamental algorithms for scientific computing in python,\" Nature methods, vol. 17, no. 3, pp. 261-272, 2020."
|
| 912 |
+
],
|
| 913 |
+
"bbox": [
|
| 914 |
+
522,
|
| 915 |
+
90,
|
| 916 |
+
907,
|
| 917 |
+
829
|
| 918 |
+
],
|
| 919 |
+
"page_idx": 4
|
| 920 |
+
}
|
| 921 |
+
]
|
2203.16xxx/2203.16263/581c38e5-2dd5-4af0-b295-8d57e76a6f20_model.json
ADDED
|
@@ -0,0 +1,1299 @@
|
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "aside_text",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.023,
|
| 7 |
+
0.305,
|
| 8 |
+
0.061,
|
| 9 |
+
0.723
|
| 10 |
+
],
|
| 11 |
+
"angle": 270,
|
| 12 |
+
"content": "arXiv:2203.16263v4 [cs.SD] 27 Aug 2024"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "title",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.272,
|
| 18 |
+
0.096,
|
| 19 |
+
0.728,
|
| 20 |
+
0.116
|
| 21 |
+
],
|
| 22 |
+
"angle": 0,
|
| 23 |
+
"content": "Does Audio Deepfake Detection Generalize?"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.252,
|
| 29 |
+
0.13,
|
| 30 |
+
0.747,
|
| 31 |
+
0.163
|
| 32 |
+
],
|
| 33 |
+
"angle": 0,
|
| 34 |
+
"content": "Nicolas M. Müller<sup>1</sup>, Pavel Czempin<sup>2</sup>, Franziska Dieckmann<sup>2</sup>, Adam Froghyar<sup>3</sup>, Konstantin Bötttinger<sup>1</sup>"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.19,
|
| 40 |
+
0.174,
|
| 41 |
+
0.812,
|
| 42 |
+
0.192
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "\\(^{1}\\)Fraunhofer AISEC \\(^{2}\\)Technical University Munich \\(^{3}\\)why do birds GmbH"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.34,
|
| 51 |
+
0.194,
|
| 52 |
+
0.66,
|
| 53 |
+
0.206
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": "nicolas.mueller@aisec.fraunhofer.de"
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "title",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.246,
|
| 62 |
+
0.226,
|
| 63 |
+
0.326,
|
| 64 |
+
0.241
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "Abstract"
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.091,
|
| 73 |
+
0.25,
|
| 74 |
+
0.48,
|
| 75 |
+
0.349
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": "Current text-to-speech algorithms produce realistic fakes of human voices, making deepfake detection a much-needed area of research. While researchers have presented various deep learning models for audio spoofs detection, it is often unclear exactly why these architectures are successful: Preprocessing steps, hyperparameter settings, and the degree of fine-tuning are not consistent across related work. Which factors contribute to success, and which are accidental?"
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.091,
|
| 84 |
+
0.35,
|
| 85 |
+
0.48,
|
| 86 |
+
0.435
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "In this work, we address this problem: We systematize audio spoofing detection by re-implementing and uniformly evaluating twelve architectures from related work. We identify overarching features for successful audio deepfake detection, such as using cqtspec or logspec features instead of melspec features, which improves performance by \\(37\\%\\) EER on average, all other factors constant."
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.091,
|
| 95 |
+
0.437,
|
| 96 |
+
0.481,
|
| 97 |
+
0.549
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": "Additionally, we evaluate generalization capabilities: We collect and publish a new dataset consisting of 37.9 hours of found audio recordings of celebrities and politicians, of which 17.2 hours are deepfakes. We find that related work performs poorly on such real-world data (performance degradation of up to one thousand percent). This could suggest that the community has tailored its solutions too closely to the prevailing ASVspoof benchmark and that deepfakes are much harder to detect outside the lab than previously thought."
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "title",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.216,
|
| 106 |
+
0.562,
|
| 107 |
+
0.357,
|
| 108 |
+
0.576
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "1. Introduction"
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.091,
|
| 117 |
+
0.583,
|
| 118 |
+
0.48,
|
| 119 |
+
0.67
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": "Modern text-to-speech synthesis (TTS) is capable of realistic fakes of human voices, also known as audio deepfakes or spoofs. While there are many ethical applications of this technology, there is also a serious risk of malicious use. For example, TTS technology enables the cloning of politicians' voices [1, 2], which poses a variety of risks to society, including the spread of misinformation."
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"bbox": [
|
| 127 |
+
0.091,
|
| 128 |
+
0.671,
|
| 129 |
+
0.48,
|
| 130 |
+
0.819
|
| 131 |
+
],
|
| 132 |
+
"angle": 0,
|
| 133 |
+
"content": "Reliable detection of speech spoofing can help mitigate such risks and is therefore an active area of research. However, since the technology to create audio deepfakes has only been available for a few years (see Wavenet [3] and Tacotron [4], published in 2016/17), audio spoof detection is still in its infancy. While many approaches have been proposed (cf. Section 2), it is still difficult to understand why some of the models work well: Each work uses different feature extraction techniques, preprocessing steps, hyperparameter settings, and fine-tuning. Which are the main factors and drivers for models to perform well? What can be learned in principle for the development of such systems?"
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"bbox": [
|
| 138 |
+
0.091,
|
| 139 |
+
0.82,
|
| 140 |
+
0.48,
|
| 141 |
+
0.882
|
| 142 |
+
],
|
| 143 |
+
"angle": 0,
|
| 144 |
+
"content": "Furthermore, the evaluation of spoof detection models has so far been performed exclusively on the ASVspoof dataset [5, 6], which means that the reported performance of these models is based on a limited set of TTS synthesis algorithms. ASVspoof is based on the VCTK dataset [7], which exclusively"
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "text",
|
| 148 |
+
"bbox": [
|
| 149 |
+
0.519,
|
| 150 |
+
0.228,
|
| 151 |
+
0.908,
|
| 152 |
+
0.291
|
| 153 |
+
],
|
| 154 |
+
"angle": 0,
|
| 155 |
+
"content": "features professional speakers and has been recorded in a studio environment, using a semi-anechoic chamber. What can we expect from audio spoof detection trained on this dataset? Is it capable of detecting realistic, unseen, 'in-the-wild' audio spoofs like those encountered on social media?"
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"type": "text",
|
| 159 |
+
"bbox": [
|
| 160 |
+
0.52,
|
| 161 |
+
0.291,
|
| 162 |
+
0.908,
|
| 163 |
+
0.315
|
| 164 |
+
],
|
| 165 |
+
"angle": 0,
|
| 166 |
+
"content": "To answer these questions, this paper presents the following contributions:"
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"type": "text",
|
| 170 |
+
"bbox": [
|
| 171 |
+
0.548,
|
| 172 |
+
0.321,
|
| 173 |
+
0.907,
|
| 174 |
+
0.419
|
| 175 |
+
],
|
| 176 |
+
"angle": 0,
|
| 177 |
+
"content": "- We reimplement twelve of the most popular architectures from related work and evaluate them according to a common standard. We systematically exchange components to attribute performance reported in related work to either model architecture, feature extraction, or data preprocessing techniques. In this way, we identify fundamental properties for well-performing audio deepfake detection."
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"type": "text",
|
| 181 |
+
"bbox": [
|
| 182 |
+
0.548,
|
| 183 |
+
0.422,
|
| 184 |
+
0.907,
|
| 185 |
+
0.483
|
| 186 |
+
],
|
| 187 |
+
"angle": 0,
|
| 188 |
+
"content": "- To investigate the applicability of related work in the real world, we introduce a new audio deepfake dataset<sup>1</sup>. We collect 17.2 hours of high-quality audio deepfakes and 20.7 hours of of authentic material from 58 politicians and celebrities."
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"type": "text",
|
| 192 |
+
"bbox": [
|
| 193 |
+
0.548,
|
| 194 |
+
0.486,
|
| 195 |
+
0.907,
|
| 196 |
+
0.549
|
| 197 |
+
],
|
| 198 |
+
"angle": 0,
|
| 199 |
+
"content": "- We show that established models generally perform poorly on such real-world data. This discrepancy between reported and actual generalization ability suggests that the detection of audio fakes is a far more difficult challenge than previously thought."
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"type": "list",
|
| 203 |
+
"bbox": [
|
| 204 |
+
0.548,
|
| 205 |
+
0.321,
|
| 206 |
+
0.907,
|
| 207 |
+
0.549
|
| 208 |
+
],
|
| 209 |
+
"angle": 0,
|
| 210 |
+
"content": null
|
| 211 |
+
},
|
| 212 |
+
{
|
| 213 |
+
"type": "title",
|
| 214 |
+
"bbox": [
|
| 215 |
+
0.638,
|
| 216 |
+
0.562,
|
| 217 |
+
0.792,
|
| 218 |
+
0.576
|
| 219 |
+
],
|
| 220 |
+
"angle": 0,
|
| 221 |
+
"content": "2. Related Work"
|
| 222 |
+
},
|
| 223 |
+
{
|
| 224 |
+
"type": "title",
|
| 225 |
+
"bbox": [
|
| 226 |
+
0.52,
|
| 227 |
+
0.582,
|
| 228 |
+
0.691,
|
| 229 |
+
0.594
|
| 230 |
+
],
|
| 231 |
+
"angle": 0,
|
| 232 |
+
"content": "2.1. Model Architectures"
|
| 233 |
+
},
|
| 234 |
+
{
|
| 235 |
+
"type": "text",
|
| 236 |
+
"bbox": [
|
| 237 |
+
0.519,
|
| 238 |
+
0.601,
|
| 239 |
+
0.908,
|
| 240 |
+
0.65
|
| 241 |
+
],
|
| 242 |
+
"angle": 0,
|
| 243 |
+
"content": "There is a significant body of work on audio spoof detection, driven largely by the ASVspoof challenges and datasets [5, 6]. In this section, we briefly present the architectures and models used in our evaluation in Section 5."
|
| 244 |
+
},
|
| 245 |
+
{
|
| 246 |
+
"type": "text",
|
| 247 |
+
"bbox": [
|
| 248 |
+
0.519,
|
| 249 |
+
0.651,
|
| 250 |
+
0.908,
|
| 251 |
+
0.737
|
| 252 |
+
],
|
| 253 |
+
"angle": 0,
|
| 254 |
+
"content": "LSTM-based models. Recurrent architectures are a natural choice in the area of language processing, with numerous related work utilizing such models [8, 9, 10, 11]. As a baseline for evaluating this approach, we implement a simple LSTM model: it consists of three LSTM layers followed by a single linear layer. The output is averaged over the time dimension to obtain a single embedding vector."
|
| 255 |
+
},
|
| 256 |
+
{
|
| 257 |
+
"type": "text",
|
| 258 |
+
"bbox": [
|
| 259 |
+
0.519,
|
| 260 |
+
0.737,
|
| 261 |
+
0.908,
|
| 262 |
+
0.835
|
| 263 |
+
],
|
| 264 |
+
"angle": 0,
|
| 265 |
+
"content": "LCNN. Another common architecture for audio spoof detection are LCNN-based learning models such as LCNN, LCNN-Attention, and LCNN-LSTM [12, 13, 14]. LCNNs combine convolutional layers with Max-Feature-Map activations to create 'light' convolutional neural networks. LCNN-Attention has an added single-head-attention pooling layer, while LCNN-LSTM uses a Bi-LSTM layer and a skip connection."
|
| 266 |
+
},
|
| 267 |
+
{
|
| 268 |
+
"type": "text",
|
| 269 |
+
"bbox": [
|
| 270 |
+
0.519,
|
| 271 |
+
0.836,
|
| 272 |
+
0.908,
|
| 273 |
+
0.862
|
| 274 |
+
],
|
| 275 |
+
"angle": 0,
|
| 276 |
+
"content": "MesoNet. MesoNet is based on the Meso-4 [15] architecture, which was originally used for detecting facial video"
|
| 277 |
+
},
|
| 278 |
+
{
|
| 279 |
+
"type": "page_footnote",
|
| 280 |
+
"bbox": [
|
| 281 |
+
0.54,
|
| 282 |
+
0.869,
|
| 283 |
+
0.754,
|
| 284 |
+
0.881
|
| 285 |
+
],
|
| 286 |
+
"angle": 0,
|
| 287 |
+
"content": "\\(^{1}\\)https://deepfake-total.com/in_the_wild"
|
| 288 |
+
}
|
| 289 |
+
],
|
| 290 |
+
[
|
| 291 |
+
{
|
| 292 |
+
"type": "text",
|
| 293 |
+
"bbox": [
|
| 294 |
+
0.091,
|
| 295 |
+
0.089,
|
| 296 |
+
0.479,
|
| 297 |
+
0.114
|
| 298 |
+
],
|
| 299 |
+
"angle": 0,
|
| 300 |
+
"content": "deepfakes. It uses 4 convolutional layers in addition to Batch Normalization, Max Pooling, and a fully connected classifier."
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "text",
|
| 304 |
+
"bbox": [
|
| 305 |
+
0.092,
|
| 306 |
+
0.115,
|
| 307 |
+
0.479,
|
| 308 |
+
0.152
|
| 309 |
+
],
|
| 310 |
+
"angle": 0,
|
| 311 |
+
"content": "MesoInception. Based on the facial deepfake detector Meso-Inception-4 [15], MesoInception extends the Meso-4 architecture with Inception blocks [16]."
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"bbox": [
|
| 316 |
+
0.092,
|
| 317 |
+
0.152,
|
| 318 |
+
0.478,
|
| 319 |
+
0.227
|
| 320 |
+
],
|
| 321 |
+
"angle": 0,
|
| 322 |
+
"content": "ResNet18. Residual Networks were first used for audio deepfake detection by [17], and continue to be employed [18, 19]. This architecture, first introduced in the computer vision domain [20], uses convolutional layers and shortcut connections, which avoids the vanishing gradient problem and allows to design especially deep networks (18 layers for ResNet18)."
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "text",
|
| 326 |
+
"bbox": [
|
| 327 |
+
0.092,
|
| 328 |
+
0.227,
|
| 329 |
+
0.478,
|
| 330 |
+
0.277
|
| 331 |
+
],
|
| 332 |
+
"angle": 0,
|
| 333 |
+
"content": "Transformer. The Transformer architecture has also found its way into the field of audio spoof detection [21]. We use four self-attention layers with 256 hidden dimensions and skip-connections, and encode time with positional encodings [22]."
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "text",
|
| 337 |
+
"bbox": [
|
| 338 |
+
0.092,
|
| 339 |
+
0.277,
|
| 340 |
+
0.478,
|
| 341 |
+
0.314
|
| 342 |
+
],
|
| 343 |
+
"angle": 0,
|
| 344 |
+
"content": "CRNNProof. This end-to-end architecture combines 1D convolutions with recurrent layers to learn features directly from raw audio samples [9]."
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
"type": "text",
|
| 348 |
+
"bbox": [
|
| 349 |
+
0.092,
|
| 350 |
+
0.315,
|
| 351 |
+
0.478,
|
| 352 |
+
0.351
|
| 353 |
+
],
|
| 354 |
+
"angle": 0,
|
| 355 |
+
"content": "RawNet2 [23] is another end-to-end model. It employs Sinc-Layers [24], which correspond to rectangular band-pass filters, to extract information directly from raw waveforms."
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"type": "text",
|
| 359 |
+
"bbox": [
|
| 360 |
+
0.092,
|
| 361 |
+
0.352,
|
| 362 |
+
0.478,
|
| 363 |
+
0.389
|
| 364 |
+
],
|
| 365 |
+
"angle": 0,
|
| 366 |
+
"content": "RawPC is an end-to-end model which also uses Sinc-layers to operate directly on raw wavforms. The architecture is found via differentiable architecture search [25]."
|
| 367 |
+
},
|
| 368 |
+
{
|
| 369 |
+
"type": "text",
|
| 370 |
+
"bbox": [
|
| 371 |
+
0.092,
|
| 372 |
+
0.39,
|
| 373 |
+
0.478,
|
| 374 |
+
0.452
|
| 375 |
+
],
|
| 376 |
+
"angle": 0,
|
| 377 |
+
"content": "RawGAT-ST, a spectro-temporal graph attention network (GAT), trained in an end-to-end fashion. It introduces spectral and temporal sub-graphs and a graph pooling strategy, and reports state-of-the-art spoof detection capabilities [26], which we can verify experimentally, c.f. Table 1."
|
| 378 |
+
},
|
| 379 |
+
{
|
| 380 |
+
"type": "title",
|
| 381 |
+
"bbox": [
|
| 382 |
+
0.233,
|
| 383 |
+
0.466,
|
| 384 |
+
0.338,
|
| 385 |
+
0.48
|
| 386 |
+
],
|
| 387 |
+
"angle": 0,
|
| 388 |
+
"content": "3. Datasets"
|
| 389 |
+
},
|
| 390 |
+
{
|
| 391 |
+
"type": "text",
|
| 392 |
+
"bbox": [
|
| 393 |
+
0.091,
|
| 394 |
+
0.486,
|
| 395 |
+
0.479,
|
| 396 |
+
0.647
|
| 397 |
+
],
|
| 398 |
+
"angle": 0,
|
| 399 |
+
"content": "To train and evaluate our models, we use the ASVspoof 2019 dataset [5], in particular its Logical Access (LA) part. It consists of audio files that are either real (i.e., authentic recordings of human speech) or fake (i.e., synthesized or faked audio). The spoofed audio files are from 19 different TTS synthesis algorithms. From a spoofing detection point of view, ASVspoof considers synthetic utterances as a threat to the authenticity of the human voice, and therefore labels them as 'attacks'. In total, there are 19 different attackers in the ASVspoof 2019 dataset, labeled A1 - A19. For each attacker, there are 4914 synthetic audio recordings and 7355 real samples. This dataset is arguably the best known audio deefake dataset used by almost all related work."
|
| 400 |
+
},
|
| 401 |
+
{
|
| 402 |
+
"type": "text",
|
| 403 |
+
"bbox": [
|
| 404 |
+
0.091,
|
| 405 |
+
0.648,
|
| 406 |
+
0.48,
|
| 407 |
+
0.859
|
| 408 |
+
],
|
| 409 |
+
"angle": 0,
|
| 410 |
+
"content": "In order to evaluate our models on realistic unseen data in-the-wild, we additionally create and publish a new audio deefake dataset, c.f. Figure 1. It consists of 37.9 hours of audio clips that are either fake (17.2 hours) or real (20.7 hours). We feature English-speaking celebrities and politicians, both from present and past<sup>2</sup>. The fake clips are created by segmenting 219 of publicly available video and audio files that explicitly advertise audio deepfakes. Since the speakers talk absurdly and out-of-character ('Donald Trump reads Star Wars'), it is easy to verify that the audio files are really spoofed. We then manually collect corresponding genuine instances from the same speakers using publicly available material such as podcasts, speeches, etc. We take care to include clips where the type of speaker, style, emotions, etc. are similar to the fake (e.g., for a fake speech by Barack Obama, we include an authentic speech and try to find similar values for background noise, emotions, duration, etc.). The clips have an average length of 4.3 seconds and"
|
| 411 |
+
},
|
| 412 |
+
{
|
| 413 |
+
"type": "image",
|
| 414 |
+
"bbox": [
|
| 415 |
+
0.541,
|
| 416 |
+
0.085,
|
| 417 |
+
0.626,
|
| 418 |
+
0.151
|
| 419 |
+
],
|
| 420 |
+
"angle": 0,
|
| 421 |
+
"content": null
|
| 422 |
+
},
|
| 423 |
+
{
|
| 424 |
+
"type": "image_caption",
|
| 425 |
+
"bbox": [
|
| 426 |
+
0.552,
|
| 427 |
+
0.152,
|
| 428 |
+
0.612,
|
| 429 |
+
0.162
|
| 430 |
+
],
|
| 431 |
+
"angle": 0,
|
| 432 |
+
"content": "Biden, Joe"
|
| 433 |
+
},
|
| 434 |
+
{
|
| 435 |
+
"type": "image",
|
| 436 |
+
"bbox": [
|
| 437 |
+
0.639,
|
| 438 |
+
0.086,
|
| 439 |
+
0.722,
|
| 440 |
+
0.151
|
| 441 |
+
],
|
| 442 |
+
"angle": 0,
|
| 443 |
+
"content": null
|
| 444 |
+
},
|
| 445 |
+
{
|
| 446 |
+
"type": "image_caption",
|
| 447 |
+
"bbox": [
|
| 448 |
+
0.649,
|
| 449 |
+
0.152,
|
| 450 |
+
0.712,
|
| 451 |
+
0.162
|
| 452 |
+
],
|
| 453 |
+
"angle": 0,
|
| 454 |
+
"content": "Clinton, Bill"
|
| 455 |
+
},
|
| 456 |
+
{
|
| 457 |
+
"type": "image",
|
| 458 |
+
"bbox": [
|
| 459 |
+
0.743,
|
| 460 |
+
0.124,
|
| 461 |
+
0.777,
|
| 462 |
+
0.131
|
| 463 |
+
],
|
| 464 |
+
"angle": 0,
|
| 465 |
+
"content": null
|
| 466 |
+
},
|
| 467 |
+
{
|
| 468 |
+
"type": "image",
|
| 469 |
+
"bbox": [
|
| 470 |
+
0.806,
|
| 471 |
+
0.086,
|
| 472 |
+
0.892,
|
| 473 |
+
0.151
|
| 474 |
+
],
|
| 475 |
+
"angle": 0,
|
| 476 |
+
"content": null
|
| 477 |
+
},
|
| 478 |
+
{
|
| 479 |
+
"type": "image_caption",
|
| 480 |
+
"bbox": [
|
| 481 |
+
0.8,
|
| 482 |
+
0.152,
|
| 483 |
+
0.895,
|
| 484 |
+
0.162
|
| 485 |
+
],
|
| 486 |
+
"angle": 0,
|
| 487 |
+
"content": "Zuckerberg, Mark"
|
| 488 |
+
},
|
| 489 |
+
{
|
| 490 |
+
"type": "image_caption",
|
| 491 |
+
"bbox": [
|
| 492 |
+
0.519,
|
| 493 |
+
0.175,
|
| 494 |
+
0.908,
|
| 495 |
+
0.25
|
| 496 |
+
],
|
| 497 |
+
"angle": 0,
|
| 498 |
+
"content": "Figure 1: Schematics of our collected dataset. For \\( n = 58 \\) celebrities and politicians, we collected both bona-fide and spoofed audio (represented by blue and red boxes per speaker). In total, we collected 20.8 hours of bona-fide and 17.2 hours of spoofed audio. On average, there are 23 minutes of bona-fide and 18 minutes of spoofed audio per speaker."
|
| 499 |
+
},
|
| 500 |
+
{
|
| 501 |
+
"type": "text",
|
| 502 |
+
"bbox": [
|
| 503 |
+
0.519,
|
| 504 |
+
0.277,
|
| 505 |
+
0.908,
|
| 506 |
+
0.364
|
| 507 |
+
],
|
| 508 |
+
"angle": 0,
|
| 509 |
+
"content": "are converted to 'wav' after downloading. All recordings were downsampled to \\(16\\mathrm{kHz}\\) (the highest common frequency in the original recordings). Clips were collected from publicly available sources such as social networks and popular video sharing platforms. This dataset is intended as evaluation data: it allows evaluation of a model's cross-database capabilities on a realistic use case."
|
| 510 |
+
},
|
| 511 |
+
{
|
| 512 |
+
"type": "title",
|
| 513 |
+
"bbox": [
|
| 514 |
+
0.612,
|
| 515 |
+
0.378,
|
| 516 |
+
0.816,
|
| 517 |
+
0.394
|
| 518 |
+
],
|
| 519 |
+
"angle": 0,
|
| 520 |
+
"content": "4. Experimental Setup"
|
| 521 |
+
},
|
| 522 |
+
{
|
| 523 |
+
"type": "title",
|
| 524 |
+
"bbox": [
|
| 525 |
+
0.52,
|
| 526 |
+
0.399,
|
| 527 |
+
0.715,
|
| 528 |
+
0.413
|
| 529 |
+
],
|
| 530 |
+
"angle": 0,
|
| 531 |
+
"content": "4.1. Training and Evaluation"
|
| 532 |
+
},
|
| 533 |
+
{
|
| 534 |
+
"type": "title",
|
| 535 |
+
"bbox": [
|
| 536 |
+
0.52,
|
| 537 |
+
0.419,
|
| 538 |
+
0.68,
|
| 539 |
+
0.433
|
| 540 |
+
],
|
| 541 |
+
"angle": 0,
|
| 542 |
+
"content": "4.1.1. Hyper Parameters"
|
| 543 |
+
},
|
| 544 |
+
{
|
| 545 |
+
"type": "text",
|
| 546 |
+
"bbox": [
|
| 547 |
+
0.519,
|
| 548 |
+
0.438,
|
| 549 |
+
0.908,
|
| 550 |
+
0.501
|
| 551 |
+
],
|
| 552 |
+
"angle": 0,
|
| 553 |
+
"content": "We train all of our models using a cross-entropy loss with a log-Softmax over the output logits. We choose the Adam [27] optimizer. We initialize the learning rate at 0.0001 and use a learning rate scheduler. We train for 100 epochs with early stopping using a patience of five epochs."
|
| 554 |
+
},
|
| 555 |
+
{
|
| 556 |
+
"type": "title",
|
| 557 |
+
"bbox": [
|
| 558 |
+
0.52,
|
| 559 |
+
0.513,
|
| 560 |
+
0.769,
|
| 561 |
+
0.527
|
| 562 |
+
],
|
| 563 |
+
"angle": 0,
|
| 564 |
+
"content": "4.1.2. Train and Evaluation Data Splits"
|
| 565 |
+
},
|
| 566 |
+
{
|
| 567 |
+
"type": "text",
|
| 568 |
+
"bbox": [
|
| 569 |
+
0.519,
|
| 570 |
+
0.532,
|
| 571 |
+
0.909,
|
| 572 |
+
0.681
|
| 573 |
+
],
|
| 574 |
+
"angle": 0,
|
| 575 |
+
"content": "We train our models on the 'train' and 'dev' parts of the ASVspoof 2019 Logical Access (LA) dataset part [5]. This is consistent with most related work and also with the evaluation procedure of the ASVspoof 2019 Challenge. We test against two evaluation datasets. As in-domain evaluation data, we use the 'eval' split of ASVspoof 2019. This split contains unseen attacks, i.e., attacks not seen during training. However, the evaluation audios share certain properties with the training data [28], so model generalization cannot be assessed using the 'eval' split of ASVspoof 2019 alone. This motivates the use of our proposed 'in-the-wild' dataset, see Section 3, as unknown out-of-domain evaluation data."
|
| 576 |
+
},
|
| 577 |
+
{
|
| 578 |
+
"type": "title",
|
| 579 |
+
"bbox": [
|
| 580 |
+
0.52,
|
| 581 |
+
0.694,
|
| 582 |
+
0.684,
|
| 583 |
+
0.706
|
| 584 |
+
],
|
| 585 |
+
"angle": 0,
|
| 586 |
+
"content": "4.1.3. Evaluation metrics"
|
| 587 |
+
},
|
| 588 |
+
{
|
| 589 |
+
"type": "text",
|
| 590 |
+
"bbox": [
|
| 591 |
+
0.519,
|
| 592 |
+
0.713,
|
| 593 |
+
0.908,
|
| 594 |
+
0.8
|
| 595 |
+
],
|
| 596 |
+
"angle": 0,
|
| 597 |
+
"content": "We report both the equal-error rate (EER) and the tandem detection cost function (t-DCF) [29] on the ASVspoof 2019 'eval' data. For consistency with the related work, we use the original implementation of the t-DCF as provided for the ASVspoof 2019 challenge [30]. For our proposed dataset, we report only the EER. This is because t-DCF scores require the false alarm and miss costs, which are available only for ASVspoof."
|
| 598 |
+
},
|
| 599 |
+
{
|
| 600 |
+
"type": "title",
|
| 601 |
+
"bbox": [
|
| 602 |
+
0.52,
|
| 603 |
+
0.813,
|
| 604 |
+
0.68,
|
| 605 |
+
0.825
|
| 606 |
+
],
|
| 607 |
+
"angle": 0,
|
| 608 |
+
"content": "4.2. Feature Extraction"
|
| 609 |
+
},
|
| 610 |
+
{
|
| 611 |
+
"type": "text",
|
| 612 |
+
"bbox": [
|
| 613 |
+
0.519,
|
| 614 |
+
0.832,
|
| 615 |
+
0.908,
|
| 616 |
+
0.882
|
| 617 |
+
],
|
| 618 |
+
"angle": 0,
|
| 619 |
+
"content": "Several architectures used in this work require preprocessing the audio data with a feature extractor (LCNN, LCNN-Attention, LCNN-LSTM, LSTM, MesoNet, MesoInception, ResNet18, Transformer). We evalu"
|
| 620 |
+
},
|
| 621 |
+
{
|
| 622 |
+
"type": "page_footnote",
|
| 623 |
+
"bbox": [
|
| 624 |
+
0.11,
|
| 625 |
+
0.869,
|
| 626 |
+
0.396,
|
| 627 |
+
0.882
|
| 628 |
+
],
|
| 629 |
+
"angle": 0,
|
| 630 |
+
"content": "\\( {}^{2} \\) records available at deepfake-total.com/in_the_wild"
|
| 631 |
+
}
|
| 632 |
+
],
|
| 633 |
+
[
|
| 634 |
+
{
|
| 635 |
+
"type": "table",
|
| 636 |
+
"bbox": [
|
| 637 |
+
0.202,
|
| 638 |
+
0.087,
|
| 639 |
+
0.799,
|
| 640 |
+
0.791
|
| 641 |
+
],
|
| 642 |
+
"angle": 0,
|
| 643 |
+
"content": "<table><tr><td rowspan=\"2\">Model Name</td><td rowspan=\"2\">Feature Type</td><td rowspan=\"2\">Input Length</td><td colspan=\"2\">ASVspoof19 eval</td><td rowspan=\"2\">In-the-Wild Data EER%</td></tr><tr><td>EER%</td><td>t-DCF</td></tr><tr><td>LCNN</td><td>cqtspec</td><td>Full</td><td>6.354±0.39</td><td>0.174±0.03</td><td>65.559±11.14</td></tr><tr><td>LCNN</td><td>cqtspec</td><td>4s</td><td>25.534±0.10</td><td>0.512±0.00</td><td>70.015±4.74</td></tr><tr><td>LCNN</td><td>logspec</td><td>Full</td><td>7.537±0.42</td><td>0.141±0.02</td><td>72.515±2.15</td></tr><tr><td>LCNN</td><td>logspec</td><td>4s</td><td>22.271±2.36</td><td>0.377±0.01</td><td>91.110±2.17</td></tr><tr><td>LCNN</td><td>melspec</td><td>Full</td><td>15.093±2.73</td><td>0.428±0.05</td><td>70.311±2.15</td></tr><tr><td>LCNN</td><td>melspec</td><td>4s</td><td>30.258±3.38</td><td>0.503±0.04</td><td>81.942±3.50</td></tr><tr><td>LCNN-Attention</td><td>cqtspec</td><td>Full</td><td>6.762±0.27</td><td>0.178±0.01</td><td>66.684±1.08</td></tr><tr><td>LCNN-Attention</td><td>cqtspec</td><td>4s</td><td>23.228±3.98</td><td>0.468±0.06</td><td>75.317±8.25</td></tr><tr><td>LCNN-Attention</td><td>logspec</td><td>Full</td><td>7.888±0.57</td><td>0.180±0.05</td><td>77.122±4.91</td></tr><tr><td>LCNN-Attention</td><td>logspec</td><td>4s</td><td>14.958±2.37</td><td>0.354±0.03</td><td>80.651±6.14</td></tr><tr><td>LCNN-Attention</td><td>melspec</td><td>Full</td><td>13.487±5.59</td><td>0.374±0.14</td><td>70.986±9.73</td></tr><tr><td>LCNN-Attention</td><td>melspec</td><td>4s</td><td>19.534±2.57</td><td>0.449±0.02</td><td>85.118±1.01</td></tr><tr><td>LCNN-LSTM</td><td>cqtspec</td><td>Full</td><td>6.228±0.50</td><td>0.113±0.01</td><td>61.500±1.37</td></tr><tr><td>LCNN-LSTM</td><td>cqtspec</td><td>4s</td><td>20.857±0.14</td><td>0.478±0.01</td><td>72.251±2.97</td></tr><tr><td>LCNN-LSTM</td><td>logspec</td><td>Full</td><td>9.936±1.74</td><td>0.158±0.01</td><td>79.109±0.84</td></tr><tr><td>LCNN-LSTM</td><td>logspec</td><td>4s</td><td>13.018±3.08</td><td>0.330±0.05</td><td>79.706±15.80</td></tr><tr><td>LCNN-LSTM</td><td>melspec</td><td>Full</td><td>9.260±1.33</td><td>0.240±0.04</td><td>62.304±0.17</td></tr><tr><td>LCNN-LSTM</td><td>melspec</td><td>4s</td><td>27.948±4.64</td><td>0.483±0.03</td><td>82.857±3.49</td></tr><tr><td>LSTM</td><td>cqtspec</td><td>Full</td><td>7.162±0.27</td><td>0.127±0.00</td><td>53.711±11.68</td></tr><tr><td>LSTM</td><td>cqtspec</td><td>4s</td><td>14.409±2.19</td><td>0.382±0.05</td><td>55.880±0.88</td></tr><tr><td>LSTM</td><td>logspec</td><td>Full</td><td>10.314±0.81</td><td>0.160±0.00</td><td>73.111±2.52</td></tr><tr><td>LSTM</td><td>logspec</td><td>4s</td><td>23.232±0.32</td><td>0.512±0.00</td><td>78.071±0.49</td></tr><tr><td>LSTM</td><td>melspec</td><td>Full</td><td>16.216±2.92</td><td>0.358±0.00</td><td>65.957±7.70</td></tr><tr><td>LSTM</td><td>melspec</td><td>4s</td><td>37.463±0.46</td><td>0.553±0.01</td><td>64.297±2.23</td></tr><tr><td>MesoInception</td><td>cqtspec</td><td>Full</td><td>11.353±1.00</td><td>0.326±0.03</td><td>50.007±14.69</td></tr><tr><td>MesoInception</td><td>cqtspec</td><td>4s</td><td>21.973±4.96</td><td>0.453±0.09</td><td>68.192±12.47</td></tr><tr><td>MesoInception</td><td>logspec</td><td>Full</td><td>10.019±0.18</td><td>0.238±0.02</td><td>37.414±9.16</td></tr><tr><td>MesoInception</td><td>logspec</td><td>4s</td><td>16.377±3.72</td><td>0.375±0.09</td><td>72.753±6.62</td></tr><tr><td>MesoInception</td><td>melspec</td><td>Full</td><td>14.058±5.67</td><td>0.331±0.11</td><td>61.996±12.65</td></tr><tr><td>Mes
oInception</td><td>melspec</td><td>4s</td><td>21.484±3.51</td><td>0.408±0.03</td><td>51.980±15.32</td></tr><tr><td>MesoNet</td><td>cqtspec</td><td>Full</td><td>7.422±1.61</td><td>0.219±0.07</td><td>54.544±11.50</td></tr><tr><td>MesoNet</td><td>cqtspec</td><td>4s</td><td>20.395±2.03</td><td>0.426±0.06</td><td>65.928±2.57</td></tr><tr><td>MesoNet</td><td>logspec</td><td>Full</td><td>8.369±1.06</td><td>0.170±0.05</td><td>46.939±5.81</td></tr><tr><td>MesoNet</td><td>logspec</td><td>4s</td><td>11.124±0.79</td><td>0.263±0.03</td><td>80.707±12.03</td></tr><tr><td>MesoNet</td><td>melspec</td><td>Full</td><td>11.305±1.80</td><td>0.321±0.06</td><td>58.405±11.28</td></tr><tr><td>MesoNet</td><td>melspec</td><td>4s</td><td>21.761±0.26</td><td>0.467±0.00</td><td>64.415±15.68</td></tr><tr><td>ResNet18</td><td>cqtspec</td><td>Full</td><td>6.552±0.49</td><td>0.140±0.01</td><td>49.759±0.17</td></tr><tr><td>ResNet18</td><td>cqtspec</td><td>4s</td><td>18.378±1.76</td><td>0.432±0.07</td><td>61.827±7.46</td></tr><tr><td>ResNet18</td><td>logspec</td><td>Full</td><td>7.386±0.42</td><td>0.139±0.02</td><td>80.212±0.23</td></tr><tr><td>ResNet18</td><td>logspec</td><td>4s</td><td>15.521±1.83</td><td>0.387±0.02</td><td>88.729±2.88</td></tr><tr><td>ResNet18</td><td>melspec</td><td>Full</td><td>21.658±2.56</td><td>0.551±0.04</td><td>77.614±1.47</td></tr><tr><td>ResNet18</td><td>melspec</td><td>4s</td><td>28.178±0.33</td><td>0.489±0.01</td><td>83.006±7.17</td></tr><tr><td>Transformer</td><td>cqtspec</td><td>Full</td><td>7.498±0.34</td><td>0.129±0.01</td><td>43.775±2.85</td></tr><tr><td>Transformer</td><td>cqtspec</td><td>4s</td><td>11.256±0.07</td><td>0.329±0.00</td><td>48.208±1.49</td></tr><tr><td>Transformer</td><td>logspec</td><td>Full</td><td>9.949±1.77</td><td>0.210±0.06</td><td>64.789±0.88</td></tr><tr><td>Transformer</td><td>logspec</td><td>4s</td><td>13.935±1.70</td><td>0.320±0.03</td><td>44.406±2.17</td></tr><tr><td>Transformer</td><td>melspec</td><td>Full</td><td>20.813±6.44</td><td>0.394±0.10</td><td>73.307±2.81</td></tr><tr><td>Transformer</td><td>melspec</td><td>4s</td><td>26.495±1.76</td><td>0.495±0.00</td><td>68.407±5.53</td></tr><tr><td>CRNNSpoof</td><td>raw</td><td>Full</td><td>15.658±0.35</td><td>0.312±0.01</td><td>44.500±8.13</td></tr><tr><td>CRNNSpoof</td><td>raw</td><td>4s</td><td>19.640±1.62</td><td>0.360±0.04</td><td>41.710±4.86</td></tr><tr><td>RawNet2</td><td>raw</td><td>Full</td><td>3.154±0.87</td><td>0.078±0.02</td><td>37.819±2.23</td></tr><tr><td>RawNet2</td><td>raw</td><td>4s</td><td>4.351±0.29</td><td>0.132±0.01</td><td>33.943±2.59</td></tr><tr><td>RawPC</td><td>raw</td><td>Full</td><td>3.092±0.36</td><td>0.071±0.00</td><td>45.715±12.20</td></tr><tr><td>RawPC</td><td>raw</td><td>4s</td><td>3.067±0.91</td><td>0.097±0.03</td><td>52.884±6.08</td></tr><tr><td>RawGAT-ST</td><td>raw</td><td>Full</td><td>1.229±0.43</td><td>0.036±0.01</td><td>37.154±1.95</td></tr><tr><td>RawGAT-ST</td><td>raw</td><td>4s</td><td>2.297±0.98</td><td>0.074±0.03</td><td>38.767±1.28</td></tr></table>"
|
| 644 |
+
},
|
| 645 |
+
{
|
| 646 |
+
"type": "table_caption",
|
| 647 |
+
"bbox": [
|
| 648 |
+
0.094,
|
| 649 |
+
0.806,
|
| 650 |
+
0.909,
|
| 651 |
+
0.868
|
| 652 |
+
],
|
| 653 |
+
"angle": 0,
|
| 654 |
+
"content": "Table 1: Full results of evaluation on the ASVspoof 2019 LA 'eval' data. We compare different model architectures against different feature types and audio input lengths (4s, fixed-sized inputs vs. variable-length inputs). Results are averaged over three independent trials with random initialization, and the standard deviation is reported. Best-performing configurations are highlighted in boldface. When evaluating the models on our proposed 'in-the-wild' dataset, we see an increase in EER by up to \\(1000\\%\\) compared to ASVspoof 2019 (rightmost column)."
|
| 655 |
+
}
|
| 656 |
+
],
|
| 657 |
+
[
|
| 658 |
+
{
|
| 659 |
+
"type": "table",
|
| 660 |
+
"bbox": [
|
| 661 |
+
0.11,
|
| 662 |
+
0.087,
|
| 663 |
+
0.462,
|
| 664 |
+
0.153
|
| 665 |
+
],
|
| 666 |
+
"angle": 0,
|
| 667 |
+
"content": "<table><tr><td rowspan=\"2\">Input Length</td><td colspan=\"2\">ASVspoof19 eval</td><td>In-the-Wild Data</td></tr><tr><td>EER %</td><td>t-DCF</td><td>EER %</td></tr><tr><td>Full</td><td>9.85</td><td>0.22</td><td>60.10</td></tr><tr><td>4s</td><td>18.89</td><td>0.39</td><td>67.25</td></tr></table>"
|
| 668 |
+
},
|
| 669 |
+
{
|
| 670 |
+
"type": "table_caption",
|
| 671 |
+
"bbox": [
|
| 672 |
+
0.092,
|
| 673 |
+
0.169,
|
| 674 |
+
0.481,
|
| 675 |
+
0.232
|
| 676 |
+
],
|
| 677 |
+
"angle": 0,
|
| 678 |
+
"content": "Table 2: Model performance averaged by input preprocessing. Fixed-length, 4s inputs perform significantly worse on the ASVspoof data and on the 'in-the-wild' dataset than variable-length inputs. This suggests that related work using fixed-length inputs may (unnecessarily) sacrifice performance."
|
| 679 |
+
},
|
| 680 |
+
{
|
| 681 |
+
"type": "text",
|
| 682 |
+
"bbox": [
|
| 683 |
+
0.092,
|
| 684 |
+
0.271,
|
| 685 |
+
0.481,
|
| 686 |
+
0.348
|
| 687 |
+
],
|
| 688 |
+
"angle": 0,
|
| 689 |
+
"content": "ate these architectures on constant-Q transform (cqtspec [31]), log spectrogram (logspec) and mel-scaled spectrogram (mel-spec [32]) features (all of them 513-dimensional). We use Python, librosa [33] and scipy [34]. The rest of the models does not rely on pre-processed data, but uses raw audio waveforms as inputs."
|
| 690 |
+
},
|
| 691 |
+
{
|
| 692 |
+
"type": "title",
|
| 693 |
+
"bbox": [
|
| 694 |
+
0.092,
|
| 695 |
+
0.359,
|
| 696 |
+
0.258,
|
| 697 |
+
0.373
|
| 698 |
+
],
|
| 699 |
+
"angle": 0,
|
| 700 |
+
"content": "4.3. Audio Input Length"
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"type": "text",
|
| 704 |
+
"bbox": [
|
| 705 |
+
0.092,
|
| 706 |
+
0.378,
|
| 707 |
+
0.48,
|
| 708 |
+
0.453
|
| 709 |
+
],
|
| 710 |
+
"angle": 0,
|
| 711 |
+
"content": "Audio samples usually vary in length, which is also the case for the data in ASVspoof 2019 and our proposed 'in-the-wild' dataset. While some models can accommodate variable-length input (and thus also fixed-length input), many can not. We extend these by introducing a global averaging layer, which adds such capability."
|
| 712 |
+
},
|
| 713 |
+
{
|
| 714 |
+
"type": "text",
|
| 715 |
+
"bbox": [
|
| 716 |
+
0.092,
|
| 717 |
+
0.454,
|
| 718 |
+
0.481,
|
| 719 |
+
0.542
|
| 720 |
+
],
|
| 721 |
+
"angle": 0,
|
| 722 |
+
"content": "In our evaluation of fixed-length input, we chose a length of four seconds, following [23]. If an input sample is longer, a random four-second subset of the sample is used. If it is shorter, the sample is repeated. To keep the evaluation fair, these shorter samples are also repeated during the full-length evaluation. This ensures that full-length input is never shorter than truncated input, but always at least 4s."
|
| 723 |
+
},
|
| 724 |
+
{
|
| 725 |
+
"type": "title",
|
| 726 |
+
"bbox": [
|
| 727 |
+
0.238,
|
| 728 |
+
0.555,
|
| 729 |
+
0.334,
|
| 730 |
+
0.57
|
| 731 |
+
],
|
| 732 |
+
"angle": 0,
|
| 733 |
+
"content": "5. Results"
|
| 734 |
+
},
|
| 735 |
+
{
|
| 736 |
+
"type": "text",
|
| 737 |
+
"bbox": [
|
| 738 |
+
0.092,
|
| 739 |
+
0.576,
|
| 740 |
+
0.481,
|
| 741 |
+
0.703
|
| 742 |
+
],
|
| 743 |
+
"angle": 0,
|
| 744 |
+
"content": "Table 1 shows the results of our experiments, where we evaluate all models against all configurations of data preprocessing: we train twelve different models, using one of four different feature types, with two different ways of handling variable-length audio. Each experiment is performed three times, using random initialization. We report averaged EER and t-DCF, as well as standard deviation. We observe that on ASVspoof, our implementations perform comparable to related work, with a margin of approximately \\(2 - 4\\%\\) EER and 0.1 t-DCF. This is likely because we do not fine-tune our models' hyper-parameters."
|
| 745 |
+
},
|
| 746 |
+
{
|
| 747 |
+
"type": "title",
|
| 748 |
+
"bbox": [
|
| 749 |
+
0.092,
|
| 750 |
+
0.713,
|
| 751 |
+
0.336,
|
| 752 |
+
0.727
|
| 753 |
+
],
|
| 754 |
+
"angle": 0,
|
| 755 |
+
"content": "5.1. Fixed vs. Variable Input Length"
|
| 756 |
+
},
|
| 757 |
+
{
|
| 758 |
+
"type": "text",
|
| 759 |
+
"bbox": [
|
| 760 |
+
0.092,
|
| 761 |
+
0.733,
|
| 762 |
+
0.481,
|
| 763 |
+
0.883
|
| 764 |
+
],
|
| 765 |
+
"angle": 0,
|
| 766 |
+
"content": "We analyze the effects of truncating the input signal to a fixed length compared to using the full, unabridged audio. For all models, performance decreases when the input is trimmed to \\(4s\\). Table 2 averages all results based on input length. We see that average EER on ASVspoof drops from \\(19.89\\%\\) to \\(9.85\\%\\) when the full-length input is used. These results show that a four-second clip is insufficient for the model to extract useful information compared to using the full audio file as input. Therefore, we propose not to use fixed-length truncated inputs, but to provide the full audio file to the model. This may seem obvious, but the numerous works that use fixed-length inputs [23, 25, 26] suggest otherwise."
|
| 767 |
+
},
|
| 768 |
+
{
|
| 769 |
+
"type": "title",
|
| 770 |
+
"bbox": [
|
| 771 |
+
0.52,
|
| 772 |
+
0.089,
|
| 773 |
+
0.822,
|
| 774 |
+
0.103
|
| 775 |
+
],
|
| 776 |
+
"angle": 0,
|
| 777 |
+
"content": "5.2. Effects of Feature Extraction Techniques"
|
| 778 |
+
},
|
| 779 |
+
{
|
| 780 |
+
"type": "text",
|
| 781 |
+
"bbox": [
|
| 782 |
+
0.519,
|
| 783 |
+
0.108,
|
| 784 |
+
0.908,
|
| 785 |
+
0.244
|
| 786 |
+
],
|
| 787 |
+
"angle": 0,
|
| 788 |
+
"content": "We discuss the effects of different feature preprocessing techniques, c.f. 1: The 'raw' models outperform the feature-based models, obtaining up to \\(1.2\\%\\) EER on ASVspoof and \\(33.9\\%\\) EER on the 'in-the-wild' dataset (RawGAT-ST and RawNet2). The spectrogram-based models perform slightly worse, achieving up to \\(6.3\\%\\) EER on ASVspoof and \\(37.4\\%\\) on the 'in-the-wild' dataset (LCNN and MesoNet). The superiority of the 'raw' models is assumed to be due to finer feature-extraction resolution than the spectrogram-based models [26]. This has lead recent research to focus largely on such raw-feature, end-to-end models [25, 26]."
|
| 789 |
+
},
|
| 790 |
+
{
|
| 791 |
+
"type": "text",
|
| 792 |
+
"bbox": [
|
| 793 |
+
0.519,
|
| 794 |
+
0.245,
|
| 795 |
+
0.908,
|
| 796 |
+
0.295
|
| 797 |
+
],
|
| 798 |
+
"angle": 0,
|
| 799 |
+
"content": "Concerning the spectogram-based models, we observe that melspec features are always outperformed by either cqtspec of logspec. Simply replacing melspec with cqtspec increases the average performance by \\(37\\%\\), all other factors constant."
|
| 800 |
+
},
|
| 801 |
+
{
|
| 802 |
+
"type": "title",
|
| 803 |
+
"bbox": [
|
| 804 |
+
0.52,
|
| 805 |
+
0.307,
|
| 806 |
+
0.766,
|
| 807 |
+
0.32
|
| 808 |
+
],
|
| 809 |
+
"angle": 0,
|
| 810 |
+
"content": "5.3. Evaluation on 'in-the-wild' data"
|
| 811 |
+
},
|
| 812 |
+
{
|
| 813 |
+
"type": "text",
|
| 814 |
+
"bbox": [
|
| 815 |
+
0.519,
|
| 816 |
+
0.326,
|
| 817 |
+
0.908,
|
| 818 |
+
0.413
|
| 819 |
+
],
|
| 820 |
+
"angle": 0,
|
| 821 |
+
"content": "Especially interesting is the performance of the models on real-world deepfake data. Table 1 shows the performance of our models on the 'in-the-wild' dataset. We see that there is a large performance gap between the ASVSproof 2019 evaluation data and our proposed 'in-the-wild' dataset. In general, the EER values of the models deteriorate by about 200 to 1000 percent. Often, the models do not perform better than random guessing."
|
| 822 |
+
},
|
| 823 |
+
{
|
| 824 |
+
"type": "text",
|
| 825 |
+
"bbox": [
|
| 826 |
+
0.519,
|
| 827 |
+
0.413,
|
| 828 |
+
0.908,
|
| 829 |
+
0.5
|
| 830 |
+
],
|
| 831 |
+
"angle": 0,
|
| 832 |
+
"content": "To investigate this further, we train our best 'in-the-wild' model from Table 1, RawNet2 with 4s input length, on all from ASVspoof 2019, i.e., the 'train', 'dev', and 'eval' splits. We then re-evaluate on the 'in-the-wild' dataset to investigate whether adding more ASVspoof training data improves out-of-domain performance. We achieve \\(33.1 \\pm 0.2\\%\\) EER, i.e., no improvement over training with only the 'train' and 'dev' data."
|
| 833 |
+
},
|
| 834 |
+
{
|
| 835 |
+
"type": "text",
|
| 836 |
+
"bbox": [
|
| 837 |
+
0.519,
|
| 838 |
+
0.5,
|
| 839 |
+
0.908,
|
| 840 |
+
0.563
|
| 841 |
+
],
|
| 842 |
+
"angle": 0,
|
| 843 |
+
"content": "The inclusion of the 'eval' split does not seem to add much information that could be used for real-world generalization. This is plausible in that all splits of ASVspoof are fundamentally based on the same dataset, VCTK, although the synthesis algorithms and speakers differ between splits [5]."
|
| 844 |
+
},
|
| 845 |
+
{
|
| 846 |
+
"type": "title",
|
| 847 |
+
"bbox": [
|
| 848 |
+
0.65,
|
| 849 |
+
0.575,
|
| 850 |
+
0.78,
|
| 851 |
+
0.59
|
| 852 |
+
],
|
| 853 |
+
"angle": 0,
|
| 854 |
+
"content": "6. Conclusion"
|
| 855 |
+
},
|
| 856 |
+
{
|
| 857 |
+
"type": "text",
|
| 858 |
+
"bbox": [
|
| 859 |
+
0.519,
|
| 860 |
+
0.596,
|
| 861 |
+
0.908,
|
| 862 |
+
0.658
|
| 863 |
+
],
|
| 864 |
+
"angle": 0,
|
| 865 |
+
"content": "In this paper, we systematically evaluate audio spoof detection models from related work according to common standards. In addition, we present a new audio deefake dataset of 'in-the-wild' audio spools that we use to evaluate the generalization capabilities of related work in a real-world scenario."
|
| 866 |
+
},
|
| 867 |
+
{
|
| 868 |
+
"type": "text",
|
| 869 |
+
"bbox": [
|
| 870 |
+
0.519,
|
| 871 |
+
0.659,
|
| 872 |
+
0.908,
|
| 873 |
+
0.77
|
| 874 |
+
],
|
| 875 |
+
"angle": 0,
|
| 876 |
+
"content": "We find that regardless of the model architecture, some preprocessing steps are more successful than others. It turns out that the use of cqtspec or logspec features consistently outperforms the use of melspec features in our comprehensive analysis. Furthermore, we find that for most models, four seconds of input audio does not saturate performance compared to longer examples. Therefore, we argue that one should consider using cqtspec features and unabridged input audio when designing audio deepfake detection architectures."
|
| 877 |
+
},
|
| 878 |
+
{
|
| 879 |
+
"type": "text",
|
| 880 |
+
"bbox": [
|
| 881 |
+
0.519,
|
| 882 |
+
0.77,
|
| 883 |
+
0.908,
|
| 884 |
+
0.883
|
| 885 |
+
],
|
| 886 |
+
"angle": 0,
|
| 887 |
+
"content": "Most importantly, however, we find that the 'in-the-wild' generalization capabilities of many models may have been overestimated. We demonstrate this by collecting our own audio deepfake dataset and evaluating twelve different model architectures on it. Performance drops sharply, and some models degenerate to random guessing. It may be possible that the community has tailored its detection models too closely to the prevailing benchmark, ASVSpoof, and that deepfakes are much harder to detect outside the lab than previously thought."
|
| 888 |
+
}
|
| 889 |
+
],
|
| 890 |
+
[
|
| 891 |
+
{
|
| 892 |
+
"type": "title",
|
| 893 |
+
"bbox": [
|
| 894 |
+
0.223,
|
| 895 |
+
0.087,
|
| 896 |
+
0.348,
|
| 897 |
+
0.102
|
| 898 |
+
],
|
| 899 |
+
"angle": 0,
|
| 900 |
+
"content": "7. References"
|
| 901 |
+
},
|
| 902 |
+
{
|
| 903 |
+
"type": "ref_text",
|
| 904 |
+
"bbox": [
|
| 905 |
+
0.101,
|
| 906 |
+
0.108,
|
| 907 |
+
0.48,
|
| 908 |
+
0.14
|
| 909 |
+
],
|
| 910 |
+
"angle": 0,
|
| 911 |
+
"content": "[1] \"Audio deep fake: Demonstrator企业发展 am braunhofer aisec - youtube,\" https://www.youtube.com/watch?v=MZTF0eAALmE, (Accessed on 04/01/2021)."
|
| 912 |
+
},
|
| 913 |
+
{
|
| 914 |
+
"type": "ref_text",
|
| 915 |
+
"bbox": [
|
| 916 |
+
0.102,
|
| 917 |
+
0.144,
|
| 918 |
+
0.48,
|
| 919 |
+
0.177
|
| 920 |
+
],
|
| 921 |
+
"angle": 0,
|
| 922 |
+
"content": "[2] \"Deepfake video of volodymyr zelensky surrendering surfaces on social media - youtube,\" https://www.youtube.com/watch?v=X17yrEV5sl4, (Accessed on 03/23/2022)."
|
| 923 |
+
},
|
| 924 |
+
{
|
| 925 |
+
"type": "ref_text",
|
| 926 |
+
"bbox": [
|
| 927 |
+
0.102,
|
| 928 |
+
0.182,
|
| 929 |
+
0.48,
|
| 930 |
+
0.224
|
| 931 |
+
],
|
| 932 |
+
"angle": 0,
|
| 933 |
+
"content": "[3] A. v. d. Oord, S. Dieleman, H. Zen, K. Simonyan, O. Vinyals, A. Graves, N. Kalchbrenner, A. Senior, and K. Kavukcuoglu, \"Wavenet: A generative model for raw audio,\" arXiv preprint arXiv:1609.03499, 2016."
|
| 934 |
+
},
|
| 935 |
+
{
|
| 936 |
+
"type": "ref_text",
|
| 937 |
+
"bbox": [
|
| 938 |
+
0.102,
|
| 939 |
+
0.229,
|
| 940 |
+
0.48,
|
| 941 |
+
0.294
|
| 942 |
+
],
|
| 943 |
+
"angle": 0,
|
| 944 |
+
"content": "[4] Y. Wang, R. J. Skerry-Ryan, D. Stanton, Y. Wu, R. J. Weiss, N. Jaitly, Z. Yang, Y. Xiao, Z. Chen, S. Bengio, Q. V. Le, Y. Agiomyrgiannakis, R. Clark, and R. A. Saurous, \"Tacotron: A fully end-to-end text-to-speech synthesis model,\" CoRR, vol. abs/1703.10135, 2017. [Online]. Available: http://arxiv.org/abs/1703.10135"
|
| 945 |
+
},
|
| 946 |
+
{
|
| 947 |
+
"type": "ref_text",
|
| 948 |
+
"bbox": [
|
| 949 |
+
0.102,
|
| 950 |
+
0.298,
|
| 951 |
+
0.48,
|
| 952 |
+
0.341
|
| 953 |
+
],
|
| 954 |
+
"angle": 0,
|
| 955 |
+
"content": "[5] M. Todisco, X. Wang, V. Vestman, M. Sahidullah, H. Delgado, A. Nautsch, J. Yamagishi, N. Evans, T. Kinnunen, and K. A. Lee, \"Asvspoof 2019: Future horizons in spoofed and fake audio detection,\" arXiv preprint arXiv:1904.05441, 2019."
|
| 956 |
+
},
|
| 957 |
+
{
|
| 958 |
+
"type": "ref_text",
|
| 959 |
+
"bbox": [
|
| 960 |
+
0.102,
|
| 961 |
+
0.346,
|
| 962 |
+
0.48,
|
| 963 |
+
0.4
|
| 964 |
+
],
|
| 965 |
+
"angle": 0,
|
| 966 |
+
"content": "[6] A. Nautsch, X. Wang, N. Evans, T. H. Kinnunen, V. Vestman, M. Todisco, H. Delgado, M. Sahidullah, J. Yamagishi, and K. A. Lee, \"ASVspoof 2019: Spoofing Countermeasures for the Detection of Synthesized, Converted and Replayed Speech,\" vol. 3, no. 2, pp. 252-265."
|
| 967 |
+
},
|
| 968 |
+
{
|
| 969 |
+
"type": "ref_text",
|
| 970 |
+
"bbox": [
|
| 971 |
+
0.102,
|
| 972 |
+
0.404,
|
| 973 |
+
0.48,
|
| 974 |
+
0.437
|
| 975 |
+
],
|
| 976 |
+
"angle": 0,
|
| 977 |
+
"content": "[7] J. Yamagishi, C. Veaux, and K. MacDonald, \"CSTR VCTK Corpus: English multi-speaker corpus for CSTR voice cloning toolkit (version 0.92),\" 2019."
|
| 978 |
+
},
|
| 979 |
+
{
|
| 980 |
+
"type": "ref_text",
|
| 981 |
+
"bbox": [
|
| 982 |
+
0.102,
|
| 983 |
+
0.441,
|
| 984 |
+
0.48,
|
| 985 |
+
0.474
|
| 986 |
+
],
|
| 987 |
+
"angle": 0,
|
| 988 |
+
"content": "[8] A. Gomez-Alanis, A. M. Peinado, J. A. Gonzalez, and A. M. Gomez, “A Gated Recurrent Convolutional Neural Network for Robust Spoofing Detection,” vol. 27, no. 12, pp. 1985–1999."
|
| 989 |
+
},
|
| 990 |
+
{
|
| 991 |
+
"type": "ref_text",
|
| 992 |
+
"bbox": [
|
| 993 |
+
0.102,
|
| 994 |
+
0.478,
|
| 995 |
+
0.48,
|
| 996 |
+
0.512
|
| 997 |
+
],
|
| 998 |
+
"angle": 0,
|
| 999 |
+
"content": "[9] A. Chintha, B. Thai, S. J. Sohrawardi, K. M. Bhatt, A. Hickerson, M. Wright, and R. Ptucha, \"Recurrent Convolutional Structures for Audio Spoof and Video Deepfake Detection,\" pp. 1-1."
|
| 1000 |
+
},
|
| 1001 |
+
{
|
| 1002 |
+
"type": "ref_text",
|
| 1003 |
+
"bbox": [
|
| 1004 |
+
0.096,
|
| 1005 |
+
0.515,
|
| 1006 |
+
0.48,
|
| 1007 |
+
0.548
|
| 1008 |
+
],
|
| 1009 |
+
"angle": 0,
|
| 1010 |
+
"content": "[10] L. Zhang, X. Wang, E. Cooper, J. Yamagishi, J. Patino, and N. Evans, \"An initial investigation for detecting partially spoofed audio,\" arXiv preprint arXiv:2104.02518, 2021."
|
| 1011 |
+
},
|
| 1012 |
+
{
|
| 1013 |
+
"type": "ref_text",
|
| 1014 |
+
"bbox": [
|
| 1015 |
+
0.095,
|
| 1016 |
+
0.552,
|
| 1017 |
+
0.48,
|
| 1018 |
+
0.585
|
| 1019 |
+
],
|
| 1020 |
+
"angle": 0,
|
| 1021 |
+
"content": "[11] S. Tambe, A. Pawar, and S. Yadav, “Deep fake videos identification using ann and lstm,” Journal of Discrete Mathematical Sciences and Cryptography, vol. 24, no. 8, pp. 2353–2364, 2021."
|
| 1022 |
+
},
|
| 1023 |
+
{
|
| 1024 |
+
"type": "ref_text",
|
| 1025 |
+
"bbox": [
|
| 1026 |
+
0.095,
|
| 1027 |
+
0.589,
|
| 1028 |
+
0.479,
|
| 1029 |
+
0.622
|
| 1030 |
+
],
|
| 1031 |
+
"angle": 0,
|
| 1032 |
+
"content": "[12] X. Wang and J. Yamagishi. A Comparative Study on Recent Neural Spoofing Countermeasures for Synthetic Speech Detection. [Online]. Available: http://arxiv.org/abs/2103.11326"
|
| 1033 |
+
},
|
| 1034 |
+
{
|
| 1035 |
+
"type": "ref_text",
|
| 1036 |
+
"bbox": [
|
| 1037 |
+
0.095,
|
| 1038 |
+
0.626,
|
| 1039 |
+
0.48,
|
| 1040 |
+
0.68
|
| 1041 |
+
],
|
| 1042 |
+
"angle": 0,
|
| 1043 |
+
"content": "[13] G. Lavrentyeva, S. Novoselov, E. Malykh, A. Kozlov, O. Kudashev, and V. Shchemelinin, \"Audio replay attack detection with deep learning frameworks,\" in Interspeech 2017. ISCA, pp. 82-86. [Online]. Available: http://www.isca-speech.org/archive/Interspeech_2017/abstracts/0360.html"
|
| 1044 |
+
},
|
| 1045 |
+
{
|
| 1046 |
+
"type": "ref_text",
|
| 1047 |
+
"bbox": [
|
| 1048 |
+
0.095,
|
| 1049 |
+
0.684,
|
| 1050 |
+
0.48,
|
| 1051 |
+
0.738
|
| 1052 |
+
],
|
| 1053 |
+
"angle": 0,
|
| 1054 |
+
"content": "[14] G. Lavrentyeva, S. Novoselov, A. Tseren, M. Volkova, A. Gorlanov, and A. Kozlov, \"STC antispoofing systems for the ASVspoof2019 challenge,\" in Interspeech 2019. ISCA, pp. 1033-1037. [Online]. Available: http://www.isca-speech.org/archive/Interspeech_2019/abstracts/1768.html"
|
| 1055 |
+
},
|
| 1056 |
+
{
|
| 1057 |
+
"type": "ref_text",
|
| 1058 |
+
"bbox": [
|
| 1059 |
+
0.095,
|
| 1060 |
+
0.742,
|
| 1061 |
+
0.48,
|
| 1062 |
+
0.786
|
| 1063 |
+
],
|
| 1064 |
+
"angle": 0,
|
| 1065 |
+
"content": "[15] D. Afchar, V. Nozick, J. Yamagishi, and I. Echizen, \"MesoNet: A Compact Facial Video Forgery Detection Network,\" in 2018 IEEE International Workshop on Information Forensics and Security (WIFS), pp. 1-7."
|
| 1066 |
+
},
|
| 1067 |
+
{
|
| 1068 |
+
"type": "ref_text",
|
| 1069 |
+
"bbox": [
|
| 1070 |
+
0.095,
|
| 1071 |
+
0.79,
|
| 1072 |
+
0.48,
|
| 1073 |
+
0.834
|
| 1074 |
+
],
|
| 1075 |
+
"angle": 0,
|
| 1076 |
+
"content": "[16] C. Szegedy, W. Liu, Y. Jia, P. Sermanet, S. Reed, D. Anguelov, D. Erhan, V. Vanhoucke, and A. Rabinovich, \"Going deeper with convolutions,\" in 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015, pp. 1-9."
|
| 1077 |
+
},
|
| 1078 |
+
{
|
| 1079 |
+
"type": "ref_text",
|
| 1080 |
+
"bbox": [
|
| 1081 |
+
0.095,
|
| 1082 |
+
0.838,
|
| 1083 |
+
0.48,
|
| 1084 |
+
0.881
|
| 1085 |
+
],
|
| 1086 |
+
"angle": 0,
|
| 1087 |
+
"content": "[17] M. Alzantot, Z. Wang, and M. B. Srivastava, “Deep Residual Neural Networks for Audio Spoofing Detection,” in Interspeech 2019. ISCA, pp. 1078–1082. [Online]. Available: http://www.isca-speech.org/archive/Interspeech_2019/abstracts/3174.html"
|
| 1088 |
+
},
|
| 1089 |
+
{
|
| 1090 |
+
"type": "list",
|
| 1091 |
+
"bbox": [
|
| 1092 |
+
0.095,
|
| 1093 |
+
0.108,
|
| 1094 |
+
0.48,
|
| 1095 |
+
0.881
|
| 1096 |
+
],
|
| 1097 |
+
"angle": 0,
|
| 1098 |
+
"content": null
|
| 1099 |
+
},
|
| 1100 |
+
{
|
| 1101 |
+
"type": "ref_text",
|
| 1102 |
+
"bbox": [
|
| 1103 |
+
0.523,
|
| 1104 |
+
0.091,
|
| 1105 |
+
0.908,
|
| 1106 |
+
0.123
|
| 1107 |
+
],
|
| 1108 |
+
"angle": 0,
|
| 1109 |
+
"content": "[18] Y. Zhang, F. Jiang, and Z. Duan, “One-class learning towards synthetic voice spoofing detection,” IEEE Signal Processing Letters, vol. 28, pp. 937–941, 2021."
|
| 1110 |
+
},
|
| 1111 |
+
{
|
| 1112 |
+
"type": "ref_text",
|
| 1113 |
+
"bbox": [
|
| 1114 |
+
0.523,
|
| 1115 |
+
0.127,
|
| 1116 |
+
0.908,
|
| 1117 |
+
0.16
|
| 1118 |
+
],
|
| 1119 |
+
"angle": 0,
|
| 1120 |
+
"content": "[19] J. Monteiro, J. Alam, and T. H. Falk, \"Generalized end-to-end detection of spoofing attacks to automatic speaker recognizers,\" Computer Speech & Language, vol. 63, p. 101096, 2020."
|
| 1121 |
+
},
|
| 1122 |
+
{
|
| 1123 |
+
"type": "ref_text",
|
| 1124 |
+
"bbox": [
|
| 1125 |
+
0.523,
|
| 1126 |
+
0.164,
|
| 1127 |
+
0.908,
|
| 1128 |
+
0.197
|
| 1129 |
+
],
|
| 1130 |
+
"angle": 0,
|
| 1131 |
+
"content": "[20] K. He, X. Zhang, S. Ren, and J. Sun, \"Deep residual learning for image recognition,\" in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770-778."
|
| 1132 |
+
},
|
| 1133 |
+
{
|
| 1134 |
+
"type": "ref_text",
|
| 1135 |
+
"bbox": [
|
| 1136 |
+
0.523,
|
| 1137 |
+
0.201,
|
| 1138 |
+
0.908,
|
| 1139 |
+
0.244
|
| 1140 |
+
],
|
| 1141 |
+
"angle": 0,
|
| 1142 |
+
"content": "[21] Z. Zhang, X. Yi, and X. Zhao, \"Fake speech detection using residual network with transformer encoder,\" in Proceedings of the 2021 ACM Workshop on Information Hiding and Multimedia Security, 2021, pp. 13-22."
|
| 1143 |
+
},
|
| 1144 |
+
{
|
| 1145 |
+
"type": "ref_text",
|
| 1146 |
+
"bbox": [
|
| 1147 |
+
0.523,
|
| 1148 |
+
0.248,
|
| 1149 |
+
0.908,
|
| 1150 |
+
0.281
|
| 1151 |
+
],
|
| 1152 |
+
"angle": 0,
|
| 1153 |
+
"content": "[22] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin, \"Attention is all you need,\" Advances in neural information processing systems, vol. 30, 2017."
|
| 1154 |
+
},
|
| 1155 |
+
{
|
| 1156 |
+
"type": "ref_text",
|
| 1157 |
+
"bbox": [
|
| 1158 |
+
0.523,
|
| 1159 |
+
0.285,
|
| 1160 |
+
0.908,
|
| 1161 |
+
0.328
|
| 1162 |
+
],
|
| 1163 |
+
"angle": 0,
|
| 1164 |
+
"content": "[23] H. Tak, J. Patino, M. Todisco, A. Nautsch, N. Evans, and A. Larcher, “End-to-End anti-spoofing with RawNet2,” in ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6369-6373."
|
| 1165 |
+
},
|
| 1166 |
+
{
|
| 1167 |
+
"type": "ref_text",
|
| 1168 |
+
"bbox": [
|
| 1169 |
+
0.523,
|
| 1170 |
+
0.333,
|
| 1171 |
+
0.908,
|
| 1172 |
+
0.366
|
| 1173 |
+
],
|
| 1174 |
+
"angle": 0,
|
| 1175 |
+
"content": "[24] M. Ravanelli and Y. Bengio, \"Speaker recognition from raw waveform with sincnet,\" in 2018 IEEE Spoken Language Technology Workshop (SLT). IEEE, 2018, pp. 1021-1028."
|
| 1176 |
+
},
|
| 1177 |
+
{
|
| 1178 |
+
"type": "ref_text",
|
| 1179 |
+
"bbox": [
|
| 1180 |
+
0.523,
|
| 1181 |
+
0.369,
|
| 1182 |
+
0.908,
|
| 1183 |
+
0.402
|
| 1184 |
+
],
|
| 1185 |
+
"angle": 0,
|
| 1186 |
+
"content": "[25] W. Ge, J. Patino, M. Todisco, and N. Evans, \"Raw differentiable architecture search for speech deepfake and spoofing detection,\" arXiv preprint arXiv:2107.12212, 2021."
|
| 1187 |
+
},
|
| 1188 |
+
{
|
| 1189 |
+
"type": "ref_text",
|
| 1190 |
+
"bbox": [
|
| 1191 |
+
0.523,
|
| 1192 |
+
0.406,
|
| 1193 |
+
0.908,
|
| 1194 |
+
0.45
|
| 1195 |
+
],
|
| 1196 |
+
"angle": 0,
|
| 1197 |
+
"content": "[26] H. Tak, J.-w. Jung, J. Patino, M. Kamble, M. Todisco, and N. Evans, \"End-to-end spectro-temporal graph attention networks for speaker verification anti-spoofing and speech deepfake detection,\" arXiv preprint arXiv:2107.12710, 2021."
|
| 1198 |
+
},
|
| 1199 |
+
{
|
| 1200 |
+
"type": "ref_text",
|
| 1201 |
+
"bbox": [
|
| 1202 |
+
0.523,
|
| 1203 |
+
0.453,
|
| 1204 |
+
0.908,
|
| 1205 |
+
0.508
|
| 1206 |
+
],
|
| 1207 |
+
"angle": 0,
|
| 1208 |
+
"content": "[27] D. P. Kingma and J. Ba, \"Adam: A method for stochastic optimization,\" in 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, Y. Bengio and Y. LeCun, Eds., 2015. [Online]. Available: http://arxiv.org/abs/1412.6980"
|
| 1209 |
+
},
|
| 1210 |
+
{
|
| 1211 |
+
"type": "ref_text",
|
| 1212 |
+
"bbox": [
|
| 1213 |
+
0.523,
|
| 1214 |
+
0.512,
|
| 1215 |
+
0.908,
|
| 1216 |
+
0.555
|
| 1217 |
+
],
|
| 1218 |
+
"angle": 0,
|
| 1219 |
+
"content": "[28] N. M. Müller, F. Dieckmann, P. Czempin, R. Canals, J. Williams, and K. Böttinger. Speech is Silver, Silence is Golden: What do ASVspoof-trained Models Really Learn? [Online]. Available: http://arxiv.org/abs/2106.12914"
|
| 1220 |
+
},
|
| 1221 |
+
{
|
| 1222 |
+
"type": "ref_text",
|
| 1223 |
+
"bbox": [
|
| 1224 |
+
0.523,
|
| 1225 |
+
0.559,
|
| 1226 |
+
0.908,
|
| 1227 |
+
0.623
|
| 1228 |
+
],
|
| 1229 |
+
"angle": 0,
|
| 1230 |
+
"content": "[29] T. Kinnunen, K. A. Lee, H. Delgado, N. Evans, M. Todisco, M. Sahidullah, J. Yamagishi, and D. A. Reynolds, \"t-DCF: a detection cost function for the tandem assessment of spoofing countermeasures and automatic speaker verification,\" in Odyssey 2018 The Speaker and Language Recognition Workshop. ISCA, pp. 312-319."
|
| 1231 |
+
},
|
| 1232 |
+
{
|
| 1233 |
+
"type": "ref_text",
|
| 1234 |
+
"bbox": [
|
| 1235 |
+
0.523,
|
| 1236 |
+
0.628,
|
| 1237 |
+
0.908,
|
| 1238 |
+
0.65
|
| 1239 |
+
],
|
| 1240 |
+
"angle": 0,
|
| 1241 |
+
"content": "[30] \"tdcf official implementation,\" https://www.asvspoof.org/ asvsproof2019/tDCF.python_v1.zip, (Accessed on 03/03/2022)."
|
| 1242 |
+
},
|
| 1243 |
+
{
|
| 1244 |
+
"type": "ref_text",
|
| 1245 |
+
"bbox": [
|
| 1246 |
+
0.523,
|
| 1247 |
+
0.654,
|
| 1248 |
+
0.908,
|
| 1249 |
+
0.686
|
| 1250 |
+
],
|
| 1251 |
+
"angle": 0,
|
| 1252 |
+
"content": "[31] J. C. Brown, “Calculation of a constant q spectral transform,” The Journal of the Acoustical Society of America, vol. 89, no. 1, pp. 425–434, 1991."
|
| 1253 |
+
},
|
| 1254 |
+
{
|
| 1255 |
+
"type": "ref_text",
|
| 1256 |
+
"bbox": [
|
| 1257 |
+
0.523,
|
| 1258 |
+
0.69,
|
| 1259 |
+
0.908,
|
| 1260 |
+
0.733
|
| 1261 |
+
],
|
| 1262 |
+
"angle": 0,
|
| 1263 |
+
"content": "[32] S. S. Stevens, J. Volkmann, and E. B. Newman, “A scale for the measurement of the psychological magnitude pitch,” The journal of the acoustical society of america, vol. 8, no. 3, pp. 185–190, 1937."
|
| 1264 |
+
},
|
| 1265 |
+
{
|
| 1266 |
+
"type": "ref_text",
|
| 1267 |
+
"bbox": [
|
| 1268 |
+
0.523,
|
| 1269 |
+
0.738,
|
| 1270 |
+
0.908,
|
| 1271 |
+
0.782
|
| 1272 |
+
],
|
| 1273 |
+
"angle": 0,
|
| 1274 |
+
"content": "[33] B. McFee, C. Raffel, D. Liang, D. P. Ellis, M. McVicar, E. Battenberg, and O. Nieto, \"librosa: Audio and music signal analysis in python,\" in Proceedings of the 14th python in science conference, vol. 8. Citeseer, 2015, pp. 18-25."
|
| 1275 |
+
},
|
| 1276 |
+
{
|
| 1277 |
+
"type": "ref_text",
|
| 1278 |
+
"bbox": [
|
| 1279 |
+
0.523,
|
| 1280 |
+
0.786,
|
| 1281 |
+
0.908,
|
| 1282 |
+
0.83
|
| 1283 |
+
],
|
| 1284 |
+
"angle": 0,
|
| 1285 |
+
"content": "[34] P. Virtanen, R. Gommers, T. E. Oliphant, M. Haberland, T. Reddy, D. Cournapeau, E. Burovski, P. Peterson, W. Weckesser, J. Bright et al., \"Scipy 1.0: fundamental algorithms for scientific computing in python,\" Nature methods, vol. 17, no. 3, pp. 261-272, 2020."
|
| 1286 |
+
},
|
| 1287 |
+
{
|
| 1288 |
+
"type": "list",
|
| 1289 |
+
"bbox": [
|
| 1290 |
+
0.523,
|
| 1291 |
+
0.091,
|
| 1292 |
+
0.908,
|
| 1293 |
+
0.83
|
| 1294 |
+
],
|
| 1295 |
+
"angle": 0,
|
| 1296 |
+
"content": null
|
| 1297 |
+
}
|
| 1298 |
+
]
|
| 1299 |
+
]
|
2203.16xxx/2203.16263/581c38e5-2dd5-4af0-b295-8d57e76a6f20_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a3fccc80dbccacd1dda0ea23b291a7f918c230a6e38c23c8c14e117884724a94
|
| 3 |
+
size 186776
|
2203.16xxx/2203.16263/full.md
ADDED
|
@@ -0,0 +1,183 @@
|
|
| 1 |
+
# Does Audio Deepfake Detection Generalize?
|
| 2 |
+
|
| 3 |
+
Nicolas M. Müller<sup>1</sup>, Pavel Czempin<sup>2</sup>, Franziska Dieckmann<sup>2</sup>, Adam Froghyar<sup>3</sup>, Konstantin Böttinger<sup>1</sup>
|
| 4 |
+
|
| 5 |
+
$^{1}$ Fraunhofer AISEC $^{2}$ Technical University Munich $^{3}$ why do birds GmbH
|
| 6 |
+
|
| 7 |
+
nicolas.mueller@aisec.fraunhofer.de
|
| 8 |
+
|
| 9 |
+
# Abstract
|
| 10 |
+
|
| 11 |
+
Current text-to-speech algorithms produce realistic fakes of human voices, making deepfake detection a much-needed area of research. While researchers have presented various deep learning models for audio spoof detection, it is often unclear exactly why these architectures are successful: Preprocessing steps, hyperparameter settings, and the degree of fine-tuning are not consistent across related work. Which factors contribute to success, and which are accidental?
|
| 12 |
+
|
| 13 |
+
In this work, we address this problem: We systematize audio spoofing detection by re-implementing and uniformly evaluating twelve architectures from related work. We identify overarching features for successful audio deepfake detection, such as using cqtspec or logspec features instead of melspec features, which improves performance by $37\%$ EER on average, all other factors constant.
|
| 14 |
+
|
| 15 |
+
Additionally, we evaluate generalization capabilities: We collect and publish a new dataset consisting of 37.9 hours of found audio recordings of celebrities and politicians, of which 17.2 hours are deepfakes. We find that related work performs poorly on such real-world data (performance degradation of up to one thousand percent). This could suggest that the community has tailored its solutions too closely to the prevailing ASVspoof benchmark and that deepfakes are much harder to detect outside the lab than previously thought.
|
| 16 |
+
|
| 17 |
+
# 1. Introduction
|
| 18 |
+
|
| 19 |
+
Modern text-to-speech synthesis (TTS) is capable of realistic fakes of human voices, also known as audio deepfakes or spoofs. While there are many ethical applications of this technology, there is also a serious risk of malicious use. For example, TTS technology enables the cloning of politicians' voices [1, 2], which poses a variety of risks to society, including the spread of misinformation.
|
| 20 |
+
|
| 21 |
+
Reliable detection of speech spoofing can help mitigate such risks and is therefore an active area of research. However, since the technology to create audio deepfakes has only been available for a few years (see Wavenet [3] and Tacotron [4], published in 2016/17), audio spoof detection is still in its infancy. While many approaches have been proposed (cf. Section 2), it is still difficult to understand why some of the models work well: Each work uses different feature extraction techniques, preprocessing steps, hyperparameter settings, and fine-tuning. Which are the main factors and drivers for models to perform well? What can be learned in principle for the development of such systems?
|
| 22 |
+
|
| 23 |
+
Furthermore, the evaluation of spoof detection models has so far been performed exclusively on the ASVspoof dataset [5, 6], which means that the reported performance of these models is based on a limited set of TTS synthesis algorithms. ASVspoof is based on the VCTK dataset [7], which exclusively
|
| 24 |
+
|
| 25 |
+
features professional speakers and has been recorded in a studio environment, using a semi-anechoic chamber. What can we expect from audio spoof detection trained on this dataset? Is it capable of detecting realistic, unseen, 'in-the-wild' audio spoofs like those encountered on social media?
|
| 26 |
+
|
| 27 |
+
To answer these questions, this paper presents the following contributions:
|
| 28 |
+
|
| 29 |
+
- We reimplement twelve of the most popular architectures from related work and evaluate them according to a common standard. We systematically exchange components to attribute performance reported in related work to either model architecture, feature extraction, or data preprocessing techniques. In this way, we identify fundamental properties for well-performing audio deepfake detection.
|
| 30 |
+
- To investigate the applicability of related work in the real world, we introduce a new audio deepfake dataset<sup>1</sup>. We collect 17.2 hours of high-quality audio deepfakes and 20.7 hours of authentic material from 58 politicians and celebrities.
|
| 31 |
+
- We show that established models generally perform poorly on such real-world data. This discrepancy between reported and actual generalization ability suggests that the detection of audio fakes is a far more difficult challenge than previously thought.
|
| 32 |
+
|
| 33 |
+
# 2. Related Work
|
| 34 |
+
|
| 35 |
+
# 2.1. Model Architectures
|
| 36 |
+
|
| 37 |
+
There is a significant body of work on audio spoof detection, driven largely by the ASVspoof challenges and datasets [5, 6]. In this section, we briefly present the architectures and models used in our evaluation in Section 5.
|
| 38 |
+
|
| 39 |
+
LSTM-based models. Recurrent architectures are a natural choice in the area of language processing, with numerous related work utilizing such models [8, 9, 10, 11]. As a baseline for evaluating this approach, we implement a simple LSTM model: it consists of three LSTM layers followed by a single linear layer. The output is averaged over the time dimension to obtain a single embedding vector.
|
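For illustration, a minimal PyTorch sketch of such an LSTM baseline; the 513-dimensional feature size follows Section 4.2, while the hidden size of 256 is an assumption made here for concreteness:

```python
import torch.nn as nn

class LSTMBaseline(nn.Module):
    """Three LSTM layers, a time-average, and one linear layer."""
    def __init__(self, n_feats=513, hidden=256, n_classes=2):
        super().__init__()
        self.lstm = nn.LSTM(n_feats, hidden, num_layers=3, batch_first=True)
        self.out = nn.Linear(hidden, n_classes)

    def forward(self, x):            # x: (batch, time, n_feats)
        h, _ = self.lstm(x)
        emb = h.mean(dim=1)          # average over time -> single embedding
        return self.out(emb)         # logits: bona-fide vs. spoof
```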
| 40 |
+
|
| 41 |
+
LCNN. Another common architecture for audio spoof detection are LCNN-based learning models such as LCNN, LCNN-Attention, and LCNN-LSTM [12, 13, 14]. LCNNs combine convolutional layers with Max-Feature-Map activations to create 'light' convolutional neural networks. LCNN-Attention has an added single-head-attention pooling layer, while LCNN-LSTM uses a Bi-LSTM layer and a skip connection.
|
| 42 |
+
|
| 43 |
+
MesoNet. MesoNet is based on the Meso-4 [15] architecture, which was originally used for detecting facial video
|
| 44 |
+
|
| 45 |
+
deepfakes. It uses 4 convolutional layers in addition to Batch Normalization, Max Pooling, and a fully connected classifier.
|
| 46 |
+
|
| 47 |
+
MesoInception. Based on the facial deepfake detector Meso-Inception-4 [15], MesoInception extends the Meso-4 architecture with Inception blocks [16].
|
| 48 |
+
|
| 49 |
+
ResNet18. Residual Networks were first used for audio deepfake detection by [17], and continue to be employed [18, 19]. This architecture, first introduced in the computer vision domain [20], uses convolutional layers and shortcut connections, which avoid the vanishing gradient problem and allow the design of especially deep networks (18 layers for ResNet18).
|
| 50 |
+
|
| 51 |
+
Transformer. The Transformer architecture has also found its way into the field of audio spoof detection [21]. We use four self-attention layers with 256 hidden dimensions and skip-connections, and encode time with positional encodings [22].
|
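A sketch of such a detector using PyTorch's built-in encoder is shown below. The skip-connections correspond to the residual connections inside `nn.TransformerEncoderLayer`; the head count (4), the feature size, and the learned positional embedding (standing in for the sinusoidal encoding of [22]) are assumptions:

```python
import torch
import torch.nn as nn

class TransformerDetector(nn.Module):
    """Four self-attention layers with 256 hidden dimensions."""
    def __init__(self, n_feats=513, d_model=256, n_classes=2, max_len=4000):
        super().__init__()
        self.proj = nn.Linear(n_feats, d_model)
        self.pos = nn.Parameter(torch.zeros(1, max_len, d_model))  # positional encoding
        layer = nn.TransformerEncoderLayer(d_model=d_model, nhead=4, batch_first=True)
        self.encoder = nn.TransformerEncoder(layer, num_layers=4)
        self.out = nn.Linear(d_model, n_classes)

    def forward(self, x):                      # x: (batch, time, n_feats)
        h = self.proj(x) + self.pos[:, :x.size(1)]
        h = self.encoder(h).mean(dim=1)        # average over time
        return self.out(h)
```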
| 52 |
+
|
| 53 |
+
CRNNSpoof. This end-to-end architecture combines 1D convolutions with recurrent layers to learn features directly from raw audio samples [9].
|
| 54 |
+
|
| 55 |
+
RawNet2 [23] is another end-to-end model. It employs Sinc-Layers [24], which correspond to rectangular band-pass filters, to extract information directly from raw waveforms.
|
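The band-pass interpretation can be made concrete: in the time domain, an ideal (rectangular) band-pass filter is the difference of two sinc low-pass kernels. A standalone NumPy sketch follows; note that in SincNet [24] the two cutoff frequencies are learnable parameters, whereas here they are fixed, and the Hamming window is an assumed smoothing choice:

```python
import numpy as np

def sinc_bandpass(f_low, f_high, length=129, sr=16000):
    """Impulse response of an ideal band-pass filter as a sinc difference."""
    t = np.arange(length) - (length - 1) / 2          # centered time axis
    f1, f2 = f_low / sr, f_high / sr                  # normalized cutoffs
    h = 2 * f2 * np.sinc(2 * f2 * t) - 2 * f1 * np.sinc(2 * f1 * t)
    return h * np.hamming(length)                     # window to reduce ripple
```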
| 56 |
+
|
| 57 |
+
RawPC is an end-to-end model which also uses Sinc-layers to operate directly on raw waveforms. The architecture is found via differentiable architecture search [25].
|
| 58 |
+
|
| 59 |
+
RawGAT-ST is a spectro-temporal graph attention network (GAT) trained in an end-to-end fashion. It introduces spectral and temporal sub-graphs and a graph pooling strategy, and reports state-of-the-art spoof detection capabilities [26], which we can verify experimentally, c.f. Table 1.
|
| 60 |
+
|
| 61 |
+
# 3. Datasets
|
| 62 |
+
|
| 63 |
+
To train and evaluate our models, we use the ASVspoof 2019 dataset [5], in particular its Logical Access (LA) part. It consists of audio files that are either real (i.e., authentic recordings of human speech) or fake (i.e., synthesized or faked audio). The spoofed audio files are from 19 different TTS synthesis algorithms. From a spoofing detection point of view, ASVspoof considers synthetic utterances as a threat to the authenticity of the human voice, and therefore labels them as 'attacks'. In total, there are 19 different attackers in the ASVspoof 2019 dataset, labeled A1 - A19. For each attacker, there are 4914 synthetic audio recordings and 7355 real samples. This dataset is arguably the best-known audio deepfake dataset and is used by almost all related work.
|
| 64 |
+
|
| 65 |
+
In order to evaluate our models on realistic unseen data in-the-wild, we additionally create and publish a new audio deepfake dataset, c.f. Figure 1. It consists of 37.9 hours of audio clips that are either fake (17.2 hours) or real (20.7 hours). We feature English-speaking celebrities and politicians, both from present and past<sup>2</sup>. The fake clips are created by segmenting 219 publicly available video and audio files that explicitly advertise audio deepfakes. Since the speakers talk absurdly and out-of-character ('Donald Trump reads Star Wars'), it is easy to verify that the audio files are really spoofed. We then manually collect corresponding genuine instances from the same speakers using publicly available material such as podcasts, speeches, etc. We take care to include clips where the type of speaker, style, emotions, etc. are similar to the fake (e.g., for a fake speech by Barack Obama, we include an authentic speech and try to find similar values for background noise, emotions, duration, etc.). The clips have an average length of 4.3 seconds and
|
| 66 |
+
|
| 67 |
+

|
| 68 |
+
Biden, Joe
|
| 69 |
+
|
| 70 |
+

|
| 71 |
+
Clinton, Bill
|
| 72 |
+
|
| 73 |
+

|
| 74 |
+
|
| 75 |
+

|
| 76 |
+
Zuckerberg, Mark
|
| 77 |
+
Figure 1: Schematics of our collected dataset. For $n = 58$ celebrities and politicians, we collected both bona-fide and spoofed audio (represented by blue and red boxes per speaker). In total, we collected 20.8 hours of bona-fide and 17.2 hours of spoofed audio. On average, there are 23 minutes of bona-fide and 18 minutes of spoofed audio per speaker.
|
| 78 |
+
|
| 79 |
+
are converted to 'wav' after downloading. All recordings were downsampled to $16\mathrm{kHz}$ (the highest common frequency in the original recordings). Clips were collected from publicly available sources such as social networks and popular video sharing platforms. This dataset is intended as evaluation data: it allows evaluation of a model's cross-database capabilities on a realistic use case.
|
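For reference, loading a clip and resampling it to the common 16 kHz rate is a one-liner with librosa [33]; the file path below is purely illustrative:

```python
import librosa

# librosa resamples on load when a target rate is given.
audio, sr = librosa.load("clips/speaker_clip.wav", sr=16000, mono=True)
print(f"{len(audio) / sr:.1f} s at {sr} Hz")
```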
| 80 |
+
|
| 81 |
+
# 4. Experimental Setup
|
| 82 |
+
|
| 83 |
+
# 4.1. Training and Evaluation
|
| 84 |
+
|
| 85 |
+
# 4.1.1. Hyperparameters
|
| 86 |
+
|
| 87 |
+
We train all of our models using a cross-entropy loss with a log-Softmax over the output logits. We choose the Adam [27] optimizer. We initialize the learning rate at 0.0001 and use a learning rate scheduler. We train for 100 epochs with early stopping using a patience of five epochs.
|
| 88 |
+
|
| 89 |
+
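
The following is a minimal PyTorch sketch of this training setup. The stand-in model and the scheduler type (`ReduceLROnPlateau`) are assumptions, since only the optimizer, learning rate, epoch budget, and patience are specified above.

```python
import torch
import torch.nn as nn

model = nn.Sequential(nn.Flatten(), nn.Linear(16000, 2))  # stand-in for any detector
criterion = nn.NLLLoss()                      # cross-entropy over log-softmax outputs
optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)
# The scheduler type is not specified in the paper; ReduceLROnPlateau is one plausible choice.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2)

best_loss, patience, bad_epochs = float("inf"), 5, 0
for epoch in range(100):
    x = torch.randn(8, 1, 16000)              # dummy batch: 1 s of raw audio at 16 kHz
    y = torch.randint(0, 2, (8,))              # 0 = bona-fide, 1 = spoof
    logits = model(x)
    loss = criterion(torch.log_softmax(logits, dim=-1), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

    val_loss = loss.item()                     # placeholder for a real validation pass
    scheduler.step(val_loss)
    if val_loss < best_loss:
        best_loss, bad_epochs = val_loss, 0
    else:
        bad_epochs += 1
        if bad_epochs >= patience:             # early stopping with a patience of five
            break
```
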
# 4.1.2. Train and Evaluation Data Splits

We train our models on the 'train' and 'dev' parts of the Logical Access (LA) portion of the ASVspoof 2019 dataset [5]. This is consistent with most related work and also with the evaluation procedure of the ASVspoof 2019 Challenge. We test against two evaluation datasets. As in-domain evaluation data, we use the 'eval' split of ASVspoof 2019. This split contains unseen attacks, i.e., attacks not seen during training. However, the evaluation audio shares certain properties with the training data [28], so model generalization cannot be assessed using the 'eval' split of ASVspoof 2019 alone. This motivates the use of our proposed 'in-the-wild' dataset, see Section 3, as unknown out-of-domain evaluation data.

# 4.1.3. Evaluation Metrics

We report both the equal error rate (EER) and the tandem detection cost function (t-DCF) [29] on the ASVspoof 2019 'eval' data. For consistency with related work, we use the original implementation of the t-DCF as provided for the ASVspoof 2019 challenge [30]. For our proposed dataset, we report only the EER, since t-DCF scores require the false-alarm and miss costs, which are available only for ASVspoof.

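
For illustration, here is a small self-contained sketch of how an EER can be computed from detection scores; the scoring convention and the data are hypothetical.

```python
import numpy as np

def equal_error_rate(scores, labels):
    """EER: the threshold where false-acceptance rate equals false-rejection rate.

    scores: higher = more likely bona-fide; labels: 1 = bona-fide, 0 = spoof.
    """
    scores, labels = np.asarray(scores), np.asarray(labels)
    thresholds = np.sort(np.unique(scores))
    fars = np.array([np.mean(scores[labels == 0] >= t) for t in thresholds])  # spoof accepted
    frrs = np.array([np.mean(scores[labels == 1] < t) for t in thresholds])   # bona-fide rejected
    i = np.argmin(np.abs(fars - frrs))
    return (fars[i] + frrs[i]) / 2

# Hypothetical scores: random guessing yields an EER near 50%.
rng = np.random.default_rng(0)
labels = rng.integers(0, 2, 1000)
print(equal_error_rate(rng.random(1000), labels))
```
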
# 4.2. Feature Extraction

Several architectures used in this work require preprocessing the audio data with a feature extractor (LCNN, LCNN-Attention, LCNN-LSTM, LSTM, MesoNet, MesoInception, ResNet18, Transformer).

<table><tr><td rowspan="2">Model Name</td><td rowspan="2">Feature Type</td><td rowspan="2">Input Length</td><td colspan="2">ASVspoof19 eval</td><td rowspan="2">In-the-Wild Data EER%</td></tr><tr><td>EER%</td><td>t-DCF</td></tr><tr><td>LCNN</td><td>cqtspec</td><td>Full</td><td>6.354±0.39</td><td>0.174±0.03</td><td>65.559±11.14</td></tr><tr><td>LCNN</td><td>cqtspec</td><td>4s</td><td>25.534±0.10</td><td>0.512±0.00</td><td>70.015±4.74</td></tr><tr><td>LCNN</td><td>logspec</td><td>Full</td><td>7.537±0.42</td><td>0.141±0.02</td><td>72.515±2.15</td></tr><tr><td>LCNN</td><td>logspec</td><td>4s</td><td>22.271±2.36</td><td>0.377±0.01</td><td>91.110±2.17</td></tr><tr><td>LCNN</td><td>melspec</td><td>Full</td><td>15.093±2.73</td><td>0.428±0.05</td><td>70.311±2.15</td></tr><tr><td>LCNN</td><td>melspec</td><td>4s</td><td>30.258±3.38</td><td>0.503±0.04</td><td>81.942±3.50</td></tr><tr><td>LCNN-Attention</td><td>cqtspec</td><td>Full</td><td>6.762±0.27</td><td>0.178±0.01</td><td>66.684±1.08</td></tr><tr><td>LCNN-Attention</td><td>cqtspec</td><td>4s</td><td>23.228±3.98</td><td>0.468±0.06</td><td>75.317±8.25</td></tr><tr><td>LCNN-Attention</td><td>logspec</td><td>Full</td><td>7.888±0.57</td><td>0.180±0.05</td><td>77.122±4.91</td></tr><tr><td>LCNN-Attention</td><td>logspec</td><td>4s</td><td>14.958±2.37</td><td>0.354±0.03</td><td>80.651±6.14</td></tr><tr><td>LCNN-Attention</td><td>melspec</td><td>Full</td><td>13.487±5.59</td><td>0.374±0.14</td><td>70.986±9.73</td></tr><tr><td>LCNN-Attention</td><td>melspec</td><td>4s</td><td>19.534±2.57</td><td>0.449±0.02</td><td>85.118±1.01</td></tr><tr><td>LCNN-LSTM</td><td>cqtspec</td><td>Full</td><td>6.228±0.50</td><td>0.113±0.01</td><td>61.500±1.37</td></tr><tr><td>LCNN-LSTM</td><td>cqtspec</td><td>4s</td><td>20.857±0.14</td><td>0.478±0.01</td><td>72.251±2.97</td></tr><tr><td>LCNN-LSTM</td><td>logspec</td><td>Full</td><td>9.936±1.74</td><td>0.158±0.01</td><td>79.109±0.84</td></tr><tr><td>LCNN-LSTM</td><td>logspec</td><td>4s</td><td>13.018±3.08</td><td>0.330±0.05</td><td>79.706±15.80</td></tr><tr><td>LCNN-LSTM</td><td>melspec</td><td>Full</td><td>9.260±1.33</td><td>0.240±0.04</td><td>62.304±0.17</td></tr><tr><td>LCNN-LSTM</td><td>melspec</td><td>4s</td><td>27.948±4.64</td><td>0.483±0.03</td><td>82.857±3.49</td></tr><tr><td>LSTM</td><td>cqtspec</td><td>Full</td><td>7.162±0.27</td><td>0.127±0.00</td><td>53.711±11.68</td></tr><tr><td>LSTM</td><td>cqtspec</td><td>4s</td><td>14.409±2.19</td><td>0.382±0.05</td><td>55.880±0.88</td></tr><tr><td>LSTM</td><td>logspec</td><td>Full</td><td>10.314±0.81</td><td>0.160±0.00</td><td>73.111±2.52</td></tr><tr><td>LSTM</td><td>logspec</td><td>4s</td><td>23.232±0.32</td><td>0.512±0.00</td><td>78.071±0.49</td></tr><tr><td>LSTM</td><td>melspec</td><td>Full</td><td>16.216±2.92</td><td>0.358±0.00</td><td>65.957±7.70</td></tr><tr><td>LSTM</td><td>melspec</td><td>4s</td><td>37.463±0.46</td><td>0.553±0.01</td><td>64.297±2.23</td></tr><tr><td>MesoInception</td><td>cqtspec</td><td>Full</td><td>11.353±1.00</td><td>0.326±0.03</td><td>50.007±14.69</td></tr><tr><td>MesoInception</td><td>cqtspec</td><td>4s</td><td>21.973±4.96</td><td>0.453±0.09</td><td>68.192±12.47</td></tr><tr><td>MesoInception</td><td>logspec</td><td>Full</td><td>10.019±0.18</td><td>0.238±0.02</td><td>37.414±9.16</td></tr><tr><td>MesoInception</td><td>logspec</td><td>4s</td><td>16.377±3.72</td><td>0.375±0.09</td><td>72.753±6.62</td></tr><tr><td>MesoInception</td><td>melspec</td><td>Full</td><td>14.058±5.67</td><td>0.331±0.11</td><td>61.996±12.65</td></tr><tr><td>MesoInception</td><td>mel
spec</td><td>4s</td><td>21.484±3.51</td><td>0.408±0.03</td><td>51.980±15.32</td></tr><tr><td>MesoNet</td><td>cqtspec</td><td>Full</td><td>7.422±1.61</td><td>0.219±0.07</td><td>54.544±11.50</td></tr><tr><td>MesoNet</td><td>cqtspec</td><td>4s</td><td>20.395±2.03</td><td>0.426±0.06</td><td>65.928±2.57</td></tr><tr><td>MesoNet</td><td>logspec</td><td>Full</td><td>8.369±1.06</td><td>0.170±0.05</td><td>46.939±5.81</td></tr><tr><td>MesoNet</td><td>logspec</td><td>4s</td><td>11.124±0.79</td><td>0.263±0.03</td><td>80.707±12.03</td></tr><tr><td>MesoNet</td><td>melspec</td><td>Full</td><td>11.305±1.80</td><td>0.321±0.06</td><td>58.405±11.28</td></tr><tr><td>MesoNet</td><td>melspec</td><td>4s</td><td>21.761±0.26</td><td>0.467±0.00</td><td>64.415±15.68</td></tr><tr><td>ResNet18</td><td>cqtspec</td><td>Full</td><td>6.552±0.49</td><td>0.140±0.01</td><td>49.759±0.17</td></tr><tr><td>ResNet18</td><td>cqtspec</td><td>4s</td><td>18.378±1.76</td><td>0.432±0.07</td><td>61.827±7.46</td></tr><tr><td>ResNet18</td><td>logspec</td><td>Full</td><td>7.386±0.42</td><td>0.139±0.02</td><td>80.212±0.23</td></tr><tr><td>ResNet18</td><td>logspec</td><td>4s</td><td>15.521±1.83</td><td>0.387±0.02</td><td>88.729±2.88</td></tr><tr><td>ResNet18</td><td>melspec</td><td>Full</td><td>21.658±2.56</td><td>0.551±0.04</td><td>77.614±1.47</td></tr><tr><td>ResNet18</td><td>melspec</td><td>4s</td><td>28.178±0.33</td><td>0.489±0.01</td><td>83.006±7.17</td></tr><tr><td>Transformer</td><td>cqtspec</td><td>Full</td><td>7.498±0.34</td><td>0.129±0.01</td><td>43.775±2.85</td></tr><tr><td>Transformer</td><td>cqtspec</td><td>4s</td><td>11.256±0.07</td><td>0.329±0.00</td><td>48.208±1.49</td></tr><tr><td>Transformer</td><td>logspec</td><td>Full</td><td>9.949±1.77</td><td>0.210±0.06</td><td>64.789±0.88</td></tr><tr><td>Transformer</td><td>logspec</td><td>4s</td><td>13.935±1.70</td><td>0.320±0.03</td><td>44.406±2.17</td></tr><tr><td>Transformer</td><td>melspec</td><td>Full</td><td>20.813±6.44</td><td>0.394±0.10</td><td>73.307±2.81</td></tr><tr><td>Transformer</td><td>melspec</td><td>4s</td><td>26.495±1.76</td><td>0.495±0.00</td><td>68.407±5.53</td></tr><tr><td>CRNNSpoof</td><td>raw</td><td>Full</td><td>15.658±0.35</td><td>0.312±0.01</td><td>44.500±8.13</td></tr><tr><td>CRNNSpoof</td><td>raw</td><td>4s</td><td>19.640±1.62</td><td>0.360±0.04</td><td>41.710±4.86</td></tr><tr><td>RawNet2</td><td>raw</td><td>Full</td><td>3.154±0.87</td><td>0.078±0.02</td><td>37.819±2.23</td></tr><tr><td>RawNet2</td><td>raw</td><td>4s</td><td>4.351±0.29</td><td>0.132±0.01</td><td>33.943±2.59</td></tr><tr><td>RawPC</td><td>raw</td><td>Full</td><td>3.092±0.36</td><td>0.071±0.00</td><td>45.715±12.20</td></tr><tr><td>RawPC</td><td>raw</td><td>4s</td><td>3.067±0.91</td><td>0.097±0.03</td><td>52.884±6.08</td></tr><tr><td>RawGAT-ST</td><td>raw</td><td>Full</td><td>1.229±0.43</td><td>0.036±0.01</td><td>37.154±1.95</td></tr><tr><td>RawGAT-ST</td><td>raw</td><td>4s</td><td>2.297±0.98</td><td>0.074±0.03</td><td>38.767±1.28</td></tr></table>

Table 1: Full results of evaluation on the ASVspoof 2019 LA 'eval' data. We compare different model architectures against different feature types and audio input lengths (4s, fixed-sized inputs vs. variable-length inputs). Results are averaged over three independent trials with random initialization, and the standard deviation is reported. Best-performing configurations are highlighted in boldface. When evaluating the models on our proposed 'in-the-wild' dataset, we see an increase in EER by up to $1000\%$ compared to ASVspoof 2019 (rightmost column).

<table><tr><td rowspan="2">Input Length</td><td colspan="2">ASVspoof19 eval</td><td>In-the-Wild Data</td></tr><tr><td>EER %</td><td>t-DCF</td><td>EER %</td></tr><tr><td>Full</td><td>9.85</td><td>0.22</td><td>60.10</td></tr><tr><td>4s</td><td>18.89</td><td>0.39</td><td>67.25</td></tr></table>

Table 2: Model performance averaged by input preprocessing. Fixed-length, 4s inputs perform significantly worse on the ASVspoof data and on the 'in-the-wild' dataset than variable-length inputs. This suggests that related work using fixed-length inputs may (unnecessarily) sacrifice performance.

We evaluate these architectures on constant-Q transform (cqtspec [31]), log spectrogram (logspec), and mel-scaled spectrogram (melspec [32]) features (all of them 513-dimensional). We use Python, librosa [33], and scipy [34]. The remaining models do not rely on pre-processed data, but use raw audio waveforms as inputs.

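
A rough sketch of the three feature types using librosa follows; the exact STFT/CQT parameters are assumptions, since the text only states that all features are 513-dimensional (which an `n_fft` of 1024 would yield for the log spectrogram).

```python
import numpy as np
import librosa

sr = 16000
y = np.sin(2 * np.pi * 440 * np.arange(sr) / sr)   # 1 s synthetic test tone

# n_fft = 1024 gives 1024 // 2 + 1 = 513 frequency bins.
logspec = np.log(np.abs(librosa.stft(y, n_fft=1024)) + 1e-8)
melspec = librosa.feature.melspectrogram(y=y, sr=sr, n_fft=1024)
cqtspec = np.abs(librosa.cqt(y, sr=sr))            # default 84 CQT bins

print(logspec.shape, melspec.shape, cqtspec.shape)
```
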
# 4.3. Audio Input Length

Audio samples usually vary in length, which is also the case for the data in ASVspoof 2019 and our proposed 'in-the-wild' dataset. While some models can accommodate variable-length input (and thus also fixed-length input), many cannot. We extend the latter by introducing a global averaging layer, which adds this capability.

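
A minimal PyTorch sketch of such a global averaging layer is shown below; the channel count and the classifier head are hypothetical. Adaptive average pooling collapses the time axis, so a fixed-size head can follow feature maps of any input length.

```python
import torch
import torch.nn as nn

class GlobalAvgHead(nn.Module):
    def __init__(self, channels: int, n_classes: int = 2):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool1d(1)       # (B, C, T) -> (B, C, 1), any T
        self.fc = nn.Linear(channels, n_classes)

    def forward(self, feats: torch.Tensor) -> torch.Tensor:
        return self.fc(self.pool(feats).squeeze(-1))

head = GlobalAvgHead(channels=64)
print(head(torch.randn(2, 64, 401)).shape)  # works for T = 401 ...
print(head(torch.randn(2, 64, 997)).shape)  # ... and equally for T = 997
```
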
For the evaluation of fixed-length input, we choose a length of four seconds, following [23]. If an input sample is longer, a random four-second subset of the sample is used. If it is shorter, the sample is repeated. To keep the evaluation fair, these shorter samples are also repeated during the full-length evaluation. This ensures that full-length input is never shorter than truncated input, and always at least 4s long.

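
A small sketch of this truncate-or-repeat preprocessing (assuming 16 kHz audio, so four seconds corresponds to 64,000 samples):

```python
import numpy as np

def to_fixed_length(wav: np.ndarray, target_len: int = 4 * 16000) -> np.ndarray:
    """Random 4 s crop of longer clips; repeat shorter clips until long enough."""
    if len(wav) >= target_len:
        start = np.random.randint(0, len(wav) - target_len + 1)
        return wav[start:start + target_len]
    reps = int(np.ceil(target_len / len(wav)))
    return np.tile(wav, reps)[:target_len]

print(to_fixed_length(np.ones(100000)).shape)  # (64000,): random crop
print(to_fixed_length(np.ones(10000)).shape)   # (64000,): repeated
```
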
# 5. Results

Table 1 shows the results of our experiments, where we evaluate all models against all configurations of data preprocessing: we train twelve different models, using one of four different feature types, with two different ways of handling variable-length audio. Each experiment is performed three times using random initialization. We report the averaged EER and t-DCF, as well as the standard deviation. We observe that on ASVspoof, our implementations perform comparably to related work, within a margin of approximately $2 - 4\%$ EER and 0.1 t-DCF. This is likely because we do not fine-tune our models' hyper-parameters.

# 5.1. Fixed vs. Variable Input Length

We analyze the effects of truncating the input signal to a fixed length compared to using the full, unabridged audio. For all models, performance decreases when the input is trimmed to $4s$. Table 2 averages all results based on input length. We see that the average EER on ASVspoof drops from $18.89\%$ to $9.85\%$ when the full-length input is used. These results show that a four-second clip is insufficient for the model to extract useful information compared to using the full audio file as input. Therefore, we propose not to use fixed-length truncated inputs, but to provide the full audio file to the model. This may seem obvious, but the numerous works that use fixed-length inputs [23, 25, 26] suggest otherwise.

# 5.2. Effects of Feature Extraction Techniques

We discuss the effects of different feature preprocessing techniques, cf. Table 1: The 'raw' models outperform the feature-based models, achieving as low as $1.2\%$ EER on ASVspoof and $33.9\%$ EER on the 'in-the-wild' dataset (RawGAT-ST and RawNet2). The spectrogram-based models perform slightly worse, achieving at best $6.3\%$ EER on ASVspoof and $37.4\%$ on the 'in-the-wild' dataset (LCNN and MesoNet). The superiority of the 'raw' models is assumed to be due to a finer feature-extraction resolution than that of the spectrogram-based models [26]. This has led recent research to focus largely on such raw-feature, end-to-end models [25, 26].

Concerning the spectrogram-based models, we observe that melspec features are always outperformed by either cqtspec or logspec. Simply replacing melspec with cqtspec increases the average performance by $37\%$, all other factors held constant.

# 5.3. Evaluation on 'in-the-wild' data

Especially interesting is the performance of the models on real-world deepfake data. Table 1 shows the performance of our models on the 'in-the-wild' dataset. There is a large performance gap between the ASVspoof 2019 evaluation data and our proposed 'in-the-wild' dataset. In general, the EER values of the models deteriorate by about 200 to 1000 percent. Often, the models do not perform better than random guessing.

To investigate this further, we train our best 'in-the-wild' model from Table 1, RawNet2 with 4s input length, on all data from ASVspoof 2019, i.e., the 'train', 'dev', and 'eval' splits. We then re-evaluate on the 'in-the-wild' dataset to investigate whether adding more ASVspoof training data improves out-of-domain performance. We achieve $33.1 \pm 0.2\%$ EER, i.e., no improvement over training with only the 'train' and 'dev' data.

The inclusion of the 'eval' split does not seem to add much information that could be used for real-world generalization. This is plausible in that all splits of ASVspoof are fundamentally based on the same dataset, VCTK [7], although the synthesis algorithms and speakers differ between splits [5].

# 6. Conclusion

In this paper, we systematically evaluate audio spoof detection models from related work according to common standards. In addition, we present a new audio deepfake dataset of 'in-the-wild' audio spoofs, which we use to evaluate the generalization capabilities of related work in a real-world scenario.

We find that regardless of the model architecture, some preprocessing steps are more successful than others. The use of cqtspec or logspec features consistently outperforms the use of melspec features in our comprehensive analysis. Furthermore, we find that for most models, four seconds of input audio does not saturate performance compared to longer examples. Therefore, we argue that one should consider using cqtspec features and unabridged input audio when designing audio deepfake detection architectures.

Most importantly, however, we find that the 'in-the-wild' generalization capabilities of many models may have been overestimated. We demonstrate this by collecting our own audio deepfake dataset and evaluating twelve different model architectures on it. Performance drops sharply, and some models degenerate to random guessing. It may be that the community has tailored its detection models too closely to the prevailing benchmark, ASVspoof, and that deepfakes are much harder to detect outside the lab than previously thought.

# 7. References

[1] "Audio deep fake: Demonstrator am Fraunhofer AISEC - YouTube," https://www.youtube.com/watch?v=MZTF0eAALmE, (Accessed on 04/01/2021).

[2] "Deepfake video of volodymyr zelensky surrendering surfaces on social media - youtube," https://www.youtube.com/watch?v=X17yrEV5sl4, (Accessed on 03/23/2022).
|
| 151 |
+
[3] A. v. d. Oord, S. Dieleman, H. Zen, K. Simonyan, O. Vinyals, A. Graves, N. Kalchbrenner, A. Senior, and K. Kavukcuoglu, "Wavenet: A generative model for raw audio," arXiv preprint arXiv:1609.03499, 2016.
|
| 152 |
+
[4] Y. Wang, R. J. Skerry-Ryan, D. Stanton, Y. Wu, R. J. Weiss, N. Jaitly, Z. Yang, Y. Xiao, Z. Chen, S. Bengio, Q. V. Le, Y. Agiomyrgiannakis, R. Clark, and R. A. Saurous, "Tacotron: A fully end-to-end text-to-speech synthesis model," CoRR, vol. abs/1703.10135, 2017. [Online]. Available: http://arxiv.org/abs/1703.10135
|
| 153 |
+
[5] M. Todisco, X. Wang, V. Vestman, M. Sahidullah, H. Delgado, A. Nautsch, J. Yamagishi, N. Evans, T. Kinnunen, and K. A. Lee, "Asvspoof 2019: Future horizons in spoofed and fake audio detection," arXiv preprint arXiv:1904.05441, 2019.
|
| 154 |
+
[6] A. Nautsch, X. Wang, N. Evans, T. H. Kinnunen, V. Vestman, M. Todisco, H. Delgado, M. Sahidullah, J. Yamagishi, and K. A. Lee, "ASVspoof 2019: Spoofing Countermeasures for the Detection of Synthesized, Converted and Replayed Speech," vol. 3, no. 2, pp. 252-265.
|
| 155 |
+
[7] J. Yamagishi, C. Veaux, and K. MacDonald, "CSTR VCTK Corpus: English multi-speaker corpus for CSTR voice cloning toolkit (version 0.92)," 2019.
|
| 156 |
+
[8] A. Gomez-Alanis, A. M. Peinado, J. A. Gonzalez, and A. M. Gomez, “A Gated Recurrent Convolutional Neural Network for Robust Spoofing Detection,” vol. 27, no. 12, pp. 1985–1999.
|
| 157 |
+
[9] A. Chintha, B. Thai, S. J. Sohrawardi, K. M. Bhatt, A. Hickerson, M. Wright, and R. Ptucha, "Recurrent Convolutional Structures for Audio Spoof and Video Deepfake Detection," pp. 1-1.
|
| 158 |
+
[10] L. Zhang, X. Wang, E. Cooper, J. Yamagishi, J. Patino, and N. Evans, "An initial investigation for detecting partially spoofed audio," arXiv preprint arXiv:2104.02518, 2021.
|
| 159 |
+
[11] S. Tambe, A. Pawar, and S. Yadav, “Deep fake videos identification using ann and lstm,” Journal of Discrete Mathematical Sciences and Cryptography, vol. 24, no. 8, pp. 2353–2364, 2021.
|
| 160 |
+
[12] X. Wang and J. Yamagishi. A Comparative Study on Recent Neural Spoofing Countermeasures for Synthetic Speech Detection. [Online]. Available: http://arxiv.org/abs/2103.11326
|
| 161 |
+
[13] G. Lavrentyeva, S. Novoselov, E. Malykh, A. Kozlov, O. Kudashev, and V. Shchemelinin, "Audio replay attack detection with deep learning frameworks," in Interspeech 2017. ISCA, pp. 82-86. [Online]. Available: http://www.isca-speech.org/archive/Interspeech_2017/abstracts/0360.html
|
| 162 |
+
[14] G. Lavrentyeva, S. Novoselov, A. Tseren, M. Volkova, A. Gorlanov, and A. Kozlov, "STC antispoofing systems for the ASVspoof2019 challenge," in Interspeech 2019. ISCA, pp. 1033-1037. [Online]. Available: http://www.isca-speech.org/archive/Interspeech_2019/abstracts/1768.html
|
| 163 |
+
[15] D. Afchar, V. Nozick, J. Yamagishi, and I. Echizen, "MesoNet: A Compact Facial Video Forgery Detection Network," in 2018 IEEE International Workshop on Information Forensics and Security (WIFS), pp. 1-7.
|
| 164 |
+
[16] C. Szegedy, W. Liu, Y. Jia, P. Sermanet, S. Reed, D. Anguelov, D. Erhan, V. Vanhoucke, and A. Rabinovich, "Going deeper with convolutions," in 2015 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2015, pp. 1-9.
|
| 165 |
+
[17] M. Alzantot, Z. Wang, and M. B. Srivastava, “Deep Residual Neural Networks for Audio Spoofing Detection,” in Interspeech 2019. ISCA, pp. 1078–1082. [Online]. Available: http://www.isca-speech.org/archive/Interspeech_2019/abstracts/3174.html
|
| 166 |
+
|
| 167 |
+
[18] Y. Zhang, F. Jiang, and Z. Duan, “One-class learning towards synthetic voice spoofing detection,” IEEE Signal Processing Letters, vol. 28, pp. 937–941, 2021.
|
| 168 |
+
[19] J. Monteiro, J. Alam, and T. H. Falk, "Generalized end-to-end detection of spoofing attacks to automatic speaker recognizers," Computer Speech & Language, vol. 63, p. 101096, 2020.
|
| 169 |
+
[20] K. He, X. Zhang, S. Ren, and J. Sun, "Deep residual learning for image recognition," in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770-778.
|
| 170 |
+
[21] Z. Zhang, X. Yi, and X. Zhao, "Fake speech detection using residual network with transformer encoder," in Proceedings of the 2021 ACM Workshop on Information Hiding and Multimedia Security, 2021, pp. 13-22.
|
| 171 |
+
[22] A. Vaswani, N. Shazeer, N. Parmar, J. Uszkoreit, L. Jones, A. N. Gomez, L. Kaiser, and I. Polosukhin, "Attention is all you need," Advances in neural information processing systems, vol. 30, 2017.
|
| 172 |
+
[23] H. Tak, J. Patino, M. Todisco, A. Nautsch, N. Evans, and A. Larcher, “End-to-End anti-spoofing with RawNet2,” in ICASSP 2021 - 2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), pp. 6369-6373.
|
| 173 |
+
[24] M. Ravanelli and Y. Bengio, "Speaker recognition from raw waveform with sincnet," in 2018 IEEE Spoken Language Technology Workshop (SLT). IEEE, 2018, pp. 1021-1028.
|
| 174 |
+
[25] W. Ge, J. Patino, M. Todisco, and N. Evans, "Raw differentiable architecture search for speech deepfake and spoofing detection," arXiv preprint arXiv:2107.12212, 2021.
|
| 175 |
+
[26] H. Tak, J.-w. Jung, J. Patino, M. Kamble, M. Todisco, and N. Evans, "End-to-end spectro-temporal graph attention networks for speaker verification anti-spoofing and speech deepfake detection," arXiv preprint arXiv:2107.12710, 2021.
|
| 176 |
+
[27] D. P. Kingma and J. Ba, "Adam: A method for stochastic optimization," in 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings, Y. Bengio and Y. LeCun, Eds., 2015. [Online]. Available: http://arxiv.org/abs/1412.6980
|
| 177 |
+
[28] N. M. Müller, F. Dieckmann, P. Czempin, R. Canals, J. Williams, and K. Böttinger. Speech is Silver, Silence is Golden: What do ASVspoof-trained Models Really Learn? [Online]. Available: http://arxiv.org/abs/2106.12914
|
| 178 |
+
[29] T. Kinnunen, K. A. Lee, H. Delgado, N. Evans, M. Todisco, M. Sahidullah, J. Yamagishi, and D. A. Reynolds, "t-DCF: a detection cost function for the tandem assessment of spoofing countermeasures and automatic speaker verification," in Odyssey 2018 The Speaker and Language Recognition Workshop. ISCA, pp. 312-319.
|
| 179 |
+
[30] "tdcf official implementation," https://www.asvspoof.org/ asvsproof2019/tDCF.python_v1.zip, (Accessed on 03/03/2022).
|
| 180 |
+
[31] J. C. Brown, “Calculation of a constant q spectral transform,” The Journal of the Acoustical Society of America, vol. 89, no. 1, pp. 425–434, 1991.
|
| 181 |
+
[32] S. S. Stevens, J. Volkmann, and E. B. Newman, “A scale for the measurement of the psychological magnitude pitch,” The journal of the acoustical society of america, vol. 8, no. 3, pp. 185–190, 1937.
|
| 182 |
+
[33] B. McFee, C. Raffel, D. Liang, D. P. Ellis, M. McVicar, E. Battenberg, and O. Nieto, "librosa: Audio and music signal analysis in python," in Proceedings of the 14th python in science conference, vol. 8. Citeseer, 2015, pp. 18-25.
|
| 183 |
+
[34] P. Virtanen, R. Gommers, T. E. Oliphant, M. Haberland, T. Reddy, D. Cournapeau, E. Burovski, P. Peterson, W. Weckesser, J. Bright et al., "Scipy 1.0: fundamental algorithms for scientific computing in python," Nature methods, vol. 17, no. 3, pp. 261-272, 2020.
|
2203.16xxx/2203.16263/images.zip
ADDED

@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:32d099a431e6b321fc507888c4f0803f8e047ee8a0eebef682ba65f7294ba3c2
size 329224

2203.16xxx/2203.16263/layout.json
ADDED

The diff for this file is too large to render. See raw diff

2203.16xxx/2203.16265/92320dc4-7086-4f19-bc8e-609012aa7b1a_content_list.json
ADDED

The diff for this file is too large to render. See raw diff

2203.16xxx/2203.16265/92320dc4-7086-4f19-bc8e-609012aa7b1a_model.json
ADDED

The diff for this file is too large to render. See raw diff

2203.16xxx/2203.16265/92320dc4-7086-4f19-bc8e-609012aa7b1a_origin.pdf
ADDED

@@ -0,0 +1,3 @@

version https://git-lfs.github.com/spec/v1
oid sha256:50a62a73664214946e28155f5a2fe599a7370b3f246f824f75307e50294dfe0f
size 11804609

2203.16xxx/2203.16265/full.md
ADDED

@@ -0,0 +1,405 @@

# SeqTR: A Simple yet Universal Network for Visual Grounding

Chaoyang Zhu$^{1}$, Yiyi Zhou$^{1}$, Yunhang Shen$^{3}$, Gen Luo$^{1}$, Xingjia Pan$^{3}$, Mingbao Lin$^{3}$, Chao Chen$^{3}$, Liujuan Cao$^{1*}$, Xiaoshuai Sun$^{1,4}$, Rongrong Ji$^{1,2,4}$

$^{1}$ MAC Lab, Department of Artificial Intelligence, School of Informatics, Xiamen University. $^{2}$ Institute of Energy Research, Jiangxi Academy of Sciences. $^{3}$ Tencent Youtu Lab. $^{4}$ Institute of Artificial Intelligence, Xiamen University. cyzhu@stu.xmu.edu.cn, zhouyiyi@xmu.edu.cn, shenyunhang01@gmail.com, luogen@stu.xmu.edu.cn, xjia.pan@gmail.com, linmb001@outlook.com, aaronccchen@tencent.com, {caoliujuan,xssun,rrji}@xmu.edu.cn

Abstract. In this paper, we propose a simple yet universal network termed SeqTR for visual grounding tasks, e.g., phrase localization, referring expression comprehension (REC) and segmentation (RES). The canonical paradigms for visual grounding often require substantial expertise in designing network architectures and loss functions, making them hard to generalize across tasks. To simplify and unify the modeling, we cast visual grounding as a point prediction problem conditioned on image and text inputs, where either the bounding box or binary mask is represented as a sequence of discrete coordinate tokens. Under this paradigm, visual grounding tasks are unified in our SeqTR network without task-specific branches or heads, e.g., the convolutional mask decoder for RES, which greatly reduces the complexity of multi-task modeling. In addition, SeqTR also shares the same optimization objective for all tasks with a simple cross-entropy loss, further reducing the complexity of deploying hand-crafted loss functions. Experiments on five benchmark datasets demonstrate that the proposed SeqTR outperforms (or is on par with) the existing state-of-the-arts, proving that a simple yet universal approach for visual grounding is indeed feasible. Source code is available at https://github.com/sean-zhuh/SeqTR.

Keywords: Visual Grounding, Transformer

# 1 Introduction

Visual grounding [57,36,38,23,54] has emerged as a core problem in vision-language research, as both comprehensive intra-modality understanding and accurate one-to-one inter-modality correspondence establishment are required. According to the manner of grounding, it can be divided into two groups, i.e., phrase localization or referring expression comprehension (REC) at bounding box level [56,31,29,60,52,27,34,51,46,18,6,61,26], and referring expression segmentation (RES) at pixel level [56,53,2,17,20,30,34,10,50,33,21,8,26].


|
| 16 |
+
Fig. 1. Illustration of the serialization of grounding information. Our model directly generates the sequence of points representing the bounding box or binary mask.
|
| 17 |
+
|
| 18 |
+
box level [56,31,29,60,52,27,34,51,46,18,6,61,26], and referring expression segmentation (RES) at pixel level [56,53,2,17,20,30,34,10,50,33,21,8,26].
|
| 19 |
+
|
| 20 |
+
To accomplish accurate vision-language alignment, existing approaches often require substantial prior knowledge and expertise in designing network architectures and loss functions. For instance, MAttNet [56] decomposes language expressions into subject, location, and relationship phrases, and designs three corresponding attention modules to compute matching scores individually. Despite being faster, one-stage models also require complex language-guided multimodal fusion and reasoning modules [35,18,27,51,34], or sophisticated cross-modal alignment via various attention mechanisms [10,33,53,17,30,34,8]. Loss functions in existing methods are also complex and tailored to each individual grounding task, such as GIoU loss [43], set-based matching loss [1], focal loss [28], dice loss [37], and contrastive alignment loss [22]. Under a multi-task setting, coefficients among different losses also need to be carefully tuned to accommodate different tasks [34,26]. Despite great progress, these highly customized approaches still suffer from limited generalization ability.

Recent endeavors [6,8,22,26] in visual grounding shift to simplifying network architectures via Transformers [47]. Concretely, the multi-modal fusion and reasoning modules are replaced by a simple stack of transformer encoder layers [6,22,8]. However, the loss functions used in these transformer-based methods are still highly customized for each individual task [28,37,22,43,1]. Moreover, these approaches still require task-specific branches or heads [34,26], i.e., the bounding box regressor and convolutional mask decoder.

In this paper, we take a step forward in simplifying the modeling of visual grounding tasks via a simple yet universal network termed SeqTR. Specifically, inspired by the recently proposed Pix2Seq [3], we first reformulate visual grounding as a point prediction problem conditioned on image and text inputs, where the grounding information, e.g., the bounding box, is serialized into a sequence of discrete coordinate tokens. Under this paradigm, different grounding tasks can be universally accomplished in the proposed SeqTR with a standard transformer encoder-decoder architecture [47]. In SeqTR, the encoder serves to update the multi-modal feature representations, while the decoder directly predicts the discrete coordinate tokens of the grounding information in an auto-regressive manner. In terms of optimization, SeqTR only uses a simple cross-entropy loss for all grounding tasks, requiring no further prior knowledge or expertise. Overall, the proposed SeqTR greatly reduces the difficulty and complexity of both architecture design and optimization for visual grounding.

Notably, the proposed SeqTR is not just a simple multi-modal extension of Pix2Seq for the challenging open-ended visual grounding tasks. In addition to bridging the gap between object detection and visual grounding, we also apply the sequential modeling to RES via an innovative mask contour sampling scheme. As shown in Fig. 1, SeqTR transforms the pixel-wise binary mask into a sequence of $N$ points by performing clockwise sampling on the mask contour. In this case, RES, as a language-guided segmentation task, can be seamlessly integrated into the proposed SeqTR network without the additional convolutional mask decoder, demonstrating the high generalization ability of SeqTR across grounding tasks.

The proposed SeqTR achieves or is on par with the state-of-the-art performance on five benchmark datasets, i.e., RefCOCO [57], RefCOCO+ [57], RefCOCOg [36,38], ReferItGame [23], and Flickr30K Entities [39]. SeqTR also outperforms a set of large-scale BERT-style models [32,45,4,22] with much less pre-training expenditure. Main contributions are summarized as follows:

- We reformulate visual grounding tasks as a point prediction problem, and present a novel and general network, termed SeqTR, which unifies different grounding tasks in one model with the same cross-entropy loss.

- The proposed SeqTR is simple yet universal, and can be seamlessly extended to the referring expression segmentation task via an innovative mask contour sampling scheme without network architecture modifications.

- We achieve, or stay on par with, state-of-the-art performance on five visual grounding benchmark datasets, and also outperform a set of large-scale pre-trained models at much lower expenditure.

# 2 Related Work

# 2.1 Referring Expression Comprehension

Early practitioners [16,59,62,56,31,49,15,29] tackle referring expression comprehension (REC) following a two-stage pipeline, where region proposals [42] are first extracted and then ranked according to their similarity scores with the language query. Another line of work [60,52,27,34,51,46,18,6,61], being simpler and faster, advocates a one-stage pipeline based on dense anchors [42]. RealGIN [60] proposes adaptive feature selection and a global attentive reasoning unit to handle the diversity and complexity of language expressions. ReSC [51] recursively constructs sub-queries to predict the parameters of the normalization layers in the visual encoder, which are used to scale and shift visual features. LBYL [18] designs a landmark feature convolution to encode contextual information. Recent works [6,61,22,8,26] resort to Transformer-like structures [47] to perform multimodal fusion. MDETR [22] further demonstrates that the Transformer is efficient when pre-trained on a large corpus of data. Compared with existing approaches, our work is simple in both architecture and loss function, requiring little in the way of task priors and expert engineering.

# 2.2 Referring Expression Segmentation

Compared to REC, referring expression segmentation (RES) grounds the language query at a finer granularity, i.e., the precise pixel-wise binary mask. Typical solutions design various attention mechanisms to perform cross-modal alignment [33,34,8,53,10,2,17,19,30,20]. EFN [10] transforms the visual encoder into a multi-modal feature extractor with asymmetric co-attention, which fuses multimodal information at the feature learning stage. CGAN [33] performs cascaded attention reasoning with an instance-level attention loss to supervise attention modeling at each stage. LTS [21] first performs relevance filtering to locate the referent, and uses this visual object prior to perform dilated convolution for the final segmentation mask. VLT [8] produces a set of queries representing different understandings of the language expression and proposes a query balance module to focus on the most reasonable and suitable query, which is then used to decode the mask via a mask decoder. In this work, we are the first to regard RES as a point prediction problem, thus the proposed SeqTR can be seamlessly extended to RES without any network architecture modifications.

# 2.3 Multi-task Visual Grounding

Multi-task visual grounding aims to jointly address REC and RES. The prior art MCN [34] constrains the REC and RES branches to attend to the same region by applying consistent energy maximization. In this way, REC can help RES better localize the referent, and RES can help REC achieve superior cross-modal alignment. RefTR [26] tackles multi-task visual grounding by sharing the same transformer architecture, but it requires an additional convolutional mask decoder for RES. In contrast, the proposed SeqTR is universal across different grounding tasks without additional branches or heads. Under the point prediction paradigm, SeqTR can segment the referent without aid from the REC branch.

# 3 Method

In this section, we introduce our simple yet universal SeqTR network for visual grounding, whose structure is depicted in Fig. 2. The objective function is detailed in Sec. 3.1. Sequence construction from grounding information is elaborated in Sec. 3.2. The architecture and inference are presented in Sec. 3.3.

# 3.1 Problem Definition

Unlike existing visual grounding models [34,6,10,21,8], SeqTR aims to predict the discrete coordinate tokens of the grounding information, e.g., the bounding box or binary mask. To this end, we define the optimization objective under the point prediction paradigm as:

$$
\mathcal{L} = -\sum_{i=1}^{2N} w_i \log P\left(T_i \mid F_m, S_{1:i-1}\right), \tag{1}
$$


|
| 65 |
+
Fig. 2. Overview of the proposed SeqTR network, of which all components, i.e., multimodal fusion, cross-modal interaction, and loss function, are standard operations and shared across grounding tasks.
|
| 66 |
+
|
| 67 |
+
where $S$ and $T$ are the input and target sequences for the decoder, as shown in Fig. 2. $F_{m} \in R^{(H*W)\times C}$ denotes the multi-modal features detailed in Sec. 3.3. A per-token weight $w_{i}$ is used to scale the loss. Note that the input sequence $S_{1:i-1}$ only contains the preceding coordinate tokens when predicting the $i$-th one. This can be implemented by putting a causal mask [40] on the attention weights so that the decoder only attends to previous coordinate tokens.

We construct the input sequence by prepending a [TASK] token to the sequence of points $\{x_i, y_i\}_{i=1}^N$, and the target sequence is the same sequence appended with an [EOS] token. These two special tokens, which are learnable embeddings, indicate the start and end of the sequence. The [TASK] token also indicates which grounding task the model performs. To achieve multi-task visual grounding, we can equip each task with a corresponding [TASK] token randomly initialized with different parameters, showing great simplicity and generalization ability.

Under our point prediction reformulation, the simple cross-entropy loss conditioned on multi-modal features and preceding discrete coordinate tokens can be directly shared across tasks, avoiding the complex deployment of hand-crafted loss functions and loss-coefficient tuning [37,28,1,22,43].

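
A minimal PyTorch sketch of the objective in Eq. 1 with illustrative shapes is given below; in the real model, the logits would come from the causally masked decoder described above.

```python
import torch
import torch.nn.functional as F

# Sketch of Eq. 1: weighted cross-entropy over a target token sequence.
B, L, V = 2, 5, 1002            # batch, sequence length, vocabulary (bins + specials)
logits = torch.randn(B, L, V)   # decoder outputs for P(T_i | F_m, S_{1:i-1})
targets = torch.randint(0, V, (B, L))
w = torch.ones(L)               # per-token weights w_i

log_probs = F.log_softmax(logits, dim=-1)
token_nll = -log_probs.gather(-1, targets.unsqueeze(-1)).squeeze(-1)  # (B, L)
loss = (w * token_nll).mean()
print(loss)
```
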
# 3.2 Sequence Construction from Grounding Information

A key design in SeqTR is to serialize and quantize the grounding information, e.g., the bounding box or binary mask, into a sequence of discrete coordinate tokens, which enables different grounding tasks to be universally addressed in one network architecture with the same objective.

We first review the serialization and quantization of the bounding box introduced in Pix2Seq [3]. Given a sequence of floating points $\{\tilde{x}_i,\tilde{y}_i\}_{i = 1}^N$ representing the top-left and bottom-right corner points of the bounding box ($N$ is 2), these floating coordinates are quantized into integer bins by

$$
x_i = \operatorname{round}\left(\frac{\tilde{x}_i}{w} * M\right), \quad y_i = \operatorname{round}\left(\frac{\tilde{y}_i}{h} * M\right), \tag{2}
$$

where each coordinate is normalized by the image width $w$ and height $h$, and $M$ is the number of quantization bins. We refer readers to Pix2Seq [3] for more discretization details. In practice, we construct a shared embedding vocabulary $E \in R^{M \times C}$ for both the $x$-axis and $y$-axis.

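
A small sketch of the quantization in Eq. 2 and its inverse follows; the corner points are hypothetical and $M = 1000$ matches the setting in Sec. 4.3.

```python
import numpy as np

def quantize(points_xy, w, h, M=1000):
    """Eq. 2: normalize float coordinates by image size, map to M integer bins."""
    pts = np.asarray(points_xy, dtype=float)
    x = np.round(pts[:, 0] / w * M).astype(int)
    y = np.round(pts[:, 1] / h * M).astype(int)
    return np.stack([x, y], axis=1)

def dequantize(bins_xy, w, h, M=1000):
    """Inverse mapping used at inference to recover image-scale coordinates."""
    b = np.asarray(bins_xy, dtype=float)
    return np.stack([b[:, 0] / M * w, b[:, 1] / M * h], axis=1)

box = [(103.7, 58.2), (412.9, 300.4)]       # top-left, bottom-right corners
print(quantize(box, w=640, h=640))           # [[162  91] [645 469]]
```
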

|
| 86 |
+
(a)
|
| 87 |
+
|
| 88 |
+

|
| 89 |
+
(b)
|
| 90 |
+
|
| 91 |
+

|
| 92 |
+
(c)
|
| 93 |
+
Fig. 3. Visualization of different sampling strategies. (a-b) are the original image and ground-truth. (c-d) are the sampled points and reassembled mask of center-based sampling, respectively, while (e-f) are the ones of uniform sampling.
|
| 94 |
+
|
| 95 |
+

|
| 96 |
+
(d)
|
| 97 |
+
|
| 98 |
+

|
| 99 |
+
(e)
|
| 100 |
+
|
| 101 |
+

|
| 102 |
+
(f)
|
| 103 |
+
|
| 104 |
+
discretization details. In practice, we construct a shared embedding vocabulary $E \in R^{M \times C}$ for both $x$ -axis and $y$ -axis.
|
| 105 |
+
|
| 106 |
+
While bounding boxes can be naturally determined by two of their corner points and serialized into a sequence as in Eq. 2, binary masks cannot. A binary mask consists of infinitely many points, whose quantity and positions significantly impact the details of the mask; thus the above serialization and quantization for bounding boxes is not directly applicable to binary masks.

To address this issue, we propose an innovative mask contour sampling scheme for sequence construction from binary masks. As shown in Fig. 3, we sample $N$ points clockwise from the consecutive mask contour of the referred object; the sequence of sampled points can then be quantized via Eq. 2. The following sampling strategies are experimented with:

- Center-based sampling. Starting from the mass center of the binary mask, $N$ rays are emitted with the same angle interval. The intersection points between these rays and the mask contour are sampled clockwise.
- Uniform sampling. We uniformly sample $N$ points clockwise on top of the mask contour, which is much simpler compared to the first strategy.

Compared to center-based sampling, uniform sampling distributes the sampled points along the mask contour more evenly, and can better represent an irregular mask, especially when the outline between two adjacent sampled points is tortuous. As shown in Fig. 3, center-based sampling loses the fine details of the zebra legs, while uniform sampling preserves the mask contour more precisely.

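
A minimal sketch of the uniform sampling strategy is shown below, assuming the contour is given as an ordered array of points (e.g., as returned by OpenCV's `findContours`):

```python
import numpy as np

def uniform_contour_sample(contour, n=36):
    """Sample n points at equal arc-length spacing along a closed contour."""
    pts = np.vstack([contour, contour[:1]])          # close the loop
    seg = np.linalg.norm(np.diff(pts, axis=0), axis=1)
    cum = np.concatenate([[0.0], np.cumsum(seg)])
    targets = np.linspace(0, cum[-1], n, endpoint=False)
    out = []
    for t in targets:
        i = np.searchsorted(cum, t, side="right") - 1
        alpha = (t - cum[i]) / max(seg[i], 1e-12)    # interpolate within segment i
        out.append(pts[i] + alpha * (pts[i + 1] - pts[i]))
    return np.asarray(out)

square = np.array([[0, 0], [0, 10], [10, 10], [10, 0]], dtype=float)
print(uniform_contour_sample(square, n=8))  # 8 evenly spaced boundary points
```
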
In practice, the proposed sampling scheme slightly restricts the performance upper bound of RES; e.g., uniformly sampling 36 points from ground-truth masks achieves $95.63$ mIoU on the RefCOCO validation set. Considering current state-of-the-art performance, such a defect is still acceptable. Besides, even if we took the precise binary mask as ground truth, the upper bound still would not reach $100$ mIoU, since down-sampling operations are often necessary.

Both center-based and uniform sampling use a deterministic (clockwise) ordering of the points in the sequence for the binary mask; however, a binary mask is determined only by the points' positions, not by their ordering. Hence we randomly shuffle the points' order, which enables the model to learn which point to predict next. In Sec. 4.5, we thoroughly study the proposed sampling scheme.

# 3.3 Architecture

Language Encoder. To demonstrate the efficacy of SeqTR, we do not opt for pre-trained language encoders such as BERT [7]; instead, the language encoder is a one-layer bidirectional GRU [5]. We concatenate both unidirectional hidden states $h_t = [\overrightarrow{h_t}; \overleftarrow{h_t}]$ at each step $t$ to form word features $\{h_t\}_{t=1}^T$.

Visual Encoder. The multi-scale features of the visual encoder are unidirectionally down-sampled from the finest to the coarsest spatial resolution, and flattened to generate visual features $F_{v} \in R^{(H*W) \times C}$ as input to the fusion module. $H$ and $W$ are 32 times smaller than the original image size. In contrast to previous work on the RES task [34,21,10,33], we only use the coarsest-scale visual features instead of the finest ones, as we do not predict the binary mask pixel-wise, which reduces the memory footprint during training.

Fusion. Different from Pix2Seq [3], which only perceives pixel inputs, we devise a simple yet efficient fusion module to align the vision and language modalities. Given visual features $F_{v}$ and word features $\{h_t\}_{t = 1}^T$, we first construct the language feature $f_{l}\in R^{C}$ by max-pooling the word features along the channel dimension. We use the Hadamard product between $F_{v}$ and $f_{l}$, without linear projection, to produce the multi-modal features $F_{m}\in R^{(H*W)\times C}$ fed to the transformer encoder:

$$
F_{m,i} = \sigma\left(F_{v,i}\right) \odot \sigma\left(f_l\right), \tag{3}
$$

where $\sigma$ is the tanh function. Note that we do not concatenate word features and visual features and then use the transformer encoder to perform fusion as in [6,22,8], because the complexity would increase quadratically.

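
Eq. 3 amounts to a broadcasted element-wise product after tanh squashing, as in this short sketch with illustrative shapes:

```python
import torch

HW, C = 400, 256                          # flattened 20x20 feature map, channel dim
F_v = torch.randn(HW, C)                  # visual features
f_l = torch.randn(C)                      # pooled language feature
F_m = torch.tanh(F_v) * torch.tanh(f_l)   # f_l broadcasts over all spatial positions
print(F_m.shape)                          # torch.Size([400, 256])
```
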
Transformer and Predictor. The standard transformer encoder updates the feature representations of the multi-modal features $F_{m}$, while the decoder predicts the target sequence in an auto-regressive manner. The hidden dimension of the transformer is set to 256, the expansion rate in the feed-forward network (FFN) is 4, and the numbers of encoder and decoder layers are 6 and 3, respectively. This results in an extremely compact transformer. Since the transformer is permutation-invariant, $F_{m}$ and the input sequence are augmented with sine and learned positional encodings [47], respectively. To predict the coordinate tokens, an MLP with a final softmax function is used.

Inference. During inference, coordinates are generated in an auto-regressive manner: each coordinate is the argmax-ed index of the probabilities over the vocabulary $E$, mapped back to the original image scale via the inversion of Eq. 2. We predict exactly 4 discrete coordinate tokens for REC, while leaving the decision of when to stop to the [EOS] token for RES. The predicted sequence is assembled to form the bounding box or binary mask for evaluation.

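
A sketch of this greedy auto-regressive decoding loop follows; the decoder stub, vocabulary size, and special-token ids are all hypothetical.

```python
import torch

def greedy_decode(decoder, memory, task_token, eos_id, max_len=40):
    seq = [task_token]
    for _ in range(max_len):
        logits = decoder(torch.tensor([seq]), memory)   # (1, len(seq), vocab)
        nxt = int(logits[0, -1].argmax())               # most likely next token
        if nxt == eos_id:
            break
        seq.append(nxt)
    return seq[1:]                                      # predicted coordinate bins

def stub(seq, memory):
    # A stand-in decoder that always favors EOS, keeping the sketch runnable.
    logits = torch.zeros(1, seq.shape[1], 1002)
    logits[..., 1001] = 1.0
    return logits

print(greedy_decode(stub, memory=None, task_token=1000, eos_id=1001))  # []
```
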
# 4 Experiments

# 4.1 Datasets

RefCOCO/RefCOCO+/RefCOCOg. RefCOCO [57] contains 142,210 referring expressions, 50,000 referred objects, and 19,994 images. Referring expressions in the testA set mostly describe people, while the ones in the testB set mainly describe objects other than people. Similarly, RefCOCO+ [57] contains 141,564 expressions, 49,856 referred objects, and 19,992 images. Compared to RefCOCO, referring expressions of RefCOCO+ describe more attributes of the referent, e.g., color, shape, and digits, and avoid words of absolute spatial location. RefCOCOg [36,38] has two types of partition strategy, i.e., the google split [36] and the umd split [38]. Both splits have 95,010 referring expressions, 49,822 referred objects, and 25,799 images. For the umd split, we use the validation set as the test set following [10,50,18,51]. The language length of RefCOCOg is 8.4 words on average, while that of RefCOCO and RefCOCO+ is only 3.6 and 3.5 words, respectively.

ReferItGame [23] contains 120,072 referring expressions and 99,220 referents for 19,997 images collected from the SAIAPR-12 [9] dataset. We use the cleaned berkeley split to partition the dataset, which consists of 54,127, 5,842, and 60,103 referring expressions in the train, validation, and test sets, respectively.

Flickr30K. Language queries in Flickr30K Entities [39] are short region phrases instead of full sentences and may contain multiple objects. It contains 31,783 images with 427K referred entities in the train, validation, and test sets.

Pre-training dataset. Following [22], we merge region descriptions from the Visual Genome (VG) [25] dataset, annotations from the RefCOCO [57], RefCOCO+ [57], RefCOCOg [36,38], and ReferItGame [23] datasets, and Flickr30K Entities [39]. This results in approximately 6.1M distinct language expressions and 174k images in the train set, which is fewer than the 200k images used in [22].

# 4.2 Evaluation Metrics

For REC and phrase localization, we evaluate the performance using Precision@0.5: a prediction is deemed correct if its intersection over union (IoU) with the ground-truth box is larger than 0.5. For RES, we use $mIoU$ as the evaluation metric. Precision at the 0.5, 0.7, and 0.9 thresholds is also used for ablation.

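
For illustration, a self-contained sketch of the Precision@0.5 criterion with hypothetical boxes:

```python
def box_iou(a, b):
    """IoU of two boxes given as (x1, y1, x2, y2)."""
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area = lambda r: (r[2] - r[0]) * (r[3] - r[1])
    return inter / (area(a) + area(b) - inter)

pred, gt = (10, 10, 110, 110), (20, 20, 120, 120)
print(box_iou(pred, gt), box_iou(pred, gt) > 0.5)  # ~0.68 -> counted as correct
```
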
# 4.3 Implementation Details

We train SeqTR for 60 epochs for REC and phrase localization, and 90 epochs for RES, with batch size 128. The Adam [24] optimizer with an initial learning rate of 5e-4 is used, which decays the learning rate by a factor of 10 after 50 epochs and 75 epochs for the detection and segmentation grounding tasks, respectively. Following standard practices [6,34,21,8], images are resized to $640 \times 640$, and the length of language expressions is trimmed at 15 words for RefCOCO/+ and 20 for RefCOCOg. For ablation, we train SeqTR for 30 epochs unless otherwise stated. During pre-training, SeqTR is trained for 15 epochs and fine-tuned for another 5 epochs. The number of quantization bins is set to 1000. We use DarkNet-53 [41] as the visual encoder. More details are provided in the appendix.

# 4.4 Comparisons with State-of-the-Arts

In this section, we compare the proposed SeqTR with state-of-the-art methods on five benchmark datasets, i.e., RefCOCO, RefCOCO+, RefCOCOg, ReferItGame, and Flickr30K Entities. Tab. 1 and Tab. 3 show the performance on the REC and RES tasks.

Table 1. Comparison with the state-of-the-arts on the REC task. Visual encoders of models with $\dagger$ are trained without excluding val/test images of the three datasets. RN101 refers to ResNet101 [13] and DN53 denotes DarkNet53 [41].

<table><tr><td rowspan="2">Models</td><td rowspan="2">Visual Encoder</td><td colspan="3">RefCOCO</td><td colspan="3">RefCOCO+</td><td colspan="3">RefCOCOg</td><td rowspan="2">Time (ms)</td></tr><tr><td>val</td><td>testA</td><td>testB</td><td>val</td><td>testA</td><td>testB</td><td>val-g</td><td>val-u</td><td>test-u</td></tr><tr><td colspan="12">Two-stage</td></tr><tr><td>CMN [16]</td><td>VGG16</td><td>-</td><td>71.03</td><td>65.77</td><td>-</td><td>54.32</td><td>47.76</td><td>57.47</td><td>-</td><td>-</td><td>-</td></tr><tr><td>VC [59]</td><td>VGG16</td><td>-</td><td>73.33</td><td>67.44</td><td>-</td><td>58.40</td><td>53.18</td><td>62.30</td><td>-</td><td>-</td><td>-</td></tr><tr><td>ParalAttn [62]</td><td>VGG16</td><td>-</td><td>75.31</td><td>65.52</td><td>-</td><td>61.34</td><td>50.86</td><td>58.03</td><td>-</td><td>-</td><td>-</td></tr><tr><td>MAttNet [56]</td><td>RN101</td><td>76.40</td><td>80.43</td><td>69.28</td><td>64.93</td><td>70.26</td><td>56.00</td><td>-</td><td>66.58</td><td>67.27</td><td>320</td></tr><tr><td>CM-Att-Erase [31]</td><td>RN101</td><td>78.35</td><td>83.14</td><td>71.32</td><td>68.09</td><td>73.65</td><td>58.03</td><td>-</td><td>67.99</td><td>68.67</td><td>-</td></tr><tr><td>DGA [49]</td><td>VGG16</td><td>-</td><td>78.42</td><td>65.53</td><td>-</td><td>69.07</td><td>51.99</td><td>-</td><td>-</td><td>63.28</td><td>341</td></tr><tr><td>RvG-Tree [15]</td><td>RN101</td><td>75.06</td><td>78.61</td><td>69.85</td><td>63.51</td><td>67.45</td><td>56.66</td><td>-</td><td>66.95</td><td>66.51</td><td>-</td></tr><tr><td>NMTree [29]</td><td>RN101</td><td>76.41</td><td>81.21</td><td>70.09</td><td>66.46</td><td>72.02</td><td>57.52</td><td>64.62</td><td>65.87</td><td>66.44</td><td>-</td></tr><tr><td colspan="12">One-stage</td></tr><tr><td>RealGIN [60]</td><td>DN53</td><td>77.25</td><td>78.70</td><td>72.10</td><td>62.78</td><td>67.17</td><td>54.21</td><td>-</td><td>62.75</td><td>62.33</td><td>35</td></tr><tr><td>FAOA† [52]</td><td>DN53</td><td>71.15</td><td>74.88</td><td>66.32</td><td>56.86</td><td>61.89</td><td>49.46</td><td>-</td><td>59.44</td><td>58.90</td><td>39</td></tr><tr><td>RCCF [27]</td><td>DLA34</td><td>-</td><td>81.06</td><td>71.85</td><td>-</td><td>70.35</td><td>56.32</td><td>-</td><td>-</td><td>65.73</td><td>25</td></tr><tr><td>MCN [34]</td><td>DN53</td><td>80.08</td><td>82.29</td><td>74.98</td><td>67.16</td><td>72.86</td><td>57.31</td><td>-</td><td>66.46</td><td>66.01</td><td>56</td></tr><tr><td>ReSCL[51]</td><td>DN53</td><td>77.63</td><td>80.45</td><td>72.30</td><td>63.59</td><td>68.36</td><td>56.81</td><td>63.12</td><td>67.30</td><td>67.20</td><td>36</td></tr><tr><td>Iter-Shrinking [46]</td><td>RN101</td><td>-</td><td>74.27</td><td>68.10</td><td>-</td><td>71.05</td><td>58.25</td><td>-</td><td>-</td><td>70.05</td><td>-</td></tr><tr><td>LBYL† [18]</td><td>DN53</td><td>79.67</td><td>82.91</td><td>74.15</td><td>68.64</td><td>73.38</td><td>59.49</td><td>62.70</td><td>-</td><td>-</td><td>30</td></tr><tr><td>TransVG [6]</td><td>RN101</td><td>81.02</td><td>82.72</td><td>78.35</td><td>64.82</td><td>70.70</td><td>56.94</td><td>67.02</td><td>68.67</td><td>67.73</td><td>62</td></tr><tr><td>TRAR† [61]</td><td>DN53</td><td>-</td><td>81.40</td><td>78.60</td><td>-</td><td>69.10</td><td>56.10</td><td>-</td><td>68.90</td><td>68.30</td><td>-</td></tr><tr><td>SeqTR (ours)</td><td>DN53</td><td>81.23</td><td>85.00</td><td>76.08</td><td>68.82</td><td>75.37</td><td>58.78</td><td>-</td><td>71.35</td><td>71.58</td><td>50</td></tr><tr><td>SeqTR† 
(ours)</td><td>DN53</td><td>83.72</td><td>86.51</td><td>81.24</td><td>71.45</td><td>76.26</td><td>64.88</td><td>71.50</td><td>74.86</td><td>74.21</td><td>50</td></tr></table>

Tab. 4 reports the results of SeqTR pre-trained on the large corpus of data. The performance on the ReferItGame and Flickr30K Entities datasets is given in Tab. 2.

The performance of SeqTR on the REC and phrase localization tasks is illustrated in Tab. 1 and Tab. 2. From Tab. 1, our model performs better than two-stage models, especially MAttNet [56], while being 6 times faster. We also surpass one-stage models that exploit prior and expert knowledge, with a $+2 - 7\%$ absolute improvement over LBYL [18] and ReSC [51]. Although we predict discrete coordinate tokens in an auto-regressive manner, the inference time of SeqTR is only $50\mathrm{ms}$, which is real-time and comparable with one-stage models. Among transformer-based models, SeqTR surpasses TransVG [6] and TRAR [61] with up to $6.27\%$ absolute performance improvement. Our SeqTR achieves new state-of-the-art performance with a simple architecture and loss function on the RefCOCO [57], RefCOCO+ [57], and RefCOCOg [36,38] datasets. On the ReferItGame and Flickr30K Entities datasets, which mostly contain short noun phrases, the performance reaches 69.66 and 81.23, a large margin over previous one-stage methods [52,44,27,51] and comparable with current state-of-the-art methods [6,26].

Table 2. Comparison with the state-of-the-art models on the test sets of the Flickr30K Entities [39] and ReferItGame [23] datasets.

<table><tr><td>Models</td><td>Visual Encoder</td><td>ReferIt Game test</td><td>Flickr30k test</td><td>Time (ms)</td></tr><tr><td colspan="5">Two-stage</td></tr><tr><td>MAttNet [56]</td><td>RN101</td><td>29.04</td><td>-</td><td>320</td></tr><tr><td>SimilarityNet [48]</td><td>RN101</td><td>34.54</td><td>60.89</td><td>184</td></tr><tr><td>DDPN [58]</td><td>RN101</td><td>63.00</td><td>73.30</td><td>-</td></tr><tr><td colspan="5">One-stage</td></tr><tr><td>FAOA [52]</td><td>DN53</td><td>60.67</td><td>68.71</td><td>23</td></tr><tr><td>ZSGNet [44]</td><td>RN50</td><td>58.63</td><td>63.39</td><td>-</td></tr><tr><td>RCCF [27]</td><td>DLA34</td><td>63.79</td><td>-</td><td>25</td></tr><tr><td>ReSCL [51]</td><td>DN53</td><td>64.60</td><td>69.28</td><td>36</td></tr><tr><td>TransVG [6]</td><td>RN101</td><td>70.73</td><td>79.10</td><td>62</td></tr><tr><td>RefTR [26]</td><td>RN101</td><td>71.42</td><td>78.66</td><td>40</td></tr><tr><td>SeqTR (ours)</td><td>DN53</td><td>69.66</td><td>81.23</td><td>50</td></tr></table>

SeqTR can be seamlessly extended to RES without any modification of the network architecture, since we reformulate the task as a point prediction problem. As shown in Tab. 3, we outperform various models with sophisticated cross-modal alignment and reasoning mechanisms [21,33,10,34,53,19,30]. SeqTR is on par with the current state-of-the-art VLT [8], which selectively aggregates responses from diversified queries, whereas we directly produce the corresponding segmentation mask and establish a one-to-one correspondence. When initialized with parameters pre-trained on the large corpus of data, the performance improves by up to $10.78\%$ absolutely, proving that a simple yet universal approach for visual grounding is indeed feasible.
|
| 176 |
+
|
| 177 |
+
From Tab. 4, when pre-trained on the large corpus of text-image pairs, SeqTR is more data-efficient than the current state-of-the-art [22]. Our transformer architecture contains only 7.9M parameters, fewer than half of MDETR's [22] 17.36M, while the performance is superior, especially on the RefCOCOg dataset, with up to a $2.48\%$ improvement.
|
| 178 |
+
|
| 179 |
+
# 4.5 Ablation Studies
|
| 180 |
+
|
| 181 |
+
To give a comprehensive understanding of SeqTR, we present ablation studies on the validation sets of the RefCOCO [57], RefCOCO+ [57], and RefCOCOg [38] datasets in this section.
|
| 182 |
+
|
| 183 |
+
Construction of language feature. The language feature in Sec. 3.3 can be constructed by max/mean pooling of the word features or by directly using the final hidden state of the bi-GRU. As shown in the upper part of Tab. 5, max pooling performs best and is the default construction throughout this paper.
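As a concrete illustration, the three constructions compared in the upper part of Tab. 5 can be sketched as follows. This is a minimal example assuming bi-GRU word features; the tensor sizes are illustrative, not the released configuration.

```python
import torch
import torch.nn as nn

# Hypothetical sizes; the released model may use different dimensions.
gru = nn.GRU(input_size=300, hidden_size=256, bidirectional=True, batch_first=True)
words = torch.randn(2, 15, 300)                 # (batch, num_words, embed_dim)
word_feats, last_hidden = gru(words)            # word_feats: (batch, 15, 512)

f_mean = word_feats.mean(dim=1)                 # mean pooling over words
f_max = word_feats.max(dim=1).values            # max pooling (best in Tab. 5)
f_last = last_hidden.permute(1, 0, 2).reshape(words.size(0), -1)  # final hidden state
```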
|
| 184 |
+
|
| 185 |
+
Token weight. If previously predicted points are inaccurate, the model cannot recover from the wrong predictions since the inference is sequential. Hence, we increase the weights of the first few tokens to penalize errors more heavily on the first several predicted
|
| 186 |
+
|
| 187 |
+
Table 3. Comparison with state-of-the-art methods on the RES task. The model marked with * is pre-trained on the large corpus of data.
|
| 188 |
+
|
| 189 |
+
<table><tr><td rowspan="2">Models</td><td rowspan="2">Visual Encoder</td><td colspan="3">RefCOCO</td><td colspan="3">RefCOCO+</td><td colspan="3">RefCOCOg</td></tr><tr><td>val</td><td>testA</td><td>testB</td><td>val</td><td>testA</td><td>testB</td><td>val-g</td><td>val-u</td><td>test-u</td></tr><tr><td>MAttNet [56]</td><td>RN101</td><td>56.51</td><td>62.37</td><td>51.70</td><td>46.67</td><td>52.39</td><td>40.08</td><td>-</td><td>47.64</td><td>48.61</td></tr><tr><td>CMSA [53]</td><td>RN101</td><td>58.32</td><td>60.61</td><td>55.09</td><td>43.76</td><td>47.60</td><td>37.89</td><td>39.98</td><td>-</td><td>-</td></tr><tr><td>STEP [2]</td><td>RN101</td><td>60.04</td><td>63.46</td><td>57.97</td><td>48.19</td><td>52.33</td><td>40.41</td><td>46.40</td><td>-</td><td>-</td></tr><tr><td>BRINet [17]</td><td>RN101</td><td>60.98</td><td>62.99</td><td>59.21</td><td>48.17</td><td>52.32</td><td>42.11</td><td>48.04</td><td>-</td><td>-</td></tr><tr><td>CMPC [19]</td><td>RN101</td><td>61.36</td><td>64.53</td><td>59.64</td><td>49.56</td><td>53.44</td><td>43.23</td><td>49.05</td><td>-</td><td>-</td></tr><tr><td>LSCM [20]</td><td>RN101</td><td>61.47</td><td>64.99</td><td>59.55</td><td>49.34</td><td>53.12</td><td>43.50</td><td>48.05</td><td>-</td><td>-</td></tr><tr><td>CMPC+ [30]</td><td>RN101</td><td>62.47</td><td>65.08</td><td>60.82</td><td>50.25</td><td>54.04</td><td>43.47</td><td>49.89</td><td>-</td><td>-</td></tr><tr><td>MCN [34]</td><td>DN53</td><td>62.44</td><td>64.20</td><td>59.71</td><td>50.62</td><td>54.99</td><td>44.69</td><td>-</td><td>49.22</td><td>49.40</td></tr><tr><td>EFN [10]</td><td>WRN101</td><td>62.76</td><td>65.69</td><td>59.67</td><td>51.50</td><td>55.24</td><td>43.01</td><td>51.93</td><td>-</td><td>-</td></tr><tr><td>BUSNet [50]</td><td>RN101</td><td>63.27</td><td>66.41</td><td>61.39</td><td>51.76</td><td>56.87</td><td>44.13</td><td>50.56</td><td>-</td><td>-</td></tr><tr><td>CGAN [33]</td><td>DN53</td><td>64.86</td><td>68.04</td><td>62.07</td><td>51.03</td><td>55.51</td><td>44.06</td><td>46.54</td><td>51.01</td><td>51.69</td></tr><tr><td>LTS [21]</td><td>DN53</td><td>65.43</td><td>67.76</td><td>63.08</td><td>54.21</td><td>58.32</td><td>48.02</td><td>-</td><td>54.40</td><td>54.25</td></tr><tr><td>VLT [8]</td><td>DN56</td><td>65.65</td><td>68.29</td><td>62.73</td><td>55.50</td><td>59.20</td><td>49.36</td><td>49.76</td><td>52.99</td><td>56.65</td></tr><tr><td>SeqTR (ours)</td><td>DN53</td><td>67.26</td><td>69.79</td><td>64.12</td><td>54.14</td><td>58.93</td><td>48.19</td><td>-</td><td>55.67</td><td>55.64</td></tr><tr><td>SeqTR* (ours)</td><td>DN53</td><td>71.70</td><td>73.31</td><td>69.82</td><td>63.04</td><td>66.73</td><td>58.97</td><td>-</td><td>64.69</td><td>65.74</td></tr></table>
|
| 190 |
+
|
| 191 |
+
Table 4. Comparison with pre-trained models on RefCOCO [57], RefCOCO+ [57], and RefCOCOg [38] datasets. We only count the parameters of transformer architecture.
|
| 192 |
+
|
| 193 |
+
<table><tr><td rowspan="2">Models</td><td rowspan="2">Visual Encoder</td><td rowspan="2">Params (M)</td><td rowspan="2">Pre-train images</td><td colspan="3">RefCOCO</td><td colspan="3">RefCOCO+</td><td>RefCOCOg</td><td></td></tr><tr><td>val</td><td>testA</td><td>testB</td><td>val</td><td>testA</td><td>testB</td><td>val-u</td><td>test-u</td></tr><tr><td>ViBERT [32]</td><td>RN101</td><td>-</td><td>3.3M</td><td>-</td><td>-</td><td>-</td><td>72.34</td><td>78.52</td><td>62.61</td><td>-</td><td>-</td></tr><tr><td>VL-BERTL [45]</td><td>RN101</td><td>-</td><td>3.3M</td><td>-</td><td>-</td><td>-</td><td>72.59</td><td>78.57</td><td>62.30</td><td>-</td><td>-</td></tr><tr><td>UNITERL [4]</td><td>RN101</td><td>-</td><td>4.6M</td><td>81.41</td><td>87.04</td><td>74.17</td><td>75.90</td><td>81.45</td><td>66.70</td><td>74.86</td><td>75.77</td></tr><tr><td>VILLA[11]</td><td>RN101</td><td>-</td><td>4.6M</td><td>82.39</td><td>87.48</td><td>74.84</td><td>76.17</td><td>81.54</td><td>66.84</td><td>76.18</td><td>76.71</td></tr><tr><td>ERNIE-ViIL [55]</td><td>RN101</td><td>-</td><td>4.3M</td><td>-</td><td>-</td><td>-</td><td>75.95</td><td>82.07</td><td>66.88</td><td>-</td><td>-</td></tr><tr><td>MDETR [22]</td><td>RN101</td><td>17.36</td><td>200K</td><td>86.75</td><td>89.58</td><td>81.41</td><td>79.52</td><td>84.09</td><td>70.62</td><td>81.64</td><td>80.89</td></tr><tr><td>RefTR [26]</td><td>RN101</td><td>17.86</td><td>100K</td><td>85.65</td><td>88.73</td><td>81.16</td><td>77.55</td><td>82.26</td><td>68.99</td><td>79.25</td><td>80.01</td></tr><tr><td>SeqTR (ours)</td><td>DN53</td><td>7.90</td><td>174K</td><td>87.00</td><td>90.15</td><td>83.59</td><td>78.69</td><td>84.51</td><td>71.87</td><td>82.69</td><td>83.37</td></tr></table>
|
| 194 |
+
|
| 195 |
+
discrete coordinate tokens. As shown in the lower part of Tab. 5, increasing the weight of the first token is better than increasing those of the later tokens, and setting the 1st token weight to 1.5 while keeping all subsequent tokens at 1 gives the best performance. We set $w_{i} = 1, \forall i$ for the RES task.
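These token weights amount to a position-weighted cross-entropy over the target sequence. The sketch below is not the released implementation; the batch size, sequence length, and vocabulary size are illustrative. It applies the best setting from Tab. 5, with weight 1.5 on the first position and 1 elsewhere.

```python
import torch
import torch.nn.functional as F

batch, seq_len, num_bins = 4, 5, 1000      # hypothetical sizes
logits = torch.randn(batch, seq_len, num_bins)
targets = torch.randint(0, num_bins, (batch, seq_len))  # discrete coordinate tokens

# Per-position weights: penalize the first predicted token more heavily.
w = torch.tensor([1.5, 1.0, 1.0, 1.0, 1.0])

per_token = F.cross_entropy(logits.flatten(0, 1), targets.flatten(), reduction="none")
loss = (per_token.view(batch, seq_len) * w).mean()
```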
|
| 196 |
+
|
| 197 |
+
Sampling scheme. We verify the upper bound as the mIoU between the mask assembled from the sampled points and the original ground truth. From Fig. 4 (a), the mIoU approaches nearly 100 as the number of sampled points increases, i.e., 95.57 for uniform sampling and 91.58 for center-based sampling. Therefore, although the upper bound is theoretically limited, in practice the research effort might be better spent on improving the real-world performance. In terms of sampling strategies, from Fig. 4 (a) and Fig. 4 (c-e), uniform sampling is consistently better than center-based sampling in both the upper bound and the performance, as it preserves more details of the mask, as illustrated in Fig. 3. The number of sampled points controls the trade-off between
|
| 198 |
+
|
| 199 |
+
Table 5. Ablation experiments on the construction of language feature and token weight. The first token is the [TASK] token, while subsequent tokens are discrete coordinate tokens, i.e., $(x_{1},y_{1},x_{2},y_{2})$ .
|
| 200 |
+
|
| 201 |
+
<table><tr><td rowspan="2">Language feature</td><td colspan="5">Token weight</td><td rowspan="2">RefCOCO val</td><td rowspan="2">RefCOCO+ val</td><td rowspan="2">RefCOCOg val-u</td></tr><tr><td>1st</td><td>2nd</td><td>3rd</td><td>4th</td><td>5th</td></tr><tr><td>mean pooling</td><td></td><td></td><td></td><td></td><td></td><td>79.73</td><td>67.12</td><td>68.97</td></tr><tr><td>max pooling</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>80.07</td><td>68.31</td><td>69.95</td></tr><tr><td>final hidden state</td><td></td><td></td><td></td><td></td><td></td><td>79.85</td><td>67.46</td><td>69.93</td></tr><tr><td rowspan="6">max pooling</td><td>1</td><td>1</td><td>1</td><td>1</td><td>1</td><td>80.07</td><td>68.31</td><td>69.95</td></tr><tr><td>1.5</td><td>1</td><td>1</td><td>1</td><td>1</td><td>80.10</td><td>68.63</td><td>70.05</td></tr><tr><td>2</td><td>1</td><td>1</td><td>1</td><td>1</td><td>80.19</td><td>68.33</td><td>70.01</td></tr><tr><td>3</td><td>1</td><td>1</td><td>1</td><td>1</td><td>80.08</td><td>67.81</td><td>69.45</td></tr><tr><td>1</td><td>2</td><td>2</td><td>1</td><td>1</td><td>79.70</td><td>67.22</td><td>69.51</td></tr><tr><td>2</td><td>2</td><td>2</td><td>1</td><td>1</td><td>80.16</td><td>67.83</td><td>69.45</td></tr></table>
|
| 202 |
+
|
| 203 |
+

|
| 204 |
+
(a)
|
| 205 |
+
|
| 206 |
+

|
| 207 |
+
(b)
|
| 208 |
+
|
| 209 |
+

|
| 210 |
+
(c) RefCOCO
|
| 211 |
+
|
| 212 |
+

|
| 213 |
+
(d) RefCOCO+
|
| 214 |
+
|
| 215 |
+

|
| 216 |
+
(e) RefCOCOg
|
| 217 |
+
Fig. 4. Ablation experiments on the RES task. (a) The upper bound is averaged over the validation sets (the fluctuation is within 0.2). (b) Shuffling percentage refers to the fraction of shuffled sequences within a batch; the uniform sampling strategy is used. (c-e) depict the impact of sampling strategies and the number of sampled points.
|
| 218 |
+
|
| 219 |
+
the inference speed and the performance; from Fig. 4 (c-e), we can see that 18 sampled points work best for RefCOCO and 12 for RefCOCO+/RefCOCOg.
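For reference, uniform sampling by arc length can be sketched as follows. This is an illustrative implementation under our own assumptions, not the released code.

```python
import numpy as np

def uniform_sample(contour: np.ndarray, n_points: int) -> np.ndarray:
    """contour: (M, 2) vertices of a closed polygon; returns (n_points, 2)
    points spaced uniformly by arc length along the contour."""
    pts = np.vstack([contour, contour[:1]])            # close the loop
    seg = np.linalg.norm(np.diff(pts, axis=0), axis=1) # segment lengths
    cum = np.concatenate([[0.0], np.cumsum(seg)])      # cumulative length
    targets = np.linspace(0.0, cum[-1], n_points, endpoint=False)
    idx = np.searchsorted(cum, targets, side="right") - 1
    t = (targets - cum[idx]) / np.maximum(seg[idx], 1e-8)
    return pts[idx] + t[:, None] * (pts[idx + 1] - pts[idx])
```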
|
| 220 |
+
|
| 221 |
+
Shuffling percentage. We train SeqTR for 60 epochs instead of 30, as we empirically find that point shuffling takes longer to converge, since the ground truth differs for each coordinate token at each forward pass. Fig. 4 (b) shows that no shuffling is best for RefCOCO and a percentage of 0.2 is best for RefCOCO+/RefCOCOg. As the fraction of shuffled sequences increases, the performance drops slightly, and we observe that SeqTR under-fits, since the mIoU during training is lower than without shuffling.
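Point shuffling itself is a light-weight augmentation; a hedged sketch is given below, where `ratio` plays the role of the shuffling percentage and all names are illustrative.

```python
import torch

def shuffle_points(points: torch.Tensor, ratio: float) -> torch.Tensor:
    """points: (batch, n_points, 2). Randomly permute the point order for
    a `ratio` fraction of the sequences in the batch."""
    out = points.clone()
    n_shuffle = int(points.size(0) * ratio)
    for i in torch.randperm(points.size(0))[:n_shuffle]:
        out[i] = points[i][torch.randperm(points.size(1))]
    return out
```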
|
| 222 |
+
|
| 223 |
+
Multi-task training. Previous multi-task visual grounding approaches require REC to help RES locate the referent. In contrast, SeqTR is capable of locating
|
| 224 |
+
|
| 225 |
+
Table 6. Ablation study of multi-task training. IE is the inconsistency error [34] to measure the prediction conflict between REC and RES, $\downarrow$ denotes the lower is better.
|
| 226 |
+
|
| 227 |
+
<table><tr><td rowspan="2">Dataset</td><td rowspan="2">Multi-task training</td><td>REC</td><td colspan="4">RES</td><td rowspan="2">IE↓</td></tr><tr><td>Prec@0.5</td><td>Prec@0.5</td><td>Prec@0.7</td><td>Prec@0.9</td><td>mIoU</td></tr><tr><td rowspan="2">RefCOCO</td><td>✗</td><td>80.38</td><td>78.03</td><td>63.35</td><td>9.75</td><td>64.20</td><td>13.93</td></tr><tr><td>✓</td><td>79.65</td><td>77.24</td><td>60.29</td><td>7.23</td><td>62.93</td><td>5.86</td></tr><tr><td rowspan="2">RefCOCO+</td><td>✗</td><td>67.98</td><td>65.11</td><td>48.27</td><td>5.19</td><td>52.22</td><td>22.22</td></tr><tr><td>✓</td><td>68.79</td><td>66.67</td><td>51.02</td><td>5.46</td><td>53.65</td><td>4.85</td></tr><tr><td rowspan="2">RefCOCOg</td><td>✗</td><td>69.63</td><td>65.20</td><td>46.23</td><td>5.31</td><td>53.25</td><td>22.65</td></tr><tr><td>✓</td><td>70.29</td><td>65.20</td><td>46.05</td><td>5.15</td><td>53.25</td><td>8.25</td></tr></table>
|
| 230 |
+
|
| 231 |
+
the referent at the pixel level without the aid of REC. We train SeqTR for 60 epochs and test whether multi-task supervision brings further improvement. For the input sequence construction of multi-task grounding, please see the supplementary material. From Tab. 6, we can see that multi-task supervision even slightly degrades the performance compared to the single-task variant. Though the inconsistency error significantly decreases, the localization ability of RES measured by Prec@0.5, 0.7, and 0.9 stays roughly the same, suggesting that the sampled points are independent between the sequences of the bounding box and the binary mask.
|
| 232 |
+
|
| 233 |
+
# 4.6 Qualitative Results
|
| 234 |
+
|
| 235 |
+
We visualize the cross-attention map averaged over decoder layers and attention heads in Fig. 5. At each prediction step, SeqTR generates a coordinate token given the previous output tokens. Under this setting, a clear pattern emerges: the model attends to the left side of the referent when predicting $x_{1}$ , the top side of the referent when predicting $y_{1}$ , and so on. This axial attention is sensitive to the boundary of the referent and can thus ground the referred object more precisely. The predicted masks are visualized in Fig. 6. SeqTR comprehends attributive words and absolute or relative spatial relations well, and the predicted mask aligns with the irregular outlines of the referred object, such as "left cow". More qualitative results are given in the appendix.
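Such a visualization can be produced by averaging the decoder cross-attention maps over layers and heads. The sketch below uses random tensors as stand-ins for hooked attention weights, since the hook API of the released code is not shown here; all shapes are illustrative.

```python
import torch

# attn_maps: one entry per decoder layer, each (num_heads, tgt_len, src_len).
attn_maps = [torch.rand(8, 5, 400) for _ in range(6)]  # placeholder tensors
avg = torch.stack(attn_maps).mean(dim=(0, 1))          # (tgt_len, src_len)
step_map = avg[0].reshape(20, 20)                      # attention when predicting x1
```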
|
| 236 |
+
|
| 237 |
+
# 5 Conclusions
|
| 238 |
+
|
| 239 |
+
In this paper, we reformulate visual grounding tasks as a point prediction problem and present an innovative and general network termed SeqTR. Built on the standard transformer encoder-decoder architecture and a cross-entropy loss, SeqTR unifies different visual grounding tasks under the same point prediction paradigm without any task-specific modifications. Experimental results demonstrate that SeqTR grounds language queries onto the corresponding regions well, suggesting that a simple yet universal approach for visual grounding is indeed feasible.
|
| 240 |
+
|
| 241 |
+

|
| 242 |
+
Fig. 5. Visualization of normalized cross attention map in transformer decoder. From left to right column, we generate $(x_{1},y_{1},x_{2},y_{2})$ in sequential order.
|
| 243 |
+
|
| 244 |
+

|
| 245 |
+
Fig. 6. Example mask predictions by SeqTR on the validation set of RefCOCO dataset, best viewed in color.
|
| 246 |
+
|
| 247 |
+
Acknowledgements. This work was supported by the National Science Fund for Distinguished Young Scholars (No. 62025603), the National Natural Science Foundation of China (No. U21B2037, No. 62176222, No. 62176223, No. 62176226, No. 62072386, No. 62072387, No. 62072389, and No. 62002305), Guangdong Basic and Applied Basic Research Foundation (No. 2019B1515120049), and the Natural Science Foundation of Fujian Province of China (No. 2021J01002).
|
| 248 |
+
|
| 249 |
+
# References
|
| 250 |
+
|
| 251 |
+
1. Carion, N., Massa, F., Synnaeve, G., Usunier, N., Kirillov, A., Zagoruyko, S.: End-to-end object detection with transformers. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 213-229 (2020)
|
| 252 |
+
2. Chen, D.J., Jia, S., Lo, Y.C., Chen, H.T., Liu, T.L.: See-through-text grouping for referring image segmentation. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 7454-7463 (2019)
|
| 253 |
+
3. Chen, T., Saxena, S., Li, L., Fleet, D.J., Hinton, G.: Pix2seq: A language modeling framework for object detection. arXiv preprint arXiv:2109.10852 (2021)
|
| 254 |
+
4. Chen, Y.C., Li, L., Yu, L., El Kholy, A., Ahmed, F., Gan, Z., Cheng, Y., Liu, J.: Uniter: Universal image-text representation learning. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 104-120 (2020)
|
| 255 |
+
5. Chung, J., Gulcehre, C., Cho, K., Bengio, Y.: Empirical evaluation of gated recurrent neural networks on sequence modeling. arXiv preprint arXiv:1412.3555 (2014)
|
| 256 |
+
6. Deng, J., Yang, Z., Chen, T., Zhou, W., Li, H.: Transvg: End-to-end visual grounding with transformers. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 1769-1779 (2021)
|
| 257 |
+
7. Devlin, J., Chang, M.W., Lee, K., Toutanova, K.: Bert: Pre-training of deep bidirectional transformers for language understanding. arXiv preprint arXiv:1810.04805 (2018)
|
| 258 |
+
8. Ding, H., Liu, C., Wang, S., Jiang, X.: Vision-language transformer and query generation for referring segmentation. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 16321-16330 (2021)
|
| 259 |
+
9. Escalante, H.J., Hernández, C.A., González, J.A., López-López, A., Montes, M., Morales, E.F., Sucar, L.E., Villasenor, L., Grubinger, M.: The segmented and annotated iapr tc-12 benchmark. Computer Vision and Image Understanding (CVIU) 114(4), 419-428 (2010)
|
| 260 |
+
10. Feng, G., Hu, Z., Zhang, L., Lu, H.: Encoder fusion network with co-attention embedding for referring image segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 15506-15515 (2021)
|
| 261 |
+
11. Gan, Z., Chen, Y.C., Li, L., Zhu, C., Cheng, Y., Liu, J.: Large-scale adversarial training for vision-and-language representation learning. Advances in Neural Information Processing Systems (NeurIPS) 33, 6616-6628 (2020)
|
| 262 |
+
12. Ghiasi, G., Cui, Y., Srinivas, A., Qian, R., Lin, T.Y., Cubuk, E.D., Le, Q.V., Zoph, B.: Simple copy-paste is a strong data augmentation method for instance segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 2918-2928 (2021)
|
| 263 |
+
13. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 770-778 (2016)
|
| 264 |
+
14. Holtzman, A., Buys, J., Du, L., Forbes, M., Choi, Y.: The curious case of neural text degeneration. arXiv preprint arXiv:1904.09751 (2019)
|
| 265 |
+
15. Hong, R., Liu, D., Mo, X., He, X., Zhang, H.: Learning to compose and reason with language tree structures for visual grounding. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) (2019)
|
| 266 |
+
16. Hu, R., Rohrbach, M., Andreas, J., Darrell, T., Saenko, K.: Modeling relationships in referential expressions with compositional modular networks. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 1115-1124 (2017)
|
| 267 |
+
|
| 268 |
+
17. Hu, Z., Feng, G., Sun, J., Zhang, L., Lu, H.: Bi-directional relationship inferring network for referring image segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 4424-4433 (2020)
|
| 269 |
+
18. Huang, B., Lian, D., Luo, W., Gao, S.: Look before you leap: Learning landmark features for one-stage visual grounding. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 16888-16897 (2021)
|
| 270 |
+
19. Huang, S., Hui, T., Liu, S., Li, G., Wei, Y., Han, J., Liu, L., Li, B.: Referring image segmentation via cross-modal progressive comprehension. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 10488-10497 (2020)
|
| 271 |
+
20. Hui, T., Liu, S., Huang, S., Li, G., Yu, S., Zhang, F., Han, J.: Linguistic structure guided context modeling for referring image segmentation. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 59-75 (2020)
|
| 272 |
+
21. Jing, Y., Kong, T., Wang, W., Wang, L., Li, L., Tan, T.: Locate then segment: A strong pipeline for referring image segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 9858-9867 (2021)
|
| 273 |
+
22. Kamath, A., Singh, M., LeCun, Y., Synnaeve, G., Misra, I., Carion, N.: Mdetr-modulated detection for end-to-end multi-modal understanding. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 1780-1790 (2021)
|
| 274 |
+
23. Kazemzadeh, S., Ordonez, V., Matten, M., Berg, T.: Referitgame: Referring to objects in photographs of natural scenes. In: Proceedings of the Conference on Empirical Methods in Natural Language Processing (EMNLP). pp. 787-798 (2014)
|
| 275 |
+
24. Kingma, D.P., Ba, J.: Adam: A method for stochastic optimization. arXiv preprint arXiv:1412.6980 (2014)
|
| 276 |
+
25. Krishna, R., Zhu, Y., Groth, O., Johnson, J., Hata, K., Kravitz, J., Chen, S., Kalantidis, Y., Li, L.J., Shamma, D.A., et al.: Visual genome: Connecting language and vision using crowdsourced dense image annotations. International Journal of Computer Vision (IJCV) 123(1), 32-73 (2017)
|
| 277 |
+
26. Li, M., Sigal, L.: Referring transformer: A one-step approach to multi-task visual grounding. Advances in Neural Information Processing Systems (NeurIPS) 34 (2021)
|
| 278 |
+
27. Liao, Y., Liu, S., Li, G., Wang, F., Chen, Y., Qian, C., Li, B.: A real-time cross-modality correlation filtering method for referring expression comprehension. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 10880-10889 (2020)
|
| 279 |
+
28. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dólár, P.: Focal loss for dense object detection. In: Proceedings of the IEEE International Conference on Computer Vision (ICCV). pp. 2980-2988 (2017)
|
| 280 |
+
29. Liu, D., Zhang, H., Wu, F., Zha, Z.J.: Learning to assemble neural module tree networks for visual grounding. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 4673-4682 (2019)
|
| 281 |
+
30. Liu, S., Hui, T., Huang, S., Wei, Y., Li, B., Li, G.: Cross-modal progressive comprehension for referring segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) (2021)
|
| 282 |
+
31. Liu, X., Wang, Z., Shao, J., Wang, X., Li, H.: Improving referring expression grounding with cross-modal attention-guided erasing. In: Proceedings of the
|
| 283 |
+
|
| 284 |
+
IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 1950-1959 (2019)
|
| 285 |
+
32. Lu, J., Batra, D., Parikh, D., Lee, S.: Vilbert: Pretraining task-agnostic visiolinguistic representations for vision-and-language tasks. Advances in Neural Information Processing Systems (NeurIPS) 32 (2019)
|
| 286 |
+
33. Luo, G., Zhou, Y., Ji, R., Sun, X., Su, J., Lin, C.W., Tian, Q.: Cascade grouped attention network for referring expression segmentation. In: Proceedings of the 28th ACM International Conference on Multimedia (MM). pp. 1274-1282 (2020)
|
| 287 |
+
34. Luo, G., Zhou, Y., Sun, X., Cao, L., Wu, C., Deng, C., Ji, R.: Multi-task collaborative network for joint referring expression comprehension and segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 10034-10043 (2020)
|
| 288 |
+
35. Luo, G., Zhou, Y., Sun, X., Ding, X., Wu, Y., Huang, F., Gao, Y., Ji, R.: Towards language-guided visual recognition via dynamic convolutions. arXiv preprint arXiv:2110.08797 (2021)
|
| 289 |
+
36. Mao, J., Huang, J., Toshev, A., Camburu, O., Yuille, A.L., Murphy, K.: Generation and comprehension of unambiguous object descriptions. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 11-20 (2016)
|
| 290 |
+
37. Milletari, F., Navab, N., Ahmadi, S.A.: V-net: Fully convolutional neural networks for volumetric medical image segmentation. In: Proceedings of the Fourth International Conference on 3D Vision (3DV). pp. 565-571. IEEE (2016)
|
| 291 |
+
38. Nagaraja, V.K., Morariu, V.I., Davis, L.S.: Modeling context between objects for referring expression understanding. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 792-807. Springer (2016)
|
| 292 |
+
39. Plummer, B.A., Wang, L., Cervantes, C.M., Caicedo, J.C., Hockenmaier, J., Lazebnik, S.: Flickr30k entities: Collecting region-to-phrase correspondences for richer image-to-sentence models. International Journal of Computer Vision (IJCV) 123(1), 74-93 (2017)
|
| 293 |
+
40. Radford, A., Wu, J., Child, R., Luan, D., Amodei, D., Sutskever, I., et al.: Language models are unsupervised multitask learners. OpenAI blog 1(8), 9 (2019)
|
| 294 |
+
41. Redmon, J., Farhadi, A.: Yolov3: An incremental improvement. arXiv preprint arXiv:1804.02767 (2018)
|
| 295 |
+
42. Ren, S., He, K., Girshick, R., Sun, J.: Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in Neural Information Processing Systems (NeurIPS) 28 (2015)
|
| 296 |
+
43. Rezatofighi, H., Tsoi, N., Gwak, J., Sadeghian, A., Reid, I., Savarese, S.: Generalized intersection over union: A metric and a loss for bounding box regression. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 658-666 (2019)
|
| 297 |
+
44. Sadhu, A., Chen, K., Nevatia, R.: Zero-shot grounding of objects from natural language queries. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 4694-4703 (2019)
|
| 298 |
+
45. Su, W., Zhu, X., Cao, Y., Li, B., Lu, L., Wei, F., Dai, J.: Vl-bert: Pre-training of generic visual-linguistic representations. arXiv preprint arXiv:1908.08530 (2019)
|
| 299 |
+
46. Sun, M., Xiao, J., Lim, E.G.: Iterative shrinking for referring expression grounding using deep reinforcement learning. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 14060-14069 (2021)
|
| 300 |
+
47. Vaswani, A., Shazeer, N., Parmar, N., Uszkoreit, J., Jones, L., Gomez, A.N., Kaiser, L., Polosukhin, I.: Attention is all you need. Advances in Neural Information Processing Systems (NeurIPS) 30 (2017)
|
| 301 |
+
|
| 302 |
+
48. Wang, L., Li, Y., Huang, J., Lazebnik, S.: Learning two-branch neural networks for image-text matching tasks. IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI) 41(2), 394-407 (2018)
|
| 303 |
+
49. Yang, S., Li, G., Yu, Y.: Dynamic graph attention for referring expression comprehension. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 4644-4653 (2019)
|
| 304 |
+
50. Yang, S., Xia, M., Li, G., Zhou, H.Y., Yu, Y.: Bottom-up shift and reasoning for referring image segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 11266-11275 (2021)
|
| 305 |
+
51. Yang, Z., Chen, T., Wang, L., Luo, J.: Improving one-stage visual grounding by recursive sub-query construction. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 387-404 (2020)
|
| 306 |
+
52. Yang, Z., Gong, B., Wang, L., Huang, W., Yu, D., Luo, J.: A fast and accurate one-stage approach to visual grounding. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 4683-4693 (2019)
|
| 307 |
+
53. Ye, L., Rochan, M., Liu, Z., Wang, Y.: Cross-modal self-attention network for referring image segmentation. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR). pp. 10502-10511 (2019)
|
| 308 |
+
54. Young, P., Lai, A., Hodosh, M., Hockenmaier, J.: From image descriptions to visual denotations: New similarity metrics for semantic inference over event descriptions. Transactions of the Association for Computational Linguistics (TACL) 2, 67-78 (2014)
|
| 309 |
+
55. Yu, F., Tang, J., Yin, W., Sun, Y., Tian, H., Wu, H., Wang, H.: Ernie-vil: Knowledge enhanced vision-language representations through scene graph. arXiv preprint arXiv:2006.16934 (2020)
|
| 310 |
+
56. Yu, L., Lin, Z., Shen, X., Yang, J., Lu, X., Bansal, M., Berg, T.L.: Mattnet: Modular attention network for referring expression comprehension. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR) (June 2018)
|
| 311 |
+
57. Yu, L., Poirson, P., Yang, S., Berg, A.C., Berg, T.L.: Modeling context in referring expressions. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 69-85 (2016)
|
| 312 |
+
58. Yu, Z., Yu, J., Xiang, C., Zhao, Z., Tian, Q., Tao, D.: Rethinking diversified and discriminative proposal generation for visual grounding. arXiv preprint arXiv:1805.03508 (2018)
|
| 313 |
+
59. Zhang, H., Niu, Y., Chang, S.F.: Grounding referring expressions in images by variational context. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 4158-4166 (2018)
|
| 314 |
+
60. Zhou, Y., Ji, R., Luo, G., Sun, X., Su, J., Ding, X., Lin, C.W., Tian, Q.: A real-time global inference network for one-stage referring expression comprehension. IEEE Transactions on Neural Networks and Learning Systems (TNNLS) (2021)
|
| 315 |
+
61. Zhou, Y., Ren, T., Zhu, C., Sun, X., Liu, J., Ding, X., Xu, M., Ji, R.: Trar: Routing the attention spans in transformer for visual question answering. In: Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV). pp. 2074-2084 (2021)
|
| 316 |
+
62. Zhuang, B., Wu, Q., Shen, C., Reid, I., Van Den Hengel, A.: Parallel attention: A unified framework for visual object discovery through dialogs and queries. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition (CVPR). pp. 4252-4261 (2018)
|
| 317 |
+
|
| 318 |
+
# A Appendix
|
| 319 |
+
|
| 320 |
+
# A.1 More implementation details
|
| 321 |
+
|
| 322 |
+
Exponential moving average (EMA) with a decay rate of 0.999 is used to accelerate training convergence, following [22]. In contrast to previous methods [52,51,6], in which random color distortion, affine transformation, and horizontal flipping are used to augment the images, we do not perform any data augmentation except large-scale jittering (LSJ) [12], following [3], with a jittering strength of 0.3 to 1.4. EMA and LSJ are disabled during pre-training and in the ablation studies. Label smoothing with a smoothing factor of 0.1 is used to regularize the predictor. Training for 60 epochs takes nearly a day on a single V100 GPU without mixed-precision training.
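For reference, the EMA update with a decay rate of 0.999 can be sketched as below. This is a minimal version operating on parameters only (buffers omitted for brevity) and is not the authors' exact code.

```python
import copy
import torch

@torch.no_grad()
def ema_update(ema_model, student, decay=0.999):
    # Blend each EMA parameter toward the current student parameter.
    for p_ema, p in zip(ema_model.parameters(), student.parameters()):
        p_ema.mul_(decay).add_(p, alpha=1.0 - decay)

model = torch.nn.Linear(8, 8)
ema_model = copy.deepcopy(model)
# ... after each optimizer step:
ema_update(ema_model, model)
```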
|
| 323 |
+
|
| 324 |
+
# A.2 Sequence construction for Multi-task grounding
|
| 325 |
+
|
| 326 |
+
We construct the input and target sequences for the transformer decoder as shown in Fig. 7 when performing multi-task visual grounding. The construction is similar to the single-task variant except that there are two distinct [TASK] tokens: one for the grounding task at the bounding-box level, i.e., REC or phrase localization, and the other for the grounding task at the pixel level. As discussed in the paper, multi-task training does not improve the performance; hence, we report the single-task trained results.
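One plausible reading of this construction, written out for concreteness, is sketched below. It is clearly hypothetical: the token ids and the exact layout are assumptions, with each [TASK] token acting as the start symbol of its sub-sequence under teacher forcing, so that the target is the input shifted left by one position.

```python
REC, RES = 1001, 1002  # hypothetical [TASK] token ids

def build_multitask_sequences(box, mask):
    """box: 4 coordinate tokens (x1, y1, x2, y2); mask: flattened point tokens."""
    seq = [REC] + box + [RES] + mask
    dec_input, target = seq[:-1], seq[1:]  # standard teacher-forced shift
    return dec_input, target

inp, tgt = build_multitask_sequences([10, 20, 30, 40], [12, 18, 25, 33, 40, 22])
```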
|
| 327 |
+
|
| 328 |
+

|
| 329 |
+
Fig. 7. Sequence construction from the bounding box and binary mask for multi-task visual grounding. [REC] and [RES] are the [TASK] tokens randomly initialized with different parameters. Coordinates with superscript $b$ are for the bounding box and $m$ for the binary mask.
|
| 330 |
+
|
| 331 |
+
# A.3 Nucleus sampling for RES
|
| 332 |
+
|
| 333 |
+
During inference, each predicted discrete coordinate token is the argmax-ed index over the normalized probabilities. Here we study the impact of the stochastic nucleus sampling [3,14] strategy widely used in the natural language generation community, which reduces duplication and increases diversity in the predicted sequence. As shown in Tab. 7, nucleus sampling does not improve the quality of the generated sequence representing the predicted binary mask and introduces an additional hyper-parameter $p$ ; hence, we use argmax in the paper.
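Nucleus sampling truncates the ranked token distribution to the smallest prefix whose cumulative probability reaches $p$ and samples from it. A generic sketch is given below; it is not tied to the released code, and $p \le 0$ falls back to argmax selection as in Tab. 7.

```python
import torch

def nucleus_sample(logits: torch.Tensor, p: float) -> torch.Tensor:
    """logits: (num_bins,). Returns the index of one sampled coordinate token."""
    if p <= 0.0:
        return logits.argmax()
    probs = logits.softmax(dim=-1)
    sorted_probs, order = probs.sort(descending=True)
    # Keep tokens whose preceding cumulative mass is below p (first is always kept).
    keep = sorted_probs.cumsum(dim=-1) - sorted_probs < p
    sorted_probs[~keep] = 0.0
    choice = torch.multinomial(sorted_probs, 1)  # renormalizes internally
    return order[choice].squeeze()
```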
|
| 334 |
+
|
| 335 |
+
Table 7. The effect of $p$ in nucleus sampling, which samples from a truncated ranked list of discrete coordinate tokens. Setting $p$ to 0 is equivalent to argmax selection.
|
| 336 |
+
|
| 337 |
+
<table><tr><td>top-p</td><td>RefCOCO val</td><td>RefCOCO+ val</td><td>RefCOCOg val-u</td></tr><tr><td>0.0</td><td>67.26</td><td>54.14</td><td>55.67</td></tr><tr><td>0.1</td><td>66.76</td><td>54.66</td><td>55.54</td></tr><tr><td>0.2</td><td>66.72</td><td>54.78</td><td>55.49</td></tr><tr><td>0.3</td><td>66.68</td><td>54.71</td><td>55.46</td></tr><tr><td>0.4</td><td>66.50</td><td>54.60</td><td>55.37</td></tr><tr><td>0.5</td><td>66.38</td><td>54.34</td><td>55.08</td></tr><tr><td>0.6</td><td>66.15</td><td>54.04</td><td>54.79</td></tr></table>
|
| 338 |
+
|
| 339 |
+
# A.4 More qualitative results
|
| 340 |
+
|
| 341 |
+
As shown in Fig. 8, the wrong predictions (marked with red boxes) fall mainly into two groups: the prediction either shifts to an object of the same category as the referent that is not referenced in the query, or it aligns only with the largest segment of the referent. The first case could be addressed with a better multi-modal fusion module that suppresses salient objects; however, to demonstrate the efficacy of our overall network, we do not resort to such a potentially complex fusion module. When the ground-truth binary mask contains multiple segments, i.e., the referent is occluded by other objects, we only find the contour of the largest segment and sample points on it, discarding the other segments. As a result, SeqTR grounds the query onto the largest segment of the mask; this reflects the preprocessing rather than an inability of our model to segment.
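A hedged sketch of this preprocessing step follows, using OpenCV (version 4 or later assumed, and a non-empty mask); the exact flags and handling in the released code may differ.

```python
import cv2
import numpy as np

def largest_contour(mask: np.ndarray) -> np.ndarray:
    """mask: (H, W) binary array. Returns the (M, 2) contour of the largest
    connected segment; smaller segments are discarded."""
    contours, _ = cv2.findContours(mask.astype(np.uint8),
                                   cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    biggest = max(contours, key=cv2.contourArea)
    return biggest.squeeze(1)  # (M, 1, 2) -> (M, 2)
```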
|
| 342 |
+
|
| 343 |
+

|
| 344 |
+
Silver car in back of horse cart
|
| 345 |
+
|
| 346 |
+

|
| 347 |
+
Guy with red logo on shirt
|
| 348 |
+
|
| 349 |
+

|
| 350 |
+
|
| 351 |
+

|
| 352 |
+
Arms crossed
|
| 353 |
+
|
| 354 |
+

|
| 355 |
+
Bearded guy second from left looking at cake
|
| 356 |
+
|
| 357 |
+

|
| 358 |
+
Motorcycle on right
|
| 359 |
+
|
| 360 |
+

|
| 361 |
+
Red color shirt blurry
|
| 362 |
+
|
| 363 |
+

|
| 364 |
+
Guy tucking hand into breast pocket left
|
| 365 |
+
|
| 366 |
+

|
| 367 |
+
Guy on right with arms crossed
|
| 368 |
+
|
| 369 |
+

|
| 370 |
+
Motorcycle with man in orange shirt on it
|
| 371 |
+
|
| 372 |
+

|
| 373 |
+
Right woman
|
| 374 |
+
|
| 375 |
+

|
| 376 |
+
Boy second from right front row
|
| 377 |
+
|
| 378 |
+

|
| 379 |
+
Bottom green on yellow area
|
| 380 |
+
|
| 381 |
+

|
| 382 |
+
Guy instructing blue coat
|
| 383 |
+
|
| 384 |
+

|
| 385 |
+
The wine glass the animal is on top of the one with not very much wine
|
| 386 |
+
|
| 387 |
+

|
| 388 |
+
|
| 389 |
+

|
| 390 |
+
|
| 391 |
+

|
| 392 |
+
The old man in a light blue shirt looking to the left
|
| 393 |
+
Baby head
|
| 394 |
+
Fig. 8. Visualizations of the predicted masks. Ground-truth binary masks can be inferred from the language query.
|
| 395 |
+
|
| 396 |
+

|
| 397 |
+
Man standing
|
| 398 |
+
|
| 399 |
+

|
| 400 |
+
Bike next to stripe of rope
|
| 401 |
+
Center baby elephant
|
| 402 |
+
Front woman
|
| 403 |
+
|
| 404 |
+

|
| 405 |
+
Dark umbrella
|
2203.16xxx/2203.16265/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d10c4a3f90c52094be4ce6dd75b0a5f45451da6686ee1e18fadf6c743fd6350a
|
| 3 |
+
size 979224
|
2203.16xxx/2203.16265/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.16xxx/2203.16317/b564aed0-5bb1-4ebd-a32c-a4a45cb20e3c_content_list.json
ADDED
|
@@ -0,0 +1,1752 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "PseCo: Pseudo Labeling and Consistency Training for Semi-Supervised Object Detection",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
227,
|
| 8 |
+
141,
|
| 9 |
+
774,
|
| 10 |
+
186
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Gang Li $^{1,2}$ , Xiang Li $^{1\\star}$ , Yujie Wang $^{2}$ , Yichao Wu $^{2}$ , Ding Liang $^{2}$ , and Shanshan Zhang $^{1\\star}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
253,
|
| 19 |
+
210,
|
| 20 |
+
750,
|
| 21 |
+
243
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "$^{1}$ Nanjing University of Science and Technology",
|
| 28 |
+
"bbox": [
|
| 29 |
+
339,
|
| 30 |
+
253,
|
| 31 |
+
661,
|
| 32 |
+
268
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "$^{2}$ SenseTime Research",
|
| 39 |
+
"bbox": [
|
| 40 |
+
424,
|
| 41 |
+
268,
|
| 42 |
+
575,
|
| 43 |
+
281
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "{gang.li, xiang.li.implus, shanshan.zhang}@njust.edu.cn",
|
| 50 |
+
"bbox": [
|
| 51 |
+
289,
|
| 52 |
+
282,
|
| 53 |
+
712,
|
| 54 |
+
297
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "{wangyujie,wuyichao,liangding}@sensetime.com",
|
| 61 |
+
"bbox": [
|
| 62 |
+
331,
|
| 63 |
+
297,
|
| 64 |
+
671,
|
| 65 |
+
310
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "Abstract. In this paper, we delve into two key techniques in Semi-Supervised Object Detection (SSOD), namely pseudo labeling and consistency training. We observe that these two techniques currently neglect some important properties of object detection, hindering efficient learning on unlabeled data. Specifically, for pseudo labeling, existing works only focus on the classification score yet fail to guarantee the localization precision of pseudo boxes; For consistency training, the widely adopted random-resize training only considers the label-level consistency but misses the feature-level one, which also plays an important role in ensuring the scale invariance. To address the problems incurred by noisy pseudo boxes, we design Noisy Pseudo box Learning (NPL) that includes Prediction-guided Label Assignment (PLA) and Positive-proposal Consistency Voting (PCV). PLA relies on model predictions to assign labels and makes it robust to even coarse pseudo boxes; while PCV leverages the regression consistency of positive proposals to reflect the localization quality of pseudo boxes. Furthermore, in consistency training, we propose Multi-view Scale-invariant Learning (MSL) that includes mechanisms of both label- and feature-level consistency, where feature consistency is achieved by aligning shifted feature pyramids between two images with identical content but varied scales. On COCO benchmark, our method, termed PSEudo labeling and COnsistency training (PseCo), outperforms the SOTA (Soft Teacher) by 2.0, 1.8, 2.0 points under $1\\%$ , $5\\%$ , and $10\\%$ labelling ratios, respectively. It also significantly improves the learning efficiency for SSOD, e.g., PseCo halves the training time of the SOTA approach but achieves even better performance. Code is available at https://github.com/ligang-cs/PseCo.",
|
| 72 |
+
"bbox": [
|
| 73 |
+
259,
|
| 74 |
+
343,
|
| 75 |
+
738,
|
| 76 |
+
704
|
| 77 |
+
],
|
| 78 |
+
"page_idx": 0
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"text": "Keywords: Semi-supervised Learning, Object Detection",
|
| 83 |
+
"bbox": [
|
| 84 |
+
261,
|
| 85 |
+
717,
|
| 86 |
+
643,
|
| 87 |
+
731
|
| 88 |
+
],
|
| 89 |
+
"page_idx": 0
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"text": "1 Introduction",
|
| 94 |
+
"text_level": 1,
|
| 95 |
+
"bbox": [
|
| 96 |
+
215,
|
| 97 |
+
756,
|
| 98 |
+
374,
|
| 99 |
+
771
|
| 100 |
+
],
|
| 101 |
+
"page_idx": 0
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"type": "text",
|
| 105 |
+
"text": "With the rapid development of deep learning, many computer vision tasks achieve significant improvements, such as image classification [2], object detection [15,1,9],",
|
| 106 |
+
"bbox": [
|
| 107 |
+
212,
|
| 108 |
+
786,
|
| 109 |
+
790,
|
| 110 |
+
816
|
| 111 |
+
],
|
| 112 |
+
"page_idx": 0
|
| 113 |
+
},
|
| 114 |
+
{
|
| 115 |
+
"type": "aside_text",
|
| 116 |
+
"text": "arXiv:2203.16317v2 [cs.CV] 20 Jul 2022",
|
| 117 |
+
"bbox": [
|
| 118 |
+
22,
|
| 119 |
+
270,
|
| 120 |
+
57,
|
| 121 |
+
705
|
| 122 |
+
],
|
| 123 |
+
"page_idx": 0
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"type": "page_footnote",
|
| 127 |
+
"text": "* Corresponding author.",
|
| 128 |
+
"bbox": [
|
| 129 |
+
217,
|
| 130 |
+
824,
|
| 131 |
+
385,
|
| 132 |
+
839
|
| 133 |
+
],
|
| 134 |
+
"page_idx": 0
|
| 135 |
+
},
|
| 136 |
+
{
|
| 137 |
+
"type": "image",
|
| 138 |
+
"img_path": "images/22183c7d304a45751aa955179dbf762c3da2b1684e0d63be29215c4288c7e5c4.jpg",
|
| 139 |
+
"image_caption": [
|
| 140 |
+
"(a) Precision of pseudo boxes"
|
| 141 |
+
],
|
| 142 |
+
"image_footnote": [],
|
| 143 |
+
"bbox": [
|
| 144 |
+
230,
|
| 145 |
+
161,
|
| 146 |
+
375,
|
| 147 |
+
248
|
| 148 |
+
],
|
| 149 |
+
"page_idx": 1
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"type": "image",
|
| 153 |
+
"img_path": "images/19ca529aa41e8d9df62580c62d39f3f72ce5e241ff1224f944f5b65b861f57c0.jpg",
|
| 154 |
+
"image_caption": [
|
| 155 |
+
"(b) Relations between real quality and prediction consistency",
|
| 156 |
+
"Fig. 1: (a) The precision of pseudo boxes under various IoU thresholds. (b) The scatter diagram of the relation between the prediction consistency and their true localization quality. Some dots falling in the orange ellipse are caused by annotation errors. We show some examples in Fig. 5. (c) One specific example to demonstrate that noisy pseudo boxes will mislead label assignment."
|
| 157 |
+
],
|
| 158 |
+
"image_footnote": [],
|
| 159 |
+
"bbox": [
|
| 160 |
+
377,
|
| 161 |
+
161,
|
| 162 |
+
526,
|
| 163 |
+
247
|
| 164 |
+
],
|
| 165 |
+
"page_idx": 1
|
| 166 |
+
},
|
| 167 |
+
{
|
| 168 |
+
"type": "image",
|
| 169 |
+
"img_path": "images/6a6ca2962027d18afa787255016a6c419f54f113a50c730829ab464d7762dd96.jpg",
|
| 170 |
+
"image_caption": [
|
| 171 |
+
"(c) Wrong label results brought by the Noisy Pseudo Box"
|
| 172 |
+
],
|
| 173 |
+
"image_footnote": [],
|
| 174 |
+
"bbox": [
|
| 175 |
+
529,
|
| 176 |
+
145,
|
| 177 |
+
766,
|
| 178 |
+
247
|
| 179 |
+
],
|
| 180 |
+
"page_idx": 1
|
| 181 |
+
},
|
| 182 |
+
{
|
| 183 |
+
"type": "text",
|
| 184 |
+
"text": "etc. Behind these advances, plenty of annotated data plays an important role [23]. However, labeling accurate annotations for large-scale data is usually time-consuming and expensive, especially for object detection, which requires annotating precise bounding boxes for each instance, besides category labels. Therefore, employing easily accessible unlabeled data to facilitate the model training with limited annotated data is a promising direction, named Semi-Supervised Learning, where labeled data and unlabeled data are combined together as training examples.",
|
| 185 |
+
"bbox": [
|
| 186 |
+
212,
|
| 187 |
+
380,
|
| 188 |
+
787,
|
| 189 |
+
501
|
| 190 |
+
],
|
| 191 |
+
"page_idx": 1
|
| 192 |
+
},
|
| 193 |
+
{
|
| 194 |
+
"type": "text",
|
| 195 |
+
"text": "Semi-Supervised for Image Classification (SSIC) has been widely investigated in previous literature, and the learning paradigm on unlabeled data can be roughly divided into two categories: pseudo labeling [7,18] and consistency training [24,22], each of which receives much attention. Recently, some works (e.g., FixMatch [19], FlexMatch [28]) attempt to combine these two techniques into one framework and achieve state-of-the-art performance. In Semi-Supervised Object Detection (SSOD), some works borrow the key techniques (e.g. pseudo labeling, consistency training) from SSIC, and directly apply them to SSOD. Although these works [30,26] obtain gains from unlabeled data, they neglect some important properties of object detection, resulting in sub-optimal results. On the one hand, compared with image classification, pseudo labels of object detection are more complicated, containing both category and location information. On the other hand, object detection is required to capture stronger scale-invariant ability than image classification, as it needs to carefully deal with the targets with rich scales. In this work, we present a SSOD framework, termed PSEudo labeling and CConsistency training (PseCo), to integrate object detection properties into SSOD, making pseudo labeling and consistency training work better for object detection tasks.",
|
| 196 |
+
"bbox": [
|
| 197 |
+
212,
|
| 198 |
+
503,
|
| 199 |
+
787,
|
| 200 |
+
776
|
| 201 |
+
],
|
| 202 |
+
"page_idx": 1
|
| 203 |
+
},
|
| 204 |
+
{
|
| 205 |
+
"type": "text",
|
| 206 |
+
"text": "In pseudo labeling, the model produces one-hot pseudo labels on unlabeled data by itself, and only pseudo labels whose scores are above the predefined score threshold are retained. As for object detection, the pseudo label consists of both category labels and bounding boxes. Although category labels can be guaranteed",
"bbox": [212, 779, 787, 840],
"page_idx": 1
},
{
"type": "page_number",
"text": "2",
"bbox": [217, 114, 228, 126],
"page_idx": 1
},
{
"type": "header",
"text": "Li et al.",
"bbox": [271, 114, 326, 126],
"page_idx": 1
},
{
"type": "text",
"text": "to be accurate via setting a high score threshold, the localization quality of pseudo box fails to be measured and guaranteed. It has been validated in previous works that the classification score is not strongly correlated with the precision of box localization [10,29,6,26]. In Fig. 1(a), we compute the precision of pseudo boxes under various Intersection-over-Union (IoU) thresholds, via comparing produced pseudo boxes with ground-truths. Under loose criterion $(\\mathrm{IoU} = 0.3)$ , precision can reach $81\\%$ , but it will drop to $31\\%$ when we lift the IoU threshold to 0.9. This dramatic precision gap indicates coarse pseudo boxes whose IoUs belong to [0.3,0.9] account for $50\\%$ . If these noisy pseudo boxes are used as targets to train the detector, it must hinder the optimization, resulting in slow convergence and inefficient learning on unlabeled data. Furthermore, we analyze the negative effects brought by noisy pseudo boxes on classification and regression tasks as follows, respectively.",
"bbox": [212, 146, 787, 343],
"page_idx": 2
},
{
"type": "text",
"text": "For the classification task, noisy pseudo boxes will mislead the label assignment, where labels are assigned based on IoUs between proposals and gt boxes (pseudo boxes in our case). As shown in Fig. 1(c), a background proposal is taken as foreground due to a large IoU value with a poorly localized pseudo box. As a result, the IoU-based label assignment will fail on unlabeled data and confuse decision boundaries between foreground and background. To address this issue, we design a prediction-guided label assignment strategy for unlabeled data, which assigns labels based on predictions of the teacher, instead of IoUs with pseudo boxes as before, making it robust for poorly localized pseudo boxes.",
"bbox": [212, 347, 787, 484],
"page_idx": 2
},
{
"type": "text",
"text": "For the regression task, it is necessary to measure the localization quality of pseudo boxes. We propose a simple yet effective method to achieve this, named Positive-proposal Consistency Voting. We empirically find that regression consistency from positive proposals is capable of reflecting the localization quality of corresponding pseudo boxes. In Fig. 1(b), we visualize the relations between predicted consistency and their true IoUs, where their positive correlations can be found. Therefore, it is reasonable to employ the estimated localization quality (i.e., regression consistency from positive proposals) to re-weight the regression losses, making precise pseudo boxes contribute more to regression supervisions.",
"bbox": [212, 488, 787, 625],
"page_idx": 2
},
{
"type": "text",
"text": "Apart from pseudo labeling, we also analyze the consistency training for SSOD. Consistency training enforces the model to generate similar predictions when fed with perturbed versions of the same image, where perturbations can be implemented by injecting various data augmentations. Through consistency training, models can be invariant to different input transformations. Current SSOD methods [30,26,14] only apply off-the-shelf, general data augmentations, most of which are borrowed from image classification. However, different from classification, object detection is an instance-based task, where object scales usually vary in a large range, and detectors are expected to handle all scale ranges. Therefore, learning strong scale-invariant ability via consistency training is important. In scale consistency, it should be allowed for the model to predict the same boxes for input images with identical contents but varied scales. To ensure label consistency, random-resizing is a common augmentation, which resizes input images and gt boxes according to a randomly generated resize ratio. Be",
"bbox": [212, 628, 787, 840],
"page_idx": 2
},
{
"type": "header",
"text": "PseCo for Semi-Supervised Object Detection",
"bbox": [431, 114, 730, 128],
"page_idx": 2
},
{
"type": "page_number",
"text": "3",
"bbox": [774, 116, 784, 126],
"page_idx": 2
},
{
"type": "text",
"text": "sides label consistency, feature consistency also plays an important role in scale-invariant learning, but it is neglected in previous works. Thanks to the pyramid structure of popular backbone networks, feature alignment can be easily implemented by shifting feature pyramid levels according to the scale changes. Motivated by this, we introduce a brand new data augmentation technique, named Multi-view Scale-invariant Learning (MSL), to learn label-level and feature-level consistency simultaneously in a simple framework.",
"bbox": [212, 146, 782, 251],
"page_idx": 3
},
{
"type": "text",
"text": "In summary, we delve into two key techniques of semi-supervised learning (e.g., pseudo labeling and consistency training) for SSOD, and integrate object detection properties into them. On COCO benchmarks, our PseCo outperforms the state-of-the-art methods by a large margin, for example, under $10\\%$ labelling ratio, it can improve a $26.9\\%$ mAP baseline to $36.1\\%$ mAP, surpassing previous methods by at least $2.0\\%$ . When labeled data is abundant, i.e., we use full COCO training set as labeled data and extra 123K unlabeled2017 as unlabeled data, our PseCo improves the $41.0\\%$ mAP baseline by $+5.1\\%$ , reaching $46.1\\%$ mAP, establishing a new state of the art. Moreover, PseCo also significantly boosts the convergence speed, e.g. PseCo halves the training time of the SOTA (Soft Teacher [26]), but achieves even better performance.",
"bbox": [212, 252, 784, 417],
"page_idx": 3
},
{
"type": "text",
"text": "2 Related Works",
"text_level": 1,
"bbox": [215, 443, 395, 458],
"page_idx": 3
},
{
"type": "text",
"text": "Semi-supervised learning in image classification. Semi-supervised learning can be categorized into two groups: pseudo labeling (also called self-training) and consistency training, and previous methods design learning paradigms based on one of them. Pseudo labeling [7,18,4,25] iteratively adds unlabeled data into the training procedure with pseudo labels annotated by an initially trained network. Here, only model predictions with high confidence will be transformed into the one-hot format and become pseudo labels. Noisy Student Training [25] injects noise into unlabeled data training, which equips the model with stronger generalization through training on the combination of labeled and unlabeled data. On the other hand, consistency training [22,24,1] relies on the assumption that the model should be invariant to small changes on input images or model hidden states. It enforces the model to make similar predictions on the perturbed versions of the same image, and perturbations can be implemented by injecting noise into images and hidden states. UDA [24] validates the advanced data augmentations play a crucial role in consistency training, and observes the strong augmentations found in supervised-learning can also lead to obvious improvements in semi-supervised learning.",
|
| 340 |
+
"bbox": [
|
| 341 |
+
212,
|
| 342 |
+
477,
|
| 343 |
+
784,
|
| 344 |
+
733
|
| 345 |
+
],
|
| 346 |
+
"page_idx": 3
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"type": "text",
|
| 350 |
+
"text": "Recently, some works [19,28] attempt to combine pseudo labeling and consistency training, achieving state-of-the-art performance. FixMatch [19] firstly applies the weak and strong augmentations to the same input image, respectively, to generate two versions, then uses the weakly-augmented version to generate hard pseudo labels. The model is trained on strongly-augmented versions to align predictions with pseudo labels. Based on FixMatch, FlexMatch [28] proposes to adjust score thresholds for different classes during the generation of pseudo",
"bbox": [212, 734, 782, 839],
"page_idx": 3
},
{
"type": "page_number",
"text": "4",
"bbox": [217, 114, 228, 126],
"page_idx": 3
},
{
"type": "header",
"text": "Li et al.",
"bbox": [271, 114, 325, 126],
"page_idx": 3
},
{
"type": "text",
"text": "labels, based on curriculum learning. It has been widely validated that pseudo labeling and consistency training are two powerful techniques in semi-supervised image classification, hence in this work, we attempt to integrate object detection properties into them and make them work better for semi-supervised object detection.",
"bbox": [212, 146, 782, 219],
"page_idx": 4
},
{
"type": "text",
"text": "Semi-supervised learning in object detection. STAC [20] is the first attempt to apply pseudo labeling and consistency training based on the strong data augmentations to semi-supervised object detection, however, it adopts two stages of training as Noisy Student Training [25], which prevents the pseudo labels from updating along with model training and limits the performance. After STAC, [26,30,21,27,14] borrow the idea of Exponential Moving Average (EMA) from Mean Teacher [22], and update the teacher model after each training iteration to generate instant pseudo labels, realizing the end-to-end framework. To pursue high quality of pseudo labels and overcome confirmation bias, InstantTeaching [30] and ISMT [27] introduce model ensemble to aggregate predictions from multiple teacher models which are initialized differently; similarly, Humble Teacher [21] ensembles the teacher model predictions by taking both the image and its horizontally flipped version as input. Although these ensemble methods can promote the quality of pseudo labels, they also introduce considerable computation overhead. Unbiased Teacher [14] replaces traditional Cross-entropy loss with Focal loss [12] to alleviate the class-imbalanced pseudo-labeling issue, which shows strong performance when labeled data is scarce. Soft Teacher [26] uses teacher classification scores as classification loss weights, to suppress negative effects from underlying objects missed by pseudo labels. Different from previous methods, our work elaborately analyzes whether the pseudo labeling and consistency training can be directly applied to SSOD, but gets a negative answer. To integrate object detection properties into these two techniques, we introduce Noisy Pseudo box Learning and Multi-view Scale-invariant Learning, obtaining much better performance and faster convergence speed.",
"bbox": [212, 222, 787, 585],
"page_idx": 4
},
{
"type": "text",
"text": "3 Method",
"text_level": 1,
"bbox": [215, 612, 330, 628],
"page_idx": 4
},
{
"type": "text",
"text": "We show the framework of our PseCo in Fig. 2. On the unlabeled data, PseCo consists of Noisy Pseudo box Learning (NPL) and Multi-view Scale-invariant Learning (MSL). In the following parts, we will introduce the basic framework, the proposed NPL and MSL, respectively.",
"bbox": [212, 647, 787, 709],
"page_idx": 4
},
{
"type": "text",
"text": "3.1 The basic framework",
"text_level": 1,
"bbox": [215, 734, 434, 750],
"page_idx": 4
},
{
"type": "text",
"text": "At first, we directly apply standard pseudo labeling and consistency training to SSOD, building our basic framework. Following previous works [26,14,30], we also adopt Teacher-student training scheme, where the teacher model is built from the student model at every training iteration via Exponential Moving Average (EMA). We randomly sample labeled data and unlabeled data based on a sample",
"bbox": [212, 763, 787, 840],
"page_idx": 4
},
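The EMA update mentioned in the paragraph above admits a compact implementation. The following is a minimal PyTorch sketch, not the authors' released code; the momentum value 0.999 is an assumed placeholder, since this excerpt does not state one.

```python
import copy

import torch


@torch.no_grad()
def ema_update(teacher: torch.nn.Module, student: torch.nn.Module,
               momentum: float = 0.999) -> None:
    # Teacher parameters track an exponential moving average of the student's,
    # refreshed once per training iteration as described above.
    for t_p, s_p in zip(teacher.parameters(), student.parameters()):
        t_p.mul_(momentum).add_(s_p, alpha=1.0 - momentum)
    # Buffers (e.g. BatchNorm running statistics) are simply copied here.
    for t_b, s_b in zip(teacher.buffers(), student.buffers()):
        t_b.copy_(s_b)


# Usage: teacher = copy.deepcopy(student).eval()  # initialize once,
# then call ema_update(teacher, student) after every training step.
```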
{
"type": "header",
"text": "PseCo for Semi-Supervised Object Detection",
"bbox": [431, 114, 730, 128],
"page_idx": 4
},
{
"type": "page_number",
"text": "5",
"bbox": [774, 116, 784, 126],
"page_idx": 4
},
{
"type": "image",
"img_path": "images/53f3e8f01d7c2202e86803ffe27dc9e29531ce904681282a50d89a8104389e88.jpg",
"image_caption": [
"Fig. 2: The framework of our PseCo. Each training batch consists of both labeled and unlabeled images. On the unlabeled images, the student model trains on view $V_{1}$ and $V_{2}$ at the same time, taking the same pseudo boxes as supervisions. View $V_{0}$ refers to input images for the teacher model."
],
"image_footnote": [],
"bbox": [246, 143, 751, 359],
"page_idx": 5
},
{
"type": "text",
"text": "ratio to form the training batch. On the labeled data, the student model is trained in a regular manner, supervised by the ground-truth boxes:",
"bbox": [214, 454, 787, 484],
"page_idx": 5
},
{
"type": "equation",
"text": "\n$$\n\\mathcal{L}^{l} = \\mathcal{L}_{cls}^{l} + \\mathcal{L}_{reg}^{l}. \\tag{1}\n$$\n",
"text_format": "latex",
"bbox": [436, 496, 785, 516],
"page_idx": 5
},
{
"type": "text",
"text": "On the unlabeled data, we firstly apply weak data augmentations (e.g. horizontal flip, random resizing) to input images, and then feed them to the teacher model for pseudo label generation. Considering the detection boxes tend to be dense even after NMS, we set a score threshold $\\tau$ and only retain boxes with scores above $\\tau$ as pseudo labels. After that, strong augmentations (e.g. cutout, rotation, brightness jitter)<sup>3</sup> will be performed on the input image to generate the training example for student model. Since high classification scores do not lead to precise localization, we abandon bounding box regression on unlabeled data, as done in [14]. Actually, applying the box regression loss on unlabeled data will cause unstable training in our experiments.",
"bbox": [212, 527, 787, 678],
"page_idx": 5
},
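The score-thresholded retention of pseudo labels described above can be sketched as follows; the tensor shapes are assumptions, and tau=0.5 matches the confidence threshold reported later in the implementation details.

```python
import torch


def filter_pseudo_labels(boxes: torch.Tensor, scores: torch.Tensor,
                         labels: torch.Tensor, tau: float = 0.5):
    """Keep only teacher detections (post-NMS) whose score exceeds tau.

    boxes: (N, 4), scores: (N,), labels: (N,). Retained entries become
    the pseudo labels for the strongly augmented student input.
    """
    keep = scores > tau
    return boxes[keep], scores[keep], labels[keep]
```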
{
"type": "text",
"text": "Foreground-background imbalance [8,12] is an intrinsic issue in object detection, and it gets worse under the semi-supervised setting. A high score threshold $\\tau$ is usually adopted to guarantee the precision of pseudo labels, but it also results in scarcity of pseudo labels, aggravating the imbalance of foreground/background. Moreover, there also exists foreground-foreground imbalance, exactly, training examples from some specific categories can be limited when labeled data is scarce, which makes the model prone to predict the dominant classes, causing biased prediction. To alleviate these imbalance issues, we",
"bbox": [212, 679, 787, 800],
"page_idx": 5
},
{
"type": "page_number",
"text": "6",
"bbox": [217, 114, 228, 126],
"page_idx": 5
},
{
"type": "header",
"text": "Li et al.",
"bbox": [271, 114, 326, 126],
"page_idx": 5
},
{
"type": "page_footnote",
"text": "<sup>3</sup> We adopt the same data augmentations as Soft Teacher [26], please refer to [26] for more augmentation details.",
"bbox": [217, 810, 785, 839],
"page_idx": 5
},
{
"type": "text",
"text": "follow the practice of Unbiased Teacher [14], and replace the standard cross-entropy loss with focal loss [12]:",
"bbox": [212, 146, 782, 176],
"page_idx": 6
},
{
"type": "equation",
"text": "\n$$\n\\mathcal{L}_{cls}^{u} = -\\alpha_{t} (1 - p_{t})^{\\gamma} \\log(p_{t}), \\quad p_{t} = \\begin{cases} p, & \\text{if } y = 1, \\\\ 1 - p, & \\text{otherwise}, \\end{cases} \\tag{2}\n$$\n",
"text_format": "latex",
"bbox": [307, 189, 785, 229],
"page_idx": 6
},
{
"type": "text",
"text": "where parameters $\\alpha_{t}$ and $\\gamma$ adopt default settings in original focal loss paper [12]. The overall loss function is formulated as:",
"bbox": [214, 238, 782, 268],
"page_idx": 6
},
{
"type": "equation",
"text": "\n$$\n\\mathcal{L} = \\mathcal{L}^{l} + \\beta \\mathcal{L}^{u}, \\tag{3}\n$$\n",
"text_format": "latex",
"bbox": [449, 281, 784, 297],
"page_idx": 6
},
{
"type": "text",
"text": "where $\\beta$ is used to control the contribution of unlabeled data. In theory, our proposed method is independent of the detection framework and can be applied on both one-stage and two-stage detectors. However, considering all previous methods are based on Faster R-CNN [17] detection framework, for a fair comparison with them, we also adopt Faster R-CNN as the default detection framework.",
"bbox": [214, 309, 782, 383],
"page_idx": 6
},
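Eq. (2) is the standard binary focal loss. A minimal PyTorch sketch follows, using alpha=0.25 and gamma=2.0, the defaults of the original focal loss paper that the text says are adopted.

```python
import torch
import torch.nn.functional as F


def focal_loss(logits: torch.Tensor, targets: torch.Tensor,
               alpha: float = 0.25, gamma: float = 2.0) -> torch.Tensor:
    """Binary focal loss of Eq. (2); targets holds y in {0, 1}."""
    p = torch.sigmoid(logits)
    # Per-element cross entropy equals -log(p_t).
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p_t = p * targets + (1.0 - p) * (1.0 - targets)              # p_t as in Eq. (2)
    alpha_t = alpha * targets + (1.0 - alpha) * (1.0 - targets)  # alpha_t weighting
    return (alpha_t * (1.0 - p_t) ** gamma * ce).mean()
```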
{
"type": "text",
"text": "3.2 Noisy Pseudo Box Learning",
"text_level": 1,
"bbox": [215, 407, 491, 422],
"page_idx": 6
},
{
"type": "text",
"text": "In SSOD, pseudo labels contain both category and location. Since the score of pseudo labels can only indicate the confidence of pseudo box categories, the localization quality of pseudo boxes is not guaranteed. Imprecise pseudo boxes will mislead the label assignment and regression task, making learning on unlabeled data inefficient. Motivated by this, we introduce Prediction-guided Label Assignment and Positive-proposal Consistency Voting to reduce negative effects on the label assignment and regression task, respectively.",
"bbox": [212, 431, 785, 537],
"page_idx": 6
},
{
"type": "text",
"text": "Prediction-guided Label Assignment. The standard label assignment strategy in Faster R-CNN [17] only takes the IoUs between proposals and gt boxes (pseudo boxes in our case) into consideration and assigns foreground to those proposals, whose IoUs are above a pre-defined threshold $t$ (0.5 as default). This strategy relies on the assumption that gt boxes are precise, however, this assumption does not hold for unlabeled data obviously. As a result, some low-quality proposals will be mistakenly assigned as positive, confusing the classification boundaries between foreground and background. One specific example is shown in Fig. 1(c), where a proposal with the true IoU as 0.39 is mistakenly assigned as positive.",
"bbox": [212, 537, 785, 688],
"page_idx": 6
},
{
"type": "text",
"text": "To address this problem, we propose Prediction-guided Label Assignment (PLA), which takes teacher predictions as auxiliary information and reduces dependency on IoUs. In Teacher-student training scheme, not only can the detection results (after NMS) of teacher perform as pseudo labels, but also teacher's dense predictions (before NMS) are able to provide guidance for student model training. We share the proposals generated by the teacher RPN with the student, so that teacher predictions on these proposals can be easily transferred to student. To measure the proposal quality $(q)$ comprehensively, the classification confidence and localization precision of teacher predictions are jointly employed, concretely, $q = s^{\\alpha} \\times u^{1 - \\alpha}$ , where $s$ and $u$ denote a foreground score and an IoU",
"bbox": [212, 688, 785, 840],
"page_idx": 6
},
{
"type": "header",
"text": "PseCo for Semi-Supervised Object Detection",
"bbox": [431, 114, 730, 128],
"page_idx": 6
},
{
"type": "page_number",
"text": "7",
"bbox": [774, 116, 784, 126],
"page_idx": 6
},
{
"type": "text",
"text": "value between the regressed box and the ground truth, respectively. $\\alpha$ controls the contribution of $s$ and $u$ in the overall quality. On unlabeled data, we first construct a candidate bag for each ground truth $g$ by the traditional IoU-based strategy, where the IoU threshold $t$ is set to a relatively low value, e.g., 0.4 as default, to contain more proposals. Within each candidate bag, the proposals are firstly sorted by their quality $q$ , then top- $\\mathcal{N}$ proposals are adopted as positive samples and the rest are negatives. The number $\\mathcal{N}$ is decided by the dynamic $k$ estimation strategy proposed in OTA [3], specifically, the IoU values over the candidate bag is summed up to represent the number of positive samples. The proposed PLA gets rid of strong dependencies on IoUs and alleviates negative effects from poorly localized pseudo boxes, leading to clearer classification boundaries. Furthermore, our label assign strategy integrates more teacher knowledge into student model training, realizing better knowledge distillation.",
|
| 691 |
+
"bbox": [
|
| 692 |
+
212,
|
| 693 |
+
146,
|
| 694 |
+
787,
|
| 695 |
+
343
|
| 696 |
+
],
|
| 697 |
+
"page_idx": 7
|
| 698 |
+
},
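The quality score q = s^alpha * u^(1-alpha), the loose candidate bag, and the OTA-style dynamic k combine into the following per-pseudo-box sketch; the function name and tensor layout are assumptions of this illustration, not the paper's code.

```python
import torch


def prediction_guided_assignment(ious: torch.Tensor, fg_scores: torch.Tensor,
                                 reg_ious: torch.Tensor, alpha: float = 0.5,
                                 t: float = 0.4) -> torch.Tensor:
    """PLA sketch for a single pseudo box.

    ious:      (P,) IoUs between the shared proposals and the pseudo box.
    fg_scores: (P,) teacher foreground scores s on those proposals.
    reg_ious:  (P,) IoUs u between teacher-regressed boxes and the pseudo box.
    Returns a boolean (P,) mask of positive proposals.
    """
    candidate = ious > t                                         # loose candidate bag
    quality = fg_scores.pow(alpha) * reg_ious.pow(1.0 - alpha)   # q = s^a * u^(1-a)
    # Dynamic number of positives: summed IoU over the bag, clipped to bag size.
    k = min(max(int(ious[candidate].sum().item()), 1), int(candidate.sum()))
    pos = torch.zeros_like(candidate)
    if k > 0:
        bag_q = torch.where(candidate, quality, torch.full_like(quality, -1.0))
        pos[bag_q.topk(k).indices] = True                        # top-k by quality
    return pos
```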
{
"type": "text",
"text": "Positive-proposal Consistency Voting. Considering the classification score fails to indicate localization quality, we introduce a simple yet effective method to measure the localization quality, named Positive-proposal Consistency Voting (PCV). Assigning multiple proposals to each gt box (or pseudo box) is a common practice in CNN-based detectors [17,10,29], and we observe that the consistency of regression results from these proposals is capable of reflecting the localization quality of the corresponding pseudo box. Regression consistency $\\sigma^j$ for pseudo box (indexed by $j$ ) is formulated as:",
"bbox": [212, 343, 787, 464],
"page_idx": 7
},
{
"type": "equation",
"text": "\n$$\n\\sigma^{j} = \\frac{\\sum_{i=1}^{N} u_{i}^{j}}{N}, \\tag{4}\n$$\n",
"text_format": "latex",
"bbox": [447, 476, 785, 508],
"page_idx": 7
},
{
"type": "text",
"text": "where $u$ denotes an IoU value between the predicted box and the pseudo box, as defined above; $N$ denotes the number of positive proposals, assigned to the pseudo box $j$ . After obtaining $\\sigma^j$ , we employ it as the instance-wise regression loss weight:",
"bbox": [212, 517, 787, 577],
"page_idx": 7
},
{
"type": "equation",
"text": "\n$$\n\\mathcal{L}_{reg}^{u} = \\frac{1}{MN} \\sum_{j=1}^{M} \\sigma^{j} \\sum_{i=1}^{N} \\left| reg_{i}^{j} - \\hat{reg}_{i}^{j} \\right|, \\tag{5}\n$$\n",
"text_format": "latex",
"bbox": [372, 578, 785, 619],
"page_idx": 7
},
{
"type": "text",
"text": "where $reg$ and $\\hat{reg}$ refer to the regression output and ground-truth, respectively. In Fig. 1(b), we depict the scatter diagram of the relation between prediction consistency $\\sigma$ of pseudo boxes and their true IoUs. It is obvious that $\\sigma$ is positively correlated with true IoUs. Note that, some dots falling in the orange ellipse are mainly caused by annotation errors. We visualize some examples in Fig. 5, where the pseudo boxes accurately detect some objects, which are missed by the ground truths.",
"bbox": [212, 625, 787, 731],
"page_idx": 7
},
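Eqs. (4) and (5) translate directly into code. The sketch below assumes axis-aligned boxes in (x1, y1, x2, y2) format and treats sigma purely as a loss weight (detached from the graph), which is an assumption of this illustration.

```python
import torch
from torchvision.ops import box_iou


def pcv_sigma(pred_boxes: torch.Tensor, pseudo_box: torch.Tensor) -> torch.Tensor:
    """Eq. (4): sigma^j is the mean IoU u_i^j between the boxes regressed from
    the N positive proposals of pseudo box j and the pseudo box itself."""
    return box_iou(pred_boxes, pseudo_box.unsqueeze(0)).mean()


def pcv_reg_loss(reg: torch.Tensor, reg_hat: torch.Tensor,
                 sigma: torch.Tensor) -> torch.Tensor:
    """Per-pseudo-box term of Eq. (5): sigma-weighted L1 over the N positives.

    reg, reg_hat: (N, 4) predicted and target regression values; the caller
    averages the returned terms over the M pseudo boxes of the image.
    """
    return sigma.detach() * (reg - reg_hat).abs().sum(dim=-1).mean()
```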
{
"type": "text",
"text": "3.3 Multi-view Scale-invariant Learning",
"text_level": 1,
"bbox": [215, 753, 558, 768],
"page_idx": 7
},
{
"type": "text",
"text": "Different from image classification, in object detection, object scales vary in a large range and detectors hardly show comparable performance on all scales. Therefore, learning scale-invariant representations from unlabeled data is considerably important for SSOD. In consistency training, strong data augmentations",
"bbox": [212, 779, 787, 840],
"page_idx": 7
},
{
"type": "page_number",
"text": "8",
"bbox": [217, 114, 228, 126],
"page_idx": 7
},
{
"type": "header",
"text": "Li et al.",
"bbox": [271, 114, 326, 126],
"page_idx": 7
},
{
"type": "image",
"img_path": "images/6016c73adc4039307f0ccdd5d9b1437248c1657fe84e7637ac3b383ac5aac9c5.jpg",
"image_caption": [
"Fig. 3: Comparisons between label-level consistency learning and feature-level consistency learning. For label consistency, labels are aligned according to the resize ratio $\\alpha$ ; for feature consistency, features are aligned by shifting the feature pyramid level."
],
"image_footnote": [],
"bbox": [241, 146, 504, 309],
"page_idx": 8
},
{
"type": "image",
"img_path": "images/14b0cf55ff1acdf99d869a1aa3b3925d40f18f82a6fb7f7be2a7cbcfbaada74c.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [522, 146, 750, 309],
"page_idx": 8
},
{
"type": "text",
"text": "play a crucial role [24,25] in achieving competitive performance. Through injecting the perturbations into the input images, data augmentations equip the model with robustness to various transformations. From the perspective of scale invariance, we regard the common data augmentation strategy (e.g. random-resizing) as label-level consistency since it resizes the label according to the scale changes of input images. Unfortunately, existing works only involve the widely adopted label-level consistency but fail to consider the feature-level one. Since detection network usually has designs of rich feature pyramids, feature-level consistency is easy to implement across paired inputs [16] and should be considered seriously. In this paper, we propose Multi-view Scale-invariant Learning (MSL) that combines both label- and feature-level consistency into a simple framework, where feature-level consistency is realized by aligning shifted pyramid features between two images with identical content but different scales.",
"bbox": [212, 404, 787, 599],
"page_idx": 8
},
{
"type": "text",
"text": "To be specific, two views, namely $V_{1}$ and $V_{2}$ , are used for student training in MSL. We denote the input image for the teacher model as $V_{0}$ . Views $V_{1}$ and $V_{2}$ are constructed to learn label- and feature-level consistency, respectively. Among them, $V_{1}$ is implemented by vanilla random resizing, which rescales the input $V_{0}$ and pseudo boxes according to a resize ratio $\\alpha$ randomly sampled from the range $[\\alpha_{min}, \\alpha_{max}]$ ([0.8, 1.3] as default). For feature consistency learning, we firstly downsample $V_{1}$ by an even factor (2x as default) to produce $V_{2}$ , then combine $V_{1}$ and $V_{2}$ into image pairs. Upsampling is certainly permitted as well, but we only perform downsampling here due to GPU memory restrictions. Because the spatial sizes of adjacent FPN layers always differ by 2x, the P3-P7 layers<sup>4</sup> of $V_{1}$ can align well with P2-P6 layers of $V_{2}$ in the spatial dimension. Through feature alignment, the same pseudo boxes can supervise the student model training on both $V_{1}$ and $V_{2}$ . Integrating label consistency and feature consistency into consistency learning leads to stronger scale-invariant learning and significantly",
"bbox": [212, 601, 787, 814],
"page_idx": 8
},
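The construction of V1 (random resize of image and pseudo boxes) and V2 (V1 downsampled 2x) can be sketched as follows; the tensor layout and bilinear resampling are assumptions of this sketch.

```python
import random

import torch
import torch.nn.functional as F


def make_views(v0_image: torch.Tensor, pseudo_boxes: torch.Tensor,
               alpha_min: float = 0.8, alpha_max: float = 1.3):
    """Build the two student views from the teacher input V0.

    v0_image: (C, H, W); pseudo_boxes: (N, 4) in pixel coordinates.
    V1 rescales image and boxes by alpha ~ U[0.8, 1.3] (the default range
    above); V2 is V1 downsampled 2x for feature-level consistency.
    """
    alpha = random.uniform(alpha_min, alpha_max)
    v1 = F.interpolate(v0_image[None], scale_factor=alpha,
                       mode="bilinear", align_corners=False)[0]
    v1_boxes = pseudo_boxes * alpha          # label consistency: boxes follow the resize
    v2 = F.interpolate(v1[None], scale_factor=0.5,
                       mode="bilinear", align_corners=False)[0]
    return (v1, v1_boxes), v2
```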
{
"type": "header",
"text": "PseCo for Semi-Supervised Object Detection",
"bbox": [431, 114, 730, 128],
"page_idx": 8
},
{
"type": "page_number",
"text": "9",
"bbox": [774, 116, 784, 126],
"page_idx": 8
},
{
"type": "page_footnote",
"text": "${}^{4}{P}_{x}$ refers to the FPN layer whose feature maps are downsampled by ${2}^{x}$ times.",
"bbox": [217, 824, 750, 839],
"page_idx": 8
},
{
"type": "text",
"text": "accelerates model convergence, as we will show later in the experiments. Comparisons between label consistency and feature consistency are shown in Fig. 3.",
"bbox": [212, 146, 782, 176],
"page_idx": 9
},
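The feature alignment argument above reduces to pairing shifted pyramid levels. A minimal sketch, assuming FPN outputs are stored in dicts keyed "P2"..."P7":

```python
def shifted_pyramid_pairs(feats_v1: dict, feats_v2: dict) -> list:
    """Pair FPN levels of V1 with levels of V2 (V2 = V1 downsampled 2x).

    One 2x downsampling shifts every object by exactly one pyramid level,
    so P3-P7 of V1 align spatially with P2-P6 of V2 and the same pseudo
    boxes can supervise both views.
    """
    return [(feats_v1[f"P{x + 1}"], feats_v2[f"P{x}"]) for x in range(2, 7)]
```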
{
"type": "text",
"text": "Learning scale-invariant representation from unlabeled data is also explored by SoCo [23]. However, we claim there are two intrinsic differences between MSL and SoCo: (1) MSL models scale invariance from both label consistency and image feature consistency, while SoCo only considers object feature consistency. Through aligning dense image features of shifted pyramids between paired images, our MSL can provide more comprehensive and dense supervisory signals than the SoCo, which only performs consistency on sparse objects. (2) SoCo implements feature consistency via contrastive learning, which is designed for the pretraining; in contrast, our MSL uses bounding box supervision to implement consistency learning and can be integrated into the detection task.",
"bbox": [212, 176, 784, 325],
"page_idx": 9
},
{
"type": "text",
"text": "4 Experiments",
"text_level": 1,
"bbox": [215, 345, 375, 363],
"page_idx": 9
},
{
"type": "text",
"text": "4.1 Dataset and Evaluation Protocol",
"text_level": 1,
"bbox": [215, 373, 532, 387],
"page_idx": 9
},
{
"type": "text",
"text": "In this section, we conduct extensive experiments to verify the effectiveness of PseCo on MS COCO benchmark [13]. There are two training sets, namely the train2017 set, containing 118k labeled images, and the unlabeled2017 set, containing 123k unlabeled images. The val2017 with 5k images is used as validation set, and we report all experiment results on val2017. The performance is measured by COCO average precision (denoted as mAP). Following the common practice of SSOD [20], there are two experimental settings: Partially Labeled Data and Fully Labeled Data, which are described as follows:",
"bbox": [212, 393, 784, 513],
"page_idx": 9
},
{
"type": "text",
"text": "Partially Labeled Data. We randomly sample 1, 2, 5, and $10\\%$ data from train2017 as labeled data, and use the rest as unlabeled. Under each labelling ratio, we report the mean and standard deviation over 5 different data folds.",
"bbox": [212, 513, 784, 559],
"page_idx": 9
},
{
"type": "text",
"text": "Fully Labeled Data. Under this setting, we take train2017 as the training labeled set and unlabeled2017 as the training unlabeled set.",
"bbox": [212, 560, 784, 590],
"page_idx": 9
},
{
"type": "text",
"text": "4.2 Implementation Details",
"text_level": 1,
"bbox": [215, 608, 455, 623],
"page_idx": 9
},
{
"type": "text",
"text": "For a fair comparison, we adopt Faster R-CNN [17] with FPN [11] as the detection framework, and ResNet-50 [5] as the backbone. The confidence threshold $\\tau$ is set to 0.5, empirically. We set $\\beta$ as 4.0 to control contributions of unlabeled data in the overall losses. The performance is evaluated on the Teacher model. Training details for Partially Labeled Data and Fully Labeled Data are described below:",
"bbox": [212, 628, 784, 717],
"page_idx": 9
},
{
"type": "text",
"text": "Partially Labeled Data. All models are trained for 180k iterations on 8 GPUs. The initial learning rate is set as 0.01 and divided by 10 at 120k and 160k iterations. The training batch in each GPU includes 5 images, where the sample ratio between unlabeled data and labeled data is set to 4:1.",
"bbox": [212, 719, 784, 777],
"page_idx": 9
},
{
"type": "text",
"text": "Fully Labeled Data. All models are trained for 720k iterations on 8 GPUs. Mini-batch in each GPU is 8 with the sample ratio between unlabeled and labeled data as 1:1. The learning rate is initialized to 0.01 and divided by 10 at 480k and 680k iterations.",
"bbox": [212, 779, 784, 838],
"page_idx": 9
},
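The batch composition just described (5 images per GPU at a 4:1 unlabeled:labeled ratio) can be sketched as a trivial sampler; the pool types and function name are assumptions.

```python
import random


def sample_batch(labeled: list, unlabeled: list, batch_size: int = 5,
                 n_unlabeled: int = 4):
    """Per-GPU batch under the Partially Labeled setting: 4 unlabeled and
    1 labeled image, matching the 4:1 sample ratio reported above."""
    return (random.sample(labeled, batch_size - n_unlabeled),
            random.sample(unlabeled, n_unlabeled))
```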
{
"type": "page_number",
"text": "10",
"bbox": [217, 114, 235, 126],
"page_idx": 9
},
{
"type": "header",
"text": "Li et al.",
"bbox": [271, 114, 325, 126],
"page_idx": 9
},
{
"type": "table",
"img_path": "images/af8a8d91ee36941c716c6c60734973542838f6b37dcb87b93bc90053a0f9151d.jpg",
"table_caption": [
"Table 1: Comparisons with the state-of-the-art methods on val2017 set under the Partially Labeled Data and Fully Labeled Data settings."
],
"table_footnote": [],
"table_body": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"4\">Partially Labeled Data</td><td rowspan=\"2\">Fully Labeled Data</td></tr><tr><td>1%</td><td>2%</td><td>5%</td><td>10%</td></tr><tr><td>Supervised baseline</td><td>12.20±0.29</td><td>16.53±0.12</td><td>21.17±0.17</td><td>26.90±0.08</td><td>41.0</td></tr><tr><td>STAC [20]</td><td>13.97±0.35</td><td>18.25±0.25</td><td>24.38±0.12</td><td>28.64±0.21</td><td>39.2</td></tr><tr><td>Humble Teacher [21]</td><td>16.96±0.35</td><td>21.74±0.24</td><td>27.70±0.15</td><td>31.61±0.28</td><td>37.6 +4.8 → 42.4</td></tr><tr><td>ISMT [27]</td><td>18.88±0.74</td><td>22.43±0.56</td><td>26.37±0.24</td><td>30.53±0.52</td><td>37.8 +1.8 → 39.6</td></tr><tr><td>Instant-Teaching [30]</td><td>18.05±0.15</td><td>22.45±0.15</td><td>26.75±0.05</td><td>30.40±0.05</td><td>37.6 +2.6 → 40.2</td></tr><tr><td>Unbiased Teacher [14]</td><td>20.75±0.12</td><td>24.30±0.07</td><td>28.27±0.11</td><td>31.50±0.10</td><td>40.2 +1.1 → 41.3</td></tr><tr><td>Soft Teacher [26]</td><td>20.46±0.39</td><td>-</td><td>30.74±0.08</td><td>34.04±0.14</td><td>40.9 +3.6 → 44.5</td></tr><tr><td>PseCo (ours)</td><td>22.43±0.36</td><td>27.77±0.18</td><td>32.50±0.08</td><td>36.06±0.24</td><td>41.0 +5.1 → 46.1</td></tr></table>",
"bbox": [259, 186, 741, 327],
"page_idx": 10
},
{
"type": "text",
"text": "4.3 Comparison with State-of-the-Art Methods",
"text_level": 1,
"bbox": [215, 356, 619, 372],
"page_idx": 10
},
{
"type": "text",
"text": "We compare the proposed PseCo with other state-of-the-art methods on COCO val2017 set. Comparisons under the Partially Labeled Data setting are first conducted, with results reported in Tab. 1. When labeled data is scarce (i.e., under $1\\%$ and $2\\%$ labelling ratios), our method surpasses the state-of-the-art method, Unbiased Teacher [14], by $1.7\\%$ and $3.5\\%$ , reaching 22.4 and $27.8\\mathrm{mAP}$ respectively. When more labeled data is accessible, the SOTA method is transferred to Soft Teacher [26]. Our method still outperforms it by $1.8\\%$ and $2.0\\%$ under $5\\%$ and $10\\%$ labelling ratios, respectively. Therefore, the proposed method outperforms the SOTAs by a large margin, at least $1.7\\%$ , under all labelling ratios. Compared with the supervised baseline, PseCo obtains even better performance with only $2\\%$ labeled data than the baseline with $10\\%$ labeled data, demonstrating the effectiveness of proposed semi-supervised learning techniques.",
|
| 1061 |
+
"bbox": [
|
| 1062 |
+
217,
|
| 1063 |
+
385,
|
| 1064 |
+
785,
|
| 1065 |
+
566
|
| 1066 |
+
],
|
| 1067 |
+
"page_idx": 10
|
| 1068 |
+
},
|
| 1069 |
+
{
|
| 1070 |
+
"type": "text",
|
| 1071 |
+
"text": "Moreover, we also compare the convergence speed with the previous best method (Soft Teacher [26]) in Fig. 4, where convergence curves are depicted under $10\\%$ and $5\\%$ labelling ratios. It is obvious that our method has a faster convergence speed, specifically, our method uses only $2/5$ and $1/4$ iterations of Soft Teacher to achieve the same performance under $10\\%$ and $5\\%$ labelling ratios respectively. Although we employ an extra view $(V_{2})$ to learn feature-level consistency, it only increases the training time of each iteration by $25\\%$ (from $0.72 \\text{ sec}/\\text{iter}$ to $0.91 \\text{ sec}/\\text{iter}$ ), due to the low input resolution of $V_{2}$ . In summary, we halve the training time of SOTA approach but achieve even better performance, which validates the superior learning efficiency of our method on unlabeled data.",
"bbox": [217, 566, 785, 732],
"page_idx": 10
},
{
"type": "text",
"text": "The experimental results under the Fully Labeled Data setting are reported in Tab. 1, where both results of comparison methods and their supervised baseline are listed. Following the practice in Soft Teacher [26], we also apply weak augmentations to the labeled data and obtain a strong supervised baseline, $41.0\\mathrm{mAP}$ . Although with a such strong baseline, PseCo still achieves larger improvements $(+5.1\\%)$ than others and reaches $46.1\\mathrm{mAP}$ , building a new state of the art. Some qualitative results are shown in Fig. 5.",
|
| 1083 |
+
"bbox": [
|
| 1084 |
+
215,
|
| 1085 |
+
734,
|
| 1086 |
+
785,
|
| 1087 |
+
839
|
| 1088 |
+
],
|
| 1089 |
+
"page_idx": 10
|
| 1090 |
+
},
|
| 1091 |
+
{
|
| 1092 |
+
"type": "header",
|
| 1093 |
+
"text": "PseCo for Semi-Supervised Object Detection",
|
| 1094 |
+
"bbox": [
|
| 1095 |
+
431,
|
| 1096 |
+
114,
|
| 1097 |
+
730,
|
| 1098 |
+
128
|
| 1099 |
+
],
|
| 1100 |
+
"page_idx": 10
|
| 1101 |
+
},
|
| 1102 |
+
{
|
| 1103 |
+
"type": "page_number",
|
| 1104 |
+
"text": "11",
|
| 1105 |
+
"bbox": [
|
| 1106 |
+
767,
|
| 1107 |
+
116,
|
| 1108 |
+
782,
|
| 1109 |
+
126
|
| 1110 |
+
],
|
| 1111 |
+
"page_idx": 10
|
| 1112 |
+
},
|
| 1113 |
+
{
|
| 1114 |
+
"type": "image",
|
| 1115 |
+
"img_path": "images/50295af2435f392bb3dccc9fe48730dd748af4d15a466e5fdba622a7a0ff0792.jpg",
"image_caption": [
"(a) $10\\%$ labelling ratio"
],
"image_footnote": [],
"bbox": [243, 157, 406, 253],
"page_idx": 11
},
{
"type": "image",
"img_path": "images/9d773ddee387dde374bf300908a09b160e2b0b049a0a7176f7edd776804cfd46.jpg",
"image_caption": [
"(b) $5\\%$ labelling ratio",
"Fig. 4: Comparison of model convergence speed. In (a) and (b), we compare PseCo against Soft Teacher [26]. Here, we reproduce Soft Teacher using their source codes. (c) depicts the comparison between $V_{1}$ and $V_{1} \\& V_{2}$ . In legend, the numbers in brackets refer to mAP. Performance is evaluated on the teacher."
],
"image_footnote": [],
"bbox": [416, 157, 576, 253],
"page_idx": 11
},
{
"type": "image",
"img_path": "images/c1a78674667259422965bd05254eb0975bd585d3a4c6e8cf21ef1b444662ddf6.jpg",
"image_caption": [
"(c) effects of view 2"
],
"image_footnote": [],
"bbox": [588, 157, 750, 253],
"page_idx": 11
},
{
"type": "table",
"img_path": "images/5709a275acefff46f45825794b6742d6b3cd58f50c91db2b7ba871dec0ec69f4.jpg",
"table_caption": [
"Table 2: Ablation studies on each component of our method. MSL represents Multi-view Scale-invariant Learning; NPL represents Noisy Pseudo box Learning. In MSL, $V_{1}$ and $V_{2}$ are constructed for label- and feature-level consistency, respectively. In NPL, PCV and PLA stand for Positive-proposal Consistency Voting and Prediction-guided Label Assignment, respectively."
],
"table_footnote": [],
"table_body": "<table><tr><td colspan=\"2\">MSL</td><td colspan=\"2\">NPL</td><td rowspan=\"2\">mAP</td><td rowspan=\"2\">\\( AP_{50} \\)</td><td rowspan=\"2\">\\( AP_{75} \\)</td></tr><tr><td>\\( V_1 \\)</td><td>\\( V_2 \\)</td><td>PCV</td><td>PLA</td></tr><tr><td colspan=\"4\"></td><td>26.8</td><td>44.9</td><td>28.4</td></tr><tr><td>✓</td><td></td><td></td><td></td><td>33.9(+7.1)</td><td>55.2</td><td>36.0</td></tr><tr><td>✓</td><td>✓</td><td></td><td></td><td>34.9(+8.1)</td><td>56.3</td><td>37.1</td></tr><tr><td>✓</td><td></td><td>✓</td><td></td><td>34.8(+8.0)</td><td>55.1</td><td>37.4</td></tr><tr><td>✓</td><td></td><td>✓</td><td>✓</td><td>35.7(+8.9)</td><td>56.4</td><td>38.4</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td></td><td>36.0(+9.2)</td><td>56.9</td><td>38.7</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>36.3(+9.5)</td><td>57.2</td><td>39.2</td></tr></table>",
"bbox": [289, 417, 712, 547],
"page_idx": 11
},
{
"type": "text",
"text": "4.4 Ablation Study",
"text_level": 1,
"bbox": [215, 574, 388, 590],
"page_idx": 11
},
{
"type": "text",
"text": "We conduct detailed ablation studies to verify key designs. All ablation studies are conducted on a single data fold from the $10\\%$ labelling ratio.",
"bbox": [212, 598, 785, 628],
"page_idx": 11
},
{
"type": "text",
"text": "Effect of individual component. In Tab. 2, we show effectiveness of each component step by step. When only using $10\\%$ labeled data as training examples, it obtains $26.8\\mathrm{mAP}$ . Next, we construct the semi-supervised baseline by applying $V_{1}$ on unlabeled data for label-level consistency learning. The baseline does not consider any adverse effects incurred by coarse pseudo boxes and obtains 33.9 mAP. Furthermore, by leveraging additional view $V_{2}$ , the feature-level scale-invariant learning is enabled, and an improvement of $+1.0\\mathrm{mAP}$ is found. On the other hand, to alleviate the issue of coarse pseudo boxes, we introduce PCV to suppress the inaccurate regression signals, improving the baseline from 33.9 to $34.8\\mathrm{mAP}$ . After that, we replace the traditional IoU-based label assignment strategy with the PLA and enjoy another $+0.9\\mathrm{mAP}$ gain. Finally, when combing MSL and NPL together, it achieves the best performance, 36.3 mAP.",
|
| 1201 |
+
"bbox": [
|
| 1202 |
+
212,
|
| 1203 |
+
628,
|
| 1204 |
+
787,
|
| 1205 |
+
809
|
| 1206 |
+
],
|
| 1207 |
+
"page_idx": 11
|
| 1208 |
+
},
|
| 1209 |
+
{
|
| 1210 |
+
"type": "text",
|
| 1211 |
+
"text": "Comparison with other regression methods. Scores of pseudo boxes can only indicate the confidence of predicted object category, thus they fail to reflect",
|
| 1212 |
+
"bbox": [
|
| 1213 |
+
212,
|
| 1214 |
+
809,
|
| 1215 |
+
785,
|
| 1216 |
+
839
|
| 1217 |
+
],
|
| 1218 |
+
"page_idx": 11
|
| 1219 |
+
},
{
"type": "page_number",
"text": "12",
"bbox": [217, 114, 235, 126],
"page_idx": 11
},
{
"type": "header",
"text": "Li et al.",
"bbox": [271, 114, 325, 126],
"page_idx": 11
},
{
"type": "table",
"img_path": "images/7d58023c799210d1bbe3df90031ba7ab8f2b51d6996ae33d1aa0a93522f5f274.jpg",
"table_caption": [
"Table 3: Analysis of Multi-view Scale-invariant learning, which contains both the label- and feature-level consistency.",
"(a) Study on label consistency."
],
"table_footnote": [],
"table_body": "<table><tr><td>method</td><td>mAP</td><td>APS</td><td>APM</td><td>APL</td></tr><tr><td>single-scale training</td><td>32.7</td><td>19.0</td><td>36.0</td><td>42.5</td></tr><tr><td>label consistency</td><td>33.9</td><td>19.1</td><td>37.2</td><td>44.4</td></tr></table>",
"bbox": [220, 205, 475, 247],
"page_idx": 12
},
{
"type": "table",
"img_path": "images/760a3441369a49dc12ef4a9036bdfaa7dfa050571c384ee405f353110bfac23c.jpg",
"table_caption": [
"(b) Study on feature consistency."
],
"table_footnote": [],
"table_body": "<table><tr><td>method</td><td>mAP</td><td>APS</td><td>APM</td><td>APL</td></tr><tr><td>vanilla multi-view training</td><td>33.9</td><td>20.9</td><td>37.2</td><td>43.0</td></tr><tr><td>feature consistency</td><td>34.9</td><td>22.1</td><td>38.2</td><td>43.6</td></tr></table>",
"bbox": [498, 205, 781, 246],
"page_idx": 12
},
{
"type": "text",
"text": "(a) Comparison between our PCV and other regression methods.",
"bbox": [215, 305, 457, 332],
"page_idx": 12
},
{
"type": "table",
"img_path": "images/4339d13096ed9aff6136e6e9bf9098e44805376abaa51b013c763a55d93ecdef.jpg",
"table_caption": [
"Table 4: Ablation studies related to Positive-proposal Consistency Voting (PCV) and Prediction-guided Label Assignment (PLA)."
],
"table_footnote": [],
"table_body": "<table><tr><td>method</td><td>mAP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td></tr><tr><td>abandon reg [14]</td><td>33.9</td><td>55.2</td><td>36.0</td></tr><tr><td>reg consistency [21]</td><td>34.2</td><td>55.1</td><td>36.5</td></tr><tr><td>box jittering [26]</td><td>34.5</td><td>54.9</td><td>36.9</td></tr><tr><td>PCV (ours)</td><td>34.8</td><td>55.1</td><td>37.4</td></tr></table>",
"bbox": [220, 333, 454, 400],
"page_idx": 12
},
{
"type": "text",
"text": "(b) Study on hyperparameter $\\alpha$",
"bbox": [475, 306, 620, 333],
"page_idx": 12
},
{
"type": "table",
"img_path": "images/bfc6e31f046aa60375ccc9f1926d7cab4b6ff48a59627b8e433fcd5462aa8688.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td>α</td><td>mAP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td></tr><tr><td>0</td><td>35.2</td><td>56.1</td><td>37.8</td></tr><tr><td>0.5</td><td>35.7</td><td>56.4</td><td>38.4</td></tr><tr><td>1.0</td><td>35.4</td><td>55.7</td><td>38.4</td></tr></table>",
"bbox": [480, 339, 617, 401],
"page_idx": 12
},
{
"type": "text",
"text": "(c) Study on IoU threshold $t$ .",
"bbox": [640, 306, 784, 332],
"page_idx": 12
},
{
"type": "table",
"img_path": "images/418b173cd16c2a1f33c1ee4b58cb93966157d36c6f4704344e9879a34832904a.jpg",
"table_caption": [],
"table_footnote": [],
"table_body": "<table><tr><td>t</td><td>mAP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td></tr><tr><td>0.3</td><td>35.7</td><td>56.2</td><td>38.6</td></tr><tr><td>0.4</td><td>35.7</td><td>56.4</td><td>38.4</td></tr><tr><td>0.5</td><td>35.5</td><td>56.1</td><td>38.3</td></tr></table>",
"bbox": [645, 340, 779, 400],
"page_idx": 12
},
{
"type": "text",
"text": "localization quality [10,14]. Naive confidence thresholding will introduce some coarse bounding boxes for regression tasks. To alleviate this issue, Unbiased Teacher [14] abandons regression losses on unlabeled data (denoted as \"abandon reg\"); Humble Teacher [21] aligns the regression predictions between the teacher and student on selected top- $\\mathcal{N}$ proposals (dubbed \"reg consistency\"); Soft Teacher [26] introduces the box jittering to calculate prediction variance on jittered pseudo boxes, which is used to filter out poorly localized pseudo boxes. In Tab. 4a, we compare our Positive-proposal Consistency Voting (PCV) with these methods. PCV obtains the best performance, concretely, on $\\mathrm{AP}_{75}$ , PCV surpasses two competitors, reg consistency and box jittering, by $0.9\\%$ and $0.5\\%$ , respectively. Although both PCV and box jittering [26] rely on prediction variance, there exist great differences. Firstly, PCV produces localization quality by intrinsic proposals, thus it avoids extra network forward on jittered boxes, enjoying higher training efficiency. Moreover, unlike the box jittering, which meticulously tunes the variance threshold, PCV is free of hyper-parameters.",
|
| 1355 |
+
"bbox": [
|
| 1356 |
+
212,
|
| 1357 |
+
430,
|
| 1358 |
+
787,
|
| 1359 |
+
657
|
| 1360 |
+
],
|
| 1361 |
+
"page_idx": 12
|
| 1362 |
+
},
|
| 1363 |
+
{
|
| 1364 |
+
"type": "text",
|
| 1365 |
+
"text": "Study on different hyper-parameters of PLA. We first investigate the performance using different $\\alpha$ in PLA, which balances the influence of classification score $(s)$ and localization precision $(u)$ in the proposal quality. Through a coarse search shown in Tab. 4b, we find that combining $s$ and $u$ yields better performance than using them individually. We then carry out experiments to study the robustness of the IoU threshold $t$ , which is used to build the candidate bag. From the Tab 4c, using lower $t$ to construct a bigger candidate bag is preferred.",
|
| 1366 |
+
"bbox": [
|
| 1367 |
+
212,
|
| 1368 |
+
657,
|
| 1369 |
+
787,
|
| 1370 |
+
762
|
| 1371 |
+
],
|
| 1372 |
+
"page_idx": 12
|
| 1373 |
+
},
|
| 1374 |
+
{
|
| 1375 |
+
"type": "text",
|
| 1376 |
+
"text": "Analysis of Multi-view Scale-invariant Learning. We propose the MSL to model scale invariance from the aspects of both label- and feature-level consistency. The studies on them are reported in Tab. 3. At first, we construct a single-scale training baseline without scale variance, where the input images for the teacher and student are kept on the same scale. It obtains $32.7\\mathrm{mAP}$ . Next,",
|
| 1377 |
+
"bbox": [
|
| 1378 |
+
212,
|
| 1379 |
+
763,
|
| 1380 |
+
787,
|
| 1381 |
+
839
|
| 1382 |
+
],
|
| 1383 |
+
"page_idx": 12
|
| 1384 |
+
},
|
| 1385 |
+
{
"type": "header",
"text": "PseCo for Semi-Supervised Object Detection",
"bbox": [431, 114, 730, 128],
"page_idx": 12
},
{
"type": "page_number",
"text": "13",
"bbox": [767, 114, 785, 126],
"page_idx": 12
},
{
"type": "image",
"img_path": "images/19b5107171d3d61704dc275ed24b4a93e97fb202ce144ea326f1c3ff45524647.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [245, 146, 367, 208],
"page_idx": 13
},
{
"type": "image",
"img_path": "images/c6c0cefd53232d4e1e7d33e41e85feb34489973130bbe6ad79c3216e0cac8d1b.jpg",
"image_caption": ["(a) Pseudo boxes produced by the teacher model"],
"image_footnote": [],
"bbox": [245, 210, 367, 273],
"page_idx": 13
},
{
"type": "image",
"img_path": "images/b5f3c8f822e5cb4d4a51545775cedf58cd46c84b0749e28b4345495b59408b0a.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [369, 146, 460, 208],
"page_idx": 13
},
{
"type": "image",
"img_path": "images/5092e9bca0f5ce45f726d07b7474d9725a579fb03ea06acca55a9f207790f604.jpg",
"image_caption": ["Fig. 5: (a) Some pseudo boxes (in yellow) detect objects missed by the ground-truths (in red). Numbers above the pseudo boxes refer to the predicted consistency $\\sigma$. (b) and (c) are the results of the supervised baseline and our method, respectively."],
"image_footnote": [],
"bbox": [369, 210, 460, 273],
"page_idx": 13
},
{
"type": "image",
"img_path": "images/066c011de4156cf64a7fe53a9a86b07b29b97035202b64417655d528057ae59a.jpg",
"image_caption": ["(b) Detection results of supervised baseline"],
"image_footnote": [],
"bbox": [467, 146, 560, 200],
"page_idx": 13
},
{
"type": "image",
"img_path": "images/230e96d0ce4f409e092e38b40b0a92f733e9a55025d339bd79fc929f11dd166b.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [562, 146, 645, 200],
"page_idx": 13
},
{
"type": "image",
"img_path": "images/3cff8af889c976e98f2444c46bc04b482ca76c058670f9cbbf3317a66ff98ff2.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [645, 146, 750, 200],
"page_idx": 13
},
{
"type": "image",
"img_path": "images/cede318918336ea5275f736793a905a374243700a554c97574e349568596a2b3.jpg",
"image_caption": ["(c) Detection results of our method"],
"image_footnote": [],
"bbox": [467, 217, 560, 273],
"page_idx": 13
},
{
"type": "image",
"img_path": "images/f436c2a68790dfac5062033c2b2fe39dd0f5548c462083ce24e40a66f8944254.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [562, 217, 645, 273],
"page_idx": 13
},
{
"type": "image",
"img_path": "images/b0a9294def2c413410da061c4a36250ac0a6bb7891778ce9ec033356200d3e09.jpg",
"image_caption": [],
"image_footnote": [],
"bbox": [645, 218, 751, 273],
"page_idx": 13
},
{
"type": "text",
"text": "we apply different scale jitters to the teacher and student inputs to implement label-level consistency, which surpasses single-scale training by $1.2\\mathrm{mAP}$. Based on the label consistency, we further introduce the view $V_{2}$ to perform feature consistency learning. It brings a further $+1.0\\%$ improvement, reaching $34.9\\mathrm{mAP}$. Apart from the performance gains, the feature consistency can also significantly boost the convergence speed, as depicted in Fig. 4(c). To validate that the improvements introduced by $V_{2}$ come from comprehensive scale-invariant learning rather than vanilla multi-view training, we also add an extra view $V_{2}^{\\prime}$ besides $V_{1}$, where $V_{2}^{\\prime}$ is downsampled from $V_{1}$ by $2\\mathrm{x}$ and performs label consistency as $V_{1}$ does. From Tab. 3b, vanilla multi-view training with only label consistency hardly brings improvements over the single $V_{1}$ (33.9 vs. 33.9 mAP).",
"bbox": [212, 363, 787, 527],
"page_idx": 13
},
{
"type": "text",
"text": "Effect of Focal Loss. In Tab. 5, we compare the Cross Entropy (CE) Loss and Focal Loss. Thanks to the Focal Loss, an improvement of $+0.6\\mathrm{mAP}$ is achieved over the CE Loss. On the other hand, even with the CE Loss, our PseCo still surpasses the Soft Teacher by a large margin, i.e., $1.7\\mathrm{mAP}$.",
"bbox": [215, 529, 545, 635],
"page_idx": 13
},
{
"type": "table",
"img_path": "images/1d406e919ffeea62d098b7b7a06a180fa1394224592727fc481a08e34d9ffaef.jpg",
"table_caption": ["Table 5: Ablation study on Focal Loss."],
"table_footnote": [],
"table_body": "<table><tr><td>method</td><td>mAP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td></tr><tr><td>PseCo w/ CE Loss</td><td>35.7</td><td>55.6</td><td>38.9</td></tr><tr><td>PseCo w/ Focal Loss</td><td>36.3</td><td>57.2</td><td>39.2</td></tr></table>",
"bbox": [560, 574, 781, 613],
"page_idx": 13
},
{
"type": "text",
"text": "5 Conclusion",
"text_level": 1,
"bbox": [215, 657, 359, 672],
"page_idx": 13
},
{
"type": "text",
"text": "In this work, we carefully analyze two key techniques of semi-supervised object detection (i.e., pseudo labeling and consistency training), and observe that these two techniques currently neglect some important properties of object detection. Motivated by this, we propose a new SSOD framework, PseCo, to integrate object detection properties into SSOD. PseCo consists of Noisy Pseudo box Learning (NPL) and Multi-view Scale-invariant Learning (MSL). In NPL, prediction-guided label assignment and positive-proposal consistency voting are proposed to perform robust label assignment and the regression task with noisy pseudo boxes, respectively. Based on the common label-level consistency, MSL additionally designs a novel feature-level scale-invariant learning, which is neglected in",
"bbox": [212, 688, 787, 840],
"page_idx": 13
},
{
"type": "page_number",
"text": "14",
"bbox": [217, 114, 235, 126],
"page_idx": 13
},
{
"type": "header",
"text": "Li et al.",
"bbox": [271, 114, 325, 126],
"page_idx": 13
},
{
"type": "text",
"text": "prior works. To validate the effectiveness of our method, extensive experiments are conducted on the COCO benchmark. Experimental results validate that PseCo surpasses the state-of-the-art methods by a large margin in both accuracy and efficiency.",
"bbox": [212, 146, 787, 193],
"page_idx": 14
},
{
"type": "text",
"text": "References",
"text_level": 1,
"bbox": [217, 214, 321, 231],
"page_idx": 14
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"1. Berthelot, D., Carlini, N., Goodfellow, I., Papernot, N., Oliver, A., Raffel, C.A.: Mixmatch: A holistic approach to semi-supervised learning. Advances in Neural Information Processing Systems 32 (2019)",
"2. Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: A large-scale hierarchical image database. In: 2009 IEEE conference on computer vision and pattern recognition. pp. 248-255. IEEE (2009)",
"3. Ge, Z., Liu, S., Li, Z., Yoshie, O., Sun, J.: Ota: Optimal transport assignment for object detection. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 303-312 (2021)",
"4. Grandvalet, Y., Bengio, Y.: Semi-supervised learning by entropy minimization. Advances in neural information processing systems 17 (2004)",
"5. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)",
"6. Jiang, B., Luo, R., Mao, J., Xiao, T., Jiang, Y.: Acquisition of localization confidence for accurate object detection. In: Proceedings of the European conference on computer vision (ECCV). pp. 784-799 (2018)",
"7. Lee, D.H., et al.: Pseudo-label: The simple and efficient semi-supervised learning method for deep neural networks. In: Workshop on challenges in representation learning, ICML. vol. 3, p. 896 (2013)",
"8. Li, B., Liu, Y., Wang, X.: Gradient harmonized single-stage detector. In: Proceedings of the AAAI conference on artificial intelligence. vol. 33, pp. 8577-8584 (2019)",
"9. Li, G., Li, X., Wang, Y., Zhang, S., Wu, Y., Liang, D.: Knowledge distillation for object detection via rank mimicking and prediction-guided feature imitation. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 36, pp. 1306-1313 (2022)",
"10. Li, X., Lv, C., Wang, W., Li, G., Yang, L., Yang, J.: Generalized focal loss: Towards efficient representation learning for dense object detection. IEEE Transactions on Pattern Analysis and Machine Intelligence (2022)",
"11. Lin, T.Y., Dollár, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2117-2125 (2017)",
"12. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dollár, P.: Focal loss for dense object detection. In: Proceedings of the IEEE international conference on computer vision. pp. 2980-2988 (2017)",
"13. Lin, T.Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Dollár, P., Zitnick, C.L.: Microsoft coco: Common objects in context. In: European conference on computer vision. pp. 740-755. Springer (2014)",
"14. Liu, Y.C., Ma, C.Y., He, Z., Kuo, C.W., Chen, K., Zhang, P., Wu, B., Kira, Z., Vajda, P.: Unbiased teacher for semi-supervised object detection. arXiv preprint arXiv:2102.09480 (2021)",
"15. Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings"
],
"bbox": [225, 244, 785, 839],
"page_idx": 14
},
{
"type": "header",
"text": "PseCo for Semi-Supervised Object Detection",
"bbox": [431, 114, 730, 128],
"page_idx": 14
},
{
"type": "page_number",
"text": "15",
"bbox": [767, 116, 785, 126],
"page_idx": 14
},
{
"type": "list",
"sub_type": "ref_text",
"list_items": [
"of the IEEE/CVF International Conference on Computer Vision. pp. 10012-10022 (2021)",
"16. Qi, L., Kuen, J., Gu, J., Lin, Z., Wang, Y., Chen, Y., Li, Y., Jia, J.: Multi-scale aligned distillation for low-resolution detection. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14443-14453 (2021)",
"17. Ren, S., He, K., Girshick, R., Sun, J.: Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems 28 (2015)",
"18. Scudder, H.: Probability of error of some adaptive pattern-recognition machines. IEEE Transactions on Information Theory 11(3), 363-371 (1965)",
"19. Sohn, K., Berthelot, D., Carlini, N., Zhang, Z., Zhang, H., Raffel, C.A., Cubuk, E.D., Kurakin, A., Li, C.L.: Fixmatch: Simplifying semi-supervised learning with consistency and confidence. Advances in Neural Information Processing Systems 33, 596-608 (2020)",
"20. Sohn, K., Zhang, Z., Li, C.L., Zhang, H., Lee, C.Y., Pfister, T.: A simple semi-supervised learning framework for object detection. arXiv preprint arXiv:2005.04757 (2020)",
"21. Tang, Y., Chen, W., Luo, Y., Zhang, Y.: Humble teachers teach better students for semi-supervised object detection. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3132-3141 (2021)",
"22. Tarvainen, A., Valpola, H.: Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. Advances in neural information processing systems 30 (2017)",
"23. Wei, F., Gao, Y., Wu, Z., Hu, H., Lin, S.: Aligning pretraining for detection via object-level contrastive learning. Advances in Neural Information Processing Systems 34 (2021)",
"24. Xie, Q., Dai, Z., Hovy, E., Luong, T., Le, Q.: Unsupervised data augmentation for consistency training. Advances in Neural Information Processing Systems 33, 6256-6268 (2020)",
"25. Xie, Q., Luong, M.T., Hovy, E., Le, Q.V.: Self-training with noisy student improves imagenet classification. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 10687-10698 (2020)",
"26. Xu, M., Zhang, Z., Hu, H., Wang, J., Wang, L., Wei, F., Bai, X., Liu, Z.: End-to-end semi-supervised object detection with soft teacher. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 3060-3069 (2021)",
"27. Yang, Q., Wei, X., Wang, B., Hua, X.S., Zhang, L.: Interactive self-training with mean teachers for semi-supervised object detection. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5941-5950 (2021)",
"28. Zhang, B., Wang, Y., Hou, W., Wu, H., Wang, J., Okumura, M., Shinozaki, T.: Flexmatch: Boosting semi-supervised learning with curriculum pseudo labeling. Advances in Neural Information Processing Systems 34 (2021)",
"29. Zhang, H., Wang, Y., Dayoub, F., Sunderhauf, N.: Varifocalnet: An iou-aware dense object detector. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8514-8523 (2021)",
"30. Zhou, Q., Yu, C., Wang, Z., Qian, Q., Li, H.: Instant-teaching: An end-to-end semi-supervised object detection framework. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4081-4090 (2021)"
],
"bbox": [215, 147, 785, 811],
"page_idx": 15
},
{
"type": "page_number",
"text": "16",
"bbox": [217, 114, 235, 126],
"page_idx": 15
},
{
"type": "header",
"text": "Li et al.",
"bbox": [271, 114, 325, 126],
"page_idx": 15
}
]
2203.16xxx/2203.16317/b564aed0-5bb1-4ebd-a32c-a4a45cb20e3c_model.json
ADDED
@@ -0,0 +1,2201 @@
[
[
{
"type": "aside_text",
"bbox": [0.023, 0.271, 0.058, 0.707],
"angle": 270,
"content": "arXiv:2203.16317v2 [cs.CV] 20 Jul 2022"
},
{
"type": "title",
"bbox": [0.228, 0.142, 0.776, 0.187],
"angle": 0,
"content": "PseCo: Pseudo Labeling and Consistency Training for Semi-Supervised Object Detection"
},
{
"type": "text",
"bbox": [0.254, 0.212, 0.75, 0.244],
"angle": 0,
"content": "Gang Li\\(^{1,2}\\), Xiang Li\\(^{1\\star}\\), Yujie Wang\\(^{2}\\), Yichao Wu\\(^{2}\\), Ding Liang\\(^{2}\\), and Shanshan Zhang\\(^{1\\star}\\)"
},
{
"type": "text",
"bbox": [0.341, 0.255, 0.662, 0.269],
"angle": 0,
"content": "\\(^{1}\\) Nanjing University of Science and Technology"
},
{
"type": "text",
"bbox": [0.425, 0.269, 0.576, 0.282],
"angle": 0,
"content": "\\(^{2}\\) SenseTime Research"
},
{
"type": "text",
"bbox": [0.29, 0.284, 0.714, 0.298],
"angle": 0,
"content": "{gang.li, xiang.li.implus, shanshan.zhang}@njust.edu.cn"
},
{
"type": "text",
"bbox": [0.332, 0.298, 0.673, 0.311],
"angle": 0,
"content": "{wangyujie,wuyichao,liangding}@sensetime.com"
},
{
"type": "text",
"bbox": [0.261, 0.344, 0.74, 0.705],
"angle": 0,
"content": "Abstract. In this paper, we delve into two key techniques in Semi-Supervised Object Detection (SSOD), namely pseudo labeling and consistency training. We observe that these two techniques currently neglect some important properties of object detection, hindering efficient learning on unlabeled data. Specifically, for pseudo labeling, existing works only focus on the classification score yet fail to guarantee the localization precision of pseudo boxes; for consistency training, the widely adopted random-resize training only considers the label-level consistency but misses the feature-level one, which also plays an important role in ensuring scale invariance. To address the problems incurred by noisy pseudo boxes, we design Noisy Pseudo box Learning (NPL) that includes Prediction-guided Label Assignment (PLA) and Positive-proposal Consistency Voting (PCV). PLA relies on model predictions to assign labels, making it robust to even coarse pseudo boxes, while PCV leverages the regression consistency of positive proposals to reflect the localization quality of pseudo boxes. Furthermore, in consistency training, we propose Multi-view Scale-invariant Learning (MSL) that includes mechanisms of both label- and feature-level consistency, where feature consistency is achieved by aligning shifted feature pyramids between two images with identical content but varied scales. On the COCO benchmark, our method, termed PSEudo labeling and COnsistency training (PseCo), outperforms the SOTA (Soft Teacher) by 2.0, 1.8, 2.0 points under \\(1\\%\\), \\(5\\%\\), and \\(10\\%\\) labelling ratios, respectively. It also significantly improves the learning efficiency for SSOD, e.g., PseCo halves the training time of the SOTA approach but achieves even better performance. Code is available at https://github.com/ligang-cs/PseCo."
},
{
"type": "text",
"bbox": [0.263, 0.718, 0.645, 0.732],
"angle": 0,
"content": "Keywords: Semi-supervised Learning, Object Detection"
},
{
"type": "title",
"bbox": [0.217, 0.757, 0.375, 0.772],
"angle": 0,
"content": "1 Introduction"
},
{
"type": "text",
"bbox": [0.214, 0.787, 0.791, 0.818],
"angle": 0,
"content": "With the rapid development of deep learning, many computer vision tasks achieve significant improvements, such as image classification [2], object detection [15,1,9],"
},
{
"type": "page_footnote",
"bbox": [0.218, 0.825, 0.386, 0.84],
"angle": 0,
"content": "* Corresponding author."
}
],
[
{
"type": "page_number",
"bbox": [0.218, 0.116, 0.23, 0.127],
"angle": 0,
"content": "2"
},
{
"type": "header",
"bbox": [0.272, 0.115, 0.327, 0.127],
"angle": 0,
"content": "Li et al."
},
{
"type": "image",
"bbox": [0.232, 0.162, 0.377, 0.249],
"angle": 0,
"content": null
},
{
"type": "image_caption",
"bbox": [0.253, 0.252, 0.359, 0.261],
"angle": 0,
"content": "(a) Precision of pseudo boxes"
},
{
"type": "image",
"bbox": [0.379, 0.162, 0.527, 0.248],
"angle": 0,
"content": null
},
{
"type": "image_caption",
"bbox": [0.397, 0.248, 0.52, 0.265],
"angle": 0,
"content": "(b) Relations between real quality and prediction consistency"
},
{
"type": "image",
"bbox": [0.53, 0.146, 0.767, 0.248],
"angle": 0,
"content": null
},
{
"type": "image_caption",
"bbox": [0.547, 0.251, 0.752, 0.261],
"angle": 0,
"content": "(c) Wrong label assignment brought by a noisy pseudo box"
},
{
"type": "image_caption",
"bbox": [0.214, 0.269, 0.788, 0.345],
"angle": 0,
"content": "Fig. 1: (a) The precision of pseudo boxes under various IoU thresholds. (b) The scatter diagram of the relation between the prediction consistency and the true localization quality. Some dots falling in the orange ellipse are caused by annotation errors; we show some examples in Fig. 5. (c) One specific example to demonstrate that noisy pseudo boxes will mislead label assignment."
},
{
"type": "text",
"bbox": [0.214, 0.381, 0.788, 0.502],
"angle": 0,
"content": "etc. Behind these advances, plenty of annotated data plays an important role [23]. However, accurately annotating large-scale data is usually time-consuming and expensive, especially for object detection, which requires annotating precise bounding boxes for each instance, besides category labels. Therefore, employing easily accessible unlabeled data to facilitate model training with limited annotated data is a promising direction, named Semi-Supervised Learning, where labeled data and unlabeled data are combined as training examples."
},
{
"type": "text",
"bbox": [0.214, 0.505, 0.789, 0.777],
"angle": 0,
"content": "Semi-Supervised Image Classification (SSIC) has been widely investigated in previous literature, and the learning paradigms on unlabeled data can be roughly divided into two categories: pseudo labeling [7,18] and consistency training [24,22], each of which receives much attention. Recently, some works (e.g., FixMatch [19], FlexMatch [28]) attempt to combine these two techniques into one framework and achieve state-of-the-art performance. In Semi-Supervised Object Detection (SSOD), some works borrow the key techniques (e.g. pseudo labeling, consistency training) from SSIC and directly apply them to SSOD. Although these works [30,26] obtain gains from unlabeled data, they neglect some important properties of object detection, resulting in sub-optimal results. On the one hand, compared with image classification, pseudo labels in object detection are more complicated, containing both category and location information. On the other hand, object detection is required to capture a stronger scale-invariant ability than image classification, as it needs to carefully deal with targets of rich scales. In this work, we present an SSOD framework, termed PSEudo labeling and COnsistency training (PseCo), to integrate object detection properties into SSOD, making pseudo labeling and consistency training work better for object detection tasks."
},
{
"type": "text",
"bbox": [0.214, 0.78, 0.789, 0.841],
"angle": 0,
"content": "In pseudo labeling, the model produces one-hot pseudo labels on unlabeled data by itself, and only pseudo labels whose scores are above the predefined score threshold are retained. As for object detection, the pseudo label consists of both category labels and bounding boxes. Although category labels can be guaranteed"
}
],
[
{
"type": "header",
"bbox": [0.433, 0.115, 0.732, 0.129],
"angle": 0,
"content": "PseCo for Semi-Supervised Object Detection"
},
{
"type": "page_number",
"bbox": [0.775, 0.117, 0.785, 0.127],
"angle": 0,
"content": "3"
},
{
"type": "text",
"bbox": [0.214, 0.147, 0.788, 0.344],
"angle": 0,
"content": "to be accurate via setting a high score threshold, the localization quality of pseudo boxes fails to be measured and guaranteed. It has been validated in previous works that the classification score is not strongly correlated with the precision of box localization [10,29,6,26]. In Fig. 1(a), we compute the precision of pseudo boxes under various Intersection-over-Union (IoU) thresholds, via comparing the produced pseudo boxes with ground-truths. Under a loose criterion \\((\\mathrm{IoU} = 0.3)\\), the precision can reach \\(81\\%\\), but it drops to \\(31\\%\\) when we lift the IoU threshold to 0.9. This dramatic precision gap indicates that coarse pseudo boxes whose IoUs fall in [0.3, 0.9] account for \\(50\\%\\). If these noisy pseudo boxes are used as targets to train the detector, they will inevitably hinder the optimization, resulting in slow convergence and inefficient learning on unlabeled data. Furthermore, we analyze the negative effects brought by noisy pseudo boxes on the classification and regression tasks as follows, respectively."
},
{
"type": "text",
"bbox": [0.214, 0.348, 0.788, 0.485],
"angle": 0,
"content": "For the classification task, noisy pseudo boxes will mislead the label assignment, where labels are assigned based on IoUs between proposals and gt boxes (pseudo boxes in our case). As shown in Fig. 1(c), a background proposal is taken as foreground due to a large IoU value with a poorly localized pseudo box. As a result, the IoU-based label assignment will fail on unlabeled data and confuse the decision boundaries between foreground and background. To address this issue, we design a prediction-guided label assignment strategy for unlabeled data, which assigns labels based on predictions of the teacher, instead of IoUs with pseudo boxes as before, making it robust to even poorly localized pseudo boxes."
},
{
"type": "text",
"bbox": [0.214, 0.489, 0.788, 0.626],
"angle": 0,
"content": "For the regression task, it is necessary to measure the localization quality of pseudo boxes. We propose a simple yet effective method to achieve this, named Positive-proposal Consistency Voting. We empirically find that the regression consistency of positive proposals is capable of reflecting the localization quality of the corresponding pseudo boxes. In Fig. 1(b), we visualize the relations between the predicted consistency and the true IoUs, where a clear positive correlation can be found. Therefore, it is reasonable to employ the estimated localization quality (i.e., the regression consistency of positive proposals) to re-weight the regression losses, making precise pseudo boxes contribute more to the regression supervision."
},
{
"type": "text",
"bbox": [0.214, 0.629, 0.788, 0.842],
"angle": 0,
"content": "Apart from pseudo labeling, we also analyze the consistency training for SSOD. Consistency training enforces the model to generate similar predictions when fed with perturbed versions of the same image, where perturbations can be implemented by injecting various data augmentations. Through consistency training, models can become invariant to different input transformations. Current SSOD methods [30,26,14] only apply off-the-shelf, general data augmentations, most of which are borrowed from image classification. However, different from classification, object detection is an instance-based task, where object scales usually vary in a large range, and detectors are expected to handle all scale ranges. Therefore, learning a strong scale-invariant ability via consistency training is important. For scale consistency, the model should be able to predict the same boxes for input images with identical contents but varied scales. To ensure label consistency, random-resizing is a common augmentation, which resizes input images and gt boxes according to a randomly generated resize ratio. Be"
}
],
[
{
"type": "page_number",
"bbox": [0.218, 0.116, 0.23, 0.127],
"angle": 0,
"content": "4"
},
{
"type": "header",
"bbox": [0.272, 0.115, 0.326, 0.127],
"angle": 0,
"content": "Li et al."
},
{
"type": "text",
"bbox": [0.214, 0.147, 0.784, 0.252],
"angle": 0,
"content": "sides label consistency, feature consistency also plays an important role in scale-invariant learning, but it is neglected in previous works. Thanks to the pyramid structure of popular backbone networks, feature alignment can be easily implemented by shifting feature pyramid levels according to the scale changes. Motivated by this, we introduce a brand new data augmentation technique, named Multi-view Scale-invariant Learning (MSL), to learn label-level and feature-level consistency simultaneously in a simple framework."
},
{
"type": "text",
"bbox": [0.214, 0.253, 0.785, 0.419],
"angle": 0,
"content": "In summary, we delve into two key techniques of semi-supervised learning (i.e., pseudo labeling and consistency training) for SSOD, and integrate object detection properties into them. On the COCO benchmark, our PseCo outperforms the state-of-the-art methods by a large margin; for example, under the \\(10\\%\\) labelling ratio, it improves a \\(26.9\\%\\) mAP baseline to \\(36.1\\%\\) mAP, surpassing previous methods by at least \\(2.0\\%\\). When labeled data is abundant, i.e., we use the full COCO training set as labeled data and the extra 123K unlabeled2017 images as unlabeled data, our PseCo improves the \\(41.0\\%\\) mAP baseline by \\(+5.1\\%\\), reaching \\(46.1\\%\\) mAP and establishing a new state of the art. Moreover, PseCo also significantly boosts the convergence speed, e.g., PseCo halves the training time of the SOTA (Soft Teacher [26]) but achieves even better performance."
},
{
"type": "title",
"bbox": [0.216, 0.444, 0.396, 0.459],
"angle": 0,
"content": "2 Related Works"
},
{
"type": "text",
"bbox": [0.214, 0.478, 0.785, 0.734],
"angle": 0,
"content": "Semi-supervised learning in image classification. Semi-supervised learning can be categorized into two groups: pseudo labeling (also called self-training) and consistency training, and previous methods design learning paradigms based on one of them. Pseudo labeling [7,18,4,25] iteratively adds unlabeled data into the training procedure with pseudo labels annotated by an initially trained network. Here, only model predictions with high confidence will be transformed into the one-hot format and become pseudo labels. Noisy Student Training [25] injects noise into unlabeled data training, which equips the model with stronger generalization through training on the combination of labeled and unlabeled data. On the other hand, consistency training [22,24,1] relies on the assumption that the model should be invariant to small changes in input images or model hidden states. It enforces the model to make similar predictions on perturbed versions of the same image, where perturbations can be implemented by injecting noise into images and hidden states. UDA [24] validates that advanced data augmentations play a crucial role in consistency training, and observes that the strong augmentations found in supervised learning can also lead to obvious improvements in semi-supervised learning."
},
{
"type": "text",
"bbox": [0.214, 0.735, 0.784, 0.84],
"angle": 0,
"content": "Recently, some works [19,28] attempt to combine pseudo labeling and consistency training, achieving state-of-the-art performance. FixMatch [19] first applies weak and strong augmentations to the same input image, respectively, to generate two versions, then uses the weakly-augmented version to generate hard pseudo labels. The model is trained on the strongly-augmented versions to align its predictions with the pseudo labels. Based on FixMatch, FlexMatch [28] proposes to adjust score thresholds for different classes during the generation of pseudo"
}
],
[
{
"type": "header",
"bbox": [0.433, 0.115, 0.732, 0.129],
"angle": 0,
"content": "PseCo for Semi-Supervised Object Detection"
},
{
"type": "page_number",
"bbox": [0.775, 0.117, 0.785, 0.127],
"angle": 0,
"content": "5"
},
{
"type": "text",
"bbox": [0.214, 0.147, 0.783, 0.22],
"angle": 0,
"content": "labels, based on curriculum learning. It has been widely validated that pseudo labeling and consistency training are two powerful techniques in semi-supervised image classification; hence, in this work, we attempt to integrate object detection properties into them and make them work better for semi-supervised object detection."
},
{
"type": "text",
"bbox": [0.214, 0.223, 0.788, 0.587],
"angle": 0,
"content": "Semi-supervised learning in object detection. STAC [20] is the first attempt to apply pseudo labeling and consistency training based on strong data augmentations to semi-supervised object detection; however, it adopts two-stage training as in Noisy Student Training [25], which prevents the pseudo labels from being updated along with model training and limits the performance. After STAC, [26,30,21,27,14] borrow the idea of Exponential Moving Average (EMA) from Mean Teacher [22], and update the teacher model after each training iteration to generate instant pseudo labels, realizing an end-to-end framework. To pursue high-quality pseudo labels and overcome confirmation bias, Instant-Teaching [30] and ISMT [27] introduce model ensembling to aggregate predictions from multiple, differently initialized teacher models; similarly, Humble Teacher [21] ensembles the teacher model predictions by taking both the image and its horizontally flipped version as input. Although these ensemble methods can promote the quality of pseudo labels, they also introduce considerable computation overhead. Unbiased Teacher [14] replaces the traditional Cross-entropy loss with Focal loss [12] to alleviate the class-imbalanced pseudo-labeling issue, which shows strong performance when labeled data is scarce. Soft Teacher [26] uses teacher classification scores as classification loss weights, to suppress negative effects from underlying objects missed by pseudo labels. Different from previous methods, our work carefully analyzes whether pseudo labeling and consistency training can be directly applied to SSOD, and gets a negative answer. To integrate object detection properties into these two techniques, we introduce Noisy Pseudo box Learning and Multi-view Scale-invariant Learning, obtaining much better performance and faster convergence."
},
{
"type": "title",
"bbox": [0.216, 0.613, 0.331, 0.629],
"angle": 0,
"content": "3 Method"
},
{
"type": "text",
"bbox": [0.214, 0.648, 0.788, 0.71],
"angle": 0,
"content": "We show the framework of our PseCo in Fig. 2. On the unlabeled data, PseCo consists of Noisy Pseudo box Learning (NPL) and Multi-view Scale-invariant Learning (MSL). In the following parts, we introduce the basic framework, the proposed NPL, and MSL, respectively."
},
{
"type": "title",
"bbox": [0.216, 0.736, 0.436, 0.751],
"angle": 0,
"content": "3.1 The basic framework"
},
{
"type": "text",
"bbox": [0.214, 0.765, 0.788, 0.842],
"angle": 0,
"content": "At first, we directly apply standard pseudo labeling and consistency training to SSOD, building our basic framework. Following previous works [26,14,30], we also adopt the teacher-student training scheme, where the teacher model is built from the student model at every training iteration via Exponential Moving Average (EMA). We randomly sample labeled data and unlabeled data based on a sample"
}
],
[
|
| 508 |
+
{
|
| 509 |
+
"type": "page_number",
|
| 510 |
+
"bbox": [
|
| 511 |
+
0.218,
|
| 512 |
+
0.116,
|
| 513 |
+
0.23,
|
| 514 |
+
0.127
|
| 515 |
+
],
|
| 516 |
+
"angle": 0,
|
| 517 |
+
"content": "6"
|
| 518 |
+
},
|
| 519 |
+
{
|
| 520 |
+
"type": "header",
|
| 521 |
+
"bbox": [
|
| 522 |
+
0.272,
|
| 523 |
+
0.115,
|
| 524 |
+
0.327,
|
| 525 |
+
0.127
|
| 526 |
+
],
|
| 527 |
+
"angle": 0,
|
| 528 |
+
"content": "Li et al."
|
| 529 |
+
},
|
| 530 |
+
{
|
| 531 |
+
"type": "image",
|
| 532 |
+
"bbox": [
|
| 533 |
+
0.248,
|
| 534 |
+
0.145,
|
| 535 |
+
0.753,
|
| 536 |
+
0.36
|
| 537 |
+
],
|
| 538 |
+
"angle": 0,
|
| 539 |
+
"content": null
|
| 540 |
+
},
|
| 541 |
+
{
|
| 542 |
+
"type": "image_caption",
|
| 543 |
+
"bbox": [
|
| 544 |
+
0.215,
|
| 545 |
+
0.363,
|
| 546 |
+
0.788,
|
| 547 |
+
0.424
|
| 548 |
+
],
|
| 549 |
+
"angle": 0,
|
| 550 |
+
"content": "Fig. 2: The framework of our PseCo. Each training batch consists of both labeled and unlabeled images. On the unlabeled images, the student model trains on view \\( V_{1} \\) and \\( V_{2} \\) at the same time, taking the same pseudo boxes as supervisions. View \\( V_{0} \\) refers to input images for the teacher model."
|
| 551 |
+
},
|
| 552 |
+
{
|
| 553 |
+
"type": "text",
|
| 554 |
+
"bbox": [
|
| 555 |
+
0.215,
|
| 556 |
+
0.455,
|
| 557 |
+
0.788,
|
| 558 |
+
0.486
|
| 559 |
+
],
|
| 560 |
+
"angle": 0,
|
| 561 |
+
"content": "ratio to form the training batch. On the labeled data, the student model is trained in a regular manner, supervised by the ground-truth boxes:"
|
| 562 |
+
},
|
| 563 |
+
{
|
| 564 |
+
"type": "equation",
|
| 565 |
+
"bbox": [
|
| 566 |
+
0.437,
|
| 567 |
+
0.497,
|
| 568 |
+
0.786,
|
| 569 |
+
0.517
|
| 570 |
+
],
|
| 571 |
+
"angle": 0,
|
| 572 |
+
"content": "\\[\n\\mathcal {L} ^ {l} = \\mathcal {L} _ {c l s} ^ {l} + \\mathcal {L} _ {r e g} ^ {l}. \\tag {1}\n\\]"
|
| 573 |
+
},
|
| 574 |
+
{
|
| 575 |
+
"type": "text",
|
| 576 |
+
"bbox": [
|
| 577 |
+
0.214,
|
| 578 |
+
0.528,
|
| 579 |
+
0.788,
|
| 580 |
+
0.679
|
| 581 |
+
],
|
| 582 |
+
"angle": 0,
|
| 583 |
+
"content": "On the unlabeled data, we firstly apply weak data augmentations (e.g. horizontal flip, random resizing) to input images, and then feed them to the teacher model for pseudo label generation. Considering the detection boxes tend to be dense even after NMS, we set a score threshold \\(\\tau\\) and only retain boxes with scores above \\(\\tau\\) as pseudo labels. After that, strong augmentations (e.g. cutout, rotation, brightness jitter)<sup>3</sup> will be performed on the input image to generate the training example for student model. Since high classification scores do not lead to precise localization, we abandon bounding box regression on unlabeled data, as done in [14]. Actually, applying the box regression loss on unlabeled data will cause unstable training in our experiments."
|
| 584 |
+
},
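A minimal sketch of the two mechanics just described, assuming PyTorch: the EMA teacher update and score-threshold pseudo-label filtering. The function names and tensor layout are illustrative, not taken from the PseCo codebase.

```python
import torch

@torch.no_grad()
def update_teacher_ema(teacher, student, momentum=0.999):
    """Rebuild the teacher as an exponential moving average of the student.
    (A full implementation would also track buffers such as BN statistics.)"""
    for t_p, s_p in zip(teacher.parameters(), student.parameters()):
        t_p.mul_(momentum).add_(s_p, alpha=1.0 - momentum)

def filter_pseudo_labels(boxes, scores, labels, tau=0.5):
    """Keep only teacher detections (post-NMS) whose score exceeds tau."""
    keep = scores > tau
    return boxes[keep], scores[keep], labels[keep]
```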
|
| 585 |
+
{
|
| 586 |
+
"type": "text",
|
| 587 |
+
"bbox": [
|
| 588 |
+
0.214,
|
| 589 |
+
0.68,
|
| 590 |
+
0.788,
|
| 591 |
+
0.801
|
| 592 |
+
],
|
| 593 |
+
"angle": 0,
|
| 594 |
+
"content": "Foreground-background imbalance [8,12] is an intrinsic issue in object detection, and it gets worse under the semi-supervised setting. A high score threshold \\(\\tau\\) is usually adopted to guarantee the precision of pseudo labels, but it also results in scarcity of pseudo labels, aggravating the imbalance of foreground/background. Moreover, there also exists foreground-foreground imbalance, exactly, training examples from some specific categories can be limited when labeled data is scarce, which makes the model prone to predict the dominant classes, causing biased prediction. To alleviate these imbalance issues, we"
|
| 595 |
+
},
|
| 596 |
+
{
|
| 597 |
+
"type": "page_footnote",
|
| 598 |
+
"bbox": [
|
| 599 |
+
0.218,
|
| 600 |
+
0.811,
|
| 601 |
+
0.787,
|
| 602 |
+
0.84
|
| 603 |
+
],
|
| 604 |
+
"angle": 0,
|
| 605 |
+
"content": "<sup>3</sup> We adopt the same data augmentations as Soft Teacher [26], please refer to [26] for more augmentation details."
|
| 606 |
+
}
|
| 607 |
+
],
|
| 608 |
+
[
|
| 609 |
+
{
|
| 610 |
+
"type": "header",
|
| 611 |
+
"bbox": [
|
| 612 |
+
0.433,
|
| 613 |
+
0.115,
|
| 614 |
+
0.732,
|
| 615 |
+
0.129
|
| 616 |
+
],
|
| 617 |
+
"angle": 0,
|
| 618 |
+
"content": "PseCo for Semi-Supervised Object Detection"
|
| 619 |
+
},
|
| 620 |
+
{
|
| 621 |
+
"type": "page_number",
|
| 622 |
+
"bbox": [
|
| 623 |
+
0.775,
|
| 624 |
+
0.117,
|
| 625 |
+
0.785,
|
| 626 |
+
0.127
|
| 627 |
+
],
|
| 628 |
+
"angle": 0,
|
| 629 |
+
"content": "7"
|
| 630 |
+
},
|
| 631 |
+
{
|
| 632 |
+
"type": "text",
|
| 633 |
+
"bbox": [
|
| 634 |
+
0.214,
|
| 635 |
+
0.147,
|
| 636 |
+
0.784,
|
| 637 |
+
0.178
|
| 638 |
+
],
|
| 639 |
+
"angle": 0,
|
| 640 |
+
"content": "follow the practice of Unbiased Teacher [14], and replace the standard cross-entropy loss with focal loss [12]:"
|
| 641 |
+
},
|
| 642 |
+
{
|
| 643 |
+
"type": "equation",
|
| 644 |
+
"bbox": [
|
| 645 |
+
0.308,
|
| 646 |
+
0.19,
|
| 647 |
+
0.786,
|
| 648 |
+
0.23
|
| 649 |
+
],
|
| 650 |
+
"angle": 0,
|
| 651 |
+
"content": "\\[\n\\mathcal {L} _ {c l s} ^ {u} = - \\alpha_ {t} (1 - p _ {t}) ^ {\\gamma} \\log \\left(p _ {t}\\right), p _ {t} = \\left\\{ \\begin{array}{l l} p, & i f y = 1, \\\\ 1 - p, & o t h e r w i s e, \\end{array} \\right. \\tag {2}\n\\]"
|
| 652 |
+
},
|
| 653 |
+
{
|
| 654 |
+
"type": "text",
|
| 655 |
+
"bbox": [
|
| 656 |
+
0.215,
|
| 657 |
+
0.239,
|
| 658 |
+
0.784,
|
| 659 |
+
0.269
|
| 660 |
+
],
|
| 661 |
+
"angle": 0,
|
| 662 |
+
"content": "where parameters \\(\\alpha_{t}\\) and \\(\\gamma\\) adopt default settings in original focal loss paper [12]. The overall loss function is formulated as:"
|
| 663 |
+
},
|
| 664 |
+
{
|
| 665 |
+
"type": "equation",
|
| 666 |
+
"bbox": [
|
| 667 |
+
0.45,
|
| 668 |
+
0.282,
|
| 669 |
+
0.785,
|
| 670 |
+
0.298
|
| 671 |
+
],
|
| 672 |
+
"angle": 0,
|
| 673 |
+
"content": "\\[\n\\mathcal {L} = \\mathcal {L} ^ {l} + \\beta \\mathcal {L} ^ {u}, \\tag {3}\n\\]"
|
| 674 |
+
},
|
| 675 |
+
{
|
| 676 |
+
"type": "text",
|
| 677 |
+
"bbox": [
|
| 678 |
+
0.215,
|
| 679 |
+
0.31,
|
| 680 |
+
0.784,
|
| 681 |
+
0.385
|
| 682 |
+
],
|
| 683 |
+
"angle": 0,
|
| 684 |
+
"content": "where \\(\\beta\\) is used to control the contribution of unlabeled data. In theory, our proposed method is independent of the detection framework and can be applied on both one-stage and two-stage detectors. However, considering all previous methods are based on Faster R-CNN [17] detection framework, for a fair comparison with them, we also adopt Faster R-CNN as the default detection framework."
|
| 685 |
+
},
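A sketch of the unsupervised classification loss of Eq. (2) and the combination of Eq. (3), assuming PyTorch, sigmoid-based per-class scores, and the default \( \alpha_t = 0.25 \), \( \gamma = 2 \) from [12]; all names are illustrative.

```python
import torch
import torch.nn.functional as F

def focal_loss(logits, targets, alpha=0.25, gamma=2.0):
    """Binary focal loss (Eq. 2). `logits` are raw per-class scores and
    `targets` are 0/1 labels of the same shape."""
    p = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p_t = p * targets + (1 - p) * (1 - targets)            # p if y=1, else 1-p
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1 - p_t) ** gamma * ce).mean()      # ce == -log(p_t)

def total_loss(loss_labeled, loss_unlabeled, beta=4.0):
    """Overall objective (Eq. 3); beta = 4.0 matches the settings in Sec. 4.2."""
    return loss_labeled + beta * loss_unlabeled
```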
|
| 686 |
+
{
|
| 687 |
+
"type": "title",
|
| 688 |
+
"bbox": [
|
| 689 |
+
0.216,
|
| 690 |
+
0.408,
|
| 691 |
+
0.492,
|
| 692 |
+
0.423
|
| 693 |
+
],
|
| 694 |
+
"angle": 0,
|
| 695 |
+
"content": "3.2 Noisy Pseudo Box Learning"
|
| 696 |
+
},
|
| 697 |
+
{
|
| 698 |
+
"type": "text",
|
| 699 |
+
"bbox": [
|
| 700 |
+
0.214,
|
| 701 |
+
0.432,
|
| 702 |
+
0.787,
|
| 703 |
+
0.538
|
| 704 |
+
],
|
| 705 |
+
"angle": 0,
|
| 706 |
+
"content": "In SSOD, pseudo labels contain both category and location. Since the score of pseudo labels can only indicate the confidence of pseudo box categories, the localization quality of pseudo boxes is not guaranteed. Imprecise pseudo boxes will mislead the label assignment and regression task, making learning on unlabeled data inefficient. Motivated by this, we introduce Prediction-guided Label Assignment and Positive-proposal Consistency Voting to reduce negative effects on the label assignment and regression task, respectively."
|
| 707 |
+
},
|
| 708 |
+
{
|
| 709 |
+
"type": "text",
|
| 710 |
+
"bbox": [
|
| 711 |
+
0.214,
|
| 712 |
+
0.539,
|
| 713 |
+
0.787,
|
| 714 |
+
0.689
|
| 715 |
+
],
|
| 716 |
+
"angle": 0,
|
| 717 |
+
"content": "Prediction-guided Label Assignment. The standard label assignment strategy in Faster R-CNN [17] only takes the IoUs between proposals and gt boxes (pseudo boxes in our case) into consideration and assigns foreground to those proposals, whose IoUs are above a pre-defined threshold \\( t \\) (0.5 as default). This strategy relies on the assumption that gt boxes are precise, however, this assumption does not hold for unlabeled data obviously. As a result, some low-quality proposals will be mistakenly assigned as positive, confusing the classification boundaries between foreground and background. One specific example is shown in Fig. 1(c), where a proposal with the true IoU as 0.39 is mistakenly assigned as positive."
|
| 718 |
+
},
|
| 719 |
+
{
|
| 720 |
+
"type": "text",
|
| 721 |
+
"bbox": [
|
| 722 |
+
0.214,
|
| 723 |
+
0.689,
|
| 724 |
+
0.787,
|
| 725 |
+
0.841
|
| 726 |
+
],
|
| 727 |
+
"angle": 0,
|
| 728 |
+
"content": "To address this problem, we propose Prediction-guided Label Assignment (PLA), which takes teacher predictions as auxiliary information and reduces dependency on IoUs. In Teacher-student training scheme, not only can the detection results (after NMS) of teacher perform as pseudo labels, but also teacher's dense predictions (before NMS) are able to provide guidance for student model training. We share the proposals generated by the teacher RPN with the student, so that teacher predictions on these proposals can be easily transferred to student. To measure the proposal quality \\((q)\\) comprehensively, the classification confidence and localization precision of teacher predictions are jointly employed, concretely, \\(q = s^{\\alpha} \\times u^{1 - \\alpha}\\), where \\(s\\) and \\(u\\) denote a foreground score and an IoU"
|
| 729 |
+
}
|
| 730 |
+
],
|
| 731 |
+
[
|
| 732 |
+
{
|
| 733 |
+
"type": "page_number",
|
| 734 |
+
"bbox": [
|
| 735 |
+
0.218,
|
| 736 |
+
0.116,
|
| 737 |
+
0.23,
|
| 738 |
+
0.127
|
| 739 |
+
],
|
| 740 |
+
"angle": 0,
|
| 741 |
+
"content": "8"
|
| 742 |
+
},
|
| 743 |
+
{
|
| 744 |
+
"type": "header",
|
| 745 |
+
"bbox": [
|
| 746 |
+
0.272,
|
| 747 |
+
0.115,
|
| 748 |
+
0.327,
|
| 749 |
+
0.127
|
| 750 |
+
],
|
| 751 |
+
"angle": 0,
|
| 752 |
+
"content": "Li et al."
|
| 753 |
+
},
|
| 754 |
+
{
|
| 755 |
+
"type": "text",
|
| 756 |
+
"bbox": [
|
| 757 |
+
0.214,
|
| 758 |
+
0.147,
|
| 759 |
+
0.788,
|
| 760 |
+
0.344
|
| 761 |
+
],
|
| 762 |
+
"angle": 0,
|
| 763 |
+
"content": "value between the regressed box and the ground truth, respectively. \\(\\alpha\\) controls the contribution of \\(s\\) and \\(u\\) in the overall quality. On unlabeled data, we first construct a candidate bag for each ground truth \\(g\\) by the traditional IoU-based strategy, where the IoU threshold \\(t\\) is set to a relatively low value, e.g., 0.4 as default, to contain more proposals. Within each candidate bag, the proposals are firstly sorted by their quality \\(q\\), then top-\\(\\mathcal{N}\\) proposals are adopted as positive samples and the rest are negatives. The number \\(\\mathcal{N}\\) is decided by the dynamic \\(k\\) estimation strategy proposed in OTA [3], specifically, the IoU values over the candidate bag is summed up to represent the number of positive samples. The proposed PLA gets rid of strong dependencies on IoUs and alleviates negative effects from poorly localized pseudo boxes, leading to clearer classification boundaries. Furthermore, our label assign strategy integrates more teacher knowledge into student model training, realizing better knowledge distillation."
|
| 764 |
+
},
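The assignment just described can be condensed into a few lines. The sketch below handles a single pseudo box and assumes PyTorch tensors; it distinguishes the IoU used to build the candidate bag (proposal vs. pseudo box) from the \( u \) in the quality (teacher-regressed box vs. pseudo box). All names are hypothetical.

```python
import torch

def pla_assign(prop_ious, pred_ious, fg_scores, alpha=0.5, t=0.4):
    """Prediction-guided Label Assignment sketch for one pseudo box.
    prop_ious: (P,) IoU of each shared proposal with the pseudo box
    pred_ious: (P,) IoU of the teacher-regressed box with the pseudo box (u)
    fg_scores: (P,) teacher foreground scores on the same proposals (s)
    Returns a boolean mask marking the proposals assigned as positive."""
    candidate = prop_ious >= t                                # low-threshold candidate bag
    quality = fg_scores ** alpha * pred_ious ** (1 - alpha)   # q = s^a * u^(1-a)
    # dynamic-k estimation (OTA [3]): #positives ~ sum of candidate IoUs
    n_pos = min(int(candidate.sum()), max(1, int(prop_ious[candidate].sum())))
    pos = torch.zeros_like(candidate)
    cand_idx = candidate.nonzero(as_tuple=True)[0]
    top = quality[cand_idx].topk(n_pos).indices               # rank candidates by quality
    pos[cand_idx[top]] = True
    return pos
```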
|
| 765 |
+
{
|
| 766 |
+
"type": "text",
|
| 767 |
+
"bbox": [
|
| 768 |
+
0.214,
|
| 769 |
+
0.344,
|
| 770 |
+
0.788,
|
| 771 |
+
0.465
|
| 772 |
+
],
|
| 773 |
+
"angle": 0,
|
| 774 |
+
"content": "Positive-proposal Consistency Voting. Considering the classification score fails to indicate localization quality, we introduce a simple yet effective method to measure the localization quality, named Positive-proposal Consistency Voting (PCV). Assigning multiple proposals to each gt box (or pseudo box) is a common practice in CNN-based detectors [17,10,29], and we observe that the consistency of regression results from these proposals is capable of reflecting the localization quality of the corresponding pseudo box. Regression consistency \\(\\sigma^j\\) for pseudo box (indexed by \\(j\\)) is formulated as:"
|
| 775 |
+
},
|
| 776 |
+
{
|
| 777 |
+
"type": "equation",
|
| 778 |
+
"bbox": [
|
| 779 |
+
0.449,
|
| 780 |
+
0.477,
|
| 781 |
+
0.786,
|
| 782 |
+
0.509
|
| 783 |
+
],
|
| 784 |
+
"angle": 0,
|
| 785 |
+
"content": "\\[\n\\sigma^ {j} = \\frac {\\sum_ {i = 1} ^ {N} u _ {i} ^ {j}}{N}, \\tag {4}\n\\]"
|
| 786 |
+
},
|
| 787 |
+
{
|
| 788 |
+
"type": "text",
|
| 789 |
+
"bbox": [
|
| 790 |
+
0.214,
|
| 791 |
+
0.518,
|
| 792 |
+
0.788,
|
| 793 |
+
0.578
|
| 794 |
+
],
|
| 795 |
+
"angle": 0,
|
| 796 |
+
"content": "where \\( u \\) denotes an IoU value between the predicted box and the pseudo box, as defined above; \\( N \\) denotes the number of positive proposals, assigned to the pseudo box \\( j \\). After obtaining \\( \\sigma^j \\), we employ it as the instance-wise regression loss weight:"
|
| 797 |
+
},
|
| 798 |
+
{
|
| 799 |
+
"type": "equation",
|
| 800 |
+
"bbox": [
|
| 801 |
+
0.373,
|
| 802 |
+
0.579,
|
| 803 |
+
0.786,
|
| 804 |
+
0.62
|
| 805 |
+
],
|
| 806 |
+
"angle": 0,
|
| 807 |
+
"content": "\\[\n\\mathcal {L} _ {r e g} ^ {u} = \\frac {1}{M N} \\sum_ {j = 1} ^ {M} \\sigma^ {j} \\sum_ {i = 1} ^ {N} \\left| r e g _ {i} ^ {j} - r \\hat {\\mathrm {e}} g _ {i} ^ {j} \\right|, \\tag {5}\n\\]"
|
| 808 |
+
},
|
| 809 |
+
{
|
| 810 |
+
"type": "text",
|
| 811 |
+
"bbox": [
|
| 812 |
+
0.214,
|
| 813 |
+
0.626,
|
| 814 |
+
0.788,
|
| 815 |
+
0.732
|
| 816 |
+
],
|
| 817 |
+
"angle": 0,
|
| 818 |
+
"content": "where \\(reg\\) and \\(\\hat{reg}\\) refer to the regression output and ground-truth, respectively. In Fig. 1(b), we depict the scatter diagram of the relation between prediction consistency \\(\\sigma\\) of pseudo boxes and their true IoUs. It is obvious that \\(\\sigma\\) is positively correlated with true IoUs. Note that, some dots falling in the orange ellipse are mainly caused by annotation errors. We visualize some examples in Fig. 5, where the pseudo boxes accurately detect some objects, which are missed by the ground truths."
|
| 819 |
+
},
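For concreteness, here is a sketch of PCV as expressed in Eqs. (4) and (5), assuming PyTorch and handling a single pseudo box \( j \); averaging the returned terms over the \( M \) pseudo boxes recovers the full Eq. (5). Names are illustrative.

```python
import torch

def pcv_regression_loss(reg_pred, reg_target, pos_ious):
    """Positive-proposal Consistency Voting sketch for one pseudo box j.
    reg_pred:   (N, 4) regression outputs of its N positive proposals
    reg_target: (N, 4) regression targets derived from the pseudo box
    pos_ious:   (N,)  IoUs between each proposal's predicted box and the pseudo box
    """
    sigma = pos_ious.mean()                        # Eq. (4): regression consistency
    l1 = (reg_pred - reg_target).abs().sum(dim=1)  # per-proposal L1 distance
    return sigma * l1.mean()                       # the j-th summand of Eq. (5)
```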
|
| 820 |
+
{
|
| 821 |
+
"type": "title",
|
| 822 |
+
"bbox": [
|
| 823 |
+
0.216,
|
| 824 |
+
0.755,
|
| 825 |
+
0.56,
|
| 826 |
+
0.77
|
| 827 |
+
],
|
| 828 |
+
"angle": 0,
|
| 829 |
+
"content": "3.3 Multi-view Scale-invariant Learning"
|
| 830 |
+
},
|
| 831 |
+
{
|
| 832 |
+
"type": "text",
|
| 833 |
+
"bbox": [
|
| 834 |
+
0.214,
|
| 835 |
+
0.78,
|
| 836 |
+
0.788,
|
| 837 |
+
0.842
|
| 838 |
+
],
|
| 839 |
+
"angle": 0,
|
| 840 |
+
"content": "Different from image classification, in object detection, object scales vary in a large range and detectors hardly show comparable performance on all scales. Therefore, learning scale-invariant representations from unlabeled data is considerably important for SSOD. In consistency training, strong data augmentations"
|
| 841 |
+
}
|
| 842 |
+
],
|
| 843 |
+
[
|
| 844 |
+
{
|
| 845 |
+
"type": "header",
|
| 846 |
+
"bbox": [
|
| 847 |
+
0.433,
|
| 848 |
+
0.115,
|
| 849 |
+
0.732,
|
| 850 |
+
0.129
|
| 851 |
+
],
|
| 852 |
+
"angle": 0,
|
| 853 |
+
"content": "PseCo for Semi-Supervised Object Detection"
|
| 854 |
+
},
|
| 855 |
+
{
|
| 856 |
+
"type": "page_number",
|
| 857 |
+
"bbox": [
|
| 858 |
+
0.775,
|
| 859 |
+
0.117,
|
| 860 |
+
0.785,
|
| 861 |
+
0.127
|
| 862 |
+
],
|
| 863 |
+
"angle": 0,
|
| 864 |
+
"content": "9"
|
| 865 |
+
},
|
| 866 |
+
{
|
| 867 |
+
"type": "image",
|
| 868 |
+
"bbox": [
|
| 869 |
+
0.243,
|
| 870 |
+
0.147,
|
| 871 |
+
0.506,
|
| 872 |
+
0.31
|
| 873 |
+
],
|
| 874 |
+
"angle": 0,
|
| 875 |
+
"content": null
|
| 876 |
+
},
|
| 877 |
+
{
|
| 878 |
+
"type": "image",
|
| 879 |
+
"bbox": [
|
| 880 |
+
0.524,
|
| 881 |
+
0.147,
|
| 882 |
+
0.75,
|
| 883 |
+
0.31
|
| 884 |
+
],
|
| 885 |
+
"angle": 0,
|
| 886 |
+
"content": null
|
| 887 |
+
},
|
| 888 |
+
{
|
| 889 |
+
"type": "image_caption",
|
| 890 |
+
"bbox": [
|
| 891 |
+
0.214,
|
| 892 |
+
0.312,
|
| 893 |
+
0.788,
|
| 894 |
+
0.375
|
| 895 |
+
],
|
| 896 |
+
"angle": 0,
|
| 897 |
+
"content": "Fig. 3: Comparisons between label-level consistency learning and feature-level consistency learning. For label consistency, labels are aligned according to the resize ratio \\(\\alpha\\); for feature consistency, features are aligned by shifting the feature pyramid level."
|
| 898 |
+
},
|
| 899 |
+
{
|
| 900 |
+
"type": "text",
|
| 901 |
+
"bbox": [
|
| 902 |
+
0.214,
|
| 903 |
+
0.405,
|
| 904 |
+
0.789,
|
| 905 |
+
0.601
|
| 906 |
+
],
|
| 907 |
+
"angle": 0,
|
| 908 |
+
"content": "play a crucial role [24,25] in achieving competitive performance. Through injecting the perturbations into the input images, data augmentations equip the model with robustness to various transformations. From the perspective of scale invariance, we regard the common data augmentation strategy (e.g. random-resizing) as label-level consistency since it resizes the label according to the scale changes of input images. Unfortunately, existing works only involve the widely adopted label-level consistency but fail to consider the feature-level one. Since detection network usually has designs of rich feature pyramids, feature-level consistency is easy to implement across paired inputs [16] and should be considered seriously. In this paper, we propose Multi-view Scale-invariant Learning (MSL) that combines both label- and feature-level consistency into a simple framework, where feature-level consistency is realized by aligning shifted pyramid features between two images with identical content but different scales."
|
| 909 |
+
},
|
| 910 |
+
{
|
| 911 |
+
"type": "text",
|
| 912 |
+
"bbox": [
|
| 913 |
+
0.214,
|
| 914 |
+
0.602,
|
| 915 |
+
0.789,
|
| 916 |
+
0.815
|
| 917 |
+
],
|
| 918 |
+
"angle": 0,
|
| 919 |
+
"content": "To be specific, two views, namely \\( V_{1} \\) and \\( V_{2} \\), are used for student training in MSL. We denote the input image for the teacher model as \\( V_{0} \\). Views \\( V_{1} \\) and \\( V_{2} \\) are constructed to learn label- and feature-level consistency, respectively. Among them, \\( V_{1} \\) is implemented by vanilla random resizing, which rescales the input \\( V_{0} \\) and pseudo boxes according to a resize ratio \\( \\alpha \\) randomly sampled from the range \\( [\\alpha_{min}, \\alpha_{max}] \\) ([0.8, 1.3] as default). For feature consistency learning, we firstly downsample \\( V_{1} \\) by even number times (2x as default) to produce \\( V_{2} \\), then combine \\( V_{1} \\) and \\( V_{2} \\) into image pairs. Upsampling is also certainly permitted, but we only perform downsampling here for GPU memory restriction. Because the spatial sizes of adjacent FPN layers always differ by 2x, the P3-P7 layers<sup>4</sup> of \\( V_{1} \\) can align well with P2-P6 layers of \\( V_{2} \\) in the spatial dimension. Through feature alignment, the same pseudo boxes can supervise the student model training on both \\( V_{1} \\) and \\( V_{2} \\). Integrating label consistency and feature consistency into consistency learning leads to stronger scale-invariant learning and significantly"
|
| 920 |
+
},
|
| 921 |
+
{
|
| 922 |
+
"type": "page_footnote",
|
| 923 |
+
"bbox": [
|
| 924 |
+
0.218,
|
| 925 |
+
0.825,
|
| 926 |
+
0.751,
|
| 927 |
+
0.84
|
| 928 |
+
],
|
| 929 |
+
"angle": 0,
|
| 930 |
+
"content": "\\( {}^{4}{P}_{x} \\) refers to the FPN layer whose feature maps are downsampled by \\( {2}^{x} \\) times."
|
| 931 |
+
}
|
| 932 |
+
],
|
| 933 |
+
[
|
| 934 |
+
{
|
| 935 |
+
"type": "page_number",
|
| 936 |
+
"bbox": [
|
| 937 |
+
0.218,
|
| 938 |
+
0.116,
|
| 939 |
+
0.236,
|
| 940 |
+
0.127
|
| 941 |
+
],
|
| 942 |
+
"angle": 0,
|
| 943 |
+
"content": "10"
|
| 944 |
+
},
|
| 945 |
+
{
|
| 946 |
+
"type": "header",
|
| 947 |
+
"bbox": [
|
| 948 |
+
0.272,
|
| 949 |
+
0.115,
|
| 950 |
+
0.326,
|
| 951 |
+
0.127
|
| 952 |
+
],
|
| 953 |
+
"angle": 0,
|
| 954 |
+
"content": "Li et al."
|
| 955 |
+
},
|
| 956 |
+
{
|
| 957 |
+
"type": "text",
|
| 958 |
+
"bbox": [
|
| 959 |
+
0.214,
|
| 960 |
+
0.147,
|
| 961 |
+
0.784,
|
| 962 |
+
0.177
|
| 963 |
+
],
|
| 964 |
+
"angle": 0,
|
| 965 |
+
"content": "accelerates model convergence, as we will show later in the experiments. Comparisons between label consistency and feature consistency are shown in Fig. 3."
|
| 966 |
+
},
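A sketch of the view construction and the level-shift alignment described above, assuming PyTorch and FPN features stored in dicts keyed 'p2'..'p7' (a hypothetical layout, not the PseCo codebase).

```python
import torch.nn.functional as F

def make_v2(v1_images):
    """Downsample V1 by 2x to produce V2, halving every object's pixel scale."""
    return F.interpolate(v1_images, scale_factor=0.5,
                         mode="bilinear", align_corners=False)

def shifted_pyramid_pairs(feats_v1, feats_v2):
    """Pair P3-P7 of V1 with P2-P6 of V2. Adjacent FPN levels differ by 2x in
    spatial size, so after the 2x image downsampling these level pairs align
    spatially and can be supervised by the same pseudo boxes."""
    return [(feats_v1[f"p{l}"], feats_v2[f"p{l - 1}"]) for l in range(3, 8)]
```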
|
| 967 |
+
{
|
| 968 |
+
"type": "text",
|
| 969 |
+
"bbox": [
|
| 970 |
+
0.214,
|
| 971 |
+
0.178,
|
| 972 |
+
0.785,
|
| 973 |
+
0.327
|
| 974 |
+
],
|
| 975 |
+
"angle": 0,
|
| 976 |
+
"content": "Learning scale-invariant representation from unlabeled data is also explored by SoCo [23]. However, we claim there are two intrinsic differences between MSL and SoCo: (1) MSL models scale invariance from both label consistency and image feature consistency, while SoCo only considers object feature consistency. Through aligning dense image features of shifted pyramids between paired images, our MSL can provide more comprehensive and dense supervisory signals than the SoCo, which only performs consistency on sparse objects. (2) SoCo implements feature consistency via contrastive learning, which is designed for the pretraining; in contrast, our MSL uses bounding box supervision to implement consistency learning and can be integrated into the detection task."
|
| 977 |
+
},
|
| 978 |
+
{
|
| 979 |
+
"type": "title",
|
| 980 |
+
"bbox": [
|
| 981 |
+
0.216,
|
| 982 |
+
0.347,
|
| 983 |
+
0.376,
|
| 984 |
+
0.364
|
| 985 |
+
],
|
| 986 |
+
"angle": 0,
|
| 987 |
+
"content": "4 Experiments"
|
| 988 |
+
},
|
| 989 |
+
{
|
| 990 |
+
"type": "title",
|
| 991 |
+
"bbox": [
|
| 992 |
+
0.216,
|
| 993 |
+
0.374,
|
| 994 |
+
0.533,
|
| 995 |
+
0.388
|
| 996 |
+
],
|
| 997 |
+
"angle": 0,
|
| 998 |
+
"content": "4.1 Dataset and Evaluation Protocol"
|
| 999 |
+
},
|
| 1000 |
+
{
|
| 1001 |
+
"type": "text",
|
| 1002 |
+
"bbox": [
|
| 1003 |
+
0.214,
|
| 1004 |
+
0.395,
|
| 1005 |
+
0.785,
|
| 1006 |
+
0.514
|
| 1007 |
+
],
|
| 1008 |
+
"angle": 0,
|
| 1009 |
+
"content": "In this section, we conduct extensive experiments to verify the effectiveness of PseCo on MS COCO benchmark [13]. There are two training sets, namely the train2017 set, containing 118k labeled images, and the unlabeled2017 set, containing 123k unlabeled images. The val2017 with 5k images is used as validation set, and we report all experiment results on val2017. The performance is measured by COCO average precision (denoted as mAP). Following the common practice of SSOD [20], there are two experimental settings: Partially Labeled Data and Fully Labeled Data, which are described as follows:"
|
| 1010 |
+
},
|
| 1011 |
+
{
|
| 1012 |
+
"type": "text",
|
| 1013 |
+
"bbox": [
|
| 1014 |
+
0.214,
|
| 1015 |
+
0.515,
|
| 1016 |
+
0.785,
|
| 1017 |
+
0.56
|
| 1018 |
+
],
|
| 1019 |
+
"angle": 0,
|
| 1020 |
+
"content": "Partially Labeled Data. We randomly sample 1, 2, 5, and \\(10\\%\\) data from train2017 as labeled data, and use the rest as unlabeled. Under each labelling ratio, we report the mean and standard deviation over 5 different data folds."
|
| 1021 |
+
},
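A plain-Python sketch of this split protocol; the helper name and fold-seeding convention are assumptions, not the released data folds.

```python
import random

def make_partial_split(image_ids, ratio, fold_seed):
    """Split train2017 image ids into labeled/unlabeled sets at a given ratio.
    ratio is one of 0.01, 0.02, 0.05, 0.10; fold_seed selects one of 5 folds."""
    rng = random.Random(fold_seed)
    ids = sorted(image_ids)        # fix the ordering so folds are reproducible
    rng.shuffle(ids)
    n_labeled = int(len(ids) * ratio)
    return ids[:n_labeled], ids[n_labeled:]

# e.g. the five folds at the 10% labelling ratio:
# folds = [make_partial_split(all_ids, 0.10, seed) for seed in range(5)]
```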
|
| 1022 |
+
{
|
| 1023 |
+
"type": "text",
|
| 1024 |
+
"bbox": [
|
| 1025 |
+
0.214,
|
| 1026 |
+
0.561,
|
| 1027 |
+
0.785,
|
| 1028 |
+
0.591
|
| 1029 |
+
],
|
| 1030 |
+
"angle": 0,
|
| 1031 |
+
"content": "Fully Labeled Data. Under this setting, we take train2017 as the training labeled set and unlabeled2017 as the training unlabeled set."
|
| 1032 |
+
},
|
| 1033 |
+
{
|
| 1034 |
+
"type": "title",
|
| 1035 |
+
"bbox": [
|
| 1036 |
+
0.216,
|
| 1037 |
+
0.609,
|
| 1038 |
+
0.457,
|
| 1039 |
+
0.624
|
| 1040 |
+
],
|
| 1041 |
+
"angle": 0,
|
| 1042 |
+
"content": "4.2 Implementation Details"
|
| 1043 |
+
},
|
| 1044 |
+
{
|
| 1045 |
+
"type": "text",
|
| 1046 |
+
"bbox": [
|
| 1047 |
+
0.214,
|
| 1048 |
+
0.629,
|
| 1049 |
+
0.785,
|
| 1050 |
+
0.718
|
| 1051 |
+
],
|
| 1052 |
+
"angle": 0,
|
| 1053 |
+
"content": "For a fair comparison, we adopt Faster R-CNN [17] with FPN [11] as the detection framework, and ResNet-50 [5] as the backbone. The confidence threshold \\(\\tau\\) is set to 0.5, empirically. We set \\(\\beta\\) as 4.0 to control contributions of unlabeled data in the overall losses. The performance is evaluated on the Teacher model. Training details for Partially Labeled Data and Fully Labeled Data are described below:"
|
| 1054 |
+
},
|
| 1055 |
+
{
|
| 1056 |
+
"type": "text",
|
| 1057 |
+
"bbox": [
|
| 1058 |
+
0.214,
|
| 1059 |
+
0.72,
|
| 1060 |
+
0.785,
|
| 1061 |
+
0.779
|
| 1062 |
+
],
|
| 1063 |
+
"angle": 0,
|
| 1064 |
+
"content": "Partially Labeled Data. All models are trained for 180k iterations on 8 GPUs. The initial learning rate is set as 0.01 and divided by 10 at 120k and 160k iterations. The training batch in each GPU includes 5 images, where the sample ratio between unlabeled data and labeled data is set to 4:1."
|
| 1065 |
+
},
|
| 1066 |
+
{
|
| 1067 |
+
"type": "text",
|
| 1068 |
+
"bbox": [
|
| 1069 |
+
0.214,
|
| 1070 |
+
0.78,
|
| 1071 |
+
0.785,
|
| 1072 |
+
0.839
|
| 1073 |
+
],
|
| 1074 |
+
"angle": 0,
|
| 1075 |
+
"content": "Fully Labeled Data. All models are trained for 720k iterations on 8 GPUs. Mini-batch in each GPU is 8 with the sample ratio between unlabeled and labeled data as 1:1. The learning rate is initialized to 0.01 and divided by 10 at 480k and 680k iterations."
|
| 1076 |
+
}
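The two schedules can be summarized as config objects plus a step learning-rate rule; the dicts below are hypothetical mirrors of the numbers above, not the released training configs.

```python
# Hypothetical config objects mirroring the reported schedules.
PARTIAL_LABELED = dict(total_iters=180_000, base_lr=0.01,
                       decay_iters=(120_000, 160_000),
                       imgs_per_gpu=5, gpus=8, unlabeled_to_labeled=(4, 1))
FULLY_LABELED = dict(total_iters=720_000, base_lr=0.01,
                     decay_iters=(480_000, 680_000),
                     imgs_per_gpu=8, gpus=8, unlabeled_to_labeled=(1, 1))

def lr_at(iteration, cfg):
    """Step schedule: divide the base lr by 10 at each decay milestone."""
    drops = sum(iteration >= m for m in cfg["decay_iters"])
    return cfg["base_lr"] * 0.1 ** drops
```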
|
| 1077 |
+
],
|
| 1078 |
+
[
|
| 1079 |
+
{
|
| 1080 |
+
"type": "header",
|
| 1081 |
+
"bbox": [
|
| 1082 |
+
0.433,
|
| 1083 |
+
0.115,
|
| 1084 |
+
0.732,
|
| 1085 |
+
0.129
|
| 1086 |
+
],
|
| 1087 |
+
"angle": 0,
|
| 1088 |
+
"content": "PseCo for Semi-Supervised Object Detection"
|
| 1089 |
+
},
|
| 1090 |
+
{
|
| 1091 |
+
"type": "page_number",
|
| 1092 |
+
"bbox": [
|
| 1093 |
+
0.769,
|
| 1094 |
+
0.117,
|
| 1095 |
+
0.784,
|
| 1096 |
+
0.127
|
| 1097 |
+
],
|
| 1098 |
+
"angle": 0,
|
| 1099 |
+
"content": "11"
|
| 1100 |
+
},
|
| 1101 |
+
{
|
| 1102 |
+
"type": "table_caption",
|
| 1103 |
+
"bbox": [
|
| 1104 |
+
0.217,
|
| 1105 |
+
0.158,
|
| 1106 |
+
0.786,
|
| 1107 |
+
0.187
|
| 1108 |
+
],
|
| 1109 |
+
"angle": 0,
|
| 1110 |
+
"content": "Table 1: Comparisons with the state-of-the-art methods on val2017 set under the Partially Labeled Data and Fully Labeled Data settings."
|
| 1111 |
+
},
|
| 1112 |
+
{
|
| 1113 |
+
"type": "table",
|
| 1114 |
+
"bbox": [
|
| 1115 |
+
0.261,
|
| 1116 |
+
0.188,
|
| 1117 |
+
0.742,
|
| 1118 |
+
0.328
|
| 1119 |
+
],
|
| 1120 |
+
"angle": 0,
|
| 1121 |
+
"content": "<table><tr><td rowspan=\"2\">Method</td><td colspan=\"4\">Partially Labeled Data</td><td rowspan=\"2\">Fully Labeled Data</td></tr><tr><td>1%</td><td>2%</td><td>5%</td><td>10%</td></tr><tr><td>Supervised baseline</td><td>12.20±0.29</td><td>16.53±0.12</td><td>21.17±0.17</td><td>26.90±0.08</td><td>41.0</td></tr><tr><td>STAC [20]</td><td>13.97±0.35</td><td>18.25±0.25</td><td>24.38±0.12</td><td>28.64±0.21</td><td>39.2</td></tr><tr><td>Humble Teacher [21]</td><td>16.96±0.35</td><td>21.74±0.24</td><td>27.70±0.15</td><td>31.61±0.28</td><td>37.6 +4.8 → 42.4</td></tr><tr><td>ISMT [27]</td><td>18.88±0.74</td><td>22.43±0.56</td><td>26.37±0.24</td><td>30.53±0.52</td><td>37.8 +1.8 → 39.6</td></tr><tr><td>Instant-Teaching [30]</td><td>18.05±0.15</td><td>22.45±0.15</td><td>26.75±0.05</td><td>30.40±0.05</td><td>37.6 +2.6 → 40.2</td></tr><tr><td>Unbiased Teacher [14]</td><td>20.75±0.12</td><td>24.30±0.07</td><td>28.27±0.11</td><td>31.50±0.10</td><td>40.2 +1.1 → 41.3</td></tr><tr><td>Soft Teacher [26]</td><td>20.46±0.39</td><td>-</td><td>30.74±0.08</td><td>34.04±0.14</td><td>40.9 +3.6 → 44.5</td></tr><tr><td>PseCo (ours)</td><td>22.43±0.36</td><td>27.77±0.18</td><td>32.50±0.08</td><td>36.06±0.24</td><td>41.0 +5.1 → 46.1</td></tr></table>"
|
| 1122 |
+
},
|
| 1123 |
+
{
|
| 1124 |
+
"type": "title",
|
| 1125 |
+
"bbox": [
|
| 1126 |
+
0.217,
|
| 1127 |
+
0.357,
|
| 1128 |
+
0.62,
|
| 1129 |
+
0.373
|
| 1130 |
+
],
|
| 1131 |
+
"angle": 0,
|
| 1132 |
+
"content": "4.3 Comparison with State-of-the-Art Methods"
|
| 1133 |
+
},
|
| 1134 |
+
{
|
| 1135 |
+
"type": "text",
|
| 1136 |
+
"bbox": [
|
| 1137 |
+
0.218,
|
| 1138 |
+
0.386,
|
| 1139 |
+
0.787,
|
| 1140 |
+
0.567
|
| 1141 |
+
],
|
| 1142 |
+
"angle": 0,
|
| 1143 |
+
"content": "We compare the proposed PseCo with other state-of-the-art methods on COCO val2017 set. Comparisons under the Partially Labeled Data setting are first conducted, with results reported in Tab. 1. When labeled data is scarce (i.e., under \\(1\\%\\) and \\(2\\%\\) labelling ratios), our method surpasses the state-of-the-art method, Unbiased Teacher [14], by \\(1.7\\%\\) and \\(3.5\\%\\), reaching 22.4 and \\(27.8\\mathrm{mAP}\\) respectively. When more labeled data is accessible, the SOTA method is transferred to Soft Teacher [26]. Our method still outperforms it by \\(1.8\\%\\) and \\(2.0\\%\\) under \\(5\\%\\) and \\(10\\%\\) labelling ratios, respectively. Therefore, the proposed method outperforms the SOTAs by a large margin, at least \\(1.7\\%\\), under all labelling ratios. Compared with the supervised baseline, PseCo obtains even better performance with only \\(2\\%\\) labeled data than the baseline with \\(10\\%\\) labeled data, demonstrating the effectiveness of proposed semi-supervised learning techniques."
|
| 1144 |
+
},
|
| 1145 |
+
{
|
| 1146 |
+
"type": "text",
|
| 1147 |
+
"bbox": [
|
| 1148 |
+
0.218,
|
| 1149 |
+
0.568,
|
| 1150 |
+
0.787,
|
| 1151 |
+
0.733
|
| 1152 |
+
],
|
| 1153 |
+
"angle": 0,
|
| 1154 |
+
"content": "Moreover, we also compare the convergence speed with the previous best method (Soft Teacher [26]) in Fig. 4, where convergence curves are depicted under \\(10\\%\\) and \\(5\\%\\) labelling ratios. It is obvious that our method has a faster convergence speed, specifically, our method uses only \\(2/5\\) and \\(1/4\\) iterations of Soft Teacher to achieve the same performance under \\(10\\%\\) and \\(5\\%\\) labelling ratios respectively. Although we employ an extra view \\((V_{2})\\) to learn feature-level consistency, it only increases the training time of each iteration by \\(25\\%\\) (from \\(0.72 \\text{ sec}/\\text{iter}\\) to \\(0.91 \\text{ sec}/\\text{iter}\\)), due to the low input resolution of \\(V_{2}\\). In summary, we halve the training time of SOTA approach but achieve even better performance, which validates the superior learning efficiency of our method on unlabeled data."
|
| 1155 |
+
},
|
| 1156 |
+
{
|
| 1157 |
+
"type": "text",
|
| 1158 |
+
"bbox": [
|
| 1159 |
+
0.217,
|
| 1160 |
+
0.735,
|
| 1161 |
+
0.786,
|
| 1162 |
+
0.84
|
| 1163 |
+
],
|
| 1164 |
+
"angle": 0,
|
| 1165 |
+
"content": "The experimental results under the Fully Labeled Data setting are reported in Tab. 1, where both results of comparison methods and their supervised baseline are listed. Following the practice in Soft Teacher [26], we also apply weak augmentations to the labeled data and obtain a strong supervised baseline, \\(41.0\\mathrm{mAP}\\). Although with a such strong baseline, PseCo still achieves larger improvements \\((+5.1\\%)\\) than others and reaches \\(46.1\\mathrm{mAP}\\), building a new state of the art. Some qualitative results are shown in Fig. 5."
|
| 1166 |
+
}
|
| 1167 |
+
],
|
| 1168 |
+
[
|
| 1169 |
+
{
|
| 1170 |
+
"type": "page_number",
|
| 1171 |
+
"bbox": [
|
| 1172 |
+
0.218,
|
| 1173 |
+
0.116,
|
| 1174 |
+
0.236,
|
| 1175 |
+
0.127
|
| 1176 |
+
],
|
| 1177 |
+
"angle": 0,
|
| 1178 |
+
"content": "12"
|
| 1179 |
+
},
|
| 1180 |
+
{
|
| 1181 |
+
"type": "header",
|
| 1182 |
+
"bbox": [
|
| 1183 |
+
0.272,
|
| 1184 |
+
0.115,
|
| 1185 |
+
0.326,
|
| 1186 |
+
0.127
|
| 1187 |
+
],
|
| 1188 |
+
"angle": 0,
|
| 1189 |
+
"content": "Li et al."
|
| 1190 |
+
},
|
| 1191 |
+
{
|
| 1192 |
+
"type": "image_caption",
|
| 1193 |
+
"bbox": [
|
| 1194 |
+
0.286,
|
| 1195 |
+
0.147,
|
| 1196 |
+
0.381,
|
| 1197 |
+
0.157
|
| 1198 |
+
],
|
| 1199 |
+
"angle": 0,
|
| 1200 |
+
"content": "(a) \\(10\\%\\) labelling ratio"
|
| 1201 |
+
},
|
| 1202 |
+
{
|
| 1203 |
+
"type": "image",
|
| 1204 |
+
"bbox": [
|
| 1205 |
+
0.245,
|
| 1206 |
+
0.159,
|
| 1207 |
+
0.407,
|
| 1208 |
+
0.254
|
| 1209 |
+
],
|
| 1210 |
+
"angle": 0,
|
| 1211 |
+
"content": null
|
| 1212 |
+
},
|
| 1213 |
+
{
|
| 1214 |
+
"type": "image_caption",
|
| 1215 |
+
"bbox": [
|
| 1216 |
+
0.46,
|
| 1217 |
+
0.147,
|
| 1218 |
+
0.548,
|
| 1219 |
+
0.157
|
| 1220 |
+
],
|
| 1221 |
+
"angle": 0,
|
| 1222 |
+
"content": "(b) \\(5\\%\\) labelling ratio"
|
| 1223 |
+
},
|
| 1224 |
+
{
|
| 1225 |
+
"type": "image",
|
| 1226 |
+
"bbox": [
|
| 1227 |
+
0.418,
|
| 1228 |
+
0.158,
|
| 1229 |
+
0.578,
|
| 1230 |
+
0.254
|
| 1231 |
+
],
|
| 1232 |
+
"angle": 0,
|
| 1233 |
+
"content": null
|
| 1234 |
+
},
|
| 1235 |
+
{
|
| 1236 |
+
"type": "image_caption",
|
| 1237 |
+
"bbox": [
|
| 1238 |
+
0.633,
|
| 1239 |
+
0.147,
|
| 1240 |
+
0.717,
|
| 1241 |
+
0.157
|
| 1242 |
+
],
|
| 1243 |
+
"angle": 0,
|
| 1244 |
+
"content": "(c) effects of view 2"
|
| 1245 |
+
},
|
| 1246 |
+
{
|
| 1247 |
+
"type": "image",
|
| 1248 |
+
"bbox": [
|
| 1249 |
+
0.589,
|
| 1250 |
+
0.158,
|
| 1251 |
+
0.75,
|
| 1252 |
+
0.254
|
| 1253 |
+
],
|
| 1254 |
+
"angle": 0,
|
| 1255 |
+
"content": null
|
| 1256 |
+
},
|
| 1257 |
+
{
|
| 1258 |
+
"type": "image_caption",
|
| 1259 |
+
"bbox": [
|
| 1260 |
+
0.214,
|
| 1261 |
+
0.256,
|
| 1262 |
+
0.788,
|
| 1263 |
+
0.318
|
| 1264 |
+
],
|
| 1265 |
+
"angle": 0,
|
| 1266 |
+
"content": "Fig. 4: Comparison of model convergence speed. In (a) and (b), we compare PseCo against Soft Teacher [26]. Here, we reproduce Soft Teacher using their source codes. (c) depicts the comparison between \\( V_{1} \\) and \\( V_{1} \\& V_{2} \\). In legend, the numbers in brackets refer to mAP. Performance is evaluated on the teacher."
|
| 1267 |
+
},
|
| 1268 |
+
{
|
| 1269 |
+
"type": "table_caption",
|
| 1270 |
+
"bbox": [
|
| 1271 |
+
0.214,
|
| 1272 |
+
0.343,
|
| 1273 |
+
0.788,
|
| 1274 |
+
0.418
|
| 1275 |
+
],
|
| 1276 |
+
"angle": 0,
|
| 1277 |
+
"content": "Table 2: Ablation studies on each component of our method. MSL represents Multi-view Scale-invariant Learning; NPL represents Noisy Pseudo box Learning. In MSL, \\( V_{1} \\) and \\( V_{2} \\) are constructed for label- and feature-level consistency, respectively. In NPL, PCV and PLA stand for Positive-proposal Consistency Voting and Prediction-guided Label Assignment, respectively."
|
| 1278 |
+
},
|
| 1279 |
+
{
|
| 1280 |
+
"type": "table",
|
| 1281 |
+
"bbox": [
|
| 1282 |
+
0.29,
|
| 1283 |
+
0.419,
|
| 1284 |
+
0.713,
|
| 1285 |
+
0.548
|
| 1286 |
+
],
|
| 1287 |
+
"angle": 0,
|
| 1288 |
+
"content": "<table><tr><td colspan=\"2\">MSL</td><td colspan=\"2\">NPL</td><td rowspan=\"2\">mAP</td><td rowspan=\"2\">\\( AP_{50} \\)</td><td rowspan=\"2\">\\( AP_{75} \\)</td></tr><tr><td>\\( V_1 \\)</td><td>\\( V_2 \\)</td><td>PCV</td><td>PLA</td></tr><tr><td colspan=\"4\"></td><td>26.8</td><td>44.9</td><td>28.4</td></tr><tr><td>✓</td><td></td><td></td><td></td><td>33.9(+7.1)</td><td>55.2</td><td>36.0</td></tr><tr><td>✓</td><td>✓</td><td></td><td></td><td>34.9(+8.1)</td><td>56.3</td><td>37.1</td></tr><tr><td>✓</td><td></td><td>✓</td><td></td><td>34.8(+8.0)</td><td>55.1</td><td>37.4</td></tr><tr><td>✓</td><td></td><td>✓</td><td>✓</td><td>35.7(+8.9)</td><td>56.4</td><td>38.4</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td></td><td>36.0(+9.2)</td><td>56.9</td><td>38.7</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>36.3(+9.5)</td><td>57.2</td><td>39.2</td></tr></table>"
|
| 1289 |
+
},
|
| 1290 |
+
{
|
| 1291 |
+
"type": "title",
|
| 1292 |
+
"bbox": [
|
| 1293 |
+
0.216,
|
| 1294 |
+
0.575,
|
| 1295 |
+
0.389,
|
| 1296 |
+
0.591
|
| 1297 |
+
],
|
| 1298 |
+
"angle": 0,
|
| 1299 |
+
"content": "4.4 Ablation Study"
|
| 1300 |
+
},
|
| 1301 |
+
{
|
| 1302 |
+
"type": "text",
|
| 1303 |
+
"bbox": [
|
| 1304 |
+
0.214,
|
| 1305 |
+
0.599,
|
| 1306 |
+
0.787,
|
| 1307 |
+
0.629
|
| 1308 |
+
],
|
| 1309 |
+
"angle": 0,
|
| 1310 |
+
"content": "We conduct detailed ablation studies to verify key designs. All ablation studies are conducted on a single data fold from the \\(10\\%\\) labelling ratio."
|
| 1311 |
+
},
|
| 1312 |
+
{
|
| 1313 |
+
"type": "text",
|
| 1314 |
+
"bbox": [
|
| 1315 |
+
0.214,
|
| 1316 |
+
0.63,
|
| 1317 |
+
0.788,
|
| 1318 |
+
0.81
|
| 1319 |
+
],
|
| 1320 |
+
"angle": 0,
|
| 1321 |
+
"content": "Effect of individual component. In Tab. 2, we show effectiveness of each component step by step. When only using \\(10\\%\\) labeled data as training examples, it obtains \\(26.8\\mathrm{mAP}\\). Next, we construct the semi-supervised baseline by applying \\(V_{1}\\) on unlabeled data for label-level consistency learning. The baseline does not consider any adverse effects incurred by coarse pseudo boxes and obtains 33.9 mAP. Furthermore, by leveraging additional view \\(V_{2}\\), the feature-level scale-invariant learning is enabled, and an improvement of \\(+1.0\\mathrm{mAP}\\) is found. On the other hand, to alleviate the issue of coarse pseudo boxes, we introduce PCV to suppress the inaccurate regression signals, improving the baseline from 33.9 to \\(34.8\\mathrm{mAP}\\). After that, we replace the traditional IoU-based label assignment strategy with the PLA and enjoy another \\(+0.9\\mathrm{mAP}\\) gain. Finally, when combing MSL and NPL together, it achieves the best performance, 36.3 mAP."
|
| 1322 |
+
},
|
| 1323 |
+
{
|
| 1324 |
+
"type": "text",
|
| 1325 |
+
"bbox": [
|
| 1326 |
+
0.214,
|
| 1327 |
+
0.81,
|
| 1328 |
+
0.787,
|
| 1329 |
+
0.84
|
| 1330 |
+
],
|
| 1331 |
+
"angle": 0,
|
| 1332 |
+
"content": "Comparison with other regression methods. Scores of pseudo boxes can only indicate the confidence of predicted object category, thus they fail to reflect"
|
| 1333 |
+
}
|
| 1334 |
+
],
|
| 1335 |
+
[
|
| 1336 |
+
{
|
| 1337 |
+
"type": "header",
|
| 1338 |
+
"bbox": [
|
| 1339 |
+
0.433,
|
| 1340 |
+
0.115,
|
| 1341 |
+
0.732,
|
| 1342 |
+
0.129
|
| 1343 |
+
],
|
| 1344 |
+
"angle": 0,
|
| 1345 |
+
"content": "PseCo for Semi-Supervised Object Detection"
|
| 1346 |
+
},
|
| 1347 |
+
{
|
| 1348 |
+
"type": "page_number",
|
| 1349 |
+
"bbox": [
|
| 1350 |
+
0.769,
|
| 1351 |
+
0.116,
|
| 1352 |
+
0.786,
|
| 1353 |
+
0.127
|
| 1354 |
+
],
|
| 1355 |
+
"angle": 0,
|
| 1356 |
+
"content": "13"
|
| 1357 |
+
},
|
| 1358 |
+
{
|
| 1359 |
+
"type": "table_caption",
|
| 1360 |
+
"bbox": [
|
| 1361 |
+
0.215,
|
| 1362 |
+
0.158,
|
| 1363 |
+
0.784,
|
| 1364 |
+
0.188
|
| 1365 |
+
],
|
| 1366 |
+
"angle": 0,
|
| 1367 |
+
"content": "Table 3: Analysis of Multi-view Scale-invariant learning, which contains both the label- and feature-level consistency."
|
| 1368 |
+
},
|
| 1369 |
+
{
|
| 1370 |
+
"type": "table_caption",
|
| 1371 |
+
"bbox": [
|
| 1372 |
+
0.244,
|
| 1373 |
+
0.19,
|
| 1374 |
+
0.449,
|
| 1375 |
+
0.204
|
| 1376 |
+
],
|
| 1377 |
+
"angle": 0,
|
| 1378 |
+
"content": "(a) Study on label consistency."
|
| 1379 |
+
},
|
| 1380 |
+
{
|
| 1381 |
+
"type": "table",
|
| 1382 |
+
"bbox": [
|
| 1383 |
+
0.222,
|
| 1384 |
+
0.206,
|
| 1385 |
+
0.477,
|
| 1386 |
+
0.248
|
| 1387 |
+
],
|
| 1388 |
+
"angle": 0,
|
| 1389 |
+
"content": "<table><tr><td>method</td><td>mAP</td><td>APS</td><td>APM</td><td>APL</td></tr><tr><td>single-scale training</td><td>32.7</td><td>19.0</td><td>36.0</td><td>42.5</td></tr><tr><td>label consistency</td><td>33.9</td><td>19.1</td><td>37.2</td><td>44.4</td></tr></table>"
|
| 1390 |
+
},
|
| 1391 |
+
{
|
| 1392 |
+
"type": "table_caption",
|
| 1393 |
+
"bbox": [
|
| 1394 |
+
0.528,
|
| 1395 |
+
0.19,
|
| 1396 |
+
0.747,
|
| 1397 |
+
0.204
|
| 1398 |
+
],
|
| 1399 |
+
"angle": 0,
|
| 1400 |
+
"content": "(b) Study on feature consistency."
|
| 1401 |
+
},
|
| 1402 |
+
{
|
| 1403 |
+
"type": "table",
|
| 1404 |
+
"bbox": [
|
| 1405 |
+
0.499,
|
| 1406 |
+
0.206,
|
| 1407 |
+
0.782,
|
| 1408 |
+
0.247
|
| 1409 |
+
],
|
| 1410 |
+
"angle": 0,
|
| 1411 |
+
"content": "<table><tr><td>method</td><td>mAP</td><td>APS</td><td>APM</td><td>APL</td></tr><tr><td>vanilla multi-view training</td><td>33.9</td><td>20.9</td><td>37.2</td><td>43.0</td></tr><tr><td>feature consistency</td><td>34.9</td><td>22.1</td><td>38.2</td><td>43.6</td></tr></table>"
|
| 1412 |
+
},
|
| 1413 |
+
{
|
| 1414 |
+
"type": "table_caption",
|
| 1415 |
+
"bbox": [
|
| 1416 |
+
0.215,
|
| 1417 |
+
0.274,
|
| 1418 |
+
0.784,
|
| 1419 |
+
0.304
|
| 1420 |
+
],
|
| 1421 |
+
"angle": 0,
|
| 1422 |
+
"content": "Table 4: Ablation studies related to Positive-proposal Consistency Voting (PCV) and Prediction-guided Label Assignment (PLA)."
|
| 1423 |
+
},
|
| 1424 |
+
{
|
| 1425 |
+
"type": "text",
|
| 1426 |
+
"bbox": [
|
| 1427 |
+
0.216,
|
| 1428 |
+
0.306,
|
| 1429 |
+
0.458,
|
| 1430 |
+
0.333
|
| 1431 |
+
],
|
| 1432 |
+
"angle": 0,
|
| 1433 |
+
"content": "(a) Comparison between our PCV and other regression methods."
|
| 1434 |
+
},
|
| 1435 |
+
{
|
| 1436 |
+
"type": "table",
|
| 1437 |
+
"bbox": [
|
| 1438 |
+
0.222,
|
| 1439 |
+
0.334,
|
| 1440 |
+
0.455,
|
| 1441 |
+
0.401
|
| 1442 |
+
],
|
| 1443 |
+
"angle": 0,
|
| 1444 |
+
"content": "<table><tr><td>method</td><td>mAP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td></tr><tr><td>abandon reg [14]</td><td>33.9</td><td>55.2</td><td>36.0</td></tr><tr><td>reg consistency [21]</td><td>34.2</td><td>55.1</td><td>36.5</td></tr><tr><td>box jittering [26]</td><td>34.5</td><td>54.9</td><td>36.9</td></tr><tr><td>PCV (ours)</td><td>34.8</td><td>55.1</td><td>37.4</td></tr></table>"
|
| 1445 |
+
},
|
| 1446 |
+
{
|
| 1447 |
+
"type": "text",
|
| 1448 |
+
"bbox": [
|
| 1449 |
+
0.477,
|
| 1450 |
+
0.307,
|
| 1451 |
+
0.622,
|
| 1452 |
+
0.334
|
| 1453 |
+
],
|
| 1454 |
+
"angle": 0,
|
| 1455 |
+
"content": "(b) Study on hyperparameter \\(\\alpha\\)"
|
| 1456 |
+
},
|
| 1457 |
+
{
|
| 1458 |
+
"type": "table",
|
| 1459 |
+
"bbox": [
|
| 1460 |
+
0.482,
|
| 1461 |
+
0.34,
|
| 1462 |
+
0.618,
|
| 1463 |
+
0.402
|
| 1464 |
+
],
|
| 1465 |
+
"angle": 0,
|
| 1466 |
+
"content": "<table><tr><td>α</td><td>mAP</td><td>AP50</td><td>AP75</td></tr><tr><td>0</td><td>35.2</td><td>56.1</td><td>37.8</td></tr><tr><td>0.5</td><td>35.7</td><td>56.4</td><td>38.4</td></tr><tr><td>1.0</td><td>35.4</td><td>55.7</td><td>38.4</td></tr></table>"
|
| 1467 |
+
},
|
| 1468 |
+
{
|
| 1469 |
+
"type": "text",
|
| 1470 |
+
"bbox": [
|
| 1471 |
+
0.642,
|
| 1472 |
+
0.307,
|
| 1473 |
+
0.785,
|
| 1474 |
+
0.333
|
| 1475 |
+
],
|
| 1476 |
+
"angle": 0,
|
| 1477 |
+
"content": "(c) Study on IoU threshold \\( t \\)."
|
| 1478 |
+
},
|
| 1479 |
+
{
|
| 1480 |
+
"type": "table",
|
| 1481 |
+
"bbox": [
|
| 1482 |
+
0.647,
|
| 1483 |
+
0.341,
|
| 1484 |
+
0.781,
|
| 1485 |
+
0.401
|
| 1486 |
+
],
|
| 1487 |
+
"angle": 0,
|
| 1488 |
+
"content": "<table><tr><td>t</td><td>mAP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td></tr><tr><td>0.3</td><td>35.7</td><td>56.2</td><td>38.6</td></tr><tr><td>0.4</td><td>35.7</td><td>56.4</td><td>38.4</td></tr><tr><td>0.5</td><td>35.5</td><td>56.1</td><td>38.3</td></tr></table>"
|
| 1489 |
+
},
|
| 1490 |
+
{
|
| 1491 |
+
"type": "text",
|
| 1492 |
+
"bbox": [
|
| 1493 |
+
0.214,
|
| 1494 |
+
0.431,
|
| 1495 |
+
0.788,
|
| 1496 |
+
0.658
|
| 1497 |
+
],
|
| 1498 |
+
"angle": 0,
|
| 1499 |
+
"content": "localization quality [10,14]. Naive confidence thresholding will introduce some coarse bounding boxes for regression tasks. To alleviate this issue, Unbiased Teacher [14] abandons regression losses on unlabeled data (denoted as \"abandon reg\"); Humble Teacher [21] aligns the regression predictions between the teacher and student on selected top- \\(\\mathcal{N}\\) proposals (dubbed \"reg consistency\"); Soft Teacher [26] introduces the box jittering to calculate prediction variance on jittered pseudo boxes, which is used to filter out poorly localized pseudo boxes. In Tab. 4a, we compare our Positive-proposal Consistency Voting (PCV) with these methods. PCV obtains the best performance, concretely, on \\(\\mathrm{AP}_{75}\\), PCV surpasses two competitors, reg consistency and box jittering, by \\(0.9\\%\\) and \\(0.5\\%\\), respectively. Although both PCV and box jittering [26] rely on prediction variance, there exist great differences. Firstly, PCV produces localization quality by intrinsic proposals, thus it avoids extra network forward on jittered boxes, enjoying higher training efficiency. Moreover, unlike the box jittering, which meticulously tunes the variance threshold, PCV is free of hyper-parameters."
|
| 1500 |
+
},
|
| 1501 |
+
{
|
| 1502 |
+
"type": "text",
|
| 1503 |
+
"bbox": [
|
| 1504 |
+
0.214,
|
| 1505 |
+
0.659,
|
| 1506 |
+
0.788,
|
| 1507 |
+
0.763
|
| 1508 |
+
],
|
| 1509 |
+
"angle": 0,
|
| 1510 |
+
"content": "Study on different hyper-parameters of PLA. We first investigate the performance using different \\(\\alpha\\) in PLA, which balances the influence of classification score \\((s)\\) and localization precision \\((u)\\) in the proposal quality. Through a coarse search shown in Tab. 4b, we find that combining \\(s\\) and \\(u\\) yields better performance than using them individually. We then carry out experiments to study the robustness of the IoU threshold \\(t\\), which is used to build the candidate bag. From the Tab 4c, using lower \\(t\\) to construct a bigger candidate bag is preferred."
|
| 1511 |
+
},
|
| 1512 |
+
{
|
| 1513 |
+
"type": "text",
|
| 1514 |
+
"bbox": [
|
| 1515 |
+
0.214,
|
| 1516 |
+
0.765,
|
| 1517 |
+
0.788,
|
| 1518 |
+
0.84
|
| 1519 |
+
],
|
| 1520 |
+
"angle": 0,
|
| 1521 |
+
"content": "Analysis of Multi-view Scale-invariant Learning. We propose the MSL to model scale invariance from the aspects of both label- and feature-level consistency. The studies on them are reported in Tab. 3. At first, we construct a single-scale training baseline without scale variance, where the input images for the teacher and student are kept on the same scale. It obtains \\(32.7\\mathrm{mAP}\\). Next,"
|
| 1522 |
+
}
|
| 1523 |
+
],
|
| 1524 |
+
[
|
| 1525 |
+
{
|
| 1526 |
+
"type": "page_number",
|
| 1527 |
+
"bbox": [
|
| 1528 |
+
0.218,
|
| 1529 |
+
0.116,
|
| 1530 |
+
0.236,
|
| 1531 |
+
0.127
|
| 1532 |
+
],
|
| 1533 |
+
"angle": 0,
|
| 1534 |
+
"content": "14"
|
| 1535 |
+
},
|
| 1536 |
+
{
|
| 1537 |
+
"type": "header",
|
| 1538 |
+
"bbox": [
|
| 1539 |
+
0.272,
|
| 1540 |
+
0.115,
|
| 1541 |
+
0.326,
|
| 1542 |
+
0.127
|
| 1543 |
+
],
|
| 1544 |
+
"angle": 0,
|
| 1545 |
+
"content": "Li et al."
|
| 1546 |
+
},
|
| 1547 |
+
{
|
| 1548 |
+
"type": "image",
|
| 1549 |
+
"bbox": [
|
| 1550 |
+
0.246,
|
| 1551 |
+
0.147,
|
| 1552 |
+
0.368,
|
| 1553 |
+
0.209
|
| 1554 |
+
],
|
| 1555 |
+
"angle": 0,
|
| 1556 |
+
"content": null
|
| 1557 |
+
},
|
| 1558 |
+
{
|
| 1559 |
+
"type": "image",
|
| 1560 |
+
"bbox": [
|
| 1561 |
+
0.246,
|
| 1562 |
+
0.211,
|
| 1563 |
+
0.368,
|
| 1564 |
+
0.275
|
| 1565 |
+
],
|
| 1566 |
+
"angle": 0,
|
| 1567 |
+
"content": null
|
| 1568 |
+
},
|
| 1569 |
+
{
|
| 1570 |
+
"type": "image",
|
| 1571 |
+
"bbox": [
|
| 1572 |
+
0.37,
|
| 1573 |
+
0.147,
|
| 1574 |
+
0.462,
|
| 1575 |
+
0.209
|
| 1576 |
+
],
|
| 1577 |
+
"angle": 0,
|
| 1578 |
+
"content": null
|
| 1579 |
+
},
|
| 1580 |
+
{
|
| 1581 |
+
"type": "image",
|
| 1582 |
+
"bbox": [
|
| 1583 |
+
0.37,
|
| 1584 |
+
0.211,
|
| 1585 |
+
0.462,
|
| 1586 |
+
0.275
|
| 1587 |
+
],
|
| 1588 |
+
"angle": 0,
|
| 1589 |
+
"content": null
|
| 1590 |
+
},
|
| 1591 |
+
{
|
| 1592 |
+
"type": "image_caption",
|
| 1593 |
+
"bbox": [
|
| 1594 |
+
0.267,
|
| 1595 |
+
0.278,
|
| 1596 |
+
0.445,
|
| 1597 |
+
0.286
|
| 1598 |
+
],
|
| 1599 |
+
"angle": 0,
|
| 1600 |
+
"content": "(a) Pseudo boxes produced by the teacher model"
|
| 1601 |
+
},
|
| 1602 |
+
{
|
| 1603 |
+
"type": "image",
|
| 1604 |
+
"bbox": [
|
| 1605 |
+
0.468,
|
| 1606 |
+
0.147,
|
| 1607 |
+
0.562,
|
| 1608 |
+
0.202
|
| 1609 |
+
],
|
| 1610 |
+
"angle": 0,
|
| 1611 |
+
"content": null
|
| 1612 |
+
},
|
| 1613 |
+
{
|
| 1614 |
+
"type": "image",
|
| 1615 |
+
"bbox": [
|
| 1616 |
+
0.563,
|
| 1617 |
+
0.147,
|
| 1618 |
+
0.646,
|
| 1619 |
+
0.202
|
| 1620 |
+
],
|
| 1621 |
+
"angle": 0,
|
| 1622 |
+
"content": null
|
| 1623 |
+
},
|
| 1624 |
+
{
|
| 1625 |
+
"type": "image",
|
| 1626 |
+
"bbox": [
|
| 1627 |
+
0.647,
|
| 1628 |
+
0.147,
|
| 1629 |
+
0.751,
|
| 1630 |
+
0.202
|
| 1631 |
+
],
|
| 1632 |
+
"angle": 0,
|
| 1633 |
+
"content": null
|
| 1634 |
+
},
|
| 1635 |
+
{
|
| 1636 |
+
"type": "image_caption",
|
| 1637 |
+
"bbox": [
|
| 1638 |
+
0.535,
|
| 1639 |
+
0.204,
|
| 1640 |
+
0.691,
|
| 1641 |
+
0.212
|
| 1642 |
+
],
|
| 1643 |
+
"angle": 0,
|
| 1644 |
+
"content": "(b) Detection results of supervised baseline"
|
| 1645 |
+
},
|
| 1646 |
+
{
|
| 1647 |
+
"type": "image",
|
| 1648 |
+
"bbox": [
|
| 1649 |
+
0.468,
|
| 1650 |
+
0.218,
|
| 1651 |
+
0.562,
|
| 1652 |
+
0.274
|
| 1653 |
+
],
|
| 1654 |
+
"angle": 0,
|
| 1655 |
+
"content": null
|
| 1656 |
+
},
|
| 1657 |
+
{
|
| 1658 |
+
"type": "image",
|
| 1659 |
+
"bbox": [
|
| 1660 |
+
0.563,
|
| 1661 |
+
0.218,
|
| 1662 |
+
0.646,
|
| 1663 |
+
0.274
|
| 1664 |
+
],
|
| 1665 |
+
"angle": 0,
|
| 1666 |
+
"content": null
|
| 1667 |
+
},
|
| 1668 |
+
{
|
| 1669 |
+
"type": "image",
|
| 1670 |
+
"bbox": [
|
| 1671 |
+
0.647,
|
| 1672 |
+
0.219,
|
| 1673 |
+
0.752,
|
| 1674 |
+
0.274
|
| 1675 |
+
],
|
| 1676 |
+
"angle": 0,
|
| 1677 |
+
"content": null
|
| 1678 |
+
},
|
| 1679 |
+
{
|
| 1680 |
+
"type": "image_caption",
|
| 1681 |
+
"bbox": [
|
| 1682 |
+
0.549,
|
| 1683 |
+
0.278,
|
| 1684 |
+
0.678,
|
| 1685 |
+
0.286
|
| 1686 |
+
],
|
| 1687 |
+
"angle": 0,
|
| 1688 |
+
"content": "(c) Detection results of our method"
|
| 1689 |
+
},
|
| 1690 |
+
{
|
| 1691 |
+
"type": "image_caption",
|
| 1692 |
+
"bbox": [
|
| 1693 |
+
0.216,
|
| 1694 |
+
0.289,
|
| 1695 |
+
0.787,
|
| 1696 |
+
0.335
|
| 1697 |
+
],
|
| 1698 |
+
"angle": 0,
|
| 1699 |
+
"content": "Fig. 5: (a) Some pseudo boxes (in yellow) detect objects, missed by ground-truths (in red). Numbers above the pseudo box refer to the predicted consistency \\(\\sigma\\). (b)(c) are the results of the supervised baseline and our method."
|
| 1700 |
+
},
|
| 1701 |
+
{
|
| 1702 |
+
"type": "text",
|
| 1703 |
+
"bbox": [
|
| 1704 |
+
0.214,
|
| 1705 |
+
0.364,
|
| 1706 |
+
0.788,
|
| 1707 |
+
0.529
|
| 1708 |
+
],
|
| 1709 |
+
"angle": 0,
|
| 1710 |
+
"content": "we apply the different scale jitter on the teacher and student to implement label-level consistency, which surpasses the single-scale training by \\(1.2\\mathrm{mAP}\\). Based on the label consistency, we further introduce the view \\(V_{2}\\) to perform feature consistency learning. It obtains \\(+1.0\\%\\) improvements, reaching \\(34.9\\mathrm{mAP}\\). Apart from performance gains, the feature consistency can also significantly boost the convergence speed as depicted in Fig. 4(c). To validate the improvements introduced by the \\(V_{2}\\) come from comprehensive scale-invariant learning, instead of vanilla multi-view training, we also add an extra view \\(V_{2}^{\\prime}\\) besides the \\(V_{1}\\), where \\(V_{2}^{\\prime}\\) is downsampled from \\(V_{1}\\) by \\(2\\mathrm{x}\\) and performs label consistency as \\(V_{1}\\). From the Tab. 3b, vanilla multi-view training with only label consistency hardly brings improvements against the single \\(V_{1}\\) (33.9 vs \\(33.9\\%\\))."
|
| 1711 |
+
},
|
| 1712 |
+
{
|
| 1713 |
+
"type": "text",
|
| 1714 |
+
"bbox": [
|
| 1715 |
+
0.216,
|
| 1716 |
+
0.53,
|
| 1717 |
+
0.546,
|
| 1718 |
+
0.636
|
| 1719 |
+
],
|
| 1720 |
+
"angle": 0,
|
| 1721 |
+
"content": "Effect of Focal Loss. In Tab. 5, we compare the Cross Entropy (CE) Loss and Focal Loss. Thanks to the Focal Loss, an improvement of \\(+0.6\\mathrm{mAP}\\) is achieved against the CE Loss. On the other hand, even with the CE Loss, our PseCo still surpasses the Soft Teacher by a large margin, i.e., \\(1.7\\mathrm{mAP}\\)."
|
| 1722 |
+
},
|
| 1723 |
+
{
|
| 1724 |
+
"type": "table_caption",
|
| 1725 |
+
"bbox": [
|
| 1726 |
+
0.555,
|
| 1727 |
+
0.543,
|
| 1728 |
+
0.787,
|
| 1729 |
+
0.572
|
| 1730 |
+
],
|
| 1731 |
+
"angle": 0,
|
| 1732 |
+
"content": "Table 5: Ablation study on Focal Loss."
|
| 1733 |
+
},
|
| 1734 |
+
{
|
| 1735 |
+
"type": "table",
|
| 1736 |
+
"bbox": [
|
| 1737 |
+
0.561,
|
| 1738 |
+
0.575,
|
| 1739 |
+
0.782,
|
| 1740 |
+
0.614
|
| 1741 |
+
],
|
| 1742 |
+
"angle": 0,
|
| 1743 |
+
"content": "<table><tr><td>method</td><td>mAP</td><td>\\( AP_{50} \\)</td><td>\\( AP_{75} \\)</td></tr><tr><td>PseCo w/ CE Loss</td><td>35.7</td><td>55.6</td><td>38.9</td></tr><tr><td>PseCo w/ Focal Loss</td><td>36.3</td><td>57.2</td><td>39.2</td></tr></table>"
|
| 1744 |
+
},
|
| 1745 |
+
{
|
| 1746 |
+
"type": "title",
|
| 1747 |
+
"bbox": [
|
| 1748 |
+
0.216,
|
| 1749 |
+
0.658,
|
| 1750 |
+
0.36,
|
| 1751 |
+
0.674
|
| 1752 |
+
],
|
| 1753 |
+
"angle": 0,
|
| 1754 |
+
"content": "5 Conclusion"
|
| 1755 |
+
},
|
| 1756 |
+
{
|
| 1757 |
+
"type": "text",
|
| 1758 |
+
"bbox": [
|
| 1759 |
+
0.214,
|
| 1760 |
+
0.689,
|
| 1761 |
+
0.788,
|
| 1762 |
+
0.842
|
| 1763 |
+
],
|
| 1764 |
+
"angle": 0,
|
| 1765 |
+
"content": "In this work, we elaborately analyze two key techniques of semi-supervised object detection (e.g. pseudo labeling and consistency training), and observe these two techniques currently neglect some important properties of object detection. Motivated by this, we propose a new SSOD framework, PseCo, to integrate object detection properties into SSOD. PseCo consists of Noisy Pseudo box Learning (NPL) and Multi-view Scale-invariant Learning (MSL). In NPL, prediction-guided label assignment and positive-proposal consistency voting are proposed to perform the robust label assignment and regression task using noisy pseudo boxes, respectively. Based on the common label-level consistency, MSL additionally designs a novel feature-level scale-invariant learning, which is neglected in"
|
| 1766 |
+
}
|
| 1767 |
+
],
|
| 1768 |
+
[
|
| 1769 |
+
{
|
| 1770 |
+
"type": "header",
|
| 1771 |
+
"bbox": [
|
| 1772 |
+
0.433,
|
| 1773 |
+
0.115,
|
| 1774 |
+
0.732,
|
| 1775 |
+
0.129
|
| 1776 |
+
],
|
| 1777 |
+
"angle": 0,
|
| 1778 |
+
"content": "PseCo for Semi-Supervised Object Detection"
|
| 1779 |
+
},
|
| 1780 |
+
{
|
| 1781 |
+
"type": "page_number",
|
| 1782 |
+
"bbox": [
|
| 1783 |
+
0.769,
|
| 1784 |
+
0.117,
|
| 1785 |
+
0.786,
|
| 1786 |
+
0.127
|
| 1787 |
+
],
|
| 1788 |
+
"angle": 0,
|
| 1789 |
+
"content": "15"
|
| 1790 |
+
},
|
| 1791 |
+
{
|
| 1792 |
+
"type": "text",
|
| 1793 |
+
"bbox": [
|
| 1794 |
+
0.214,
|
| 1795 |
+
0.147,
|
| 1796 |
+
0.788,
|
| 1797 |
+
0.194
|
| 1798 |
+
],
|
| 1799 |
+
"angle": 0,
|
| 1800 |
+
"content": "prior works. To validate the effectiveness of our method, extensive experiments are conducted on COCO benchmark. Experimental results validate PseCo surpasses the SOTAs by a large margin both in accuracy and efficiency."
|
| 1801 |
+
},
|
| 1802 |
+
{
|
| 1803 |
+
"type": "title",
|
| 1804 |
+
"bbox": [
|
| 1805 |
+
0.218,
|
| 1806 |
+
0.215,
|
| 1807 |
+
0.323,
|
| 1808 |
+
0.232
|
| 1809 |
+
],
|
| 1810 |
+
"angle": 0,
|
| 1811 |
+
"content": "References"
|
| 1812 |
+
},
|
| 1813 |
+
{
|
| 1814 |
+
"type": "ref_text",
|
| 1815 |
+
"bbox": [
|
| 1816 |
+
0.226,
|
| 1817 |
+
0.246,
|
| 1818 |
+
0.786,
|
| 1819 |
+
0.288
|
| 1820 |
+
],
|
| 1821 |
+
"angle": 0,
|
| 1822 |
+
"content": "1. Berthelot, D., Carlini, N., Goodfellow, I., Papernot, N., Oliver, A., Raffel, C.A.: Mixmatch: A holistic approach to semi-supervised learning. Advances in Neural Information Processing Systems 32 (2019)"
|
| 1823 |
+
},
|
| 1824 |
+
{
|
| 1825 |
+
"type": "ref_text",
|
| 1826 |
+
"bbox": [
|
| 1827 |
+
0.226,
|
| 1828 |
+
0.289,
|
| 1829 |
+
0.787,
|
| 1830 |
+
0.329
|
| 1831 |
+
],
|
| 1832 |
+
"angle": 0,
|
| 1833 |
+
"content": "2. Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: A large-scale hierarchical image database. In: 2009 IEEE conference on computer vision and pattern recognition. pp. 248-255. IEEE (2009)"
|
| 1834 |
+
},
|
| 1835 |
+
{
|
| 1836 |
+
"type": "ref_text",
|
| 1837 |
+
"bbox": [
|
| 1838 |
+
0.226,
|
| 1839 |
+
0.33,
|
| 1840 |
+
0.786,
|
| 1841 |
+
0.371
|
| 1842 |
+
],
|
| 1843 |
+
"angle": 0,
|
| 1844 |
+
"content": "3. Ge, Z., Liu, S., Li, Z., Yoshie, O., Sun, J.: Ota: Optimal transport assignment for object detection. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 303-312 (2021)"
|
| 1845 |
+
},
|
| 1846 |
+
{
|
| 1847 |
+
"type": "ref_text",
|
| 1848 |
+
"bbox": [
|
| 1849 |
+
0.226,
|
| 1850 |
+
0.372,
|
| 1851 |
+
0.786,
|
| 1852 |
+
0.399
|
| 1853 |
+
],
|
| 1854 |
+
"angle": 0,
|
| 1855 |
+
"content": "4. Grandvalet, Y., Bengio, Y.: Semi-supervised learning by entropy minimization. Advances in neural information processing systems 17 (2004)"
|
| 1856 |
+
},
|
| 1857 |
+
{
|
| 1858 |
+
"type": "ref_text",
|
| 1859 |
+
"bbox": [
|
| 1860 |
+
0.226,
|
| 1861 |
+
0.4,
|
| 1862 |
+
0.786,
|
| 1863 |
+
0.44
|
| 1864 |
+
],
|
| 1865 |
+
"angle": 0,
|
| 1866 |
+
"content": "5. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 770-778 (2016)"
|
| 1867 |
+
},
|
| 1868 |
+
{
|
| 1869 |
+
"type": "ref_text",
|
| 1870 |
+
"bbox": [
|
| 1871 |
+
0.226,
|
| 1872 |
+
0.441,
|
| 1873 |
+
0.786,
|
| 1874 |
+
0.482
|
| 1875 |
+
],
|
| 1876 |
+
"angle": 0,
|
| 1877 |
+
"content": "6. Jiang, B., Luo, R., Mao, J., Xiao, T., Jiang, Y.: Acquisition of localization confidence for accurate object detection. In: Proceedings of the European conference on computer vision (ECCV). pp. 784-799 (2018)"
|
| 1878 |
+
},
|
| 1879 |
+
{
|
| 1880 |
+
"type": "ref_text",
|
| 1881 |
+
"bbox": [
|
| 1882 |
+
0.226,
|
| 1883 |
+
0.483,
|
| 1884 |
+
0.786,
|
| 1885 |
+
0.523
|
| 1886 |
+
],
|
| 1887 |
+
"angle": 0,
|
| 1888 |
+
"content": "7. Lee, D.H., et al.: Pseudo-label: The simple and efficient semi-supervised learning method for deep neural networks. In: Workshop on challenges in representation learning, ICML. vol. 3, p. 896 (2013)"
|
| 1889 |
+
},
|
| 1890 |
+
{
|
| 1891 |
+
"type": "ref_text",
|
| 1892 |
+
"bbox": [
|
| 1893 |
+
0.226,
|
| 1894 |
+
0.524,
|
| 1895 |
+
0.786,
|
| 1896 |
+
0.551
|
| 1897 |
+
],
|
| 1898 |
+
"angle": 0,
|
| 1899 |
+
"content": "8. Li, B., Liu, Y., Wang, X.: Gradient harmonized single-stage detector. In: Proceedings of the AAAI conference on artificial intelligence. vol. 33, pp. 8577-8584 (2019)"
|
| 1900 |
+
},
|
| 1901 |
+
{
|
| 1902 |
+
"type": "ref_text",
|
| 1903 |
+
"bbox": [
|
| 1904 |
+
0.226,
|
| 1905 |
+
0.552,
|
| 1906 |
+
0.786,
|
| 1907 |
+
0.605
|
| 1908 |
+
],
|
| 1909 |
+
"angle": 0,
|
| 1910 |
+
"content": "9. Li, G., Li, X., Wang, Y., Zhang, S., Wu, Y., Liang, D.: Knowledge distillation for object detection via rank mimicking and prediction-guided feature imitation. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 36, pp. 1306-1313 (2022)"
|
| 1911 |
+
},
|
| 1912 |
+
{
|
| 1913 |
+
"type": "ref_text",
|
| 1914 |
+
"bbox": [
|
| 1915 |
+
0.226,
|
| 1916 |
+
0.607,
|
| 1917 |
+
0.786,
|
| 1918 |
+
0.647
|
| 1919 |
+
],
|
| 1920 |
+
"angle": 0,
|
| 1921 |
+
"content": "0. Li, X., Lv, C., Wang, W., Li, G., Yang, L., Yang, J.: Generalized focal loss: Towards efficient representation learning for dense object detection. IEEE Transactions on Pattern Analysis and Machine Intelligence (2022)"
|
| 1922 |
+
},
|
| 1923 |
+
{
|
| 1924 |
+
"type": "ref_text",
|
| 1925 |
+
"bbox": [
|
| 1926 |
+
0.226,
|
| 1927 |
+
0.648,
|
| 1928 |
+
0.786,
|
| 1929 |
+
0.688
|
| 1930 |
+
],
|
| 1931 |
+
"angle": 0,
|
| 1932 |
+
"content": "1. Lin, T.Y., Dóllar, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: Proceedings of the IEEE conference on computer vision and pattern recognition. pp. 2117-2125 (2017)"
|
| 1933 |
+
},
|
| 1934 |
+
{
|
| 1935 |
+
"type": "ref_text",
|
| 1936 |
+
"bbox": [
|
| 1937 |
+
0.226,
|
| 1938 |
+
0.689,
|
| 1939 |
+
0.786,
|
| 1940 |
+
0.731
|
| 1941 |
+
],
|
| 1942 |
+
"angle": 0,
|
| 1943 |
+
"content": "2. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dollar, P.: Focal loss for dense object detection. In: Proceedings of the IEEE international conference on computer vision. pp. 2980-2988 (2017)"
|
| 1944 |
+
},
|
| 1945 |
+
{
|
| 1946 |
+
"type": "ref_text",
|
| 1947 |
+
"bbox": [
|
| 1948 |
+
0.226,
|
| 1949 |
+
0.731,
|
| 1950 |
+
0.786,
|
| 1951 |
+
0.772
|
| 1952 |
+
],
|
| 1953 |
+
"angle": 0,
|
| 1954 |
+
"content": "3. Lin, T.Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Dollár, P., Zitnick, C.L.: Microsoft coco: Common objects in context. In: European conference on computer vision. pp. 740-755. Springer (2014)"
|
| 1955 |
+
},
|
| 1956 |
+
{
|
| 1957 |
+
"type": "ref_text",
|
| 1958 |
+
"bbox": [
|
| 1959 |
+
0.226,
|
| 1960 |
+
0.773,
|
| 1961 |
+
0.786,
|
| 1962 |
+
0.812
|
| 1963 |
+
],
|
| 1964 |
+
"angle": 0,
|
| 1965 |
+
"content": "4. Liu, Y.C., Ma, C.Y., He, Z., Kuo, C.W., Chen, K., Zhang, P., Wu, B., Kira, Z., Vajda, P.: Unbiased teacher for semi-supervised object detection. arXiv preprint arXiv:2102.09480 (2021)"
|
| 1966 |
+
},
|
| 1967 |
+
{
|
| 1968 |
+
"type": "ref_text",
|
| 1969 |
+
"bbox": [
|
| 1970 |
+
0.226,
|
| 1971 |
+
0.813,
|
| 1972 |
+
0.786,
|
| 1973 |
+
0.84
|
| 1974 |
+
],
|
| 1975 |
+
"angle": 0,
|
| 1976 |
+
"content": "5. Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings"
|
| 1977 |
+
},
|
| 1978 |
+
{
|
| 1979 |
+
"type": "list",
|
| 1980 |
+
"bbox": [
|
| 1981 |
+
0.226,
|
| 1982 |
+
0.246,
|
| 1983 |
+
0.787,
|
| 1984 |
+
0.84
|
| 1985 |
+
],
|
| 1986 |
+
"angle": 0,
|
| 1987 |
+
"content": null
|
| 1988 |
+
}
|
| 1989 |
+
],
|
| 1990 |
+
[
|
| 1991 |
+
{
|
| 1992 |
+
"type": "page_number",
|
| 1993 |
+
"bbox": [
|
| 1994 |
+
0.218,
|
| 1995 |
+
0.116,
|
| 1996 |
+
0.236,
|
| 1997 |
+
0.127
|
| 1998 |
+
],
|
| 1999 |
+
"angle": 0,
|
| 2000 |
+
"content": "16"
|
| 2001 |
+
},
|
| 2002 |
+
{
|
| 2003 |
+
"type": "header",
|
| 2004 |
+
"bbox": [
|
| 2005 |
+
0.272,
|
| 2006 |
+
0.115,
|
| 2007 |
+
0.326,
|
| 2008 |
+
0.127
|
| 2009 |
+
],
|
| 2010 |
+
"angle": 0,
|
| 2011 |
+
"content": "Li et al."
|
| 2012 |
+
},
|
| 2013 |
+
{
|
| 2014 |
+
"type": "ref_text",
|
| 2015 |
+
"bbox": [
|
| 2016 |
+
0.245,
|
| 2017 |
+
0.148,
|
| 2018 |
+
0.785,
|
| 2019 |
+
0.175
|
| 2020 |
+
],
|
| 2021 |
+
"angle": 0,
|
| 2022 |
+
"content": "of the IEEE/CVF International Conference on Computer Vision. pp. 10012-10022 (2021)"
|
| 2023 |
+
},
|
| 2024 |
+
{
|
| 2025 |
+
"type": "ref_text",
|
| 2026 |
+
"bbox": [
|
| 2027 |
+
0.218,
|
| 2028 |
+
0.177,
|
| 2029 |
+
0.786,
|
| 2030 |
+
0.217
|
| 2031 |
+
],
|
| 2032 |
+
"angle": 0,
|
| 2033 |
+
"content": "16. Qi, L., Kuen, J., Gu, J., Lin, Z., Wang, Y., Chen, Y., Li, Y., Jia, J.: Multi-scale aligned distillation for low-resolution detection. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14443-14453 (2021)"
|
| 2034 |
+
},
|
| 2035 |
+
{
|
| 2036 |
+
"type": "ref_text",
|
| 2037 |
+
"bbox": [
|
| 2038 |
+
0.218,
|
| 2039 |
+
0.218,
|
| 2040 |
+
0.786,
|
| 2041 |
+
0.258
|
| 2042 |
+
],
|
| 2043 |
+
"angle": 0,
|
| 2044 |
+
"content": "17. Ren, S., He, K., Girshick, R., Sun, J.: Faster r-cnn: Towards real-time object detection with region proposal networks. Advances in neural information processing systems 28 (2015)"
|
| 2045 |
+
},
|
| 2046 |
+
{
|
| 2047 |
+
"type": "ref_text",
|
| 2048 |
+
"bbox": [
|
| 2049 |
+
0.219,
|
| 2050 |
+
0.26,
|
| 2051 |
+
0.785,
|
| 2052 |
+
0.285
|
| 2053 |
+
],
|
| 2054 |
+
"angle": 0,
|
| 2055 |
+
"content": "18. Scudder, H.: Probability of error of some adaptive pattern-recognition machines. IEEE Transactions on Information Theory 11(3), 363-371 (1965)"
|
| 2056 |
+
},
|
| 2057 |
+
{
|
| 2058 |
+
"type": "ref_text",
|
| 2059 |
+
"bbox": [
|
| 2060 |
+
0.219,
|
| 2061 |
+
0.287,
|
| 2062 |
+
0.786,
|
| 2063 |
+
0.34
|
| 2064 |
+
],
|
| 2065 |
+
"angle": 0,
|
| 2066 |
+
"content": "19. Sohn, K., Berthelot, D., Carlini, N., Zhang, Z., Zhang, H., Raffel, C.A., Cubuk, E.D., Kurakin, A., Li, C.L.: Fixmatch: Simplifying semi-supervised learning with consistency and confidence. Advances in Neural Information Processing Systems 33, 596-608 (2020)"
|
| 2067 |
+
},
|
| 2068 |
+
{
|
| 2069 |
+
"type": "ref_text",
|
| 2070 |
+
"bbox": [
|
| 2071 |
+
0.218,
|
| 2072 |
+
0.343,
|
| 2073 |
+
0.786,
|
| 2074 |
+
0.383
|
| 2075 |
+
],
|
| 2076 |
+
"angle": 0,
|
| 2077 |
+
"content": "20. Sohn, K., Zhang, Z., Li, C.L., Zhang, H., Lee, C.Y., Pfister, T.: A simple semi-supervised learning framework for object detection. arXiv preprint arXiv:2005.04757 (2020)"
|
| 2078 |
+
},
|
| 2079 |
+
{
|
| 2080 |
+
"type": "ref_text",
|
| 2081 |
+
"bbox": [
|
| 2082 |
+
0.217,
|
| 2083 |
+
0.384,
|
| 2084 |
+
0.786,
|
| 2085 |
+
0.424
|
| 2086 |
+
],
|
| 2087 |
+
"angle": 0,
|
| 2088 |
+
"content": "21. Tang, Y., Chen, W., Luo, Y., Zhang, Y.: Humble teachers teach better students for semi-supervised object detection. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3132-3141 (2021)"
|
| 2089 |
+
},
|
| 2090 |
+
{
|
| 2091 |
+
"type": "ref_text",
|
| 2092 |
+
"bbox": [
|
| 2093 |
+
0.217,
|
| 2094 |
+
0.426,
|
| 2095 |
+
0.786,
|
| 2096 |
+
0.466
|
| 2097 |
+
],
|
| 2098 |
+
"angle": 0,
|
| 2099 |
+
"content": "22. Tarvainen, A., Valpola, H.: Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. Advances in neural information processing systems 30 (2017)"
|
| 2100 |
+
},
|
| 2101 |
+
{
|
| 2102 |
+
"type": "ref_text",
|
| 2103 |
+
"bbox": [
|
| 2104 |
+
0.217,
|
| 2105 |
+
0.468,
|
| 2106 |
+
0.786,
|
| 2107 |
+
0.507
|
| 2108 |
+
],
|
| 2109 |
+
"angle": 0,
|
| 2110 |
+
"content": "23. Wei, F., Gao, Y., Wu, Z., Hu, H., Lin, S.: Aligning pretraining for detection via object-level contrastive learning. Advances in Neural Information Processing Systems 34 (2021)"
|
| 2111 |
+
},
|
| 2112 |
+
{
|
| 2113 |
+
"type": "ref_text",
|
| 2114 |
+
"bbox": [
|
| 2115 |
+
0.217,
|
| 2116 |
+
0.509,
|
| 2117 |
+
0.786,
|
| 2118 |
+
0.548
|
| 2119 |
+
],
|
| 2120 |
+
"angle": 0,
|
| 2121 |
+
"content": "24. Xie, Q., Dai, Z., Hovy, E., Luong, T., Le, Q.: Unsupervised data augmentation for consistency training. Advances in Neural Information Processing Systems 33, 6256-6268 (2020)"
|
| 2122 |
+
},
|
| 2123 |
+
{
|
| 2124 |
+
"type": "ref_text",
|
| 2125 |
+
"bbox": [
|
| 2126 |
+
0.217,
|
| 2127 |
+
0.55,
|
| 2128 |
+
0.786,
|
| 2129 |
+
0.59
|
| 2130 |
+
],
|
| 2131 |
+
"angle": 0,
|
| 2132 |
+
"content": "25. Xie, Q., Luong, M.T., Hovy, E., Le, Q.V.: Self-training with noisy student improves imagenet classification. In: Proceedings of the IEEE/CVF conference on computer vision and pattern recognition. pp. 10687-10698 (2020)"
|
| 2133 |
+
},
|
| 2134 |
+
{
|
| 2135 |
+
"type": "ref_text",
|
| 2136 |
+
"bbox": [
|
| 2137 |
+
0.217,
|
| 2138 |
+
0.592,
|
| 2139 |
+
0.786,
|
| 2140 |
+
0.632
|
| 2141 |
+
],
|
| 2142 |
+
"angle": 0,
|
| 2143 |
+
"content": "26. Xu, M., Zhang, Z., Hu, H., Wang, J., Wang, L., Wei, F., Bai, X., Liu, Z.: End-to-end semi-supervised object detection with soft teacher. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 3060-3069 (2021)"
|
| 2144 |
+
},
|
| 2145 |
+
{
|
| 2146 |
+
"type": "ref_text",
|
| 2147 |
+
"bbox": [
|
| 2148 |
+
0.217,
|
| 2149 |
+
0.634,
|
| 2150 |
+
0.786,
|
| 2151 |
+
0.687
|
| 2152 |
+
],
|
| 2153 |
+
"angle": 0,
|
| 2154 |
+
"content": "27. Yang, Q., Wei, X., Wang, B., Hua, X.S., Zhang, L.: Interactive self-training with mean teachers for semi-supervised object detection. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5941-5950 (2021)"
|
| 2155 |
+
},
|
| 2156 |
+
{
|
| 2157 |
+
"type": "ref_text",
|
| 2158 |
+
"bbox": [
|
| 2159 |
+
0.217,
|
| 2160 |
+
0.689,
|
| 2161 |
+
0.786,
|
| 2162 |
+
0.729
|
| 2163 |
+
],
|
| 2164 |
+
"angle": 0,
|
| 2165 |
+
"content": "28. Zhang, B., Wang, Y., Hou, W., Wu, H., Wang, J., Okumura, M., Shinozaki, T.: Flexmatch: Boosting semi-supervised learning with curriculum pseudo labeling. Advances in Neural Information Processing Systems 34 (2021)"
|
| 2166 |
+
},
|
| 2167 |
+
{
|
| 2168 |
+
"type": "ref_text",
|
| 2169 |
+
"bbox": [
|
| 2170 |
+
0.217,
|
| 2171 |
+
0.731,
|
| 2172 |
+
0.786,
|
| 2173 |
+
0.77
|
| 2174 |
+
],
|
| 2175 |
+
"angle": 0,
|
| 2176 |
+
"content": "29. Zhang, H., Wang, Y., Dayoub, F., Sunderhauf, N.: Varifocalnet: An iou-aware dense object detector. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8514-8523 (2021)"
|
| 2177 |
+
},
|
| 2178 |
+
{
|
| 2179 |
+
"type": "ref_text",
|
| 2180 |
+
"bbox": [
|
| 2181 |
+
0.217,
|
| 2182 |
+
0.772,
|
| 2183 |
+
0.786,
|
| 2184 |
+
0.812
|
| 2185 |
+
],
|
| 2186 |
+
"angle": 0,
|
| 2187 |
+
"content": "30. Zhou, Q., Yu, C., Wang, Z., Qian, Q., Li, H.: Instant-teaching: An end-to-end semi-supervised object detection framework. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4081-4090 (2021)"
|
| 2188 |
+
},
|
| 2189 |
+
{
|
| 2190 |
+
"type": "list",
|
| 2191 |
+
"bbox": [
|
| 2192 |
+
0.217,
|
| 2193 |
+
0.148,
|
| 2194 |
+
0.786,
|
| 2195 |
+
0.812
|
| 2196 |
+
],
|
| 2197 |
+
"angle": 0,
|
| 2198 |
+
"content": null
|
| 2199 |
+
}
|
| 2200 |
+
]
|
| 2201 |
+
]
|
2203.16xxx/2203.16317/b564aed0-5bb1-4ebd-a32c-a4a45cb20e3c_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d415d90e69b8bbf3fc5f7e9e37bd76f5b1f6d394a24a9ac1d3cd81de711a8e24
|
| 3 |
+
size 3519185
|
2203.16xxx/2203.16317/full.md
ADDED
|
@@ -0,0 +1,287 @@
|
| 1 |
+
# PseCo: Pseudo Labeling and Consistency Training for Semi-Supervised Object Detection
|
| 2 |
+
|
| 3 |
+
Gang Li $^{1,2}$ , Xiang Li $^{1\star}$ , Yujie Wang $^{2}$ , Yichao Wu $^{2}$ , Ding Liang $^{2}$ , and Shanshan Zhang $^{1\star}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ Nanjing University of Science and Technology
|
| 6 |
+
|
| 7 |
+
$^{2}$ SenseTime Research
|
| 8 |
+
|
| 9 |
+
{gang.li, xiang.li.implus, shanshan.zhang}@njust.edu.cn
|
| 10 |
+
|
| 11 |
+
{wangyujie,wuyichao,liangding}@sensetime.com
|
| 12 |
+
|
| 13 |
+
Abstract. In this paper, we delve into two key techniques in Semi-Supervised Object Detection (SSOD), namely pseudo labeling and consistency training. We observe that these two techniques currently neglect some important properties of object detection, hindering efficient learning on unlabeled data. Specifically, for pseudo labeling, existing works only focus on the classification score yet fail to guarantee the localization precision of pseudo boxes; For consistency training, the widely adopted random-resize training only considers the label-level consistency but misses the feature-level one, which also plays an important role in ensuring the scale invariance. To address the problems incurred by noisy pseudo boxes, we design Noisy Pseudo box Learning (NPL) that includes Prediction-guided Label Assignment (PLA) and Positive-proposal Consistency Voting (PCV). PLA relies on model predictions to assign labels and makes it robust to even coarse pseudo boxes; while PCV leverages the regression consistency of positive proposals to reflect the localization quality of pseudo boxes. Furthermore, in consistency training, we propose Multi-view Scale-invariant Learning (MSL) that includes mechanisms of both label- and feature-level consistency, where feature consistency is achieved by aligning shifted feature pyramids between two images with identical content but varied scales. On COCO benchmark, our method, termed PSEudo labeling and COnsistency training (PseCo), outperforms the SOTA (Soft Teacher) by 2.0, 1.8, 2.0 points under $1\%$ , $5\%$ , and $10\%$ labelling ratios, respectively. It also significantly improves the learning efficiency for SSOD, e.g., PseCo halves the training time of the SOTA approach but achieves even better performance. Code is available at https://github.com/ligang-cs/PseCo.
|
| 14 |
+
|
| 15 |
+
Keywords: Semi-supervised Learning, Object Detection
|
| 16 |
+
|
| 17 |
+
# 1 Introduction
|
| 18 |
+
|
| 19 |
+
With the rapid development of deep learning, many computer vision tasks have achieved significant improvements, such as image classification [2], object detection [15,1,9], etc.
|
| 20 |
+
|
| 21 |
+

|
| 22 |
+
(a) Precision of pseudo boxes
|
| 23 |
+
|
| 24 |
+

|
| 25 |
+
(b) Relations between real quality and prediction consistency
|
| 26 |
+
Fig. 1: (a) The precision of pseudo boxes under various IoU thresholds. (b) The scatter diagram of the relation between the prediction consistency and their true localization quality. Some dots falling in the orange ellipse are caused by annotation errors. We show some examples in Fig. 5. (c) One specific example to demonstrate that noisy pseudo boxes will mislead label assignment.
|
| 27 |
+
|
| 28 |
+

|
| 29 |
+
(c) Wrong label results brought by the Noisy Pseudo Box
|
| 30 |
+
|
| 31 |
+
Behind these advances, plenty of annotated data plays an important role [23]. However, labeling accurate annotations for large-scale data is usually time-consuming and expensive, especially for object detection, which requires annotating precise bounding boxes for each instance in addition to category labels. Therefore, employing easily accessible unlabeled data to facilitate model training with limited annotated data is a promising direction, named Semi-Supervised Learning, where labeled data and unlabeled data are combined together as training examples.
|
| 32 |
+
|
| 33 |
+
Semi-Supervised Image Classification (SSIC) has been widely investigated in previous literature, and the learning paradigms on unlabeled data can be roughly divided into two categories: pseudo labeling [7,18] and consistency training [24,22], each of which receives much attention. Recently, some works (e.g., FixMatch [19], FlexMatch [28]) attempt to combine these two techniques into one framework and achieve state-of-the-art performance. In Semi-Supervised Object Detection (SSOD), some works borrow the key techniques (e.g. pseudo labeling, consistency training) from SSIC and directly apply them to SSOD. Although these works [30,26] obtain gains from unlabeled data, they neglect some important properties of object detection, resulting in sub-optimal results. On the one hand, compared with image classification, pseudo labels in object detection are more complicated, containing both category and location information. On the other hand, object detection requires stronger scale invariance than image classification, as it needs to carefully deal with targets of widely varying scales. In this work, we present an SSOD framework, termed PSEudo labeling and COnsistency training (PseCo), to integrate object detection properties into SSOD, making pseudo labeling and consistency training work better for object detection tasks.
|
| 34 |
+
|
| 35 |
+
In pseudo labeling, the model produces one-hot pseudo labels on unlabeled data by itself, and only pseudo labels whose scores are above the predefined score threshold are retained. As for object detection, the pseudo label consists of both category labels and bounding boxes.
|
| 36 |
+
|
| 37 |
+
Although category labels can be guaranteed to be accurate by setting a high score threshold, the localization quality of pseudo boxes can be neither measured nor guaranteed. It has been validated in previous works that the classification score is not strongly correlated with the precision of box localization [10,29,6,26]. In Fig. 1(a), we compute the precision of pseudo boxes under various Intersection-over-Union (IoU) thresholds, by comparing produced pseudo boxes with ground-truths. Under a loose criterion $(\mathrm{IoU} = 0.3)$ , precision reaches $81\%$ , but it drops to $31\%$ when we lift the IoU threshold to 0.9. This dramatic precision gap indicates that coarse pseudo boxes, whose IoUs fall in [0.3, 0.9], account for $50\%$ of all pseudo boxes. If these noisy pseudo boxes are used as targets to train the detector, they inevitably hinder optimization, resulting in slow convergence and inefficient learning on unlabeled data. Below, we analyze the negative effects brought by noisy pseudo boxes on the classification and regression tasks, respectively.
|
| 38 |
+
|
| 39 |
+
For the classification task, noisy pseudo boxes will mislead the label assignment, where labels are assigned based on IoUs between proposals and gt boxes (pseudo boxes in our case). As shown in Fig. 1(c), a background proposal is taken as foreground due to a large IoU value with a poorly localized pseudo box. As a result, the IoU-based label assignment will fail on unlabeled data and confuse decision boundaries between foreground and background. To address this issue, we design a prediction-guided label assignment strategy for unlabeled data, which assigns labels based on predictions of the teacher, instead of IoUs with pseudo boxes as before, making it robust to poorly localized pseudo boxes.
|
| 40 |
+
|
| 41 |
+
For the regression task, it is necessary to measure the localization quality of pseudo boxes. We propose a simple yet effective method to achieve this, named Positive-proposal Consistency Voting. We empirically find that the regression consistency of positive proposals is capable of reflecting the localization quality of the corresponding pseudo boxes. In Fig. 1(b), we visualize the relation between predicted consistency and true IoUs, where a positive correlation can be found. Therefore, it is reasonable to employ the estimated localization quality (i.e., regression consistency of positive proposals) to re-weight the regression losses, making precise pseudo boxes contribute more to the regression supervision.
|
| 42 |
+
|
| 43 |
+
Apart from pseudo labeling, we also analyze the consistency training for SSOD. Consistency training enforces the model to generate similar predictions when fed with perturbed versions of the same image, where perturbations can be implemented by injecting various data augmentations. Through consistency training, models can become invariant to different input transformations. Current SSOD methods [30,26,14] only apply off-the-shelf, general data augmentations, most of which are borrowed from image classification. However, different from classification, object detection is an instance-based task, where object scales usually vary in a large range, and detectors are expected to handle all scale ranges. Therefore, learning a strong scale-invariant ability via consistency training is important. In scale consistency, the model should predict the same boxes for input images with identical contents but varied scales. To ensure label consistency, random resizing is a common augmentation, which resizes input images and gt boxes according to a randomly generated resize ratio.
|
| 44 |
+
|
| 45 |
+
Besides label consistency, feature consistency also plays an important role in scale-invariant learning, but it is neglected in previous works. Thanks to the pyramid structure of popular backbone networks, feature alignment can be easily implemented by shifting feature pyramid levels according to the scale changes. Motivated by this, we introduce a brand new data augmentation technique, named Multi-view Scale-invariant Learning (MSL), to learn label-level and feature-level consistency simultaneously in a simple framework.
|
| 46 |
+
|
| 47 |
+
In summary, we delve into two key techniques of semi-supervised learning (i.e., pseudo labeling and consistency training) for SSOD, and integrate object detection properties into them. On COCO benchmarks, our PseCo outperforms the state-of-the-art methods by a large margin; for example, under the $10\%$ labelling ratio, it improves a $26.9\%$ mAP baseline to $36.1\%$ mAP, surpassing previous methods by at least $2.0\%$ . When labeled data is abundant, i.e., we use the full COCO training set as labeled data and the extra 123k unlabeled2017 images as unlabeled data, our PseCo improves the $41.0\%$ mAP baseline by $+5.1\%$ , reaching $46.1\%$ mAP and establishing a new state of the art. Moreover, PseCo also significantly boosts the convergence speed, e.g., PseCo halves the training time of the SOTA (Soft Teacher [26]) but achieves even better performance.
|
| 48 |
+
|
| 49 |
+
# 2 Related Works
|
| 50 |
+
|
| 51 |
+
Semi-supervised learning in image classification. Semi-supervised learning can be categorized into two groups: pseudo labeling (also called self-training) and consistency training, and previous methods design learning paradigms based on one of them. Pseudo labeling [7,18,4,25] iteratively adds unlabeled data into the training procedure with pseudo labels annotated by an initially trained network. Here, only model predictions with high confidence are transformed into the one-hot format and become pseudo labels. Noisy Student Training [25] injects noise into unlabeled data training, which equips the model with stronger generalization through training on the combination of labeled and unlabeled data. On the other hand, consistency training [22,24,1] relies on the assumption that the model should be invariant to small changes to input images or model hidden states. It enforces the model to make similar predictions on perturbed versions of the same image, where perturbations can be implemented by injecting noise into images and hidden states. UDA [24] validates that advanced data augmentations play a crucial role in consistency training, and observes that the strong augmentations found in supervised learning can also lead to obvious improvements in semi-supervised learning.
|
| 52 |
+
|
| 53 |
+
Recently, some works [19,28] attempt to combine pseudo labeling and consistency training, achieving state-of-the-art performance. FixMatch [19] first applies weak and strong augmentations to the same input image, respectively, to generate two versions, then uses the weakly-augmented version to generate hard pseudo labels. The model is trained on the strongly-augmented versions to align predictions with pseudo labels. Based on FixMatch, FlexMatch [28] proposes to adjust score thresholds for different classes during the generation of pseudo labels, based on curriculum learning.
|
| 54 |
+
|
| 55 |
+
It has been widely validated that pseudo labeling and consistency training are two powerful techniques in semi-supervised image classification; hence, in this work, we attempt to integrate object detection properties into them and make them work better for semi-supervised object detection.
|
| 56 |
+
|
| 57 |
+
Semi-supervised learning in object detection. STAC [20] is the first attempt to apply pseudo labeling and consistency training based on strong data augmentations to semi-supervised object detection; however, it adopts two training stages, as in Noisy Student Training [25], which prevents the pseudo labels from updating along with model training and limits the performance. After STAC, [26,30,21,27,14] borrow the idea of Exponential Moving Average (EMA) from Mean Teacher [22] and update the teacher model after each training iteration to generate instant pseudo labels, realizing an end-to-end framework. To pursue high-quality pseudo labels and overcome confirmation bias, Instant-Teaching [30] and ISMT [27] introduce model ensembles to aggregate predictions from multiple, differently initialized teacher models; similarly, Humble Teacher [21] ensembles the teacher model predictions by taking both the image and its horizontally flipped version as input. Although these ensemble methods can promote the quality of pseudo labels, they also introduce considerable computation overhead. Unbiased Teacher [14] replaces the traditional Cross-entropy loss with Focal loss [12] to alleviate the class-imbalanced pseudo-labeling issue, which shows strong performance when labeled data is scarce. Soft Teacher [26] uses teacher classification scores as classification loss weights, to suppress negative effects from underlying objects missed by pseudo labels. Different from previous methods, our work carefully analyzes whether pseudo labeling and consistency training can be directly applied to SSOD, and arrives at a negative answer. To integrate object detection properties into these two techniques, we introduce Noisy Pseudo box Learning and Multi-view Scale-invariant Learning, obtaining much better performance and faster convergence.
|
| 58 |
+
|
| 59 |
+
# 3 Method
|
| 60 |
+
|
| 61 |
+
We show the framework of our PseCo in Fig. 2. On the unlabeled data, PseCo consists of Noisy Pseudo box Learning (NPL) and Multi-view Scale-invariant Learning (MSL). In the following parts, we will introduce the basic framework, the proposed NPL and MSL, respectively.
|
| 62 |
+
|
| 63 |
+
# 3.1 The basic framework
|
| 64 |
+
|
| 65 |
+
At first, we directly apply standard pseudo labeling and consistency training to SSOD, building our basic framework. Following previous works [26,14,30], we adopt the Teacher-student training scheme, where the teacher model is built from the student model at every training iteration via Exponential Moving Average (EMA).
|
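As a rough illustration of the EMA update described above, a minimal PyTorch-style sketch follows; the momentum value 0.999 and the parameter/buffer layout are our assumptions, not settings taken from the paper.

```python
import torch

@torch.no_grad()
def ema_update(teacher, student, momentum=0.999):
    # teacher <- momentum * teacher + (1 - momentum) * student,
    # applied once after every student optimization step.
    for t_p, s_p in zip(teacher.parameters(), student.parameters()):
        t_p.mul_(momentum).add_(s_p, alpha=1.0 - momentum)
    # Buffers (e.g. BatchNorm running statistics) are copied directly.
    for t_b, s_b in zip(teacher.buffers(), student.buffers()):
        t_b.copy_(s_b)
```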
| 66 |
+
|
| 67 |
+

|
| 68 |
+
Fig. 2: The framework of our PseCo. Each training batch consists of both labeled and unlabeled images. On the unlabeled images, the student model trains on view $V_{1}$ and $V_{2}$ at the same time, taking the same pseudo boxes as supervisions. View $V_{0}$ refers to input images for the teacher model.
|
| 69 |
+
|
| 70 |
+
We randomly sample labeled and unlabeled data according to a fixed sample ratio to form each training batch. On the labeled data, the student model is trained in a regular manner, supervised by the ground-truth boxes:
|
| 71 |
+
|
| 72 |
+
$$
|
| 73 |
+
\mathcal{L}^{l} = \mathcal{L}_{cls}^{l} + \mathcal{L}_{reg}^{l}. \tag{1}
|
| 74 |
+
$$
|
| 75 |
+
|
| 76 |
+
On the unlabeled data, we first apply weak data augmentations (e.g. horizontal flip, random resizing) to input images and then feed them to the teacher model for pseudo label generation. Considering that detection boxes tend to be dense even after NMS, we set a score threshold $\tau$ and only retain boxes with scores above $\tau$ as pseudo labels. After that, strong augmentations (e.g. cutout, rotation, brightness jitter)<sup>3</sup> are performed on the input image to generate the training example for the student model. Since high classification scores do not imply precise localization, we abandon bounding box regression on unlabeled data, as done in [14]. Indeed, applying the box regression loss on unlabeled data causes unstable training in our experiments.
|
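The thresholding step can be sketched as below; the tensor names and shapes are illustrative assumptions, and tau = 0.5 follows Sec. 4.2.

```python
def filter_pseudo_labels(boxes, scores, labels, tau=0.5):
    # boxes: (N, 4), scores: (N,), labels: (N,) teacher detections after NMS.
    # Keep only detections whose classification score exceeds tau.
    keep = scores > tau
    return boxes[keep], scores[keep], labels[keep]
```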
| 77 |
+
|
| 78 |
+
Foreground-background imbalance [8,12] is an intrinsic issue in object detection, and it gets worse under the semi-supervised setting. A high score threshold $\tau$ is usually adopted to guarantee the precision of pseudo labels, but it also results in scarcity of pseudo labels, aggravating the foreground/background imbalance. Moreover, there also exists foreground-foreground imbalance; specifically, training examples from some specific categories can be limited when labeled data is scarce, which makes the model prone to predicting the dominant classes, causing biased predictions.
|
| 79 |
+
|
| 80 |
+
To alleviate these imbalance issues, we follow the practice of Unbiased Teacher [14] and replace the standard cross-entropy loss with focal loss [12]:
|
| 81 |
+
|
| 82 |
+
$$
|
| 83 |
+
\mathcal{L}_{cls}^{u} = -\alpha_{t} (1 - p_{t})^{\gamma} \log(p_{t}), \quad p_{t} = \begin{cases} p, & \text{if } y = 1, \\ 1 - p, & \text{otherwise}, \end{cases} \tag{2}
|
| 84 |
+
$$
|
| 85 |
+
|
| 86 |
+
where the parameters $\alpha_{t}$ and $\gamma$ adopt the default settings of the original focal loss paper [12]. The overall loss function is formulated as:
|
| 87 |
+
|
| 88 |
+
$$
|
| 89 |
+
\mathcal{L} = \mathcal{L}^{l} + \beta \mathcal{L}^{u}, \tag{3}
|
| 90 |
+
$$
|
| 91 |
+
|
| 92 |
+
where $\beta$ is used to control the contribution of unlabeled data. In theory, our proposed method is independent of the detection framework and can be applied to both one-stage and two-stage detectors. However, considering that all previous methods are based on the Faster R-CNN [17] detection framework, for a fair comparison with them, we also adopt Faster R-CNN as the default detection framework.
|
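As a concrete reading of Eqs. (2) and (3), here is a minimal sketch in the binary one-vs-all form commonly used by detection heads; the tensor layout is an assumption, while alpha_t = 0.25 and gamma = 2 are the defaults of [12] and beta = 4.0 follows Sec. 4.2.

```python
import torch
import torch.nn.functional as F

def focal_loss(logits, targets, alpha=0.25, gamma=2.0):
    # logits, targets: (N, C) with one-hot float targets. Since the BCE term
    # equals -log(p_t), this computes -alpha_t * (1 - p_t)^gamma * log(p_t).
    p = torch.sigmoid(logits)
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p_t = p * targets + (1 - p) * (1 - targets)
    alpha_t = alpha * targets + (1 - alpha) * (1 - targets)
    return (alpha_t * (1.0 - p_t) ** gamma * ce).mean()

def total_loss(loss_labeled, loss_unlabeled, beta=4.0):
    # Eq. (3): beta controls the contribution of the unlabeled data.
    return loss_labeled + beta * loss_unlabeled
```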
| 93 |
+
|
| 94 |
+
# 3.2 Noisy Pseudo Box Learning
|
| 95 |
+
|
| 96 |
+
In SSOD, pseudo labels contain both category and location information. Since the score of a pseudo label can only indicate the confidence of its category, the localization quality of pseudo boxes is not guaranteed. Imprecise pseudo boxes will mislead the label assignment and regression tasks, making learning on unlabeled data inefficient. Motivated by this, we introduce Prediction-guided Label Assignment and Positive-proposal Consistency Voting to reduce the negative effects on the label assignment and regression tasks, respectively.
|
| 97 |
+
|
| 98 |
+
Prediction-guided Label Assignment. The standard label assignment strategy in Faster R-CNN [17] only takes the IoUs between proposals and gt boxes (pseudo boxes in our case) into consideration and assigns foreground to those proposals whose IoUs are above a pre-defined threshold $t$ (0.5 by default). This strategy relies on the assumption that gt boxes are precise; however, this assumption obviously does not hold for unlabeled data. As a result, some low-quality proposals will be mistakenly assigned as positive, confusing the classification boundaries between foreground and background. One specific example is shown in Fig. 1(c), where a proposal with a true IoU of 0.39 is mistakenly assigned as positive.
|
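For reference, the vanilla IoU-based assignment that PLA later replaces can be sketched as follows, using torchvision's pairwise IoU; the function and variable names are ours.

```python
import torch
from torchvision.ops import box_iou

def iou_based_assign(proposals, pseudo_boxes, thr=0.5):
    # proposals: (P, 4), pseudo_boxes: (G, 4), both in (x1, y1, x2, y2) format.
    ious = box_iou(proposals, pseudo_boxes)   # (P, G) pairwise IoU matrix
    best_iou, best_box = ious.max(dim=1)      # best-matching pseudo box per proposal
    positive = best_iou >= thr                # foreground iff best IoU reaches thr
    return positive, best_box
```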
| 99 |
+
|
| 100 |
+
To address this problem, we propose Prediction-guided Label Assignment (PLA), which takes teacher predictions as auxiliary information and reduces the dependency on IoUs. In the Teacher-student training scheme, not only can the teacher's detection results (after NMS) serve as pseudo labels, but the teacher's dense predictions (before NMS) can also provide guidance for student model training. We share the proposals generated by the teacher RPN with the student, so that teacher predictions on these proposals can be easily transferred to the student. To measure the proposal quality $(q)$ comprehensively, the classification confidence and localization precision of teacher predictions are jointly employed; concretely, $q = s^{\alpha} \times u^{1 - \alpha}$ , where $s$ and $u$ denote the foreground score and the IoU between the regressed box and the ground truth, respectively.
|
| 101 |
+
|
| 102 |
+
$\alpha$ controls the contribution of $s$ and $u$ to the overall quality. On unlabeled data, we first construct a candidate bag for each ground truth $g$ using the traditional IoU-based strategy, where the IoU threshold $t$ is set to a relatively low value (0.4 by default) to include more proposals. Within each candidate bag, the proposals are first sorted by their quality $q$ ; the top- $\mathcal{N}$ proposals are then adopted as positive samples and the rest as negatives. The number $\mathcal{N}$ is decided by the dynamic $k$ estimation strategy proposed in OTA [3]; specifically, the IoU values over the candidate bag are summed up to estimate the number of positive samples. The proposed PLA gets rid of strong dependencies on IoUs and alleviates negative effects from poorly localized pseudo boxes, leading to clearer classification boundaries. Furthermore, our label assignment strategy integrates more teacher knowledge into student model training, realizing better knowledge distillation.
|
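Under these definitions, PLA within a single candidate bag can be sketched as below; the exact rounding of the dynamic k estimate is our assumption.

```python
import torch

def pla_assign(s, u, alpha=0.5):
    # s: (N,) teacher foreground scores of the proposals in one candidate bag;
    # u: (N,) IoUs between the teacher-regressed boxes and the pseudo box.
    q = s ** alpha * u ** (1.0 - alpha)     # proposal quality q = s^a * u^(1 - a)
    k = int(u.sum().clamp(min=1).item())    # dynamic k estimation as in OTA [3]
    k = min(k, q.numel())
    positive = torch.zeros_like(q, dtype=torch.bool)
    positive[q.topk(k).indices] = True      # top-k proposals by quality are positives
    return positive
```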
| 103 |
+
|
| 104 |
+
Positive-proposal Consistency Voting. Considering that the classification score fails to indicate localization quality, we introduce a simple yet effective method to measure it, named Positive-proposal Consistency Voting (PCV). Assigning multiple proposals to each gt box (or pseudo box) is a common practice in CNN-based detectors [17,10,29], and we observe that the consistency of the regression results from these proposals is capable of reflecting the localization quality of the corresponding pseudo box. The regression consistency $\sigma^j$ for a pseudo box (indexed by $j$ ) is formulated as:
|
| 105 |
+
|
| 106 |
+
$$
|
| 107 |
+
\sigma^{j} = \frac{\sum_{i=1}^{N} u_{i}^{j}}{N}, \tag{4}
|
| 108 |
+
$$
|
| 109 |
+
|
| 110 |
+
where $u$ denotes the IoU value between the predicted box and the pseudo box, as defined above, and $N$ denotes the number of positive proposals assigned to pseudo box $j$ . After obtaining $\sigma^j$ , we employ it as the instance-wise regression loss weight:
|
| 111 |
+
|
| 112 |
+
$$
|
| 113 |
+
\mathcal{L}_{reg}^{u} = \frac{1}{MN} \sum_{j=1}^{M} \sigma^{j} \sum_{i=1}^{N} \left| reg_{i}^{j} - \hat{reg}_{i}^{j} \right|, \tag{5}
|
| 114 |
+
$$
|
| 115 |
+
|
| 116 |
+
where $reg$ and $\hat{reg}$ refer to the regression output and ground-truth, respectively. In Fig. 1(b), we depict the scatter diagram of the relation between the prediction consistency $\sigma$ of pseudo boxes and their true IoUs. It is obvious that $\sigma$ is positively correlated with the true IoUs. Note that some dots falling in the orange ellipse are mainly caused by annotation errors. We visualize some examples in Fig. 5, where the pseudo boxes accurately detect objects that are missed by the ground truths.
|
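A sketch of Eqs. (4) and (5) together: sigma^j is the mean IoU of the positive proposals assigned to pseudo box j, and it re-weights the per-proposal L1 regression losses. The flat tensor layout and variable names are our assumptions.

```python
import torch

def pcv_regression_loss(reg, reg_hat, ious, box_idx, num_boxes):
    # reg, reg_hat: (P, 4) predicted and target offsets of all positive proposals;
    # ious: (P,) the u_i^j values; box_idx: (P,) pseudo-box index j per proposal.
    ones = torch.ones_like(ious)
    counts = torch.zeros(num_boxes).scatter_add_(0, box_idx, ones)
    sums = torch.zeros(num_boxes).scatter_add_(0, box_idx, ious)
    sigma = sums / counts.clamp(min=1)        # Eq. (4): mean IoU per pseudo box
    l1 = (reg - reg_hat).abs().sum(dim=1)     # per-proposal L1 regression loss
    return (sigma[box_idx] * l1).mean()       # Eq. (5), averaged over all P = MN terms
```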
| 117 |
+
|
| 118 |
+
# 3.3 Multi-view Scale-invariant Learning
|
| 119 |
+
|
| 120 |
+
Different from image classification, in object detection, object scales vary in a large range and detectors hardly show comparable performance on all scales. Therefore, learning scale-invariant representations from unlabeled data is considerably important for SSOD. In consistency training, strong data augmentations play a crucial role [24,25] in achieving competitive performance. Through injecting perturbations into the input images, data augmentations equip the model with robustness to various transformations. From the perspective of scale invariance, we regard the common data augmentation strategy (e.g. random resizing) as label-level consistency, since it resizes the label according to the scale changes of the input images. Unfortunately, existing works only involve the widely adopted label-level consistency but fail to consider the feature-level one. Since detection networks usually have rich feature pyramid designs, feature-level consistency is easy to implement across paired inputs [16] and should be considered seriously. In this paper, we propose Multi-view Scale-invariant Learning (MSL), which combines both label- and feature-level consistency into a simple framework, where feature-level consistency is realized by aligning shifted pyramid features between two images with identical content but different scales.
|
| 121 |
+
|
| 122 |
+

|
| 123 |
+
Fig. 3: Comparisons between label-level consistency learning and feature-level consistency learning. For label consistency, labels are aligned according to the resize ratio $\alpha$ ; for feature consistency, features are aligned by shifting the feature pyramid level.
|
| 124 |
+
|
| 125 |
+

|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
|
| 129 |
+
To be specific, two views, namely $V_{1}$ and $V_{2}$ , are used for student training in MSL. We denote the input image for the teacher model as $V_{0}$ . Views $V_{1}$ and $V_{2}$ are constructed to learn label- and feature-level consistency, respectively. Among them, $V_{1}$ is implemented by vanilla random resizing, which rescales the input $V_{0}$ and pseudo boxes according to a resize ratio $\alpha$ randomly sampled from the range $[\alpha_{min}, \alpha_{max}]$ ([0.8, 1.3] by default). For feature consistency learning, we first downsample $V_{1}$ by an even factor (2x by default) to produce $V_{2}$ , then combine $V_{1}$ and $V_{2}$ into image pairs. Upsampling would also be permitted, but we only perform downsampling here due to GPU memory restrictions. Because the spatial sizes of adjacent FPN layers always differ by 2x, the P3-P7 layers<sup>4</sup> of $V_{1}$ align well with the P2-P6 layers of $V_{2}$ in the spatial dimension. Through this feature alignment, the same pseudo boxes can supervise the student model training on both $V_{1}$ and $V_{2}$ . Integrating label consistency and feature consistency leads to stronger scale-invariant learning and significantly accelerates model convergence, as we will show later in the experiments.
|
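The view construction and pyramid shift can be sketched as below; the FPN interface returning a level-indexed dict is an assumption, and note that the paper supervises both views with the same pseudo boxes through this alignment rather than with an explicit feature distance.

```python
import torch.nn.functional as F

def build_views(v1, fpn):
    # v1: (B, 3, H, W), already randomly resized; v2 is v1 downsampled by 2x.
    v2 = F.interpolate(v1, scale_factor=0.5, mode="bilinear", align_corners=False)
    feats1, feats2 = fpn(v1), fpn(v2)   # assumed dicts like {"P2": ..., "P7": ...}
    # Adjacent FPN levels differ by 2x, so P(k) of v1 matches P(k - 1) of v2:
    aligned = [(feats1[f"P{k}"], feats2[f"P{k - 1}"]) for k in range(3, 8)]
    return v2, aligned
```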
| 130 |
+
|
| 131 |
+
Comparisons between label consistency and feature consistency are shown in Fig. 3.
|
| 132 |
+
|
| 133 |
+
Learning scale-invariant representations from unlabeled data is also explored by SoCo [23]. However, there are two intrinsic differences between MSL and SoCo: (1) MSL models scale invariance through both label consistency and image feature consistency, while SoCo only considers object feature consistency. Through aligning dense image features of shifted pyramids between paired images, our MSL can provide more comprehensive and dense supervisory signals than SoCo, which only enforces consistency on sparse objects. (2) SoCo implements feature consistency via contrastive learning, which is designed for pretraining; in contrast, our MSL uses bounding box supervision to implement consistency learning and can be integrated into the detection task.
|
| 134 |
+
|
| 135 |
+
# 4 Experiments
|
| 136 |
+
|
| 137 |
+
# 4.1 Dataset and Evaluation Protocol
|
| 138 |
+
|
| 139 |
+
In this section, we conduct extensive experiments to verify the effectiveness of PseCo on the MS COCO benchmark [13]. There are two training sets, namely the train2017 set, containing 118k labeled images, and the unlabeled2017 set, containing 123k unlabeled images. The val2017 set with 5k images is used as the validation set, and we report all experimental results on val2017. The performance is measured by COCO average precision (denoted as mAP). Following the common practice of SSOD [20], there are two experimental settings, Partially Labeled Data and Fully Labeled Data, which are described as follows:
|
| 140 |
+
|
| 141 |
+
Partially Labeled Data. We randomly sample 1, 2, 5, and $10\%$ data from train2017 as labeled data, and use the rest as unlabeled. Under each labelling ratio, we report the mean and standard deviation over 5 different data folds.
|
| 142 |
+
|
| 143 |
+
Fully Labeled Data. Under this setting, we take train2017 as the training labeled set and unlabeled2017 as the training unlabeled set.
|
| 144 |
+
|
| 145 |
+
# 4.2 Implementation Details
|
| 146 |
+
|
| 147 |
+
For a fair comparison, we adopt Faster R-CNN [17] with FPN [11] as the detection framework, and ResNet-50 [5] as the backbone. The confidence threshold $\tau$ is set to 0.5, empirically. We set $\beta$ as 4.0 to control contributions of unlabeled data in the overall losses. The performance is evaluated on the Teacher model. Training details for Partially Labeled Data and Fully Labeled Data are described below:
|
| 148 |
+
|
| 149 |
+
Partially Labeled Data. All models are trained for 180k iterations on 8 GPUs. The initial learning rate is set as 0.01 and divided by 10 at 120k and 160k iterations. The training batch in each GPU includes 5 images, where the sample ratio between unlabeled data and labeled data is set to 4:1.
|
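For concreteness, the stated step schedule can be written as a tiny helper; this is only our paraphrase of the training configuration, not code from the paper.

```python
def learning_rate(step, base_lr=0.01, milestones=(120_000, 160_000)):
    # Divide the learning rate by 10 at 120k and 160k iterations (Sec. 4.2).
    return base_lr * 0.1 ** sum(step >= m for m in milestones)
```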
| 150 |
+
|
| 151 |
+
Fully Labeled Data. All models are trained for 720k iterations on 8 GPUs. Mini-batch in each GPU is 8 with the sample ratio between unlabeled and labeled data as 1:1. The learning rate is initialized to 0.01 and divided by 10 at 480k and 680k iterations.
|
| 152 |
+
|
| 153 |
+
Table 1: Comparisons with the state-of-the-art methods on val2017 set under the Partially Labeled Data and Fully Labeled Data settings.
|
| 154 |
+
|
| 155 |
+
<table><tr><td rowspan="2">Method</td><td colspan="4">Partially Labeled Data</td><td rowspan="2">Fully Labeled Data</td></tr><tr><td>1%</td><td>2%</td><td>5%</td><td>10%</td></tr><tr><td>Supervised baseline</td><td>12.20±0.29</td><td>16.53±0.12</td><td>21.17±0.17</td><td>26.90±0.08</td><td>41.0</td></tr><tr><td>STAC [20]</td><td>13.97±0.35</td><td>18.25±0.25</td><td>24.38±0.12</td><td>28.64±0.21</td><td>39.2</td></tr><tr><td>Humble Teacher [21]</td><td>16.96±0.35</td><td>21.74±0.24</td><td>27.70±0.15</td><td>31.61±0.28</td><td>37.6 +4.8 → 42.4</td></tr><tr><td>ISMT [27]</td><td>18.88±0.74</td><td>22.43±0.56</td><td>26.37±0.24</td><td>30.53±0.52</td><td>37.8 +1.8 → 39.6</td></tr><tr><td>Instant-Teaching [30]</td><td>18.05±0.15</td><td>22.45±0.15</td><td>26.75±0.05</td><td>30.40±0.05</td><td>37.6 +2.6 → 40.2</td></tr><tr><td>Unbiased Teacher [14]</td><td>20.75±0.12</td><td>24.30±0.07</td><td>28.27±0.11</td><td>31.50±0.10</td><td>40.2 +1.1 → 41.3</td></tr><tr><td>Soft Teacher [26]</td><td>20.46±0.39</td><td>-</td><td>30.74±0.08</td><td>34.04±0.14</td><td>40.9 +3.6 → 44.5</td></tr><tr><td>PseCo (ours)</td><td>22.43±0.36</td><td>27.77±0.18</td><td>32.50±0.08</td><td>36.06±0.24</td><td>41.0 +5.1 → 46.1</td></tr></table>
|
| 156 |
+
|
| 157 |
+
# 4.3 Comparison with State-of-the-Art Methods
|
| 158 |
+
|
| 159 |
+
We compare the proposed PseCo with other state-of-the-art methods on the COCO val2017 set. Comparisons under the Partially Labeled Data setting are conducted first, with results reported in Tab. 1. When labeled data is scarce (i.e., under the $1\%$ and $2\%$ labelling ratios), our method surpasses the state-of-the-art method, Unbiased Teacher [14], by $1.7\%$ and $3.5\%$ , reaching 22.4 and $27.8\mathrm{mAP}$ , respectively. When more labeled data is accessible, the strongest prior method becomes Soft Teacher [26]. Our method still outperforms it by $1.8\%$ and $2.0\%$ under the $5\%$ and $10\%$ labelling ratios, respectively. Therefore, the proposed method outperforms the SOTAs by a large margin, at least $1.7\%$ , under all labelling ratios. Compared with the supervised baseline, PseCo obtains even better performance with only $2\%$ labeled data than the baseline with $10\%$ labeled data, demonstrating the effectiveness of the proposed semi-supervised learning techniques.
|
| 160 |
+
|
| 161 |
+
Moreover, we also compare the convergence speed with the previous best method (Soft Teacher [26]) in Fig. 4, where convergence curves are depicted under the $10\%$ and $5\%$ labelling ratios. It is obvious that our method converges faster; specifically, it needs only $2/5$ and $1/4$ of the iterations of Soft Teacher to achieve the same performance under the $10\%$ and $5\%$ labelling ratios, respectively. Although we employ an extra view $(V_{2})$ to learn feature-level consistency, it only increases the training time of each iteration by $25\%$ (from $0.72 \text{ sec}/\text{iter}$ to $0.91 \text{ sec}/\text{iter}$ ), due to the low input resolution of $V_{2}$ . In summary, we halve the training time of the SOTA approach but achieve even better performance, which validates the superior learning efficiency of our method on unlabeled data.
|
| 162 |
+
|
| 163 |
+
The experimental results under the Fully Labeled Data setting are reported in Tab. 1, where both the results of the compared methods and their supervised baselines are listed. Following the practice in Soft Teacher [26], we also apply weak augmentations to the labeled data and obtain a strong supervised baseline, $41.0\mathrm{mAP}$ . Even with such a strong baseline, PseCo still achieves larger improvements $(+5.1\%)$ than the others and reaches $46.1\mathrm{mAP}$ , establishing a new state of the art. Some qualitative results are shown in Fig. 5.
|
| 164 |
+
|
| 165 |
+

|
| 166 |
+
(a) $10\%$ labelling ratio
|
| 167 |
+
|
| 168 |
+

|
| 169 |
+
(b) $5\%$ labelling ratio
|
| 170 |
+
Fig. 4: Comparison of model convergence speed. In (a) and (b), we compare PseCo against Soft Teacher [26]. Here, we reproduce Soft Teacher using its source code. (c) depicts the comparison between $V_{1}$ and $V_{1} \& V_{2}$ . In the legend, the numbers in brackets refer to mAP. Performance is evaluated on the teacher.
|
| 171 |
+
|
| 172 |
+

|
| 173 |
+
(c) effects of view 2
|
| 174 |
+
|
| 175 |
+
Table 2: Ablation studies on each component of our method. MSL represents Multi-view Scale-invariant Learning; NPL represents Noisy Pseudo box Learning. In MSL, $V_{1}$ and $V_{2}$ are constructed for label- and feature-level consistency, respectively. In NPL, PCV and PLA stand for Positive-proposal Consistency Voting and Prediction-guided Label Assignment, respectively.
|
| 176 |
+
|
| 177 |
+
<table><tr><td colspan="2">MSL</td><td colspan="2">NPL</td><td rowspan="2">mAP</td><td rowspan="2">\( AP_{50} \)</td><td rowspan="2">\( AP_{75} \)</td></tr><tr><td>\( V_1 \)</td><td>\( V_2 \)</td><td>PCV</td><td>PLA</td></tr><tr><td colspan="4"></td><td>26.8</td><td>44.9</td><td>28.4</td></tr><tr><td>✓</td><td></td><td></td><td></td><td>33.9(+7.1)</td><td>55.2</td><td>36.0</td></tr><tr><td>✓</td><td>✓</td><td></td><td></td><td>34.9(+8.1)</td><td>56.3</td><td>37.1</td></tr><tr><td>✓</td><td></td><td>✓</td><td></td><td>34.8(+8.0)</td><td>55.1</td><td>37.4</td></tr><tr><td>✓</td><td></td><td>✓</td><td>✓</td><td>35.7(+8.9)</td><td>56.4</td><td>38.4</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td></td><td>36.0(+9.2)</td><td>56.9</td><td>38.7</td></tr><tr><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>36.3(+9.5)</td><td>57.2</td><td>39.2</td></tr></table>
|
| 178 |
+
|
| 179 |
+
# 4.4 Ablation Study
|
| 180 |
+
|
| 181 |
+
We conduct detailed ablation studies to verify the key designs. All ablation studies are conducted on a single data fold under the $10\%$ labelling ratio.
|
| 182 |
+
|
| 183 |
+
Effect of individual component. In Tab. 2, we show the effectiveness of each component step by step. When only using $10\%$ labeled data as training examples, the model obtains $26.8\mathrm{mAP}$ . Next, we construct the semi-supervised baseline by applying $V_{1}$ on unlabeled data for label-level consistency learning. This baseline does not consider any adverse effects incurred by coarse pseudo boxes and obtains 33.9 mAP. Furthermore, by leveraging the additional view $V_{2}$ , feature-level scale-invariant learning is enabled, and an improvement of $+1.0\mathrm{mAP}$ is found. On the other hand, to alleviate the issue of coarse pseudo boxes, we introduce PCV to suppress the inaccurate regression signals, improving the baseline from 33.9 to $34.8\mathrm{mAP}$ . After that, we replace the traditional IoU-based label assignment strategy with PLA and enjoy another $+0.9\mathrm{mAP}$ gain. Finally, when combining MSL and NPL together, the model achieves the best performance, 36.3 mAP.
|
| 184 |
+
|
| 185 |
+
Comparison with other regression methods. Scores of pseudo boxes can only indicate the confidence of the predicted object category; thus they fail to reflect localization quality [10,14].
|
| 186 |
+
|
| 187 |
+
Table 3: Analysis of Multi-view Scale-invariant learning, which contains both the label- and feature-level consistency.
|
| 188 |
+
(a) Study on label consistency.
|
| 189 |
+
|
| 190 |
+
<table><tr><td>method</td><td>mAP</td><td>APS</td><td>APM</td><td>APL</td></tr><tr><td>single-scale training</td><td>32.7</td><td>19.0</td><td>36.0</td><td>42.5</td></tr><tr><td>label consistency</td><td>33.9</td><td>19.1</td><td>37.2</td><td>44.4</td></tr></table>
|
| 191 |
+
|
| 192 |
+
(b) Study on feature consistency.
|
| 193 |
+
|
| 194 |
+
<table><tr><td>method</td><td>mAP</td><td>APS</td><td>APM</td><td>APL</td></tr><tr><td>vanilla multi-view training</td><td>33.9</td><td>20.9</td><td>37.2</td><td>43.0</td></tr><tr><td>feature consistency</td><td>34.9</td><td>22.1</td><td>38.2</td><td>43.6</td></tr></table>
|
| 195 |
+
|
| 196 |
+
(a) Comparison between our PCV and other regression methods.
|
| 197 |
+
|
| 198 |
+
Table 4: Ablation studies related to Positive-proposal Consistency Voting (PCV) and Prediction-guided Label Assignment (PLA).
|
| 199 |
+
|
| 200 |
+
<table><tr><td>method</td><td>mAP</td><td>\( AP_{50} \)</td><td>\( AP_{75} \)</td></tr><tr><td>abandon reg [14]</td><td>33.9</td><td>55.2</td><td>36.0</td></tr><tr><td>reg consistency [21]</td><td>34.2</td><td>55.1</td><td>36.5</td></tr><tr><td>box jittering [26]</td><td>34.5</td><td>54.9</td><td>36.9</td></tr><tr><td>PCV (ours)</td><td>34.8</td><td>55.1</td><td>37.4</td></tr></table>
|
| 201 |
+
|
| 202 |
+
(b) Study on hyperparameter $\alpha$
|
| 203 |
+
|
| 204 |
+
<table><tr><td>α</td><td>mAP</td><td>AP50</td><td>AP75</td></tr><tr><td>0</td><td>35.2</td><td>56.1</td><td>37.8</td></tr><tr><td>0.5</td><td>35.7</td><td>56.4</td><td>38.4</td></tr><tr><td>1.0</td><td>35.4</td><td>55.7</td><td>38.4</td></tr></table>
|
| 205 |
+
|
| 206 |
+
(c) Study on IoU threshold $t$ .
|
| 207 |
+
|
| 208 |
+
<table><tr><td>t</td><td>mAP</td><td>\( AP_{50} \)</td><td>\( AP_{75} \)</td></tr><tr><td>0.3</td><td>35.7</td><td>56.2</td><td>38.6</td></tr><tr><td>0.4</td><td>35.7</td><td>56.4</td><td>38.4</td></tr><tr><td>0.5</td><td>35.5</td><td>56.1</td><td>38.3</td></tr></table>
|
| 209 |
+
|
| 210 |
+
Naive confidence thresholding will introduce some coarse bounding boxes into the regression task. To alleviate this issue, Unbiased Teacher [14] abandons regression losses on unlabeled data (denoted as "abandon reg"); Humble Teacher [21] aligns the regression predictions between the teacher and student on selected top- $\mathcal{N}$ proposals (dubbed "reg consistency"); Soft Teacher [26] introduces box jittering to calculate the prediction variance on jittered pseudo boxes, which is used to filter out poorly localized pseudo boxes. In Tab. 4a, we compare our Positive-proposal Consistency Voting (PCV) with these methods. PCV obtains the best performance; concretely, on $\mathrm{AP}_{75}$ , PCV surpasses the two competitors, reg consistency and box jittering, by $0.9\%$ and $0.5\%$ , respectively. Although both PCV and box jittering [26] rely on prediction variance, there exist great differences. Firstly, PCV produces localization quality from intrinsic proposals, so it avoids extra network forwards on jittered boxes, enjoying higher training efficiency. Moreover, unlike box jittering, which meticulously tunes the variance threshold, PCV is free of hyper-parameters.
|
| 211 |
+
|
| 212 |
+
Study on different hyper-parameters of PLA. We first investigate the performance using different $\alpha$ in PLA, which balances the influence of the classification score $(s)$ and localization precision $(u)$ in the proposal quality. Through a coarse search shown in Tab. 4b, we find that combining $s$ and $u$ yields better performance than using either individually. We then carry out experiments to study the robustness of the IoU threshold $t$ , which is used to build the candidate bag. From Tab. 4c, using a lower $t$ to construct a bigger candidate bag is preferred.
Analysis of Multi-view Scale-invariant Learning. We propose MSL to model scale invariance through both label- and feature-level consistency; the corresponding studies are reported in Tab. 3. First, we construct a single-scale training baseline without scale variation, where the input images for the teacher and student are kept at the same scale; it obtains 32.7 mAP. Next, we apply different scale jitters to the teacher and student to implement label-level consistency, which surpasses single-scale training by 1.2 mAP. On top of label consistency, we further introduce the view $V_{2}$ to perform feature consistency learning, which brings a further $+1.0$ mAP improvement, reaching 34.9 mAP. Apart from the performance gains, feature consistency also significantly boosts the convergence speed, as depicted in Fig. 4(c). To validate that the improvements introduced by $V_{2}$ come from comprehensive scale-invariant learning, rather than from vanilla multi-view training, we also add an extra view $V_{2}^{\prime}$ besides $V_{1}$, where $V_{2}^{\prime}$ is downsampled from $V_{1}$ by $2\times$ and performs label consistency just as $V_{1}$ does. As shown in Tab. 3b, vanilla multi-view training with only label consistency hardly brings improvements over the single view $V_{1}$ (33.9 vs. 33.9 mAP).

Fig. 5: (a) Some pseudo boxes (in yellow) detect objects missed by the ground-truths (in red); numbers above the pseudo boxes refer to the predicted consistency $\sigma$. (b) Detection results of the supervised baseline. (c) Detection results of our method.
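For intuition, here is a minimal sketch of how the three MSL views could be constructed; the function name, jitter arguments, and bilinear resizing are illustrative assumptions, with only the $2\times$ downsampling of $V_{2}$ taken from the text.

```python
# A minimal sketch of MSL view construction, assuming CHW torch tensors
# and bilinear resizing; names and jitter arguments are illustrative.

import torch
import torch.nn.functional as F

def build_msl_views(image: torch.Tensor, teacher_scale: float, student_scale: float):
    """Teacher and student see the same image under different scale jitters
    (label-level consistency: the teacher's pseudo boxes supervise the
    student across scales).  V2 is V1 downsampled by 2x; aligning features
    extracted from V1 and V2 adds feature-level consistency."""
    resize = lambda x, s: F.interpolate(
        x[None], scale_factor=s, mode="bilinear", align_corners=False)[0]
    teacher_view = resize(image, teacher_scale)
    v1 = resize(image, student_scale)
    v2 = resize(v1, 0.5)  # the downsampled view used for feature consistency
    return teacher_view, v1, v2
```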
Effect of Focal Loss. In Tab. 5, we compare the Cross Entropy (CE) Loss and Focal Loss. Thanks to the Focal Loss, an improvement of $+0.6\mathrm{mAP}$ is achieved against the CE Loss. On the other hand, even with the CE Loss, our PseCo still surpasses the Soft Teacher by a large margin, i.e., $1.7\mathrm{mAP}$ .
Table 5: Ablation study on Focal Loss.

| method | mAP | $\mathrm{AP}_{50}$ | $\mathrm{AP}_{75}$ |
|---|---|---|---|
| PseCo w/ CE Loss | 35.7 | 55.6 | 38.9 |
| PseCo w/ Focal Loss | 36.3 | 57.2 | 39.2 |
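For reference, the Focal Loss of Lin et al. [12] down-weights well-classified examples as $\mathrm{FL}(p_t) = -(1-p_t)^{\gamma}\log(p_t)$. A minimal binary-classification sketch follows; $\gamma=2$ is the common default from [12], not a value quoted in this paper.

```python
# Reference sketch of the standard Focal Loss [12]; gamma=2.0 is the
# conventional default, assumed here for illustration.

import torch
import torch.nn.functional as F

def focal_loss(logits, targets, gamma=2.0):
    """Binary focal loss: down-weights easy examples so training focuses
    on hard, misclassified ones (useful with noisy pseudo labels)."""
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p = torch.sigmoid(logits)
    p_t = p * targets + (1 - p) * (1 - targets)  # probability of the true class
    return ((1 - p_t) ** gamma * ce).mean()
```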
# 5 Conclusion
In this work, we carefully analyze two key techniques of semi-supervised object detection (SSOD), i.e., pseudo labeling and consistency training, and observe that both currently neglect some important properties of object detection. Motivated by this, we propose a new SSOD framework, PseCo, to integrate these object detection properties into SSOD. PseCo consists of Noisy Pseudo box Learning (NPL) and Multi-view Scale-invariant Learning (MSL). In NPL, prediction-guided label assignment and positive-proposal consistency voting are proposed to perform robust label assignment and robust regression on noisy pseudo boxes, respectively. On top of the common label-level consistency, MSL additionally introduces a novel feature-level scale-invariant learning, which is neglected in prior works. To validate the effectiveness of our method, extensive experiments are conducted on the COCO benchmark. The results show that PseCo surpasses the state-of-the-art methods by a large margin in both accuracy and efficiency.
# References

1. Berthelot, D., Carlini, N., Goodfellow, I., Papernot, N., Oliver, A., Raffel, C.A.: Mixmatch: A holistic approach to semi-supervised learning. Advances in Neural Information Processing Systems 32 (2019)
2. Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., Fei-Fei, L.: Imagenet: A large-scale hierarchical image database. In: 2009 IEEE Conference on Computer Vision and Pattern Recognition. pp. 248-255. IEEE (2009)
3. Ge, Z., Liu, S., Li, Z., Yoshie, O., Sun, J.: OTA: Optimal transport assignment for object detection. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 303-312 (2021)
4. Grandvalet, Y., Bengio, Y.: Semi-supervised learning by entropy minimization. Advances in Neural Information Processing Systems 17 (2004)
5. He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 770-778 (2016)
6. Jiang, B., Luo, R., Mao, J., Xiao, T., Jiang, Y.: Acquisition of localization confidence for accurate object detection. In: Proceedings of the European Conference on Computer Vision (ECCV). pp. 784-799 (2018)
7. Lee, D.H., et al.: Pseudo-label: The simple and efficient semi-supervised learning method for deep neural networks. In: Workshop on Challenges in Representation Learning, ICML. vol. 3, p. 896 (2013)
8. Li, B., Liu, Y., Wang, X.: Gradient harmonized single-stage detector. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 33, pp. 8577-8584 (2019)
9. Li, G., Li, X., Wang, Y., Zhang, S., Wu, Y., Liang, D.: Knowledge distillation for object detection via rank mimicking and prediction-guided feature imitation. In: Proceedings of the AAAI Conference on Artificial Intelligence. vol. 36, pp. 1306-1313 (2022)
10. Li, X., Lv, C., Wang, W., Li, G., Yang, L., Yang, J.: Generalized focal loss: Towards efficient representation learning for dense object detection. IEEE Transactions on Pattern Analysis and Machine Intelligence (2022)
11. Lin, T.Y., Dollár, P., Girshick, R., He, K., Hariharan, B., Belongie, S.: Feature pyramid networks for object detection. In: Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition. pp. 2117-2125 (2017)
12. Lin, T.Y., Goyal, P., Girshick, R., He, K., Dollár, P.: Focal loss for dense object detection. In: Proceedings of the IEEE International Conference on Computer Vision. pp. 2980-2988 (2017)
13. Lin, T.Y., Maire, M., Belongie, S., Hays, J., Perona, P., Ramanan, D., Dollár, P., Zitnick, C.L.: Microsoft COCO: Common objects in context. In: European Conference on Computer Vision. pp. 740-755. Springer (2014)
14. Liu, Y.C., Ma, C.Y., He, Z., Kuo, C.W., Chen, K., Zhang, P., Wu, B., Kira, Z., Vajda, P.: Unbiased teacher for semi-supervised object detection. arXiv preprint arXiv:2102.09480 (2021)
15. Liu, Z., Lin, Y., Cao, Y., Hu, H., Wei, Y., Zhang, Z., Lin, S., Guo, B.: Swin transformer: Hierarchical vision transformer using shifted windows. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 10012-10022 (2021)
16. Qi, L., Kuen, J., Gu, J., Lin, Z., Wang, Y., Chen, Y., Li, Y., Jia, J.: Multi-scale aligned distillation for low-resolution detection. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 14443-14453 (2021)
17. Ren, S., He, K., Girshick, R., Sun, J.: Faster R-CNN: Towards real-time object detection with region proposal networks. Advances in Neural Information Processing Systems 28 (2015)
18. Scudder, H.: Probability of error of some adaptive pattern-recognition machines. IEEE Transactions on Information Theory 11(3), 363-371 (1965)
19. Sohn, K., Berthelot, D., Carlini, N., Zhang, Z., Zhang, H., Raffel, C.A., Cubuk, E.D., Kurakin, A., Li, C.L.: Fixmatch: Simplifying semi-supervised learning with consistency and confidence. Advances in Neural Information Processing Systems 33, 596-608 (2020)
20. Sohn, K., Zhang, Z., Li, C.L., Zhang, H., Lee, C.Y., Pfister, T.: A simple semi-supervised learning framework for object detection. arXiv preprint arXiv:2005.04757 (2020)
21. Tang, Y., Chen, W., Luo, Y., Zhang, Y.: Humble teachers teach better students for semi-supervised object detection. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 3132-3141 (2021)
22. Tarvainen, A., Valpola, H.: Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results. Advances in Neural Information Processing Systems 30 (2017)
23. Wei, F., Gao, Y., Wu, Z., Hu, H., Lin, S.: Aligning pretraining for detection via object-level contrastive learning. Advances in Neural Information Processing Systems 34 (2021)
24. Xie, Q., Dai, Z., Hovy, E., Luong, T., Le, Q.: Unsupervised data augmentation for consistency training. Advances in Neural Information Processing Systems 33, 6256-6268 (2020)
25. Xie, Q., Luong, M.T., Hovy, E., Le, Q.V.: Self-training with noisy student improves ImageNet classification. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 10687-10698 (2020)
26. Xu, M., Zhang, Z., Hu, H., Wang, J., Wang, L., Wei, F., Bai, X., Liu, Z.: End-to-end semi-supervised object detection with soft teacher. In: Proceedings of the IEEE/CVF International Conference on Computer Vision. pp. 3060-3069 (2021)
27. Yang, Q., Wei, X., Wang, B., Hua, X.S., Zhang, L.: Interactive self-training with mean teachers for semi-supervised object detection. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 5941-5950 (2021)
28. Zhang, B., Wang, Y., Hou, W., Wu, H., Wang, J., Okumura, M., Shinozaki, T.: Flexmatch: Boosting semi-supervised learning with curriculum pseudo labeling. Advances in Neural Information Processing Systems 34 (2021)
29. Zhang, H., Wang, Y., Dayoub, F., Sünderhauf, N.: Varifocalnet: An IoU-aware dense object detector. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 8514-8523 (2021)
30. Zhou, Q., Yu, C., Wang, Z., Qian, Q., Li, H.: Instant-teaching: An end-to-end semi-supervised object detection framework. In: Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. pp. 4081-4090 (2021)
2203.16xxx/2203.16317/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:c8cdb2028db70597f3d6f3ca3d2dfae71444751eef189569355cc63e780f5169
size 405628
2203.16xxx/2203.16317/layout.json
ADDED
The diff for this file is too large to render. See raw diff
2203.16xxx/2203.16318/bdf4203f-60cc-4b35-a342-da2e1d7ac014_content_list.json
ADDED
@@ -0,0 +1,1064 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Near-Field Communications for 6G: Fundamentals, Challenges, Potentials, and Future Directions",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
86,
|
| 8 |
+
70,
|
| 9 |
+
913,
|
| 10 |
+
137
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Mingyao Cui, Zidong Wu, Yu Lu, Xiuhong Wei, and Linglong Dai, Fellow, IEEE",
|
| 17 |
+
"bbox": [
|
| 18 |
+
192,
|
| 19 |
+
147,
|
| 20 |
+
803,
|
| 21 |
+
164
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Abstract—Extremely large-scale antenna array (ELAA) is a common feature of several key candidate technologies for the sixth generation (6G) mobile networks, such as ultra-massive multiple-input-multiple-output (UM-MIMO), cell-free massive MIMO, reconfigurable intelligent surface (RIS), and terahertz communications. Since the number of antennas is very large for ELAA, the electromagnetic radiation field needs to be modeled by near-field spherical waves, which differs from the conventional planar-wave-based radiation model of 5G massive MIMO. As a result, near-field communications will become essential in 6G wireless networks. In this article, we systematically investigate the emerging near-field communication techniques. Firstly, we present the fundamentals of near-field communications and the metric to determine the near-field ranges in typical communication scenarios. Then, we investigate recent studies specific to near-field communications by classifying them into two categories, i.e., techniques addressing the challenges and those exploiting the potentials in near-field regions. Their principles, recent progress, pros and cons are discussed. More importantly, several open problems and future research directions for near-field communications are pointed out. We believe that this article would inspire more innovations for this important research topic of near-field communications for 6G.",
|
| 28 |
+
"bbox": [
|
| 29 |
+
73,
|
| 30 |
+
220,
|
| 31 |
+
491,
|
| 32 |
+
512
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Index Terms—6G, ELAA, near-field communications, spherical wavefront.",
|
| 39 |
+
"bbox": [
|
| 40 |
+
73,
|
| 41 |
+
517,
|
| 42 |
+
491,
|
| 43 |
+
542
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "I. INTRODUCTION",
|
| 50 |
+
"text_level": 1,
|
| 51 |
+
"bbox": [
|
| 52 |
+
215,
|
| 53 |
+
554,
|
| 54 |
+
351,
|
| 55 |
+
566
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "The sixth generation (6G) mobile networks are promising to empower emerging applications, such as holographic video, digital replica, etc. For fulfilling these visions, tremendous research efforts have been endeavored to develop new wireless technologies to meet the key performance indicators (KPIs) of 6G, which are much superior to those of 5G [1]. For instance, thanks to the enormous spatial multiplexing and beamforming gain, ultra-massive multiple-input-multiple-output (UM-MIMO) and cell-free massive MIMO (CF-MIMO) are expected to accomplish a 10-fold increase in the spectral efficiency for 6G [1]. Besides, by dynamically manipulating the wireless environment through thousands of antennas, reconfigurable intelligent surface (RIS) brings new possibilities for capacity and coverage enhancement [2]. Moreover, millimeter-wave (mmWave) and terahertz (THz) UM-MIMO can offer abundant spectral resources for supporting $100\\times$ peak data rate improvement (e.g., Tbps) in 6G mobile communications",
|
| 62 |
+
"bbox": [
|
| 63 |
+
73,
|
| 64 |
+
573,
|
| 65 |
+
491,
|
| 66 |
+
830
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "All authors are with the Beijing National Research Center for Information Science and Technology (BNRist) as well as the Department of Electronic Engineering, Tsinghua University, Beijing 100084, China (e-mails: {cmy20, wuzd19, y-lu19, weixh19} @ mails.tsinghua.edu.cn, daill@tsinghua.edu.cn).",
|
| 73 |
+
"bbox": [
|
| 74 |
+
73,
|
| 75 |
+
840,
|
| 76 |
+
491,
|
| 77 |
+
886
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "text",
|
| 83 |
+
"text": "© 2022 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.",
|
| 84 |
+
"bbox": [
|
| 85 |
+
73,
|
| 86 |
+
886,
|
| 87 |
+
491,
|
| 88 |
+
944
|
| 89 |
+
],
|
| 90 |
+
"page_idx": 0
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"type": "text",
|
| 94 |
+
"text": "[3]. Despite being suitable for different application scenarios with various KPIs, all the above technologies, including UM-MIMO, CF-MIMO, RIS, and THz communications, share a common feature: They all usually require a very large number of antennas to attain their expected performance, i.e., extremely large-scale antenna arrays (ELAA) are essential to these different candidate technologies for 6G.",
|
| 95 |
+
"bbox": [
|
| 96 |
+
501,
|
| 97 |
+
219,
|
| 98 |
+
924,
|
| 99 |
+
325
|
| 100 |
+
],
|
| 101 |
+
"page_idx": 0
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"type": "text",
|
| 105 |
+
"text": "Compared with massive MIMO, the key technology in 5G networks, ELAA for 6G not only means a sharp increase in the number of antennas but also results in a fundamental change of the electromagnetic (EM) characteristics. The EM radiation field can generally be divided into far-field and radiation near-field regions. The boundary between these two regions is determined by the Rayleigh distance, also called the Fraunhofer distance [4]. Rayleigh distance is proportional to the product of the square of array aperture and carrier frequency [4]. Outside the Rayleigh distance, it is the far-field region, where the EM field can be approximately modeled by planar waves. Within the Rayleigh distance, the near-field propagation becomes dominant, where the EM field has to be accurately modeled by spherical waves.",
|
| 106 |
+
"bbox": [
|
| 107 |
+
501,
|
| 108 |
+
325,
|
| 109 |
+
921,
|
| 110 |
+
536
|
| 111 |
+
],
|
| 112 |
+
"page_idx": 0
|
| 113 |
+
},
|
| 114 |
+
{
|
| 115 |
+
"type": "text",
|
| 116 |
+
"text": "Since the number of antennas is not very large in 5G massive MIMO systems, the Rayleigh distance of up to several meters is negligible. Thus, existing 5G communications are mainly developed from far-field communication theories and techniques. However, with the significant increase of the antenna number and carrier frequency in future 6G systems, the near-field region of ELAA will expand by orders of magnitude. For instance, a 3200-element ELAA at $2.4\\mathrm{GHz}$ was developed in [2]. With an array size of $2\\mathrm{m} \\times 3\\mathrm{m}$ , its Rayleigh distance is about 200 meters, which is larger than the radius of a typical 5G cell. Accordingly, near-field communications will become essential components in future 6G mobile networks where the spherical propagation model needs to be considered, which is obviously different from the existing far-field 5G systems. Unfortunately, the near-field propagation introduces several new challenges in ELAA systems, which should be identified and addressed to empower 6G communications.",
|
| 117 |
+
"bbox": [
|
| 118 |
+
501,
|
| 119 |
+
536,
|
| 120 |
+
921,
|
| 121 |
+
792
|
| 122 |
+
],
|
| 123 |
+
"page_idx": 0
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"type": "text",
|
| 127 |
+
"text": "In this article, we systematically investigate the recent nearfield communication techniques for 6G. The key features of this article can be summarized as follows:",
|
| 128 |
+
"bbox": [
|
| 129 |
+
503,
|
| 130 |
+
792,
|
| 131 |
+
924,
|
| 132 |
+
835
|
| 133 |
+
],
|
| 134 |
+
"page_idx": 0
|
| 135 |
+
},
|
| 136 |
+
{
|
| 137 |
+
"type": "text",
|
| 138 |
+
"text": "- To begin with, the fundamental differences between far-field and near-field communications are explained. Comparatively speaking, the planar wavefront in the farfield can steer the signal energy towards a specific physical angle. On the contrary, the near-field spherical wavefront achieves energy focusing on both angle and distance domain. Moreover, the Rayleigh distance that quantifies",
|
| 139 |
+
"bbox": [
|
| 140 |
+
519,
|
| 141 |
+
839,
|
| 142 |
+
924,
|
| 143 |
+
946
|
| 144 |
+
],
|
| 145 |
+
"page_idx": 0
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"type": "page_number",
|
| 149 |
+
"text": "1",
|
| 150 |
+
"bbox": [
|
| 151 |
+
911,
|
| 152 |
+
30,
|
| 153 |
+
919,
|
| 154 |
+
40
|
| 155 |
+
],
|
| 156 |
+
"page_idx": 0
|
| 157 |
+
},
|
| 158 |
+
{
|
| 159 |
+
"type": "aside_text",
|
| 160 |
+
"text": "arXiv:2203.16318v6 [cs.IT] 15 Sep 2022",
|
| 161 |
+
"bbox": [
|
| 162 |
+
22,
|
| 163 |
+
273,
|
| 164 |
+
58,
|
| 165 |
+
707
|
| 166 |
+
],
|
| 167 |
+
"page_idx": 0
|
| 168 |
+
},
|
| 169 |
+
{
|
| 170 |
+
"type": "text",
|
| 171 |
+
"text": "the boundary between far-field and near-field regions is introduced, and its derivation is explained in detail. Based on this derivation, we further extend the classical Rayleigh distance, for MIMO channels with a direct base station (BS)-user equipment (UE) link, to the one for RIS-aided communications, where a cascaded channel is utilized for presenting the BS-RIS-UE link.",
|
| 172 |
+
"bbox": [
|
| 173 |
+
106,
|
| 174 |
+
69,
|
| 175 |
+
491,
|
| 176 |
+
174
|
| 177 |
+
],
|
| 178 |
+
"page_idx": 1
|
| 179 |
+
},
|
| 180 |
+
{
|
| 181 |
+
"type": "list",
|
| 182 |
+
"sub_type": "text",
|
| 183 |
+
"list_items": [
|
| 184 |
+
"- Additionally, we investigate the emerging near-field communication techniques by classifying them into two types, i.e., techniques addressing the challenges and those exploiting the potentials in near-field regions. On the one hand, as most techniques specific to far-field often suffer from a severe performance loss in the near-field area, the first type of techniques aims to compensate for this loss, such as near-field channel estimation and beamforming. On the other hand, the second kind of study has revealed that the nature of near-field spherical wavefront can also be exploited to provide new possibilities for capacity enhancement and accessibility improvement. The principles, recent progress, pros and cons of these two categories of research are discussed in detail.",
|
| 185 |
+
"- Finally, several open problems and future research directions for near-field communications are pointed out. For example, the improvement of Rayleigh distance considering various communication metrics need to be analyzed, artificial intelligence (AI) is expected to enable high-performance near-field transmissions with low complexity, and hybrid far- and near-field communications also require in-depth study."
|
| 186 |
+
],
|
| 187 |
+
"bbox": [
|
| 188 |
+
91,
|
| 189 |
+
175,
|
| 190 |
+
491,
|
| 191 |
+
507
|
| 192 |
+
],
|
| 193 |
+
"page_idx": 1
|
| 194 |
+
},
|
| 195 |
+
{
|
| 196 |
+
"type": "text",
|
| 197 |
+
"text": "II. FUNDAMENTALS OF NEAR-FIELD COMMUNICATIONS",
|
| 198 |
+
"text_level": 1,
|
| 199 |
+
"bbox": [
|
| 200 |
+
81,
|
| 201 |
+
523,
|
| 202 |
+
483,
|
| 203 |
+
537
|
| 204 |
+
],
|
| 205 |
+
"page_idx": 1
|
| 206 |
+
},
|
| 207 |
+
{
|
| 208 |
+
"type": "text",
|
| 209 |
+
"text": "In this section, we first present the differences between farfield and near-field communications. Then, we will identify the principle to determine the boundary between the far-field and near-field regions in several typical application scenarios.",
|
| 210 |
+
"bbox": [
|
| 211 |
+
73,
|
| 212 |
+
542,
|
| 213 |
+
491,
|
| 214 |
+
604
|
| 215 |
+
],
|
| 216 |
+
"page_idx": 1
|
| 217 |
+
},
|
| 218 |
+
{
|
| 219 |
+
"type": "text",
|
| 220 |
+
"text": "A. Far-Field Communications vs. Near-Field Communications",
|
| 221 |
+
"text_level": 1,
|
| 222 |
+
"bbox": [
|
| 223 |
+
73,
|
| 224 |
+
623,
|
| 225 |
+
491,
|
| 226 |
+
638
|
| 227 |
+
],
|
| 228 |
+
"page_idx": 1
|
| 229 |
+
},
|
| 230 |
+
{
|
| 231 |
+
"type": "text",
|
| 232 |
+
"text": "The critical characteristics of far-field and near-field communications are shown in Fig. 1. We consider an uplink communication scenario, while the discussions in this article are also valid for downlink scenarios. The BS is equipped with an ELAA. A widely adopted metric to determine the boundary between far-field and near-field regions is the Rayleigh distance, also called the Fraunhofer distance [4]. When the communication distance between the BS and UE (BS-UE distance) is larger than the Rayleigh distance, the UE is located in the far-field region of the BS. Then, EM waves impinging on the BS array can be approximately modeled as planar waves. By contrast, when the BS-UE distance is shorter than the Rayleigh distance, the UE is located in the near-field region of the BS. In this region, EM waves impinging on the BS array must be accurately modeled as spherical waves [5].",
|
| 233 |
+
"bbox": [
|
| 234 |
+
73,
|
| 235 |
+
642,
|
| 236 |
+
491,
|
| 237 |
+
868
|
| 238 |
+
],
|
| 239 |
+
"page_idx": 1
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"type": "text",
|
| 243 |
+
"text": "More precisely, the planar wave is a long-distance approximation of the spherical wave. In far-field regions, the phase of EM waves can be elegantly approximated by a linear function of the antenna index through Taylor expansion. This concise linear phase forms a planar wavefront only related to an incident",
|
| 244 |
+
"bbox": [
|
| 245 |
+
73,
|
| 246 |
+
869,
|
| 247 |
+
491,
|
| 248 |
+
946
|
| 249 |
+
],
|
| 250 |
+
"page_idx": 1
|
| 251 |
+
},
|
| 252 |
+
{
|
| 253 |
+
"type": "image",
|
| 254 |
+
"img_path": "images/0a533fc729f1ef3f226586e28a7e738e1c1092fc45ae17ced2da4ce1c1ced6a0.jpg",
|
| 255 |
+
"image_caption": [
|
| 256 |
+
"Fig. 1: Far-field planar wavefront vs. near-field spherical wavefront. The plots at the bottom illustrate the normalized received signal energy in the physical space achieved by near-field beamfocusing (bottom left) and far-field beamsteering (bottom right)."
|
| 257 |
+
],
|
| 258 |
+
"image_footnote": [],
|
| 259 |
+
"bbox": [
|
| 260 |
+
509,
|
| 261 |
+
69,
|
| 262 |
+
919,
|
| 263 |
+
266
|
| 264 |
+
],
|
| 265 |
+
"page_idx": 1
|
| 266 |
+
},
|
| 267 |
+
{
|
| 268 |
+
"type": "text",
|
| 269 |
+
"text": "angle. Accordingly, by the utilization of planar wavefronts, far-field beamforming can steer the beam energy towards a specific angle over different distances, which is also termed as beamsteering, as shown in the bottom right figure of Fig. 1. Unfortunately, this concise linear phase fails to thoroughly reveal the information of spherical waves. In near-field regions, the phase of spherical waves should be accurately derived based on the physical geometry, which is a non-linear function of the antenna index. The information of the incident angle and distance in each path between BS and UE is embedded in this non-linear phase. Exploiting the extra distance information of spherical wavefronts, near-field beamforming is able to focus the beam energy on a specific location, where energy focusing on both the angle and distance domain is achievable, as shown in the bottom left figure of Fig. 1. Owing to this property, beamforming in the near-field is also called beamfocusing.",
|
| 270 |
+
"bbox": [
|
| 271 |
+
501,
|
| 272 |
+
342,
|
| 273 |
+
921,
|
| 274 |
+
583
|
| 275 |
+
],
|
| 276 |
+
"page_idx": 1
|
| 277 |
+
},
|
| 278 |
+
{
|
| 279 |
+
"type": "text",
|
| 280 |
+
"text": "The differences between far-field planar wavefronts and near-field spherical wavefronts bring several challenges and potentials to wireless communications, which will be detailed in the following sections.",
|
| 281 |
+
"bbox": [
|
| 282 |
+
503,
|
| 283 |
+
584,
|
| 284 |
+
921,
|
| 285 |
+
643
|
| 286 |
+
],
|
| 287 |
+
"page_idx": 1
|
| 288 |
+
},
|
| 289 |
+
{
|
| 290 |
+
"type": "text",
|
| 291 |
+
"text": "B. Rayleigh Distance",
|
| 292 |
+
"text_level": 1,
|
| 293 |
+
"bbox": [
|
| 294 |
+
504,
|
| 295 |
+
667,
|
| 296 |
+
653,
|
| 297 |
+
681
|
| 298 |
+
],
|
| 299 |
+
"page_idx": 1
|
| 300 |
+
},
|
| 301 |
+
{
|
| 302 |
+
"type": "text",
|
| 303 |
+
"text": "The most crucial premise for near-field communications is quantifying the boundary between the far-field and near-field regions, i.e., the Rayleigh distance. Generally, the classical Rayleigh distance is proportional to the square of the array aperture and the inverse of the wavelength. Its derivation can be summarized as follows [4]. The true phase of the EM wave impinging on a BS antenna has to be calculated based on the accurate spherical wave model. In far-field scenarios, this phase is usually approximated by its first-order Taylor expansion based on the planar wavefront model. This approximation results in a phase discrepancy, which increases when the distance decreases. When the largest phase discrepancy among all BS and UE antennas reaches $\\pi /8$ , the distance between the BS array center and the UE array center is defined as the Rayleigh distance. Accordingly, if the communication distance is shorter than the Rayleigh distance, the largest phase discrepancy will be larger than $\\pi /8$ . In this case, the far-field",
|
| 304 |
+
"bbox": [
|
| 305 |
+
501,
|
| 306 |
+
686,
|
| 307 |
+
921,
|
| 308 |
+
946
|
| 309 |
+
],
|
| 310 |
+
"page_idx": 1
|
| 311 |
+
},
|
| 312 |
+
{
|
| 313 |
+
"type": "page_number",
|
| 314 |
+
"text": "2",
|
| 315 |
+
"bbox": [
|
| 316 |
+
911,
|
| 317 |
+
30,
|
| 318 |
+
919,
|
| 319 |
+
40
|
| 320 |
+
],
|
| 321 |
+
"page_idx": 1
|
| 322 |
+
},
|
| 323 |
+
{
|
| 324 |
+
"type": "image",
|
| 325 |
+
"img_path": "images/16e3bd4a9e754204de02fd04c8bcce227682dc0729cecf48b17a80e1c9e1dd63.jpg",
|
| 326 |
+
"image_caption": [
|
| 327 |
+
"Fig. 2: Near-field ranges for typical scenarios."
|
| 328 |
+
],
|
| 329 |
+
"image_footnote": [],
|
| 330 |
+
"bbox": [
|
| 331 |
+
114,
|
| 332 |
+
65,
|
| 333 |
+
897,
|
| 334 |
+
244
|
| 335 |
+
],
|
| 336 |
+
"page_idx": 2
|
| 337 |
+
},
|
| 338 |
+
{
|
| 339 |
+
"type": "text",
|
| 340 |
+
"text": "approximation becomes inaccurate, and thus the near-field propagation needs to be utilized.",
|
| 341 |
+
"bbox": [
|
| 342 |
+
73,
|
| 343 |
+
276,
|
| 344 |
+
491,
|
| 345 |
+
306
|
| 346 |
+
],
|
| 347 |
+
"page_idx": 2
|
| 348 |
+
},
|
| 349 |
+
{
|
| 350 |
+
"type": "text",
|
| 351 |
+
"text": "Based on this definition, the near-field ranges for SIMO, MISO, and MIMO communication systems can be obtained. As illustrated in Fig. 2, the near-field range of SIMO/MISO scenarios is precisely determined by the classical Rayleigh distance, which is proportional to the square of BS array aperture. For the MIMO scenario, since ELAs are employed at two sides of the BS-UE link, both the BS array aperture and the UE array aperture contribute to the Rayleigh distance, i.e., the near-field range is proportional to the square of the sum of BS array aperture and UE array aperture.",
|
| 352 |
+
"bbox": [
|
| 353 |
+
73,
|
| 354 |
+
306,
|
| 355 |
+
491,
|
| 356 |
+
459
|
| 357 |
+
],
|
| 358 |
+
"page_idx": 2
|
| 359 |
+
},
|
| 360 |
+
{
|
| 361 |
+
"type": "text",
|
| 362 |
+
"text": "Interestingly enough, we further extend the conventional Rayleigh distance derived in SIMO/MISO/MIMO systems to that in RIS-aided communication systems, as shown in Fig. 2. Unlike SIMO/MISO/MIMO channels with a direct BS-UE link, the cascaded BS-RIS-UE channel in RIS systems comprises the BS-RIS and RIS-UE links. Therefore, when calculating phase discrepancy, the BS-RIS distance and the RIS-UE distance need to be added. Then, capturing the largest phase discrepancy of $\\pi /8$ , the near-field range in RIS systems is determined by the harmonic mean of the BS-RIS distance and the RIS-UE distance, as shown in Fig. 2. It can be further implied from Fig. 2 that, as long as any of these two distances is shorter than the Rayleigh distance, RIS-aided communication is operating in the near-field area. Therefore, near-field propagation is more likely to happen in RIS systems.",
|
| 363 |
+
"bbox": [
|
| 364 |
+
73,
|
| 365 |
+
460,
|
| 366 |
+
493,
|
| 367 |
+
686
|
| 368 |
+
],
|
| 369 |
+
"page_idx": 2
|
| 370 |
+
},
|
| 371 |
+
{
|
| 372 |
+
"type": "text",
|
| 373 |
+
"text": "With the dramatically increased number of antennas and carrier frequency, the near-field range of ELAA considerably expands. For instance, we have recently fabricated a 0.36-meter-aperture ELAA at $28\\mathrm{GHz}$ . If it is employed in SIMO/MISO scenarios, its near-field range is about 25 meters. When both transmitter and receiver are equipped with this array, the near-field range becomes 100 meters. Moreover, if this ELAA works as a RIS with a BS-RIS distance of 50 meters, the near-field propagation should be accepted once the RIS-UE distance is shorter than 50 meters. In summary, near-field communications come to be an indispensable part of future 6G.",
|
| 374 |
+
"bbox": [
|
| 375 |
+
73,
|
| 376 |
+
686,
|
| 377 |
+
495,
|
| 378 |
+
853
|
| 379 |
+
],
|
| 380 |
+
"page_idx": 2
|
| 381 |
+
},
|
| 382 |
+
{
|
| 383 |
+
"type": "text",
|
| 384 |
+
"text": "III. CHALLENGES OF NEAR-FIELD COMMUNICATIONS",
|
| 385 |
+
"text_level": 1,
|
| 386 |
+
"bbox": [
|
| 387 |
+
89,
|
| 388 |
+
875,
|
| 389 |
+
477,
|
| 390 |
+
888
|
| 391 |
+
],
|
| 392 |
+
"page_idx": 2
|
| 393 |
+
},
|
| 394 |
+
{
|
| 395 |
+
"type": "text",
|
| 396 |
+
"text": "The near-field propagation causes several challenges to wireless communications, i.e., existing 5G transmission methods specific for far-field suffer from severe performance loss in",
|
| 397 |
+
"bbox": [
|
| 398 |
+
73,
|
| 399 |
+
898,
|
| 400 |
+
493,
|
| 401 |
+
946
|
| 402 |
+
],
|
| 403 |
+
"page_idx": 2
|
| 404 |
+
},
|
| 405 |
+
{
|
| 406 |
+
"type": "image",
|
| 407 |
+
"img_path": "images/ea3872894e6e099d44d2561f7e545ee21c0aa70d13697107264de1f95c8d4b95.jpg",
|
| 408 |
+
"image_caption": [
|
| 409 |
+
"Fig. 3: Near-field codebook with non-uniform grids."
|
| 410 |
+
],
|
| 411 |
+
"image_footnote": [],
|
| 412 |
+
"bbox": [
|
| 413 |
+
529,
|
| 414 |
+
277,
|
| 415 |
+
882,
|
| 416 |
+
503
|
| 417 |
+
],
|
| 418 |
+
"page_idx": 2
|
| 419 |
+
},
|
| 420 |
+
{
|
| 421 |
+
"type": "text",
|
| 422 |
+
"text": "near-field areas. Technologies recently developed for addressing these challenges are discussed in this section.",
|
| 423 |
+
"bbox": [
|
| 424 |
+
503,
|
| 425 |
+
547,
|
| 426 |
+
921,
|
| 427 |
+
578
|
| 428 |
+
],
|
| 429 |
+
"page_idx": 2
|
| 430 |
+
},
|
| 431 |
+
{
|
| 432 |
+
"type": "text",
|
| 433 |
+
"text": "A. Near-Field Channel Estimation",
|
| 434 |
+
"text_level": 1,
|
| 435 |
+
"bbox": [
|
| 436 |
+
504,
|
| 437 |
+
604,
|
| 438 |
+
743,
|
| 439 |
+
619
|
| 440 |
+
],
|
| 441 |
+
"page_idx": 2
|
| 442 |
+
},
|
| 443 |
+
{
|
| 444 |
+
"type": "text",
|
| 445 |
+
"text": "Challenge: Accurate channel estimation is required to attain the expected performance gain of ELAA. As the number of channel paths is usually much smaller than that of antennas, channel estimation methods with low pilot overhead generally design suitable codebooks to transform the channel into a sparse representation. For the far-field codebook, each codeword of the codebook corresponds to a planar wave associated with one incident angle. Ideally, each far-field path can be represented by only one codeword. With this far-field codebook, the angle-domain representation of the channel can be obtained, and it is usually sparse due to the limited paths. Then, beam training and compressed sensing (CS) methods are applied to estimate far-field channels with low pilot overhead accurately. However, this far-field planar-wave codebook mismatches the actual near-field spherical-wave channel. This mismatch induces that a single near-field path should be jointly described by multiple codewords of the far-field codebook. Accordingly, the near-field angle-domain channel is not sparse anymore, which inevitably leads to the degradation of channel estimation accuracy. Therefore, near-field codebooks suitable for near-field channels need to be carefully created.",
|
| 446 |
+
"bbox": [
|
| 447 |
+
501,
|
| 448 |
+
627,
|
| 449 |
+
924,
|
| 450 |
+
946
|
| 451 |
+
],
|
| 452 |
+
"page_idx": 2
|
| 453 |
+
},
|
| 454 |
+
{
|
| 455 |
+
"type": "page_number",
|
| 456 |
+
"text": "3",
|
| 457 |
+
"bbox": [
|
| 458 |
+
911,
|
| 459 |
+
30,
|
| 460 |
+
919,
|
| 461 |
+
40
|
| 462 |
+
],
|
| 463 |
+
"page_idx": 2
|
| 464 |
+
},
|
| 465 |
+
{
|
| 466 |
+
"type": "image",
|
| 467 |
+
"img_path": "images/231152303daff14980278a4f26a030810d3a2a659094ff2d450aa254b243394b.jpg",
|
| 468 |
+
"image_caption": [
|
| 469 |
+
"Fig. 4: This figure illustrates the far-field beam split effect (left) and the near-field beam split effect (right). Far-field beam split makes beams at different frequencies transmit towards different directions, while near-field beam split makes beams at different frequencies be focused on various locations."
|
| 470 |
+
],
|
| 471 |
+
"image_footnote": [],
|
| 472 |
+
"bbox": [
|
| 473 |
+
174,
|
| 474 |
+
69,
|
| 475 |
+
823,
|
| 476 |
+
300
|
| 477 |
+
],
|
| 478 |
+
"page_idx": 3
|
| 479 |
+
},
|
| 480 |
+
{
|
| 481 |
+
"type": "text",
|
| 482 |
+
"text": "Recent progress: Some recent works have been endeavored to design near-field codebooks utilizing spherical wavefronts [5], [6]. In [6], the entire two-dimensional physical space is uniformly divided into multiple grids. Each grid is associated with a near-field array response vector, and all of these vectors construct the near-field codebook. With this codebook, the joint angle-distance information of each near-field path is extracted. Then, the near-field channel can be estimated by CS methods with low pilot overhead. However, with the decrease of BSUE distance, the near-field propagation becomes dominant, and the distance information gradually becomes more crucial. Therefore, we can conceive the intuition that the grids should be sparse far away from the ELAA but dense near the ELAA. Without considering this intuition, the codebook in [6] is hard to attain satisfactory channel estimation performance in the entire near-field region. To this end, by minimizing the largest coherence among codewords in the near-field codebook, authors in [5] mathematically prove this intuition, i.e., the angle space could be uniformly divided, while the distance space should be non-uniformly divided. As shown in Fig. 3, the shorter the distance, the denser the grid. With the help of this non-uniform codebook, a polar-domain sparse channel representation and corresponding CS-based algorithms are proposed in [5] to accomplish accurate channel estimation in both near- and farfield areas.",
|
| 483 |
+
"bbox": [
|
| 484 |
+
73,
|
| 485 |
+
359,
|
| 486 |
+
493,
|
| 487 |
+
736
|
| 488 |
+
],
|
| 489 |
+
"page_idx": 3
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"type": "text",
|
| 493 |
+
"text": "B. Near-Field Beam Split",
|
| 494 |
+
"text_level": 1,
|
| 495 |
+
"bbox": [
|
| 496 |
+
75,
|
| 497 |
+
758,
|
| 498 |
+
253,
|
| 499 |
+
773
|
| 500 |
+
],
|
| 501 |
+
"page_idx": 3
|
| 502 |
+
},
|
| 503 |
+
{
|
| 504 |
+
"type": "text",
|
| 505 |
+
"text": "Challenge: In THz wideband systems, ELAA might encounter a beam split phenomenon, also known as beam squint and spatial-wideband effect. Existing THz beamforming architecture often employs analog phase-shifters (PSs) [7], which usually tune the same phase shift for signals at different frequencies. Nonetheless, the actual phase of the EM wave is the product of the signal propagation delay and the frequency-dependent wavenumber. As a result, the signal propagation delay can be compensated through a phase shift adequately only for a narrow band signal. Phase errors are introduced for the other frequencies, thus causing the beam split effect.",
|
| 506 |
+
"bbox": [
|
| 507 |
+
73,
|
| 508 |
+
777,
|
| 509 |
+
495,
|
| 510 |
+
945
|
| 511 |
+
],
|
| 512 |
+
"page_idx": 3
|
| 513 |
+
},
|
| 514 |
+
{
|
| 515 |
+
"type": "text",
|
| 516 |
+
"text": "In fact, the impact of beam split on far-field and near-field propagations also differs.",
|
| 517 |
+
"bbox": [
|
| 518 |
+
503,
|
| 519 |
+
359,
|
| 520 |
+
921,
|
| 521 |
+
388
|
| 522 |
+
],
|
| 523 |
+
"page_idx": 3
|
| 524 |
+
},
|
| 525 |
+
{
|
| 526 |
+
"type": "text",
|
| 527 |
+
"text": "In far-field, beam split leads to the fact that beams at different frequencies are transmitting towards different angles, as shown in the left figure of Fig. 4. For near-field beam split, however, beams are focused at both different angles and various distances due to the split of spherical waves, as shown in the right figure of Fig. 4. Both far-field and nearfield beam splits severely reduce the received signal energy of frequency components misaligned with the user location. Over the years, extensive works have been proposed to mitigate farfield beam split by tuning frequency-dependent phase shifts with planar wavefronts through true-time-delay-based (TTD-based) beamforming instead of PS-based beamforming. Unfortunately, owing to the discrepancy between planar and spherical waves, these schemes addressing the far-field beam split no longer work well in the near-field, posing challenges to THz ELAA communications.",
|
| 528 |
+
"bbox": [
|
| 529 |
+
501,
|
| 530 |
+
392,
|
| 531 |
+
923,
|
| 532 |
+
633
|
| 533 |
+
],
|
| 534 |
+
"page_idx": 3
|
| 535 |
+
},
|
| 536 |
+
{
|
| 537 |
+
"type": "text",
|
| 538 |
+
"text": "Recent progress: Recently, a few efforts have tried to overcome the near-field beam split effect. In [8], a variant of chirp sequence is utilized to design the phase shifts, for flattening the beamfocusing gain across frequencies with the sacrifice of the maximum beamfocusing gain. This method can slightly alleviate the near-field beam split effect, but its spectral efficiency degrades as well when the bandwidth is very large, as the beams are still generated by PSs. To this end, a phase-delay focusing (PDF) method is proposed in [9] exploiting TTD-based beamforming. To further illustrate, the BS ELAA is first partitioned into multiple sub-arrays. The UE is assumed to be located in the far-field area of each small sub-array but within the near-field range of the ELAA. Then, one TTD line is inserted between each sub-array and the radio-frequency (RF) chain to realize frequency-dependent phase shifts. Finally, the frequency-dependent phase variations across different sub-arrays induced by spherical wavefronts are compensated by the inserted TTD line. As a result, beams over the working band are focused at the target UE location [9].",
|
| 539 |
+
"bbox": [
|
| 540 |
+
503,
|
| 541 |
+
638,
|
| 542 |
+
923,
|
| 543 |
+
925
|
| 544 |
+
],
|
| 545 |
+
"page_idx": 3
|
| 546 |
+
},
|
| 547 |
+
{
|
| 548 |
+
"type": "text",
|
| 549 |
+
"text": "In conclusion, the first solution [8] follows the PS-based",
|
| 550 |
+
"bbox": [
|
| 551 |
+
519,
|
| 552 |
+
928,
|
| 553 |
+
921,
|
| 554 |
+
944
|
| 555 |
+
],
|
| 556 |
+
"page_idx": 3
|
| 557 |
+
},
|
| 558 |
+
{
|
| 559 |
+
"type": "page_number",
|
| 560 |
+
"text": "4",
|
| 561 |
+
"bbox": [
|
| 562 |
+
911,
|
| 563 |
+
31,
|
| 564 |
+
919,
|
| 565 |
+
40
|
| 566 |
+
],
|
| 567 |
+
"page_idx": 3
|
| 568 |
+
},
|
| 569 |
+
{
|
| 570 |
+
"type": "text",
|
| 571 |
+
"text": "beamforming, which is easy to implement but the achievable performance is unsatisfactory. The second scheme [9] can nearly eliminate the near-field beam split effect but requires the implementation of TTD lines. In fact, although deploying TTD lines by optical fibers has been demonstrated in the optical domain, this kind of deployment is non-trivial to be extended to THz ELAA communications. Fortunately, recent advances in graphene-based plasmonic waveguides provide low-cost solutions for implementing TTD lines at high frequencies [7].",
|
| 572 |
+
"bbox": [
|
| 573 |
+
73,
|
| 574 |
+
69,
|
| 575 |
+
491,
|
| 576 |
+
205
|
| 577 |
+
],
|
| 578 |
+
"page_idx": 4
|
| 579 |
+
},
|
| 580 |
+
{
|
| 581 |
+
"type": "text",
|
| 582 |
+
"text": "IV. POTENTIALS FOR NEAR-FIELD COMMUNICATIONS",
|
| 583 |
+
"text_level": 1,
|
| 584 |
+
"bbox": [
|
| 585 |
+
89,
|
| 586 |
+
222,
|
| 587 |
+
477,
|
| 588 |
+
237
|
| 589 |
+
],
|
| 590 |
+
"page_idx": 4
|
| 591 |
+
},
|
| 592 |
+
{
|
| 593 |
+
"type": "text",
|
| 594 |
+
"text": "Unlike the aforementioned works for dealing with the performance degradation in the near-field, some recent studies have surprisingly revealed that 6G networks can also benefit from near-field propagation. In this section, we will discuss those studies exploiting the potentials of near-field propagation to improve communication performance.",
|
| 595 |
+
"bbox": [
|
| 596 |
+
73,
|
| 597 |
+
241,
|
| 598 |
+
491,
|
| 599 |
+
333
|
| 600 |
+
],
|
| 601 |
+
"page_idx": 4
|
| 602 |
+
},
|
| 603 |
+
{
|
| 604 |
+
"type": "text",
|
| 605 |
+
"text": "A. Capacity Enhancement",
|
| 606 |
+
"text_level": 1,
|
| 607 |
+
"bbox": [
|
| 608 |
+
73,
|
| 609 |
+
352,
|
| 610 |
+
256,
|
| 611 |
+
366
|
| 612 |
+
],
|
| 613 |
+
"page_idx": 4
|
| 614 |
+
},
|
| 615 |
+
{
|
| 616 |
+
"type": "text",
|
| 617 |
+
"text": "Potential: The spatial multiplexing gain of MIMO communications considerably increases with the transition from far-field regions to near-field regions. In far-field MIMO communications, the line-of-sight (LoS) channel can be represented by a rank-one matrix, where the spatial degrees-of-freedom (DoFs) are very limited. By contrast, the near-field LoS channel can be rank-sufficient derived from the geometric relationship under the spherical propagation model. The increased rank indicates dramatically improved spatial DoFs in the near-field region. Precisely, based on the expansion of prolate spheroidal wave functions, it is proved in [10] that near-field spatial DoFs are proportional to the product of the BS and UE array apertures and inversely proportional to the BS-UE distance. This conclusion is further improved in [11] by meticulously designing the beamfocusing vectors of the BS and UE arrays. As shown in Fig. 5, the DoFs increase from 1 to 20 when the BS-UE distance decreases from 350 meters to 10 meters. Thanks to the increased DoFs, the near-field LoS path enables simultaneous transmission of multiple data streams by MIMO precoding, as opposed to the rank-one far-field LoS channel supporting only one data stream. The increased spatial DoFs can be exploited as an additional spatial multiplexing gain, which offers a new possibility for a significant capacity enhancement.",
|
| 618 |
+
"bbox": [
|
| 619 |
+
73,
|
| 620 |
+
369,
|
| 621 |
+
491,
|
| 622 |
+
717
|
| 623 |
+
],
|
| 624 |
+
"page_idx": 4
|
| 625 |
+
},
|
| 626 |
+
{
|
| 627 |
+
"type": "text",
|
| 628 |
+
"text": "Recent progress: Recently, some novel precoding architectures have been proposed to leverage these extra near-field DoFs for MIMO capacity enhancement [12], [13]. Firstly, distance-aware precoding (DAP) is developed in [12]. Unlike classical hybrid precoding with a fixed and limited number of RF chains (e.g., 2 or 4 fixed RF chains), the DAP architecture could flexibly adjust the number of RF chains to match the distance-related DoFs, which is achieved by deploying a selection network to configure each RF chain as active or inactive. For instance, in the far-field region, only one RF chain is activated for data transmission. When communication distance is decreasing to 10-20 meters, around 20 activated RF chains are enough to adapt to the DoFs, as shown in Fig. 5. By doing so, the number of transmitted data streams dynamically matches the DoFs. Simulations demonstrate the DAP could",
|
| 629 |
+
"bbox": [
|
| 630 |
+
73,
|
| 631 |
+
718,
|
| 632 |
+
493,
|
| 633 |
+
944
|
| 634 |
+
],
|
| 635 |
+
"page_idx": 4
|
| 636 |
+
},
|
| 637 |
+
{
|
| 638 |
+
"type": "image",
|
| 639 |
+
"img_path": "images/876d39702e457a1c694bd3b5d3218a190c944a0d4b3693f210bc21cf6ee3d424.jpg",
|
| 640 |
+
"image_caption": [
|
| 641 |
+
"Fig. 5: The spatial DoF increases in the near-field region."
|
| 642 |
+
],
|
| 643 |
+
"image_footnote": [],
|
| 644 |
+
"bbox": [
|
| 645 |
+
521,
|
| 646 |
+
82,
|
| 647 |
+
893,
|
| 648 |
+
311
|
| 649 |
+
],
|
| 650 |
+
"page_idx": 4
|
| 651 |
+
},
|
| 652 |
+
{
|
| 653 |
+
"type": "text",
|
| 654 |
+
"text": "significantly increase the spectral efficiency while its energy efficiency is comparable with hybrid precoding. To avoid the utilization of extra RF chains, another effort to harvest the potential spatial multiplexing gain in near-field areas is the widely-spaced multi-subarray (WSMS) precoding [13]. In this architecture, the sub-arrays are widely spaced to enlarge the array aperture, artificially creating the expansion of the near-field region. Compared with classical hybrid precoding, the number of sub-arrays and the sub-array spacing should be additionally designed in the WSMS architecture. To this end, [13] first assumes planar-wave propagation within each sub-array and spherical-wave propagation across different sub-arrays similar to [9]. Then, [13] jointly optimizes the number of sub-arrays, their spacing, and the precoding matrix for maximizing the achievable rate. Simulations demonstrate that WSMS could achieve nearly $200\\%$ higher spectral efficiency than classical hybrid precoding.",
|
| 655 |
+
"bbox": [
|
| 656 |
+
501,
|
| 657 |
+
348,
|
| 658 |
+
924,
|
| 659 |
+
606
|
| 660 |
+
],
|
| 661 |
+
"page_idx": 4
|
| 662 |
+
},
|
| 663 |
+
{
|
| 664 |
+
"type": "text",
|
| 665 |
+
"text": "B. Accessibility Improvement",
|
| 666 |
+
"text_level": 1,
|
| 667 |
+
"bbox": [
|
| 668 |
+
504,
|
| 669 |
+
635,
|
| 670 |
+
705,
|
| 671 |
+
650
|
| 672 |
+
],
|
| 673 |
+
"page_idx": 4
|
| 674 |
+
},
|
| 675 |
+
{
|
| 676 |
+
"type": "text",
|
| 677 |
+
"text": "Potential: Near-field propagation is also able to improve accessibility in multi-user (MU) communications. To increase the spectral efficiency in MU-MIMO communications, space division multiple access (SDMA) is widely considered to distinguish users through orthogonal or near-orthogonal spatial beams. Thus, multiple users can share the same time and frequency resources. For far-field SDMA, utilizing beamsteering to generate beams with planar wavefronts can distinguish users at different angles. A downside is that users located at similar angles will severely interfere with each other, and thus can not simultaneously access the network through farfield SDMA. Fortunately, near-field beamfocusing enjoys the capability of energy focusing on the joint angle-distance domain. Hence, near-field SDMA could generate beams with spherical wavefronts to simultaneously serve users located at similar angles but different distances, as shown in Fig. 6. The distance information of spherical wavefronts supplies a newizable dimension for multi-user access, which is not achievable for conventional far-field SDMA.",
|
| 678 |
+
"bbox": [
|
| 679 |
+
501,
|
| 680 |
+
657,
|
| 681 |
+
924,
|
| 682 |
+
944
|
| 683 |
+
],
|
| 684 |
+
"page_idx": 4
|
| 685 |
+
},
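The distance dimension claimed above can be checked numerically: two near-field steering vectors at the same angle but different distances are far from collinear, whereas their far-field (planar-wave) counterparts would be identical. A small sketch under an assumed 512-element half-wavelength ULA at 28 GHz:

```python
import numpy as np

def near_field_steering(n, wavelength, r, theta):
    """Spherical-wavefront response of an n-element half-wavelength ULA.

    Each element's phase comes from its exact distance to a source at
    (r, theta), rather than the far-field linear approximation.
    """
    x = (np.arange(n) - (n - 1) / 2) * wavelength / 2
    dist = np.sqrt(r**2 - 2 * r * x * np.sin(theta) + x**2)
    return np.exp(-2j * np.pi * dist / wavelength) / np.sqrt(n)

n, wavelength = 512, 3e8 / 28e9
theta = np.deg2rad(30)                                 # both users at 30 degrees
a1 = near_field_steering(n, wavelength, 5.0, theta)    # user 1 at 5 m
a2 = near_field_steering(n, wavelength, 15.0, theta)   # user 2 at 15 m
print(abs(np.vdot(a1, a2)))   # well below 1: beamfocusing can separate them
# A planar-wave model carries no distance information, so these two users
# would have correlation exactly 1 and could not be separated by far-field SDMA.
```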
|
| 686 |
+
{
|
| 687 |
+
"type": "page_number",
|
| 688 |
+
"text": "5",
|
| 689 |
+
"bbox": [
|
| 690 |
+
911,
|
| 691 |
+
30,
|
| 692 |
+
919,
|
| 693 |
+
40
|
| 694 |
+
],
|
| 695 |
+
"page_idx": 4
|
| 696 |
+
},
|
| 697 |
+
{
|
| 698 |
+
"type": "image",
|
| 699 |
+
"img_path": "images/f6b9024939b4f40d399fd833e3b45a5590bcfc6ed4a910a466f3330a3b8489d7.jpg",
|
| 700 |
+
"image_caption": [
|
| 701 |
+
"Fig. 6: Near-field beamfocusing is able to serve multiple users in the same angle."
|
| 702 |
+
],
|
| 703 |
+
"image_footnote": [],
|
| 704 |
+
"bbox": [
|
| 705 |
+
114,
|
| 706 |
+
70,
|
| 707 |
+
369,
|
| 708 |
+
243
|
| 709 |
+
],
|
| 710 |
+
"page_idx": 5
|
| 711 |
+
},
|
| 712 |
+
{
|
| 713 |
+
"type": "image",
|
| 714 |
+
"img_path": "images/a7ee6f0a7fa00e3639ec33a440b50242e92703f8a2417f401d04a51577603edf.jpg",
|
| 715 |
+
"image_caption": [],
|
| 716 |
+
"image_footnote": [],
|
| 717 |
+
"bbox": [
|
| 718 |
+
372,
|
| 719 |
+
70,
|
| 720 |
+
627,
|
| 721 |
+
244
|
| 722 |
+
],
|
| 723 |
+
"page_idx": 5
|
| 724 |
+
},
|
| 725 |
+
{
|
| 726 |
+
"type": "image",
|
| 727 |
+
"img_path": "images/3d17f6645d135dc4f4a9138971d9cfd3447071f52b567d041722b39d436b8a4a.jpg",
|
| 728 |
+
"image_caption": [],
|
| 729 |
+
"image_footnote": [],
|
| 730 |
+
"bbox": [
|
| 731 |
+
629,
|
| 732 |
+
71,
|
| 733 |
+
883,
|
| 734 |
+
244
|
| 735 |
+
],
|
| 736 |
+
"page_idx": 5
|
| 737 |
+
},
|
| 738 |
+
{
|
| 739 |
+
"type": "text",
|
| 740 |
+
"text": "Recent progress: Taking advantage of the capability of beamfocusing, the authors in [14] have studied the near-field multi-user transmission considering fully-digital precoding, hybrid precoding, and transmissive reconfigurable metasurface (RMS). By optimizing the sum rate in multi-user systems through alternating optimization, all considered precoding architectures can naturally generate beams with spherical wavefronts to distinguish users located at similar angles but different distances. The simulation results demonstrate that near-field propagation has the potential of enhancing multi-user accessibility.",
|
| 741 |
+
"bbox": [
|
| 742 |
+
73,
|
| 743 |
+
279,
|
| 744 |
+
493,
|
| 745 |
+
446
|
| 746 |
+
],
|
| 747 |
+
"page_idx": 5
|
| 748 |
+
},
|
| 749 |
+
{
|
| 750 |
+
"type": "text",
|
| 751 |
+
"text": "V. FUTURE RESEARCH DIRECTIONS",
|
| 752 |
+
"text_level": 1,
|
| 753 |
+
"bbox": [
|
| 754 |
+
153,
|
| 755 |
+
465,
|
| 756 |
+
411,
|
| 757 |
+
478
|
| 758 |
+
],
|
| 759 |
+
"page_idx": 5
|
| 760 |
+
},
|
| 761 |
+
{
|
| 762 |
+
"type": "text",
|
| 763 |
+
"text": "In this section, several future research directions for nearfield communications will be pointed out.",
|
| 764 |
+
"bbox": [
|
| 765 |
+
73,
|
| 766 |
+
484,
|
| 767 |
+
491,
|
| 768 |
+
513
|
| 769 |
+
],
|
| 770 |
+
"page_idx": 5
|
| 771 |
+
},
|
| 772 |
+
{
|
| 773 |
+
"type": "text",
|
| 774 |
+
"text": "A. Near-Field Communication Theory",
|
| 775 |
+
"text_level": 1,
|
| 776 |
+
"bbox": [
|
| 777 |
+
73,
|
| 778 |
+
536,
|
| 779 |
+
336,
|
| 780 |
+
551
|
| 781 |
+
],
|
| 782 |
+
"page_idx": 5
|
| 783 |
+
},
|
| 784 |
+
{
|
| 785 |
+
"type": "text",
|
| 786 |
+
"text": "1) Improvement of Rayleigh Distance: As a widely adopted quantification of near-field range, Rayleigh distance is attained in terms of phase discrepancy. For communication metrics directly affected by phase discrepancy, such as channel estimation accuracy, Rayleigh distance can accurately capture the degradation of these metrics when applying far-field transmission schemes in the near-field region. On the contrary, some metrics are directly influenced by other factors instead of phase discrepancy, e.g., capacity is determined by beamforming gain and channel rank. Accordingly, classical Rayleigh distance probably cannot capture the performance loss of these metrics well. To this end, several recent works have endeavored to improve classical Rayleigh distance in terms of some vital communication metrics. For instance, an effective Rayleigh distance (ERD) is derived in [9] for the accurate description of beamforming gain loss and capacity loss. Nevertheless, ERD is only valid for MISO communications, while more discussion should be made to improve Rayleigh distance in more practical scenarios under more general metrics, e.g., channel rank and energy efficiency in MIMO and RIS systems.",
|
| 787 |
+
"bbox": [
|
| 788 |
+
73,
|
| 789 |
+
556,
|
| 790 |
+
491,
|
| 791 |
+
859
|
| 792 |
+
],
|
| 793 |
+
"page_idx": 5
|
| 794 |
+
},
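The gap between the phase-discrepancy criterion and gain-related metrics can be seen with a few lines of numerics: steering a far-field (angle-only) beam at a user on the true spherical-wave channel, the normalized beamforming gain is still close to 1 at the classical Rayleigh distance and collapses only much closer in, which is the regime an ERD-style metric aims to capture. A sketch with an assumed 256-element ULA at 28 GHz (all parameters illustrative):

```python
import numpy as np

n, wavelength, theta = 256, 3e8 / 28e9, np.deg2rad(20)
x = (np.arange(n) - (n - 1) / 2) * wavelength / 2      # ULA element positions
rayleigh = 2 * (x[-1] - x[0])**2 / wavelength          # classical Rayleigh distance

# Far-field beamsteering weights: matched to the angle, blind to distance.
w = np.exp(-2j * np.pi * x * np.sin(theta) / wavelength) / np.sqrt(n)

for frac in (1.0, 0.5, 0.2, 0.1, 0.05):
    r = frac * rayleigh
    dist = np.sqrt(r**2 - 2 * r * x * np.sin(theta) + x**2)
    h = np.exp(-2j * np.pi * dist / wavelength) / np.sqrt(n)   # spherical-wave channel
    print(f"r = {frac:4.2f} x Rayleigh: normalized gain {abs(np.vdot(w, h))**2:.3f}")
```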
|
| 795 |
+
{
|
| 796 |
+
"type": "text",
|
| 797 |
+
"text": "B. Near-Field Transmission Technologies",
|
| 798 |
+
"text_level": 1,
|
| 799 |
+
"bbox": [
|
| 800 |
+
73,
|
| 801 |
+
878,
|
| 802 |
+
357,
|
| 803 |
+
895
|
| 804 |
+
],
|
| 805 |
+
"page_idx": 5
|
| 806 |
+
},
|
| 807 |
+
{
|
| 808 |
+
"type": "text",
|
| 809 |
+
"text": "1) AI-Aided Near-Field Communications: Different from far-field communications, the transmission algorithms for nearfield communications are more complex. To be specific, since",
|
| 810 |
+
"bbox": [
|
| 811 |
+
73,
|
| 812 |
+
898,
|
| 813 |
+
491,
|
| 814 |
+
945
|
| 815 |
+
],
|
| 816 |
+
"page_idx": 5
|
| 817 |
+
},
|
| 818 |
+
{
|
| 819 |
+
"type": "text",
|
| 820 |
+
"text": "extra grids on the distance domain are required, as mentioned in Section III-A [5], the size of near-field codebooks is usually much larger than that of far-field codebooks, leading to high-complexity channel estimation and beamforming. Moreover, the non-linear phase characteristics of spherical waves make the design of near-field beam training and precoding algorithms more complicated than that in far-field areas. AI-based transmission methods are promising to address these problems since they can mine the features of near-field environments through non-linear neural networks. Currently, there are plenty of works elaborating on AI-based far-field transmissions (references are not provided here since the number of references is limited in this magazine), while AI-based near-field transmissions have not been well studied.",
|
| 821 |
+
"bbox": [
|
| 822 |
+
501,
|
| 823 |
+
279,
|
| 824 |
+
924,
|
| 825 |
+
489
|
| 826 |
+
],
|
| 827 |
+
"page_idx": 5
|
| 828 |
+
},
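To make the codebook-size point concrete, here is a sketch of a polar-domain style grid: angles sampled uniformly (as in far-field codebooks) and distances sampled non-uniformly, denser near the array. Sampling uniformly in reciprocal distance is one simple rule consistent with Fig. 3; the exact coherence-minimizing grid of [5] may differ, and all parameters are illustrative assumptions.

```python
import numpy as np

n = 256                                   # antennas = number of angle grids
wavelength = 3e8 / 28e9
aperture = (n - 1) * wavelength / 2
rayleigh = 2 * aperture**2 / wavelength

angles = np.arcsin(np.linspace(-1, 1, n, endpoint=False))   # uniform in sin-angle
s = 8                                     # distance rings per angle (assumed)
r_min = 3.0                               # closest ring, in meters (assumed)
rings = 1.0 / np.linspace(1.0 / r_min, 1.0 / rayleigh, s)   # dense near the array

print(f"far-field codebook : {angles.size} codewords")
print(f"near-field codebook: {angles.size * s} codewords")   # s-fold larger
print("distance rings (m):", np.round(rings, 1))
```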
|
| 829 |
+
{
|
| 830 |
+
"type": "list",
|
| 831 |
+
"sub_type": "text",
|
| 832 |
+
"list_items": [
|
| 833 |
+
"2) RIS-Aided Near-Field Communications: Compared with MIMO communications, the near-field propagation becomes even more dominant and complex in RIS-aided systems. In MIMO communications, based on the spherical propagation model, the EM waves form spherical equiphase surfaces at the receiver. On the contrary, in RIS-aided systems, the phase of received EM waves is accumulated by the propagation delays through the BE-RIS and RIS-UE links. Based on the geometry relationship, the equiphase surfaces become ellipses in the near-field range instead of spherical. Accordingly, the research on beamfocusing [15], channel estimation, and multiple access techniques taking into account this ellipses-equiphase property are required for RIS-aided near-field communications.",
|
| 834 |
+
"3) Hybrid Far- and Near-Field Communications: In practical systems, communication environments usually exist with both far-field and near-field signal components. First, in multi-user systems with multi-path channels, some users and scatterers may be far away from the BS while others are located in the near-field region of the BS, which constitutes a hybrid far- and near-field (hybrid-field) communication scenario. Additionally, it is worth mentioning that the Rayleigh distance is proportional to frequency. Thus, in an ultra-wideband or frequency-hopping system with a very large frequency span, its near-field range varies dramatically across the bandwidth. Chances are that the signal components at low frequencies may operate in far-field regions while those at high frequencies with larger Rayleigh distances are propagating in the near-field areas, which also contributes to hybrid-field communications. Consequently, the above factors make hybrid-field communications practical and crucial in future 6G networks. Thus, hybrid-field"
|
| 835 |
+
],
|
| 836 |
+
"bbox": [
|
| 837 |
+
503,
|
| 838 |
+
491,
|
| 839 |
+
923,
|
| 840 |
+
945
|
| 841 |
+
],
|
| 842 |
+
"page_idx": 5
|
| 843 |
+
},
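As promised in item 2) above, a minimal numerical version of the cascaded near-field test. Following Section II of this paper, the phase discrepancies of the BS-RIS and RIS-UE links add, so the cascaded link behaves like a single link at an effective distance d1·d2/(d1+d2) (half of what the article calls the harmonic mean; the exact constant is an assumption here). This form also reproduces the stated property that the RIS link is near-field whenever either individual distance is short, since d_eff < min(d1, d2).

```python
def ris_near_field(d_bs_ris, d_ris_ue, aperture, wavelength):
    """Cascaded near-field test for a RIS of the given aperture (a sketch).

    d_eff = d1*d2/(d1+d2) is half the harmonic mean of the two link
    distances (the exact constant is an assumption here). Because
    d_eff < min(d1, d2), the link is near-field whenever either
    individual distance is inside the Rayleigh distance.
    """
    rayleigh = 2 * aperture**2 / wavelength
    d_eff = d_bs_ris * d_ris_ue / (d_bs_ris + d_ris_ue)
    return d_eff < rayleigh

wavelength = 3e8 / 28e9
# 0.36 m aperture at 28 GHz: Rayleigh distance ~ 24 m (cf. Section II).
print(ris_near_field(50.0, 40.0, 0.36, wavelength))    # True: near-field
print(ris_near_field(500.0, 500.0, 0.36, wavelength))  # False: far-field
```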
|
| 844 |
+
{
|
| 845 |
+
"type": "page_number",
|
| 846 |
+
"text": "6",
|
| 847 |
+
"bbox": [
|
| 848 |
+
911,
|
| 849 |
+
31,
|
| 850 |
+
919,
|
| 851 |
+
40
|
| 852 |
+
],
|
| 853 |
+
"page_idx": 5
|
| 854 |
+
},
|
| 855 |
+
{
|
| 856 |
+
"type": "text",
|
| 857 |
+
"text": "transmission techniques handling both far-field and near-field signal components deserve in-depth study.",
|
| 858 |
+
"bbox": [
|
| 859 |
+
73,
|
| 860 |
+
69,
|
| 861 |
+
491,
|
| 862 |
+
98
|
| 863 |
+
],
|
| 864 |
+
"page_idx": 6
|
| 865 |
+
},
|
| 866 |
+
{
|
| 867 |
+
"type": "text",
|
| 868 |
+
"text": "4) Spatial Non-Stationarity Effect on Near-Field Communications: Except for near-field propagation, the spatial nonstationarity effect is another fundamental characteristic of ELAA compared to 5G massive MIMO, where different scatterers and users are visible to different portions of the ELAA. This effect leads to the fact that only a part of the ELAA can receive the spherical EM waves radiated by a scatterer or a user. The angular power spectral and average received power rapidly vary over the ELAA. Recently, there have been intensive works dealing with the non-stationarity effect and near-field propagation simultaneously [6]. However, the impart of non-stationarity on other emerging near-field communications has not been well studied, such as RIS-aided systems and hybrid-field communications.",
|
| 869 |
+
"bbox": [
|
| 870 |
+
73,
|
| 871 |
+
99,
|
| 872 |
+
491,
|
| 873 |
+
311
|
| 874 |
+
],
|
| 875 |
+
"page_idx": 6
|
| 876 |
+
},
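A minimal sketch of the visibility-region picture described above: only part of the ELAA "sees" a given scatterer, modeled here as a hard binary mask on the spherical-wave response. The 0/1 mask and all parameters are illustrative assumptions (measured visibility regions have softer edges):

```python
import numpy as np

n = 1024
wavelength = 3e8 / 28e9
x = (np.arange(n) - (n - 1) / 2) * wavelength / 2      # ~5.5 m ELAA

r, theta = 10.0, np.deg2rad(10)                        # a nearby scatterer
dist = np.sqrt(r**2 - 2 * r * x * np.sin(theta) + x**2)
a = np.exp(-2j * np.pi * dist / wavelength)            # spherical-wave response

visible = (x > -0.5) & (x < 0.5)   # only this stretch of the array is illuminated
h = a * visible                    # non-stationary channel: zeros elsewhere

print(f"{visible.mean():.0%} of elements visible; "
      f"per-element received amplitude ranges over {np.abs(h).min():.0f}..{np.abs(h).max():.0f}")
```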
|
| 877 |
+
{
|
| 878 |
+
"type": "text",
|
| 879 |
+
"text": "C. Hardware Development",
|
| 880 |
+
"text_level": 1,
|
| 881 |
+
"bbox": [
|
| 882 |
+
75,
|
| 883 |
+
332,
|
| 884 |
+
261,
|
| 885 |
+
348
|
| 886 |
+
],
|
| 887 |
+
"page_idx": 6
|
| 888 |
+
},
|
| 889 |
+
{
|
| 890 |
+
"type": "text",
|
| 891 |
+
"text": "To verify the effectiveness of near-field transmission technologies, hardware developments and over-the-air experiments are of great significance. For example, for alleviating the nearfield beam split effect, TTD lines need to be meticulously designed in the THz domain. The hardware developments of WSMS and DAP architectures are worth being carried out to exploit the near-field spatial DoFs. Besides, implementing these techniques still has to overcome several hardware impairment issues, including In-phase/Quadrature imbalance, low-efficiency power amplifier at high frequency, etc. All these challenges should be carefully addressed to enable the implementation of 6G near-field communications.",
|
| 892 |
+
"bbox": [
|
| 893 |
+
73,
|
| 894 |
+
352,
|
| 895 |
+
493,
|
| 896 |
+
532
|
| 897 |
+
],
|
| 898 |
+
"page_idx": 6
|
| 899 |
+
},
|
| 900 |
+
{
|
| 901 |
+
"type": "text",
|
| 902 |
+
"text": "VI. CONCLUSIONS",
|
| 903 |
+
"text_level": 1,
|
| 904 |
+
"bbox": [
|
| 905 |
+
214,
|
| 906 |
+
551,
|
| 907 |
+
352,
|
| 908 |
+
566
|
| 909 |
+
],
|
| 910 |
+
"page_idx": 6
|
| 911 |
+
},
|
| 912 |
+
{
|
| 913 |
+
"type": "text",
|
| 914 |
+
"text": "With the evolution from massive MIMO to ELAA, near-field propagation with spherical wavefront becomes indispensable in 6G networks, where conventional far-field propagation with planar wavefront is not valid anymore. In this article, we revealed that near-field propagation is a double-edged sword, i.e., it brings both challenges and potentials to 6G communications. We first introduced the non-linear phase property of spherical waves and explained the derivation of near-field range in terms of phase discrepancy. Then, we discussed the technical challenges of channel estimation and beam split caused by near-field propagation and presented the recent solutions. In addition, some appealing works that exploit the capability of spherical waves to improve capacity and accessibility were investigated. Several future research directions for near-field communications, such as improvement of Rayleigh distance and hybrid-field transmissions, were also highlighted, which are expected to inspire more innovations on 6G near-field communications.",
|
| 915 |
+
"bbox": [
|
| 916 |
+
73,
|
| 917 |
+
571,
|
| 918 |
+
491,
|
| 919 |
+
844
|
| 920 |
+
],
|
| 921 |
+
"page_idx": 6
|
| 922 |
+
},
|
| 923 |
+
{
|
| 924 |
+
"type": "text",
|
| 925 |
+
"text": "ACKNOWLEDGEMENT",
|
| 926 |
+
"text_level": 1,
|
| 927 |
+
"bbox": [
|
| 928 |
+
205,
|
| 929 |
+
864,
|
| 930 |
+
361,
|
| 931 |
+
877
|
| 932 |
+
],
|
| 933 |
+
"page_idx": 6
|
| 934 |
+
},
|
| 935 |
+
{
|
| 936 |
+
"type": "text",
|
| 937 |
+
"text": "This work was supported in part by the National Key Research and Development Program of China (Grant No.2020YFB1807201), in part by the National Natural Science Foundation of China (Grant No.62031019).",
|
| 938 |
+
"bbox": [
|
| 939 |
+
73,
|
| 940 |
+
883,
|
| 941 |
+
491,
|
| 942 |
+
946
|
| 943 |
+
],
|
| 944 |
+
"page_idx": 6
|
| 945 |
+
},
|
| 946 |
+
{
|
| 947 |
+
"type": "text",
|
| 948 |
+
"text": "REFERENCES",
|
| 949 |
+
"text_level": 1,
|
| 950 |
+
"bbox": [
|
| 951 |
+
665,
|
| 952 |
+
69,
|
| 953 |
+
761,
|
| 954 |
+
82
|
| 955 |
+
],
|
| 956 |
+
"page_idx": 6
|
| 957 |
+
},
|
| 958 |
+
{
|
| 959 |
+
"type": "list",
|
| 960 |
+
"sub_type": "ref_text",
|
| 961 |
+
"list_items": [
|
| 962 |
+
"[1] W. Saad, M. Bennis, and M. Chen, \"A vision of 6G wireless systems: Applications, trends, technologies, and open research problems,\" IEEE Network, vol. 34, no. 3, pp. 134-142, May 2020.",
|
| 963 |
+
"[2] M. Uusitalo, P. Rugeland, M. Boldi, E. C. Strinati, and Y. Zou, \"RFocus: Beamforming using thousands of passive antennas,\" in Proc. 17th USENIX Symposium on Networked Systems Design and Implementation (NSDI '20), Feb. 2020.",
|
| 964 |
+
"[3] I. F. Akyildiz and J. M. Jornet, “Realizing ultra-massive MIMO (1024×1024) communication in the (0.06–10) Terahertz band,” Nano Communication Networks, vol. 8, pp. 46–54, 2016.",
|
| 965 |
+
"[4] K. T. Selvan and R. Janaswamy, “Fraunhofer and Fresnel distances: Unified derivation for aperture antennas,” IEEE Antennas Propag. Mag., vol. 59, no. 4, pp. 12–15, Aug. 2017.",
|
| 966 |
+
"[5] M. Cui and L. Dai, \"Channel estimation for extremely large-scale MIMO: Far-field or near-field?\" IEEE Trans. Commun., vol. 70, no. 4, pp. 2663-2677, Jan. 2022.",
|
| 967 |
+
"[6] Y. Han, S. Jin, C. Wen, and X. Ma, \"Channel estimation for extremely large-scale massive MIMO systems,\" IEEE Wireless Commun. Lett., vol. 9, no. 5, pp. 633-637, May 2020.",
|
| 968 |
+
"[7] A. Singh, M. Andrello, E. Einarsson, N. Thawdarl, and J. M. Jornet, \"A hybrid intelligent reflecting surface with graphene-based control elements for THz communications,\" in Proc. 2020 IEEE 21st International Workshop on Signal Processing Advances in Wireless Communications (SPAWC), May 2020, pp. 1-5.",
|
| 969 |
+
"[8] N. J. Myers and R. W. Heath, \"Infocus: A spatial coding technique to mitigate misfocus in near-field los beamforming,\" IEEE Trans. Wireless Commun., vol. 21, no. 4, pp. 2193-2209, Apr. 2022.",
|
| 970 |
+
"[9] M. Cui, L. Dai, R. Schober, and L. Hanzo, “Near-field wideband beamforming for extremely large antenna array,” arXiv preprint arXiv:2109.10054, Sep. 2021.",
|
| 971 |
+
"[10] D. A. Miller, \"Waves, modes, communications, and optics: A tutorial,\" Adv. in Opt. and Photon., vol. 11, no. 3, pp. 679-825, Sep. 2019.",
|
| 972 |
+
"[11] N. Decarli and D. Dardari, \"Communication modes with large intelligent surfaces in the near field,\" IEEE Access, vol. 9, pp. 165-648-165-666, Sep. 2021.",
|
| 973 |
+
"[12] Z. Wu, M. Cui, Z. Zhang, and L. Dai, \"Distance-aware precoding for near-field capacity improvement in XL-MIMO,\" in Proc. 2022 IEEE 95th Vehicular Technology Conference (VTC2022-Spring), 2022, pp. 1-5.",
|
| 974 |
+
"[13] L. Yan, Y. Chen, C. Han, and J. Yuan, \"Joint inter-path and intrapath multiplexing for terahertz widely-spaced multi-subarray hybrid beamforming systems,\" IEEE Trans. Commun., vol. 70, no. 2, pp. 1391-1406, Feb. 2022.",
|
| 975 |
+
"[14] H. Zhang, N. Shlezinger, F. Guidi, D. Dardari, M. F. Imani, and Y. C. Eldar, \"Beam focusing for near-field multi-user MIMO communications,\" IEEE Trans. Wireless Commun., 2022.",
|
| 976 |
+
"[15] K. Dovelos, S. D. Assimonis, H. Quoc Ngo, B. Bellalta, and M. Matthaiou, \"Intelligent reflecting surfaces at Terahertz Bands: Channel modeling and analysis,\" in Proc. 2021 IEEE International Conference on Communications Workshops (ICC Workshops), Jun. 2021, pp. 1-6."
|
| 977 |
+
],
|
| 978 |
+
"bbox": [
|
| 979 |
+
506,
|
| 980 |
+
89,
|
| 981 |
+
921,
|
| 982 |
+
646
|
| 983 |
+
],
|
| 984 |
+
"page_idx": 6
|
| 985 |
+
},
|
| 986 |
+
{
|
| 987 |
+
"type": "text",
|
| 988 |
+
"text": "BIOGRAPHIES",
|
| 989 |
+
"text_level": 1,
|
| 990 |
+
"bbox": [
|
| 991 |
+
661,
|
| 992 |
+
664,
|
| 993 |
+
764,
|
| 994 |
+
676
|
| 995 |
+
],
|
| 996 |
+
"page_idx": 6
|
| 997 |
+
},
|
| 998 |
+
{
|
| 999 |
+
"type": "text",
|
| 1000 |
+
"text": "Mingyao Cui is a M.S. researcher in BNrist from Tsinghua University, Beijing, China.",
|
| 1001 |
+
"bbox": [
|
| 1002 |
+
504,
|
| 1003 |
+
681,
|
| 1004 |
+
919,
|
| 1005 |
+
712
|
| 1006 |
+
],
|
| 1007 |
+
"page_idx": 6
|
| 1008 |
+
},
|
| 1009 |
+
{
|
| 1010 |
+
"type": "text",
|
| 1011 |
+
"text": "Zidong Wu is a Ph.D. researcher in BNRist from Tsinghua University, Beijing, China.",
|
| 1012 |
+
"bbox": [
|
| 1013 |
+
504,
|
| 1014 |
+
727,
|
| 1015 |
+
919,
|
| 1016 |
+
758
|
| 1017 |
+
],
|
| 1018 |
+
"page_idx": 6
|
| 1019 |
+
},
|
| 1020 |
+
{
|
| 1021 |
+
"type": "text",
|
| 1022 |
+
"text": "Yu Lu is a Ph.D. researcher in BNRist from Tsinghua University, Beijing, China.",
|
| 1023 |
+
"bbox": [
|
| 1024 |
+
504,
|
| 1025 |
+
772,
|
| 1026 |
+
919,
|
| 1027 |
+
803
|
| 1028 |
+
],
|
| 1029 |
+
"page_idx": 6
|
| 1030 |
+
},
|
| 1031 |
+
{
|
| 1032 |
+
"type": "text",
|
| 1033 |
+
"text": "Xiuhong Wei is a M.S. researcher in BNRist from Tsinghua University, Beijing, China.",
|
| 1034 |
+
"bbox": [
|
| 1035 |
+
504,
|
| 1036 |
+
816,
|
| 1037 |
+
919,
|
| 1038 |
+
848
|
| 1039 |
+
],
|
| 1040 |
+
"page_idx": 6
|
| 1041 |
+
},
|
| 1042 |
+
{
|
| 1043 |
+
"type": "text",
|
| 1044 |
+
"text": "Linglong Dai is an associate professor at Tsinghua University. His current research interests include RIS, massive MIMO, mmWave and THz communications, and machine learning for wireless communications. He has received six conference best paper awards and four journal best paper awards.",
|
| 1045 |
+
"bbox": [
|
| 1046 |
+
503,
|
| 1047 |
+
863,
|
| 1048 |
+
921,
|
| 1049 |
+
939
|
| 1050 |
+
],
|
| 1051 |
+
"page_idx": 6
|
| 1052 |
+
},
|
| 1053 |
+
{
|
| 1054 |
+
"type": "page_number",
|
| 1055 |
+
"text": "7",
|
| 1056 |
+
"bbox": [
|
| 1057 |
+
911,
|
| 1058 |
+
30,
|
| 1059 |
+
919,
|
| 1060 |
+
40
|
| 1061 |
+
],
|
| 1062 |
+
"page_idx": 6
|
| 1063 |
+
}
|
| 1064 |
+
]
|
2203.16xxx/2203.16318/bdf4203f-60cc-4b35-a342-da2e1d7ac014_model.json
ADDED
|
@@ -0,0 +1,1281 @@
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "page_number",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.912,
|
| 7 |
+
0.031,
|
| 8 |
+
0.921,
|
| 9 |
+
0.041
|
| 10 |
+
],
|
| 11 |
+
"angle": 0,
|
| 12 |
+
"content": "1"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "aside_text",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.023,
|
| 18 |
+
0.275,
|
| 19 |
+
0.06,
|
| 20 |
+
0.708
|
| 21 |
+
],
|
| 22 |
+
"angle": 270,
|
| 23 |
+
"content": "arXiv:2203.16318v6 [cs.IT] 15 Sep 2022"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "title",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.088,
|
| 29 |
+
0.071,
|
| 30 |
+
0.914,
|
| 31 |
+
0.138
|
| 32 |
+
],
|
| 33 |
+
"angle": 0,
|
| 34 |
+
"content": "Near-Field Communications for 6G: Fundamentals, Challenges, Potentials, and Future Directions"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.193,
|
| 40 |
+
0.148,
|
| 41 |
+
0.804,
|
| 42 |
+
0.165
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "Mingyao Cui, Zidong Wu, Yu Lu, Xiuhong Wei, and Linglong Dai, Fellow, IEEE"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.074,
|
| 51 |
+
0.221,
|
| 52 |
+
0.493,
|
| 53 |
+
0.513
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": "Abstract—Extremely large-scale antenna array (ELAA) is a common feature of several key candidate technologies for the sixth generation (6G) mobile networks, such as ultra-massive multiple-input-multiple-output (UM-MIMO), cell-free massive MIMO, reconfigurable intelligent surface (RIS), and terahertz communications. Since the number of antennas is very large for ELAA, the electromagnetic radiation field needs to be modeled by near-field spherical waves, which differs from the conventional planar-wave-based radiation model of 5G massive MIMO. As a result, near-field communications will become essential in 6G wireless networks. In this article, we systematically investigate the emerging near-field communication techniques. Firstly, we present the fundamentals of near-field communications and the metric to determine the near-field ranges in typical communication scenarios. Then, we investigate recent studies specific to near-field communications by classifying them into two categories, i.e., techniques addressing the challenges and those exploiting the potentials in near-field regions. Their principles, recent progress, pros and cons are discussed. More importantly, several open problems and future research directions for near-field communications are pointed out. We believe that this article would inspire more innovations for this important research topic of near-field communications for 6G."
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.075,
|
| 62 |
+
0.518,
|
| 63 |
+
0.492,
|
| 64 |
+
0.544
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "Index Terms—6G, ELAA, near-field communications, spherical wavefront."
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "title",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.217,
|
| 73 |
+
0.555,
|
| 74 |
+
0.352,
|
| 75 |
+
0.568
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": "I. INTRODUCTION"
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.074,
|
| 84 |
+
0.574,
|
| 85 |
+
0.493,
|
| 86 |
+
0.831
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "The sixth generation (6G) mobile networks are promising to empower emerging applications, such as holographic video, digital replica, etc. For fulfilling these visions, tremendous research efforts have been endeavored to develop new wireless technologies to meet the key performance indicators (KPIs) of 6G, which are much superior to those of 5G [1]. For instance, thanks to the enormous spatial multiplexing and beamforming gain, ultra-massive multiple-input-multiple-output (UM-MIMO) and cell-free massive MIMO (CF-MIMO) are expected to accomplish a 10-fold increase in the spectral efficiency for 6G [1]. Besides, by dynamically manipulating the wireless environment through thousands of antennas, reconfigurable intelligent surface (RIS) brings new possibilities for capacity and coverage enhancement [2]. Moreover, millimeter-wave (mmWave) and terahertz (THz) UM-MIMO can offer abundant spectral resources for supporting \\(100\\times\\) peak data rate improvement (e.g., Tbps) in 6G mobile communications"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.074,
|
| 95 |
+
0.842,
|
| 96 |
+
0.493,
|
| 97 |
+
0.887
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": "All authors are with the Beijing National Research Center for Information Science and Technology (BNRist) as well as the Department of Electronic Engineering, Tsinghua University, Beijing 100084, China (e-mails: {cmy20, wuzd19, y-lu19, weixh19} @ mails.tsinghua.edu.cn, daill@tsinghua.edu.cn)."
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.074,
|
| 106 |
+
0.887,
|
| 107 |
+
0.493,
|
| 108 |
+
0.945
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "© 2022 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works."
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.503,
|
| 117 |
+
0.22,
|
| 118 |
+
0.925,
|
| 119 |
+
0.326
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": "[3]. Despite being suitable for different application scenarios with various KPIs, all the above technologies, including UM-MIMO, CF-MIMO, RIS, and THz communications, share a common feature: They all usually require a very large number of antennas to attain their expected performance, i.e., extremely large-scale antenna arrays (ELAA) are essential to these different candidate technologies for 6G."
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"bbox": [
|
| 127 |
+
0.503,
|
| 128 |
+
0.326,
|
| 129 |
+
0.923,
|
| 130 |
+
0.537
|
| 131 |
+
],
|
| 132 |
+
"angle": 0,
|
| 133 |
+
"content": "Compared with massive MIMO, the key technology in 5G networks, ELAA for 6G not only means a sharp increase in the number of antennas but also results in a fundamental change of the electromagnetic (EM) characteristics. The EM radiation field can generally be divided into far-field and radiation near-field regions. The boundary between these two regions is determined by the Rayleigh distance, also called the Fraunhofer distance [4]. Rayleigh distance is proportional to the product of the square of array aperture and carrier frequency [4]. Outside the Rayleigh distance, it is the far-field region, where the EM field can be approximately modeled by planar waves. Within the Rayleigh distance, the near-field propagation becomes dominant, where the EM field has to be accurately modeled by spherical waves."
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"bbox": [
|
| 138 |
+
0.503,
|
| 139 |
+
0.537,
|
| 140 |
+
0.923,
|
| 141 |
+
0.793
|
| 142 |
+
],
|
| 143 |
+
"angle": 0,
|
| 144 |
+
"content": "Since the number of antennas is not very large in 5G massive MIMO systems, the Rayleigh distance of up to several meters is negligible. Thus, existing 5G communications are mainly developed from far-field communication theories and techniques. However, with the significant increase of the antenna number and carrier frequency in future 6G systems, the near-field region of ELAA will expand by orders of magnitude. For instance, a 3200-element ELAA at \\(2.4\\mathrm{GHz}\\) was developed in [2]. With an array size of \\(2\\mathrm{m} \\times 3\\mathrm{m}\\), its Rayleigh distance is about 200 meters, which is larger than the radius of a typical 5G cell. Accordingly, near-field communications will become essential components in future 6G mobile networks where the spherical propagation model needs to be considered, which is obviously different from the existing far-field 5G systems. Unfortunately, the near-field propagation introduces several new challenges in ELAA systems, which should be identified and addressed to empower 6G communications."
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "text",
|
| 148 |
+
"bbox": [
|
| 149 |
+
0.504,
|
| 150 |
+
0.793,
|
| 151 |
+
0.925,
|
| 152 |
+
0.837
|
| 153 |
+
],
|
| 154 |
+
"angle": 0,
|
| 155 |
+
"content": "In this article, we systematically investigate the recent nearfield communication techniques for 6G. The key features of this article can be summarized as follows:"
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"type": "text",
|
| 159 |
+
"bbox": [
|
| 160 |
+
0.52,
|
| 161 |
+
0.84,
|
| 162 |
+
0.925,
|
| 163 |
+
0.947
|
| 164 |
+
],
|
| 165 |
+
"angle": 0,
|
| 166 |
+
"content": "- To begin with, the fundamental differences between far-field and near-field communications are explained. Comparatively speaking, the planar wavefront in the farfield can steer the signal energy towards a specific physical angle. On the contrary, the near-field spherical wavefront achieves energy focusing on both angle and distance domain. Moreover, the Rayleigh distance that quantifies"
|
| 167 |
+
}
|
| 168 |
+
],
|
| 169 |
+
[
|
| 170 |
+
{
|
| 171 |
+
"type": "page_number",
|
| 172 |
+
"bbox": [
|
| 173 |
+
0.912,
|
| 174 |
+
0.031,
|
| 175 |
+
0.921,
|
| 176 |
+
0.041
|
| 177 |
+
],
|
| 178 |
+
"angle": 0,
|
| 179 |
+
"content": "2"
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "text",
|
| 183 |
+
"bbox": [
|
| 184 |
+
0.107,
|
| 185 |
+
0.07,
|
| 186 |
+
0.493,
|
| 187 |
+
0.175
|
| 188 |
+
],
|
| 189 |
+
"angle": 0,
|
| 190 |
+
"content": "the boundary between far-field and near-field regions is introduced, and its derivation is explained in detail. Based on this derivation, we further extend the classical Rayleigh distance, for MIMO channels with a direct base station (BS)-user equipment (UE) link, to the one for RIS-aided communications, where a cascaded channel is utilized for presenting the BS-RIS-UE link."
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"type": "text",
|
| 194 |
+
"bbox": [
|
| 195 |
+
0.093,
|
| 196 |
+
0.176,
|
| 197 |
+
0.493,
|
| 198 |
+
0.385
|
| 199 |
+
],
|
| 200 |
+
"angle": 0,
|
| 201 |
+
"content": "- Additionally, we investigate the emerging near-field communication techniques by classifying them into two types, i.e., techniques addressing the challenges and those exploiting the potentials in near-field regions. On the one hand, as most techniques specific to far-field often suffer from a severe performance loss in the near-field area, the first type of techniques aims to compensate for this loss, such as near-field channel estimation and beamforming. On the other hand, the second kind of study has revealed that the nature of near-field spherical wavefront can also be exploited to provide new possibilities for capacity enhancement and accessibility improvement. The principles, recent progress, pros and cons of these two categories of research are discussed in detail."
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"type": "text",
|
| 205 |
+
"bbox": [
|
| 206 |
+
0.092,
|
| 207 |
+
0.387,
|
| 208 |
+
0.493,
|
| 209 |
+
0.508
|
| 210 |
+
],
|
| 211 |
+
"angle": 0,
|
| 212 |
+
"content": "- Finally, several open problems and future research directions for near-field communications are pointed out. For example, the improvement of Rayleigh distance considering various communication metrics need to be analyzed, artificial intelligence (AI) is expected to enable high-performance near-field transmissions with low complexity, and hybrid far- and near-field communications also require in-depth study."
|
| 213 |
+
},
|
| 214 |
+
{
|
| 215 |
+
"type": "list",
|
| 216 |
+
"bbox": [
|
| 217 |
+
0.092,
|
| 218 |
+
0.176,
|
| 219 |
+
0.493,
|
| 220 |
+
0.508
|
| 221 |
+
],
|
| 222 |
+
"angle": 0,
|
| 223 |
+
"content": null
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"type": "title",
|
| 227 |
+
"bbox": [
|
| 228 |
+
0.083,
|
| 229 |
+
0.525,
|
| 230 |
+
0.484,
|
| 231 |
+
0.539
|
| 232 |
+
],
|
| 233 |
+
"angle": 0,
|
| 234 |
+
"content": "II. FUNDAMENTALS OF NEAR-FIELD COMMUNICATIONS"
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "text",
|
| 238 |
+
"bbox": [
|
| 239 |
+
0.075,
|
| 240 |
+
0.544,
|
| 241 |
+
0.493,
|
| 242 |
+
0.605
|
| 243 |
+
],
|
| 244 |
+
"angle": 0,
|
| 245 |
+
"content": "In this section, we first present the differences between farfield and near-field communications. Then, we will identify the principle to determine the boundary between the far-field and near-field regions in several typical application scenarios."
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "title",
|
| 249 |
+
"bbox": [
|
| 250 |
+
0.074,
|
| 251 |
+
0.624,
|
| 252 |
+
0.493,
|
| 253 |
+
0.639
|
| 254 |
+
],
|
| 255 |
+
"angle": 0,
|
| 256 |
+
"content": "A. Far-Field Communications vs. Near-Field Communications"
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "text",
|
| 260 |
+
"bbox": [
|
| 261 |
+
0.074,
|
| 262 |
+
0.643,
|
| 263 |
+
0.493,
|
| 264 |
+
0.869
|
| 265 |
+
],
|
| 266 |
+
"angle": 0,
|
| 267 |
+
"content": "The critical characteristics of far-field and near-field communications are shown in Fig. 1. We consider an uplink communication scenario, while the discussions in this article are also valid for downlink scenarios. The BS is equipped with an ELAA. A widely adopted metric to determine the boundary between far-field and near-field regions is the Rayleigh distance, also called the Fraunhofer distance [4]. When the communication distance between the BS and UE (BS-UE distance) is larger than the Rayleigh distance, the UE is located in the far-field region of the BS. Then, EM waves impinging on the BS array can be approximately modeled as planar waves. By contrast, when the BS-UE distance is shorter than the Rayleigh distance, the UE is located in the near-field region of the BS. In this region, EM waves impinging on the BS array must be accurately modeled as spherical waves [5]."
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "text",
|
| 271 |
+
"bbox": [
|
| 272 |
+
0.074,
|
| 273 |
+
0.87,
|
| 274 |
+
0.493,
|
| 275 |
+
0.947
|
| 276 |
+
],
|
| 277 |
+
"angle": 0,
|
| 278 |
+
"content": "More precisely, the planar wave is a long-distance approximation of the spherical wave. In far-field regions, the phase of EM waves can be elegantly approximated by a linear function of the antenna index through Taylor expansion. This concise linear phase forms a planar wavefront only related to an incident"
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "image",
|
| 282 |
+
"bbox": [
|
| 283 |
+
0.51,
|
| 284 |
+
0.07,
|
| 285 |
+
0.92,
|
| 286 |
+
0.267
|
| 287 |
+
],
|
| 288 |
+
"angle": 0,
|
| 289 |
+
"content": null
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "image_caption",
|
| 293 |
+
"bbox": [
|
| 294 |
+
0.504,
|
| 295 |
+
0.275,
|
| 296 |
+
0.924,
|
| 297 |
+
0.328
|
| 298 |
+
],
|
| 299 |
+
"angle": 0,
|
| 300 |
+
"content": "Fig. 1: Far-field planar wavefront vs. near-field spherical wavefront. The plots at the bottom illustrate the normalized received signal energy in the physical space achieved by near-field beamfocusing (bottom left) and far-field beamsteering (bottom right)."
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "text",
|
| 304 |
+
"bbox": [
|
| 305 |
+
0.503,
|
| 306 |
+
0.343,
|
| 307 |
+
0.922,
|
| 308 |
+
0.584
|
| 309 |
+
],
|
| 310 |
+
"angle": 0,
|
| 311 |
+
"content": "angle. Accordingly, by the utilization of planar wavefronts, far-field beamforming can steer the beam energy towards a specific angle over different distances, which is also termed as beamsteering, as shown in the bottom right figure of Fig. 1. Unfortunately, this concise linear phase fails to thoroughly reveal the information of spherical waves. In near-field regions, the phase of spherical waves should be accurately derived based on the physical geometry, which is a non-linear function of the antenna index. The information of the incident angle and distance in each path between BS and UE is embedded in this non-linear phase. Exploiting the extra distance information of spherical wavefronts, near-field beamforming is able to focus the beam energy on a specific location, where energy focusing on both the angle and distance domain is achievable, as shown in the bottom left figure of Fig. 1. Owing to this property, beamforming in the near-field is also called beamfocusing."
|
| 312 |
+
},
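The non-linear phase just described, and its far-field linearization, can be compared directly; the worst-case residual across the array is the phase discrepancy that the Rayleigh distance (next subsection) pins at π/8. A sketch with a broadside source and an assumed 256-element ULA at 28 GHz:

```python
import numpy as np

n = 256
wavelength = 3e8 / 28e9
x = (np.arange(n) - (n - 1) / 2) * wavelength / 2
rayleigh = 2 * (x[-1] - x[0])**2 / wavelength
k = 2 * np.pi / wavelength

for r in (rayleigh, rayleigh / 4, rayleigh / 16):
    exact = k * np.sqrt(r**2 + x**2)   # spherical-wave phase, broadside source
    planar = k * r                     # planar-wave approximation (constant here)
    residual = np.max(np.abs(exact - planar))
    print(f"r = {r:7.1f} m: max phase discrepancy {residual:.3f} rad "
          f"(pi/8 = {np.pi / 8:.3f})")
```

At r equal to the Rayleigh distance the residual sits at roughly π/8, and it grows approximately quadratically as the source moves closer.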
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"bbox": [
|
| 316 |
+
0.504,
|
| 317 |
+
0.585,
|
| 318 |
+
0.922,
|
| 319 |
+
0.645
|
| 320 |
+
],
|
| 321 |
+
"angle": 0,
|
| 322 |
+
"content": "The differences between far-field planar wavefronts and near-field spherical wavefronts bring several challenges and potentials to wireless communications, which will be detailed in the following sections."
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "title",
|
| 326 |
+
"bbox": [
|
| 327 |
+
0.505,
|
| 328 |
+
0.668,
|
| 329 |
+
0.655,
|
| 330 |
+
0.683
|
| 331 |
+
],
|
| 332 |
+
"angle": 0,
|
| 333 |
+
"content": "B. Rayleigh Distance"
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "text",
|
| 337 |
+
"bbox": [
|
| 338 |
+
0.503,
|
| 339 |
+
0.688,
|
| 340 |
+
0.922,
|
| 341 |
+
0.947
|
| 342 |
+
],
|
| 343 |
+
"angle": 0,
|
| 344 |
+
"content": "The most crucial premise for near-field communications is quantifying the boundary between the far-field and near-field regions, i.e., the Rayleigh distance. Generally, the classical Rayleigh distance is proportional to the square of the array aperture and the inverse of the wavelength. Its derivation can be summarized as follows [4]. The true phase of the EM wave impinging on a BS antenna has to be calculated based on the accurate spherical wave model. In far-field scenarios, this phase is usually approximated by its first-order Taylor expansion based on the planar wavefront model. This approximation results in a phase discrepancy, which increases when the distance decreases. When the largest phase discrepancy among all BS and UE antennas reaches \\(\\pi /8\\), the distance between the BS array center and the UE array center is defined as the Rayleigh distance. Accordingly, if the communication distance is shorter than the Rayleigh distance, the largest phase discrepancy will be larger than \\(\\pi /8\\). In this case, the far-field"
|
| 345 |
+
}
|
| 346 |
+
],
|
| 347 |
+
[
|
| 348 |
+
{
|
| 349 |
+
"type": "page_number",
|
| 350 |
+
"bbox": [
|
| 351 |
+
0.912,
|
| 352 |
+
0.031,
|
| 353 |
+
0.921,
|
| 354 |
+
0.041
|
| 355 |
+
],
|
| 356 |
+
"angle": 0,
|
| 357 |
+
"content": "3"
|
| 358 |
+
},
|
| 359 |
+
{
|
| 360 |
+
"type": "image",
|
| 361 |
+
"bbox": [
|
| 362 |
+
0.116,
|
| 363 |
+
0.066,
|
| 364 |
+
0.898,
|
| 365 |
+
0.245
|
| 366 |
+
],
|
| 367 |
+
"angle": 0,
|
| 368 |
+
"content": null
|
| 369 |
+
},
|
| 370 |
+
{
|
| 371 |
+
"type": "image_caption",
|
| 372 |
+
"bbox": [
|
| 373 |
+
0.356,
|
| 374 |
+
0.249,
|
| 375 |
+
0.642,
|
| 376 |
+
0.263
|
| 377 |
+
],
|
| 378 |
+
"angle": 0,
|
| 379 |
+
"content": "Fig. 2: Near-field ranges for typical scenarios."
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"type": "text",
|
| 383 |
+
"bbox": [
|
| 384 |
+
0.074,
|
| 385 |
+
0.277,
|
| 386 |
+
0.492,
|
| 387 |
+
0.307
|
| 388 |
+
],
|
| 389 |
+
"angle": 0,
|
| 390 |
+
"content": "approximation becomes inaccurate, and thus the near-field propagation needs to be utilized."
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"type": "text",
|
| 394 |
+
"bbox": [
|
| 395 |
+
0.074,
|
| 396 |
+
0.308,
|
| 397 |
+
0.493,
|
| 398 |
+
0.46
|
| 399 |
+
],
|
| 400 |
+
"angle": 0,
|
| 401 |
+
"content": "Based on this definition, the near-field ranges for SIMO, MISO, and MIMO communication systems can be obtained. As illustrated in Fig. 2, the near-field range of SIMO/MISO scenarios is precisely determined by the classical Rayleigh distance, which is proportional to the square of BS array aperture. For the MIMO scenario, since ELAs are employed at two sides of the BS-UE link, both the BS array aperture and the UE array aperture contribute to the Rayleigh distance, i.e., the near-field range is proportional to the square of the sum of BS array aperture and UE array aperture."
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"type": "text",
|
| 405 |
+
"bbox": [
|
| 406 |
+
0.074,
|
| 407 |
+
0.461,
|
| 408 |
+
0.495,
|
| 409 |
+
0.687
|
| 410 |
+
],
|
| 411 |
+
"angle": 0,
|
| 412 |
+
"content": "Interestingly enough, we further extend the conventional Rayleigh distance derived in SIMO/MISO/MIMO systems to that in RIS-aided communication systems, as shown in Fig. 2. Unlike SIMO/MISO/MIMO channels with a direct BS-UE link, the cascaded BS-RIS-UE channel in RIS systems comprises the BS-RIS and RIS-UE links. Therefore, when calculating phase discrepancy, the BS-RIS distance and the RIS-UE distance need to be added. Then, capturing the largest phase discrepancy of \\(\\pi /8\\), the near-field range in RIS systems is determined by the harmonic mean of the BS-RIS distance and the RIS-UE distance, as shown in Fig. 2. It can be further implied from Fig. 2 that, as long as any of these two distances is shorter than the Rayleigh distance, RIS-aided communication is operating in the near-field area. Therefore, near-field propagation is more likely to happen in RIS systems."
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"type": "text",
|
| 416 |
+
"bbox": [
|
| 417 |
+
0.074,
|
| 418 |
+
0.687,
|
| 419 |
+
0.496,
|
| 420 |
+
0.854
|
| 421 |
+
],
|
| 422 |
+
"angle": 0,
|
| 423 |
+
"content": "With the dramatically increased number of antennas and carrier frequency, the near-field range of ELAA considerably expands. For instance, we have recently fabricated a 0.36-meter-aperture ELAA at \\(28\\mathrm{GHz}\\). If it is employed in SIMO/MISO scenarios, its near-field range is about 25 meters. When both transmitter and receiver are equipped with this array, the near-field range becomes 100 meters. Moreover, if this ELAA works as a RIS with a BS-RIS distance of 50 meters, the near-field propagation should be accepted once the RIS-UE distance is shorter than 50 meters. In summary, near-field communications come to be an indispensable part of future 6G."
|
| 424 |
+
},
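The ranges quoted above follow directly from the classical formula; a one-function check using the paper's own numbers (for MIMO the two apertures add, as stated earlier in this section):

```python
def rayleigh_distance(aperture_m, frequency_hz):
    """Classical Rayleigh (Fraunhofer) distance 2 * D^2 / lambda."""
    wavelength = 3e8 / frequency_hz
    return 2 * aperture_m**2 / wavelength

# The 0.36 m ELAA at 28 GHz from the paragraph above:
print(rayleigh_distance(0.36, 28e9))         # ~24 m: "about 25 meters" (SIMO/MISO)
print(rayleigh_distance(0.36 + 0.36, 28e9))  # ~97 m: "100 meters" (MIMO, apertures add)
```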
|
| 425 |
+
{
|
| 426 |
+
"type": "title",
|
| 427 |
+
"bbox": [
|
| 428 |
+
0.09,
|
| 429 |
+
0.876,
|
| 430 |
+
0.478,
|
| 431 |
+
0.89
|
| 432 |
+
],
|
| 433 |
+
"angle": 0,
|
| 434 |
+
"content": "III. CHALLENGES OF NEAR-FIELD COMMUNICATIONS"
|
| 435 |
+
},
|
| 436 |
+
{
|
| 437 |
+
"type": "text",
|
| 438 |
+
"bbox": [
|
| 439 |
+
0.075,
|
| 440 |
+
0.899,
|
| 441 |
+
0.494,
|
| 442 |
+
0.947
|
| 443 |
+
],
|
| 444 |
+
"angle": 0,
|
| 445 |
+
"content": "The near-field propagation causes several challenges to wireless communications, i.e., existing 5G transmission methods specific for far-field suffer from severe performance loss in"
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"type": "image",
|
| 449 |
+
"bbox": [
|
| 450 |
+
0.53,
|
| 451 |
+
0.279,
|
| 452 |
+
0.883,
|
| 453 |
+
0.505
|
| 454 |
+
],
|
| 455 |
+
"angle": 0,
|
| 456 |
+
"content": null
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"type": "image_caption",
|
| 460 |
+
"bbox": [
|
| 461 |
+
0.551,
|
| 462 |
+
0.51,
|
| 463 |
+
0.874,
|
| 464 |
+
0.525
|
| 465 |
+
],
|
| 466 |
+
"angle": 0,
|
| 467 |
+
"content": "Fig. 3: Near-field codebook with non-uniform grids."
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"type": "text",
|
| 471 |
+
"bbox": [
|
| 472 |
+
0.504,
|
| 473 |
+
0.548,
|
| 474 |
+
0.922,
|
| 475 |
+
0.579
|
| 476 |
+
],
|
| 477 |
+
"angle": 0,
|
| 478 |
+
"content": "near-field areas. Technologies recently developed for addressing these challenges are discussed in this section."
|
| 479 |
+
},
|
| 480 |
+
{
|
| 481 |
+
"type": "title",
|
| 482 |
+
"bbox": [
|
| 483 |
+
0.505,
|
| 484 |
+
0.606,
|
| 485 |
+
0.744,
|
| 486 |
+
0.62
|
| 487 |
+
],
|
| 488 |
+
"angle": 0,
|
| 489 |
+
"content": "A. Near-Field Channel Estimation"
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"type": "text",
|
| 493 |
+
"bbox": [
|
| 494 |
+
0.503,
|
| 495 |
+
0.628,
|
| 496 |
+
0.925,
|
| 497 |
+
0.947
|
| 498 |
+
],
|
| 499 |
+
"angle": 0,
|
| 500 |
+
"content": "Challenge: Accurate channel estimation is required to attain the expected performance gain of ELAA. As the number of channel paths is usually much smaller than that of antennas, channel estimation methods with low pilot overhead generally design suitable codebooks to transform the channel into a sparse representation. For the far-field codebook, each codeword of the codebook corresponds to a planar wave associated with one incident angle. Ideally, each far-field path can be represented by only one codeword. With this far-field codebook, the angle-domain representation of the channel can be obtained, and it is usually sparse due to the limited paths. Then, beam training and compressed sensing (CS) methods are applied to estimate far-field channels with low pilot overhead accurately. However, this far-field planar-wave codebook mismatches the actual near-field spherical-wave channel. This mismatch induces that a single near-field path should be jointly described by multiple codewords of the far-field codebook. Accordingly, the near-field angle-domain channel is not sparse anymore, which inevitably leads to the degradation of channel estimation accuracy. Therefore, near-field codebooks suitable for near-field channels need to be carefully created."
|
| 501 |
+
}
|
| 502 |
+
],
|
| 503 |
+
[
|
| 504 |
+
{
|
| 505 |
+
"type": "page_number",
|
| 506 |
+
"bbox": [
|
| 507 |
+
0.912,
|
| 508 |
+
0.032,
|
| 509 |
+
0.921,
|
| 510 |
+
0.041
|
| 511 |
+
],
|
| 512 |
+
"angle": 0,
|
| 513 |
+
"content": "4"
|
| 514 |
+
},
|
| 515 |
+
{
|
| 516 |
+
"type": "image",
|
| 517 |
+
"bbox": [
|
| 518 |
+
0.175,
|
| 519 |
+
0.07,
|
| 520 |
+
0.825,
|
| 521 |
+
0.301
|
| 522 |
+
],
|
| 523 |
+
"angle": 0,
|
| 524 |
+
"content": null
|
| 525 |
+
},
|
| 526 |
+
{
|
| 527 |
+
"type": "image_caption",
|
| 528 |
+
"bbox": [
|
| 529 |
+
0.074,
|
| 530 |
+
0.305,
|
| 531 |
+
0.924,
|
| 532 |
+
0.345
|
| 533 |
+
],
|
| 534 |
+
"angle": 0,
|
| 535 |
+
"content": "Fig. 4: This figure illustrates the far-field beam split effect (left) and the near-field beam split effect (right). Far-field beam split makes beams at different frequencies transmit towards different directions, while near-field beam split makes beams at different frequencies be focused on various locations."
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"type": "text",
|
| 539 |
+
"bbox": [
|
| 540 |
+
0.074,
|
| 541 |
+
0.36,
|
| 542 |
+
0.494,
|
| 543 |
+
0.737
|
| 544 |
+
],
|
| 545 |
+
"angle": 0,
|
| 546 |
+
"content": "Recent progress: Some recent works have been endeavored to design near-field codebooks utilizing spherical wavefronts [5], [6]. In [6], the entire two-dimensional physical space is uniformly divided into multiple grids. Each grid is associated with a near-field array response vector, and all of these vectors construct the near-field codebook. With this codebook, the joint angle-distance information of each near-field path is extracted. Then, the near-field channel can be estimated by CS methods with low pilot overhead. However, with the decrease of BSUE distance, the near-field propagation becomes dominant, and the distance information gradually becomes more crucial. Therefore, we can conceive the intuition that the grids should be sparse far away from the ELAA but dense near the ELAA. Without considering this intuition, the codebook in [6] is hard to attain satisfactory channel estimation performance in the entire near-field region. To this end, by minimizing the largest coherence among codewords in the near-field codebook, authors in [5] mathematically prove this intuition, i.e., the angle space could be uniformly divided, while the distance space should be non-uniformly divided. As shown in Fig. 3, the shorter the distance, the denser the grid. With the help of this non-uniform codebook, a polar-domain sparse channel representation and corresponding CS-based algorithms are proposed in [5] to accomplish accurate channel estimation in both near- and farfield areas."
|
| 547 |
+
},
|
| 548 |
+
{
|
| 549 |
+
"type": "title",
|
| 550 |
+
"bbox": [
|
| 551 |
+
0.076,
|
| 552 |
+
0.759,
|
| 553 |
+
0.254,
|
| 554 |
+
0.774
|
| 555 |
+
],
|
| 556 |
+
"angle": 0,
|
| 557 |
+
"content": "B. Near-Field Beam Split"
|
| 558 |
+
},
|
| 559 |
+
{
|
| 560 |
+
"type": "text",
|
| 561 |
+
"bbox": [
|
| 562 |
+
0.074,
|
| 563 |
+
0.779,
|
| 564 |
+
0.496,
|
| 565 |
+
0.946
|
| 566 |
+
],
|
| 567 |
+
"angle": 0,
|
| 568 |
+
"content": "Challenge: In THz wideband systems, ELAA might encounter a beam split phenomenon, also known as beam squint and spatial-wideband effect. Existing THz beamforming architecture often employs analog phase-shifters (PSs) [7], which usually tune the same phase shift for signals at different frequencies. Nonetheless, the actual phase of the EM wave is the product of the signal propagation delay and the frequency-dependent wavenumber. As a result, the signal propagation delay can be compensated through a phase shift adequately only for a narrow band signal. Phase errors are introduced for the other frequencies, thus causing the beam split effect."
|
| 569 |
+
},
|
| 570 |
+
{
|
| 571 |
+
"type": "text",
|
| 572 |
+
"bbox": [
|
| 573 |
+
0.504,
|
| 574 |
+
0.36,
|
| 575 |
+
0.922,
|
| 576 |
+
0.39
|
| 577 |
+
],
|
| 578 |
+
"angle": 0,
|
| 579 |
+
"content": "In fact, the impact of beam split on far-field and near-field propagations also differs."
|
| 580 |
+
},
|
| 581 |
+
{
|
| 582 |
+
"type": "text",
|
| 583 |
+
"bbox": [
|
| 584 |
+
0.503,
|
| 585 |
+
0.393,
|
| 586 |
+
0.924,
|
| 587 |
+
0.634
|
| 588 |
+
],
|
| 589 |
+
"angle": 0,
|
| 590 |
+
"content": "In far-field, beam split leads to the fact that beams at different frequencies are transmitting towards different angles, as shown in the left figure of Fig. 4. For near-field beam split, however, beams are focused at both different angles and various distances due to the split of spherical waves, as shown in the right figure of Fig. 4. Both far-field and nearfield beam splits severely reduce the received signal energy of frequency components misaligned with the user location. Over the years, extensive works have been proposed to mitigate farfield beam split by tuning frequency-dependent phase shifts with planar wavefronts through true-time-delay-based (TTD-based) beamforming instead of PS-based beamforming. Unfortunately, owing to the discrepancy between planar and spherical waves, these schemes addressing the far-field beam split no longer work well in the near-field, posing challenges to THz ELAA communications."
|
| 591 |
+
},
|
| 592 |
+
{
|
| 593 |
+
"type": "text",
|
| 594 |
+
"bbox": [
|
| 595 |
+
0.504,
|
| 596 |
+
0.639,
|
| 597 |
+
0.924,
|
| 598 |
+
0.926
|
| 599 |
+
],
|
| 600 |
+
"angle": 0,
|
| 601 |
+
"content": "Recent progress: Recently, a few efforts have tried to overcome the near-field beam split effect. In [8], a variant of chirp sequence is utilized to design the phase shifts, for flattening the beamfocusing gain across frequencies with the sacrifice of the maximum beamfocusing gain. This method can slightly alleviate the near-field beam split effect, but its spectral efficiency degrades as well when the bandwidth is very large, as the beams are still generated by PSs. To this end, a phase-delay focusing (PDF) method is proposed in [9] exploiting TTD-based beamforming. To further illustrate, the BS ELAA is first partitioned into multiple sub-arrays. The UE is assumed to be located in the far-field area of each small sub-array but within the near-field range of the ELAA. Then, one TTD line is inserted between each sub-array and the radio-frequency (RF) chain to realize frequency-dependent phase shifts. Finally, the frequency-dependent phase variations across different sub-arrays induced by spherical wavefronts are compensated by the inserted TTD line. As a result, beams over the working band are focused at the target UE location [9]."
|
| 602 |
+
},
|
| 603 |
+
{
|
| 604 |
+
"type": "text",
|
| 605 |
+
"bbox": [
|
| 606 |
+
0.52,
|
| 607 |
+
0.929,
|
| 608 |
+
0.922,
|
| 609 |
+
0.945
|
| 610 |
+
],
|
| 611 |
+
"angle": 0,
|
| 612 |
+
"content": "In conclusion, the first solution [8] follows the PS-based"
|
| 613 |
+
}
|
| 614 |
+
],
|
| 615 |
+
[
|
| 616 |
+
{
|
| 617 |
+
"type": "page_number",
|
| 618 |
+
"bbox": [
|
| 619 |
+
0.912,
|
| 620 |
+
0.031,
|
| 621 |
+
0.921,
|
| 622 |
+
0.041
|
| 623 |
+
],
|
| 624 |
+
"angle": 0,
|
| 625 |
+
"content": "5"
|
| 626 |
+
},
|
| 627 |
+
{
|
| 628 |
+
"type": "text",
|
| 629 |
+
"bbox": [
|
| 630 |
+
0.074,
|
| 631 |
+
0.07,
|
| 632 |
+
0.493,
|
| 633 |
+
0.206
|
| 634 |
+
],
|
| 635 |
+
"angle": 0,
|
| 636 |
+
"content": "beamforming, which is easy to implement but the achievable performance is unsatisfactory. The second scheme [9] can nearly eliminate the near-field beam split effect but requires the implementation of TTD lines. In fact, although deploying TTD lines by optical fibers has been demonstrated in the optical domain, this kind of deployment is non-trivial to be extended to THz ELAA communications. Fortunately, recent advances in graphene-based plasmonic waveguides provide low-cost solutions for implementing TTD lines at high frequencies [7]."
|
| 637 |
+
},
|
| 638 |
+
{
|
| 639 |
+
"type": "title",
|
| 640 |
+
"bbox": [
|
| 641 |
+
0.09,
|
| 642 |
+
0.223,
|
| 643 |
+
0.478,
|
| 644 |
+
0.238
|
| 645 |
+
],
|
| 646 |
+
"angle": 0,
|
| 647 |
+
"content": "IV. POTENTIALS FOR NEAR-FIELD COMMUNICATIONS"
|
| 648 |
+
},
|
| 649 |
+
{
|
| 650 |
+
"type": "text",
|
| 651 |
+
"bbox": [
|
| 652 |
+
0.075,
|
| 653 |
+
0.242,
|
| 654 |
+
0.493,
|
| 655 |
+
0.334
|
| 656 |
+
],
|
| 657 |
+
"angle": 0,
|
| 658 |
+
"content": "Unlike the aforementioned works for dealing with the performance degradation in the near-field, some recent studies have surprisingly revealed that 6G networks can also benefit from near-field propagation. In this section, we will discuss those studies exploiting the potentials of near-field propagation to improve communication performance."
|
| 659 |
+
},
|
| 660 |
+
{
|
| 661 |
+
"type": "title",
|
| 662 |
+
"bbox": [
|
| 663 |
+
0.075,
|
| 664 |
+
0.353,
|
| 665 |
+
0.258,
|
| 666 |
+
0.367
|
| 667 |
+
],
|
| 668 |
+
"angle": 0,
|
| 669 |
+
"content": "A. Capacity Enhancement"
|
| 670 |
+
},
|
| 671 |
+
{
|
| 672 |
+
"type": "text",
|
| 673 |
+
"bbox": [
|
| 674 |
+
0.074,
|
| 675 |
+
0.371,
|
| 676 |
+
0.493,
|
| 677 |
+
0.718
|
| 678 |
+
],
|
| 679 |
+
"angle": 0,
|
| 680 |
+
"content": "Potential: The spatial multiplexing gain of MIMO communications considerably increases with the transition from far-field regions to near-field regions. In far-field MIMO communications, the line-of-sight (LoS) channel can be represented by a rank-one matrix, where the spatial degrees-of-freedom (DoFs) are very limited. By contrast, the near-field LoS channel can be rank-sufficient derived from the geometric relationship under the spherical propagation model. The increased rank indicates dramatically improved spatial DoFs in the near-field region. Precisely, based on the expansion of prolate spheroidal wave functions, it is proved in [10] that near-field spatial DoFs are proportional to the product of the BS and UE array apertures and inversely proportional to the BS-UE distance. This conclusion is further improved in [11] by meticulously designing the beamfocusing vectors of the BS and UE arrays. As shown in Fig. 5, the DoFs increase from 1 to 20 when the BS-UE distance decreases from 350 meters to 10 meters. Thanks to the increased DoFs, the near-field LoS path enables simultaneous transmission of multiple data streams by MIMO precoding, as opposed to the rank-one far-field LoS channel supporting only one data stream. The increased spatial DoFs can be exploited as an additional spatial multiplexing gain, which offers a new possibility for a significant capacity enhancement."
|
| 681 |
+
},
|
| 682 |
+
{
|
| 683 |
+
"type": "text",
|
| 684 |
+
"bbox": [
|
| 685 |
+
0.074,
|
| 686 |
+
0.719,
|
| 687 |
+
0.495,
|
| 688 |
+
0.945
|
| 689 |
+
],
|
| 690 |
+
"angle": 0,
|
| 691 |
+
"content": "Recent progress: Recently, some novel precoding architectures have been proposed to leverage these extra near-field DoFs for MIMO capacity enhancement [12], [13]. Firstly, distance-aware precoding (DAP) is developed in [12]. Unlike classical hybrid precoding with a fixed and limited number of RF chains (e.g., 2 or 4 fixed RF chains), the DAP architecture could flexibly adjust the number of RF chains to match the distance-related DoFs, which is achieved by deploying a selection network to configure each RF chain as active or inactive. For instance, in the far-field region, only one RF chain is activated for data transmission. When communication distance is decreasing to 10-20 meters, around 20 activated RF chains are enough to adapt to the DoFs, as shown in Fig. 5. By doing so, the number of transmitted data streams dynamically matches the DoFs. Simulations demonstrate the DAP could"
|
| 692 |
+
},
|
| 693 |
+
{
|
| 694 |
+
"type": "image",
|
| 695 |
+
"bbox": [
|
| 696 |
+
0.522,
|
| 697 |
+
0.083,
|
| 698 |
+
0.895,
|
| 699 |
+
0.312
|
| 700 |
+
],
|
| 701 |
+
"angle": 0,
|
| 702 |
+
"content": null
|
| 703 |
+
},
|
| 704 |
+
{
|
| 705 |
+
"type": "image_caption",
|
| 706 |
+
"bbox": [
|
| 707 |
+
0.536,
|
| 708 |
+
0.315,
|
| 709 |
+
0.89,
|
| 710 |
+
0.331
|
| 711 |
+
],
|
| 712 |
+
"angle": 0,
|
| 713 |
+
"content": "Fig. 5: The spatial DoF increases in the near-field region."
|
| 714 |
+
},
|
| 715 |
+
{
|
| 716 |
+
"type": "text",
|
| 717 |
+
"bbox": [
|
| 718 |
+
0.503,
|
| 719 |
+
0.349,
|
| 720 |
+
0.925,
|
| 721 |
+
0.607
|
| 722 |
+
],
|
| 723 |
+
"angle": 0,
|
| 724 |
+
"content": "significantly increase the spectral efficiency while its energy efficiency is comparable with hybrid precoding. To avoid the utilization of extra RF chains, another effort to harvest the potential spatial multiplexing gain in near-field areas is the widely-spaced multi-subarray (WSMS) precoding [13]. In this architecture, the sub-arrays are widely spaced to enlarge the array aperture, artificially creating the expansion of the near-field region. Compared with classical hybrid precoding, the number of sub-arrays and the sub-array spacing should be additionally designed in the WSMS architecture. To this end, [13] first assumes planar-wave propagation within each sub-array and spherical-wave propagation across different sub-arrays similar to [9]. Then, [13] jointly optimizes the number of sub-arrays, their spacing, and the precoding matrix for maximizing the achievable rate. Simulations demonstrate that WSMS could achieve nearly \\(200\\%\\) higher spectral efficiency than classical hybrid precoding."
|
| 725 |
+
},
|
| 726 |
+
{
|
| 727 |
+
"type": "title",
|
| 728 |
+
"bbox": [
|
| 729 |
+
0.505,
|
| 730 |
+
0.636,
|
| 731 |
+
0.707,
|
| 732 |
+
0.651
|
| 733 |
+
],
|
| 734 |
+
"angle": 0,
|
| 735 |
+
"content": "B. Accessibility Improvement"
|
| 736 |
+
},
|
| 737 |
+
{
|
| 738 |
+
"type": "text",
|
| 739 |
+
"bbox": [
|
| 740 |
+
0.503,
|
| 741 |
+
0.658,
|
| 742 |
+
0.925,
|
| 743 |
+
0.945
|
| 744 |
+
],
|
| 745 |
+
"angle": 0,
|
| 746 |
+
"content": "Potential: Near-field propagation is also able to improve accessibility in multi-user (MU) communications. To increase the spectral efficiency in MU-MIMO communications, space division multiple access (SDMA) is widely considered to distinguish users through orthogonal or near-orthogonal spatial beams. Thus, multiple users can share the same time and frequency resources. For far-field SDMA, utilizing beamsteering to generate beams with planar wavefronts can distinguish users at different angles. A downside is that users located at similar angles will severely interfere with each other, and thus can not simultaneously access the network through farfield SDMA. Fortunately, near-field beamfocusing enjoys the capability of energy focusing on the joint angle-distance domain. Hence, near-field SDMA could generate beams with spherical wavefronts to simultaneously serve users located at similar angles but different distances, as shown in Fig. 6. The distance information of spherical wavefronts supplies a newizable dimension for multi-user access, which is not achievable for conventional far-field SDMA."
|
| 747 |
+
}
|
| 748 |
+
],
|
| 749 |
+
[
|
| 750 |
+
{
|
| 751 |
+
"type": "page_number",
|
| 752 |
+
"bbox": [
|
| 753 |
+
0.912,
|
| 754 |
+
0.032,
|
| 755 |
+
0.921,
|
| 756 |
+
0.041
|
| 757 |
+
],
|
| 758 |
+
"angle": 0,
|
| 759 |
+
"content": "6"
|
| 760 |
+
},
|
| 761 |
+
{
|
| 762 |
+
"type": "image",
|
| 763 |
+
"bbox": [
|
| 764 |
+
0.115,
|
| 765 |
+
0.071,
|
| 766 |
+
0.37,
|
| 767 |
+
0.244
|
| 768 |
+
],
|
| 769 |
+
"angle": 0,
|
| 770 |
+
"content": null
|
| 771 |
+
},
|
| 772 |
+
{
|
| 773 |
+
"type": "image",
|
| 774 |
+
"bbox": [
|
| 775 |
+
0.373,
|
| 776 |
+
0.071,
|
| 777 |
+
0.628,
|
| 778 |
+
0.245
|
| 779 |
+
],
|
| 780 |
+
"angle": 0,
|
| 781 |
+
"content": null
|
| 782 |
+
},
|
| 783 |
+
{
|
| 784 |
+
"type": "image",
|
| 785 |
+
"bbox": [
|
| 786 |
+
0.63,
|
| 787 |
+
0.072,
|
| 788 |
+
0.885,
|
| 789 |
+
0.245
|
| 790 |
+
],
|
| 791 |
+
"angle": 0,
|
| 792 |
+
"content": null
|
| 793 |
+
},
|
| 794 |
+
{
|
| 795 |
+
"type": "image_caption",
|
| 796 |
+
"bbox": [
|
| 797 |
+
0.248,
|
| 798 |
+
0.25,
|
| 799 |
+
0.747,
|
| 800 |
+
0.264
|
| 801 |
+
],
|
| 802 |
+
"angle": 0,
|
| 803 |
+
"content": "Fig. 6: Near-field beamfocusing is able to serve multiple users in the same angle."
|
| 804 |
+
},
|
| 805 |
+
{
|
| 806 |
+
"type": "text",
|
| 807 |
+
"bbox": [
|
| 808 |
+
0.074,
|
| 809 |
+
0.28,
|
| 810 |
+
0.494,
|
| 811 |
+
0.448
|
| 812 |
+
],
|
| 813 |
+
"angle": 0,
|
| 814 |
+
"content": "Recent progress: Taking advantage of the capability of beamfocusing, the authors in [14] have studied the near-field multi-user transmission considering fully-digital precoding, hybrid precoding, and transmissive reconfigurable metasurface (RMS). By optimizing the sum rate in multi-user systems through alternating optimization, all considered precoding architectures can naturally generate beams with spherical wavefronts to distinguish users located at similar angles but different distances. The simulation results demonstrate that near-field propagation has the potential of enhancing multi-user accessibility."
|
| 815 |
+
},
|
| 816 |
+
{
|
| 817 |
+
"type": "title",
|
| 818 |
+
"bbox": [
|
| 819 |
+
0.155,
|
| 820 |
+
0.466,
|
| 821 |
+
0.413,
|
| 822 |
+
0.479
|
| 823 |
+
],
|
| 824 |
+
"angle": 0,
|
| 825 |
+
"content": "V. FUTURE RESEARCH DIRECTIONS"
|
| 826 |
+
},
|
| 827 |
+
{
|
| 828 |
+
"type": "text",
|
| 829 |
+
"bbox": [
|
| 830 |
+
0.075,
|
| 831 |
+
0.485,
|
| 832 |
+
0.493,
|
| 833 |
+
0.515
|
| 834 |
+
],
|
| 835 |
+
"angle": 0,
|
| 836 |
+
"content": "In this section, several future research directions for nearfield communications will be pointed out."
|
| 837 |
+
},
|
| 838 |
+
{
|
| 839 |
+
"type": "title",
|
| 840 |
+
"bbox": [
|
| 841 |
+
0.075,
|
| 842 |
+
0.537,
|
| 843 |
+
0.338,
|
| 844 |
+
0.552
|
| 845 |
+
],
|
| 846 |
+
"angle": 0,
|
| 847 |
+
"content": "A. Near-Field Communication Theory"
|
| 848 |
+
},
|
| 849 |
+
{
|
| 850 |
+
"type": "text",
|
| 851 |
+
"bbox": [
|
| 852 |
+
0.074,
|
| 853 |
+
0.557,
|
| 854 |
+
0.493,
|
| 855 |
+
0.86
|
| 856 |
+
],
|
| 857 |
+
"angle": 0,
|
| 858 |
+
"content": "1) Improvement of Rayleigh Distance: As a widely adopted quantification of near-field range, Rayleigh distance is attained in terms of phase discrepancy. For communication metrics directly affected by phase discrepancy, such as channel estimation accuracy, Rayleigh distance can accurately capture the degradation of these metrics when applying far-field transmission schemes in the near-field region. On the contrary, some metrics are directly influenced by other factors instead of phase discrepancy, e.g., capacity is determined by beamforming gain and channel rank. Accordingly, classical Rayleigh distance probably cannot capture the performance loss of these metrics well. To this end, several recent works have endeavored to improve classical Rayleigh distance in terms of some vital communication metrics. For instance, an effective Rayleigh distance (ERD) is derived in [9] for the accurate description of beamforming gain loss and capacity loss. Nevertheless, ERD is only valid for MISO communications, while more discussion should be made to improve Rayleigh distance in more practical scenarios under more general metrics, e.g., channel rank and energy efficiency in MIMO and RIS systems."
|
| 859 |
+
},
|
| 860 |
+
{
|
| 861 |
+
"type": "title",
|
| 862 |
+
"bbox": [
|
| 863 |
+
0.075,
|
| 864 |
+
0.88,
|
| 865 |
+
0.358,
|
| 866 |
+
0.896
|
| 867 |
+
],
|
| 868 |
+
"angle": 0,
|
| 869 |
+
"content": "B. Near-Field Transmission Technologies"
|
| 870 |
+
},
|
| 871 |
+
{
|
| 872 |
+
"type": "text",
|
| 873 |
+
"bbox": [
|
| 874 |
+
0.075,
|
| 875 |
+
0.899,
|
| 876 |
+
0.493,
|
| 877 |
+
0.946
|
| 878 |
+
],
|
| 879 |
+
"angle": 0,
|
| 880 |
+
"content": "1) AI-Aided Near-Field Communications: Different from far-field communications, the transmission algorithms for nearfield communications are more complex. To be specific, since"
|
| 881 |
+
},
|
| 882 |
+
{
|
| 883 |
+
"type": "text",
|
| 884 |
+
"bbox": [
|
| 885 |
+
0.503,
|
| 886 |
+
0.28,
|
| 887 |
+
0.925,
|
| 888 |
+
0.49
|
| 889 |
+
],
|
| 890 |
+
"angle": 0,
|
| 891 |
+
"content": "extra grids on the distance domain are required, as mentioned in Section III-A [5], the size of near-field codebooks is usually much larger than that of far-field codebooks, leading to high-complexity channel estimation and beamforming. Moreover, the non-linear phase characteristics of spherical waves make the design of near-field beam training and precoding algorithms more complicated than that in far-field areas. AI-based transmission methods are promising to address these problems since they can mine the features of near-field environments through non-linear neural networks. Currently, there are plenty of works elaborating on AI-based far-field transmissions (references are not provided here since the number of references is limited in this magazine), while AI-based near-field transmissions have not been well studied."
|
| 892 |
+
},
|
| 893 |
+
{
|
| 894 |
+
"type": "text",
|
| 895 |
+
"bbox": [
|
| 896 |
+
0.504,
|
| 897 |
+
0.492,
|
| 898 |
+
0.922,
|
| 899 |
+
0.688
|
| 900 |
+
],
|
| 901 |
+
"angle": 0,
|
| 902 |
+
"content": "2) RIS-Aided Near-Field Communications: Compared with MIMO communications, the near-field propagation becomes even more dominant and complex in RIS-aided systems. In MIMO communications, based on the spherical propagation model, the EM waves form spherical equiphase surfaces at the receiver. On the contrary, in RIS-aided systems, the phase of received EM waves is accumulated by the propagation delays through the BE-RIS and RIS-UE links. Based on the geometry relationship, the equiphase surfaces become ellipses in the near-field range instead of spherical. Accordingly, the research on beamfocusing [15], channel estimation, and multiple access techniques taking into account this ellipses-equiphase property are required for RIS-aided near-field communications."
|
| 903 |
+
},
|
| 904 |
+
{
|
| 905 |
+
"type": "text",
|
| 906 |
+
"bbox": [
|
| 907 |
+
0.504,
|
| 908 |
+
0.689,
|
| 909 |
+
0.924,
|
| 910 |
+
0.946
|
| 911 |
+
],
|
| 912 |
+
"angle": 0,
|
| 913 |
+
"content": "3) Hybrid Far- and Near-Field Communications: In practical systems, communication environments usually exist with both far-field and near-field signal components. First, in multi-user systems with multi-path channels, some users and scatterers may be far away from the BS while others are located in the near-field region of the BS, which constitutes a hybrid far- and near-field (hybrid-field) communication scenario. Additionally, it is worth mentioning that the Rayleigh distance is proportional to frequency. Thus, in an ultra-wideband or frequency-hopping system with a very large frequency span, its near-field range varies dramatically across the bandwidth. Chances are that the signal components at low frequencies may operate in far-field regions while those at high frequencies with larger Rayleigh distances are propagating in the near-field areas, which also contributes to hybrid-field communications. Consequently, the above factors make hybrid-field communications practical and crucial in future 6G networks. Thus, hybrid-field"
|
| 914 |
+
},
|
| 915 |
+
{
|
| 916 |
+
"type": "list",
|
| 917 |
+
"bbox": [
|
| 918 |
+
0.504,
|
| 919 |
+
0.492,
|
| 920 |
+
0.924,
|
| 921 |
+
0.946
|
| 922 |
+
],
|
| 923 |
+
"angle": 0,
|
| 924 |
+
"content": null
|
| 925 |
+
}
|
| 926 |
+
],
|
| 927 |
+
[
|
| 928 |
+
{
|
| 929 |
+
"type": "page_number",
|
| 930 |
+
"bbox": [
|
| 931 |
+
0.912,
|
| 932 |
+
0.031,
|
| 933 |
+
0.921,
|
| 934 |
+
0.041
|
| 935 |
+
],
|
| 936 |
+
"angle": 0,
|
| 937 |
+
"content": "7"
|
| 938 |
+
},
|
| 939 |
+
{
|
| 940 |
+
"type": "text",
|
| 941 |
+
"bbox": [
|
| 942 |
+
0.075,
|
| 943 |
+
0.07,
|
| 944 |
+
0.492,
|
| 945 |
+
0.099
|
| 946 |
+
],
|
| 947 |
+
"angle": 0,
|
| 948 |
+
"content": "transmission techniques handling both far-field and near-field signal components deserve in-depth study."
|
| 949 |
+
},
|
| 950 |
+
{
|
| 951 |
+
"type": "text",
|
| 952 |
+
"bbox": [
|
| 953 |
+
0.074,
|
| 954 |
+
0.101,
|
| 955 |
+
0.493,
|
| 956 |
+
0.312
|
| 957 |
+
],
|
| 958 |
+
"angle": 0,
|
| 959 |
+
"content": "4) Spatial Non-Stationarity Effect on Near-Field Communications: Except for near-field propagation, the spatial nonstationarity effect is another fundamental characteristic of ELAA compared to 5G massive MIMO, where different scatterers and users are visible to different portions of the ELAA. This effect leads to the fact that only a part of the ELAA can receive the spherical EM waves radiated by a scatterer or a user. The angular power spectral and average received power rapidly vary over the ELAA. Recently, there have been intensive works dealing with the non-stationarity effect and near-field propagation simultaneously [6]. However, the impart of non-stationarity on other emerging near-field communications has not been well studied, such as RIS-aided systems and hybrid-field communications."
|
| 960 |
+
},
|
| 961 |
+
{
|
| 962 |
+
"type": "title",
|
| 963 |
+
"bbox": [
|
| 964 |
+
0.076,
|
| 965 |
+
0.333,
|
| 966 |
+
0.263,
|
| 967 |
+
0.349
|
| 968 |
+
],
|
| 969 |
+
"angle": 0,
|
| 970 |
+
"content": "C. Hardware Development"
|
| 971 |
+
},
|
| 972 |
+
{
|
| 973 |
+
"type": "text",
|
| 974 |
+
"bbox": [
|
| 975 |
+
0.074,
|
| 976 |
+
0.353,
|
| 977 |
+
0.495,
|
| 978 |
+
0.534
|
| 979 |
+
],
|
| 980 |
+
"angle": 0,
|
| 981 |
+
"content": "To verify the effectiveness of near-field transmission technologies, hardware developments and over-the-air experiments are of great significance. For example, for alleviating the nearfield beam split effect, TTD lines need to be meticulously designed in the THz domain. The hardware developments of WSMS and DAP architectures are worth being carried out to exploit the near-field spatial DoFs. Besides, implementing these techniques still has to overcome several hardware impairment issues, including In-phase/Quadrature imbalance, low-efficiency power amplifier at high frequency, etc. All these challenges should be carefully addressed to enable the implementation of 6G near-field communications."
|
| 982 |
+
},
|
| 983 |
+
{
|
| 984 |
+
"type": "title",
|
| 985 |
+
"bbox": [
|
| 986 |
+
0.215,
|
| 987 |
+
0.553,
|
| 988 |
+
0.353,
|
| 989 |
+
0.567
|
| 990 |
+
],
|
| 991 |
+
"angle": 0,
|
| 992 |
+
"content": "VI. CONCLUSIONS"
|
| 993 |
+
},
|
| 994 |
+
{
|
| 995 |
+
"type": "text",
|
| 996 |
+
"bbox": [
|
| 997 |
+
0.074,
|
| 998 |
+
0.573,
|
| 999 |
+
0.493,
|
| 1000 |
+
0.845
|
| 1001 |
+
],
|
| 1002 |
+
"angle": 0,
|
| 1003 |
+
"content": "With the evolution from massive MIMO to ELAA, near-field propagation with spherical wavefront becomes indispensable in 6G networks, where conventional far-field propagation with planar wavefront is not valid anymore. In this article, we revealed that near-field propagation is a double-edged sword, i.e., it brings both challenges and potentials to 6G communications. We first introduced the non-linear phase property of spherical waves and explained the derivation of near-field range in terms of phase discrepancy. Then, we discussed the technical challenges of channel estimation and beam split caused by near-field propagation and presented the recent solutions. In addition, some appealing works that exploit the capability of spherical waves to improve capacity and accessibility were investigated. Several future research directions for near-field communications, such as improvement of Rayleigh distance and hybrid-field transmissions, were also highlighted, which are expected to inspire more innovations on 6G near-field communications."
|
| 1004 |
+
},
|
| 1005 |
+
{
|
| 1006 |
+
"type": "title",
|
| 1007 |
+
"bbox": [
|
| 1008 |
+
0.206,
|
| 1009 |
+
0.865,
|
| 1010 |
+
0.362,
|
| 1011 |
+
0.878
|
| 1012 |
+
],
|
| 1013 |
+
"angle": 0,
|
| 1014 |
+
"content": "ACKNOWLEDGEMENT"
|
| 1015 |
+
},
|
| 1016 |
+
{
|
| 1017 |
+
"type": "text",
|
| 1018 |
+
"bbox": [
|
| 1019 |
+
0.075,
|
| 1020 |
+
0.885,
|
| 1021 |
+
0.492,
|
| 1022 |
+
0.947
|
| 1023 |
+
],
|
| 1024 |
+
"angle": 0,
|
| 1025 |
+
"content": "This work was supported in part by the National Key Research and Development Program of China (Grant No.2020YFB1807201), in part by the National Natural Science Foundation of China (Grant No.62031019)."
|
| 1026 |
+
},
|
| 1027 |
+
{
|
| 1028 |
+
"type": "title",
|
| 1029 |
+
"bbox": [
|
| 1030 |
+
0.666,
|
| 1031 |
+
0.07,
|
| 1032 |
+
0.762,
|
| 1033 |
+
0.083
|
| 1034 |
+
],
|
| 1035 |
+
"angle": 0,
|
| 1036 |
+
"content": "REFERENCES"
|
| 1037 |
+
},
|
| 1038 |
+
{
|
| 1039 |
+
"type": "ref_text",
|
| 1040 |
+
"bbox": [
|
| 1041 |
+
0.515,
|
| 1042 |
+
0.09,
|
| 1043 |
+
0.922,
|
| 1044 |
+
0.125
|
| 1045 |
+
],
|
| 1046 |
+
"angle": 0,
|
| 1047 |
+
"content": "[1] W. Saad, M. Bennis, and M. Chen, \"A vision of 6G wireless systems: Applications, trends, technologies, and open research problems,\" IEEE Network, vol. 34, no. 3, pp. 134-142, May 2020."
|
| 1048 |
+
},
|
| 1049 |
+
{
|
| 1050 |
+
"type": "ref_text",
|
| 1051 |
+
"bbox": [
|
| 1052 |
+
0.516,
|
| 1053 |
+
0.126,
|
| 1054 |
+
0.922,
|
| 1055 |
+
0.17
|
| 1056 |
+
],
|
| 1057 |
+
"angle": 0,
|
| 1058 |
+
"content": "[2] M. Uusitalo, P. Rugeland, M. Boldi, E. C. Strinati, and Y. Zou, \"RFocus: Beamforming using thousands of passive antennas,\" in Proc. 17th USENIX Symposium on Networked Systems Design and Implementation (NSDI '20), Feb. 2020."
|
| 1059 |
+
},
|
| 1060 |
+
{
|
| 1061 |
+
"type": "ref_text",
|
| 1062 |
+
"bbox": [
|
| 1063 |
+
0.515,
|
| 1064 |
+
0.17,
|
| 1065 |
+
0.921,
|
| 1066 |
+
0.204
|
| 1067 |
+
],
|
| 1068 |
+
"angle": 0,
|
| 1069 |
+
"content": "[3] I. F. Akyildiz and J. M. Jornet, “Realizing ultra-massive MIMO (1024×1024) communication in the (0.06–10) Terahertz band,” Nano Communication Networks, vol. 8, pp. 46–54, 2016."
|
| 1070 |
+
},
|
| 1071 |
+
{
|
| 1072 |
+
"type": "ref_text",
|
| 1073 |
+
"bbox": [
|
| 1074 |
+
0.516,
|
| 1075 |
+
0.205,
|
| 1076 |
+
0.922,
|
| 1077 |
+
0.238
|
| 1078 |
+
],
|
| 1079 |
+
"angle": 0,
|
| 1080 |
+
"content": "[4] K. T. Selvan and R. Janaswamy, “Fraunhofer and Fresnel distances: Unified derivation for aperture antennas,” IEEE Antennas Propag. Mag., vol. 59, no. 4, pp. 12–15, Aug. 2017."
|
| 1081 |
+
},
|
| 1082 |
+
{
|
| 1083 |
+
"type": "ref_text",
|
| 1084 |
+
"bbox": [
|
| 1085 |
+
0.516,
|
| 1086 |
+
0.239,
|
| 1087 |
+
0.922,
|
| 1088 |
+
0.272
|
| 1089 |
+
],
|
| 1090 |
+
"angle": 0,
|
| 1091 |
+
"content": "[5] M. Cui and L. Dai, \"Channel estimation for extremely large-scale MIMO: Far-field or near-field?\" IEEE Trans. Commun., vol. 70, no. 4, pp. 2663-2677, Jan. 2022."
|
| 1092 |
+
},
|
| 1093 |
+
{
|
| 1094 |
+
"type": "ref_text",
|
| 1095 |
+
"bbox": [
|
| 1096 |
+
0.516,
|
| 1097 |
+
0.273,
|
| 1098 |
+
0.921,
|
| 1099 |
+
0.305
|
| 1100 |
+
],
|
| 1101 |
+
"angle": 0,
|
| 1102 |
+
"content": "[6] Y. Han, S. Jin, C. Wen, and X. Ma, \"Channel estimation for extremely large-scale massive MIMO systems,\" IEEE Wireless Commun. Lett., vol. 9, no. 5, pp. 633-637, May 2020."
|
| 1103 |
+
},
|
| 1104 |
+
{
|
| 1105 |
+
"type": "ref_text",
|
| 1106 |
+
"bbox": [
|
| 1107 |
+
0.516,
|
| 1108 |
+
0.306,
|
| 1109 |
+
0.92,
|
| 1110 |
+
0.363
|
| 1111 |
+
],
|
| 1112 |
+
"angle": 0,
|
| 1113 |
+
"content": "[7] A. Singh, M. Andrello, E. Einarsson, N. Thawdarl, and J. M. Jornet, \"A hybrid intelligent reflecting surface with graphene-based control elements for THz communications,\" in Proc. 2020 IEEE 21st International Workshop on Signal Processing Advances in Wireless Communications (SPAWC), May 2020, pp. 1-5."
|
| 1114 |
+
},
|
| 1115 |
+
{
|
| 1116 |
+
"type": "ref_text",
|
| 1117 |
+
"bbox": [
|
| 1118 |
+
0.516,
|
| 1119 |
+
0.363,
|
| 1120 |
+
0.92,
|
| 1121 |
+
0.397
|
| 1122 |
+
],
|
| 1123 |
+
"angle": 0,
|
| 1124 |
+
"content": "[8] N. J. Myers and R. W. Heath, \"Infocus: A spatial coding technique to mitigate misfocus in near-field los beamforming,\" IEEE Trans. Wireless Commun., vol. 21, no. 4, pp. 2193-2209, Apr. 2022."
|
| 1125 |
+
},
|
| 1126 |
+
{
|
| 1127 |
+
"type": "ref_text",
|
| 1128 |
+
"bbox": [
|
| 1129 |
+
0.516,
|
| 1130 |
+
0.398,
|
| 1131 |
+
0.92,
|
| 1132 |
+
0.431
|
| 1133 |
+
],
|
| 1134 |
+
"angle": 0,
|
| 1135 |
+
"content": "[9] M. Cui, L. Dai, R. Schober, and L. Hanzo, “Near-field wideband beamforming for extremely large antenna array,” arXiv preprint arXiv:2109.10054, Sep. 2021."
|
| 1136 |
+
},
|
| 1137 |
+
{
|
| 1138 |
+
"type": "ref_text",
|
| 1139 |
+
"bbox": [
|
| 1140 |
+
0.508,
|
| 1141 |
+
0.431,
|
| 1142 |
+
0.921,
|
| 1143 |
+
0.453
|
| 1144 |
+
],
|
| 1145 |
+
"angle": 0,
|
| 1146 |
+
"content": "[10] D. A. Miller, \"Waves, modes, communications, and optics: A tutorial,\" Adv. in Opt. and Photon., vol. 11, no. 3, pp. 679-825, Sep. 2019."
|
| 1147 |
+
},
|
| 1148 |
+
{
|
| 1149 |
+
"type": "ref_text",
|
| 1150 |
+
"bbox": [
|
| 1151 |
+
0.508,
|
| 1152 |
+
0.454,
|
| 1153 |
+
0.921,
|
| 1154 |
+
0.487
|
| 1155 |
+
],
|
| 1156 |
+
"angle": 0,
|
| 1157 |
+
"content": "[11] N. Decarli and D. Dardari, \"Communication modes with large intelligent surfaces in the near field,\" IEEE Access, vol. 9, pp. 165-648-165-666, Sep. 2021."
|
| 1158 |
+
},
|
| 1159 |
+
{
|
| 1160 |
+
"type": "ref_text",
|
| 1161 |
+
"bbox": [
|
| 1162 |
+
0.508,
|
| 1163 |
+
0.488,
|
| 1164 |
+
0.921,
|
| 1165 |
+
0.522
|
| 1166 |
+
],
|
| 1167 |
+
"angle": 0,
|
| 1168 |
+
"content": "[12] Z. Wu, M. Cui, Z. Zhang, and L. Dai, \"Distance-aware precoding for near-field capacity improvement in XL-MIMO,\" in Proc. 2022 IEEE 95th Vehicular Technology Conference (VTC2022-Spring), 2022, pp. 1-5."
|
| 1169 |
+
},
|
| 1170 |
+
{
|
| 1171 |
+
"type": "ref_text",
|
| 1172 |
+
"bbox": [
|
| 1173 |
+
0.508,
|
| 1174 |
+
0.522,
|
| 1175 |
+
0.921,
|
| 1176 |
+
0.566
|
| 1177 |
+
],
|
| 1178 |
+
"angle": 0,
|
| 1179 |
+
"content": "[13] L. Yan, Y. Chen, C. Han, and J. Yuan, \"Joint inter-path and intrapath multiplexing for terahertz widely-spaced multi-subarray hybrid beamforming systems,\" IEEE Trans. Commun., vol. 70, no. 2, pp. 1391-1406, Feb. 2022."
|
| 1180 |
+
},
|
| 1181 |
+
{
|
| 1182 |
+
"type": "ref_text",
|
| 1183 |
+
"bbox": [
|
| 1184 |
+
0.508,
|
| 1185 |
+
0.567,
|
| 1186 |
+
0.921,
|
| 1187 |
+
0.6
|
| 1188 |
+
],
|
| 1189 |
+
"angle": 0,
|
| 1190 |
+
"content": "[14] H. Zhang, N. Shlezinger, F. Guidi, D. Dardari, M. F. Imani, and Y. C. Eldar, \"Beam focusing for near-field multi-user MIMO communications,\" IEEE Trans. Wireless Commun., 2022."
|
| 1191 |
+
},
|
| 1192 |
+
{
|
| 1193 |
+
"type": "ref_text",
|
| 1194 |
+
"bbox": [
|
| 1195 |
+
0.508,
|
| 1196 |
+
0.601,
|
| 1197 |
+
0.921,
|
| 1198 |
+
0.647
|
| 1199 |
+
],
|
| 1200 |
+
"angle": 0,
|
| 1201 |
+
"content": "[15] K. Dovelos, S. D. Assimonis, H. Quoc Ngo, B. Bellalta, and M. Matthaiou, \"Intelligent reflecting surfaces at Terahertz Bands: Channel modeling and analysis,\" in Proc. 2021 IEEE International Conference on Communications Workshops (ICC Workshops), Jun. 2021, pp. 1-6."
|
| 1202 |
+
},
|
| 1203 |
+
{
|
| 1204 |
+
"type": "list",
|
| 1205 |
+
"bbox": [
|
| 1206 |
+
0.508,
|
| 1207 |
+
0.09,
|
| 1208 |
+
0.922,
|
| 1209 |
+
0.647
|
| 1210 |
+
],
|
| 1211 |
+
"angle": 0,
|
| 1212 |
+
"content": null
|
| 1213 |
+
},
|
| 1214 |
+
{
|
| 1215 |
+
"type": "title",
|
| 1216 |
+
"bbox": [
|
| 1217 |
+
0.663,
|
| 1218 |
+
0.665,
|
| 1219 |
+
0.765,
|
| 1220 |
+
0.677
|
| 1221 |
+
],
|
| 1222 |
+
"angle": 0,
|
| 1223 |
+
"content": "BIOGRAPHIES"
|
| 1224 |
+
},
|
| 1225 |
+
{
|
| 1226 |
+
"type": "text",
|
| 1227 |
+
"bbox": [
|
| 1228 |
+
0.505,
|
| 1229 |
+
0.683,
|
| 1230 |
+
0.921,
|
| 1231 |
+
0.713
|
| 1232 |
+
],
|
| 1233 |
+
"angle": 0,
|
| 1234 |
+
"content": "Mingyao Cui is a M.S. researcher in BNrist from Tsinghua University, Beijing, China."
|
| 1235 |
+
},
|
| 1236 |
+
{
|
| 1237 |
+
"type": "text",
|
| 1238 |
+
"bbox": [
|
| 1239 |
+
0.505,
|
| 1240 |
+
0.728,
|
| 1241 |
+
0.92,
|
| 1242 |
+
0.759
|
| 1243 |
+
],
|
| 1244 |
+
"angle": 0,
|
| 1245 |
+
"content": "Zidong Wu is a Ph.D. researcher in BNRist from Tsinghua University, Beijing, China."
|
| 1246 |
+
},
|
| 1247 |
+
{
|
| 1248 |
+
"type": "text",
|
| 1249 |
+
"bbox": [
|
| 1250 |
+
0.505,
|
| 1251 |
+
0.773,
|
| 1252 |
+
0.92,
|
| 1253 |
+
0.804
|
| 1254 |
+
],
|
| 1255 |
+
"angle": 0,
|
| 1256 |
+
"content": "Yu Lu is a Ph.D. researcher in BNRist from Tsinghua University, Beijing, China."
|
| 1257 |
+
},
|
| 1258 |
+
{
|
| 1259 |
+
"type": "text",
|
| 1260 |
+
"bbox": [
|
| 1261 |
+
0.505,
|
| 1262 |
+
0.818,
|
| 1263 |
+
0.92,
|
| 1264 |
+
0.849
|
| 1265 |
+
],
|
| 1266 |
+
"angle": 0,
|
| 1267 |
+
"content": "Xiuhong Wei is a M.S. researcher in BNRist from Tsinghua University, Beijing, China."
|
| 1268 |
+
},
|
| 1269 |
+
{
|
| 1270 |
+
"type": "text",
|
| 1271 |
+
"bbox": [
|
| 1272 |
+
0.504,
|
| 1273 |
+
0.864,
|
| 1274 |
+
0.922,
|
| 1275 |
+
0.94
|
| 1276 |
+
],
|
| 1277 |
+
"angle": 0,
|
| 1278 |
+
"content": "Linglong Dai is an associate professor at Tsinghua University. His current research interests include RIS, massive MIMO, mmWave and THz communications, and machine learning for wireless communications. He has received six conference best paper awards and four journal best paper awards."
|
| 1279 |
+
}
|
| 1280 |
+
]
|
| 1281 |
+
]
|
2203.16xxx/2203.16318/bdf4203f-60cc-4b35-a342-da2e1d7ac014_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:8e6923690111501382a3b52b564015efed80a7f531802db7a40350214fa49fcc
size 1573372
2203.16xxx/2203.16318/full.md
ADDED
@@ -0,0 +1,185 @@
# Near-Field Communications for 6G: Fundamentals, Challenges, Potentials, and Future Directions

Mingyao Cui, Zidong Wu, Yu Lu, Xiuhong Wei, and Linglong Dai, Fellow, IEEE

Abstract—Extremely large-scale antenna array (ELAA) is a common feature of several key candidate technologies for the sixth generation (6G) mobile networks, such as ultra-massive multiple-input-multiple-output (UM-MIMO), cell-free massive MIMO, reconfigurable intelligent surface (RIS), and terahertz communications. Since the number of antennas is very large for ELAA, the electromagnetic radiation field needs to be modeled by near-field spherical waves, which differs from the conventional planar-wave-based radiation model of 5G massive MIMO. As a result, near-field communications will become essential in 6G wireless networks. In this article, we systematically investigate the emerging near-field communication techniques. Firstly, we present the fundamentals of near-field communications and the metric to determine the near-field ranges in typical communication scenarios. Then, we investigate recent studies specific to near-field communications by classifying them into two categories, i.e., techniques addressing the challenges and those exploiting the potentials in near-field regions. Their principles, recent progress, pros and cons are discussed. More importantly, several open problems and future research directions for near-field communications are pointed out. We believe that this article would inspire more innovations for this important research topic of near-field communications for 6G.

Index Terms—6G, ELAA, near-field communications, spherical wavefront.

# I. INTRODUCTION

The sixth generation (6G) mobile networks are promising to empower emerging applications, such as holographic video, digital replica, etc. For fulfilling these visions, tremendous research efforts have been endeavored to develop new wireless technologies to meet the key performance indicators (KPIs) of 6G, which are much superior to those of 5G [1]. For instance, thanks to the enormous spatial multiplexing and beamforming gain, ultra-massive multiple-input-multiple-output (UM-MIMO) and cell-free massive MIMO (CF-MIMO) are expected to accomplish a 10-fold increase in the spectral efficiency for 6G [1]. Besides, by dynamically manipulating the wireless environment through thousands of antennas, reconfigurable intelligent surface (RIS) brings new possibilities for capacity and coverage enhancement [2]. Moreover, millimeter-wave (mmWave) and terahertz (THz) UM-MIMO can offer abundant spectral resources for supporting $100\times$ peak data rate improvement (e.g., Tbps) in 6G mobile communications [3].

All authors are with the Beijing National Research Center for Information Science and Technology (BNRist) as well as the Department of Electronic Engineering, Tsinghua University, Beijing 100084, China (e-mails: {cmy20, wuzd19, y-lu19, weixh19}@mails.tsinghua.edu.cn, daill@tsinghua.edu.cn).

© 2022 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.

Despite being suitable for different application scenarios with various KPIs, all the above technologies, including UM-MIMO, CF-MIMO, RIS, and THz communications, share a common feature: they all usually require a very large number of antennas to attain their expected performance, i.e., extremely large-scale antenna arrays (ELAA) are essential to these different candidate technologies for 6G.

Compared with massive MIMO, the key technology in 5G networks, ELAA for 6G not only means a sharp increase in the number of antennas but also results in a fundamental change of the electromagnetic (EM) characteristics. The EM radiation field can generally be divided into far-field and radiating near-field regions. The boundary between these two regions is determined by the Rayleigh distance, also called the Fraunhofer distance [4]. The Rayleigh distance is proportional to the product of the square of the array aperture and the carrier frequency [4]. Outside the Rayleigh distance lies the far-field region, where the EM field can be approximately modeled by planar waves. Within the Rayleigh distance, the near-field propagation becomes dominant, where the EM field has to be accurately modeled by spherical waves.

Since the number of antennas is not very large in 5G massive MIMO systems, the Rayleigh distance of up to several meters is negligible. Thus, existing 5G communications are mainly developed from far-field communication theories and techniques. However, with the significant increase of the antenna number and carrier frequency in future 6G systems, the near-field region of ELAA will expand by orders of magnitude. For instance, a 3200-element ELAA at $2.4\mathrm{GHz}$ was developed in [2]. With an array size of $2\mathrm{m} \times 3\mathrm{m}$, its Rayleigh distance is about 200 meters, which is larger than the radius of a typical 5G cell. Accordingly, near-field communications will become essential components in future 6G mobile networks where the spherical propagation model needs to be considered, which is obviously different from the existing far-field 5G systems. Unfortunately, the near-field propagation introduces several new challenges in ELAA systems, which should be identified and addressed to empower 6G communications.

In this article, we systematically investigate the recent near-field communication techniques for 6G. The key features of this article can be summarized as follows:

- To begin with, the fundamental differences between far-field and near-field communications are explained. Comparatively speaking, the planar wavefront in the far-field can steer the signal energy towards a specific physical angle. On the contrary, the near-field spherical wavefront achieves energy focusing in both the angle and distance domains. Moreover, the Rayleigh distance that quantifies the boundary between far-field and near-field regions is introduced, and its derivation is explained in detail. Based on this derivation, we further extend the classical Rayleigh distance, for MIMO channels with a direct base station (BS)-user equipment (UE) link, to the one for RIS-aided communications, where a cascaded channel is utilized for presenting the BS-RIS-UE link.
- Additionally, we investigate the emerging near-field communication techniques by classifying them into two types, i.e., techniques addressing the challenges and those exploiting the potentials in near-field regions. On the one hand, as most techniques specific to the far-field often suffer from a severe performance loss in the near-field area, the first type of techniques aims to compensate for this loss, such as near-field channel estimation and beamforming. On the other hand, the second kind of study has revealed that the nature of the near-field spherical wavefront can also be exploited to provide new possibilities for capacity enhancement and accessibility improvement. The principles, recent progress, pros and cons of these two categories of research are discussed in detail.
- Finally, several open problems and future research directions for near-field communications are pointed out. For example, the improvement of the Rayleigh distance considering various communication metrics needs to be analyzed, artificial intelligence (AI) is expected to enable high-performance near-field transmissions with low complexity, and hybrid far- and near-field communications also require in-depth study.

# II. FUNDAMENTALS OF NEAR-FIELD COMMUNICATIONS

In this section, we first present the differences between far-field and near-field communications. Then, we will identify the principle to determine the boundary between the far-field and near-field regions in several typical application scenarios.

# A. Far-Field Communications vs. Near-Field Communications

The critical characteristics of far-field and near-field communications are shown in Fig. 1. We consider an uplink communication scenario, while the discussions in this article are also valid for downlink scenarios. The BS is equipped with an ELAA. A widely adopted metric to determine the boundary between far-field and near-field regions is the Rayleigh distance, also called the Fraunhofer distance [4]. When the communication distance between the BS and UE (BS-UE distance) is larger than the Rayleigh distance, the UE is located in the far-field region of the BS. Then, EM waves impinging on the BS array can be approximately modeled as planar waves. By contrast, when the BS-UE distance is shorter than the Rayleigh distance, the UE is located in the near-field region of the BS. In this region, EM waves impinging on the BS array must be accurately modeled as spherical waves [5].
More precisely, the planar wave is a long-distance approximation of the spherical wave. In far-field regions, the phase of EM waves can be elegantly approximated by a linear function of the antenna index through Taylor expansion. This concise linear phase forms a planar wavefront related only to an incident angle.

Fig. 1: Far-field planar wavefront vs. near-field spherical wavefront. The plots at the bottom illustrate the normalized received signal energy in the physical space achieved by near-field beamfocusing (bottom left) and far-field beamsteering (bottom right).

Accordingly, by the utilization of planar wavefronts, far-field beamforming can steer the beam energy towards a specific angle over different distances, which is also termed beamsteering, as shown in the bottom right figure of Fig. 1. Unfortunately, this concise linear phase fails to thoroughly reveal the information of spherical waves. In near-field regions, the phase of spherical waves should be accurately derived based on the physical geometry, which is a non-linear function of the antenna index. The information of the incident angle and distance in each path between the BS and UE is embedded in this non-linear phase. Exploiting the extra distance information of spherical wavefronts, near-field beamforming is able to focus the beam energy on a specific location, where energy focusing in both the angle and distance domains is achievable, as shown in the bottom left figure of Fig. 1. Owing to this property, beamforming in the near-field is also called beamfocusing.
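To make the two wavefront models concrete, the following Python sketch builds the far-field steering vector (linear phase) and the near-field focusing vector (spherical phase) for a uniform linear array. The array size, carrier frequency, and evaluation distances below are illustrative assumptions, not values taken from the article.

```python
import numpy as np

c = 3e8                          # speed of light (m/s)
f = 28e9                         # assumed carrier frequency (Hz)
lam = c / f                      # wavelength
N = 256                          # assumed number of BS antennas
d = lam / 2                      # half-wavelength spacing
n = np.arange(N) - (N - 1) / 2   # symmetric antenna indices

def far_field_vector(theta):
    """Planar wavefront: the phase is linear in the antenna index."""
    return np.exp(-1j * 2 * np.pi / lam * n * d * np.sin(theta)) / np.sqrt(N)

def near_field_vector(theta, r):
    """Spherical wavefront: the phase follows the exact per-antenna distance."""
    r_n = np.sqrt(r**2 + (n * d)**2 - 2 * r * n * d * np.sin(theta))
    return np.exp(-1j * 2 * np.pi / lam * (r_n - r)) / np.sqrt(N)

# A near-field beam focused at (0 rad, 10 m) loses gain a few meters away
# at the same angle: this distance selectivity is the beamfocusing property.
w = near_field_vector(0.0, 10.0)
for r in (5.0, 10.0, 20.0):
    gain = np.abs(np.vdot(w, near_field_vector(0.0, r))) ** 2
    print(f"r = {r:5.1f} m: normalized gain = {gain:.2f}")
```

By contrast, a far-field steering vector carries no distance information at all, so the planar-wave model predicts the same gain at every distance along the steered angle, which is exactly the beamsteering behavior shown in the bottom right of Fig. 1.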
The differences between far-field planar wavefronts and near-field spherical wavefronts bring several challenges and potentials to wireless communications, which will be detailed in the following sections.

# B. Rayleigh Distance

The most crucial premise for near-field communications is quantifying the boundary between the far-field and near-field regions, i.e., the Rayleigh distance. Generally, the classical Rayleigh distance is proportional to the square of the array aperture and the inverse of the wavelength. Its derivation can be summarized as follows [4]. The true phase of the EM wave impinging on a BS antenna has to be calculated based on the accurate spherical wave model. In far-field scenarios, this phase is usually approximated by its first-order Taylor expansion based on the planar wavefront model. This approximation results in a phase discrepancy, which increases when the distance decreases. When the largest phase discrepancy among all BS and UE antennas reaches $\pi /8$, the distance between the BS array center and the UE array center is defined as the Rayleigh distance. Accordingly, if the communication distance is shorter than the Rayleigh distance, the largest phase discrepancy will be larger than $\pi /8$. In this case, the far-field approximation becomes inaccurate, and thus the near-field propagation needs to be utilized.

Fig. 2: Near-field ranges for typical scenarios.
Based on this definition, the near-field ranges for SIMO, MISO, and MIMO communication systems can be obtained. As illustrated in Fig. 2, the near-field range of SIMO/MISO scenarios is precisely determined by the classical Rayleigh distance, which is proportional to the square of the BS array aperture. For the MIMO scenario, since ELAAs are employed at both sides of the BS-UE link, both the BS array aperture and the UE array aperture contribute to the Rayleigh distance, i.e., the near-field range is proportional to the square of the sum of the BS array aperture and the UE array aperture.

Interestingly enough, we further extend the conventional Rayleigh distance derived in SIMO/MISO/MIMO systems to that in RIS-aided communication systems, as shown in Fig. 2. Unlike SIMO/MISO/MIMO channels with a direct BS-UE link, the cascaded BS-RIS-UE channel in RIS systems comprises the BS-RIS and RIS-UE links. Therefore, when calculating the phase discrepancy, the BS-RIS distance and the RIS-UE distance need to be added. Then, enforcing the largest phase discrepancy of $\pi /8$, the near-field range in RIS systems is determined by the harmonic mean of the BS-RIS distance and the RIS-UE distance, as shown in Fig. 2. It can be further implied from Fig. 2 that, as long as either of these two distances is shorter than the Rayleigh distance, RIS-aided communication is operating in the near-field area. Therefore, near-field propagation is more likely to happen in RIS systems.

With the dramatically increased number of antennas and carrier frequency, the near-field range of ELAA considerably expands. For instance, we have recently fabricated a 0.36-meter-aperture ELAA at $28\mathrm{GHz}$. If it is employed in SIMO/MISO scenarios, its near-field range is about 25 meters. When both the transmitter and receiver are equipped with this array, the near-field range becomes 100 meters. Moreover, if this ELAA works as a RIS with a BS-RIS distance of 50 meters, near-field propagation should be assumed once the RIS-UE distance is shorter than 50 meters. In summary, near-field communications will be an indispensable part of future 6G.
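The numbers quoted above are easy to reproduce. The sketch below implements the classical Rayleigh distance $d_R = 2D^2/\lambda$ and a harmonic-mean-style effective distance for the RIS case as stated in the text; the threshold used in `ris_in_near_field` is a simplification for illustration, not the article's exact derivation.

```python
C = 3e8  # speed of light (m/s)

def rayleigh_distance(aperture_m, freq_hz):
    """Classical Rayleigh (Fraunhofer) distance: 2 * D^2 / wavelength."""
    return 2 * aperture_m**2 * freq_hz / C

# SIMO/MISO with the 0.36 m aperture ELAA at 28 GHz:
print(rayleigh_distance(0.36, 28e9))         # ~24 m, i.e. "about 25 meters"

# MIMO: the BS and UE apertures add before squaring:
print(rayleigh_distance(0.36 + 0.36, 28e9))  # ~97 m, i.e. "about 100 meters"

def ris_in_near_field(d_bs_ris, d_ris_ue, aperture_m, freq_hz):
    """RIS case: compare the harmonic-mean-style effective distance of the
    two hops against the Rayleigh distance (illustrative threshold)."""
    d_eff = d_bs_ris * d_ris_ue / (d_bs_ris + d_ris_ue)
    return d_eff < rayleigh_distance(aperture_m, freq_hz)

# With a 50 m BS-RIS hop, the boundary falls near a 50 m RIS-UE hop,
# roughly consistent with the example in the paragraph above.
print(ris_in_near_field(50.0, 40.0, 0.36, 28e9))  # True
```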
# III. CHALLENGES OF NEAR-FIELD COMMUNICATIONS

The near-field propagation causes several challenges to wireless communications, i.e., existing 5G transmission methods specific to the far-field suffer from severe performance loss in near-field areas. Technologies recently developed for addressing these challenges are discussed in this section.

Fig. 3: Near-field codebook with non-uniform grids.

# A. Near-Field Channel Estimation

Challenge: Accurate channel estimation is required to attain the expected performance gain of ELAA. As the number of channel paths is usually much smaller than that of antennas, channel estimation methods with low pilot overhead generally design suitable codebooks to transform the channel into a sparse representation. For the far-field codebook, each codeword corresponds to a planar wave associated with one incident angle. Ideally, each far-field path can be represented by only one codeword. With this far-field codebook, the angle-domain representation of the channel can be obtained, and it is usually sparse due to the limited paths. Then, beam training and compressed sensing (CS) methods are applied to accurately estimate far-field channels with low pilot overhead. However, this far-field planar-wave codebook mismatches the actual near-field spherical-wave channel. This mismatch means that a single near-field path has to be jointly described by multiple codewords of the far-field codebook. Accordingly, the near-field angle-domain channel is not sparse anymore, which inevitably leads to the degradation of channel estimation accuracy. Therefore, near-field codebooks suitable for near-field channels need to be carefully designed.

Fig. 4: This figure illustrates the far-field beam split effect (left) and the near-field beam split effect (right). Far-field beam split makes beams at different frequencies transmit towards different directions, while near-field beam split causes beams at different frequencies to be focused on different locations.

Recent progress: Some recent works have endeavored to design near-field codebooks utilizing spherical wavefronts [5], [6]. In [6], the entire two-dimensional physical space is uniformly divided into multiple grids. Each grid is associated with a near-field array response vector, and all of these vectors construct the near-field codebook. With this codebook, the joint angle-distance information of each near-field path is extracted. Then, the near-field channel can be estimated by CS methods with low pilot overhead. However, with the decrease of the BS-UE distance, the near-field propagation becomes dominant, and the distance information gradually becomes more crucial. Therefore, we can conceive the intuition that the grids should be sparse far away from the ELAA but dense near the ELAA. Without considering this intuition, the codebook in [6] can hardly attain satisfactory channel estimation performance in the entire near-field region. To this end, by minimizing the largest coherence among codewords in the near-field codebook, the authors in [5] mathematically prove this intuition, i.e., the angle space could be uniformly divided, while the distance space should be non-uniformly divided. As shown in Fig. 3, the shorter the distance, the denser the grid. With the help of this non-uniform codebook, a polar-domain sparse channel representation and corresponding CS-based algorithms are proposed in [5] to accomplish accurate channel estimation in both near- and far-field areas.
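The non-uniform grid of such a codebook is simple to sketch: sample the angle axis uniformly (in the sine domain, as usual for uniform linear arrays) and sample the distance axis uniformly in inverse distance, so the rings become denser as they approach the array. The grid sizes and distance limits below are illustrative assumptions; the coherence-minimizing design itself is given in [5].

```python
import numpy as np

def polar_domain_grid(n_angles=64, n_rings=8, r_min=3.0, r_max=300.0):
    """Angle axis: uniform in sin(theta). Distance axis: uniform in 1/r,
    which yields sparse rings far from the ELAA and dense rings close to
    it, matching the intuition proved in [5]."""
    angles = np.arcsin(np.linspace(-1.0, 1.0, n_angles, endpoint=False))
    rings = 1.0 / np.linspace(1.0 / r_max, 1.0 / r_min, n_rings)
    return angles, rings

angles, rings = polar_domain_grid()
print(np.round(rings, 1))  # ring spacing shrinks as the distance decreases
```

Each (angle, ring) pair would then index one near-field array response codeword, and the resulting dictionary supports the polar-domain sparse representation used by the CS-based estimators in [5].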
# B. Near-Field Beam Split

Challenge: In THz wideband systems, ELAA might encounter a beam split phenomenon, also known as beam squint and the spatial-wideband effect. Existing THz beamforming architectures often employ analog phase-shifters (PSs) [7], which usually tune the same phase shift for signals at different frequencies. Nonetheless, the actual phase of the EM wave is the product of the signal propagation delay and the frequency-dependent wavenumber. As a result, the signal propagation delay can be adequately compensated through a phase shift only for a narrowband signal. Phase errors are introduced for the other frequencies, thus causing the beam split effect.

In fact, the impact of beam split on far-field and near-field propagations also differs.

In the far-field, beam split leads to the fact that beams at different frequencies are transmitted towards different angles, as shown in the left figure of Fig. 4. For near-field beam split, however, beams are focused at both different angles and various distances due to the split of spherical waves, as shown in the right figure of Fig. 4. Both far-field and near-field beam splits severely reduce the received signal energy of frequency components misaligned with the user location. Over the years, extensive works have been proposed to mitigate far-field beam split by tuning frequency-dependent phase shifts with planar wavefronts through true-time-delay-based (TTD-based) beamforming instead of PS-based beamforming. Unfortunately, owing to the discrepancy between planar and spherical waves, these schemes addressing the far-field beam split no longer work well in the near-field, posing challenges to THz ELAA communications.

Recent progress: Recently, a few efforts have tried to overcome the near-field beam split effect. In [8], a variant of the chirp sequence is utilized to design the phase shifts, flattening the beamfocusing gain across frequencies at the sacrifice of the maximum beamfocusing gain. This method can slightly alleviate the near-field beam split effect, but its spectral efficiency degrades as well when the bandwidth is very large, as the beams are still generated by PSs. To this end, a phase-delay focusing (PDF) method is proposed in [9] exploiting TTD-based beamforming. Specifically, the BS ELAA is first partitioned into multiple sub-arrays. The UE is assumed to be located in the far-field area of each small sub-array but within the near-field range of the ELAA. Then, one TTD line is inserted between each sub-array and the radio-frequency (RF) chain to realize frequency-dependent phase shifts. Finally, the frequency-dependent phase variations across different sub-arrays induced by spherical wavefronts are compensated by the inserted TTD lines. As a result, beams over the working band are focused at the target UE location [9].

In conclusion, the first solution [8] follows the PS-based beamforming, which is easy to implement, but the achievable performance is unsatisfactory. The second scheme [9] can nearly eliminate the near-field beam split effect but requires the implementation of TTD lines. In fact, although deploying TTD lines by optical fibers has been demonstrated in the optical domain, this kind of deployment is non-trivial to extend to THz ELAA communications. Fortunately, recent advances in graphene-based plasmonic waveguides provide low-cost solutions for implementing TTD lines at high frequencies [7].
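A minimal sketch of the sub-array TTD idea is given below: phase shifters handle the (locally planar) steering inside each sub-array, while one true-time delay per sub-array absorbs the spherical-wavefront offset between sub-array centers, which is what makes the compensation frequency-independent. The geometry and delay rule here are illustrative assumptions rather than the exact design of [9].

```python
import numpy as np

c = 3e8
fc = 100e9                  # assumed THz carrier (Hz)
lam = c / fc
K, M = 16, 64               # assumed: K sub-arrays of M antennas each
d = lam / 2                 # antenna spacing

def subarray_delays(theta, r):
    """One true-time delay per sub-array: match the propagation delay from
    the focal point (theta, r) to each sub-array center, so that the
    compensation holds at every frequency rather than only at fc."""
    centers = (np.arange(K) - (K - 1) / 2) * M * d
    r_k = np.sqrt(r**2 + centers**2 - 2 * r * centers * np.sin(theta))
    return (r_k - r) / c    # delays (seconds) realized by the K TTD lines

delays = subarray_delays(np.deg2rad(10.0), 8.0)
# A TTD line applies the phase -2*pi*f*delay at frequency f, i.e. exactly
# the frequency-dependent phase shift that plain phase shifters cannot give.
print(delays * 1e12)        # delays in picoseconds
```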
| 96 |
+
# IV. POTENTIALS FOR NEAR-FIELD COMMUNICATIONS
Unlike the aforementioned works dealing with the performance degradation in the near-field, some recent studies have surprisingly revealed that 6G networks can also benefit from near-field propagation. In this section, we discuss those studies exploiting the potential of near-field propagation to improve communication performance.
# A. Capacity Enhancement
Potential: The spatial multiplexing gain of MIMO communications considerably increases with the transition from far-field to near-field regions. In far-field MIMO communications, the line-of-sight (LoS) channel can be represented by a rank-one matrix, so the spatial degrees-of-freedom (DoFs) are very limited. By contrast, the near-field LoS channel can be of sufficient rank, as derived from the geometric relationship under the spherical propagation model. The increased rank indicates dramatically improved spatial DoFs in the near-field region. Precisely, based on the expansion of prolate spheroidal wave functions, it is proved in [10] that the near-field spatial DoFs are proportional to the product of the BS and UE array apertures and inversely proportional to the BS-UE distance. This conclusion is further improved in [11] by meticulously designing the beamfocusing vectors of the BS and UE arrays. As shown in Fig. 5, the DoFs increase from 1 to 20 when the BS-UE distance decreases from 350 meters to 10 meters. Thanks to the increased DoFs, the near-field LoS path enables simultaneous transmission of multiple data streams by MIMO precoding, as opposed to the rank-one far-field LoS channel supporting only one data stream. The increased spatial DoFs can be exploited as an additional spatial multiplexing gain, which offers a new possibility for significant capacity enhancement.
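
A back-of-the-envelope version of this scaling can be written down directly. The paraxial approximation DoF ≈ L_BS · L_UE / (λd) is the commonly cited form of the result in [10]; the apertures and carrier below are our own illustrative values, not those behind Fig. 5.

```python
# Rough check of the DoF scaling stated above, using the paraxial form
# DoF ~ L_bs * L_ue / (lambda * d); all numbers here are assumed examples.
c, fc = 3e8, 30e9
lam = c / fc                  # 1 cm wavelength
L_bs, L_ue = 2.0, 0.5         # BS / UE linear apertures (m), illustrative

for d in (350, 100, 10):
    dof = max(1, round(L_bs * L_ue / (lam * d)))
    print(f"d = {d:3d} m -> spatial DoF ~ {dof}")
# Shrinking d from hundreds of meters to tens of meters grows the LoS DoF
# from 1 to roughly 10, mirroring the trend shown in Fig. 5.
```
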
Recent progress: Recently, some novel precoding architectures have been proposed to leverage these extra near-field DoFs for MIMO capacity enhancement [12], [13]. First, distance-aware precoding (DAP) is developed in [12]. Unlike classical hybrid precoding with a fixed and limited number of RF chains (e.g., 2 or 4), the DAP architecture can flexibly adjust the number of RF chains to match the distance-dependent DoFs, which is achieved by deploying a selection network that configures each RF chain as active or inactive. For instance, in the far-field region, only one RF chain is activated for data transmission; when the communication distance decreases to 10-20 meters, around 20 activated RF chains are enough to match the DoFs, as shown in Fig. 5. By doing so, the number of transmitted data streams dynamically matches the DoFs. Simulations demonstrate that DAP can significantly increase the spectral efficiency while its energy efficiency remains comparable with hybrid precoding.



Fig. 5: The spatial DoF increases in the near-field region.

To avoid the utilization of extra RF chains, another effort to harvest the potential spatial multiplexing gain in near-field areas is the widely-spaced multi-subarray (WSMS) precoding [13]. In this architecture, the sub-arrays are widely spaced to enlarge the array aperture, artificially expanding the near-field region. Compared with classical hybrid precoding, the number of sub-arrays and the sub-array spacing must additionally be designed in the WSMS architecture. To this end, [13] first assumes planar-wave propagation within each sub-array and spherical-wave propagation across different sub-arrays, similar to [9]. Then, [13] jointly optimizes the number of sub-arrays, their spacing, and the precoding matrix to maximize the achievable rate. Simulations demonstrate that WSMS can achieve nearly $200\%$ higher spectral efficiency than classical hybrid precoding.
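
As a minimal sketch of the distance-aware idea (the function name, apertures, and RF-chain cap below are our own assumptions, not the design in [12]), the controller only needs to map an estimated user distance to a number of active RF chains:

```python
# Sketch of the distance-aware rule in DAP [12]: activate only as many RF
# chains as the distance-dependent DoF supports. Names and values are
# illustrative assumptions.
def active_rf_chains(d_m, lam=0.01, L_bs=2.0, L_ue=0.5, n_rf_max=32):
    dof = max(1, round(L_bs * L_ue / (lam * d_m)))   # DoF estimate as above
    return min(dof, n_rf_max)                        # one stream per active chain

for d_m in (350, 50, 10):
    print(f"d = {d_m:3d} m -> activate {active_rf_chains(d_m)} RF chain(s)")
```
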
# B. Accessibility Improvement
Potential: Near-field propagation is also able to improve accessibility in multi-user (MU) communications. To increase the spectral efficiency of MU-MIMO communications, space division multiple access (SDMA) is widely used to distinguish users through orthogonal or near-orthogonal spatial beams, so that multiple users can share the same time and frequency resources. Far-field SDMA utilizes beamsteering to generate beams with planar wavefronts, which can distinguish users at different angles. The downside is that users located at similar angles severely interfere with each other and thus cannot simultaneously access the network through far-field SDMA. Fortunately, near-field beamfocusing is capable of focusing energy in the joint angle-distance domain. Hence, near-field SDMA can generate beams with spherical wavefronts to simultaneously serve users located at similar angles but different distances, as shown in Fig. 6. The distance information of spherical wavefronts provides a new resolvable dimension for multi-user access, which is not achievable with conventional far-field SDMA.
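
The decoupling in distance can be checked numerically. In the sketch below (geometry and array size are assumed, not taken from [14]), two users share the same angle; under the exact spherical-wavefront model their focusing vectors are nearly orthogonal, whereas a planar-wavefront model would render them identical.

```python
import numpy as np

# Two users at the same angle but different ranges are nearly orthogonal
# under exact spherical-wavefront focusing; all values are assumed examples.
c, fc = 3e8, 100e9
lam = c / fc
N = 512
y = (np.arange(N) - (N - 1) / 2) * lam / 2            # ULA along the y-axis

def focus_vec(r, theta):
    # conjugate of the exact spherical-wave array response to point (r, theta)
    dist = np.sqrt((r * np.cos(theta)) ** 2 + (r * np.sin(theta) - y) ** 2)
    return np.exp(-1j * 2 * np.pi * dist / lam) / np.sqrt(N)

u1 = focus_vec(5.0, np.deg2rad(60))    # user 1 at 5 m
u2 = focus_vec(12.0, np.deg2rad(60))   # user 2 at 12 m, same angle
print(f"|correlation| = {abs(np.vdot(u1, u2)):.3f}")   # close to 0 in the near field
# Under a planar (far-field) model the two vectors would be identical.
```
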

Fig. 6: Near-field beamfocusing is able to serve multiple users at the same angle.
Recent progress: Taking advantage of this beamfocusing capability, the authors in [14] studied near-field multi-user transmission considering fully-digital precoding, hybrid precoding, and a transmissive reconfigurable metasurface (RMS). By optimizing the sum rate of the multi-user system through alternating optimization, all considered precoding architectures naturally generate beams with spherical wavefronts that distinguish users located at similar angles but different distances. The simulation results demonstrate the potential of near-field propagation to enhance multi-user accessibility.
# V. FUTURE RESEARCH DIRECTIONS
In this section, several future research directions for near-field communications are pointed out.
# A. Near-Field Communication Theory
1) Improvement of Rayleigh Distance: As a widely adopted quantification of the near-field range, the Rayleigh distance is derived in terms of phase discrepancy. For communication metrics directly affected by phase discrepancy, such as channel estimation accuracy, the Rayleigh distance accurately captures the degradation of these metrics when far-field transmission schemes are applied in the near-field region. On the contrary, some metrics are directly influenced by factors other than phase discrepancy; e.g., capacity is determined by beamforming gain and channel rank. Accordingly, the classical Rayleigh distance probably cannot capture the performance loss of these metrics well. To this end, several recent works have endeavored to improve the classical Rayleigh distance with respect to some vital communication metrics. For instance, an effective Rayleigh distance (ERD) is derived in [9] for an accurate description of beamforming gain loss and capacity loss. Nevertheless, the ERD is only valid for MISO communications, and further work is needed to improve the Rayleigh distance in more practical scenarios under more general metrics, e.g., channel rank and energy efficiency in MIMO and RIS systems.
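
For reference, the classical boundary itself is one line of arithmetic: the Rayleigh distance is $2D^2/\lambda$ for an aperture $D$, so it grows linearly with frequency. The 0.5 m aperture below is an assumed example:

```python
# Classical Rayleigh distance 2*D^2/lambda, the phase-discrepancy boundary
# discussed above; the aperture value is an assumed example.
c = 3e8

def rayleigh_distance(D_m, f_hz):
    return 2 * D_m ** 2 * f_hz / c        # = 2*D^2/lambda, linear in frequency

for f in (3.5e9, 30e9, 100e9):
    print(f"D = 0.5 m, f = {f / 1e9:5.1f} GHz -> {rayleigh_distance(0.5, f):7.1f} m")
# 5.8 m at 3.5 GHz, 50 m at 30 GHz, 166.7 m at 100 GHz: the near-field region
# expands with frequency, which also underpins the hybrid-field scenario below.
```
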
# B. Near-Field Transmission Technologies
1) AI-Aided Near-Field Communications: Different from far-field communications, the transmission algorithms for near-field communications are more complex. Specifically, since extra grids in the distance domain are required, as mentioned in Section III-A [5], the size of near-field codebooks is usually much larger than that of far-field codebooks, leading to high-complexity channel estimation and beamforming. Moreover, the non-linear phase characteristics of spherical waves make the design of near-field beam training and precoding algorithms more complicated than in far-field areas. AI-based transmission methods are promising for addressing these problems since they can mine the features of near-field environments through non-linear neural networks. Currently, there are plenty of works elaborating on AI-based far-field transmissions (references are not provided here since the number of references in this magazine is limited), while AI-based near-field transmissions have not been well studied.
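
The codebook blow-up is easy to quantify; the grid sizes below are illustrative assumptions, not values from [5]:

```python
# Rough codebook-size comparison motivating AI-aided designs: a near-field
# (polar-domain) codebook samples distance rings as well as angles.
n_angle = 1024        # e.g., one angle grid point per antenna of a 1024-element ELAA
n_dist = 12           # assumed number of distance rings per angle

print(f"far-field codewords : {n_angle}")
print(f"near-field codewords: {n_angle * n_dist} ({n_dist}x more to train and search)")
```
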
2) RIS-Aided Near-Field Communications: Compared with MIMO communications, near-field propagation becomes even more dominant and complex in RIS-aided systems. In MIMO communications, under the spherical propagation model, the EM waves form spherical equiphase surfaces at the receiver. By contrast, in RIS-aided systems, the phase of the received EM waves is accumulated over the propagation delays of the BS-RIS and RIS-UE links. From the geometric relationship, the equiphase surfaces in the near-field range become ellipsoidal instead of spherical. Accordingly, research on beamfocusing [15], channel estimation, and multiple access techniques taking this ellipsoidal-equiphase property into account is required for RIS-aided near-field communications.

3) Hybrid Far- and Near-Field Communications: Practical communication environments usually contain both far-field and near-field signal components. First, in multi-user systems with multi-path channels, some users and scatterers may be far away from the BS while others are located in its near-field region, which constitutes a hybrid far- and near-field (hybrid-field) communication scenario. Additionally, it is worth mentioning that the Rayleigh distance is proportional to frequency. Thus, in an ultra-wideband or frequency-hopping system with a very large frequency span, the near-field range varies dramatically across the bandwidth: signal components at low frequencies may operate in far-field regions while those at high frequencies, with larger Rayleigh distances, propagate in near-field areas, which also gives rise to hybrid-field communications. Consequently, the above factors make hybrid-field communications practical and crucial in future 6G networks, so hybrid-field transmission techniques handling both far-field and near-field signal components deserve in-depth study.

4) Spatial Non-Stationarity Effect on Near-Field Communications: Besides near-field propagation, the spatial non-stationarity effect is another fundamental characteristic of ELAA compared to 5G massive MIMO, whereby different scatterers and users are visible to different portions of the ELAA. This effect means that only a part of the ELAA can receive the spherical EM waves radiated by a scatterer or a user, and the angular power spectrum and average received power vary rapidly over the ELAA. Recently, there have been intensive works dealing with the non-stationarity effect and near-field propagation simultaneously [6]. However, the impact of non-stationarity on other emerging near-field communications, such as RIS-aided systems and hybrid-field communications, has not been well studied.
# C. Hardware Development
To verify the effectiveness of near-field transmission technologies, hardware development and over-the-air experiments are of great significance. For example, to alleviate the near-field beam split effect, TTD lines need to be meticulously designed in the THz domain, and hardware developments of the WSMS and DAP architectures are worth carrying out to exploit the near-field spatial DoFs. Besides, implementing these techniques still has to overcome several hardware impairment issues, including in-phase/quadrature imbalance, low-efficiency power amplifiers at high frequencies, etc. All these challenges should be carefully addressed to enable the implementation of 6G near-field communications.
# VI. CONCLUSIONS
With the evolution from massive MIMO to ELAA, near-field propagation with spherical wavefronts becomes indispensable in 6G networks, where conventional far-field propagation with planar wavefronts is no longer valid. In this article, we revealed that near-field propagation is a double-edged sword, i.e., it brings both challenges and potentials to 6G communications. We first introduced the non-linear phase property of spherical waves and explained the derivation of the near-field range in terms of phase discrepancy. Then, we discussed the technical challenges of channel estimation and beam split caused by near-field propagation and presented recent solutions. In addition, some appealing works that exploit the capability of spherical waves to improve capacity and accessibility were investigated. Several future research directions for near-field communications, such as the improvement of the Rayleigh distance and hybrid-field transmissions, were also highlighted, which are expected to inspire more innovations in 6G near-field communications.
# ACKNOWLEDGEMENT
This work was supported in part by the National Key Research and Development Program of China (Grant No. 2020YFB1807201) and in part by the National Natural Science Foundation of China (Grant No. 62031019).
# REFERENCES
[1] W. Saad, M. Bennis, and M. Chen, "A vision of 6G wireless systems: Applications, trends, technologies, and open research problems," IEEE Network, vol. 34, no. 3, pp. 134–142, May 2020.

[2] M. Uusitalo, P. Rugeland, M. Boldi, E. C. Strinati, and Y. Zou, "RFocus: Beamforming using thousands of passive antennas," in Proc. 17th USENIX Symposium on Networked Systems Design and Implementation (NSDI '20), Feb. 2020.

[3] I. F. Akyildiz and J. M. Jornet, "Realizing ultra-massive MIMO (1024×1024) communication in the (0.06–10) Terahertz band," Nano Communication Networks, vol. 8, pp. 46–54, 2016.

[4] K. T. Selvan and R. Janaswamy, "Fraunhofer and Fresnel distances: Unified derivation for aperture antennas," IEEE Antennas Propag. Mag., vol. 59, no. 4, pp. 12–15, Aug. 2017.

[5] M. Cui and L. Dai, "Channel estimation for extremely large-scale MIMO: Far-field or near-field?" IEEE Trans. Commun., vol. 70, no. 4, pp. 2663–2677, Jan. 2022.

[6] Y. Han, S. Jin, C. Wen, and X. Ma, "Channel estimation for extremely large-scale massive MIMO systems," IEEE Wireless Commun. Lett., vol. 9, no. 5, pp. 633–637, May 2020.

[7] A. Singh, M. Andrello, E. Einarsson, N. Thawdar, and J. M. Jornet, "A hybrid intelligent reflecting surface with graphene-based control elements for THz communications," in Proc. 2020 IEEE 21st International Workshop on Signal Processing Advances in Wireless Communications (SPAWC), May 2020, pp. 1–5.

[8] N. J. Myers and R. W. Heath, "InFocus: A spatial coding technique to mitigate misfocus in near-field LoS beamforming," IEEE Trans. Wireless Commun., vol. 21, no. 4, pp. 2193–2209, Apr. 2022.

[9] M. Cui, L. Dai, R. Schober, and L. Hanzo, "Near-field wideband beamforming for extremely large antenna array," arXiv preprint arXiv:2109.10054, Sep. 2021.

[10] D. A. Miller, "Waves, modes, communications, and optics: A tutorial," Adv. Opt. Photon., vol. 11, no. 3, pp. 679–825, Sep. 2019.

[11] N. Decarli and D. Dardari, "Communication modes with large intelligent surfaces in the near field," IEEE Access, vol. 9, pp. 165648–165666, Sep. 2021.

[12] Z. Wu, M. Cui, Z. Zhang, and L. Dai, "Distance-aware precoding for near-field capacity improvement in XL-MIMO," in Proc. 2022 IEEE 95th Vehicular Technology Conference (VTC2022-Spring), 2022, pp. 1–5.

[13] L. Yan, Y. Chen, C. Han, and J. Yuan, "Joint inter-path and intra-path multiplexing for terahertz widely-spaced multi-subarray hybrid beamforming systems," IEEE Trans. Commun., vol. 70, no. 2, pp. 1391–1406, Feb. 2022.

[14] H. Zhang, N. Shlezinger, F. Guidi, D. Dardari, M. F. Imani, and Y. C. Eldar, "Beam focusing for near-field multi-user MIMO communications," IEEE Trans. Wireless Commun., 2022.

[15] K. Dovelos, S. D. Assimonis, H. Quoc Ngo, B. Bellalta, and M. Matthaiou, "Intelligent reflecting surfaces at terahertz bands: Channel modeling and analysis," in Proc. 2021 IEEE International Conference on Communications Workshops (ICC Workshops), Jun. 2021, pp. 1–6.
# BIOGRAPHIES
Mingyao Cui is an M.S. researcher with BNRist, Tsinghua University, Beijing, China.

Zidong Wu is a Ph.D. researcher with BNRist, Tsinghua University, Beijing, China.

Yu Lu is a Ph.D. researcher with BNRist, Tsinghua University, Beijing, China.

Xiuhong Wei is an M.S. researcher with BNRist, Tsinghua University, Beijing, China.

Linglong Dai is an associate professor at Tsinghua University. His current research interests include RIS, massive MIMO, mmWave and THz communications, and machine learning for wireless communications. He has received six conference best paper awards and four journal best paper awards.
2203.16xxx/2203.16318/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:964119a10c8da52ac0a4e3e8a5b53ab231da5ce3cac687f8aa4e6ae56085e2d8
size 302645
2203.16xxx/2203.16318/layout.json
ADDED
The diff for this file is too large to render.
See raw diff
2203.16xxx/2203.16365/2f6d35a2-62f5-42ff-899d-7af22007695b_content_list.json
ADDED
The diff for this file is too large to render.
See raw diff
2203.16xxx/2203.16365/2f6d35a2-62f5-42ff-899d-7af22007695b_model.json
ADDED
The diff for this file is too large to render.
See raw diff
2203.16xxx/2203.16365/2f6d35a2-62f5-42ff-899d-7af22007695b_origin.pdf
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:ea064c5c3ed8bd4378d251cd29dbe980d5d855a56f9092526a2e6b5eca4ee015
size 2165228
2203.16xxx/2203.16365/full.md
ADDED
@@ -0,0 +1,562 @@
# RESEARCH

# Open Access

# IGRF-RFE: a hybrid feature selection method for MLP-based network intrusion detection on UNSW-NB15 dataset



Yuhua Yin $^{1*}$ , Julian Jang-Jaccard $^{1}$ , Wen Xu $^{1}$ , Amardeep Singh $^{1}$ , Jinting Zhu $^{1}$ , Fariza Sabrina $^{2}$ and Jin Kwak $^{3}$

*Correspondence: yuhua.yin.1@uni.massey.ac.nz

$^{1}$ Comp Sci/Info Tech, Cybersecurity Lab, Massey University, Auckland, New Zealand

$^{2}$ School of Engineering and Technology, Central Queensland University, Sydney, Australia

$^{3}$ Department of Cyber Security, Ajou University, Suwon, Republic of Korea

# Abstract
The effectiveness of machine learning models can be significantly degraded by the redundant and irrelevant features present in large datasets. This paper proposes IGRF-RFE: a hybrid feature selection method for multi-class network anomaly detection using a multilayer perceptron (MLP) network. IGRF-RFE exploits the qualities of both a filter method, for its speed, and a wrapper method, for its relevance search. In the first phase of our approach, we use a combination of two filter methods, information gain (IG) and random forest (RF), to reduce the feature subset search space. By combining these two filter methods, the influence of less important features with high-frequency values selected by IG is more effectively managed by RF, so that more relevant features are included in the feature subset search space. In the second phase of our approach, we use a machine learning-based wrapper method, recursive feature elimination (RFE), to further reduce the feature dimension while taking the relevance of similar features into account. Our experimental results obtained on the UNSW-NB15 dataset confirm that our proposed method can improve the accuracy of anomaly detection, as it selects more relevant features while reducing the feature space. The results show that the number of features is reduced from 42 to 23 while the multi-classification accuracy of the MLP is improved from $82.25\%$ to $84.24\%$ .

# Introduction
The Internet has dramatically changed the way people communicate, work, build businesses, and live their daily lives. However, with the increasing number of network connections and network services, network attacks have become a major challenge for human society. According to Norton's annual security report published in 2021, a network attack occurs every $39\mathrm{~s}$ globally [1]. In terms of attack forms, network attacks can be categorized into active attacks and passive attacks [2]. Active attacks can have a great impact on system usability, the most typical example being a denial-of-service attack. Passive attacks aim to capture important information in computer systems.

To mitigate the risk of different types of attacks, intrusion detection systems have been developed to detect malicious behaviors in the network [3, 4]. An early intrusion detection system was proposed by Denning in 1987, who described a model based on audit records and statistical methods to identify system anomalies [5]. Modern intrusion detection systems can be mainly divided into three categories: signature-based, anomaly-based, and hybrid [6]. Signature-based IDS matches different types of attacks against a pre-specified database of signatures. One of its disadvantages is that it cannot effectively detect unknown attacks because of zero-day attacks and outdated databases. Anomaly-based IDS detects attacks by learning normal and anomalous network behaviors and has better detection capabilities for unknown attacks. However, due to the problems of redundant features and class imbalance in intrusion detection datasets, anomaly-based IDS has been shown to produce false positives. Hybrid IDS combines signature-based and anomaly-based approaches [7].

Network intrusion detection tasks have become more complex in recent years as new network attacks continue to emerge and network data traffic increases. Consequently, machine learning has been widely used in intrusion detection systems because of its ability to learn and identify patterns from complex data through statistical methods and advanced algorithms [8]. Intrusion detection methods based on machine learning can be divided into two categories: supervised learning and unsupervised learning. In supervised learning, machine learning methods such as decision trees and random forests classify network behavior by learning from labeled data [9]. Unsupervised intrusion detection methods such as K-means and hidden Markov models focus on the clustering problem [10] to group network behaviors [11].

Deep learning is a major branch of machine learning based on neural networks with at least two hidden layers. Deep learning is better suited to automatically learning and extracting features from large datasets and has shown promising performance [8, 12-15]. In spite of these advantages, feature engineering still plays an important role in deep learning models when faced with high-dimensional structured data [16]. High-dimensional, redundant, and irrelevant features may cause the model to overfit during the learning process and result in a high false positive rate in a real network environment [17]. There has been a wide range of research applying different feature selection methods to help intrusion detection systems (IDS) improve performance and reduce the false positive rate.

A single feature selection method relies on one assumed importance indicator to eliminate unimportant features. For example, information gain uses the information entropy between features and labels as the feature importance indicator, while random forest judges the importance of features based on multiple decision trees. To avoid biased feature importance metrics, hybrid feature selection methods can combine different metrics to prevent important features from being removed. Hsu et al. also pointed out that hybrid feature selection approaches achieve more stable performance than a single feature selection method [18]. The purpose of this paper is to propose a hybrid feature selection method to help improve the multi-classification performance of intrusion detection systems on the UNSW-NB15 dataset. We propose a hybrid feature selection method named IGRF-RFE, which combines filter and wrapper methods to reduce the feature subset search space and eliminate redundant features.

The contributions of our work are as follows:
- We proposed an MLP-based intrusion detection system using a novel hybrid feature selection method called IGRF-RFE. IGRF-RFE is composed of both filter and wrapper feature selection methods. In the first step, it applies an ensemble feature selection method based on information gain and random forest importance to reduce the feature dimension. This step reduces the feature subset to a reasonable range while referring to two different feature importance metrics. Then, recursive feature elimination (RFE), as a wrapper method, is applied to the reduced feature subset to remove features that negatively impact the actual model performance. An MLP classifier with two hidden layers is used in the RFE and as the final classifier.
- Since UNSW-NB15 contains much duplicated data, feature selection cannot be applied directly, as this can cause overfitting. We analyzed the intrusion detection dataset and removed duplicated data before ranking the features, to avoid the ranking bias typically associated with selecting features that can cause overfitting.
- During data pre-processing, we removed the minority classes of UNSW-NB15. In addition, we employed a resampling technique to ensure a balance between normal and abnormal classes. This avoids another type of overfitting problem, typically associated with classes for which only a limited number of samples is available for training.
- The experimental results obtained on the UNSW-NB15 dataset showed that our proposed model can reduce the feature dimension from 42 to 23 while achieving a detection accuracy of $84.24\%$ , compared to $82.25\%$ before feature selection.

We organized the rest of the paper as follows. In section "Related works", we discuss related works on feature selection methods for intrusion detection systems. In section "Proposed method", we introduce our MLP-based intrusion detection system as well as the IGRF-RFE feature selection method. In section "Experiments and results", we present our experimental details and results. The conclusion and future work are presented in section "Conclusion and future work".
# Related works
In machine learning, feature selection is an important measure that can help eliminate low-value features, avoid overfitting, reduce detection time, and improve model accuracy. Defined by methodology, feature selection methods can be divided into three categories: filter methods, embedded methods, and wrapper methods [19]. Filter methods rank features based on metrics such as statistical measures, information distance, or correlations, and select the best-ranked features [20]. As filter methods are model-independent, feature importance is consistent and does not require recalculation. Embedded methods obtain feature importance scores from tree-based machine learning algorithms such as random forest, C4.5, and XGBoost. After ranking the features by importance, similarly to filter methods, forward feature search or backward feature elimination can be applied to select feature subsets [21]. Wrapper methods evaluate the quality of feature subsets based on their actual performance on machine learning models [22]. Wrapper methods are not model-independent and thus can be based on any model. Since wrapper methods perform actual training on the model for each evaluation of a feature subset, they consume more time and computation than filter methods. To reduce selection time, random search algorithms or other methods are typically used together with the wrapper method.

Zhou et al. proposed a feature selection method, CFS-BA, for intrusion detection systems, based on correlation feature selection and the bat algorithm [23]. The purpose of this method was to find the least redundant feature subset through an optimized random search algorithm. In this study, an ensemble voting classifier based on random forest, C4.5, and Forest PA was used, and experiments were performed on three datasets: NSL-KDD, AWID, and CIC-IDS2017. The results showed that CFS-BA could reduce the number of features of these three datasets to 10, 8, and 13, and improved the binary classification accuracy by $4.5\%$ , $1.3\%$ , and $2.2\%$ , respectively.

The researchers in [24] proposed a filter feature selection method using the Gini index for intrusion detection systems and used a GBDT model as the classifier. In this study, the PSO algorithm was also used to find the optimal hyper-parameters for GBDT. To verify the effectiveness of this model, the authors applied it to the NSL-KDD dataset, and the Gini index method reduced the number of features from 41 to 18. The optimized GBDT classifier achieved an accuracy of $86\%$ with a false positive rate of $3.83\%$ .

Kasongo et al. used XGBoost as an ensemble feature selection method for intrusion detection systems in their research and performed an analysis on the UNSW-NB15 dataset using machine learning models [25]. According to the feature importance ranked by XGBoost, the researchers selected the 19 most important features out of the 42 features. The results showed that in binary classification based on decision trees, XGBoost feature selection improved the accuracy by $1.9\%$ compared with the baseline performance using all features.

Eunice et al. proposed an intrusion detection system using random forests and deep neural networks (DNN) [26]. Their experiments used random forests to select different numbers of features, which were then used with different numbers of DNN layers. The experimental results showed that the best binary classification accuracy was $82.1\%$ , obtained with 20 selected features and 4 DNN layers. However, their experiments did not consider multi-classification performance under their proposed model.

In Prasad et al.'s work, a multi-level correlation-based feature selection was proposed for intrusion detection systems on the UNSW-NB15 dataset [27]. In the two-level feature selection approach, Pearson correlation was used to evaluate feature-to-feature and feature-to-label correlations. If the correlation of a pair of features was larger than 0.9, the redundant feature with the larger mean absolute correlation was removed. In addition, feature-to-label correlation metrics were used for importance filtering. The experiment finally selected 15 features for a decision tree model and achieved a multi-classification accuracy of $95.2\%$ . In their work, instead of the pre-prepared $10\%$ training and test sets, they used the full dataset.

In the research by Alazzam et al. [28], a feature selection method based on the Pigeon Inspired Optimizer (PIO), inspired by the behavior of pigeon flocks, was proposed. The authors proposed an improved PIO algorithm based on cosine similarity, named Cosine PIO, and compared it with Sigmoid PIO. The NSL-KDD, KDDCup99, and UNSW-NB15 datasets were used in the experiments. In binary classification, Cosine PIO performed better than Sigmoid PIO on all three datasets. It selected 5 features in NSL-KDD, 7 features in KDDCup99, and 5 features in UNSW-NB15, and achieved an accuracy of $88.3\%$ on NSL-KDD, $96\%$ on KDDCup99, and $91.7\%$ on UNSW-NB15.

Zhang et al. used the Information Gain and ReliefF feature selection methods in a random forest-based intrusion detection system [29]. They conducted three sets of experiments on the NSL-KDD dataset. The researchers first examined the performance of Information Gain and ReliefF alone and then compared them with their combined method, IG-ReliefF. The IG-ReliefF method first uses IG to reduce the feature dimension and then uses the ReliefF method to rank the importance, which can effectively reduce the time and computation required for feature selection. The experimental results showed that IG-ReliefF could achieve higher accuracy than the individual IG and ReliefF methods.

Megantara et al. implemented a hybrid feature selection method based on Gini importance and recursive feature elimination (RFE) on the NSL-KDD dataset [30]. Gini importance was used as a filter method to rank feature importance, and RFE was used to further optimize the number of features through a decision tree-based wrapper method. Using a decision tree as the classifier, the DoS, Probe, R2L, and U2R classes achieved accuracies of $88.98\%$ , $91.18\%$ , $81.29\%$ , and $99.42\%$ , respectively.

In [31], Ustebay et al. used a random forest-based recursive feature elimination algorithm on CICIDS2017, which contains 80 features. The experiment evaluated the results of selecting 1 to 80 features using recursive feature elimination. The 4 most important features, Source Port, Flow Packet/s, Flow IAT Mean, and Flow IAT Std, were identified, and the MLP-based IDS achieved an accuracy of $89\%$ . Because only a small part of the dataset was used for training, the performance was not high, but the small training set could reflect the generalizability of the model and features in a real network environment.

Zong et al. implemented IG-TS, a random forest-based two-stage IDS, on the UNSW-NB15 dataset [32]. The Information Gain feature selection method was used to reduce features, and SMOTE was used to oversample the minority classes. The first stage of the model focuses on minority class classification, and the second stage focuses on majority class classification. Combining the results of the two stages, IG-TS achieved an accuracy of $85.78\%$ .

Kumar et al. proposed an ensemble intrusion detection system based on multiple tree models (C5, CHAID, CART, QUEST) [33]. In the study, the authors used the UNSW-NB15 dataset for training and generated a real-time dataset to evaluate the performance of the model against unknown attacks. Through Information Gain feature selection, the authors reduced the number of features to 13. They then used the reduced features to detect five majority classes: DoS, Probe, Normal, Generic, and Exploit. The model achieved an accuracy of $83.4\%$ when evaluated in a real network environment.

These existing works exhibit a number of limitations. When decision trees or random forests are used alone, they usually assign the same importance to each of a group of correlated features, where each should receive only a fraction of it, affecting the model interpretability. A combination of filter methods can address the disadvantages of a single method and thus produce better outcomes. Also, the majority of the filter methods used in the existing state-of-the-art are univariate, ranking each feature independently of the rest. As a result, they tend to ignore any interaction that occurs between features, so redundant variables are often not eliminated. To address this issue, a wrapper method can be used to compensate for the univariate nature of the filter method. A wrapper method provides learning-based feature selection that evaluates the pros and cons of features on the actual model. When training (for feature selection), a wrapper method can take into account the relevance of features across the same feature subset space. This capability provides more refined feature selection when the relative relevance across features must be accounted for.
# Proposed method
In this section, we introduce the overview of our proposed model, shown in Fig. 1. The UNSW-NB15 dataset contains 39 numerical features and 3 categorical features and provides a training set and a test set. Since it cannot be fed to the MLP model directly, data pre-processing is applied to encode the dataset. During data pre-processing, we performed data cleaning, minority-class removal, oversampling, encoding, and normalization of the dataset. After data pre-processing, we divided the data into a training set, a validation set, and a test set. The training and validation sets are used in the feature selection and training process, while the test set is used to verify the final performance of the model. Our proposed method has two steps. First, we apply an ensemble feature selection method based on information gain and random forest importance to filter important features. Then we perform recursive feature elimination on the reduced features to further optimize the feature subset. After feature selection, we use the obtained optimal feature subset to train the MLP model. The final performance on the test set demonstrates the effectiveness of our proposed model.

# Ensemble feature selection with information gain and Random Forest Importance

# Information gain
Information gain (IG) is a univariate filter feature selection method based on information entropy [34]. Entropy is a concept in information theory proposed by Shannon [35] and is often used to measure the uncertainty of a variable.



Fig. 1 Our proposed model
When dealing with high-dimensional datasets, there may exist features that are highly skewed or contain little information, which affects machine learning performance. In classification problems, IG feature selection takes the amount of information as the importance metric by calculating the information entropy of each feature. As defined in Eq. 1, the information gain of a feature is equal to the entropy of the class label minus the conditional entropy of the class label given the feature. The formulas for class entropy and conditional class entropy are defined in Eqs. 2 and 3. After calculating the information gain of each feature, the features can be ranked and selected according to this importance metric.

$$
IG (Y, X) = H (Y) - H (Y \mid X) \tag {1}
$$

In Eq. 1, $Y$ represents the class vector and $X$ represents a feature vector.

$$
H (Y) = - \sum_ {i = 1} ^ {n} p \left(y _ {i}\right) \log_ {2} p \left(y _ {i}\right) \tag {2}
$$

In Eq. 2, $n$ is the number of classes in vector $Y$ and $p(y_{i})$ represents the probability of class $y_{i}$ in class vector $Y$ .

$$
H (Y \mid X) = - \sum_ {i = 1} ^ {m} p \left(x _ {i}\right) H (Y \mid X = x _ {i}) \tag {3}
$$

In Eq. 3, $m$ is the number of values contained in the feature vector $X$ and $p(x_{i})$ represents the probability of value $x_{i}$ in the feature vector $X$ .
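
A compact way to see Eqs. 1-3 in action is to compute them directly on a toy frame; the tiny DataFrame and its column names below are fabricated for illustration only.

```python
import numpy as np
import pandas as pd

# Direct computation of Eqs. 1-3 on a toy frame.
def entropy(series):
    p = series.value_counts(normalize=True)
    return float(-(p * np.log2(p)).sum())                 # Eq. 2

def information_gain(df, feature, label):
    h_y = entropy(df[label])                              # H(Y)
    h_y_x = sum((len(g) / len(df)) * entropy(g[label])    # Eq. 3: sum over x_i of
                for _, g in df.groupby(feature))          # p(x_i) * H(Y | X = x_i)
    return h_y - h_y_x                                    # Eq. 1

df = pd.DataFrame({"proto": ["tcp", "tcp", "udp", "udp"],
                   "attack_cat": ["Normal", "DoS", "Normal", "Normal"]})
print(information_gain(df, "proto", "attack_cat"))        # ~0.311 bits
```
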
# Random forest feature importance
Random forest is a machine learning method based on multiple decision trees, which is often used for regression and classification tasks [36]. Different from a single decision tree, random forests add randomness across multiple decision trees to avoid overfitting and obtain better generalizability. When a random forest is used as a classifier (see Fig. 2), it first determines how many trees to build and then uses bootstrap sampling to randomly select a subset of the data for each decision tree. Another part of the randomness comes from the features used by each decision tree; random feature subsets also lead to better generalizability and robustness. After training, the random forest classifier generates the final prediction by voting over the predictions of the individual trees.

A random forest can also be used as an embedded feature selection method. The model produces an importance score for each feature, which can be used to select the most important features and remove features that are unimportant for performance. The feature importance of a random forest mainly depends on the node impurity in its decision trees. When generating a node in a decision tree, a feature's position and priority are determined based on the Gini index or entropy at the node; a lower Gini index or entropy represents less impurity and higher importance. The random forest feature importance computes the impurity decrease of each feature in each tree and averages it into an importance score (see Algorithm 1).



Fig. 2 Random forest classifier
```txt
Algorithm 1: Random Forest Feature Importance
T: trees in random forest $\{t_1, t_2, \ldots, t_m\}$
F: features in dataset $\{f_1, f_2, \ldots, f_n\}$
for i from 1 to n do
    for tree $t \in T$ do
        N: nodes using feature $f_i$ in tree $t$ $\{n_1, n_2, \ldots, n_p\}$
        for node $n \in N$ do
            compute impurity decrease at $n$ as a score s
            weight the score s by the number of samples at $n$
            add the score s to the running total S
        end
    end
    /* get importance for feature $f_i$ */
    $f_i\_importance =$ average of S over all trees $t$ using feature $f_i$
end
```
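
In practice this impurity-based score is what scikit-learn (part of the paper's environment, Table 1) exposes as `feature_importances_`. A minimal sketch, assuming the public UNSW-NB15 training CSV is available locally under the name shown:

```python
import pandas as pd
from sklearn.ensemble import RandomForestClassifier

# Impurity-based importances off the shelf; the CSV name assumes the public
# UNSW-NB15 partition is stored locally, and "label" is its binary column.
df = pd.read_csv("UNSW_NB15_training-set.csv")
X = df.select_dtypes("number").drop(columns=["label"])    # numeric features only
y = df["attack_cat"]

rf = RandomForestClassifier(n_estimators=100, random_state=0).fit(X, y)
importance = pd.Series(rf.feature_importances_, index=X.columns)
print(importance.sort_values(ascending=False).head(10))   # candidates to keep
```
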
# Ensemble feature selection
Ensemble feature selection with information gain (IG) and random forest (RF) importance is the first step of our feature selection method. In this step, the ensemble feature selection method is applied only to the 39 numerical features, and the 3 categorical features are preserved to avoid losing important information. As seen in Fig. 3, the method first pre-processes the input training set to remove duplicate data. Duplicate data may reduce the generalizability of the result because the selected features may overfit the classes or instances with more repetitions. Subsequently, it calculates the importance of each feature using information gain and random forest, respectively. The importance scores are normalized to values between 0 and 1. By ranking and visualizing the importance scores, thresholds are selected to separate obviously unimportant features from the other features [37].



Fig. 3 Workflow of ensemble feature selection with IG and RF

A feature is retained if its importance is larger than the threshold and removed if its importance is lower than the threshold. We assume that significant features could exist in both of the reduced feature subsets selected by the IG and RF metrics, so their union is used for further feature optimization.
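
A minimal sketch of this union rule follows; the scores and thresholds are placeholders (the paper chooses thresholds by inspecting the ranked importance plots), and the feature names are only examples.

```python
# Sketch of the first-stage union rule with placeholder scores/thresholds.
def normalize(scores):
    top = max(scores.values())
    return {f: s / top for f, s in scores.items()}        # scale into [0, 1]

def ensemble_select(ig_scores, rf_scores, ig_thr=0.1, rf_thr=0.1):
    ig_keep = {f for f, s in normalize(ig_scores).items() if s > ig_thr}
    rf_keep = {f for f, s in normalize(rf_scores).items() if s > rf_thr}
    return ig_keep | rf_keep                              # union of both subsets

ig = {"sbytes": 0.9, "dur": 0.4, "swin": 0.01}
rf = {"sbytes": 1.0, "dur": 0.05, "swin": 0.3}
print(sorted(ensemble_select(ig, rf)))                    # ['dur', 'sbytes', 'swin']
```
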
# Recursive feature elimination
Recursive feature elimination (RFE) is the second step in our feature selection method. RFE is a wrapper feature selection method that iteratively evaluates the importance of features based on machine learning performance by recursively eliminating features [38]. RFE removes the least important feature in each iteration until the best performance is obtained or a specified number of features is reached. In our RFE algorithm (see Algorithm 2), the input training set and validation set contain only the reduced numerical features from the first stage plus all categorical features. Other inputs to the algorithm include a positive integer patient $p$ and a list init_features containing the features selected in the first stage. The patient $p$ is introduced to stop RFE in time if better performance cannot be obtained within several iterations, while init_features reduces the search space of RFE so that it does not start from all features. Before recursive feature elimination, the variables must be initialized: $f\_len$ represents the number of initial features, which determines the number of RFE iterations in the worst case; best_performance records the best performance during RFE; keep_features stores the features retained after each iteration; selected_features stores the feature subset with the best performance; and rm_list stores the numerical features removed during RFE. In the process of recursive feature elimination, an empty dictionary performance_dict is initialized at the start of each iteration to store the validation performance of the MLP after eliminating each feature. In the evaluate_elimination function, the score is calculated by averaging the accuracy of 10 experiments, each set with a different random seed. Subsequently, the patient $p$ determines whether to continue RFE. If $p$ is larger than 0, one iteration of RFE is performed and the local best performance of the iteration is obtained. After comparing the local best performance with the global best performance, the global best performance and the selected features are updated.

Algorithm 2: Pseudocode of RFE with MLP

```txt
Input:  Training set T, Validation set V
        patient p                                  /* monitor training performance */
        init_features = [f_1, f_2, f_3, ..., f_n]  /* a list of original features */
Output: selected_features                          /* a list of selected features */
begin
    /* Recursive Feature Elimination */
    for init_features do
        if p > 0 then
            for j in keep_features do
                /* take a feature from the list of features to be evaluated */
                temp_rm.append(keep_features[j])
                /* evaluate the feature performance with MLP */
                score = evaluate_elimination(T, V, temp_rm)
                /* store the score of each elimination */
                performance_dict[keep_features[j]] = score
            end
            /* get the feature name of best performance during elimination */
            max_key = get_max_key(performance_dict)
            rm_list.append(max_key)
            keep_features.remove(max_key)
            /* check if the elimination beats the best performance so far */
            if performance_dict[max_key] > best_performance then
                best_performance = performance_dict[max_key]
                selected_features = keep_features
                increase patient counter
            else
                reduce patient counter
            end
        else
            return selected_features
        end
    end
end
```
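
For concreteness, here is a runnable Python condensation of Algorithm 2 under an assumed helper: `train_eval(rm)` must train the MLP with the features in `rm` removed and return the mean validation accuracy over several random seeds, as described above.

```python
# Runnable condensation of Algorithm 2 with an assumed train_eval helper.
def rfe_mlp(train_eval, init_features, patience=3):
    keep = list(init_features)
    removed, best_score, best_subset, p = [], -1.0, list(keep), patience
    while keep and p > 0:
        # score the elimination of every remaining feature
        scores = {f: train_eval(removed + [f]) for f in keep}
        worst = max(scores, key=scores.get)       # removing it hurts the least
        removed.append(worst)
        keep.remove(worst)
        if scores[worst] > best_score:            # better off without that feature
            best_score, best_subset = scores[worst], list(keep)
            p += 1                                # increase the patient counter
        else:
            p -= 1                                # reduce it; stop once it hits 0
    return best_subset, best_score
```
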
# MLP classifier
# Multilayer perceptron (MLP)
+
MLP is a feed-forward artificial neural network with multiple hidden layers [39] (see Fig. 4). For classification problems, the amount of neurons in the output layer of MLP is equal to the number of classes to be classified while the number of neurons in the input layer is associated with the number of features. The layers between the input and output layers are often fully connected layers and are trained by backpropagation. When performing forward propagation, the network calculates the output of each layer based on
|
| 186 |
+
|
| 187 |
+

|
| 188 |
+
Fig.4 Basic MLP model
|
| 189 |
+
|
| 190 |
+
During forward propagation, the network calculates the output of each layer from the activated output of the previous layer together with the corresponding weight and bias values, as shown in Eq. 4.

$$
Z ^ {[ l ]} = W ^ {[ l ]} A ^ {[ l - 1 ]} + b ^ {[ l ]} \tag {4}
$$

where $Z^{[l]}$ represents the output matrix, $W^{[l]}$ is the weight matrix, and $b^{[l]}$ represents the bias vector.

Since the raw output of an MLP layer can take any value, an activation function is used to normalize it. The activation function transforms the output of each layer into a certain range, as shown in Eq. 5.

$$
A ^ {[ l ]} = g \left(Z ^ {[ l ]}\right) \tag {5}
$$

where $A^{[l]}$ represents the activated output matrix.

In our proposed method, we use ReLU as the activation function for the hidden layers and Softmax as the activation function for the final output layer. ReLU, as defined in Eq. 6, is an activation function that maps values less than zero to 0. The Softmax activation function, as defined in Eq. 7, is usually used for multi-classification; it addresses the shortcomings of the sigmoid function for multi-classification and ensures that the probabilities of the output layer sum to 1, which helps determine the most probable prediction.

$$
a _ {\mathrm {relu}} = \max (0, z) \tag {6}
$$

$$
a _ {\mathrm {soft}} = \frac {e ^ {z _ {i}}}{\sum_ {j = 1} ^ {J} e ^ {z _ {j}}} \tag {7}
$$

where $J$ is the number of classes and $z_{i}$ represents the $i$th output value.

The loss function defined in Eq. 8 is used to calculate the error between the predicted value and the actual value, and backpropagation is then used to adjust the weights $w$ and biases $b$.

$$
L (y, \hat {y}) = \frac {1}{m} \sum_ {i = 1} ^ {m} \left(y _ {i} - \hat {y} _ {i}\right) ^ {2} \tag {8}
$$

where $m$ is the number of samples, $\hat{y}$ is the predicted value, and $y$ is the actual value.
# Batch normalization
For deep learning models, it is important to avoid overfitting. In a deep neural network, if there are too many layers, gradient vanishing or gradient explosion problems may arise, which can affect the performance of the model and cause overfitting. Batch normalization, as defined in Eq. 9, is a method proposed by Ioffe and Szegedy [40] to mitigate gradient explosion and gradient vanishing. After each hidden layer, batch normalization normalizes the corresponding output values to avoid values that are too large or too small: it first subtracts the vector's mean from each output and then divides by the standard deviation. In this study, batch normalization is added after each hidden layer of our MLP model to avoid overfitting.

$$
X _ {\mathrm {i}} = \frac {X _ {\mathrm {i}} - \mathrm {Mean} _ {\mathrm {i}}}{\mathrm {StdDev} _ {\mathrm {i}}} \tag {9}
$$

where $X_{\mathrm{i}}$ is the $i$th hidden layer's output matrix, $\mathrm{Mean_i}$ is the mean value of $X_{\mathrm{i}}$ , and $\mathrm{StdDev_i}$ is the standard deviation of $X_{\mathrm{i}}$ .
# Classification
In this study, we implemented the MLP classifier with two hidden layers using the ReLU activation function, each containing 128 neurons (see Fig. 5). After each hidden layer, batch normalization is added as a means of regularization. The selected features and pre-processed data are fed into the neural network through the input layer, the model is trained through forward and backward propagation, and the output layer produces the probability of each class using the Softmax activation function. In the prediction stage, after producing a class probability vector, the argmax function, as defined in Eq. 10, returns the index of its largest entry.

$$
\mathrm {result} = \operatorname {argmax} (\mathrm {probability\_vector}) \tag {10}
$$

Our model was trained with the Adam optimization algorithm, which adaptively adjusts the learning rate of each weight based on recent gradients. We used a learning rate of $0.0003$ , a batch size of $64$ , and $300$ epochs. To avoid overfitting, we applied the early-stopping technique, which stops training in time when overfitting is observed and restores the best model parameters. We set the early-stopping patience to 30: if the loss on the validation set does not decrease for more than 30 consecutive epochs, the model is considered to have overfitted, so training stops and the best weights are restored.
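
A Keras sketch of this classifier under the stated hyper-parameters follows (TensorFlow 2.4 per Table 1). `n_features` and `n_classes` depend on the selected subset and kept classes, and the cross-entropy training loss is our assumption for the softmax output, whereas Eq. 8 writes a mean-squared error.

```python
import tensorflow as tf

# Keras sketch of the two-hidden-layer MLP described above.
def build_mlp(n_features, n_classes):
    model = tf.keras.Sequential([
        tf.keras.layers.Input(shape=(n_features,)),
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.BatchNormalization(),       # regularization after layer 1
        tf.keras.layers.Dense(128, activation="relu"),
        tf.keras.layers.BatchNormalization(),       # regularization after layer 2
        tf.keras.layers.Dense(n_classes, activation="softmax"),
    ])
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.0003),
                  loss="sparse_categorical_crossentropy",  # assumed loss choice
                  metrics=["accuracy"])
    return model

early_stop = tf.keras.callbacks.EarlyStopping(
    monitor="val_loss", patience=30, restore_best_weights=True)
# model.fit(X_train, y_train, validation_data=(X_val, y_val),
#           epochs=300, batch_size=64, callbacks=[early_stop])
```
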
# Computational complexity
|
| 247 |
+
|
| 248 |
+
By analyzing the computational complexity of the single feature selection algorithm and our proposed hybrid feature selection, it can be found that the worst-case computational complexity of the two feature selection algorithms used in the first step,
|
| 249 |
+
|
| 250 |
+

|
| 251 |
+
Fig. 5 Our Specified MLP Classifier
|
| 252 |
+
|
| 253 |
+
Table 1 Hardware and environment specification
|
| 254 |
+
|
| 255 |
+
<table><tr><td>Unit</td><td>Description</td></tr><tr><td>Processor</td><td>AMD Ryzen 7 2700</td></tr><tr><td>RAM</td><td>16 GB</td></tr><tr><td>GPU</td><td>AMD RX580</td></tr><tr><td>Operating System</td><td>Ubuntu 20.04.4 LTS</td></tr><tr><td>Packages</td><td>TensorFlow 2.4.1, scikit-learn 1.0.2, NumPy, pandas, and Matplotlib</td></tr></table>
|
| 256 |
+
|
| 257 |
+
IG and RF, is $O(n)$ (see Eqs. 11 and 12). The worst-case computational complexity of our proposed IGRF-RFE is the same as that of RFE, which is $O(n^2)$ (see Eqs. 13 and 14).
|
| 258 |
+
|
| 259 |
+
$$
|
| 260 |
+
T_{IG}(n) = C_1 \cdot n = O(n) \tag{11}
|
| 261 |
+
$$
|
| 262 |
+
|
| 263 |
+
$$
|
| 264 |
+
T_{RF}(n) = C_2 \cdot n = O(n) \tag{12}
|
| 265 |
+
$$
|
| 266 |
+
|
| 267 |
+
$$
|
| 268 |
+
T_{RFE}(n) = C_3 \cdot n(n-1)/2 = O\left(n^2\right) \tag{13}
|
| 269 |
+
$$
|
| 270 |
+
|
| 271 |
+
$$
|
| 272 |
+
T_{IGRF-RFE}(n) = C_1 \cdot n + C_2 \cdot n + C_4 \cdot n(n-1)/2 = O\left(n^2\right) \tag{14}
|
| 273 |
+
$$
|
| 274 |
+
|
| 275 |
+
where $n$ represents the number of features and $C_1, C_2, C_3, C_4$ are constants.
|
| 276 |
+
|
| 277 |
+
# Experiments and results
|
| 278 |
+
|
| 279 |
+
# Hardware and environment setting
|
| 280 |
+
|
| 281 |
+
Our experiments were conducted on a desktop running the Ubuntu 20.04.4 LTS operating system. The hardware consists of 16 GB RAM, an AMD Ryzen 7 2700 processor, and an AMD RX580 graphics card. Our experimental environment was based on Python 3.8, and the MLP model was built on TensorFlow 2.4.1. Scikit-Learn, NumPy, pandas, Matplotlib, and other packages provide data processing, feature selection, and visualization functions for our experiments. The specific hardware and environment information is presented in Table 1.
|
| 282 |
+
|
| 283 |
+
# The UNSW-NB15 dataset
|
| 284 |
+
|
| 285 |
+
For intrusion detection systems based on machine learning methods, datasets play a vital role in the effectiveness against unknown attacks, test performance, and generalizability. IDS datasets are required to contain a sufficient number of different attack types and to reflect real-world attack scenarios. A well-known IDS dataset is KDDCup99, which has been widely used in many previous studies. KDDCup99 was created from MIT Lincoln Laboratory's simulated cyberattack experiments to help build machine learning classifiers for intrusion detection [41]. NSL-KDD was created as a cleaned version of KDDCup99, removing the duplicate data in KDD99 and rebuilding the training and test data [42]. However, these two datasets have been criticized for failing to meet today's network security requirements due to the lack of modern attack types, the imbalanced distribution of training and test sets, and the lack of support for some common network protocols [43].
|
| 286 |
+
|
| 287 |
+
To address the shortcomings of KDDCup99/NSL-KDD, Moustafa and Slay [43] created a more complex intrusion detection dataset named UNSW-NB15 to better reflect modern attacks and protocols. UNSW-NB15 was extracted from 100 GB of normal and modern attack traffic by researchers at the Australian Centre for Cyber Security (ACCS) using the IXIA tool. The complete UNSW-NB15 dataset contains 2.5 million records, covering one normal class and nine attack classes: Analysis, Backdoor, DoS, Exploits, Fuzzers, Generic, Reconnaissance, Shellcode, and Worms. The original data consists of 49 features which can be divided into six groups: flow features, basic features, content features, time features, additional generated features, and labeled features.
|
| 288 |
+
|
| 289 |
+
The creators of the dataset also provided a $10\%$ partitioned dataset, split into a training set (175,341 records) and a test set (82,332 records) (see Table 2). The statistical distributions of the training and test set samples have been verified to be highly correlated, which means the partitioning is reliable for machine learning models [44]. There are four minority classes, Analysis, Backdoor, Shellcode, and Worms, each accounting for less than $2\%$ of the samples. In the $10\%$ dataset, a few meaningless features were removed, reducing the number of features to 42, including 39 numerical features and 3 categorical features (see Table 3). In this research, we used the $10\%$ dataset for classification.
|
| 290 |
+
|
| 291 |
+
Table 2 Records of the $10\%$ UNSW-NB15 dataset
|
| 292 |
+
|
| 293 |
+
<table><tr><td>Class</td><td>Training dataset</td><td>Test dataset</td></tr><tr><td>Normal</td><td>56000</td><td>37000</td></tr><tr><td>Generic</td><td>40000</td><td>18871</td></tr><tr><td>Exploits</td><td>33393</td><td>11132</td></tr><tr><td>Fuzzers</td><td>18184</td><td>6062</td></tr><tr><td>DoS</td><td>12264</td><td>4089</td></tr><tr><td>Reconnaissance</td><td>10491</td><td>3496</td></tr><tr><td>Analysis</td><td>2000</td><td>667</td></tr><tr><td>Backdoor</td><td>1746</td><td>583</td></tr><tr><td>Shellcode</td><td>1133</td><td>378</td></tr><tr><td>Worms</td><td>130</td><td>44</td></tr><tr><td>Total</td><td>175341</td><td>82332</td></tr></table>
|
| 294 |
+
|
| 295 |
+
Table 3 UNSW-NB15 feature data types
|
| 296 |
+
|
| 297 |
+
<table><tr><td>No.</td><td>Feature</td><td>Dtype</td><td>No.</td><td>Feature</td><td>Dtype</td></tr><tr><td>0</td><td>dur</td><td>float64</td><td>22</td><td>dwin</td><td>int64</td></tr><tr><td>1</td><td>proto</td><td>object</td><td>23</td><td>tcprtt</td><td>float64</td></tr><tr><td>2</td><td>service</td><td>object</td><td>24</td><td>synack</td><td>float64</td></tr><tr><td>3</td><td>state</td><td>object</td><td>25</td><td>ackdat</td><td>float64</td></tr><tr><td>4</td><td>spkts</td><td>int64</td><td>26</td><td>smean</td><td>int64</td></tr><tr><td>5</td><td>dpkts</td><td>int64</td><td>27</td><td>dmean</td><td>int64</td></tr><tr><td>6</td><td>sbytes</td><td>int64</td><td>28</td><td>trans_depth</td><td>int64</td></tr><tr><td>7</td><td>dbytes</td><td>int64</td><td>29</td><td>response_body_len</td><td>int64</td></tr><tr><td>8</td><td>rate</td><td>float64</td><td>30</td><td>ct_srv_src</td><td>int64</td></tr><tr><td>9</td><td>sttl</td><td>int64</td><td>31</td><td>ct_state_ttl</td><td>int64</td></tr><tr><td>10</td><td>dttl</td><td>int64</td><td>32</td><td>ct_dst_ltm</td><td>int64</td></tr><tr><td>11</td><td>sload</td><td>float64</td><td>33</td><td>ct_src_dport_ltm</td><td>int64</td></tr><tr><td>12</td><td>dload</td><td>float64</td><td>34</td><td>ct_dst_sport_ltm</td><td>int64</td></tr><tr><td>13</td><td>sloss</td><td>int64</td><td>35</td><td>ct_dst_src_ltm</td><td>int64</td></tr><tr><td>14</td><td>dloss</td><td>int64</td><td>36</td><td>is_ftp_login</td><td>int64</td></tr><tr><td>15</td><td>sinpkt</td><td>float64</td><td>37</td><td>ct_ftp_cmd</td><td>int64</td></tr><tr><td>16</td><td>dinpkt</td><td>float64</td><td>38</td><td>ct_flw_http_mthd</td><td>int64</td></tr><tr><td>17</td><td>sjit</td><td>float64</td><td>39</td><td>ct_src_ltm</td><td>int64</td></tr><tr><td>18</td><td>djit</td><td>float64</td><td>40</td><td>ct_srv_dst</td><td>int64</td></tr><tr><td>19</td><td>swin</td><td>int64</td><td>41</td><td>is_sm_ips_ports</td><td>int64</td></tr><tr><td>20</td><td>stcpb</td><td>int64</td><td>42</td><td>attack_cat</td><td>object</td></tr><tr><td>21</td><td>dtcpb</td><td>int64</td><td>43</td><td>label</td><td>int64</td></tr></table>
|
| 298 |
+
|
| 299 |
+
# Data pre-processing
|
| 300 |
+
|
| 301 |
+
In this section, we describe the procedures and methods used for data pre-processing.
|
| 302 |
+
|
| 303 |
+
# Cleaning
|
| 304 |
+
|
| 305 |
+
In the training and test sets provided by UNSW-NB15, there are 44 original columns: 42 are meaningful features and 2 are class labels of the attack. 'attack_cat' is a multi-class label and 'label' is a binary-class label. As our MLP model is designed to perform multi-class classification for intrusion detection, 'label' was removed. In addition, we also removed 44 rows containing null values from the dataset.
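A pandas sketch of this cleaning step; the CSV file name is the one used in the public 10% partition and is an assumption here:

```python
import pandas as pd

train = pd.read_csv("UNSW_NB15_training-set.csv")  # assumed file name

train = train.drop(columns=["label"])  # keep only the multi-class label 'attack_cat'
train = train.dropna()                 # remove rows containing null values
```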
|
| 306 |
+
|
| 307 |
+
# Minority removal
|
| 308 |
+
|
| 309 |
+
Extremely imbalanced datasets can have a negative impact on machine learning performance. Since imbalanced data is not the focus of this study, we removed the 4 minority classes 'Analysis', 'Backdoor', 'Shellcode', and 'Worms', which accounted for only $1.141\%$, $0.996\%$, $0.646\%$, and $0.074\%$ of the training set, respectively.
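Continuing the cleaning sketch above, the four minority classes can be filtered out as follows:

```python
# Drop the four minority classes discussed in the text.
minority = ["Analysis", "Backdoor", "Shellcode", "Worms"]
train = train[~train["attack_cat"].isin(minority)]
```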
|
| 310 |
+
|
| 311 |
+
# Oversampling
|
| 312 |
+
|
| 313 |
+
We observed that the class proportions differ between the given training set and test set, with the normal class showing the largest difference. The normal class accounts for only $32.9\%$ of the training set, while
|
| 314 |
+
|
| 315 |
+

|
| 316 |
+
Fig. 6 Class proportion before and after oversampling
|
| 317 |
+
|
| 318 |
+
$45.9\%$ of the instances in the test set are normal (see Fig. 6). Dataset imbalance could cause a serious performance issue that affects the training process of MLP models. In UNSW-NB15, the proportion of the normal class in the training set is much smaller than in the test set, which may lead to overfitting towards the abnormal classes. To address this issue, we adopted an oversampling technique, duplicating the normal-class samples so that the proportion of the normal class reaches $49.5\%$.
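Following the same sketch, duplicating the normal-class rows doubles their count so that 'Normal' reaches roughly half of the training samples:

```python
# Oversample: the normal class is sampled twice.
normal = train[train["attack_cat"] == "Normal"]
train = pd.concat([train, normal], ignore_index=True)
```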
|
| 319 |
+
|
| 320 |
+
# One-hot encoding
|
| 321 |
+
|
| 322 |
+
There are three categorical features in the dataset, 'service', 'proto', and 'state', which contain 13, 133, and 9 nominal values, respectively. These features were transformed using one-hot encoding, making each nominal value a binary feature.
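A pandas sketch of the one-hot encoding step; note that in practice the training and test sets must share one encoded column set (e.g., by fitting scikit-learn's `OneHotEncoder` on the training data), which is omitted here:

```python
# Each nominal value of the three categorical features becomes a binary column.
train = pd.get_dummies(train, columns=["proto", "service", "state"])
```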
|
| 323 |
+
|
| 324 |
+
# Normalization
|
| 325 |
+
|
| 326 |
+
Normalization can unify the value range of each feature and eliminate the bias during MLP model training caused by different value scales. We used Min-Max normalization to convert feature values into the range between 0 and 1 [45]. As defined in Eq. 15, the new value is calculated as the difference between the original value and the minimum, divided by the range (the maximum minus the minimum).
|
| 327 |
+
|
| 328 |
+
$$
|
| 329 |
+
x_i' = \frac{x_i - \min(x_i)}{\max(x_i) - \min(x_i)} \tag{15}
|
| 330 |
+
$$
|
| 331 |
+
|
| 332 |
+
where $x_i$ represents the $i$th feature vector, $\min(x_i)$ returns the minimum value of the vector, and $\max(x_i)$ returns the maximum value of the vector.
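Eq. 15 corresponds to scikit-learn's `MinMaxScaler`; a minimal sketch with placeholder matrices, fitting the scaler on the training data only:

```python
import numpy as np
from sklearn.preprocessing import MinMaxScaler

X_train = np.random.rand(100, 20) * 50  # placeholder feature matrices
X_test = np.random.rand(40, 20) * 50

scaler = MinMaxScaler()
X_train_scaled = scaler.fit_transform(X_train)  # learns per-feature min/max
X_test_scaled = scaler.transform(X_test)        # reuses the training min/max
```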
|
| 333 |
+
|
| 334 |
+
# Training, validation, and test set preparation
|
| 335 |
+
|
| 336 |
+
In Fig. 7, we applied PCA to the original training and test sets provided by UNSW-NB15, reduced them to three dimensions, and visualized their distributions. The distribution of the different classes can be seen in the PCA latent space, which increases the interpretability of the data. Although the PCA visualization cannot represent all dimensions of the data, it shows that, in three-dimensional space, there is substantial overlap between the different attack classes and the normal class. Moreover, the three-dimensional visualizations show that the spatial distributions of the training set and the test set differ in some areas.
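A sketch of the projection behind Fig. 7, using synthetic data in place of the UNSW-NB15 features:

```python
from sklearn.datasets import make_classification
from sklearn.decomposition import PCA

X, y = make_classification(n_samples=500, n_features=20, random_state=0)
X3 = PCA(n_components=3).fit_transform(X)  # shape (500, 3), ready for a 3-D scatter
```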
|
| 337 |
+
|
| 338 |
+
Machine learning usually divides datasets for different purposes. The training set is used to fit the model, the validation set is used to estimate the loss during training, and the test set is used to verify the performance of the model [46]. These three sets are supposed to contain separate data samples to avoid biased performance caused by data leakage. The UNSW-NB15 dataset does not provide a separate validation set, so most previous studies hold out a validation set from the training set. However, in the PCA visualization (see Fig. 7), we observed that the original training set and test set have different distributions in three-dimensional space, so a model validated on a split of the training set may not reflect the performance on the test set. In this case, the model may overfit the particular distribution of the training set and fail to generalize well. The validation set usually needs to follow the same distribution as the test set to correctly estimate the training loss of the model [47]. As a result, in our study, we split the original test set to construct a new validation set and test set (see Table 4). The new validation set and test set have the same distribution, which helps the model avoid overfitting. In Table 4, the training, validation, and test sets do not overlap, and the ratio of the three datasets is 68:16:16.
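A sketch of this split: stratifying on the class label keeps the two halves identically distributed, as in Table 4. `test` is assumed to be the original test-set DataFrame, and the random seed is our choice:

```python
from sklearn.model_selection import train_test_split

# 'test' is the original UNSW-NB15 test set loaded earlier (an assumption here).
val_df, test_df = train_test_split(
    test, test_size=0.5, stratify=test["attack_cat"], random_state=42)
```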
|
| 339 |
+
|
| 340 |
+

|
| 341 |
+
PCA visualization of original UNSW-NB15 training set
|
| 342 |
+
Fig. 7 The PCA visualization of original training and test set
|
| 343 |
+
|
| 344 |
+

|
| 345 |
+
PCA visualization of original UNSW-NB15 test set
|
| 346 |
+
|
| 347 |
+
Table 4 Records of training, validation and test set to be used in our model
|
| 348 |
+
|
| 349 |
+
<table><tr><td>Class</td><td>Training set</td><td>Validation set</td><td>Test set</td></tr><tr><td>Normal</td><td>56000</td><td>18500</td><td>18500</td></tr><tr><td>Generic</td><td>40000</td><td>9436</td><td>9435</td></tr><tr><td>Exploits</td><td>33393</td><td>5566</td><td>5566</td></tr><tr><td>Fuzzers</td><td>18184</td><td>3031</td><td>3031</td></tr><tr><td>DoS</td><td>12264</td><td>2044</td><td>2045</td></tr><tr><td>Reconnaissance</td><td>10491</td><td>1748</td><td>1748</td></tr><tr><td>Total</td><td>170332</td><td>40325</td><td>40325</td></tr></table>
|
| 350 |
+
|
| 351 |
+
Table 5 Simplified confusion matrix
|
| 352 |
+
|
| 353 |
+
<table><tr><td rowspan="2" colspan="2">Labeled samples</td><td colspan="2">Predicted class</td></tr><tr><td>Positive</td><td>Negative</td></tr><tr><td rowspan="2">Actual class</td><td>Positive</td><td>TP</td><td>FN</td></tr><tr><td>Negative</td><td>FP</td><td>TN</td></tr></table>
|
| 354 |
+
|
| 355 |
+
# Evaluation metrics
|
| 356 |
+
|
| 357 |
+
As our work is a multi-classification task, we used accuracy, recall, precision, false positive rate (FPR), f1 score, and AUC-ROC curve as our performance metrics. Table 5 presents a simplified confusion matrix that differentiates the classification results. Based on the one versus all principle, there are generally four cases in machine learning classification tasks, where:
|
| 358 |
+
|
| 359 |
+
True Positive (TP): represents correctly classified positive samples
|
| 360 |
+
False Negative (FN): represents positive samples incorrectly classified as negative
|
| 361 |
+
False Positive (FP): represents negative samples incorrectly classified as positive
|
| 362 |
+
True Negative (TN): represents correctly classified negative samples
|
| 363 |
+
|
| 364 |
+
Accuracy as defined in Eq. 16 calculates the ratio of correctly classified samples to all samples.
|
| 365 |
+
|
| 366 |
+
$$
|
| 367 |
+
\mathrm{Accuracy} = \frac{TP + TN}{TP + TN + FP + FN} \tag{16}
|
| 368 |
+
$$
|
| 369 |
+
|
| 370 |
+
Recall, as defined in Eq. 17, calculates the ratio of correctly classified positive samples to all actually positive samples.
|
| 371 |
+
|
| 372 |
+
$$
|
| 373 |
+
\text{Recall (True Positive Rate)} = \frac{TP}{TP + FN} \tag{17}
|
| 374 |
+
$$
|
| 375 |
+
|
| 376 |
+
Precision, as defined in Eq. 18, calculates the ratio of correctly classified positive samples to all samples predicted to be positive.
|
| 377 |
+
|
| 378 |
+
$$
|
| 379 |
+
\mathrm{Precision} = \frac{TP}{TP + FP} \tag{18}
|
| 380 |
+
$$
|
| 381 |
+
|
| 382 |
+
The false positive rate (FPR), as defined in Eq. 19, calculates the ratio of negative samples incorrectly classified as positive to all actually negative samples.
|
| 383 |
+
|
| 384 |
+
$$
|
| 385 |
+
\mathrm{FPR} = \frac{FP}{TN + FP} \tag{19}
|
| 386 |
+
$$
|
| 387 |
+
|
| 388 |
+
The F1 score, as defined in Eq. 20, is the harmonic mean of recall and precision. It can be used as a performance metric that mitigates the shortcomings of recall and precision on imbalanced multi-class data.
|
| 389 |
+
|
| 390 |
+
$$
|
| 391 |
+
F1 = 2 \times \left(\frac{\mathrm{Precision} \times \mathrm{Recall}}{\mathrm{Precision} + \mathrm{Recall}}\right) \tag{20}
|
| 392 |
+
$$
|
| 393 |
+
|
| 394 |
+
The receiver operating characteristic (ROC) curve shows the FPR and TPR of the model's predictions at different thresholds. The area under the ROC curve (AUC), as defined in Eq. 21, can be used to judge the performance of the model.
|
| 395 |
+
|
| 396 |
+
$$
|
| 397 |
+
AUC_{ROC} = \int_{0}^{1} \frac{TP}{TP + FN} \, d\!\left(\frac{FP}{TN + FP}\right) \tag{21}
|
| 398 |
+
$$
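As a minimal sketch, the metrics above map directly onto scikit-learn; the label and probability arrays below are made-up placeholders:

```python
import numpy as np
from sklearn.metrics import (accuracy_score, precision_score, recall_score,
                             f1_score, roc_auc_score)

y_true = np.array([0, 1, 2, 1, 0])        # integer class labels (placeholder)
y_prob = np.array([[0.8, 0.1, 0.1],       # softmax outputs (placeholder)
                   [0.2, 0.6, 0.2],
                   [0.1, 0.2, 0.7],
                   [0.3, 0.5, 0.2],
                   [0.3, 0.6, 0.1]])
y_pred = np.argmax(y_prob, axis=1)        # Eq. 10

print(accuracy_score(y_true, y_pred))                      # Eq. 16
print(recall_score(y_true, y_pred, average="weighted"))    # Eq. 17
print(precision_score(y_true, y_pred, average="weighted")) # Eq. 18
print(f1_score(y_true, y_pred, average="weighted"))        # Eq. 20
print(roc_auc_score(y_true, y_prob, multi_class="ovr"))    # one-vs-rest AUC, Eq. 21
```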
|
| 399 |
+
|
| 400 |
+
# Results
|
| 401 |
+
|
| 402 |
+
Before the ensemble feature selection with IG and RF, we removed the duplicate samples in the training set to avoid overfitting. Then, we applied information gain
|
| 403 |
+
|
| 404 |
+

|
| 405 |
+
Fig. 8 Numeric feature importance ranking by information gain
|
| 406 |
+
|
| 407 |
+

|
| 408 |
+
Fig. 9 Numeric feature importance ranking by random forest
|
| 409 |
+
|
| 410 |
+

|
| 411 |
+
Fig. 10 Numeric feature union set of IG and RF feature selection
|
| 412 |
+
|
| 413 |
+
and a random forest classifier with 1000 trees to the training set to obtain the importance rankings of the 39 numeric features (see Figs. 8 and 9). There are some low-importance features in both the IG ranking and the RF importance ranking, which may degrade the performance of the model. We chose 0.25 and 0.02 as the thresholds for the two feature selection methods, respectively, to filter important features: in the IG ranking, features with an importance score greater than 0.25 were retained, while in the RF importance ranking, features with an importance score greater than 0.02 were retained. After removing the unimportant features under each metric, two feature subsets were obtained.
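A sketch of this first (filter/embedded) step with scikit-learn, using mutual information as the information-gain estimate and random-forest impurity importances; the data is synthetic, and the thresholds follow the text (on synthetic data they may select different counts than in the paper):

```python
import numpy as np
from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import mutual_info_classif

X, y = make_classification(n_samples=1000, n_features=39, random_state=0)

ig_scores = mutual_info_classif(X, y, random_state=0)
rf = RandomForestClassifier(n_estimators=1000, random_state=0, n_jobs=-1).fit(X, y)
rf_scores = rf.feature_importances_

ig_keep = set(np.where(ig_scores > 0.25)[0])   # IG threshold from the text
rf_keep = set(np.where(rf_scores > 0.02)[0])   # RF threshold from the text
selected = sorted(ig_keep | rf_keep)           # union of the two subsets
```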
|
| 414 |
+
|
| 415 |
+
22 features were retained by the IG feature selection and 19 features were retained by the RF importance feature selection. Taking their union (see Fig. 10), it can be seen that they have 17 features in common, and their union contains 24 numeric features, which are used as part of the reduced feature subset for further feature reduction in the second step.
|
| 416 |
+
|
| 417 |
+
We applied the 24 numerical features obtained in the IGRF ensemble step, together with the 3 categorical features, to the wrapper-based RFE feature selection method. 10 random seeds, from 2022 to 2031, were chosen to average the score of the elimination evaluation function for each elimination. In the experiment, the patience parameter of our proposed model was set to 5, which means the RFE process stops if the performance does not improve over five cumulative iterations. After applying our hybrid feature selection method to UNSW-NB15, 23 important features were finally selected, including 20 numerical features and 3 categorical features (see Table 6). In Fig. 11, the confusion matrix of the multi-class classification is displayed, where the horizontal axis is the predicted label and the vertical axis is the true label. It can be seen that there are some misclassifications among the different classes. DoS, Fuzzers, and Reconnaissance were often misclassified as Exploits: approximately $88.31\%$ of DoS samples, $27.78\%$ of Fuzzers samples, and $18.82\%$ of Reconnaissance samples were misclassified as Exploits, which might explain their poor performance. In addition, 889 samples of the Fuzzers class were misclassified as normal, while 966 normal samples were misclassified as Fuzzers.
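A schematic sketch (our reconstruction, not the authors' code) of the wrapper RFE loop with a patience-based early stop; `evaluate` is a hypothetical helper that trains the MLP on a feature subset and returns the validation score averaged over the chosen random seeds:

```python
def rfe_with_patience(features, evaluate, patience=5):
    """Backward feature elimination that stops after `patience` stale rounds."""
    best_score = evaluate(features)
    best_subset = list(features)
    stale = 0  # consecutive eliminations without improvement
    while len(features) > 1 and stale < patience:
        # Score every candidate subset with one feature removed.
        scores = {f: evaluate([g for g in features if g != f]) for f in features}
        drop = max(scores, key=scores.get)  # removing this feature hurts least
        features = [g for g in features if g != drop]
        if scores[drop] > best_score:
            best_score, best_subset, stale = scores[drop], list(features), 0
        else:
            stale += 1
    return best_subset, best_score
```

Each call to `evaluate` trains the model once per seed, and up to $n(n-1)/2$ candidate subsets may be scored, which is where the $O(n^2)$ worst case of Eqs. 13 and 14 comes from; the patience parameter lets the loop stop well before that bound.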
|
| 418 |
+
|
| 419 |
+
In Table 7, the performance of our IDS model is presented. In multi-class classification, the MLP model based on the IGRF-RFE feature selection method has an accuracy of $84.24\%$. Since the UNSW-NB15 dataset has multiple imbalanced classes, the f1 score is a better measure of the performance of each class. The Generic attack has the highest f1 score of $98.20\%$, followed by the normal class with $93.11\%$. The DoS and Fuzzers attacks have lower f1 scores of $11.09\%$ and $42.26\%$ respectively, which may be due to insufficient samples in the UNSW-NB15 dataset: the DoS class occupies only $5.4\%$ of the training set and Fuzzers only $8.0\%$, which may prevent the MLP model from fitting them well during training. Although the Exploits and Reconnaissance classes do not have as many samples as Generic and Normal, they have f1 scores of $72.55\%$ and $78.45\%$. Moreover,
|
| 420 |
+
|
| 421 |
+
Table 6 Selected features by IGRF-RFE
|
| 422 |
+
|
| 423 |
+
<table><tr><td>No.</td><td>Feature</td><td>Dtype</td><td>No.</td><td>Feature</td><td>Dtype</td></tr><tr><td>0</td><td>dur</td><td>float64</td><td>14</td><td>dloss</td><td>float64</td></tr><tr><td>1</td><td>proto</td><td>object</td><td>15</td><td>sinpkt</td><td>float64</td></tr><tr><td>2</td><td>service</td><td>object</td><td>16</td><td>dinpkt</td><td>float64</td></tr><tr><td>3</td><td>state</td><td>object</td><td>18</td><td>djit</td><td>float64</td></tr><tr><td>4</td><td>spkts</td><td>float64</td><td>23</td><td>tcprtt</td><td>float64</td></tr><tr><td>5</td><td>dpkts</td><td>float64</td><td>24</td><td>synack</td><td>float64</td></tr><tr><td>6</td><td>sbytes</td><td>float64</td><td>25</td><td>ackdat</td><td>float64</td></tr><tr><td>7</td><td>dbytes</td><td>float64</td><td>26</td><td>smean</td><td>float64</td></tr><tr><td>8</td><td>rate</td><td>float64</td><td>27</td><td>dmean</td><td>float64</td></tr><tr><td>9</td><td>sttl</td><td>float64</td><td>31</td><td>ct_state_ttl</td><td>float64</td></tr><tr><td>10</td><td>dttl</td><td>float64</td><td>35</td><td>ct_dst_src_ltm</td><td>float64</td></tr><tr><td>12</td><td>dload</td><td>float64</td><td></td><td></td><td></td></tr></table>
|
| 424 |
+
|
| 425 |
+

|
| 426 |
+
Fig. 11 Confusion matrix of our results
|
| 427 |
+
|
| 428 |
+
Table 7 Evaluation metrics of multi-classification of UNSW-NB15
|
| 429 |
+
|
| 430 |
+
<table><tr><td></td><td>Precision</td><td>Recall</td><td>F1 Score</td><td>FPR</td><td>Accuracy</td></tr><tr><td>DoS</td><td>0.3612</td><td>0.0655</td><td>0.1109</td><td>0.0062</td><td>84.24%</td></tr><tr><td>Expl.</td><td>0.5980</td><td>0.9222</td><td>0.7255</td><td>0.0993</td><td></td></tr><tr><td>Fuzz.</td><td>0.4930</td><td>0.3698</td><td>0.4226</td><td>0.0309</td><td></td></tr><tr><td>Gene.</td><td>0.9982</td><td>0.9662</td><td>0.9820</td><td>0.0005</td><td></td></tr><tr><td>Norm.</td><td>0.9388</td><td>0.9236</td><td>0.9311</td><td>0.0510</td><td></td></tr><tr><td>Recon.</td><td>0.7807</td><td>0.7883</td><td>0.7845</td><td>0.0100</td><td></td></tr><tr><td>Weighted Avg.</td><td>0.8360</td><td>0.8424</td><td>0.8285</td><td>0.0403</td><td></td></tr></table>
|
| 431 |
+
|
| 432 |
+
a lower false positive rate (FPR) also reflects one aspect of model performance. Our model has a weighted FPR of 0.0403, which means that only about $4\%$ of negative samples are misclassified as positive. The Generic attack class has the lowest FPR of 0.0005, while Exploits has the worst FPR of 0.0993. Also, although DoS has the lowest f1 score, it has the second-lowest FPR of 0.0062.
|
| 433 |
+
|
| 434 |
+
In Fig. 12, we applied the one-vs-all methodology to generate the receiver operating characteristic (ROC) curve for each class, which helps assess the quality of the predicted probabilities. The Generic and normal classes have the highest AUCs (area under the ROC curve) of 1 and 0.99 respectively, while the DoS and Fuzzers classes have lower AUCs of 0.95 and 0.89. Overall, the multi-class ROC curves reflect the good detection capability of our model.
|
| 435 |
+
|
| 436 |
+

|
| 437 |
+
Fig. 12 ROC (receiver operating characteristic) curve of multi-classification
|
| 438 |
+
|
| 439 |
+
Table 8 Performance of different feature subsets
|
| 440 |
+
|
| 441 |
+
<table><tr><td>Subsets</td><td>Num.</td><td>Precision (%)</td><td>Recall (%)</td><td>F1 score (%)</td><td>Accuracy (%)</td></tr><tr><td>All Features</td><td>42</td><td>80.37</td><td>82.25</td><td>80.22</td><td>82.25</td></tr><tr><td>IG</td><td>25</td><td>82.24</td><td>83.13</td><td>81.49</td><td>83.13</td></tr><tr><td>RF</td><td>22</td><td>82.43</td><td>83.42</td><td>81.62</td><td>83.42</td></tr><tr><td>IG &amp; RF Union</td><td>27</td><td>83.30</td><td>80.60</td><td>80.36</td><td>80.60</td></tr><tr><td>IG &amp; RF Intersection</td><td>20</td><td>81.84</td><td>82.90</td><td>81.67</td><td>82.90</td></tr><tr><td>IGRF-RFE</td><td>23</td><td>83.60</td><td>84.24</td><td>82.85</td><td>84.24</td></tr></table>
|
| 442 |
+
|
| 443 |
+
# Comparison
|
| 444 |
+
|
| 445 |
+
In Table 8, we compare our results with the performance of the different feature selection methods and feature subsets used in our experiments. It can be seen that the feature subset selected by our IGRF-RFE method performs better than the other feature subsets in the table. Our feature selection method improves accuracy by nearly $2\%$ and the weighted f1 score by $2.6\%$ over using all features. Furthermore, we evaluated the performance of the feature subsets produced by standalone IG and RF as well as their union and intersection. Using IG's and RF's feature subsets, as well as their intersection, improves the performance of the model, but they remain below our method on every evaluation metric.
|
| 446 |
+
|
| 447 |
+
We also compared the performance of our proposed method with other similar previous works (see Table 9). Among similar works using MLP models, our MLP model with the IGRF-RFE feature selection method achieves better performance in both f1 score and accuracy. Our hybrid feature selection method obtains 23 important features and outperforms the standalone IG method and other tree-based feature selection methods. It is worth mentioning that although our performance is lower than the $95.2\%$ accuracy achieved by Prasad et al.'s work [27], this is because different studies use varying
|
| 448 |
+
|
| 449 |
+
Table 9 Comparison results of other methods on UNSW-NB15 dataset
|
| 450 |
+
|
| 451 |
+
<table><tr><td>Work</td><td>Classifier</td><td>FS Method</td><td>No. of features</td><td>F1 score (%)</td><td>Accuracy (%)</td></tr><tr><td>Kasongo and Sun [25]</td><td>ANN</td><td>XGBoost</td><td>19</td><td>77.28</td><td>77.51</td></tr><tr><td>Roy and Singh [48]</td><td>MLP</td><td>IG</td><td>20</td><td>-</td><td>84.1</td></tr><tr><td>Kasongo and Sun [49]</td><td>FFDNN</td><td>WFEU</td><td>22</td><td>-</td><td>77.16</td></tr><tr><td>Eunice et al. [26]</td><td>DNN</td><td>DT</td><td>20</td><td>-</td><td>82.1</td></tr><tr><td>Moustafa and Slay [44]</td><td>ANN</td><td>-</td><td>42</td><td>-</td><td>81.34</td></tr><tr><td>Proposed method</td><td>MLP</td><td>IGRF-RFE</td><td>23</td><td>82.85</td><td>84.24</td></tr></table>
|
| 453 |
+
|
| 454 |
+
amounts of data from UNSW-NB15. Our study used the $10\%$ pre-partitioned dataset from the UNSW-NB15 authors, which is validated by statistical distributions, and our results are still competitive among similar methods.
|
| 455 |
+
|
| 456 |
+
# Conclusion and future work
|
| 457 |
+
|
| 458 |
+
In this paper, we proposed a hybrid feature selection method, IGRF-RFE, for MLP-based intrusion detection systems and applied it to the modern IDS dataset UNSW-NB15. IGRF-RFE consists of two feature reduction steps: IGRF ensemble feature selection and recursive feature elimination with an MLP. In the IGRF ensemble feature reduction, 24 important numerical features were obtained according to the importance rankings produced by the Information Gain (IG) and Random Forest (RF) methods. After feeding the 24 numerical features and 3 categorical features into the wrapper-based RFE algorithm, we obtained an optimal feature subset with 20 numerical features and 3 categorical features for our MLP model. Our hybrid feature selection approach has a worst-case computational complexity of $O(n^{2})$, which is equivalent to that of the standard RFE algorithm; by introducing the patience parameter $p$, our algorithm can stop earlier to save computational resources. The experimental results showed that our feature selection method achieves an accuracy of $84.24\%$ and a weighted f1 score of $82.85\%$, better than the standalone IG and RF feature selection methods as well as other similar previous work.
|
| 459 |
+
|
| 460 |
+
The results also show that the proposed IGRF-RFE feature selection method can effectively select important features and improve the performance of intrusion detection systems. The method can also be applied to feature selection on other structured datasets. In the future, we plan to apply our proposed feature selection method to different intrusion detection datasets with advanced re-sampling techniques as well as other machine learning models [50].
|
| 461 |
+
|
| 462 |
+
# Acknowledgements
|
| 463 |
+
|
| 464 |
+
Not applicable.
|
| 465 |
+
|
| 466 |
+
# Author contributions
|
| 467 |
+
|
| 468 |
+
Conceptualization, YY and JJ-J; methodology, YY and JJ-J; software, YY; formal analysis, YY; writing—original draft preparation, YY and JJ-J; writing—review and editing, YY, JJ-J, WX, AS, JZ, FS and JK; funding acquisition, JJ-J; project administration, JJ-J. All authors have read and agreed to the published version of the manuscript. All authors read and approved the final manuscript.
|
| 469 |
+
|
| 470 |
+
# Funding
|
| 471 |
+
|
| 472 |
+
This work is supported by the Cyber Security Research Programme-Artificial Intelligence for Automating Response to Threats from the Ministry of Business, Innovation, and Employment (MBIE) of New Zealand as a part of the Catalyst Strategy Funds under the Grant Number MAUX1912.
|
| 473 |
+
|
| 474 |
+
# Availability of data and materials
|
| 475 |
+
|
| 476 |
+
The dataset used in this research is available on https://www.unsw.adfa.edu.au/unsw-canberra-cyber/cybersecurity/ADFA-NB15-Datasets/.
|
| 477 |
+
|
| 478 |
+
# Declarations
|
| 479 |
+
|
| 480 |
+
# Ethics approval and consent to participate
|
| 481 |
+
|
| 482 |
+
Not applicable.
|
| 483 |
+
|
| 484 |
+
# Consent for publication
|
| 485 |
+
|
| 486 |
+
The authors agree to publish this paper.
|
| 487 |
+
|
| 488 |
+
# Competing interests
|
| 489 |
+
|
| 490 |
+
The authors declare that they have no competing interests.
|
| 491 |
+
|
| 492 |
+
Received: 23 June 2022 Accepted: 21 January 2023
|
| 493 |
+
|
| 494 |
+
Published online: 05 February 2023
|
| 495 |
+
|
| 496 |
+
# References
|
| 497 |
+
|
| 498 |
+
1. Stouffer C. "115 cybersecurity statistics and trends you need to know in 2021," 2021, accessed 2022-02-22. https://us.norton.com/internetsecurity-emerging-threats-cyberthreat-trends-cybersecurity-threat-review.html
|
| 499 |
+
2. Lazarevic A, Kumar V, Srivastava J. Intrusion detection: a survey. In Managing cyber threats. Springer, pp. 19-78 2005.
|
| 500 |
+
3. Latha S, Prakash SJ. A survey on network attacks and intrusion detection systems. In 2017 4th International Conference on Advanced Computing and Communication Systems (ICACCS). IEEE, pp. 1-7 2017.
|
| 501 |
+
4. Jang-Jaccard J, Nepal S. A survey of emerging threats in cybersecurity. J Comput Syst Sci. 2014;80(5):973-93.
|
| 502 |
+
5. Denning DE. An intrusion-detection model. IEEE Trans Softw Eng. 1987;2:222-32.
|
| 503 |
+
6. Singh R, Kumar H, Singla RK, Ketti RR. Internet attacks and intrusion detection system: a review of the literature. Online Information Review, 2017.
|
| 504 |
+
7. Elshoush HT, Osman IM. Alert correlation in collaborative intelligent intrusion detection systems-a survey. Appl Soft Comput. 2011;11(7):4349-65.
|
| 505 |
+
8. Drewek-Ossowicka A, Pietrolaj M, Rumiński J. A survey of neural networks usage for intrusion detection systems. J Ambient Intell Humaniz Comput. 2021;12(1):497-514.
|
| 506 |
+
9. Zebari R, Abdulazeez A, Zeebaree D, Zebari D, Saeed J. A comprehensive review of dimensionality reduction techniques for feature selection and feature extraction. J Appl Sci Technol Trends. 2020;1(2):56-70.
|
| 507 |
+
10. Zhu J, Jang-Jaccard J, Liu T, Zhou J. Joint spectral clustering based on optimal graph and feature selection. Neural Process Lett. 2021;53(1):257-73.
|
| 508 |
+
11. Dua M, et al. Machine learning approach to ids: a comprehensive review. In: 3rd International conference on Electronics, Communication and Aerospace Technology (ICECA). IEEE. 2019;2019:117-21.
|
| 509 |
+
12. Zhu J, Jang-Jaccard J, Singh A, Welch I, Harith A-S, Camtepe S. A few-shot meta-learning based siamese neural network using entropy features for ransomware classification. Comput Secur. 2022;117:102691.
|
| 510 |
+
13. Alavizadeh H, Alavizadeh H, Jang-Jaccard J. Deep q-learning based reinforcement learning approach for network intrusion detection. Computers. 2022;11(3):41.
|
| 511 |
+
14. Liu T, Sabrina F, Jang-Jaccard J, Xu W, Wei Y. Artificial intelligence-enabled ddos detection for blockchain-based smart transport systems. Sensors. 2021;22(1):32.
|
| 512 |
+
15. Wei Y, Jang-Jaccard J, Sabrina F, Singh A, Xu W, Camtepe S. Ae-mlp: a hybrid deep learning approach for ddos detection and classification. IEEE Access. 2021;9:146810-146821.
|
| 513 |
+
16. Haq AU, Zeb A, Lei Z, Zhang D. Forecasting daily stock trend using multi-filter feature selection and deep learning. Expert Syst Appl. 2021;168: 114444.
|
| 514 |
+
17. Dong G, Liu H. Feature engineering for machine learning and data analytics. Boca Raton: CRC Press; 2018.
|
| 515 |
+
18. Hsu H-H, Hsieh C-W, Lu M-D. Hybrid feature selection by combining filters and wrappers. Expert Syst Appl. 2011;38(7):8144-50.
|
| 516 |
+
19. Jovic A, Brkić K, Bogunović N. A review of feature selection methods with applications. In: 38th international convention on information and communication technology, electronics and microelectronics (MIPRO). IEEE. 2015;2015:1200-5.
|
| 517 |
+
20. Sánchez-Marono N, Alonso-Betanzos A, Tombilla-Sanromán M. Filter methods for feature selection-a comparative study. In International Conference on Intelligent Data Engineering and Automated Learning. Springer, pp. 178-187. 2007.
|
| 518 |
+
21. Liu H, Zhou M, Liu Q. An embedded feature selection method for imbalanced data classification. IEEE/CAA Journal of Autom Sinica. 2019;6(3):703-15.
|
| 519 |
+
22. El Aboudi N, Benhlima L. Review on wrapper feature selection approaches. In 2016 International Conference on Engineering & MIS (ICEMIS). IEEE, pp. 1-5. 2016.
|
| 520 |
+
23. Zhou Y, Cheng G, Jiang S, Dai M. Building an efficient intrusion detection system based on feature selection and ensemble classifier. Computer networks. 2020;174: 107247.
|
| 521 |
+
24. Li L, Yu Y, Bai S, Cheng J, Chen X. Towards effective network intrusion detection: a hybrid model integrating gini index and gbt with PSO. J Sensors. 2018;2018.
|
| 522 |
+
25. Kasongo SM, Sun Y. Performance analysis of intrusion detection systems using a feature selection method on the unsw-nb15 dataset. J Big Data. 2020;7(1):1-20.
|
| 523 |
+
|
| 524 |
+
26. Eunice AD, Gao Q, Zhu M-Y, Chen Z, Na L. Network anomaly detection technology based on deep learning. In 2021 IEEE 3rd International Conference on Frontiers Technology of Information and Computer (ICFTIC). IEEE, pp. 6-9, 2021.
|
| 525 |
+
27. Prasad M, Gupta RK, Tripathi S. A multi-level correlation-based feature selection for intrusion detection. Arab J Sci Eng. 2022;1-11.
|
| 526 |
+
28. Alazzam H, Sharieh A, Sabri KE. A feature selection algorithm for intrusion detection system based on pigeon inspired optimizer. Expert Syst Appl. 2020;148: 113249.
|
| 527 |
+
29. Zhang Y, Ren X, Zhang J. Intrusion detection method based on information gain and relief feature selection. In 2019 International Joint Conference on Neural Networks (IJCNN). IEEE, pp. 1-5, 2019.
|
| 528 |
+
30. Megantara AA, Ahmad T. Feature importance ranking for increasing performance of intrusion detection system. In: 2020 3rd International Conference on Computer and Informatics Engineering (IC2IE). IEEE, pp. 37-42, 2020.
|
| 529 |
+
31. Ustebay S, Turgut Z, Aydin MA. Intrusion detection system with recursive feature elimination by using random forest and deep learning classifier. In: international congress on big data, deep learning and fighting cyber terrorism (IBIGDELFT). IEEE. 2018;2018:71-6.
|
| 530 |
+
32. Zong W, Chow Y-W, Susilo W. A two-stage classifier approach for network intrusion detection. In: International Conference on Information Security Practice and Experience. Springer, pp. 329-340, 2018.
|
| 531 |
+
33. Kumar V, Sinha D, Das AK, Pandey SC, Goswami RT. An integrated rule based intrusion detection system: analysis on unsw-nb15 data set and the real time online dataset. Clust Comput. 2020;23(2):1397-418.
|
| 532 |
+
34. Dhal P, Azad C. A comprehensive survey on feature selection in the various fields of machine learning. Appl Intell. 2021;1-39.
|
| 533 |
+
35. Li J, Cheng K, Wang S, Morstatter F, Trevino RP, Tang J, Liu H. Feature selection: a data perspective. ACM Comput Surv. 2017;50(6):1-45.
|
| 534 |
+
36. Biau G, Scornet E. A random forest guided tour. Test. 2016;25(2):197-227.
|
| 535 |
+
37. Stiawan D, Idris MYB, Bamhdi AM, Budiarto R, et al. Cicids-2017 dataset feature analysis with information gain for anomaly detection. IEEE Access. 2020;8:132911-132921.
|
| 536 |
+
38. Kuhn M, Johnson K, et al. Applied predictive modeling, vol. 26. Cham: Springer; 2013.
|
| 537 |
+
39. Taud H, Mas J. Multilayer perceptron (mlp), In: Geomatic approaches for modeling land change scenarios. Springer, pp. 451-455, 2018.
|
| 538 |
+
40. Ioffe S, Szegedy C. Batch normalization: accelerating deep network training by reducing internal covariate shift. In: International conference on machine learning. PMLR, pp. 448-456, 2015.
|
| 539 |
+
41. KDDCup1999. 2007. http://kdd.ics.uci.edu/databases/kddcup99/KDDCUP99.htm
|
| 540 |
+
42. Tavallaee M, Bagheri E, Lu W, Ghorbani AA. A detailed analysis of the kdd cup 99 data set. In: IEEE symposium on computational intelligence for security and defense applications. IEEE. 2009;2009:1-6.
|
| 541 |
+
43. Moustafa N, Slay J. Unsw-nb15: a comprehensive data set for network intrusion detection systems (unsw-nb15 network data set). In: military communications and information systems conference (MilCIS). IEEE. 2015;2015:1-6.
|
| 542 |
+
44. Moustafa N, Slay J. The evaluation of network anomaly detection systems: statistical analysis of the unsw-nb15 data set and the comparison with the kdd99 data set. Inf Secur J. 2016;25(1-3):18-31.
|
| 543 |
+
45. Patro S, Sahu KK. Normalization: a preprocessing stage. arXiv preprint arXiv:1503.06462, 2015.
|
| 544 |
+
46. Russell S, Norvig P. Artificial intelligence: a modern approach, vol. 7458. 3rd ed. Upper Saddle River: Pearson Education; 2010.
|
| 545 |
+
47. Kuhn M, Johnson K, et al. Applied predictive modeling, vol. 26. Berlin: Springer; 2013.
|
| 546 |
+
48. Roy A, Singh KJ. Multi-classification of unsw-nb15 dataset for network anomaly detection system. In Proceedings of International Conference on Communication and Computational Technologies. Springer, pp. 429-451, 2021.
|
| 547 |
+
49. Kasongo SM, Sun Y. A deep learning method with wrapper based feature extraction for wireless intrusion detection system. Comput Secur. 2020;92: 101752.
|
| 548 |
+
50. Feng S, Liu Q, Patel A, Bazai SU, Jin C-K, Kim JS, Sarrafzadeh M, Azzollini D, Yeoh J, Kim E et al. Automated pneumothorax triaging in chest X-rays in the new zealand population using deep-learning algorithms. J Med Imaging Radiat Oncol. 2022;66(8):1035-43.
|
| 549 |
+
|
| 550 |
+
# Publisher's Note
|
| 551 |
+
|
| 552 |
+
Springer Nature remains neutral with regard to jurisdictional claims in published maps and institutional affiliations.
|
| 553 |
+
|
| 554 |
+
|
2203.16xxx/2203.16365/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9670f0a6ef91532e1322475df7a3b706f5114ab6856a6284e9344d6143aee402
|
| 3 |
+
size 832519
|
2203.16xxx/2203.16365/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.16xxx/2203.16369/b6c3126c-a1ee-4c00-afaf-ac1a4f247180_content_list.json
ADDED
|
@@ -0,0 +1,1757 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Incorporating Dynamic Semantics into Pre-Trained Language Model for Aspect-based Sentiment Analysis",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
124,
|
| 8 |
+
79,
|
| 9 |
+
872,
|
| 10 |
+
118
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Kai Zhang $^{1}$ , Kun Zhang $^{2}$ , Mengdi Zhang $^{3}$ , Hongke Zhao $^{4}$ , Qi Liu $^{1,*}$ , Wei Wu $^{3}$ , Enhong Chen $^{1}$",
|
| 17 |
+
"bbox": [
|
| 18 |
+
211,
|
| 19 |
+
124,
|
| 20 |
+
794,
|
| 21 |
+
158
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "<sup>1</sup> School of Data Science, University of Science and Technology of China",
|
| 28 |
+
"bbox": [
|
| 29 |
+
203,
|
| 30 |
+
159,
|
| 31 |
+
800,
|
| 32 |
+
175
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "$^{2}$ School of Computer Science and Information Engineering, Hefei University of Technology",
|
| 39 |
+
"bbox": [
|
| 40 |
+
126,
|
| 41 |
+
175,
|
| 42 |
+
875,
|
| 43 |
+
192
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "$^{3}$ Meituan; $^{4}$ College of Management and Economics, Tianjin University kkzhang0808@mail.ustc.edu.cn; {qiliuql, cheneh}@ustc.edu.cn {zhang1028kun, wuwei19850318, mdz} @ gmail.com; hongke@tju.edu.cn",
|
| 50 |
+
"bbox": [
|
| 51 |
+
142,
|
| 52 |
+
192,
|
| 53 |
+
862,
|
| 54 |
+
241
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "Abstract",
|
| 61 |
+
"text_level": 1,
|
| 62 |
+
"bbox": [
|
| 63 |
+
260,
|
| 64 |
+
252,
|
| 65 |
+
339,
|
| 66 |
+
266
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "Aspect-based sentiment analysis (ABSA) predicts sentiment polarity towards a specific aspect in the given sentence. While pre-trained language models such as BERT have achieved great success, incorporating dynamic semantic changes into ABSA remains challenging. To this end, in this paper, we propose to address this problem by Dynamic Re-weighting BERT (DR-BERT), a novel method designed to learn dynamic aspect-oriented semantics for ABSA. Specifically, we first take the Stack-BERT layers as a primary encoder to grasp the overall semantic of the sentence and then fine-tune it by incorporating a lightweight Dynamic Re-weighting Adapter (DRA). Note that the DRA can pay close attention to a small region of the sentences at each step and re-weigh the vitally important words for better aspect-aware sentiment understanding. Finally, experimental results on three benchmark datasets demonstrate the effectiveness and the rationality of our proposed model and provide good interpretable insights for future semantic modeling.",
|
| 73 |
+
"bbox": [
|
| 74 |
+
141,
|
| 75 |
+
282,
|
| 76 |
+
460,
|
| 77 |
+
609
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{"type": "text", "text": "1 Introduction", "text_level": 1, "bbox": [114, 625, 258, 640], "page_idx": 0},
{"type": "text", "text": "Aspect-based sentiment analysis is a branch of sentiment analysis which aims to identify the sentiment polarity of a specific aspect in a sentence (Jiang et al., 2011). For example, given the sentence \"The restaurant has attentive service, but the food is terrible,\" the task aims to predict the sentiment polarities towards \"service\" and \"food\", which should be positive and negative respectively.", "bbox": [112, 653, 487, 780], "page_idx": 0},
{"type": "text", "text": "As a fundamental technology, the ABSA task has broad applications, such as recommender systems (Chin et al., 2018; Zhang et al., 2021b) and question answering (Wang et al., 2019). Therefore, it has attracted a great amount of research from both academia and industry. Among them, deep neural networks (DNN) (Nguyen and Shirai, 2015;", "bbox": [112, 782, 487, 894], "page_idx": 0},
{"type": "text", "text": "Tang et al., 2015, 2016; Zheng et al., 2020), attention mechanisms (Wang et al., 2016; Ma et al., 2017) and graph neural/attention networks (Huang and Carley, 2019; Zhang et al., 2019a; Wang et al., 2020) have significantly improved the performance through deep feature alignment between the aspect representations and context representations.", "bbox": [507, 252, 884, 365], "page_idx": 0},
{"type": "text", "text": "Recently, large-scale pre-trained language models, such as Bidirectional Encoder Representations from Transformers (BERT) (Devlin et al., 2019), have realized a breakthrough in many language tasks, which further attracts considerable attention for enhancing semantic representations. In ABSA, Xu et al. (2019a) designed BERT-PT, which explores a novel post-training approach on the BERT model. Song et al. (2019) further proposed a text pair classification model, BERT-SPC, which prepares the input sequence by appending the aspects to the contextual sentence. Although great success has been achieved by the above studies, some critical problems remain when directly applying attention mechanisms or fine-tuning the pre-trained BERT in the task of ABSA.", "bbox": [507, 368, 884, 625], "page_idx": 0},
{"type": "text", "text": "Specifically, most of the existing approaches select all the important words from a contextual sentence at one time. However, according to neuroscience studies, the essential words during semantic comprehension change dynamically with the reading process and should be repeatedly considered (Kuperberg, 2007; Tononi, 2008; Brouwer et al., 2021). For example, when judging the sentiment polarity of the aspect \"system memory\" in the review sentence \"It could be a perfect laptop if it would have faster system memory and its radeon would have DDR5 instead of DDR3\", the important words should change from the general sentiment words {\"faster\", \"perfect\", \"laptop\"} into the aspect-aware words {\"would have\", \"faster\", \"could\", \"be\", \"perfect\"}. Through these dynamic changes, the predicted sentiment polarity will change from positive to the ground truth sentiment label negative.", "bbox": [505, 629, 885, 919], "page_idx": 0},
|
| 148 |
+
{"type": "aside_text", "text": "arXiv:2203.16369v2 [cs.CL] 23 Nov 2022", "bbox": [21, 304, 60, 724], "page_idx": 0},
{"type": "page_footnote", "text": "* Corresponding author.", "bbox": [147, 904, 297, 917], "page_idx": 0},
{"type": "text", "text": "Meanwhile, simply initializing the encoder with a pre-trained BERT does not boost the performance in ABSA as effectively as we expected (Huang and Carley, 2019; Xu et al., 2019a; Wang et al., 2020). One possible reason could be that training on two specific tasks, i.e., Next Sentence Prediction and Masked LM, with rich resources leads to a better semantics of the overall sentence. However, the ABSA task is conditional, which means the model needs to understand the regional semantics of sentences by fully considering the given aspect. For instance, BERT tends to understand the global sentiment of the above sentence \"It could be a perfect laptop ... of DDR3\" regardless of which aspect is given. But in ABSA, the sentence is likely to convey different sentiment meanings for different aspects (e.g., negative for \"system memory\" while positive for \"DDR5\"). Therefore, the vanilla BERT can hardly pay closer attention to the information relevant to the specific aspect, especially when there are multiple aspects in one sentence.", "bbox": [115, 85, 485, 420], "page_idx": 1},
{"type": "text", "text": "To equip pre-trained models with the ability to capture aspect-aware dynamic semantics, we present a Dynamic Re-weighting BERT (DR-BERT) model, which considers the aspect-aware dynamic semantics in a pre-trained learning framework. Specifically, we first take the Stack-BERT layers as the primary sentence encoder to learn the overall semantics of the whole sentence. Then, we devise a Dynamic Re-weighting Adapter (DRA), which pays the most careful attention to a small region of the contextual sentence and dynamically selects and re-weights one critical word at each step for better aspect-aware sentiment understanding. Finally, to overcome the limitation of the vanilla BERT mentioned above, we incorporate the lightweight DRA into each BERT encoder layer and fine-tune it to adapt to the ABSA task. We conduct extensive experiments on three widely-used datasets where the results demonstrate the effectiveness, rationality and interpretability of the proposed model.", "bbox": [115, 423, 485, 744], "page_idx": 1},
|
| 192 |
+
{"type": "text", "text": "2 Related Work", "text_level": 1, "bbox": [115, 760, 267, 774], "page_idx": 1},
{"type": "text", "text": "2.1 Aspect-based Sentiment Analysis", "text_level": 1, "bbox": [115, 785, 418, 801], "page_idx": 1},
{"type": "text", "text": "Aspect-based sentiment analysis identifies the sentiment polarity of a specific aspect in the sentence. Some approaches (Ding and Liu, 2007; Jiang et al., 2011; Kiritchenko et al., 2014) designed numerous rule-based models for ABSA. For example, Ding and Liu (2007) first performed dependency parsing to determine the sentiment polarity about the aspects.", "bbox": [115, 806, 485, 917], "page_idx": 1},
{"type": "text", "text": "In recent years, most research studies make use of the attention mechanism to learn the semantic relations between words (Tang et al., 2015, 2016; Wang et al., 2016; Ma et al., 2017; Xing et al., 2019; Liang et al., 2019; Zhang et al., 2021a). Among them, Wang et al. (2016) proposed an attention-based LSTM to identify important information relating to the aspect. Ma et al. (2017) developed an interactive attention to model the aspect and the sentence interactively. Fan et al. (2018) defined a multi-grained network to link the words from the aspect and the sentence. Li et al. (2018) designed a target-specific network to integrate aspect information into the sentence. Tan et al. (2019) introduced a dual attention to distinguish conflicting opinions.", "bbox": [512, 84, 880, 324], "page_idx": 1},
{"type": "text", "text": "In addition, another research trend is to leverage syntactic knowledge to learn syntax-aware features of the aspect (Tang et al., 2019; Huang and Carley, 2019; Zhang et al., 2019a; Sun et al., 2019; Wang et al., 2020; Tang et al., 2020; Chen et al., 2020; Li et al., 2021; Tian et al., 2021). For example, Tang et al. (2020) developed a dependency graph enhanced dual-transformer network to fuse the flat representations. More recently, pre-trained methods have proved remarkably successful in the ABSA task. Song et al. (2019) devised an attentional encoder and a BERT-SPC model to learn features between the aspect and the context. Wang et al. (2020) reshaped the dependency trees and proposed a relational graph attention network to encode the syntactic relation features. Tian et al. (2021) explicitly utilized dependency types with type-aware graph networks to learn aspect-aware relations.", "bbox": [512, 326, 880, 614], "page_idx": 1},
{"type": "text", "text": "However, these methods largely ignore the procedure of dynamic semantic comprehension (Kuperberg, 2007; Kuperberg and Jaeger, 2016; Wang et al., 2017; Zhang et al., 2019c; Brouwer et al., 2021) and cannot fully reveal the dynamic semantic changes of the aspect-related words. Thus, it is hard for ABSA models to achieve the same performance as human-level sentiment understanding.", "bbox": [512, 615, 880, 741], "page_idx": 1},
|
| 260 |
+
{"type": "text", "text": "2.2 Human Semantic Comprehension", "text_level": 1, "bbox": [512, 755, 818, 770], "page_idx": 1},
{"type": "text", "text": "Actually, whether in the early days or now, imitating the procedure of human semantic comprehension has always been one of the original intentions of many studies (Bezdek, 1992; Wang et al., 2017; Zheng et al., 2019; Li et al., 2019; Zhang et al., 2019d; Peng et al., 2020; Golan et al., 2020), such as machine reading comprehension (Zhang et al., 2019d; Peng et al., 2020), visual object detection (Spampinato et al., 2017) and relevance estima-", "bbox": [512, 774, 880, 917], "page_idx": 1},
|
| 283 |
+
{"type": "image", "img_path": "images/86e4723684fca705b2aaab1f3f69b55e4238dff9974eb583fe4ac6889816a8bd.jpg", "image_caption": ["Figure 1: An illustration of the proposed framework. The blue blocks constitute a pre-trained BERT model and are frozen during fine-tuning, while the right block represents the dynamic re-weighting adapter that is inserted after each BERT encoder layer and trained during fine-tuning. Moreover, $S$ and $A$ represent the sentence sequence and the aspect sequence respectively. $N$ indicates the number of layers of the BERT encoder."], "image_footnote": [], "bbox": [208, 83, 783, 269], "page_idx": 2},
|
| 298 |
+
{"type": "text", "text": "tion (Li et al., 2019). For example, the attention mechanism (Vaswani et al., 2017) has had a widespread influence, allowing the model to focus on important parts of the input like human attention. Spampinato et al. (2017) aimed to learn human-based features via brain-based visual objects. Wang et al. (2017) built a dynamic attention model to capture human preferences for article recommendation.", "bbox": [112, 354, 487, 480], "page_idx": 2},
{"type": "text", "text": "Moreover, some psychologists and psycholinguists have also conducted much research on the mechanisms of human semantic comprehension (Kuperberg, 2007; Kuperberg and Jaeger, 2016; Brouwer et al., 2021). Specifically, some scholars (Yang and McConkie, 1999; Rayner, 1998) found that most people focus on about 1.5 words at a time. Moreover, Koch and Tsuchiya (2007) and Tononi (2008) assumed that people can only remember the meaning of about 7 to 9 words at each time. These phenomena indicate that most people focus on only a small region of the sentence at one time and need to repeatedly process important parts for better semantic understanding (Sharmin et al., 2015).", "bbox": [112, 483, 489, 706], "page_idx": 2},
{"type": "text", "text": "Inspired by the above research and linguistic psychology theories, in this paper, we explore the aspect-aware semantic changes of the ABSA task by incorporating the procedure of dynamic semantic comprehension into the pre-trained language model.", "bbox": [112, 708, 489, 789], "page_idx": 2},
|
| 331 |
+
{"type": "text", "text": "3 Dynamic Re-weighting BERT", "text_level": 1, "bbox": [112, 799, 405, 815], "page_idx": 2},
{"type": "text", "text": "In this section, we introduce the technical details of DR-BERT. Specifically, we start with the problem definition, followed by the overall architecture of DR-BERT as illustrated in Figure 1.", "bbox": [112, 820, 487, 883], "page_idx": 2},
|
| 354 |
+
{"type": "text", "text": "Problem Definition In ABSA, a sentence-aspect pair $(S,A)$ is given. In this paper, the sentence is", "bbox": [112, 887, 487, 919], "page_idx": 2},
{"type": "text", "text": "represented as $S = \\{w_1^s, w_2^s, \\dots, w_{l_s}^s\\}$, which consists of a series of $l_s$ words. The specific aspect is denoted as $A = \\{w_1^a, w_2^a, \\dots, w_{l_a}^a\\}$, which is a part of $S$. $l_a$ is the length of the aspect words. The goal of ABSA is to learn a sentiment classifier that can precisely predict the sentiment polarity of sentence $S$ for the specific aspect $A$. As the aspect-related information plays a key role in the prediction (Li et al., 2018; Zheng et al., 2020), this paper aims to dynamically select and encode the aspect-aware semantic information through the proposed model.", "bbox": [507, 354, 884, 531], "page_idx": 2},
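For concreteness, the following minimal sketch shows one way a sentence-aspect pair $(S, A)$ and its label could be represented; the field names are hypothetical and only illustrate the problem definition above, using the paper's own restaurant example.

```python
# A hypothetical ABSA training example: one (sentence, aspect) pair with its label.
example = {
    "sentence": ["The", "restaurant", "has", "attentive", "service", ",",
                 "but", "the", "food", "is", "terrible", "."],  # S = {w_1^s, ..., w_{l_s}^s}
    "aspect": ["service"],                                       # A = {w_1^a, ..., w_{l_a}^a}, a span of S
    "polarity": "positive",                                      # one of {positive, negative, neutral}
}
```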
|
| 376 |
+
{"type": "text", "text": "Overall Architecture DR-BERT mainly contains two components (i.e., the BERT encoder and the Dynamic Re-weighting Adapter), together with two modules (i.e., the embedding module and the sentiment prediction module). The technical details of each part are elaborated as follows.", "bbox": [507, 533, 882, 630], "page_idx": 2},
{"type": "text", "text": "3.1 Embedding Module", "text_level": 1, "bbox": [507, 642, 712, 657], "page_idx": 2},
{"type": "text", "text": "To better represent the semantic information of the aspect words and context words, we first map each word into a low-dimensional vector. Specifically, the inputs of DR-BERT are the sentence sequence and the corresponding aspect sequence. For the sentence sequence, we construct the BERT input as \"[CLS]\" + sentence + \"[SEP]\" and the sentence $S = \\{w_1^s, w_2^s, \\dots, w_{l_s}^s\\}$ can be transformed into the hidden states $\\mathbf{s} = \\{\\mathbf{s}_i \\mid i = 1, 2, \\dots, l_s\\}$ with the BERT embedding. For aspect sequences, we adopt the same method to get the representation vector of each word. Thus, through the embedding module, the aspect sequence $A = \\{w_1^a, w_2^a, \\dots, w_{l_a}^a\\}$ is mapped to $\\mathbf{a}^s = \\{\\mathbf{a}_j \\mid j = 1, 2, \\dots, l_a\\}$. Note that, if the aspect sequence is a single word like \"food\", the aspect representation is the embedding of the", "bbox": [507, 661, 884, 919], "page_idx": 2},
{"type": "text", "text": "single word \"food\". For the cases where the sequence contains multiple words such as \"system memory\", the aspect representation is the average of the word embeddings (Sun et al., 2015). We can denote the aspect embedding process as:", "bbox": [112, 84, 487, 165], "page_idx": 3},
|
| 421 |
+
{"type": "equation", "text": "\n$$\n\\mathbf{a} = \\begin{cases} \\mathbf{a}_1, & \\text{if } l_a = 1, \\\\ (\\sum_{j=1}^{l_a} \\mathbf{a}_j) / l_a, & \\text{if } l_a > 1, \\end{cases} \\tag{1}\n$$\n", "text_format": "latex", "bbox": [137, 175, 487, 231], "page_idx": 3},
|
| 433 |
+
{"type": "text", "text": "where $\\mathbf{a}_j$ is the embedding of word $j$ in the aspect sequence and $\\mathbf{a}$ denotes the embedding of the aspect.", "bbox": [112, 240, 487, 272], "page_idx": 3},
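As a concrete reading of Eq. (1), here is a minimal sketch of the aspect-embedding average; the tensor sizes are illustrative placeholders, not the paper's dimensions.

```python
import torch

def aspect_embedding(aspect_vectors: torch.Tensor) -> torch.Tensor:
    """Eq. (1): a single-word aspect keeps its own embedding; a multi-word
    aspect is represented by the average of its word embeddings."""
    l_a = aspect_vectors.size(0)            # number of aspect words l_a
    return aspect_vectors[0] if l_a == 1 else aspect_vectors.sum(dim=0) / l_a

# Toy usage with a hypothetical 4-dimensional embedding space.
a = aspect_embedding(torch.rand(2, 4))     # e.g., "system", "memory" -> mean vector
```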
|
| 444 |
+
{"type": "text", "text": "3.2 BERT Encoder", "text_level": 1, "bbox": [114, 284, 280, 298], "page_idx": 3},
{"type": "text", "text": "The architecture of BERT (Devlin et al., 2019) is akin to the Transformer (Vaswani et al., 2017). For simplicity, we omit some architectural details such as position encoding, layer normalization (Xu et al., 2019b) and residual connections (He et al., 2016).", "bbox": [112, 305, 487, 385], "page_idx": 3},
{"type": "text", "text": "1) Multi-head Self-attention Mechanism. In recent years, the multi-head self-attention mechanism (MultiHead) has found a wide range of applications in natural language processing. In this paper, we adopt MultiHead with $h$ heads to obtain the overall semantics of the whole sentence. The outputs of the self-attention heads are concatenated and finally transformed as:", "bbox": [112, 388, 487, 514], "page_idx": 3},
|
| 478 |
+
{"type": "equation", "text": "\n$$\n\\mathbf{m} = \\{\\mathbf{m}_i \\mid i = 1, 2, \\dots, l_s\\} = \\operatorname{MultiHead}(\\mathbf{s}\\mathbf{W}_h^{Q}, \\mathbf{s}\\mathbf{W}_h^{K}, \\mathbf{s}\\mathbf{W}_h^{V}), \\tag{2}\n$$\n", "text_format": "latex", "bbox": [142, 529, 485, 568], "page_idx": 3},
|
| 490 |
+
{"type": "text", "text": "where $h$ denotes the $h$-th attention head, and $\\mathbf{W}_h^Q$, $\\mathbf{W}_h^K$ and $\\mathbf{W}_h^V$ are learnable parameters. Finally, the output feature is $\\mathbf{m} = \\{\\mathbf{m}_i \\mid i = 1,2,\\dots,l_s\\}$. For the detailed implementation of MultiHead, please refer to the Transformer (Vaswani et al., 2017).", "bbox": [112, 581, 487, 663], "page_idx": 3},
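A minimal sketch of the multi-head self-attention step in Eq. (2), using PyTorch's built-in module; the hidden size and head count mirror bert-base but the sequence length is illustrative.

```python
import torch
import torch.nn as nn

d_model, n_heads, l_s = 768, 12, 16         # illustrative sizes
attn = nn.MultiheadAttention(d_model, n_heads, batch_first=True)

s = torch.randn(1, l_s, d_model)            # BERT embeddings s_1..s_{l_s}
# Self-attention: queries, keys and values all come from the same sequence,
# mirroring MultiHead(sW^Q, sW^K, sW^V) in Eq. (2).
m, _ = attn(s, s, s)                        # m has shape (1, l_s, d_model)
```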
|
| 501 |
+
{"type": "text", "text": "2) Position-wise Feed-Forward Network. Since the multi-head attention is a series of linear transformations, we then apply a position-wise feed-forward network (FFN) to learn a non-linear transformation of the features. Specifically, the FFN consists of two linear transformations with a ReLU activation in between. More formally:", "bbox": [112, 665, 489, 778], "page_idx": 3},
{"type": "equation", "text": "\n$$\n\\mathbf{f} = \\{\\mathbf{f}_i \\mid i = 1, 2, \\dots, l_s\\} = \\max(0, \\mathbf{m}\\mathbf{W}_1 + \\mathbf{b}_1)\\mathbf{W}_2 + \\mathbf{b}_2, \\tag{3}\n$$\n", "text_format": "latex", "bbox": [166, 790, 485, 826], "page_idx": 3},
|
| 524 |
+
{"type": "text", "text": "where $\\mathbf{W}_1, \\mathbf{b}_1, \\mathbf{W}_2$ and $\\mathbf{b}_2$ are learnable parameters in the linear transformations.", "bbox": [112, 838, 487, 869], "page_idx": 3},
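A small sketch of the position-wise FFN in Eq. (3), assuming illustrative layer sizes:

```python
import torch
import torch.nn as nn

class FFN(nn.Module):
    """Position-wise feed-forward network of Eq. (3):
    f = max(0, mW_1 + b_1)W_2 + b_2, applied independently at every position."""
    def __init__(self, d_model: int = 768, d_ff: int = 3072):
        super().__init__()
        self.linear1 = nn.Linear(d_model, d_ff)    # W_1, b_1
        self.linear2 = nn.Linear(d_ff, d_model)    # W_2, b_2

    def forward(self, m: torch.Tensor) -> torch.Tensor:
        return self.linear2(torch.relu(self.linear1(m)))

f = FFN()(torch.randn(1, 16, 768))                 # (batch, l_s, d_model)
```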
|
| 535 |
+
{"type": "text", "text": "So far, with the input $S = \\{w_1^s, w_2^s, \\dots, w_{l_s}^s\\}$, we obtain the hidden states $\\mathbf{f} = \\{\\mathbf{f}_i \\mid i = 1, 2, \\dots, l_s\\}$ via the BERT encoder. Then, for the words' hidden", "bbox": [112, 871, 487, 917], "page_idx": 3},
{"type": "text", "text": "states of the sentence from the FFN, we utilize the max-pooling operation to fairly select crucial features in the sentence (Lai et al., 2015; Zhang et al., 2019b), so as to obtain the original sentence representation $\\mathbf{h}_s$ at the beginning of each re-weighting step:", "bbox": [507, 84, 882, 165], "page_idx": 3},
{"type": "equation", "text": "\n$$\n\\mathbf{h}_s = \\operatorname{MaxPooling}(\\mathbf{f}_i \\mid i = 1, 2, \\dots, l_s). \\tag{4}\n$$\n", "text_format": "latex", "bbox": [527, 172, 880, 189], "page_idx": 3},
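Eq. (4) reduces the sequence of hidden states to one sentence vector by taking an element-wise maximum over positions; a one-line sketch with illustrative shapes:

```python
import torch

f = torch.randn(1, 16, 768)        # hidden states f_1..f_{l_s} from the FFN
# Eq. (4): element-wise max over the sequence dimension yields h_s.
h_s = f.max(dim=1).values          # shape (1, 768)
```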
|
| 569 |
+
{"type": "text", "text": "3.3 Dynamic Re-weighting Adapter (DRA)", "text_level": 1, "bbox": [507, 197, 860, 212], "page_idx": 3},
{"type": "text", "text": "The attention mechanism currently used in deep learning is essentially similar to the selective visual attention of human beings (Vaswani et al., 2017; You et al., 2016). However, for text semantic understanding, the human brain discovers the intentional relationship of words at a sentential level (Taatgen et al., 2007; Sha et al., 2016; Sen et al., 2020) and links the incoming semantic information with preexisting information stored within memory. Thus, we design a dynamic re-weighting adapter (DRA) which can dynamically emphasize the important aspect-aware words for the ABSA task.", "bbox": [507, 217, 882, 409], "page_idx": 3},
{"type": "text", "text": "As shown in the right part of Figure 1, based on the overall semantics of the whole sentence, DRA further selects the most important word at each step in consideration of the specific aspect representation. Specifically, the inputs of DRA are the final outputs of the BERT encoder (i.e., $\\mathbf{h}_s$) and the original aspect embedding (i.e., $\\mathbf{a}$). In each step, we first utilize the re-weighting attention to choose the word for the current input from the input sequence ($\\{\\mathbf{s}_i \\mid i = 1,2,\\dots,l_s\\}$). Then, we utilize a Gated Recurrent Unit (GRU) (Cho et al., 2014) to encode the chosen word and update the semantic representation of the review sentence.", "bbox": [507, 411, 882, 619], "page_idx": 3},
|
| 603 |
+
{"type": "text", "text": "Formally, we regard the calculation process as:", "bbox": [526, 620, 875, 636], "page_idx": 3},
{"type": "equation", "text": "\n$$\n\\mathbf{a}_t = F([\\mathbf{s}_1, \\mathbf{s}_2, \\dots, \\mathbf{s}_{l_s}], \\mathbf{h}_{t-1}, \\mathbf{a}), \\tag{5}\n$$\n", "text_format": "latex", "bbox": [566, 642, 880, 667], "page_idx": 3},
{"type": "equation", "text": "\n$$\n\\mathbf{h}_t = \\operatorname{GRU}(\\mathbf{a}_t, \\mathbf{h}_{t-1}), \\quad t \\in [1, T],\n$$\n", "text_format": "latex", "bbox": [564, 662, 826, 677], "page_idx": 3},
{"type": "text", "text": "where $\\mathbf{a}$ is the original embedding vector of the aspect words, $\\mathbf{a}_t$ is the output of the re-weighting function $F$, and $T$ denotes the dynamic re-weighting length over the sentence, which represents the cognitive threshold of human beings. $\\mathbf{h}_0 = \\mathbf{h}_s$ is the initial state and $\\mathbf{h}_T$ is the output hidden state of DRA.", "bbox": [507, 682, 882, 778], "page_idx": 3},
|
| 649 |
+
{"type": "text", "text": "1) The Re-weighting Function. More specifically, we utilize the attention mechanism to realize the re-weighting function $F$, which aims to select the most important aspect-related word at each step. The calculation can be formulated as:", "bbox": [507, 781, 882, 859], "page_idx": 3},
|
| 660 |
+
{"type": "equation", "text": "\n$$\n\\begin{array}{l} \\mathbf{S} = [\\mathbf{s}_1, \\mathbf{s}_2, \\dots, \\mathbf{s}_{l_s}], \\\\ \\mathbf{M} = \\mathbf{W}_s\\mathbf{S} + (\\mathbf{W}_d\\mathbf{h}_{t-1} + \\mathbf{W}_a\\mathbf{a}) \\otimes \\mathbf{w}, \\tag{6} \\\\ \\mathbf{m} = \\omega^{T} \\tanh(\\mathbf{M}), \\end{array}\n$$\n", "text_format": "latex", "bbox": [537, 864, 880, 920], "page_idx": 3},
{"type": "text", "text": "where $\\mathbf{S}$ denotes the original sentence embedding and $\\mathbf{M}$ is the fused representation of the aspect and the sentence. $\\mathbf{W}_s$, $\\mathbf{W}_d$, $\\mathbf{W}_a$ and $\\omega$ are trainable parameters. $\\mathbf{w} \\in \\mathbb{R}^{l_s}$ is a row vector of ones and $\\otimes$ denotes the outer product.", "bbox": [112, 84, 487, 162], "page_idx": 4},
|
| 683 |
+
{"type": "text", "text": "Subsequently, to better encode the aspect-aware semantics, we choose the most important word (i.e., one word) at each step for the specific aspect:", "bbox": [112, 165, 489, 212], "page_idx": 4},
{"type": "equation", "text": "\n$$\n\\alpha_i = \\frac{\\exp(m_i)}{\\sum_{k=1}^{l_s} \\exp(m_k)}, \\tag{7}\n$$\n", "text_format": "latex", "bbox": [179, 219, 485, 256], "page_idx": 4},
{"type": "equation", "text": "\n$$\n\\mathbf{a}_t = \\mathbf{s}_j, \\quad j = \\operatorname{Index}(\\max(\\alpha_i)),\n$$\n", "text_format": "latex", "bbox": [181, 259, 420, 275], "page_idx": 4},
|
| 718 |
+
{"type": "text", "text": "where $m_{i}$ and $\\alpha_{i}$ are the hidden state and the attention score of the $i$-th word in the sentence, and $\\mathbf{a}_t$ is the chosen word which is most related to the specific aspect at the $t$-th step. However, the $\\operatorname{Index}(\\max(\\cdot))$ operation is not differentiable, which means its gradient cannot be calculated. Inspired by the softmax function, we modify Eq. 7 and employ the following operation to re-weight the contextual words:", "bbox": [112, 282, 489, 411], "page_idx": 4},
|
| 729 |
+
{"type": "equation", "text": "\n$$\n\\mathbf{a}_t = \\sum_{i=1}^{l_s} \\frac{\\exp(\\lambda m_i)}{\\sum_{k=1}^{l_s} \\exp(\\lambda m_k)} \\mathbf{s}_i. \\tag{8}\n$$\n", "text_format": "latex", "bbox": [184, 419, 485, 461], "page_idx": 4},
|
| 741 |
+
{"type": "text", "text": "Note that we design a hyper-parameter $\\lambda$ to ensure that our model achieves the above purpose. Specifically, the softmax function can exponentially increase or decrease the signal, thereby highlighting the information we want to enhance. Thus, when $\\lambda$ is an arbitrarily large value, the attention score of the chosen word is infinitely close to 1, and those of the other words are infinitely close to 0. In this way, the most important word (i.e., one word) will be extracted from the context at each re-weighting step.", "bbox": [112, 468, 487, 630], "page_idx": 4},
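A minimal sketch of the λ-sharpened softmax in Eq. (8): with a large λ the weighted sum over word vectors approaches a differentiable selection of the single highest-scoring word. The shapes are illustrative; the default λ = 100 follows the value reported in Section 4.2.

```python
import torch

def reweight(m: torch.Tensor, s: torch.Tensor, lam: float = 100.0) -> torch.Tensor:
    """Eq. (8): a_t = sum_i softmax(lam * m)_i * s_i.
    m: (l_s,) attention logits; s: (l_s, d) word vectors."""
    alpha = torch.softmax(lam * m, dim=0)      # near one-hot for large lam
    return alpha @ s                           # differentiable "soft argmax" over words

m = torch.tensor([0.1, 2.3, 0.7])
s = torch.randn(3, 768)
a_t = reweight(m, s)                           # approximately s[1], the top-scoring word
```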
|
| 752 |
+
{"type": "text", "text": "2) The GRU Function. To better encode the semantics of the whole sentence, we also employ a GRU to further imitate the procedure of human semantic comprehension under the specific context, which is consistent with how people adjust their understanding of a text as they read it. Therefore, given the previous hidden vector, the GRU computes the new hidden state by receiving the chosen word as input:", "bbox": [112, 631, 489, 775], "page_idx": 4},
|
| 763 |
+
{"type": "equation", "text": "\n$$\nz_t = \\sigma(\\mathbf{W}_z \\cdot [\\mathbf{h}_{t-1}, \\mathbf{a}_t]),\n$$\n", "text_format": "latex", "bbox": [179, 783, 364, 800], "page_idx": 4},
{"type": "equation", "text": "\n$$\nr_t = \\sigma(\\mathbf{W}_r \\cdot [\\mathbf{h}_{t-1}, \\mathbf{a}_t]), \\tag{9}\n$$\n", "text_format": "latex", "bbox": [181, 803, 485, 829], "page_idx": 4},
{"type": "equation", "text": "\n$$\n\\tilde{\\mathbf{h}}_t = \\tanh(\\mathbf{W} \\cdot [r_t * \\mathbf{h}_{t-1}, \\mathbf{a}_t]),\n$$\n", "text_format": "latex", "bbox": [181, 824, 416, 841], "page_idx": 4},
{"type": "equation", "text": "\n$$\n\\mathbf{h}_t = (1 - z_t) * \\mathbf{h}_{t-1} + z_t * \\tilde{\\mathbf{h}}_t,\n$$\n", "text_format": "latex", "bbox": [179, 845, 420, 863], "page_idx": 4},
|
| 811 |
+
{"type": "text", "text": "where $\\sigma$ is the logistic sigmoid function, and $z_{t}$ and $r_t$ denote the update gate and reset gate respectively at time step $t$.", "bbox": [112, 871, 487, 917], "page_idx": 4},
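Putting Eqs. (5)-(9) together, the sketch below shows one plausible forward pass of the adapter: a simplified linear scoring layer stands in for Eq. (6), the λ-softmax softly selects one word, and PyTorch's GRUCell performs the state update. It is an assumption-laden illustration, not the authors' exact parameterization.

```python
import torch
import torch.nn as nn

d, lam = 768, 100.0                          # hidden size and sharpening factor (illustrative)
score = nn.Linear(3 * d, 1)                  # simplified stand-in for the scoring in Eq. (6)
gru = nn.GRUCell(input_size=d, hidden_size=d)

def dra_forward(s: torch.Tensor, h_s: torch.Tensor, a: torch.Tensor, T: int = 7) -> torch.Tensor:
    """T re-weighting steps: pick an aspect-related word (Eqs. 6-8),
    then update the sentence state with the GRU (Eqs. 5, 9).
    s: (l_s, d) word embeddings; h_s: (1, d) pooled state; a: (d,) aspect vector."""
    h = h_s                                                  # h_0 = h_s
    for _ in range(T):
        ctx = torch.cat([s, h.expand_as(s), a.expand_as(s)], dim=-1)
        m = score(ctx).squeeze(-1)                           # per-word logits, (l_s,)
        alpha = torch.softmax(lam * m, dim=0)                # near one-hot (Eq. 8)
        a_t = (alpha @ s).unsqueeze(0)                       # soft choice of one word
        h = gru(a_t, h)                                      # h_t = GRU(a_t, h_{t-1})
    return h                                                 # h_T, later fed into Eq. (10)
```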
|
| 822 |
+
{"type": "table", "img_path": "images/b2b93ed7fe76bcf457dbe38f6914a5c4f989f0e04c501b0d2aecb592b9074b50.jpg", "table_caption": [], "table_footnote": [], "table_body": "<table><tr><td rowspan=\"2\">Datasets</td><td colspan=\"2\">#Positive</td><td colspan=\"2\">#Negative</td><td colspan=\"2\">#Neutral</td><td rowspan=\"2\">#L</td><td rowspan=\"2\">#M</td></tr><tr><td>Train</td><td>Test</td><td>Train</td><td>Test</td><td>Train</td><td>Test</td></tr><tr><td>Restaurant</td><td>2164</td><td>728</td><td>807</td><td>196</td><td>637</td><td>196</td><td>20</td><td>45.5</td></tr><tr><td>Laptop</td><td>994</td><td>341</td><td>870</td><td>128</td><td>464</td><td>169</td><td>19</td><td>36.5</td></tr><tr><td>Twitter</td><td>1561</td><td>173</td><td>1560</td><td>173</td><td>3127</td><td>346</td><td>16</td><td>10.2</td></tr></table>", "bbox": [514, 87, 878, 175], "page_idx": 4},
{"type": "text", "text": "Table 1: The statistics of the three benchmark datasets. #L is the average length of sentences. #M is the proportion $(\\%)$ of samples with multiple (i.e., more than 1) aspects.", "bbox": [507, 184, 882, 229], "page_idx": 4},
|
| 847 |
+
{"type": "text", "text": "3.4 Sentiment Prediction", "text_level": 1, "bbox": [509, 243, 721, 258], "page_idx": 4},
{"type": "text", "text": "After applying the BERT layers and DRA to the input sentence, its root representation (i.e., $\\mathbf{s}$) is converted into the feature representation $\\mathbf{e}$:", "bbox": [507, 263, 880, 311], "page_idx": 4},
|
| 870 |
+
{"type": "equation", "text": "\n$$\n\\mathbf{e} = \\{\\mathbf{e}_i \\mid i = 1, 2, \\dots, l_s\\} = (\\mathbf{W}_e\\mathbf{f} + \\mathbf{U}_e\\mathbf{h}_T + \\mathbf{b}_e), \\tag{10}\n$$\n", "text_format": "latex", "bbox": [589, 323, 880, 359], "page_idx": 4},
{"type": "text", "text": "where $\\mathbf{W}_e$, $\\mathbf{U}_e$ and $\\mathbf{b}_e$ are trainable parameters. After the $N$ stacked BERT layers, we obtain the final representation of the sentence (i.e., $\\mathbf{e}_N$). Then, we feed it into a Multilayer Perceptron (MLP) and map it to the probabilities over the different sentiment polarities via a softmax layer:", "bbox": [507, 370, 882, 467], "page_idx": 4},
{"type": "equation", "text": "\n$$\n\\mathbf{R}_l = \\operatorname{ReLU}(\\mathbf{W}_l\\mathbf{R}_{l-1} + \\mathbf{b}_l), \\tag{11}\n$$\n", "text_format": "latex", "bbox": [581, 479, 880, 502], "page_idx": 4},
{"type": "equation", "text": "\n$$\n\\hat{\\mathbf{y}} = \\operatorname{softmax}(\\mathbf{W}_o\\mathbf{R}_h + \\mathbf{b}_o),\n$$\n", "text_format": "latex", "bbox": [583, 499, 808, 514], "page_idx": 4},
|
| 917 |
+
{"type": "text", "text": "where $\\mathbf{W}_l, \\mathbf{W}_o, \\mathbf{b}_l$ and $\\mathbf{b}_o$ are learned parameters. $\\mathbf{R}_l$ is the hidden state of the $l$-th MLP layer ($\\mathbf{R}_0 = \\mathbf{e}_N$, $l \\in [1,h]$). $\\mathbf{R}_h$ is the state of the final layer, which is also regarded as the output of the MLP. $\\hat{\\mathbf{y}}$ is the predicted sentiment polarity distribution.", "bbox": [507, 526, 882, 606], "page_idx": 4},
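A compact sketch of the prediction head in Eqs. (10)-(11); the fusion weights, MLP depth and sizes are illustrative assumptions, and the features are treated as already pooled to one vector per example.

```python
import torch
import torch.nn as nn

class SentimentHead(nn.Module):
    """Sketch of Eqs. (10)-(11): e = W_e f + U_e h_T + b_e, then an MLP with
    ReLU layers and a softmax over the three polarities."""
    def __init__(self, d: int = 768, n_classes: int = 3, depth: int = 3):
        super().__init__()
        self.W_e = nn.Linear(d, d)               # W_e and b_e
        self.U_e = nn.Linear(d, d, bias=False)   # U_e (bias already carried by W_e)
        layers = []
        for _ in range(depth):                   # R_l = ReLU(W_l R_{l-1} + b_l)
            layers += [nn.Linear(d, d), nn.ReLU()]
        self.mlp = nn.Sequential(*layers)
        self.out = nn.Linear(d, n_classes)       # W_o and b_o

    def forward(self, f: torch.Tensor, h_T: torch.Tensor) -> torch.Tensor:
        # f: (batch, d) encoder features; h_T: (batch, d) DRA output state.
        e = self.W_e(f) + self.U_e(h_T)          # Eq. (10)
        return torch.softmax(self.out(self.mlp(e)), dim=-1)
```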
|
| 928 |
+
{"type": "text", "text": "3.5 Model Training", "text_level": 1, "bbox": [507, 617, 680, 633], "page_idx": 4},
{"type": "text", "text": "Finally, we apply the cross-entropy loss function for model training:", "bbox": [507, 638, 880, 670], "page_idx": 4},
|
| 951 |
+
{"type": "equation", "text": "\n$$\n\\mathcal{L} = -\\sum_{i=1}^{M} \\sum_{j=1}^{C} y_i^{j} \\log(\\hat{y}_i^{j}) + \\beta \\|\\Theta\\|_2^2, \\tag{12}\n$$\n", "text_format": "latex", "bbox": [534, 680, 880, 725], "page_idx": 4},
{"type": "text", "text": "where $y_{i}^{j}$ is the ground truth sentiment polarity, $C$ is the number of labels (i.e., 3 in our task), $M$ is the number of training samples, and $\\Theta$ corresponds to all of the trainable parameters.", "bbox": [507, 731, 880, 797], "page_idx": 4},
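A minimal sketch of the objective in Eq. (12); the explicit L2 term is written out for clarity even though, in practice, an optimizer's weight decay is the usual way to implement it. The default beta = 0.8 restates the value reported in Section 4.2.

```python
import torch
import torch.nn.functional as F

def absa_loss(logits: torch.Tensor, labels: torch.Tensor, params, beta: float = 0.8):
    """Eq. (12): cross-entropy over C = 3 classes plus beta * ||Theta||_2^2.
    cross_entropy applies log-softmax internally, so `logits` are pre-softmax
    scores. logits: (M, 3); labels: (M,) class indices; params: model parameters."""
    ce = F.cross_entropy(logits, labels, reduction="sum")
    l2 = sum(p.pow(2).sum() for p in params)
    return ce + beta * l2
```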
|
| 974 |
+
{"type": "text", "text": "4 Experiment", "text_level": 1, "bbox": [507, 809, 645, 825], "page_idx": 4},
{"type": "text", "text": "4.1 Datasets", "text_level": 1, "bbox": [507, 834, 621, 848], "page_idx": 4},
{"type": "text", "text": "We mainly conduct experiments on three benchmark ABSA datasets, including \"Laptop\", \"Restaurant\" (Pontiki et al., 2014) and \"Twitter\" (Dong et al., 2014). Each data item is labeled with one of three", "bbox": [507, 854, 882, 917], "page_idx": 4},
|
| 1009 |
+
{"type": "table", "img_path": "images/d60d80864bff068f4542e1fbe8def7c9977623b73535d65e800273ce9106868d.jpg", "table_caption": [], "table_footnote": [], "table_body": "<table><tr><td rowspan=\"2\">Category</td><td rowspan=\"2\">Datasets Methods</td><td colspan=\"2\">Laptop</td><td colspan=\"2\">Restaurant</td><td colspan=\"2\">Twitter</td></tr><tr><td>Accuracy</td><td>F1-score</td><td>Accuracy</td><td>F1-score</td><td>Accuracy</td><td>F1-score</td></tr><tr><td rowspan=\"6\">Attention.</td><td>ATAE-LSTM (Wang et al., 2016)</td><td>68.57</td><td>64.52</td><td>76.58</td><td>67.39</td><td>67.27</td><td>66.43</td></tr><tr><td>IAN (Ma et al., 2017)</td><td>70.84</td><td>65.73</td><td>76.88</td><td>68.36</td><td>68.74</td><td>67.61</td></tr><tr><td>MemNet (Tang et al., 2016)</td><td>72.32</td><td>67.03</td><td>78.12</td><td>68.99</td><td>70.19</td><td>68.22</td></tr><tr><td>AOA (Huang et al., 2018)</td><td>74.56</td><td>68.77</td><td>79.42</td><td>70.43</td><td>71.68</td><td>69.25</td></tr><tr><td>MGNet (Fan et al., 2018)</td><td>75.37</td><td>71.26</td><td>81.28</td><td>72.07</td><td>72.54</td><td>70.78</td></tr><tr><td>TNet (Li et al., 2018)</td><td>76.54</td><td>71.75</td><td>80.69</td><td>71.27</td><td>74.93</td><td>73.60</td></tr><tr><td rowspan=\"6\">Pre-trained.</td><td>BERT (Devlin et al., 2019)</td><td>77.29</td><td>73.36</td><td>82.40</td><td>73.17</td><td>73.42</td><td>72.17</td></tr><tr><td>BERT-PT (Xu et al., 2019a)</td><td>78.07</td><td>75.08</td><td>84.95</td><td>76.96</td><td>-</td><td>-</td></tr><tr><td>BERT-SPC (Song et al., 2019)</td><td>78.99</td><td>75.03</td><td>84.46</td><td>76.98</td><td>74.13</td><td>72.73</td></tr><tr><td>AEN-BERT (Song et al., 2019)</td><td>79.93</td><td>76.31</td><td>83.12</td><td>73.76</td><td>74.71</td><td>73.13</td></tr><tr><td>RGAT-BERT (Wang et al., 2020)</td><td>78.21</td><td>74.07</td><td>86.60</td><td>81.35</td><td>76.15</td><td>74.88</td></tr><tr><td>T-GCN (Tian et al., 2021)</td><td>80.88</td><td>77.03</td><td>86.16</td><td>79.95</td><td>76.45</td><td>75.25</td></tr><tr><td>Ours.</td><td>DR-BERT</td><td>81.45</td><td>78.16</td><td>87.72</td><td>82.31</td><td>77.24</td><td>76.10</td></tr></table>", "bbox": [119, 82, 884, 313], "page_idx": 5},
{"type": "text", "text": "Table 2: Experimental results (%) on the three benchmark datasets. We underline the best-performing baseline.", "bbox": [139, 321, 853, 336], "page_idx": 5},
{"type": "text", "text": "sentiment polarities (i.e., positive, negative and neutral). The statistics of the datasets are presented in Table 1. Moreover, we strictly follow the dataset configurations of previous studies. For all datasets, we randomly sample $10\\%$ of the items from the training set and regard them as the development set.", "bbox": [112, 357, 489, 455], "page_idx": 5},
|
| 1045 |
+
{"type": "text", "text": "4.2 Hyperparameter Settings", "text_level": 1, "bbox": [112, 464, 368, 480], "page_idx": 5},
{"type": "text", "text": "In the implementation, we build our framework based on the official bert-base models ($\\mathfrak{n}_{\\mathrm{layers}} = 12$, $\\mathfrak{n}_{\\mathrm{heads}} = 12$, $\\mathfrak{n}_{\\mathrm{hidden}} = 768$). The hidden size of the GRUs and the re-weighting length of DRA are set to 256 and 7 respectively. The learning rate is tuned among {2e-5, 5e-5, 1e-3} and the batch size is manually tested in {16, 32, 64, 128}. The dropout rate is set to 0.2. The hyper-parameters $l$, $\\beta$ and $\\lambda$ have been carefully adjusted, and their final values are set to 3, 0.8 and 100 respectively. The model is trained using the Adam optimizer and evaluated by two widely used metrics. The parameters of the baseline models are in accordance with the default configurations of the original papers. We run our model three times with different seeds and report the average performance.", "bbox": [112, 485, 489, 726], "page_idx": 5},
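For readability, the reported settings can be collected into a single configuration sketch; the key names below are hypothetical, while the values restate the paragraph above.

```python
# Hypothetical config dict restating the reported hyperparameters.
DR_BERT_CONFIG = {
    "bert": {"n_layers": 12, "n_heads": 12, "n_hidden": 768},  # official bert-base
    "gru_hidden": 256,           # hidden size of the DRA GRUs
    "reweight_length_T": 7,      # dynamic re-weighting steps
    "lr_grid": [2e-5, 5e-5, 1e-3],
    "batch_grid": [16, 32, 64, 128],
    "dropout": 0.2,
    "mlp_depth_l": 3, "beta": 0.8, "lambda": 100,
    "optimizer": "Adam", "seeds": 3,
}
```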
|
| 1068 |
+
{"type": "text", "text": "4.3 Baselines", "text_level": 1, "bbox": [112, 736, 233, 750], "page_idx": 5},
{"type": "list", "sub_type": "text", "list_items": ["- Attention-based Models: MemNet (Tang et al., 2016), ATAE-LSTM (Wang et al., 2016), IAN (Ma et al., 2017), AOA (Huang et al., 2018), MGNet (Fan et al., 2018), TNet (Li et al., 2018).", "- Pre-trained Models: Fine-tuned BERT (Devlin et al., 2019), BERT-PT (Xu et al., 2019a), BERT-SPC, AEN-BERT (Song et al., 2019), RGAT-BERT (Wang et al., 2020), T-GCN (Tian et al., 2021)."], "bbox": [136, 756, 489, 917], "page_idx": 5},
{"type": "text", "text": "The baseline methods comprehensively cover the recent related SOTA models. Most of them are detailed in Section 2.1; to save space, we do not describe them again in this section.", "bbox": [507, 357, 884, 422], "page_idx": 5},
|
| 1106 |
+
{"type": "text", "text": "4.4 Experimental Results", "text_level": 1, "bbox": [507, 439, 726, 455], "page_idx": 5},
{"type": "text", "text": "From the results in Table 2, we have the following observations. First, BERT-based methods beat most of the attention-based methods (e.g., IAN and TNet) on both metrics. This phenomenon indicates the powerful ability of pre-trained language models, and is also why we adopt BERT as the base encoder to learn the overall semantic representation of the whole sentence.", "bbox": [505, 464, 882, 592], "page_idx": 5},
{"type": "text", "text": "Second, by comparing non-specific BERT models (i.e., BERT and BERT-PT) with task-specific models (e.g., RGAT-BERT) for ABSA, we find that the task-specific BERT models perform better than the non-specific ones. Specifically, we can also observe the performance trend T-GCN & RGAT-BERT > AEN-BERT > BERT-PT > BERT, which is consistent with the previous assumption that aspect-related information is the crucial factor influencing the performance of an ABSA model.", "bbox": [507, 595, 884, 755], "page_idx": 5},
{"type": "text", "text": "Finally, despite the outstanding performance of previous models, our DR-BERT still outperforms the most advanced baselines (i.e., T-GCN and RGAT-BERT) in terms of both Accuracy and F1-score. The results demonstrate the effectiveness of the dynamic modeling strategy based on the procedure of semantic comprehension. Meanwhile, it also indicates that our proposed DRA can better grasp the aspect-aware semantics of the sentence than the BERT plug-in components in previous methods.", "bbox": [507, 758, 885, 919], "page_idx": 5},
|
| 1151 |
+
{"type": "table", "img_path": "images/7007877739fda7957132e657eea61359be10ca183c192bd077a42e449da44424.jpg", "table_caption": [], "table_footnote": [], "table_body": "<table><tr><td rowspan=\"2\">Model Variants</td><td colspan=\"2\">Laptop</td></tr><tr><td>Accuracy</td><td>F1-score</td></tr><tr><td>BERT-Base</td><td>77.29</td><td>73.36</td></tr><tr><td>(1): + MLP</td><td>77.94</td><td>74.42</td></tr><tr><td>(2): + DRA</td><td>80.66</td><td>77.13</td></tr><tr><td>(3): + DRA on top 3 layers</td><td>78.64</td><td>75.16</td></tr><tr><td>(4): + DRA on top 6 layers</td><td>79.17</td><td>75.93</td></tr><tr><td>(5): + DRA on top 9 layers</td><td>80.22</td><td>76.49</td></tr><tr><td>(6): DR-BERT</td><td>81.45</td><td>78.16</td></tr></table>", "bbox": [121, 82, 478, 235], "page_idx": 6},
{"type": "text", "text": "Table 3: The ablation study on different components, conducted on the test set of the Laptop dataset. \"BERT-Base\" indicates the vanilla BERT. \"+\" indicates the setting with plug-in components.", "bbox": [112, 243, 489, 300], "page_idx": 6},
|
| 1176 |
+
{"type": "image", "img_path": "images/be1ff10416cae748a8b13c61c0cf45fc7261fbef3e8643c82563bcf2597f5426.jpg", "image_caption": ["(a) The performance (Accuracy) on the three datasets."], "image_footnote": [], "bbox": [115, 311, 235, 370], "page_idx": 6},
{"type": "image", "img_path": "images/25c4cf222d20799198a0217bf10915bb4d9dc19463c0d35095cfee44493008e5.jpg", "image_caption": [], "image_footnote": [], "bbox": [238, 312, 359, 370], "page_idx": 6},
{"type": "image", "img_path": "images/f3666baaa4c2ce0d4c64b92d8760ea8164a5e5e3fb82429f43d55d5dc6e1084a.jpg", "image_caption": ["Figure 3: Comparison of the semantic understanding process between human reading and DRA when judging the sentiment polarity of the aspect \"food\". (a) is the visualization of the human understanding process from the eye tracker†. (b) denotes the aspect-aware words from the re-weighting function."], "image_footnote": [], "bbox": [364, 312, 484, 370], "page_idx": 6},
{"type": "image", "img_path": "images/49f841bbec7f793e723a3966dda1c47dc07706c8071dbb0494487d54255d9811.jpg", "image_caption": ["(b) The performance (F1-score) on the three datasets."], "image_footnote": [], "bbox": [117, 394, 236, 454], "page_idx": 6},
{"type": "image", "img_path": "images/178ebe9b51b87b0751f6f42b87b2f0ea4fc0b4bfef4640f838a5bb5049164729.jpg", "image_caption": ["Figure 2: The ablation study on the re-weighting length of the adapter. Red lines indicate Accuracy/F1 scores while blue and green lines indicate the performance of the best baseline and the BERT-base model respectively."], "image_footnote": [], "bbox": [240, 395, 359, 454], "page_idx": 6},
{"type": "image", "img_path": "images/acd12c3e859e4c23aea49a455f20f5624077b912c26aade9a73703de130aadf3.jpg", "image_caption": [], "image_footnote": [], "bbox": [364, 395, 484, 454], "page_idx": 6},
|
| 1262 |
+
{"type": "text", "text": "4.5 Ablation Study", "text_level": 1, "bbox": [112, 544, 280, 558], "page_idx": 6},
{"type": "text", "text": "Ablations on the Proposed Components. In Table 3, we study the influence of the different components in our framework, including the DRA and the MLPs. We can find that without the adapters and MLPs, DR-BERT degenerates into the BERT model, which gains the worst performance among all the variants. This phenomenon indicates the effectiveness of the DRA and MLP modules. Moreover, by comparing (1) and (2), we can easily conclude that the DRA plays a more crucial role in the final sentiment prediction than the MLPs.", "bbox": [112, 564, 487, 740], "page_idx": 6},
{"type": "text", "text": "Since BERT models are usually quite deep (e.g., 12 layers), we also insert the dynamic re-weighting adapter into only the top layers (i.e., the top 3, 6, and 9 layers) to further verify the effectiveness of the DRA module. The results are shown in Table 3 (3), (4), and (5). We observe that when introducing adapters only to the top layers of DR-BERT, our framework still outperforms the BERT model, showing that the DRA is efficient at encoding the aspect-aware semantics over the whole sentence. In addition, we can also find that the more adapters are incorporated", "bbox": [112, 741, 489, 919], "page_idx": 6},
|
| 1296 |
+
{"type": "image", "img_path": "images/33254869f85229b195d891b214c28b63f1eddc11b487b4cbd78d72b653807300.jpg", "image_caption": ["(a) Human Cognition", "(b) DRA Chosen Words"], "image_footnote": ["food, better, while, definitely, not, return"], "bbox": [556, 82, 833, 143], "page_idx": 6},
|
| 1314 |
+
{"type": "text", "text": "into the BERT layers, the higher the performance gained, illustrating the importance of modeling the deep dynamic semantics over the sentence.", "bbox": [507, 307, 882, 354], "page_idx": 6},
|
| 1325 |
+
{"type": "text", "text": "Ablations on the Scale of the Adapter. In this subsection, we investigate the influence of the scale of the adapter on the different datasets. As shown in Figure 2, we tune the adapter's dynamic re-weighting length $(T)$ over a wide range (i.e., 2 to 10). Specifically, the performance of DR-BERT first improves as the re-weighting length increases, achieving the best result at around 7. Then, as the length continues to increase, the performance declines. This phenomenon is consistent with the psychological findings that human memory focuses on nearly seven words (Tononi, 2008; Koch and Tsuchiya, 2007), which further indicates the effectiveness of the DRA in modeling human-like (dynamic) semantic comprehension.", "bbox": [507, 357, 882, 598], "page_idx": 6},
{"type": "text", "text": "Besides, compared with the best-performing baseline (blue lines), our model can achieve better performance with only 4 or 5 re-weighting steps on most test sets, illustrating the efficiency of the re-weighting adapter. On the other hand, we can also find that DR-BERT always gives superior performance compared to the BERT-based model (green lines), even with the lowest re-weighting length. All these results show that DR-BERT can better comprehend aspect-aware dynamic semantics in aspect-based sentiment analysis.", "bbox": [507, 599, 882, 776], "page_idx": 6},
|
| 1347 |
+
{"type": "text", "text": "4.6 Interpretability Verification", "text_level": 1, "bbox": [507, 787, 773, 802], "page_idx": 6},
{"type": "text", "text": "Comparison of Semantic Comprehension. To evaluate the rationality and interpretability of the model, we conduct a study of dynamic semantic comprehension with an eye tracker. As shown in Figure 3 (a),", "bbox": [507, 807, 882, 872], "page_idx": 6},
|
| 1370 |
+
{
|
| 1371 |
+
"type": "page_footnote",
|
| 1372 |
+
"text": "†The procedure of the human semantic comprehension is generated by the eye tracker: https://www.tobiipro.com/product-listing/nano/",
|
| 1373 |
+
"bbox": [
|
| 1374 |
+
507,
|
| 1375 |
+
879,
|
| 1376 |
+
884,
|
| 1377 |
+
917
|
| 1378 |
+
],
|
| 1379 |
+
"page_idx": 6
|
| 1380 |
+
},
|
| 1381 |
+
{
|
| 1382 |
+
"type": "image",
|
| 1383 |
+
"img_path": "images/bb9a72a6a095c49882fa9a9a1b3dbd6a5c4ecbfd5dbe82a11918cc5afe6331e3.jpg",
|
| 1384 |
+
"image_caption": [
|
| 1385 |
+
"Figure 4: Visualization results of multiple aspects in the same sentence. The blue part indicates the aspect and its ground truth. The middle subfigures represent the procedure of human's semantic comprehension which is targeted at one specific aspect. The green subfigures are the predicted labels and the chosen word sequences from DRA."
|
| 1386 |
+
],
|
| 1387 |
+
"image_footnote": [],
|
| 1388 |
+
"bbox": [
|
| 1389 |
+
152,
|
| 1390 |
+
80,
|
| 1391 |
+
845,
|
| 1392 |
+
204
|
| 1393 |
+
],
|
| 1394 |
+
"page_idx": 7
|
| 1395 |
+
},
|
| 1396 |
+
{
|
| 1397 |
+
"type": "table",
|
| 1398 |
+
"img_path": "images/0f3825c81a7716d734b1c9f77eb88e05221b841fcc5b4808768f653e83daa721.jpg",
|
| 1399 |
+
"table_caption": [],
|
| 1400 |
+
"table_footnote": [],
|
| 1401 |
+
"table_body": "<table><tr><td>Case Examples. The label in brackets represents ground truth.</td><td>BERT-base</td><td>RGAT-BERT</td><td>DR-BERT</td></tr><tr><td>Aspects: “system memory”(Neg.), “DDR5”(Pos.), “DDR3”(Neg.)</td><td>Pos/Neg/Neg</td><td>Neg/Pos/Pos</td><td>Neg/Pos/Neg</td></tr><tr><td>Sentence: It could be a perfect laptop if it would have faster system memory and its radeon would have DDR5 instead of DDR3.</td><td>X / X / X</td><td>✓ / ✓ / X</td><td>✓ / ✓ / ✓</td></tr><tr><td>Aspects: “Supplied software” (Neu.), “software” (Pos.), “Windows” (Neg.)</td><td>Pos/ Pos/ Pos</td><td>Pos/Pos/Neu</td><td>Pos/Pos/Neg</td></tr><tr><td>Sentence: Supplied software: The software that comes with this machine is greatly welcomed compared to what Windows comes with.</td><td>X / ✓ / X</td><td>X / ✓ / X</td><td>X / ✓ / ✓</td></tr><tr><td>Aspects: “waiter” (Neg.), “served” (Neg.), “specials” (Pos.)</td><td>Neg/Neg/Neg</td><td>Neg/Neg/Neu</td><td>Neg/Neg/Pos</td></tr><tr><td>Sentence: First, the waiter who served us neglected to fill us in on the specials, which I would have chosen had I known about them.</td><td>✓ / ✓ / X</td><td>✓ / ✓ / X</td><td>✓ / ✓ / ✓</td></tr></table>",
|
| 1402 |
+
"bbox": [
|
| 1403 |
+
115,
|
| 1404 |
+
269,
|
| 1405 |
+
878,
|
| 1406 |
+
418
|
| 1407 |
+
],
|
| 1408 |
+
"page_idx": 7
|
| 1409 |
+
},
|
| 1410 |
+
{
|
| 1411 |
+
"type": "text",
|
| 1412 |
+
"text": "Table 4: Error analysis of two review items from laptop and restaurant. The colored words in brackets represents ground truth sentiment label of the corresponding aspect. The symbol $\\checkmark$ means the predicting sentiment is correct, and the other symbol means the predicting sentiment is wrong.",
|
| 1413 |
+
"bbox": [
|
| 1414 |
+
112,
|
| 1415 |
+
426,
|
| 1416 |
+
882,
|
| 1417 |
+
470
|
| 1418 |
+
],
|
| 1419 |
+
"page_idx": 7
|
| 1420 |
+
},
|
| 1421 |
+
{
|
| 1422 |
+
"type": "text",
|
| 1423 |
+
"text": "when a person tries to understand a relatively long sentence, he/she first read the entire sentence. Subsequently, after giving a specific aspect, he/she will dynamically select related words based on the previous memory state until he/she fully understands the sentiment polarity of the given aspect.",
|
| 1424 |
+
"bbox": [
|
| 1425 |
+
112,
|
| 1426 |
+
494,
|
| 1427 |
+
487,
|
| 1428 |
+
590
|
| 1429 |
+
],
|
| 1430 |
+
"page_idx": 7
|
| 1431 |
+
},
|
| 1432 |
+
{
|
| 1433 |
+
"type": "text",
|
| 1434 |
+
"text": "Interestingly, the above phenomenon is consistent with our dynamic re-weighting adapter's chosen result. Specifically, as Figure 3 (b) shows, with the re-weighting function $F$ (i.e., Equation 5 and 6), our model dynamically choose the words \"food, better, while, definitely, not, ...\" which have proven to be very important for predicting the sentiment of aspect \"food\" in Figure 3 (a). Those experimental results again fully indicate the effectiveness and interpretability of our proposed model in dynamic learning aspect-aware information.",
|
| 1435 |
+
"bbox": [
|
| 1436 |
+
112,
|
| 1437 |
+
594,
|
| 1438 |
+
487,
|
| 1439 |
+
770
|
| 1440 |
+
],
|
| 1441 |
+
"page_idx": 7
|
| 1442 |
+
},
|
| 1443 |
+
{
|
| 1444 |
+
"type": "text",
|
| 1445 |
+
"text": "The Influence of multiple Aspects. As aspect-related information plays a key role in ABSA and at least $10.2\\%$ of reviews contain multiple aspects as shown in Table 1, we are curious about the model's performance in the complex scenarios, e.g., a review sentence contains multiple aspects. Therefore, we randomly choose an example to explore how the selection of the context words will correspondingly change with different inputs. The visualization re",
|
| 1446 |
+
"bbox": [
|
| 1447 |
+
112,
|
| 1448 |
+
774,
|
| 1449 |
+
489,
|
| 1450 |
+
919
|
| 1451 |
+
],
|
| 1452 |
+
"page_idx": 7
|
| 1453 |
+
},
|
| 1454 |
+
{
|
| 1455 |
+
"type": "text",
|
| 1456 |
+
"text": "sults are shown in Figure 4. Specifically, the chosen sentence has three different aspects with their sentiment polarity, i.e., \"System memory\"-negative, \"DDR5\"-positive and \"DDR3\"-negative. Take the aspect \"DDR5\" as example, it is positive which is contrary to \"DDR3\". After receiving the overall semantic of the whole sentence, readers tend to associate \"DDR5\" with the context words {\"would\", \"have\"} to predict the correct sentiment \"positive\". For other two aspects, the observations are consistent with \"DDR5\". In summary, all those results show that DR-BERT could dynamically extract the vital information to achieve aspect-aware semantic understanding even in a more complex scenario.",
|
| 1457 |
+
"bbox": [
|
| 1458 |
+
505,
|
| 1459 |
+
494,
|
| 1460 |
+
884,
|
| 1461 |
+
720
|
| 1462 |
+
],
|
| 1463 |
+
"page_idx": 7
|
| 1464 |
+
},
|
| 1465 |
+
{
|
| 1466 |
+
"type": "text",
|
| 1467 |
+
"text": "4.7 Error Analysis",
|
| 1468 |
+
"text_level": 1,
|
| 1469 |
+
"bbox": [
|
| 1470 |
+
507,
|
| 1471 |
+
734,
|
| 1472 |
+
672,
|
| 1473 |
+
750
|
| 1474 |
+
],
|
| 1475 |
+
"page_idx": 7
|
| 1476 |
+
},
|
| 1477 |
+
{
|
| 1478 |
+
"type": "text",
|
| 1479 |
+
"text": "Table 4 displays three review examples and their prediction results by BERT, RGAT-BERT, and our DR-BERT. As we can see from the \"BERT-base\" column, when there are multiple aspects, the vanilla BERT often makes the wrong classification since it tends to learn the overall sentiment polarity of the sentences instead of the aspect-aware semantic. While RGAT-BERT can alleviate the problem to a certain extent, it is also hard to predict the accurate sentiment label with few dependency relations. For",
|
| 1480 |
+
"bbox": [
|
| 1481 |
+
505,
|
| 1482 |
+
758,
|
| 1483 |
+
884,
|
| 1484 |
+
919
|
| 1485 |
+
],
|
| 1486 |
+
"page_idx": 7
|
| 1487 |
+
},
|
| 1488 |
+
{
|
| 1489 |
+
"type": "table",
|
| 1490 |
+
"img_path": "images/69da54fb4f17abbaf59c106a3644062c48859a973cb490e094eee8f591425589.jpg",
|
| 1491 |
+
"table_caption": [],
|
| 1492 |
+
"table_footnote": [],
|
| 1493 |
+
"table_body": "<table><tr><td rowspan=\"2\">Methods</td><td colspan=\"3\">Laptop</td><td colspan=\"3\">Restaurant</td><td colspan=\"3\">Twitter</td></tr><tr><td>S</td><td>E</td><td>T</td><td>S</td><td>E</td><td>T</td><td>S</td><td>E</td><td>T</td></tr><tr><td>(1) DR-BERT</td><td>157s</td><td>10</td><td>26.1m</td><td>183s</td><td>10</td><td>30.5m</td><td>379s</td><td>10</td><td>63.2m</td></tr><tr><td>(2) T-GCN-BERT</td><td>168s</td><td>10</td><td>28.0m</td><td>188s</td><td>10</td><td>31.3m</td><td>411s</td><td>10</td><td>68.5m</td></tr><tr><td>(3) BERT-base</td><td>133s</td><td>10</td><td>22.2m</td><td>158s</td><td>10</td><td>26.3m</td><td>242s</td><td>10</td><td>40.3m</td></tr><tr><td>(4) ATAE-LSTM</td><td>3s</td><td>30</td><td>1.50m</td><td>4s</td><td>30</td><td>2.00m</td><td>5s</td><td>30</td><td>2.50m</td></tr></table>",
|
| 1494 |
+
"bbox": [
|
| 1495 |
+
127,
|
| 1496 |
+
83,
|
| 1497 |
+
870,
|
| 1498 |
+
187
|
| 1499 |
+
],
|
| 1500 |
+
"page_idx": 8
|
| 1501 |
+
},
|
| 1502 |
+
{
|
| 1503 |
+
"type": "text",
|
| 1504 |
+
"text": "Table 5: Runtime comparison between DR-BERT, T-GCN-BERT, BERT-base and ATAE-LSTM. Specifically, \"S\" represents the training time (seconds) for a single epoch, \"E\" denotes the number of training epochs, and \"T\" is the total training time (minutes).",
|
| 1505 |
+
"bbox": [
|
| 1506 |
+
112,
|
| 1507 |
+
198,
|
| 1508 |
+
884,
|
| 1509 |
+
242
|
| 1510 |
+
],
|
| 1511 |
+
"page_idx": 8
|
| 1512 |
+
},
|
| 1513 |
+
{
|
| 1514 |
+
"type": "text",
|
| 1515 |
+
"text": "example, in the first sentence, \"DDR3\" has few helpful syntactic dependency relations. Therefore, RGAT-BERT makes a wrong sentiment prediction. However, our DR-BERT model, succeeding in predicting most sentiment labels by considering the dynamic changing of the aspect-aware semantic. For other two case examples, the observations are consistent. Note that, for aspect \"Supplied software\" in second sentence, two overlap aspects appear in the same sentence makes it more difficult to distinguish the different sentiment between them. Thus, precisely determine its sentiment polarity is a big challenge for human, let alone deep learning models. This also leaves space for future exploration.",
|
| 1516 |
+
"bbox": [
|
| 1517 |
+
110,
|
| 1518 |
+
261,
|
| 1519 |
+
492,
|
| 1520 |
+
487
|
| 1521 |
+
],
|
| 1522 |
+
"page_idx": 8
|
| 1523 |
+
},
|
| 1524 |
+
{
|
| 1525 |
+
"type": "text",
|
| 1526 |
+
"text": "5 Computation Time Comparison",
|
| 1527 |
+
"text_level": 1,
|
| 1528 |
+
"bbox": [
|
| 1529 |
+
112,
|
| 1530 |
+
504,
|
| 1531 |
+
423,
|
| 1532 |
+
520
|
| 1533 |
+
],
|
| 1534 |
+
"page_idx": 8
|
| 1535 |
+
},
|
| 1536 |
+
{
|
| 1537 |
+
"type": "text",
|
| 1538 |
+
"text": "We also compared the computation runtime of three baseline methods. All of the models are performed on a Linux server with 64 Intel(R) CPUs and 4 Tesla V100 32GB GPUs. From the results shown in Table 5, we can first observe that the training time of a single epoch in DR-BERT performs better than T-GCN, which is based on GCN. Meanwhile, the training time of all these BERT-based models is similar (i.e., there is no significant difference). The possible reason is that the official datasets are small, and it is hard to influence the overall runtime of PLMs with such a small amount of data. Second, compared with other models, the training time of the ATAE-LSTM model is less (always an order of magnitude lower). For example, the ATAE-LSTM only needs about two minutes to achieve optimal performance in the restaurant dataset, while BERT-based models require more than 26 minutes. Therefore, though DR-BERT contains a Dynamic Re-weighting adapter based on GRU, the computation time is much lower than the BERT-based framework. In summary, the observations above show that the computation time of our DR-BERT model is within an acceptable range.",
|
| 1539 |
+
"bbox": [
|
| 1540 |
+
110,
|
| 1541 |
+
533,
|
| 1542 |
+
489,
|
| 1543 |
+
919
|
| 1544 |
+
],
|
| 1545 |
+
"page_idx": 8
|
| 1546 |
+
},
|
| 1547 |
+
{
|
| 1548 |
+
"type": "text",
|
| 1549 |
+
"text": "6 Conclusion and Future Works",
|
| 1550 |
+
"text_level": 1,
|
| 1551 |
+
"bbox": [
|
| 1552 |
+
507,
|
| 1553 |
+
261,
|
| 1554 |
+
806,
|
| 1555 |
+
277
|
| 1556 |
+
],
|
| 1557 |
+
"page_idx": 8
|
| 1558 |
+
},
|
| 1559 |
+
{
|
| 1560 |
+
"type": "text",
|
| 1561 |
+
"text": "This paper introduced a new approach named Dynamic Re-weighting BERT (DR-BERT) for aspect-based sentiment analysis. Specifically, we first employed the BERT layers as a base encoder to learn the overall semantic features of the whole sentence. Then, inspired by human semantic comprehension, we devised a new Dynamic Re-weighting Adapter (DRA) to enhance aspect-aware semantic features in the sentiment learning process. In addition, we inserted the DRA into the BERT layers to address the limitations of the vanilla pre-trained model in ABSA task. Extensive experiments on three benchmark datasets demonstrated the effectiveness and interpretability of the proposed model, with good semantic comprehension insights for future nature language modeling. Moreover, the error analysis was performed on incorrectly predicted examples, leading to some insights into the ABSA task.",
|
| 1562 |
+
"bbox": [
|
| 1563 |
+
505,
|
| 1564 |
+
286,
|
| 1565 |
+
884,
|
| 1566 |
+
575
|
| 1567 |
+
],
|
| 1568 |
+
"page_idx": 8
|
| 1569 |
+
},
|
| 1570 |
+
{
|
| 1571 |
+
"type": "text",
|
| 1572 |
+
"text": "We hope our research can help boost excellent work for aspect-based sentiment analysis from different perspectives. In the future, we plan to extend our method to other tasks like Sentence Semantic Matching, Relation Extraction, etc., which can also benefit from utilizing the dynamic semantics. Besides, we will explore whether DR-BERT can make any positive changes based on previous mistakes during the dynamic semantic understanding.",
|
| 1573 |
+
"bbox": [
|
| 1574 |
+
507,
|
| 1575 |
+
576,
|
| 1576 |
+
884,
|
| 1577 |
+
721
|
| 1578 |
+
],
|
| 1579 |
+
"page_idx": 8
|
| 1580 |
+
},
|
| 1581 |
+
{
|
| 1582 |
+
"type": "text",
|
| 1583 |
+
"text": "7 Acknowledgments",
|
| 1584 |
+
"text_level": 1,
|
| 1585 |
+
"bbox": [
|
| 1586 |
+
507,
|
| 1587 |
+
733,
|
| 1588 |
+
702,
|
| 1589 |
+
749
|
| 1590 |
+
],
|
| 1591 |
+
"page_idx": 8
|
| 1592 |
+
},
|
| 1593 |
+
{
|
| 1594 |
+
"type": "text",
|
| 1595 |
+
"text": "We would like to thank the anonymous reviewers for the helpful comments. This research was partially supported by grants from the National Key R&D Program of China (No. 2021YFF0901003), and the National Natural Science Foundation of China (No. 61922073, 61727809, 62006066 and 72101176). We appreciate all the authors for their fruitful discussions. We also special thanks to all the first-line healthcare providers that are fighting the war of COVID-19.",
|
| 1596 |
+
"bbox": [
|
| 1597 |
+
507,
|
| 1598 |
+
758,
|
| 1599 |
+
884,
|
| 1600 |
+
917
|
| 1601 |
+
],
|
| 1602 |
+
"page_idx": 8
|
| 1603 |
+
},
|
| 1604 |
+
{
|
| 1605 |
+
"type": "text",
|
| 1606 |
+
"text": "References",
|
| 1607 |
+
"text_level": 1,
|
| 1608 |
+
"bbox": [
|
| 1609 |
+
115,
|
| 1610 |
+
84,
|
| 1611 |
+
213,
|
| 1612 |
+
98
|
| 1613 |
+
],
|
| 1614 |
+
"page_idx": 9
|
| 1615 |
+
},
|
| 1616 |
+
{
|
| 1617 |
+
"type": "list",
|
| 1618 |
+
"sub_type": "ref_text",
|
| 1619 |
+
"list_items": [
|
| 1620 |
+
"James C Bezdek. 1992. On the relationship between neural networks, pattern recognition and intelligence. International journal of approximate reasoning, 6(2):85-107.",
|
| 1621 |
+
"Harm Brouwer, Francesca Delogu, Noortje J Venhuizen, and Matthew W Crocker. 2021. Neurobehavioral correlates of surprisal in language comprehension: A neurocomputational model. Frontiers in Psychology, 12:110.",
|
| 1622 |
+
"Chenhua Chen, Zhiyang Teng, and Yue Zhang. 2020. Inducing target-specific latent structures for aspect sentiment classification. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5596-5607.",
|
| 1623 |
+
"Jin Yao Chin, Kaiqi Zhao, Shafiq Joty, and Gao Cong. 2018. Anr: Aspect-based neural recommender. In Proceedings of the 27th ACM International Conference on Information and Knowledge Management, pages 147-156.",
|
| 1624 |
+
"Kyunghyun Cho, Bart van, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using rnn encoder-decoder for statistical machine translation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1724-1734.",
|
| 1625 |
+
"Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher D. Manning. 2019. What does BERT look at? an analysis of BERT's attention. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 276-286, Florence, Italy. Association for Computational Linguistics.",
|
| 1626 |
+
"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4171-4186.",
|
| 1627 |
+
"Xiaowen Ding and Bing Liu. 2007. The utility of linguistic rules in opinion mining. In Proceedings of the 30th annual international ACM SIGIR conference on Research and development in information retrieval, pages 811-812.",
|
| 1628 |
+
"Li Dong, Furu Wei, Chuanqi Tan, Duyu Tang, Ming Zhou, and Ke Xu. 2014. Adaptive recursive neural network for target-dependent twitter sentiment classification. In Proceedings of the 52nd annual meeting of the association for computational linguistics (volume 2: Short papers), pages 49-54.",
|
| 1629 |
+
"Feifan Fan, Yansong Feng, and Dongyan Zhao. 2018. Multi-grained attention network for aspect-level sentiment classification. In Proceedings of the 2018 conference on empirical methods in natural language processing, pages 3433-3442."
|
| 1630 |
+
],
|
| 1631 |
+
"bbox": [
|
| 1632 |
+
115,
|
| 1633 |
+
105,
|
| 1634 |
+
489,
|
| 1635 |
+
919
|
| 1636 |
+
],
|
| 1637 |
+
"page_idx": 9
|
| 1638 |
+
},
|
| 1639 |
+
{
|
| 1640 |
+
"type": "list",
|
| 1641 |
+
"sub_type": "ref_text",
|
| 1642 |
+
"list_items": [
|
| 1643 |
+
"Tal Golan, Prashant C Raju, and Nikolaus Kriegesko-orte. 2020. Controversial stimuli: Pitting neural networks against each other as models of human cognition. Proceedings of the National Academy of Sciences, 117(47):29330-29337.",
|
| 1644 |
+
"Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), pages 770-778.",
|
| 1645 |
+
"Binxuan Huang and Kathleen M Carley. 2019. Syntax-aware aspect level sentiment classification with graph attention networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5469-5477.",
|
| 1646 |
+
"Binxuan Huang, Yanglan Ou, and Kathleen M Carley. 2018. Aspect level sentiment classification with attention-over-attention neural networks. In International Conference on Social Computing, Behavioral-Cultural Modeling and Prediction and Behavior Representation in Modeling and Simulation, pages 197-206. Springer.",
|
| 1647 |
+
"Long Jiang, Mo Yu, Ming Zhou, Xiaohua Liu, and Tiejun Zhao. 2011. Target-dependent twitter sentiment classification. In Proceedings of the 49th annual meeting of the association for computational linguistics, pages 151-160.",
|
| 1648 |
+
"Svetlana Kiritchenko, Xiaodan Zhu, Colin Cherry, and Saif Mohammad. 2014. Nrc-canada-2014: Detecting aspects and sentiment in customer reviews. In Proceedings of the 8th international workshop on semantic evaluation (SemEval 2014), pages 437-442.",
|
| 1649 |
+
"Christof Koch and Naotsugu Tsuchiya. 2007. Attention and consciousness: two distinct brain processes. Trends in cognitive sciences, 11(1):16-22.",
|
| 1650 |
+
"Gina R Kuperberg. 2007. Neural mechanisms of language comprehension: Challenges to syntax. *Brain research*, 1146:23-49.",
|
| 1651 |
+
"Gina R and T Florian Jaeger. 2016. What do we mean by prediction in language comprehension? Language, cognition and neuroscience, 31(1):32-59.",
|
| 1652 |
+
"Siwei Lai, Liheng Xu, Kang Liu, and Jun Zhao. 2015. Recurrent convolutional neural networks for text classification. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 29.",
|
| 1653 |
+
"Ruifan Li, Hao Chen, Fangxiang Feng, Zhanyu Ma, Xiaojie Wang, and Eduard Hovy. 2021. Dual graph convolutional networks for aspect-based sentiment analysis. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 6319-6329."
|
| 1654 |
+
],
|
| 1655 |
+
"bbox": [
|
| 1656 |
+
510,
|
| 1657 |
+
85,
|
| 1658 |
+
884,
|
| 1659 |
+
919
|
| 1660 |
+
],
|
| 1661 |
+
"page_idx": 9
|
| 1662 |
+
},
|
| 1663 |
+
{
|
| 1664 |
+
"type": "list",
|
| 1665 |
+
"sub_type": "ref_text",
|
| 1666 |
+
"list_items": [
|
| 1667 |
+
"Xiangsheng Li, Jiaxin Mao, Chao Wang, Yiqun Liu, Min Zhang, and Shaoping Ma. 2019. Teach machine how to read: reading behavior inspired relevance estimation. In Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 795-804.",
|
| 1668 |
+
"Xin Li, Lidong Bing, Wai Lam, and Bei Shi. 2018. Transformation networks for target-oriented sentiment classification. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, pages 946-956.",
|
| 1669 |
+
"Yunlong Liang, Fandong Meng, Jinchao Zhang, Jinan Xu, Yufeng Chen, and Jie Zhou. 2019. A novel aspect-guided deep transition model for aspect based sentiment analysis. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5569-5580.",
|
| 1670 |
+
"Dehong Ma, Sujian Li, Xiaodong Zhang, and Houfeng Wang. 2017. Interactive attention networks for aspect-level sentiment classification. In Proceedings of the 26th International Joint Conference on Artificial Intelligence, pages 4068-4074.",
|
| 1671 |
+
"Thien Hai Nguyen and Kiyoaki Shirai. 2015. Phrasernn: Phrase recursive neural network for aspect-based sentiment analysis. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 2509-2514.",
|
| 1672 |
+
"Wei Peng, Yue Hu, Luxi Xing, Yuqiang Xie, Jing Yu, Yajing Sun, and Xiangpeng Wei. 2020. Bidirectional cognitive thinking network for machine reading comprehension. In Proceedings of the 28th International Conference on Computational Linguistics, pages 2613-2623.",
|
| 1673 |
+
"Maria Pontiki, Dimitris Galanis, John Pavlopoulos, Harris Papageorgiou, Ion Androutsopoulos, and Suresh Manandhar. 2014. SemEval-2014 task 4: Aspect based sentiment analysis. In Proceedings of the 8th International Workshop on Semantic Evaluation (SemEval 2014), pages 27-35, Dublin, Ireland. Association for Computational Linguistics.",
|
| 1674 |
+
"Keith Rayner. 1998. Eye movements in reading and information processing: 20 years of research. *Psychological bulletin*, 124(3):372.",
|
| 1675 |
+
"Cansu Sen, Thomas Hartvigsen, Biao Yin, Xiangnan Kong, and Elke Runden. 2020. Human attention maps for text classification: Do humans and neural networks focus on the same words? In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4596-4608.",
|
| 1676 |
+
"Lei Sha, Baobao Chang, Zhifang Sui, and Sujian Li. 2016. Reading and thinking: Re-read LSTM unit for textual entailment recognition. In Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pages 2870-2879."
|
| 1677 |
+
],
|
| 1678 |
+
"bbox": [
|
| 1679 |
+
115,
|
| 1680 |
+
85,
|
| 1681 |
+
489,
|
| 1682 |
+
917
|
| 1683 |
+
],
|
| 1684 |
+
"page_idx": 10
|
| 1685 |
+
},
|
| 1686 |
+
{
|
| 1687 |
+
"type": "list",
|
| 1688 |
+
"sub_type": "ref_text",
|
| 1689 |
+
"list_items": [
|
| 1690 |
+
"Selina Sharmin, Oleg Špakov, and Kari-Jouko Räihä. 2015. Dynamic text presentation in print interpreting—an eye movement study of reading behaviour. International Journal of Human-Computer Studies, 78:17–30.",
|
| 1691 |
+
"Youwei Song, Jiahai Wang, Tao Jiang, Zhiyue Liu, and Yanghui Rao. 2019. Attentional encoder network for targeted sentiment classification. arXiv preprint arXiv:1902.09314.",
|
| 1692 |
+
"Concetto Spampinato, Simone Palazzo, Isaak Kavasidis, Daniela Giordano, Nasim Souly, and Mubarak Shah. 2017. Deep learning human mind for automated visual classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6809-6817.",
|
| 1693 |
+
"Chi Sun, Luyao Huang, and Xipeng Qiu. 2019. Utilizing bert for aspect-based sentiment analysis via constructing auxiliary sentence. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 380-385.",
|
| 1694 |
+
"Yaming Sun, Lei Lin, Duyu Tang, Nan Yang, Zhenzhou Ji, and Xiaolong Wang. 2015. Modeling mention, context and entity with neural networks for entity disambiguation. In Twenty-fourth international joint conference on artificial intelligence.",
|
| 1695 |
+
"Niels A Taatgen, Hedderik Van Rijn, and John Anderson. 2007. An integrated theory of prospective time interval estimation: The role of cognition, attention, and learning. Psychological review, 114(3):577.",
|
| 1696 |
+
"Xingwei Tan, Yi Cai, and Changxi Zhu. 2019. Recognizing conflict opinions in aspect-level sentiment classification with dual attention networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing (EMNLP-IJCNLP), pages 3426-3431.",
|
| 1697 |
+
"Duyu Tang, Bing Qin, Xiaocheng Feng, and Ting Liu. 2015. Effective lstms for target-dependent sentiment classification. arXiv preprint arXiv:1512.01100.",
|
| 1698 |
+
"Duyu Tang, Bing Qin, and Ting Liu. 2016. Aspect level sentiment classification with deep memory network. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 214-224.",
|
| 1699 |
+
"Hao Tang, Donghong Ji, Chenliang Li, and Qiji Zhou. 2020. Dependency graph enhanced dual-transformer structure for aspect-based sentiment classification. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 6578-6588.",
|
| 1700 |
+
"Jialong Tang, Ziyao Lu, Jinsong Su, Yubin Ge, Linfeng Song, Le Sun, and Jiebo Luo. 2019. Progressive self-supervised attention learning for aspect-level sentiment analysis. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 557-566."
|
| 1701 |
+
],
|
| 1702 |
+
"bbox": [
|
| 1703 |
+
510,
|
| 1704 |
+
85,
|
| 1705 |
+
882,
|
| 1706 |
+
917
|
| 1707 |
+
],
|
| 1708 |
+
"page_idx": 10
|
| 1709 |
+
},
|
| 1710 |
+
{
|
| 1711 |
+
"type": "list",
|
| 1712 |
+
"sub_type": "ref_text",
|
| 1713 |
+
"list_items": [
|
| 1714 |
+
"Yuanhe Tian, Guimin Chen, and Yan Song. 2021. Aspect-based sentiment analysis with type-aware graph convolutional networks and layer ensemble. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2910-2922.",
|
| 1715 |
+
"Giulio Tononi. 2008. Consciousness as integrated information: a provisional manifesto. The Biological Bulletin, 215(3):216-242.",
|
| 1716 |
+
"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. arXiv preprint arXiv:1706.03762.",
|
| 1717 |
+
"Jingjing Wang, Changlong Sun, Shoushan Li, Xiaozhong Liu, Luo Si, Min Zhang, and Guodong Zhou. 2019. Aspect sentiment classification towards question-answering with reinforced bidirectional attention network. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3548-3557.",
|
| 1718 |
+
"Kai Wang, Weizhou Shen, Yunyi Yang, Xiaojun Quan, and Rui Wang. 2020. Relational graph attention network for aspect-based sentiment analysis. In Proceedings of 58th Annual Meeting of the Association for Computational Linguistics, pages 3229-3238.",
|
| 1719 |
+
"Xuejian Wang, Lantao Yu, Kan Ren, Guanyu Tao, Weinan Zhang, and et al. 2017. Dynamic attention deep model for article recommendation by learning human editors' demonstration. In Proceedings of the 23rd international conference on knowledge discovery and data mining, pages 2051-2059.",
|
| 1720 |
+
"Yequan Wang, Minlie Huang, Xiaoyan Zhu, and Li Zhao. 2016. Attention-based LSTM for aspect-level sentiment classification. In Proceedings of the 2016 conference on empirical methods in natural language processing, pages 606-615.",
|
| 1721 |
+
"Bowen Xing, Lejian Liao, Dandan Song, and et al. 2019. Earlier attention? aspect-aware LSTM for aspect-based sentiment analysis. In *IJCAI*.",
|
| 1722 |
+
"Hu Xu, Bing Liu, Lei Shu, and S Yu Philip. 2019a. Bert post-training for review reading comprehension and aspect-based sentiment analysis. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 2324-2335.",
|
| 1723 |
+
"Hu Xu, Lei Shu, S Yu Philip, and Bing Liu. 2020. Understanding pre-trained bert for aspect-based sentiment analysis. In Proceedings of the 28th International Conference on Computational Linguistics, pages 244-250.",
|
| 1724 |
+
"Jingjing Xu, Xu Sun, Zhiyuan Zhang, Guangxiang Zhao, and Junyang Lin. 2019b. Understanding and improving layer normalization. Advances in Neural Information Processing Systems, 32."
|
| 1725 |
+
],
|
| 1726 |
+
"bbox": [
|
| 1727 |
+
115,
|
| 1728 |
+
85,
|
| 1729 |
+
489,
|
| 1730 |
+
917
|
| 1731 |
+
],
|
| 1732 |
+
"page_idx": 11
|
| 1733 |
+
},
|
| 1734 |
+
{
|
| 1735 |
+
"type": "list",
|
| 1736 |
+
"sub_type": "ref_text",
|
| 1737 |
+
"list_items": [
|
| 1738 |
+
"Hsien-Ming Yang and George W McConkie. 1999. Reading chinese: Some basic eye-movement characteristics. Reading Chinese script: A cognitive analysis, pages 207-222.",
|
| 1739 |
+
"Quanzeng You, Hailin Jin, Zhaowen Wang, Chen Fang, and Jiebo Luo. 2016. Image captioning with semantic attention. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4651-4659.",
|
| 1740 |
+
"Chen Zhang, Qiuchi Li, and Dawei Song. 2019a. Aspect-based sentiment classification with aspect-specific graph convolutional networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4568-4578.",
|
| 1741 |
+
"Kai Zhang, Qi Liu, Hao Qian, Biao Xiang, Qing Cui, Jun Zhou, and Enhong Chen. 2021a. Eatn: An efficient adaptive transfer network for aspect-level sentiment analysis. IEEE Transactions on Knowledge and Data Engineering.",
|
| 1742 |
+
"Kai Zhang, Hao Qian, Qi Liu, Zhiqiang Zhang, Jun Zhou, and et al. 2021b. Sifn: A sentiment-aware interactive fusion network for review-based item recommendation. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management, pages 3627-3631.",
|
| 1743 |
+
"Kai Zhang, Hefu Zhang, Qi Liu, Hongke Zhao, Hengshu Zhu, and Enhong Chen. 2019b. Interactive attention transfer network for cross-domain sentiment classification. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 5773-5780.",
|
| 1744 |
+
"Kai Zhang, Hongke Zhao, Qi Liu, Zhen Pan, and Enhong Chen. 2019c. A dynamic and cooperative tracking system for crowdfunding. arXiv preprint arXiv:2002.00847.",
|
| 1745 |
+
"Kun Zhang, Guangyi Lv, Linyuan Wang, Le Wu, Enhong Chen, Fangzhao Wu, and Xing Xie. 2019d. Drr-net: Dynamic re-read network for sentence semantic matching. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 7442-7449.",
|
| 1746 |
+
"Yaowei Zheng, Richong Zhang, Samuel Mensah, and Yongyi Mao. 2020. Replicate, walk, and stop on syntax: An effective neural network model for aspect-level sentiment classification. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 9685-9692.",
|
| 1747 |
+
"Yukun Zheng, Jiaxin Mao, Yiqun Liu, Zixin Ye, Min Zhang, and Shaoping Ma. 2019. Human behavior inspired machine reading comprehension. In Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 425-434."
|
| 1748 |
+
],
|
| 1749 |
+
"bbox": [
|
| 1750 |
+
510,
|
| 1751 |
+
85,
|
| 1752 |
+
882,
|
| 1753 |
+
885
|
| 1754 |
+
],
|
| 1755 |
+
"page_idx": 11
|
| 1756 |
+
}
|
| 1757 |
+
]
|
2203.16xxx/2203.16369/b6c3126c-a1ee-4c00-afaf-ac1a4f247180_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2203.16xxx/2203.16369/b6c3126c-a1ee-4c00-afaf-ac1a4f247180_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:53b4d0e77f4c4ea83d21595f50ca20a451feb9c7aa9d68903c15a782674289f9
|
| 3 |
+
size 820144
|
2203.16xxx/2203.16369/full.md
ADDED
|
@@ -0,0 +1,384 @@
|
| 1 |
+
# Incorporating Dynamic Semantics into Pre-Trained Language Model for Aspect-based Sentiment Analysis
|
| 2 |
+
|
| 3 |
+
Kai Zhang $^{1}$ , Kun Zhang $^{2}$ , Mengdi Zhang $^{3}$ , Hongke Zhao $^{4}$ , Qi Liu $^{1,*}$ , Wei Wu $^{3}$ , Enhong Chen $^{1}$
|
| 4 |
+
|
| 5 |
+
<sup>1</sup> School of Data Science, University of Science and Technology of China
|
| 6 |
+
|
| 7 |
+
$^{2}$ School of Computer Science and Information Engineering, Hefei University of Technology
|
| 8 |
+
|
| 9 |
+
$^{3}$ Meituan; $^{4}$ College of Management and Economics, Tianjin University kkzhang0808@mail.ustc.edu.cn; {qiliuql, cheneh}@ustc.edu.cn; {zhang1028kun, wuwei19850318, mdz}@gmail.com; hongke@tju.edu.cn
|
| 10 |
+
|
| 11 |
+
# Abstract
|
| 12 |
+
|
| 13 |
+
Aspect-based sentiment analysis (ABSA) predicts sentiment polarity towards a specific aspect in the given sentence. While pre-trained language models such as BERT have achieved great success, incorporating dynamic semantic changes into ABSA remains challenging. To this end, in this paper, we propose to address this problem with Dynamic Re-weighting BERT (DR-BERT), a novel method designed to learn dynamic aspect-oriented semantics for ABSA. Specifically, we first take the Stack-BERT layers as a primary encoder to grasp the overall semantics of the sentence and then fine-tune it by incorporating a lightweight Dynamic Re-weighting Adapter (DRA). Note that the DRA can pay close attention to a small region of the sentence at each step and re-weight the vitally important words for better aspect-aware sentiment understanding. Finally, experimental results on three benchmark datasets demonstrate the effectiveness and the rationality of our proposed model and provide good interpretable insights for future semantic modeling.
|
| 14 |
+
|
| 15 |
+
# 1 Introduction
|
| 16 |
+
|
| 17 |
+
Aspect-based sentiment analysis is a branch of sentiment analysis, which aims to identify sentiment polarity of the specific aspect in a sentence (Jiang et al., 2011). For example, given a sentence "The restaurant has attentive service, but the food is terrible," the task aims to predict the sentiment polarities towards "service" and "food", which should be positive and negative respectively.
|
| 18 |
+
|
| 19 |
+
As a fundamental technology, the ABSA task has broad applications, such as recommender systems (Chin et al., 2018; Zhang et al., 2021b) and question answering (Wang et al., 2019). Therefore, it has attracted a great amount of research from both academia and industry. Among them, deep neural networks (DNN) (Nguyen and Shirai, 2015;
|
| 20 |
+
|
| 21 |
+
Tang et al., 2015, 2016; Zheng et al., 2020), attention mechanism (Wang et al., 2016; Ma et al., 2017) and graph neural/attention networks (Huang and Carley, 2019; Zhang et al., 2019a; Wang et al., 2020) have significantly improved the performance through deep feature alignment between the aspect representations and context representations.
|
| 22 |
+
|
| 23 |
+
Recently, large-scale pre-trained language models, such as Bidirectional Encoder Representations from Transformers (BERT) (Devlin et al., 2019), have achieved breakthroughs in many language tasks, which further attracts considerable attention to enhancing semantic representations. In ABSA, Xu et al. (2019a) designed BERT-PT, which explores a novel post-training approach on the BERT model. Song et al. (2019) further proposed a text pair classification model, BERT-SPC, which prepares the input sequence by appending the aspects to the contextual sentence. Although great success has been achieved by the above studies, some critical problems remain when directly applying attention mechanisms or fine-tuning the pre-trained BERT in the task of ABSA.
|
| 24 |
+
|
| 25 |
+
Specifically, most of the existing approaches select all the important words from a contextual sentence at one time. However, according to neuroscience studies, the essential words during semantic comprehension change dynamically with the reading process and should be repeatedly considered (Kuperberg, 2007; Tononi, 2008; Brouwer et al., 2021). For example, when judging the sentiment polarity of the aspect "system memory" in the review sentence "It could be a perfect laptop if it would have faster system memory and its radeon would have DDR5 instead of DDR3", the important words should change from the general sentiment words {"faster", "perfect", "laptop"} into the aspect-aware words {"would have", "faster", "could", "be", "perfect"}. Through these dynamic changes, the sentiment polarity will change from positive to the ground-truth sentiment label negative.
|
| 26 |
+
|
| 27 |
+
Meanwhile, simply initializing the encoder with a pre-trained BERT does not boost the performance in ABSA as effectively as we expected (Huang and Carley, 2019; Xu et al., 2019a; Wang et al., 2020). One possible reason could be that training on two specific tasks, i.e., Next Sentence Prediction and Masked LM, with rich resources leads to better semantics of the overall sentence. However, the ABSA task is conditional, which means the model needs to understand the regional semantics of sentences by fully considering the given aspect. For instance, BERT tends to understand the global sentiment of the above sentence "It could be a perfect laptop ... of DDR3" regardless of which aspect is given. But in ABSA, the sentence is likely to convey different sentiments for different aspects (e.g., negative for "system memory" while positive for "DDR5"). Therefore, the vanilla BERT can hardly pay close attention to the relevant information for a specific aspect, especially when there are multiple aspects in one sentence.
|
| 28 |
+
|
| 29 |
+
To equip the pre-trained models with the ability to capture aspect-aware dynamic semantics, we present a Dynamic Re-weighting BERT (DR-BERT) model, which considers the aspect-aware dynamic semantics in a pre-trained learning framework. Specifically, we first take the Stack-BERT layers as the primary sentence encoder to learn the overall semantics of the whole sentence. Then, we devise a Dynamic Re-weighting Adapter (DRA), which pays the most careful attention to a small region of the contextual sentence and dynamically selects and re-weights one critical word at each step for better aspect-aware sentiment understanding. Finally, to overcome the limitation of vanilla BERT mentioned above, we incorporate the lightweight DRA into each BERT encoder layer and fine-tune it to adapt to the ABSA task. We conduct extensive experiments on three widely used datasets, where the results demonstrate the effectiveness, rationality and interpretability of the proposed model.
|
| 30 |
+
|
| 31 |
+
# 2 Related Work
|
| 32 |
+
|
| 33 |
+
# 2.1 Aspect-based Sentiment Analysis
|
| 34 |
+
|
| 35 |
+
Aspect-based sentiment analysis identifies the sentiment polarity of a specific aspect in the sentence. Some approaches (Ding and Liu, 2007; Jiang et al., 2011; Kiritchenko et al., 2014) designed rule-based models for ABSA. For example, Ding and Liu (2007) first performed dependency parsing to determine the sentiment polarity of the aspects.
|
| 36 |
+
|
| 37 |
+
In recent years, most research studies make use of the attention mechanism to learn the semantic relations between words (Tang et al., 2015, 2016; Wang et al., 2016; Ma et al., 2017; Xing et al., 2019; Liang et al., 2019; Zhang et al., 2021a). Among them, Wang et al. (2016) proposed an attention-based LSTM to identify important information relating to the aspect. Ma et al. (2017) developed an interactive attention to model the aspect and the sentence interactively. Fan et al. (2018) defined a multi-grained network to link the words from the aspect and the sentence. Li et al. (2018) designed a target-specific network to integrate aspect information into the sentence. Tan et al. (2019) introduced a dual attention to distinguish conflicting opinions.
|
| 38 |
+
|
| 39 |
+
In addition, another research trend is to leverage syntactic knowledge to learn syntax-aware features of the aspect (Tang et al., 2019; Huang and Carley, 2019; Zhang et al., 2019a; Sun et al., 2019; Wang et al., 2020; Tang et al., 2020; Chen et al., 2020; Li et al., 2021; Tian et al., 2021). For example, Tang et al. (2020) developed a dependency graph enhanced dual-transformer network to fuse the flat representations. More recently, pre-trained methods have proved remarkably successful in the ABSA task. Song et al. (2019) devised an attentional encoder and a BERT-SPC model to learn features between aspect and context. Wang et al. (2020) reshaped the dependency trees and proposed a relational graph attention network to encode the syntactic relation features. Tian et al. (2021) explicitly utilized dependency types with type-aware graph networks to learn aspect-aware relations.
|
| 40 |
+
|
| 41 |
+
However, these methods largely ignore the procedure of dynamic semantic comprehension (Kuperberg, 2007; Kuperberg and Jaeger, 2016; Wang et al., 2017; Zhang et al., 2019c; Brouwer et al., 2021) and cannot fully reveal the dynamic semantic changes of the aspect-related words. Thus, it is hard for ABSA models to achieve the same performance as human-level sentiment understanding.
|
| 42 |
+
|
| 43 |
+
# 2.2 Human Semantic Comprehension
|
| 44 |
+
|
| 45 |
+
Actually, no matter in the early days or now, imitating the procedure of human semantic comprehension has always been one of the original intentions of many studies (Bezdek, 1992; Wang et al., 2017; Zheng et al., 2019; Li et al., 2019; Zhang et al., 2019d; Peng et al., 2020; Golan et al., 2020), such as machine reading comprehension (Zhang et al., 2019d; Peng et al., 2020), visual object detecting (Spampinato et al., 2017) and relevance estima
|
| 46 |
+
|
| 47 |
+

|
| 48 |
+
Figure 1: An illustration of the proposed framework. The blue blocks constitute a pre-trained BERT model which are frozen during fine-tuning, and the right block represents the dynamic re-weighting adapter that is inserted after each BERT encoder layer and trained during fine-tuning. Moreover, $S$ and $A$ represent the sentence sequence and the aspect sequence respectively. $N$ indicates the number of layers of the BERT encoder.
|
| 49 |
+
|
| 50 |
+
tion (Li et al., 2019). For example, the attention mechanism (Vaswani et al., 2017) has a widespread influence, which allows the model to focus on important parts of the input as human attention does. Spampinato et al. (2017) aimed to learn human-based features via brain-based visual objects. Wang et al. (2017) built a dynamic attention model to capture human preferences for article recommendation.
|
| 51 |
+
|
| 52 |
+
Moreover, some psychologists and psycholinguists have also done much research on the mechanisms of human semantic comprehension (Kuperberg, 2007; Kuperberg and Jaeger, 2016; Brouwer et al., 2021). Specifically, some scholars (Yang and McConkie, 1999; Rayner, 1998) found that most people may focus on about 1.5 words at a time. Moreover, Koch and Tsuchiya (2007) and Tononi (2008) assumed that people can only remember the meaning of about 7 to 9 words at a time. These phenomena indicate that most people focus on only a small region of the sentence at one time and need to repeatedly process important parts for better semantic understanding (Sharmin et al., 2015).
|
| 53 |
+
|
| 54 |
+
Inspired by the above research and linguistic psychology theories, in this paper, we explore aspect-aware semantic changes of the ABSA task by incorporating the procedure of dynamic semantic comprehension into the pre-trained language model.
|
| 55 |
+
|
| 56 |
+
# 3 Dynamic Re-weighting BERT
|
| 57 |
+
|
| 58 |
+
In this section, we introduce the technical details of DR-BERT. Specifically, we start with the problem definition, followed by the overall architecture of DR-BERT as illustrated in Figure 1.
|
| 59 |
+
|
| 60 |
+
Problem Definition In ABSA, a sentence-aspect pair $(S,A)$ is given. In this paper, the sentence is
|
| 61 |
+
|
| 62 |
+
represented as $S = \{w_1^s, w_2^s, \dots, w_{l_s}^s\}$, which consists of a series of $l_s$ words. The specific aspect is denoted as $A = \{w_1^a, w_2^a, \dots, w_{l_a}^a\}$, which is a part of $S$. $l_a$ is the length of the aspect sequence. The goal of ABSA is to learn a sentiment classifier that can precisely predict the sentiment polarity of sentence $S$ for the specific aspect $A$. As the aspect-related information plays a key role in the prediction (Li et al., 2018; Zheng et al., 2020), this paper aims to dynamically select and encode the aspect-aware semantic information through the proposed model.
|
| 63 |
+
|
| 64 |
+
Overall Architecture DR-BERT mainly contains two components (i.e., BERT encoder and Dynamic Re-weighting Adapter), together with two modules (i.e., the embedding module and sentiment prediction module). The technical details of each part will be elaborated on as follows.
|
| 65 |
+
|
| 66 |
+
# 3.1 Embedding Module
|
| 67 |
+
|
| 68 |
+
To better represent the semantic information of the aspect words and context words, we first map each word into a low-dimensional vector. Specifically, the inputs of DR-BERT are the sentence sequence and the corresponding aspect sequence. For the sentence sequence, we construct the BERT input as "[CLS]" + sentence + "[SEP]", and the sentence $S = \{w_1^s, w_2^s, \dots, w_{l_s}^s\}$ can be transformed into the hidden states $\mathbf{s} = \{\mathbf{s}_i \mid i = 1, 2, \dots, l_s\}$ with the BERT embedding. For the aspect sequence, we adopt the same method to get the representation vector of each word. Thus, through the embedding module, the aspect sequence $A = \{w_1^a, w_2^a, \dots, w_{l_a}^a\}$ is mapped to $\mathbf{a}^s = \{\mathbf{a}_j \mid j = 1, 2, \dots, l_a\}$ . Note that, if the aspect sequence is a single word like "food", the aspect representation is the embedding of the
|
| 69 |
+
|
| 70 |
+
single word "food". While for the cases where the sequence contains multiple words such as "system memory", the aspect representation is the average of each word embedding (Sun et al., 2015). We can denote the aspect embedding process as:
|
| 71 |
+
|
| 72 |
+
$$
|
| 73 |
+
\mathbf{a} = \begin{cases} \mathbf{a}_1, & \text{if } l_a = 1, \\ \left( \sum_{j=1}^{l_a} \mathbf{a}_j \right) / l_a, & \text{if } l_a > 1, \end{cases} \tag{1}
|
| 74 |
+
$$
|
| 75 |
+
|
| 76 |
+
where $\mathbf{a}_j$ is the embedding of word $j$ in the aspect sequence, and $\mathbf{a}$ denotes the embedding of the aspect.
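As a concrete illustration, Eq. 1 amounts to a mean over the aspect's word embeddings. The following minimal PyTorch sketch is our own illustration, not the authors' released code; the tensor name is an assumption:

```python
import torch

def aspect_embedding(aspect_token_embs: torch.Tensor) -> torch.Tensor:
    """Eq. 1: a single-word aspect keeps its embedding; a multi-word
    aspect is represented by the mean of its word embeddings."""
    # aspect_token_embs: (l_a, d) stack of aspect word embeddings
    if aspect_token_embs.size(0) == 1:
        return aspect_token_embs[0]
    return aspect_token_embs.mean(dim=0)
```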
|
| 77 |
+
|
| 78 |
+
# 3.2 BERT Encoder
|
| 79 |
+
|
| 80 |
+
The architecture of BERT (Devlin et al., 2019) is akin to the Transformer (Vaswani et al., 2017). For simplicity, we omit some architecture details such as position encoding, layer normalization (Xu et al., 2019b) and residual connections (He et al., 2016).
|
| 81 |
+
|
| 82 |
+
1) Multi-head Self-attention Mechanism. In recent years, the multi-head self-attention mechanism (MultiHead) has received a wide range of applications in natural language processing. In the paper, we adopt MultiHead with $h$ heads to obtain the overall semantics of the whole sentence. The product from each self-attention network is then concatenated and finally transformed as:
|
| 83 |
+
|
| 84 |
+
$$
|
| 85 |
+
\begin{aligned} \mathbf{m} &= \{\mathbf{m}_i \mid i = 1, 2, \dots, l_s\} \\ &= \mathrm{MultiHead}(\mathbf{s}\mathbf{W}_h^Q, \mathbf{s}\mathbf{W}_h^K, \mathbf{s}\mathbf{W}_h^V), \end{aligned} \tag{2}
|
| 86 |
+
$$
|
| 87 |
+
|
| 88 |
+
where $h$ denotes the $h$-th attention head, and $\mathbf{W}_h^Q$, $\mathbf{W}_h^K$ and $\mathbf{W}_h^V$ are learnable parameters. Finally, the output feature is $\mathbf{m} = \{\mathbf{m}_i \mid i = 1,2,\dots,l_s\}$. For the detailed implementation of MultiHead, please refer to the Transformer (Vaswani et al., 2017).
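As a reference point, the MultiHead operator of Eq. 2 corresponds to standard multi-head self-attention, e.g., PyTorch's built-in module; the sizes below are illustrative assumptions, not values fixed by the paper:

```python
import torch
import torch.nn as nn

d_model, n_heads, l_s = 768, 12, 32   # illustrative BERT-base-like sizes
mha = nn.MultiheadAttention(embed_dim=d_model, num_heads=n_heads,
                            batch_first=True)

s = torch.randn(1, l_s, d_model)      # BERT embeddings of one sentence
m, _ = mha(s, s, s)                   # self-attention: query = key = value = s
print(m.shape)                        # torch.Size([1, 32, 768])
```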
|
| 89 |
+
|
| 90 |
+
2) Position-wise Feed-Forward Network. Since the multi-head attention is a series of linear transformations, we then apply the position-wise feed-forward network (FFN) to learn a nonlinear transformation of the features. Specifically, the FFN consists of two linear transformations with a ReLU activation in between. More formally:
|
| 91 |
+
|
| 92 |
+
$$
|
| 93 |
+
\begin{aligned} \mathbf{f} &= \{\mathbf{f}_i \mid i = 1, 2, \dots, l_s\} \\ &= \max(0, \mathbf{m}\mathbf{W}_1 + \mathbf{b}_1)\mathbf{W}_2 + \mathbf{b}_2, \end{aligned} \tag{3}
|
| 94 |
+
$$
|
| 95 |
+
|
| 96 |
+
where $\mathbf{W}_1, \mathbf{b}_1, \mathbf{W}_2$ and $\mathbf{b}_2$ are learnable parameters in the linear transformations.
|
| 97 |
+
|
| 98 |
+
So far, with the input $S = \{w_1^s, w_2^s, \dots, w_{l_s}^s\}$ , we obtain the hidden states $\mathbf{f} = \{\mathbf{f}_i \mid i = 1, 2, \dots, l_s\}$ via the BERT encoder. Then, for the words' hidden
|
| 99 |
+
|
| 100 |
+
states of the sentence from FFN, we utilize the max-pooling operation to fairly select crucial features in the sentence (Lai et al., 2015; Zhang et al., 2019b), so as to obtain the original sentence representation $\mathbf{h}_s$ at the beginning of each re-weighting step:
|
| 101 |
+
|
| 102 |
+
$$
|
| 103 |
+
\mathbf{h}_s = \mathrm{MaxPooling}\left(\mathbf{f}_i \mid i = 1, 2, \dots, l_s\right). \tag{4}
|
| 104 |
+
$$
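A compact sketch of Eqs. 3-4 makes the encoder's output explicit; this is a hypothetical module of ours assuming batched inputs (the paper does not prescribe these names or dimensions):

```python
import torch
import torch.nn as nn

class FFNWithMaxPooling(nn.Module):
    """Eq. 3: position-wise FFN with a ReLU in between;
    Eq. 4: max-pooling over the l_s token positions."""
    def __init__(self, d_model: int = 768, d_ff: int = 3072):
        super().__init__()
        self.w1 = nn.Linear(d_model, d_ff)
        self.w2 = nn.Linear(d_ff, d_model)

    def forward(self, m: torch.Tensor) -> torch.Tensor:
        # m: (batch, l_s, d_model) output of multi-head self-attention
        f = self.w2(torch.relu(self.w1(m)))  # Eq. 3, same shape as m
        h_s = f.max(dim=1).values            # Eq. 4, (batch, d_model)
        return h_s
```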
|
| 105 |
+
|
| 106 |
+
# 3.3 Dynamic Re-weighting Adapter (DRA)
|
| 107 |
+
|
| 108 |
+
The currently attention mechanism in deep learning is essentially similar to the selective visual attention of human beings (Vaswani et al., 2017; You et al., 2016). However, as for the text semantic understanding, human brain will discover the intentional relationship of words at a sentential level (Taatgen et al., 2007; Sha et al., 2016; Sen et al., 2020) and link the incoming semantic information with preexisting information stored within memory. Thus, we design a dynamic re-weighting adapter (DRA) which can dynamically emphasize the important aspect-aware words for the ABSA task.
|
| 109 |
+
|
| 110 |
+
As shown in the right part of Figure 1, based on overall semantics of the whole sentence, DRA further selects the most important word at each step with consideration of the specific aspect representation. Specifically, the inputs of DRA are the final outputs of the BERT encoder (i.e., $\mathbf{h}_s$ ) and the original aspect embedding (i.e., a). In each step, we first utilize re-weighting attention to choose the word for current input from the input sequence ( $\{\mathbf{s}_i \mid i = 1,2,\dots,l_s\}$ ). Then, we utilize Gated Recurrent Unit (GRU)(Cho et al., 2014) to encode the chosen word and update the semantic representation of the review sentence.
|
| 111 |
+
|
| 112 |
+
Formally, we regard the calculation process as:
|
| 113 |
+
|
| 114 |
+
$$
|
| 115 |
+
\mathbf{a}_t = F\left(\left[\mathbf{s}_1, \mathbf{s}_2, \dots, \mathbf{s}_{l_s}\right], \mathbf{h}_{t-1}, \mathbf{a}\right), \tag{5}
|
| 116 |
+
$$
|
| 117 |
+
|
| 118 |
+
$$
|
| 119 |
+
\mathbf{h}_t = \mathrm{GRU}\left(\mathbf{a}_t, \mathbf{h}_{t-1}\right), \quad t \in [1, T],
|
| 120 |
+
$$
|
| 121 |
+
|
| 122 |
+
where $\mathbf{a}$ is the original embedding vector of the aspect words, and $\mathbf{a}_t$ is the output of the re-weighting function $F$. $T$ denotes the dynamic re-weighting length over the sentence, which represents the cognitive threshold of human beings. $\mathbf{h}_0 = \mathbf{h}_s$ is the initial state and $\mathbf{h}_T$ is the output hidden state of the DRA.
|
| 123 |
+
|
| 124 |
+
1) The Re-weighting Function. More specifically, we utilize the attention mechanism to realize the re-weighting function $F$, which aims to select the most important aspect-related word at each step. The calculation can be formulated as:
|
| 125 |
+
|
| 126 |
+
$$
|
| 127 |
+
\begin{aligned} \mathbf{S} &= \left[\mathbf{s}_1, \mathbf{s}_2, \dots, \mathbf{s}_{l_s}\right], \\ \mathbf{M} &= \mathbf{W}_s \mathbf{S} + \left(\mathbf{W}_d \mathbf{h}_{t-1} + \mathbf{W}_a \mathbf{a}\right) \otimes \mathbf{w}, \\ \mathbf{m} &= \omega^{T} \tanh(\mathbf{M}), \end{aligned} \tag{6}
|
| 128 |
+
$$
|
| 129 |
+
|
| 130 |
+
where $\mathbf{S}$ denotes the original sentence embedding and $\mathbf{M}$ is the fused representation of the aspect and the sentence. $\mathbf{W}_s$, $\mathbf{W}_d$, $\mathbf{W}_a$ and $\omega$ are trainable parameters. $\mathbf{w} \in \mathbb{R}^{l_s}$ is a row vector of ones and $\otimes$ denotes the outer product.
|
| 131 |
+
|
| 132 |
+
Subsequently, to better encode aspect-aware semantics, we choose the most important word (i.e., one word) at each step for the specific aspect.
|
| 133 |
+
|
| 134 |
+
$$
|
| 135 |
+
\alpha_i = \frac{\exp(m_i)}{\sum_{k=1}^{l_s} \exp(m_k)}, \tag{7}
|
| 136 |
+
$$
|
| 137 |
+
|
| 138 |
+
$$
|
| 139 |
+
\mathbf{a}_t = \mathbf{s}_j, \quad j = \mathrm{Index}(\max(\alpha_i)),
|
| 140 |
+
$$
|
| 141 |
+
|
| 142 |
+
where $m_{i}$ and $\alpha_{i}$ are the hidden state and the attention score of the $i$-th word in the sentence, and $\mathbf{a}_t$ is the chosen word that is most related to the specific aspect at the $t$-th step. However, the $\mathrm{Index}(\max(\cdot))$ operation is non-differentiable, which means its gradient cannot be calculated. Inspired by the softmax function, we modify Eq. 7 and employ the following operation to re-weight the contextual words:
|
| 143 |
+
|
| 144 |
+
$$
|
| 145 |
+
\mathbf{a}_t = \sum_{i=1}^{l_s} \frac{\exp(\lambda m_i)}{\sum_{k=1}^{l_s} \exp(\lambda m_k)} \, \mathbf{s}_i. \tag{8}
|
| 146 |
+
$$
|
| 147 |
+
|
| 148 |
+
Note that we introduce a hyper-parameter $\lambda$ to ensure our model achieves the above purpose. Specifically, the softmax function can exponentially amplify or suppress the signal, thereby highlighting the information we want to enhance. Thus, when $\lambda$ is an arbitrarily large value, the attention score of the chosen word becomes infinitely close to 1, while those of the other words become infinitely close to 0. In this way, the most important word (i.e., one word) is extracted from the context at each re-weighting step.
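
For concreteness, the following is a minimal PyTorch sketch of the re-weighting function $F$ (Eqs. 6-8). It is an illustrative module of ours rather than the released implementation: the module and parameter names are hypothetical, and the default $\lambda = 100$ follows Section 4.2.

```python
import torch
import torch.nn as nn

class ReweightAttention(nn.Module):
    """Sketch of the re-weighting function F (Eqs. 6-8); names are illustrative."""

    def __init__(self, hidden_dim: int, att_dim: int, lam: float = 100.0):
        super().__init__()
        self.W_s = nn.Linear(hidden_dim, att_dim, bias=False)  # projects each word s_i
        self.W_d = nn.Linear(hidden_dim, att_dim, bias=False)  # projects the running state h_{t-1}
        self.W_a = nn.Linear(hidden_dim, att_dim, bias=False)  # projects the aspect embedding a
        self.omega = nn.Linear(att_dim, 1, bias=False)         # omega^T in Eq. 6
        self.lam = lam                                         # sharpening factor lambda (Eq. 8)

    def forward(self, S, h_prev, a):
        # S: (batch, l_s, hidden); h_prev, a: (batch, hidden)
        # Eq. 6: broadcasting the (state + aspect) query over all l_s positions
        # plays the role of the outer product with the all-ones row vector w.
        M = torch.tanh(self.W_s(S) + (self.W_d(h_prev) + self.W_a(a)).unsqueeze(1))
        m = self.omega(M).squeeze(-1)                          # (batch, l_s)
        # Eq. 8: sharpened softmax; for large lambda this approaches the
        # one-hot Index(max(.)) selection of Eq. 7 while staying differentiable.
        alpha = torch.softmax(self.lam * m, dim=-1)
        return torch.einsum("bl,bld->bd", alpha, S)            # a_t: the (soft-)chosen word
```

The $\lambda$-scaled softmax is the key design choice here: it keeps the hard word selection of Eq. 7 trainable end-to-end by gradient descent.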
2) The GRU Function. To better encode the semantics of the whole sentence, we also employ a GRU to imitate the procedure of human semantic comprehension under the specific context, which is consistent with how people adjust their understanding while reading new text. Therefore, given the previously chosen word embedding as input, the hidden vectors of the GRU are calculated as:

$$
z_t = \sigma\left(\mathbf{W}_z \cdot \left[\mathbf{h}_{t-1}, \mathbf{a}_t\right]\right),
$$

$$
r_t = \sigma\left(\mathbf{W}_r \cdot \left[\mathbf{h}_{t-1}, \mathbf{a}_t\right]\right), \tag{9}
$$

$$
\tilde{\mathbf{h}}_t = \tanh\left(\mathbf{W} \cdot \left[r_t * \mathbf{h}_{t-1}, \mathbf{a}_t\right]\right),
$$

$$
\mathbf{h}_t = \left(1 - z_t\right) * \mathbf{h}_{t-1} + z_t * \tilde{\mathbf{h}}_t,
$$

where $\sigma$ is the logistic sigmoid function, and $z_t$ and $r_t$ denote the update gate and the reset gate, respectively, at time step $t$.
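
Putting the two pieces together, a minimal sketch of the DRA recurrence (Eq. 5) could look as follows. It reuses the `ReweightAttention` module sketched above and, for simplicity, assumes one shared hidden size (the paper pairs a 768-d BERT encoder with a 256-d GRU, which would require extra projections); the class name is ours.

```python
import torch.nn as nn

class DynamicReweightingAdapter(nn.Module):
    """Sketch of the DRA recurrence (Eq. 5), reusing ReweightAttention above."""

    def __init__(self, hidden_dim: int, att_dim: int, T: int = 7):
        super().__init__()
        self.F = ReweightAttention(hidden_dim, att_dim)
        self.gru = nn.GRUCell(hidden_dim, hidden_dim)  # nn.GRUCell realizes Eq. 9
        self.T = T                                     # dynamic re-weighting length

    def forward(self, S, h_s, a):
        # S: word states (batch, l_s, hidden); h_s: sentence state; a: aspect embedding
        h = h_s                                        # h_0 = h_s
        for _ in range(self.T):
            a_t = self.F(S, h, a)                      # choose the most aspect-relevant word
            h = self.gru(a_t, h)                       # update the running semantics
        return h                                       # h_T, the output state of DRA
```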
<table><tr><td rowspan="2">Datasets</td><td colspan="2">#Positive</td><td colspan="2">#Negative</td><td colspan="2">#Neutral</td><td rowspan="2">#L</td><td rowspan="2">#M</td></tr><tr><td>Train</td><td>Test</td><td>Train</td><td>Test</td><td>Train</td><td>Test</td></tr><tr><td>Restaurant</td><td>2164</td><td>728</td><td>807</td><td>196</td><td>637</td><td>196</td><td>20</td><td>45.5</td></tr><tr><td>Laptop</td><td>994</td><td>341</td><td>870</td><td>128</td><td>464</td><td>169</td><td>19</td><td>36.5</td></tr><tr><td>Twitter</td><td>1561</td><td>173</td><td>1560</td><td>173</td><td>3127</td><td>346</td><td>16</td><td>10.2</td></tr></table>
Table 1: The statistics of three benchmark datasets. #L is the average length of sentences. #M is the proportion $(\%)$ of samples with multiple (i.e., more than 1) aspects.
# 3.4 Sentiment Prediction
After applying the BERT layers and the DRA to the input sentence, its root representation (i.e., $\mathbf{s}$) is converted into the feature representation $\mathbf{e}$:

$$
\begin{array}{l}
\mathbf{e} = \left\{\mathbf{e}_i \mid i = 1, 2, \dots, l_s\right\} \tag{10} \\
\quad = \left(\mathbf{W}_e \mathbf{f} + \mathbf{U}_e \mathbf{h}_T + \mathbf{b}_e\right),
\end{array}
$$

where $\mathbf{W}_e$, $\mathbf{U}_e$ and $\mathbf{b}_e$ are trainable parameters. After the $N$ stacked BERT layers, we obtain the final representation of the sentence (i.e., $\mathbf{e}_N$). Then, we feed it into a Multilayer Perceptron (MLP) and map it to probabilities over the different sentiment polarities via a softmax layer:

$$
\mathbf{R}_l = \operatorname{ReLU}\left(\mathbf{W}_l \mathbf{R}_{l-1} + \mathbf{b}_l\right), \tag{11}
$$

$$
\hat{\mathbf{y}} = \operatorname{softmax}\left(\mathbf{W}_o \mathbf{R}_h + \mathbf{b}_o\right),
$$

where $\mathbf{W}_l$, $\mathbf{W}_o$, $\mathbf{b}_l$ and $\mathbf{b}_o$ are learned parameters. $\mathbf{R}_l$ is the hidden state of the $l$-th MLP layer ($\mathbf{R}_0 = \mathbf{e}_N$, $l \in [1,h]$), and $\mathbf{R}_h$ is the state of the final layer, which is regarded as the output of the MLP. $\hat{\mathbf{y}}$ is the predicted sentiment polarity distribution.
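
As an illustration, a prediction head matching Eq. 11 with $h = 3$ MLP layers (the value of $l$ reported in Section 4.2) might be sketched as below; the 256-d intermediate width is our assumption, not a reported value.

```python
import torch
import torch.nn as nn

# Hypothetical head: three ReLU layers (R_1..R_3 = R_h) followed by the
# output projection W_o R_h + b_o over the 3 polarities (Eq. 11).
mlp_head = nn.Sequential(
    nn.Linear(768, 256), nn.ReLU(),   # R_1 = ReLU(W_1 R_0 + b_1), R_0 = e_N
    nn.Linear(256, 256), nn.ReLU(),   # R_2
    nn.Linear(256, 256), nn.ReLU(),   # R_3 = R_h (h = 3)
    nn.Linear(256, 3),                # logits for positive / negative / neutral
)

e_N = torch.randn(8, 768)                        # a dummy batch of sentence features
y_hat = torch.softmax(mlp_head(e_N), dim=-1)     # predicted polarity distribution
```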
# 3.5 Model Training
Finally, we apply the cross-entropy loss function for model training:

$$
\mathcal{L} = -\sum_{i=1}^{M} \sum_{j=1}^{C} y_i^j \log\left(\hat{y}_i^j\right) + \beta \|\Theta\|_2^2, \tag{12}
$$

where $y_i^j$ is the ground-truth sentiment polarity, $C$ is the number of labels (i.e., 3 in our task), and $M$ is the number of training samples. $\Theta$ corresponds to all of the trainable parameters.
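
A minimal sketch of this objective is shown below. Note that `F.cross_entropy` averages over the $M$ samples in a batch rather than summing as written in Eq. 12, and $\beta = 0.8$ follows Section 4.2.

```python
import torch.nn.functional as F

def training_loss(logits, labels, model, beta=0.8):
    """Cross-entropy over C = 3 polarities plus L2 regularization (Eq. 12)."""
    ce = F.cross_entropy(logits, labels)                  # batch-mean of -sum_j y^j log(y_hat^j)
    l2 = sum(p.pow(2).sum() for p in model.parameters())  # ||Theta||_2^2
    return ce + beta * l2
```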
# 4 Experiment
# 4.1 Datasets
We mainly conduct experiments on three benchmark ABSA datasets: "Laptop", "Restaurant" (Pontiki et al., 2014) and "Twitter" (Dong et al., 2014). Each data item is labeled with one of three sentiment polarities (i.e., positive, negative, or neutral).
<table><tr><td rowspan="2">Category</td><td rowspan="2">Datasets Methods</td><td colspan="2">Laptop</td><td colspan="2">Restaurant</td><td colspan="2">Twitter</td></tr><tr><td>Accuracy</td><td>F1-score</td><td>Accuracy</td><td>F1-score</td><td>Accuracy</td><td>F1-score</td></tr><tr><td rowspan="6">Attention.</td><td>ATAE-LSTM (Wang et al., 2016)</td><td>68.57</td><td>64.52</td><td>76.58</td><td>67.39</td><td>67.27</td><td>66.43</td></tr><tr><td>IAN (Ma et al., 2017)</td><td>70.84</td><td>65.73</td><td>76.88</td><td>68.36</td><td>68.74</td><td>67.61</td></tr><tr><td>MemNet (Tang et al., 2016)</td><td>72.32</td><td>67.03</td><td>78.12</td><td>68.99</td><td>70.19</td><td>68.22</td></tr><tr><td>AOA (Huang et al., 2018)</td><td>74.56</td><td>68.77</td><td>79.42</td><td>70.43</td><td>71.68</td><td>69.25</td></tr><tr><td>MGNet (Fan et al., 2018)</td><td>75.37</td><td>71.26</td><td>81.28</td><td>72.07</td><td>72.54</td><td>70.78</td></tr><tr><td>TNet (Li et al., 2018)</td><td>76.54</td><td>71.75</td><td>80.69</td><td>71.27</td><td>74.93</td><td>73.60</td></tr><tr><td rowspan="6">Pre-trained.</td><td>BERT (Devlin et al., 2019)</td><td>77.29</td><td>73.36</td><td>82.40</td><td>73.17</td><td>73.42</td><td>72.17</td></tr><tr><td>BERT-PT (Xu et al., 2019a)</td><td>78.07</td><td>75.08</td><td>84.95</td><td>76.96</td><td>-</td><td>-</td></tr><tr><td>BERT-SPC (Song et al., 2019)</td><td>78.99</td><td>75.03</td><td>84.46</td><td>76.98</td><td>74.13</td><td>72.73</td></tr><tr><td>AEN-BERT (Song et al., 2019)</td><td>79.93</td><td>76.31</td><td>83.12</td><td>73.76</td><td>74.71</td><td>73.13</td></tr><tr><td>RGAT-BERT (Wang et al., 2020)</td><td>78.21</td><td>74.07</td><td>86.60</td><td>81.35</td><td>76.15</td><td>74.88</td></tr><tr><td>T-GCN (Tian et al., 2021)</td><td>80.88</td><td>77.03</td><td>86.16</td><td>79.95</td><td>76.45</td><td>75.25</td></tr><tr><td>Ours.</td><td>DR-BERT</td><td>81.45</td><td>78.16</td><td>87.72</td><td>82.31</td><td>77.24</td><td>76.10</td></tr></table>
Table 2: Experimental results (%) on three benchmark datasets. We underline the best-performing baseline.
The statistics of the datasets are presented in Table 1. Moreover, we strictly follow the dataset configurations of previous studies. For all datasets, we randomly sample 10% of the items from the training set and regard them as the development set, as sketched below.
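
For illustration, the 10% development split can be reproduced as in the sketch below, where the toy `train_items` list merely stands in for the real (sentence, aspect, label) triples; its size matches the Restaurant training set in Table 1 (2164 + 807 + 637 = 3608).

```python
import random

# Toy stand-in for the real training triples; 3608 matches Restaurant's train size.
train_items = [(f"sentence {i}", "aspect", i % 3) for i in range(3608)]
random.seed(42)
random.shuffle(train_items)
cut = len(train_items) // 10                      # 10% held out for development
dev_items, train_items = train_items[:cut], train_items[cut:]
print(len(dev_items), len(train_items))           # 360 3248
```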
# 4.2 Hyperparameter Settings
In the implementation, we build our framework on the official bert-base models ($n_{\mathrm{layers}} = 12$, $n_{\mathrm{heads}} = 12$, $n_{\mathrm{hidden}} = 768$). The hidden size of the GRUs and the re-weighting length of DRA are set to 256 and 7, respectively. The learning rate is tuned among {2e-5, 5e-5, 1e-3} and the batch size is manually tested in {16, 32, 64, 128}. The dropout rate is set to 0.2. The hyper-parameters $l$, $\beta$ and $\lambda$ have been carefully tuned, and their final values are set to 3, 0.8 and 100, respectively. The model is trained with the Adam optimizer and evaluated by two widely used metrics (i.e., Accuracy and F1-score). The parameters of the baseline models follow the default configurations of their original papers. We run our model three times with different seeds and report the average performance.
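
The settings above can be collected in a single configuration sketch; the key names are ours, while the values are those reported in this subsection.

```python
# Hypothetical configuration dictionary; key names are ours, values from Section 4.2.
config = {
    "bert_model": "bert-base",      # n_layers=12, n_heads=12, n_hidden=768
    "gru_hidden": 256,              # hidden size of the GRUs
    "reweighting_length_T": 7,      # dynamic re-weighting length of DRA
    "learning_rate": 2e-5,          # tuned among {2e-5, 5e-5, 1e-3}
    "batch_size": 32,               # tested in {16, 32, 64, 128}
    "dropout": 0.2,
    "mlp_layers_l": 3,              # hyper-parameter l
    "l2_beta": 0.8,                 # hyper-parameter beta in Eq. 12
    "lambda": 100,                  # sharpening factor in Eq. 8
    "optimizer": "Adam",
    "num_seeds": 3,                 # results averaged over three runs
}
```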
# 4.3 Baselines
- Attention-based Models: MemNet (Tang et al., 2016), ATAE-LSTM (Wang et al., 2016), IAN (Ma et al., 2017), AOA (Huang et al., 2018), MGNet (Fan et al., 2018), TNet (Li et al., 2018).
- Pre-trained Models: Fine-tuned BERT (Devlin et al., 2019), BERT-PT (Xu et al., 2019a), BERT-SPC, AEN-BERT (Song et al., 2019), RGAT-BERT (Wang et al., 2020), T-GCN (Tian et al., 2021).
These baseline methods comprehensively cover the recent related SOTA models. Most of them are described in Section 2.1, so to save space we do not detail them again in this section.
# 4.4 Experimental Results
From the results in Table 2, we have the following observations. First, BERT-based methods beat most of the attention-based methods (e.g., IAN and TNet) on both metrics. This phenomenon indicates the powerful ability of pre-trained language models, and is also why we adopt BERT as the base encoder to learn the overall semantic representation of the whole sentence.
Second, by comparing non-specific BERT models (i.e., BERT and BERT-PT) with task-specific models (e.g., RGAT-BERT) for ABSA, we find that the task-specific BERT models perform better than the non-specific ones. Specifically, we can also observe the performance trend T-GCN & RGAT-BERT > AEN-BERT > BERT-PT > BERT, which is consistent with the previous assumption that aspect-related information is a crucial factor for the performance of ABSA models.
Finally, despite the outstanding performance of previous models, our DR-BERT still outperforms the most advanced baselines (i.e., T-GCN and RGAT-BERT) in terms of both Accuracy and F1-score. The results demonstrate the effectiveness of the dynamic modeling strategy based on the procedure of semantic comprehension. Meanwhile, they also indicate that our proposed DRA can better grasp the aspect-aware semantics of the sentence than the BERT plug-in components in previous methods.
<table><tr><td rowspan="2">Model Variants</td><td colspan="2">Laptop</td></tr><tr><td>Accuracy</td><td>F1-score</td></tr><tr><td>BERT-Base</td><td>77.29</td><td>73.36</td></tr><tr><td>(1): + MLP</td><td>77.94</td><td>74.42</td></tr><tr><td>(2): + DRA</td><td>80.66</td><td>77.13</td></tr><tr><td>(3): + DRA on top 3 layers</td><td>78.64</td><td>75.16</td></tr><tr><td>(4): + DRA on top 6 layers</td><td>79.17</td><td>75.93</td></tr><tr><td>(5): + DRA on top 9 layers</td><td>80.22</td><td>76.49</td></tr><tr><td>(6): DR-BERT</td><td>81.45</td><td>78.16</td></tr></table>
Table 3: The ablation study on different components, conducted on the test set of the Laptop dataset. "BERT-Base" indicates the vanilla BERT. "+" indicates the setting with plug-in components.

|
| 240 |
+
(a) The performance (Accuracy) in three dataset.
|
| 241 |
+
|
| 242 |
+

|
| 243 |
+
|
| 244 |
+

|
| 245 |
+
Figure 3: Comparison of the semantic understanding process between human reading and DRA when judging the sentiment polarity of aspect "food". (a) is the visualization of the human understanding process from the eye tracker†. (b) denotes aspect-aware words from re-weighting function.
|
| 246 |
+
|
| 247 |
+

|
| 248 |
+
(b) The performance (F1-score) in three dataset.
|
| 249 |
+
|
| 250 |
+

|
| 251 |
+
Figure 2: The ablation study on the re-weighting length of the adapter. Red lines indicate Accuracy/ F1 scores while blue and green lines indicate the performance of the best baseline and BERT-base model respectively.
|
| 252 |
+
|
| 253 |
+

|
| 254 |
+
|
| 255 |
+
# 4.5 Ablation Study
Ablations on the Proposed Components. In Table 3, we study the influence of different components in our framework, including the DRA and the MLPs. We find that without the adapter and the MLPs, DR-BERT degenerates into the vanilla BERT model, which yields the worst performance among all variants. This phenomenon indicates the effectiveness of the DRA and MLP modules. Moreover, by comparing (1) and (2), we can easily conclude that DRA plays a more crucial role in the final sentiment prediction than the MLPs.
Since BERT models are usually quite deep (e.g., 12 layers), we also insert the dynamic re-weighting adapter into only the top layers (i.e., the top 3, 6, and 9 layers) to further verify the effectiveness of the DRA module. The results are shown in rows (3), (4), and (5) of Table 3. We observe that when introducing adapters only to the top layers of DR-BERT, our framework still outperforms the BERT model, showing that the DRA is effective in encoding the aspect-aware semantics over the whole sentence. In addition, we can also find that the more BERT layers the adapter is incorporated into, the higher the performance gained, illustrating the importance of modeling the deep dynamic semantics over the sentence.
Ablations on the Scale of the Adapter. In this subsection, we investigate the influence of the scale of the adapter on different datasets. As shown in Figure 2, we tune the adapter's dynamic re-weighting length ($T$) over a wide range (i.e., 2 to 10). Specifically, the performance of DR-BERT first improves as the re-weighting length increases, achieving the best result at around 7. Then, as the length continues to increase, the performance steadily declines. This phenomenon is consistent with the psychological findings that human memory focuses on roughly seven words (Tononi, 2008; Koch and Tsuchiya, 2007), which further indicates the effectiveness of DRA in modeling human-like (dynamic) semantic comprehension.
Besides, compared with the best-performing baseline (blue lines), our model achieves better performance with only 4 or 5 re-weighting steps on most test sets, illustrating the efficiency of the re-weighting adapter. On the other hand, we can also find that DR-BERT consistently outperforms the BERT-base model (green lines), even with the lowest re-weighting length. All these results show that DR-BERT can better comprehend aspect-aware dynamic semantics in aspect-based sentiment analysis.
# 4.6 Interpretability Verification
Comparison of Semantic Comprehension. To evaluate model rationality and interpretability, we conduct a study of dynamic semantic comprehension with an eye tracker. As shown in Figure 3 (a), when a person tries to understand a relatively long sentence, he/she first reads the entire sentence. Subsequently, after being given a specific aspect, he/she dynamically selects related words based on the previous memory state until he/she fully understands the sentiment polarity of the given aspect.



Figure 4: Visualization results of multiple aspects in the same sentence. The blue part indicates the aspect and its ground truth. The middle subfigures represent the procedure of human semantic comprehension targeted at one specific aspect. The green subfigures are the predicted labels and the chosen word sequences from DRA.

<table><tr><td>Case Examples. The label in brackets represents ground truth.</td><td>BERT-base</td><td>RGAT-BERT</td><td>DR-BERT</td></tr><tr><td>Aspects: “system memory”(Neg.), “DDR5”(Pos.), “DDR3”(Neg.)</td><td>Pos/Neg/Neg</td><td>Neg/Pos/Pos</td><td>Neg/Pos/Neg</td></tr><tr><td>Sentence: It could be a perfect laptop if it would have faster system memory and its radeon would have DDR5 instead of DDR3.</td><td>X / X / X</td><td>✓ / ✓ / X</td><td>✓ / ✓ / ✓</td></tr><tr><td>Aspects: “Supplied software” (Neu.), “software” (Pos.), “Windows” (Neg.)</td><td>Pos/ Pos/ Pos</td><td>Pos/Pos/Neu</td><td>Pos/Pos/Neg</td></tr><tr><td>Sentence: Supplied software: The software that comes with this machine is greatly welcomed compared to what Windows comes with.</td><td>X / ✓ / X</td><td>X / ✓ / X</td><td>X / ✓ / ✓</td></tr><tr><td>Aspects: “waiter” (Neg.), “served” (Neg.), “specials” (Pos.)</td><td>Neg/Neg/Neg</td><td>Neg/Neg/Neu</td><td>Neg/Neg/Pos</td></tr><tr><td>Sentence: First, the waiter who served us neglected to fill us in on the specials, which I would have chosen had I known about them.</td><td>✓ / ✓ / X</td><td>✓ / ✓ / X</td><td>✓ / ✓ / ✓</td></tr></table>

Table 4: Error analysis of three review examples from the Laptop and Restaurant datasets. The colored words in brackets represent the ground-truth sentiment label of the corresponding aspect. The symbol ✓ means the predicted sentiment is correct, and X means it is wrong.
Interestingly, the above phenomenon is consistent with the words chosen by our dynamic re-weighting adapter. Specifically, as Figure 3 (b) shows, with the re-weighting function $F$ (i.e., Equations 5 and 6), our model dynamically chooses the words "food, better, while, definitely, not, ...", which are shown to be very important for predicting the sentiment of the aspect "food" in Figure 3 (a). These experimental results again fully indicate the effectiveness and interpretability of our proposed model in dynamically learning aspect-aware information.
The Influence of Multiple Aspects. As aspect-related information plays a key role in ABSA and at least 10.2% of reviews contain multiple aspects as shown in Table 1, we are curious about the model's performance in complex scenarios, e.g., when a review sentence contains multiple aspects. Therefore, we randomly choose an example to explore how the selection of the context words changes correspondingly with different inputs. The visualization results are shown in Figure 4. Specifically, the chosen sentence has three different aspects with their sentiment polarities, i.e., "system memory"-negative, "DDR5"-positive and "DDR3"-negative. Take the aspect "DDR5" as an example: it is positive, contrary to "DDR3". After receiving the overall semantics of the whole sentence, readers tend to associate "DDR5" with the context words {"would", "have"} to predict the correct sentiment "positive". For the other two aspects, the observations are consistent with "DDR5". In summary, all these results show that DR-BERT can dynamically extract the vital information to achieve aspect-aware semantic understanding even in a more complex scenario.
# 4.7 Error Analysis
Table 4 displays three review examples and their prediction results by BERT, RGAT-BERT, and our DR-BERT. As we can see from the "BERT-base" column, when there are multiple aspects, the vanilla BERT often makes wrong classifications since it tends to learn the overall sentiment polarity of the sentence instead of the aspect-aware semantics. While RGAT-BERT can alleviate this problem to a certain extent, it still struggles to predict the accurate sentiment label when there are few dependency relations. For example, in the first sentence, "DDR3" has few helpful syntactic dependency relations; therefore, RGAT-BERT makes a wrong sentiment prediction. However, our DR-BERT model succeeds in predicting most sentiment labels by considering the dynamic changing of the aspect-aware semantics. For the other two case examples, the observations are consistent. Note that, for the aspect "Supplied software" in the second sentence, two overlapping aspects appear in the same sentence, making it more difficult to distinguish their different sentiments. Thus, precisely determining its sentiment polarity is a big challenge even for humans, let alone deep learning models. This also leaves space for future exploration.

<table><tr><td rowspan="2">Methods</td><td colspan="3">Laptop</td><td colspan="3">Restaurant</td><td colspan="3">Twitter</td></tr><tr><td>S</td><td>E</td><td>T</td><td>S</td><td>E</td><td>T</td><td>S</td><td>E</td><td>T</td></tr><tr><td>(1) DR-BERT</td><td>157s</td><td>10</td><td>26.1m</td><td>183s</td><td>10</td><td>30.5m</td><td>379s</td><td>10</td><td>63.2m</td></tr><tr><td>(2) T-GCN-BERT</td><td>168s</td><td>10</td><td>28.0m</td><td>188s</td><td>10</td><td>31.3m</td><td>411s</td><td>10</td><td>68.5m</td></tr><tr><td>(3) BERT-base</td><td>133s</td><td>10</td><td>22.2m</td><td>158s</td><td>10</td><td>26.3m</td><td>242s</td><td>10</td><td>40.3m</td></tr><tr><td>(4) ATAE-LSTM</td><td>3s</td><td>30</td><td>1.50m</td><td>4s</td><td>30</td><td>2.00m</td><td>5s</td><td>30</td><td>2.50m</td></tr></table>

Table 5: Runtime comparison between DR-BERT, T-GCN-BERT, BERT-base and ATAE-LSTM. Specifically, "S" represents the training time (seconds) for a single epoch, "E" denotes the number of training epochs, and "T" is the total training time (minutes).
# 5 Computation Time Comparison
We also compare the computation runtime of DR-BERT with three baseline methods. All models are run on a Linux server with 64 Intel(R) CPUs and 4 Tesla V100 32GB GPUs. From the results shown in Table 5, we can first observe that the training time of a single epoch of DR-BERT is shorter than that of T-GCN, which is based on GCN. Meanwhile, the training times of all these BERT-based models are similar (i.e., there is no significant difference). The possible reason is that the official datasets are small, and such a small amount of data can hardly influence the overall runtime of PLMs. Second, compared with the other models, the training time of the ATAE-LSTM model is far lower (always an order of magnitude smaller). For example, ATAE-LSTM only needs about two minutes to achieve optimal performance on the Restaurant dataset, while BERT-based models require more than 26 minutes. Therefore, though DR-BERT contains a GRU-based Dynamic Re-weighting Adapter, the extra computation it introduces is small compared with the cost of the BERT backbone itself. In summary, the above observations show that the computation time of our DR-BERT model is within an acceptable range.
# 6 Conclusion and Future Work
This paper introduced a new approach named Dynamic Re-weighting BERT (DR-BERT) for aspect-based sentiment analysis. Specifically, we first employed the BERT layers as a base encoder to learn the overall semantic features of the whole sentence. Then, inspired by human semantic comprehension, we devised a new Dynamic Re-weighting Adapter (DRA) to enhance aspect-aware semantic features in the sentiment learning process. In addition, we inserted the DRA into the BERT layers to address the limitations of the vanilla pre-trained model on the ABSA task. Extensive experiments on three benchmark datasets demonstrated the effectiveness and interpretability of the proposed model, offering semantic comprehension insights for future natural language modeling. Moreover, an error analysis was performed on incorrectly predicted examples, leading to some insights into the ABSA task.
We hope our research can help inspire excellent work on aspect-based sentiment analysis from different perspectives. In the future, we plan to extend our method to other tasks such as sentence semantic matching and relation extraction, which can also benefit from utilizing dynamic semantics. Besides, we will explore whether DR-BERT can make positive corrections based on previous mistakes during dynamic semantic understanding.
# 7 Acknowledgments
We would like to thank the anonymous reviewers for their helpful comments. This research was partially supported by grants from the National Key R&D Program of China (No. 2021YFF0901003) and the National Natural Science Foundation of China (No. 61922073, 61727809, 62006066 and 72101176). We appreciate all the authors for their fruitful discussions. We also give special thanks to all the front-line healthcare providers who are fighting the war against COVID-19.
# References
James C Bezdek. 1992. On the relationship between neural networks, pattern recognition and intelligence. International journal of approximate reasoning, 6(2):85-107.
Harm Brouwer, Francesca Delogu, Noortje J Venhuizen, and Matthew W Crocker. 2021. Neurobehavioral correlates of surprisal in language comprehension: A neurocomputational model. Frontiers in Psychology, 12:110.
Chenhua Chen, Zhiyang Teng, and Yue Zhang. 2020. Inducing target-specific latent structures for aspect sentiment classification. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 5596-5607.
Jin Yao Chin, Kaiqi Zhao, Shafiq Joty, and Gao Cong. 2018. Anr: Aspect-based neural recommender. In Proceedings of the 27th ACM International Conference on Information and Knowledge Management, pages 147-156.
Kyunghyun Cho, Bart van Merriënboer, Caglar Gulcehre, Dzmitry Bahdanau, Fethi Bougares, Holger Schwenk, and Yoshua Bengio. 2014. Learning phrase representations using rnn encoder-decoder for statistical machine translation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1724-1734.
Kevin Clark, Urvashi Khandelwal, Omer Levy, and Christopher D. Manning. 2019. What does BERT look at? an analysis of BERT's attention. In Proceedings of the 2019 ACL Workshop BlackboxNLP: Analyzing and Interpreting Neural Networks for NLP, pages 276-286, Florence, Italy. Association for Computational Linguistics.
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. Bert: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 4171-4186.
Xiaowen Ding and Bing Liu. 2007. The utility of linguistic rules in opinion mining. In Proceedings of the 30th annual international ACM SIGIR conference on Research and development in information retrieval, pages 811-812.
Li Dong, Furu Wei, Chuanqi Tan, Duyu Tang, Ming Zhou, and Ke Xu. 2014. Adaptive recursive neural network for target-dependent twitter sentiment classification. In Proceedings of the 52nd annual meeting of the association for computational linguistics (volume 2: Short papers), pages 49-54.
Feifan Fan, Yansong Feng, and Dongyan Zhao. 2018. Multi-grained attention network for aspect-level sentiment classification. In Proceedings of the 2018 conference on empirical methods in natural language processing, pages 3433-3442.
Tal Golan, Prashant C Raju, and Nikolaus Kriegeskorte. 2020. Controversial stimuli: Pitting neural networks against each other as models of human cognition. Proceedings of the National Academy of Sciences, 117(47):29330-29337.
Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recognition. In Proceedings of the IEEE conference on computer vision and pattern recognition (CVPR), pages 770-778.
Binxuan Huang and Kathleen M Carley. 2019. Syntax-aware aspect level sentiment classification with graph attention networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5469-5477.
Binxuan Huang, Yanglan Ou, and Kathleen M Carley. 2018. Aspect level sentiment classification with attention-over-attention neural networks. In International Conference on Social Computing, Behavioral-Cultural Modeling and Prediction and Behavior Representation in Modeling and Simulation, pages 197-206. Springer.
Long Jiang, Mo Yu, Ming Zhou, Xiaohua Liu, and Tiejun Zhao. 2011. Target-dependent twitter sentiment classification. In Proceedings of the 49th annual meeting of the association for computational linguistics, pages 151-160.
Svetlana Kiritchenko, Xiaodan Zhu, Colin Cherry, and Saif Mohammad. 2014. Nrc-canada-2014: Detecting aspects and sentiment in customer reviews. In Proceedings of the 8th international workshop on semantic evaluation (SemEval 2014), pages 437-442.
Christof Koch and Naotsugu Tsuchiya. 2007. Attention and consciousness: two distinct brain processes. Trends in cognitive sciences, 11(1):16-22.
Gina R Kuperberg. 2007. Neural mechanisms of language comprehension: Challenges to syntax. Brain research, 1146:23-49.
Gina R Kuperberg and T Florian Jaeger. 2016. What do we mean by prediction in language comprehension? Language, cognition and neuroscience, 31(1):32-59.
Siwei Lai, Liheng Xu, Kang Liu, and Jun Zhao. 2015. Recurrent convolutional neural networks for text classification. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 29.
Ruifan Li, Hao Chen, Fangxiang Feng, Zhanyu Ma, Xiaojie Wang, and Eduard Hovy. 2021. Dual graph convolutional networks for aspect-based sentiment analysis. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 6319-6329.
Xiangsheng Li, Jiaxin Mao, Chao Wang, Yiqun Liu, Min Zhang, and Shaoping Ma. 2019. Teach machine how to read: reading behavior inspired relevance estimation. In Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 795-804.
Xin Li, Lidong Bing, Wai Lam, and Bei Shi. 2018. Transformation networks for target-oriented sentiment classification. In Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics, pages 946-956.
Yunlong Liang, Fandong Meng, Jinchao Zhang, Jinan Xu, Yufeng Chen, and Jie Zhou. 2019. A novel aspect-guided deep transition model for aspect based sentiment analysis. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 5569-5580.
Dehong Ma, Sujian Li, Xiaodong Zhang, and Houfeng Wang. 2017. Interactive attention networks for aspect-level sentiment classification. In Proceedings of the 26th International Joint Conference on Artificial Intelligence, pages 4068-4074.
Thien Hai Nguyen and Kiyoaki Shirai. 2015. Phrasernn: Phrase recursive neural network for aspect-based sentiment analysis. In Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 2509-2514.
Wei Peng, Yue Hu, Luxi Xing, Yuqiang Xie, Jing Yu, Yajing Sun, and Xiangpeng Wei. 2020. Bidirectional cognitive thinking network for machine reading comprehension. In Proceedings of the 28th International Conference on Computational Linguistics, pages 2613-2623.
Maria Pontiki, Dimitris Galanis, John Pavlopoulos, Harris Papageorgiou, Ion Androutsopoulos, and Suresh Manandhar. 2014. SemEval-2014 task 4: Aspect based sentiment analysis. In Proceedings of the 8th International Workshop on Semantic Evaluation (SemEval 2014), pages 27-35, Dublin, Ireland. Association for Computational Linguistics.
Keith Rayner. 1998. Eye movements in reading and information processing: 20 years of research. Psychological bulletin, 124(3):372.
Cansu Sen, Thomas Hartvigsen, Biao Yin, Xiangnan Kong, and Elke Rundensteiner. 2020. Human attention maps for text classification: Do humans and neural networks focus on the same words? In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 4596-4608.
Lei Sha, Baobao Chang, Zhifang Sui, and Sujian Li. 2016. Reading and thinking: Re-read LSTM unit for textual entailment recognition. In Proceedings of COLING 2016, the 26th International Conference on Computational Linguistics: Technical Papers, pages 2870-2879.
Selina Sharmin, Oleg Špakov, and Kari-Jouko Räihä. 2015. Dynamic text presentation in print interpreting—an eye movement study of reading behaviour. International Journal of Human-Computer Studies, 78:17–30.
Youwei Song, Jiahai Wang, Tao Jiang, Zhiyue Liu, and Yanghui Rao. 2019. Attentional encoder network for targeted sentiment classification. arXiv preprint arXiv:1902.09314.
Concetto Spampinato, Simone Palazzo, Isaak Kavasidis, Daniela Giordano, Nasim Souly, and Mubarak Shah. 2017. Deep learning human mind for automated visual classification. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 6809-6817.
Chi Sun, Luyao Huang, and Xipeng Qiu. 2019. Utilizing bert for aspect-based sentiment analysis via constructing auxiliary sentence. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 380-385.
Yaming Sun, Lei Lin, Duyu Tang, Nan Yang, Zhenzhou Ji, and Xiaolong Wang. 2015. Modeling mention, context and entity with neural networks for entity disambiguation. In Twenty-fourth international joint conference on artificial intelligence.
Niels A Taatgen, Hedderik Van Rijn, and John Anderson. 2007. An integrated theory of prospective time interval estimation: The role of cognition, attention, and learning. Psychological review, 114(3):577.
Xingwei Tan, Yi Cai, and Changxi Zhu. 2019. Recognizing conflict opinions in aspect-level sentiment classification with dual attention networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing (EMNLP-IJCNLP), pages 3426-3431.
Duyu Tang, Bing Qin, Xiaocheng Feng, and Ting Liu. 2015. Effective lstms for target-dependent sentiment classification. arXiv preprint arXiv:1512.01100.
Duyu Tang, Bing Qin, and Ting Liu. 2016. Aspect level sentiment classification with deep memory network. In Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing, pages 214-224.
Hao Tang, Donghong Ji, Chenliang Li, and Qiji Zhou. 2020. Dependency graph enhanced dual-transformer structure for aspect-based sentiment classification. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 6578-6588.
Jialong Tang, Ziyao Lu, Jinsong Su, Yubin Ge, Linfeng Song, Le Sun, and Jiebo Luo. 2019. Progressive self-supervised attention learning for aspect-level sentiment analysis. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 557-566.
Yuanhe Tian, Guimin Chen, and Yan Song. 2021. Aspect-based sentiment analysis with type-aware graph convolutional networks and layer ensemble. In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pages 2910-2922.
Giulio Tononi. 2008. Consciousness as integrated information: a provisional manifesto. The Biological Bulletin, 215(3):216-242.
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. arXiv preprint arXiv:1706.03762.
Jingjing Wang, Changlong Sun, Shoushan Li, Xiaozhong Liu, Luo Si, Min Zhang, and Guodong Zhou. 2019. Aspect sentiment classification towards question-answering with reinforced bidirectional attention network. In Proceedings of the 57th Annual Meeting of the Association for Computational Linguistics, pages 3548-3557.
Kai Wang, Weizhou Shen, Yunyi Yang, Xiaojun Quan, and Rui Wang. 2020. Relational graph attention network for aspect-based sentiment analysis. In Proceedings of 58th Annual Meeting of the Association for Computational Linguistics, pages 3229-3238.
Xuejian Wang, Lantao Yu, Kan Ren, Guanyu Tao, Weinan Zhang, et al. 2017. Dynamic attention deep model for article recommendation by learning human editors' demonstration. In Proceedings of the 23rd international conference on knowledge discovery and data mining, pages 2051-2059.
Yequan Wang, Minlie Huang, Xiaoyan Zhu, and Li Zhao. 2016. Attention-based LSTM for aspect-level sentiment classification. In Proceedings of the 2016 conference on empirical methods in natural language processing, pages 606-615.
Bowen Xing, Lejian Liao, Dandan Song, et al. 2019. Earlier attention? aspect-aware LSTM for aspect-based sentiment analysis. In IJCAI.
Hu Xu, Bing Liu, Lei Shu, and S Yu Philip. 2019a. Bert post-training for review reading comprehension and aspect-based sentiment analysis. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 2324-2335.
Hu Xu, Lei Shu, S Yu Philip, and Bing Liu. 2020. Understanding pre-trained bert for aspect-based sentiment analysis. In Proceedings of the 28th International Conference on Computational Linguistics, pages 244-250.
Jingjing Xu, Xu Sun, Zhiyuan Zhang, Guangxiang Zhao, and Junyang Lin. 2019b. Understanding and improving layer normalization. Advances in Neural Information Processing Systems, 32.
Hsien-Ming Yang and George W McConkie. 1999. Reading chinese: Some basic eye-movement characteristics. Reading Chinese script: A cognitive analysis, pages 207-222.
Quanzeng You, Hailin Jin, Zhaowen Wang, Chen Fang, and Jiebo Luo. 2016. Image captioning with semantic attention. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4651-4659.
Chen Zhang, Qiuchi Li, and Dawei Song. 2019a. Aspect-based sentiment classification with aspect-specific graph convolutional networks. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 4568-4578.
Kai Zhang, Qi Liu, Hao Qian, Biao Xiang, Qing Cui, Jun Zhou, and Enhong Chen. 2021a. Eatn: An efficient adaptive transfer network for aspect-level sentiment analysis. IEEE Transactions on Knowledge and Data Engineering.
Kai Zhang, Hao Qian, Qi Liu, Zhiqiang Zhang, Jun Zhou, et al. 2021b. Sifn: A sentiment-aware interactive fusion network for review-based item recommendation. In Proceedings of the 30th ACM International Conference on Information & Knowledge Management, pages 3627-3631.
Kai Zhang, Hefu Zhang, Qi Liu, Hongke Zhao, Hengshu Zhu, and Enhong Chen. 2019b. Interactive attention transfer network for cross-domain sentiment classification. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 33, pages 5773-5780.
Kai Zhang, Hongke Zhao, Qi Liu, Zhen Pan, and Enhong Chen. 2019c. A dynamic and cooperative tracking system for crowdfunding. arXiv preprint arXiv:2002.00847.
Kun Zhang, Guangyi Lv, Linyuan Wang, Le Wu, Enhong Chen, Fangzhao Wu, and Xing Xie. 2019d. Drr-net: Dynamic re-read network for sentence semantic matching. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 7442-7449.
Yaowei Zheng, Richong Zhang, Samuel Mensah, and Yongyi Mao. 2020. Replicate, walk, and stop on syntax: An effective neural network model for aspect-level sentiment classification. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 9685-9692.
Yukun Zheng, Jiaxin Mao, Yiqun Liu, Zixin Ye, Min Zhang, and Shaoping Ma. 2019. Human behavior inspired machine reading comprehension. In Proceedings of the 42nd International ACM SIGIR Conference on Research and Development in Information Retrieval, pages 425-434.

2203.16xxx/2203.16369/images.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0e941a01278483be2438a5cbaf4367df6d0a5e1074aaa04442a4417b3835f884
+ size 541155

2203.16xxx/2203.16369/layout.json ADDED
The diff for this file is too large to render. See raw diff

2203.16xxx/2203.16421/c0eaf7a9-2c57-463e-87f4-1bf6bb2119e5_content_list.json ADDED
The diff for this file is too large to render. See raw diff

2203.16xxx/2203.16421/c0eaf7a9-2c57-463e-87f4-1bf6bb2119e5_model.json ADDED
The diff for this file is too large to render. See raw diff

2203.16xxx/2203.16421/c0eaf7a9-2c57-463e-87f4-1bf6bb2119e5_origin.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:862d9b7e286f0acceadb992b509c5879b4260dc544f39b92b50877d8fd31ce24
+ size 12882551

2203.16xxx/2203.16421/full.md ADDED
The diff for this file is too large to render. See raw diff

2203.16xxx/2203.16421/images.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ea97991822baee40fa03817d3f5a6b1860035fac36154a7ba99c6fec4a73594d
+ size 2143064

2203.16xxx/2203.16421/layout.json ADDED
The diff for this file is too large to render. See raw diff

2203.16xxx/2203.16427/7e4731b1-1524-48d4-9792-fa541b30a263_content_list.json ADDED
The diff for this file is too large to render. See raw diff