Add Batch d27b8905-b815-4f57-9e12-19a793d9d6ad
This view is limited to 50 files because it contains too many changes. See the raw diff for the full change set.
- .gitattributes +64 -0
- 2202.11xxx/2202.11855/08c4e89b-f55c-4f84-bef3-fcf83f2937a8_content_list.json +0 -0
- 2202.11xxx/2202.11855/08c4e89b-f55c-4f84-bef3-fcf83f2937a8_model.json +0 -0
- 2202.11xxx/2202.11855/08c4e89b-f55c-4f84-bef3-fcf83f2937a8_origin.pdf +3 -0
- 2202.11xxx/2202.11855/full.md +0 -0
- 2202.11xxx/2202.11855/images.zip +3 -0
- 2202.11xxx/2202.11855/layout.json +0 -0
- 2202.11xxx/2202.11884/edf7b602-b852-4c85-9e17-0cbd193b4c28_content_list.json +2018 -0
- 2202.11xxx/2202.11884/edf7b602-b852-4c85-9e17-0cbd193b4c28_model.json +0 -0
- 2202.11xxx/2202.11884/edf7b602-b852-4c85-9e17-0cbd193b4c28_origin.pdf +3 -0
- 2202.11xxx/2202.11884/full.md +397 -0
- 2202.11xxx/2202.11884/images.zip +3 -0
- 2202.11xxx/2202.11884/layout.json +0 -0
- 2202.11xxx/2202.11907/13c6c70f-eb0a-4855-9c93-c58436de7c44_content_list.json +1155 -0
- 2202.11xxx/2202.11907/13c6c70f-eb0a-4855-9c93-c58436de7c44_model.json +1745 -0
- 2202.11xxx/2202.11907/13c6c70f-eb0a-4855-9c93-c58436de7c44_origin.pdf +3 -0
- 2202.11xxx/2202.11907/full.md +253 -0
- 2202.11xxx/2202.11907/images.zip +3 -0
- 2202.11xxx/2202.11907/layout.json +0 -0
- 2202.11xxx/2202.11911/9037d395-d951-4662-9f75-505b7890fe99_content_list.json +1571 -0
- 2202.11xxx/2202.11911/9037d395-d951-4662-9f75-505b7890fe99_model.json +2009 -0
- 2202.11xxx/2202.11911/9037d395-d951-4662-9f75-505b7890fe99_origin.pdf +3 -0
- 2202.11xxx/2202.11911/full.md +291 -0
- 2202.11xxx/2202.11911/images.zip +3 -0
- 2202.11xxx/2202.11911/layout.json +0 -0
- 2202.11xxx/2202.11912/f4c9ccdd-f304-4b1d-89a0-9fd9c3d45733_content_list.json +0 -0
- 2202.11xxx/2202.11912/f4c9ccdd-f304-4b1d-89a0-9fd9c3d45733_model.json +0 -0
- 2202.11xxx/2202.11912/f4c9ccdd-f304-4b1d-89a0-9fd9c3d45733_origin.pdf +3 -0
- 2202.11xxx/2202.11912/full.md +0 -0
- 2202.11xxx/2202.11912/images.zip +3 -0
- 2202.11xxx/2202.11912/layout.json +0 -0
- 2202.11xxx/2202.11915/6d305bc4-4eda-4ae4-9704-6ea8af50946e_content_list.json +2020 -0
- 2202.11xxx/2202.11915/6d305bc4-4eda-4ae4-9704-6ea8af50946e_model.json +0 -0
- 2202.11xxx/2202.11915/6d305bc4-4eda-4ae4-9704-6ea8af50946e_origin.pdf +3 -0
- 2202.11xxx/2202.11915/full.md +431 -0
- 2202.11xxx/2202.11915/images.zip +3 -0
- 2202.11xxx/2202.11915/layout.json +0 -0
- 2202.11xxx/2202.11917/2c37ab02-567a-4330-8d08-faf65b9d17f7_content_list.json +0 -0
- 2202.11xxx/2202.11917/2c37ab02-567a-4330-8d08-faf65b9d17f7_model.json +0 -0
- 2202.11xxx/2202.11917/2c37ab02-567a-4330-8d08-faf65b9d17f7_origin.pdf +3 -0
- 2202.11xxx/2202.11917/full.md +0 -0
- 2202.11xxx/2202.11917/images.zip +3 -0
- 2202.11xxx/2202.11917/layout.json +0 -0
- 2202.11xxx/2202.11923/2469feeb-f44a-460f-890c-2b605e397b0b_content_list.json +1347 -0
- 2202.11xxx/2202.11923/2469feeb-f44a-460f-890c-2b605e397b0b_model.json +1927 -0
- 2202.11xxx/2202.11923/2469feeb-f44a-460f-890c-2b605e397b0b_origin.pdf +3 -0
- 2202.11xxx/2202.11923/full.md +267 -0
- 2202.11xxx/2202.11923/images.zip +3 -0
- 2202.11xxx/2202.11923/layout.json +0 -0
- 2202.11xxx/2202.11946/897481d3-8eb4-4b14-a2ea-a190292c9934_content_list.json +0 -0
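The listing above shows the per-paper layout used throughout this batch: each arXiv ID directory carries a UUID-prefixed content list, model JSON, and original PDF, plus full.md, images.zip, and layout.json. The sketch below is a minimal, illustrative check that a locally checked-out paper directory contains this set of artifacts; the directory path and the helper itself are assumptions for illustration, not part of the commit.

import pathlib

EXPECTED_SUFFIXES = [
    "_content_list.json",  # per-block text/image/equation list
    "_model.json",         # raw layout-model output
    "_origin.pdf",         # original paper PDF (stored via Git LFS)
]
EXPECTED_FILES = ["full.md", "images.zip", "layout.json"]

def check_paper_dir(paper_dir: str) -> list:
    """Return the artifacts missing from one paper directory."""
    d = pathlib.Path(paper_dir)
    missing = [name for name in EXPECTED_FILES if not (d / name).exists()]
    for suffix in EXPECTED_SUFFIXES:
        if not list(d.glob("*" + suffix)):
            missing.append("*" + suffix)
    return missing

if __name__ == "__main__":
    # Example against one of the directories added in this batch.
    print(check_paper_dir("2202.11xxx/2202.11884"))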
.gitattributes
CHANGED
@@ -7672,3 +7672,67 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 2203.00xxx/2203.00633/5d99a0f8-f903-4027-90ee-37267742c2cd_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2203.00xxx/2203.00638/86aa3a66-bece-4d13-88cb-8d77ee5dbbda_origin.pdf filter=lfs diff=lfs merge=lfs -text
 2203.00xxx/2203.00663/cd5f8cbd-3f25-42ff-a4bf-4bdde5095917_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.11xxx/2202.11855/08c4e89b-f55c-4f84-bef3-fcf83f2937a8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.11xxx/2202.11884/edf7b602-b852-4c85-9e17-0cbd193b4c28_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.11xxx/2202.11907/13c6c70f-eb0a-4855-9c93-c58436de7c44_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.11xxx/2202.11911/9037d395-d951-4662-9f75-505b7890fe99_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.11xxx/2202.11912/f4c9ccdd-f304-4b1d-89a0-9fd9c3d45733_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.11xxx/2202.11915/6d305bc4-4eda-4ae4-9704-6ea8af50946e_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.11xxx/2202.11917/2c37ab02-567a-4330-8d08-faf65b9d17f7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.11xxx/2202.11923/2469feeb-f44a-460f-890c-2b605e397b0b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.11xxx/2202.11946/897481d3-8eb4-4b14-a2ea-a190292c9934_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.11xxx/2202.11958/eb0384b5-9492-4e3d-bc58-9e3997385ae5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.11xxx/2202.11983/7f1f7f4e-c43d-4143-86ed-dd11d70dc1ea_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12015/ccb6bfe5-5b3b-4e4f-86a5-3ed7ca0e15d5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12024/0f9cfa6b-acdc-4292-b169-df89297ac963_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12028/016375ad-361b-4b60-97f9-98b569b40df7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12031/92834075-db97-4c37-b5ec-5dd8edfebaee_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12040/659abfdd-c58c-4115-b586-fbd4f2fd5b39_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12100/616709b3-e01d-42d7-8ea5-768c2a22f639_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12109/8dd575ec-f2b5-4123-9ac3-842f0cc086fc_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12116/8c914f7b-38fb-45fc-b702-f7c8d56d1ad5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12165/92685e27-658d-41ff-a801-32ef31538c15_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12172/4e982382-9f93-426d-b19e-f884433f3a3b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12177/8fd4040b-4717-44aa-81c4-8fbf7801f964_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12181/88b2d3f6-8739-4fde-97d2-64951abaefc7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12194/2301b707-640e-4572-93ca-5edd881de59c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12197/c509fff0-0cfb-4b81-ab14-33d820bc0295_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12205/c96aef5f-9619-4fc7-ad06-4ae33795fc11_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12219/f325a7ab-6f26-4ce9-bf86-cca5a75cf6a9_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12233/f2dc1bb1-f130-4c19-9de4-0af156e794b8_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12299/825b3e55-9dc4-4ba9-b9ce-3625566f4558_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12350/a2a93463-8cf4-4a69-b92b-279c7e4df6ec_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12361/abe29899-2c9d-4555-8451-f1054517ff73_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12362/f2e535f9-df43-45d6-8891-f6a8a884b98f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12385/4851db9a-8eb5-4bff-843c-61439cbe2798_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12441/117f81f1-f4be-4e3a-93fb-a7878731cdec_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12456/aee6973b-a421-4e48-abf8-ebd5b368ec1c_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12478/cfed3294-3e76-412b-b760-cae75113ab5f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12499/5fb02ae5-d458-4631-bdc4-ef39a4e32112_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12501/bf0f4e2a-f050-4979-934e-5016816d03c3_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12513/596b7a8d-6c10-4b40-863b-78ddbaa7760d_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12555/4a9503ac-27d8-4d73-b889-36fcaa7ecb32_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12575/061e3044-652e-4082-83d4-0c77f6a30a65_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12587/21c8ee15-c912-42f2-8c23-3a4f4bd0195a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12670/aef2ae30-dae4-44fe-bbc0-8d826ad22520_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12692/7ce4d1e0-faab-436c-b605-ac62ef6b9740_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12760/8c88c28b-b700-4937-aad2-2dbee02994d4_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12793/b8946555-eedb-47be-ba5c-4f4f3be04133_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12796/094f2b98-ef60-4a5a-978b-566ab8bcd910_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12837/e0b6ed5a-caf3-4cc7-a6a3-062477f15612_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12924/7715ca9e-05f1-443b-b5e0-9a6567700073_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12972/17f6cf89-052b-4350-b2b0-4e610f30b45f_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.12xxx/2202.12998/6f0537a6-e403-4fc4-b6cc-9bd5baa71509_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.13xxx/2202.13013/b5caff42-0392-47ba-b051-b9c057039d6a_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.13xxx/2202.13028/dfc3512a-00b1-44c6-be62-cf6a35cdecb5_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.13xxx/2202.13037/d170a1ef-f7e6-4574-89c2-46b3d5c8e1d6_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.13xxx/2202.13047/9fae61c7-1137-4d10-8ca6-5533a35975b7_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.13xxx/2202.13058/bdccdc7d-3a7c-456d-84dc-381b2a1a4744_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.13xxx/2202.13065/3d5befc6-3c3e-48f1-9a92-ad4f03d2e755_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.13xxx/2202.13066/fc1c998f-ddda-4182-9948-8ee4d0bfc659_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.13xxx/2202.13084/a3e75c5b-aa8e-4112-b7a6-cf79fe7d1621_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.13xxx/2202.13785/96bc6101-219a-4a8c-a9be-ceb88c890605_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2202.13xxx/2202.13864/6e843a40-1929-4e86-b178-0f0586de2a1b_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.00xxx/2203.00003/20bc11c3-9e8b-441e-8340-ee70bb0a6e74_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.00xxx/2203.00451/3722325e-e439-462f-adf7-d0b194143899_origin.pdf filter=lfs diff=lfs merge=lfs -text
+2203.01xxx/2203.01175/08f55786-8c30-4807-ab01-26ff9a7183b7_origin.pdf filter=lfs diff=lfs merge=lfs -text
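Each added rule follows the same pattern: one .gitattributes line per origin PDF, routing it through the Git LFS filter so the repository stores a small pointer instead of the binary. The following is a minimal sketch of how such lines could be generated for a new batch; the directory argument and the helper function are illustrative assumptions, not tooling that ships with this dataset.

import pathlib

def lfs_attribute_lines(batch_dir: str) -> list:
    # One rule per *_origin.pdf, matching the pattern used in the hunk above.
    lines = []
    for pdf in sorted(pathlib.Path(batch_dir).glob("*/*_origin.pdf")):
        lines.append(pdf.as_posix() + " filter=lfs diff=lfs merge=lfs -text")
    return lines

if __name__ == "__main__":
    # Example: print rules for a local checkout of the 2202.11xxx shard.
    for line in lfs_attribute_lines("2202.11xxx"):
        print(line)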
2202.11xxx/2202.11855/08c4e89b-f55c-4f84-bef3-fcf83f2937a8_content_list.json
ADDED
The diff for this file is too large to render. See raw diff.
2202.11xxx/2202.11855/08c4e89b-f55c-4f84-bef3-fcf83f2937a8_model.json
ADDED
The diff for this file is too large to render. See raw diff.
2202.11xxx/2202.11855/08c4e89b-f55c-4f84-bef3-fcf83f2937a8_origin.pdf
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b2549d11f1a7ef3b92d963176cd8838737779f45d4c399193e0ea4d0a0aa7545
+size 2180699
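The three lines above are the Git LFS pointer stored in place of the PDF: a spec version, the SHA-256 object ID of the real file, and its size in bytes. A minimal sketch for parsing such a pointer, assuming only the standard key/value pointer format shown here (the function name is a hypothetical helper):

def parse_lfs_pointer(text: str) -> dict:
    # A Git LFS pointer is a short key/value text file: version, oid, size.
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return {
        "version": fields["version"],
        "sha256": fields["oid"].removeprefix("sha256:"),
        "size_bytes": int(fields["size"]),
    }

if __name__ == "__main__":
    pointer = (
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:b2549d11f1a7ef3b92d963176cd8838737779f45d4c399193e0ea4d0a0aa7545\n"
        "size 2180699\n"
    )
    print(parse_lfs_pointer(pointer))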
2202.11xxx/2202.11855/full.md
ADDED
The diff for this file is too large to render. See raw diff.
2202.11xxx/2202.11855/images.zip
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:299680f4adb9c43cc78067a39d55b2c677dad1dbb339c6cbd84e14df02031797
+size 997772
2202.11xxx/2202.11855/layout.json
ADDED
The diff for this file is too large to render. See raw diff.
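The content_list.json shown next (for 2202.11884) is a flat JSON array of blocks, each with a "type" (text, image, equation, aside_text, page_footnote), the extracted "text" or image path, an optional "text_level" for headings, a "bbox", and a "page_idx". A minimal sketch for loading one of these files and printing its headings and body text per page; it assumes a local checkout of this dataset and is not part of the commit itself.

import json

def load_blocks(path: str) -> list:
    # The content list is a flat JSON array of text/image/equation blocks.
    with open(path, encoding="utf-8") as f:
        return json.load(f)

def print_headings_and_text(blocks: list) -> None:
    for block in blocks:
        if block.get("type") != "text":
            continue  # skip images, equations, footnotes, and aside text
        prefix = "#" * block["text_level"] + " " if block.get("text_level") else ""
        print("[page %d] %s%s" % (block["page_idx"], prefix, block["text"][:80]))

if __name__ == "__main__":
    blocks = load_blocks("2202.11xxx/2202.11884/edf7b602-b852-4c85-9e17-0cbd193b4c28_content_list.json")
    print_headings_and_text(blocks)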
2202.11xxx/2202.11884/edf7b602-b852-4c85-9e17-0cbd193b4c28_content_list.json
ADDED
@@ -0,0 +1,2018 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "M2I: From Factored Marginal Trajectory Prediction to Interactive Prediction",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
94,
|
| 8 |
+
130,
|
| 9 |
+
875,
|
| 10 |
+
151
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Qiao Sun $^{1*}$ Xin Huang $^{2*†}$ $^{1}$ IIIS, Tsinghua University",
|
| 17 |
+
"bbox": [
|
| 18 |
+
176,
|
| 19 |
+
179,
|
| 20 |
+
398,
|
| 21 |
+
219
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Junru Gu $^{1}$ Brian C. Williams $^{2}$ Hang Zhao $^{1\\ddagger}$ $^{2}$ CSAIL, Massachusetts Institute of Technology",
|
| 28 |
+
"bbox": [
|
| 29 |
+
410,
|
| 30 |
+
179,
|
| 31 |
+
795,
|
| 32 |
+
219
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Abstract",
|
| 39 |
+
"text_level": 1,
|
| 40 |
+
"bbox": [
|
| 41 |
+
233,
|
| 42 |
+
251,
|
| 43 |
+
312,
|
| 44 |
+
266
|
| 45 |
+
],
|
| 46 |
+
"page_idx": 0
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"type": "text",
|
| 50 |
+
"text": "Predicting future motions of road participants is an important task for driving autonomously in urban scenes. Existing models excel at predicting marginal trajectories for single agents, yet it remains an open question to jointly predict scene compliant trajectories over multiple agents. The challenge is due to exponentially increasing prediction space as a function of the number of agents. In this work, we exploit the underlying relations between interacting agents and decouple the joint prediction problem into marginal prediction problems. Our proposed approach M2I first classifies interacting agents as pairs of influencers and reactors, and then leverages a marginal prediction model and a conditional prediction model to predict trajectories for the influencers and reactors, respectively. The predictions from interacting agents are combined and selected according to their joint likelihoods. Experiments show that our simple but effective approach achieves state-of-the-art performance on the Waymo Open Motion Dataset interactive prediction benchmark.",
|
| 51 |
+
"bbox": [
|
| 52 |
+
73,
|
| 53 |
+
282,
|
| 54 |
+
472,
|
| 55 |
+
570
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "1. Introduction",
|
| 62 |
+
"text_level": 1,
|
| 63 |
+
"bbox": [
|
| 64 |
+
76,
|
| 65 |
+
580,
|
| 66 |
+
209,
|
| 67 |
+
594
|
| 68 |
+
],
|
| 69 |
+
"page_idx": 0
|
| 70 |
+
},
|
| 71 |
+
{
|
| 72 |
+
"type": "text",
|
| 73 |
+
"text": "Trajectory prediction is widely used by intelligent driving systems to infer future motions of nearby agents and identify risky scenarios to enable safe driving. Recent advances have shown great success in predicting accurate trajectories by learning from real-world driving examples. Many existing trajectory prediction works [5, 12, 15, 26, 28, 38] focus on generating marginal prediction samples of future trajectories over individual agents, failing to reason about their interactions in the future. As a result, the prediction samples over multiple agents may overlap with each other and result in sub-optimal performance.",
|
| 74 |
+
"bbox": [
|
| 75 |
+
73,
|
| 76 |
+
598,
|
| 77 |
+
468,
|
| 78 |
+
763
|
| 79 |
+
],
|
| 80 |
+
"page_idx": 0
|
| 81 |
+
},
|
| 82 |
+
{
|
| 83 |
+
"type": "text",
|
| 84 |
+
"text": "We present a motivating example in Fig. 1, in which a marginal predictor produces a set of prediction samples separately for two interacting agents, as visualized in the top left figure. While the predictions for each agent are rea",
|
| 85 |
+
"bbox": [
|
| 86 |
+
75,
|
| 87 |
+
766,
|
| 88 |
+
468,
|
| 89 |
+
828
|
| 90 |
+
],
|
| 91 |
+
"page_idx": 0
|
| 92 |
+
},
|
| 93 |
+
{
|
| 94 |
+
"type": "image",
|
| 95 |
+
"img_path": "images/1e43df5df9efbaa3ce291cf48e0217f1bb4a485de38d56aef063faddd03974de.jpg",
|
| 96 |
+
"image_caption": [
|
| 97 |
+
"Joint Prediction using Traditional Marginal Predictors"
|
| 98 |
+
],
|
| 99 |
+
"image_footnote": [],
|
| 100 |
+
"bbox": [
|
| 101 |
+
506,
|
| 102 |
+
252,
|
| 103 |
+
883,
|
| 104 |
+
310
|
| 105 |
+
],
|
| 106 |
+
"page_idx": 0
|
| 107 |
+
},
|
| 108 |
+
{
|
| 109 |
+
"type": "image",
|
| 110 |
+
"img_path": "images/c9f33f7332d80595aa89054d43b2fa0af8e5282929ec10fd516bca8fdb0c2269.jpg",
|
| 111 |
+
"image_caption": [
|
| 112 |
+
"Joint Prediction using M2I",
|
| 113 |
+
"Figure 1. A motivating example of M2I. Top: Traditional marginal predictor often produces scene inconsistent trajectory predictions that collide with each other. Even for non-colliding predictions, it ignores the potential interaction between agent futures and may predict unrealistic behaviors. Bottom: Our proposed approach M2I predicts scene compliant trajectories by first identifying an influencer reactor pair in the scene. It then predicts marginal trajectories for the influencer and reactive trajectories for the reactor."
|
| 114 |
+
],
|
| 115 |
+
"image_footnote": [],
|
| 116 |
+
"bbox": [
|
| 117 |
+
506,
|
| 118 |
+
323,
|
| 119 |
+
883,
|
| 120 |
+
378
|
| 121 |
+
],
|
| 122 |
+
"page_idx": 0
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"text": "sonable without considering the presence of the other, some trajectory pairs will collide when considering them jointly. For instance, it is unlikely that the red agent turns left while the blue agent goes forward, as indicated in the top middle example in Fig. 1. Therefore, it is necessary to predict scene compliant trajectories with the existence of multiple agents to support better prediction accuracy.",
|
| 127 |
+
"bbox": [
|
| 128 |
+
496,
|
| 129 |
+
507,
|
| 130 |
+
890,
|
| 131 |
+
613
|
| 132 |
+
],
|
| 133 |
+
"page_idx": 0
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"text": "To generate scene compliant trajectories, one can learn a joint predictor to predict trajectories in a joint space over multiple agents; however, it suffers from an exponentially increasing prediction space as the number of agents increases. As investigated by [15], while it is feasible to predict a set of goals for a single agent, the goal space increases exponentially with the number of agents and becomes unmanageable for even two agents with a few hundred goal candidates for each agent. A more computationally efficient alternative to producing scene compliant trajectories is to post-process marginal prediction samples by pruning colliding ones; however, such an ad-hoc approach fails to take into account potential agent interactions in the future and may ignore other conflicts which are hard to prune by heuristics. For instance, although the prediction sample in the top right figure in Fig. 1 is collision-free, the red agent may slow down when turning left to keep a safe distance from the blue agent. Such an interactive behavior is hard to be captured by a marginal predictor as it is unaware of the",
|
| 138 |
+
"bbox": [
|
| 139 |
+
496,
|
| 140 |
+
614,
|
| 141 |
+
892,
|
| 142 |
+
900
|
| 143 |
+
],
|
| 144 |
+
"page_idx": 0
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "aside_text",
|
| 148 |
+
"text": "arXiv:2202.11884v2 [cs.RO] 28 Mar 2022",
|
| 149 |
+
"bbox": [
|
| 150 |
+
22,
|
| 151 |
+
260,
|
| 152 |
+
57,
|
| 153 |
+
705
|
| 154 |
+
],
|
| 155 |
+
"page_idx": 0
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"type": "page_footnote",
|
| 159 |
+
"text": "*Denotes equal contribution. Code and demo available at paper website: https://tsinghua-mars-lab.github.io/M2I/ alan.qiao.sun@gmail.com, xhuang@csail.mit.edu. †X. Huang was supported in part by Qualcomm Innovation Fellowship. ‡Corresponding at: hangzhao@mail.tsinghua.edu.cn.",
|
| 160 |
+
"bbox": [
|
| 161 |
+
75,
|
| 162 |
+
838,
|
| 163 |
+
468,
|
| 164 |
+
900
|
| 165 |
+
],
|
| 166 |
+
"page_idx": 0
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"type": "text",
|
| 170 |
+
"text": "future behavior of the other agents in the scene.",
|
| 171 |
+
"bbox": [
|
| 172 |
+
76,
|
| 173 |
+
90,
|
| 174 |
+
392,
|
| 175 |
+
104
|
| 176 |
+
],
|
| 177 |
+
"page_idx": 1
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"type": "text",
|
| 181 |
+
"text": "In this paper, we propose M2I that leverages marginal and conditional trajectory predictors to efficiently predict scene compliant multi-agent trajectories, by approximating the joint distribution as a product of a marginal distribution and a conditional distribution. The factorization assumes two types of agents: the influencer that behaves independently without considering the other agents, and the reactor that reacts to the behavior of the influencer. This assumption is inspired by the recent study on the underlying correlations between interactive agent trajectories [39]. Under the assumption, we leverage a standard marginal predictor to generate prediction samples for the influencer, and a conditional predictor to roll out future trajectories for the reactor conditioned on the future trajectory of the influencer. The advantage of our proposed approach M2I is illustrated in the bottom figures in Fig. 1, in which we first predict the relations of the interactive agents. Given the relations, we predict the future trajectories of the influencer and then predict reactive behaviors of the reactor conditioned on each influencer prediction. As causality in driving interaction remains an open problem [39], we pre-label the influencer-reactor relation based on a heuristic, and propose a relation predictor to classify interactive relations at inference time.",
|
| 182 |
+
"bbox": [
|
| 183 |
+
76,
|
| 184 |
+
108,
|
| 185 |
+
472,
|
| 186 |
+
455
|
| 187 |
+
],
|
| 188 |
+
"page_idx": 1
|
| 189 |
+
},
|
| 190 |
+
{
|
| 191 |
+
"type": "text",
|
| 192 |
+
"text": "Our contributions are three-fold. First, we propose a simple but effective framework M2I that leverages marginal and conditional predictors to generate accurate and scene compliant multi-agent trajectories. The framework does not assume a specific predictor structure, allowing it to be adopted by a wide range of backbone prediction models. Second, we propose a relation predictor that infers high-level relations among interactive agents to decouple the prediction space. Third, we demonstrate our framework using a goal-conditioned prediction model. Experiments show that M2I achieves state-of-the-art performance on the Waymo Open Motion Dataset interactive prediction benchmark.",
|
| 193 |
+
"bbox": [
|
| 194 |
+
76,
|
| 195 |
+
455,
|
| 196 |
+
472,
|
| 197 |
+
638
|
| 198 |
+
],
|
| 199 |
+
"page_idx": 1
|
| 200 |
+
},
|
| 201 |
+
{
|
| 202 |
+
"type": "text",
|
| 203 |
+
"text": "2. Related Work",
|
| 204 |
+
"text_level": 1,
|
| 205 |
+
"bbox": [
|
| 206 |
+
76,
|
| 207 |
+
648,
|
| 208 |
+
218,
|
| 209 |
+
664
|
| 210 |
+
],
|
| 211 |
+
"page_idx": 1
|
| 212 |
+
},
|
| 213 |
+
{
|
| 214 |
+
"type": "text",
|
| 215 |
+
"text": "Trajectory prediction for traffic agents has been studied extensively in recent years. Due to uncertainty in human intent, the future trajectories are probabilistic and multimodal. To handle the multi-modality problem, [5, 35] propose models that output behavior predictions as Gaussian mixture models (GMMs), in which each mixture component represents a single modality. Instead of parameterizing the prediction distribution, generative models, such as generative adversarial models (GANs) [16, 18, 47] and (conditional) variational autoencoders (VAEs) [26, 29, 35, 44], produce trajectory samples to approximate the distribution space. These generative models suffer from sample inefficiency and require many samples to cover diverse driving scenarios [18].",
|
| 216 |
+
"bbox": [
|
| 217 |
+
76,
|
| 218 |
+
672,
|
| 219 |
+
472,
|
| 220 |
+
883
|
| 221 |
+
],
|
| 222 |
+
"page_idx": 1
|
| 223 |
+
},
|
| 224 |
+
{
|
| 225 |
+
"type": "text",
|
| 226 |
+
"text": "More recently, a family of models are proposed to im",
|
| 227 |
+
"bbox": [
|
| 228 |
+
96,
|
| 229 |
+
885,
|
| 230 |
+
468,
|
| 231 |
+
901
|
| 232 |
+
],
|
| 233 |
+
"page_idx": 1
|
| 234 |
+
},
|
| 235 |
+
{
|
| 236 |
+
"type": "text",
|
| 237 |
+
"text": "prove prediction accuracy and coverage by first predicting high-level intentions, such as goal targets [11, 13, 15, 29, 34, 46], lanes to follow [21, 37], and maneuver actions [8, 9, 19, 24], before predicting low-level trajectories conditioning on the intention. Such models demonstrate great success in predicting accurate trajectories for single agents in popular trajectory prediction benchmarks, such as Argov-verse [6] and Waymo Open Motion Dataset [10]. While our proposed approach M2I can use an arbitrary prediction model, we choose to adopt an anchor-free goal-based predictor [15] because of its outstanding performance.",
|
| 238 |
+
"bbox": [
|
| 239 |
+
496,
|
| 240 |
+
90,
|
| 241 |
+
890,
|
| 242 |
+
257
|
| 243 |
+
],
|
| 244 |
+
"page_idx": 1
|
| 245 |
+
},
|
| 246 |
+
{
|
| 247 |
+
"type": "text",
|
| 248 |
+
"text": "In the rest of the section, we introduce the literature closely related to our approach, on interactive trajectory prediction and conditional trajectory prediction.",
|
| 249 |
+
"bbox": [
|
| 250 |
+
496,
|
| 251 |
+
257,
|
| 252 |
+
890,
|
| 253 |
+
301
|
| 254 |
+
],
|
| 255 |
+
"page_idx": 1
|
| 256 |
+
},
|
| 257 |
+
{
|
| 258 |
+
"type": "text",
|
| 259 |
+
"text": "2.1. Interactive Trajectory Prediction",
|
| 260 |
+
"text_level": 1,
|
| 261 |
+
"bbox": [
|
| 262 |
+
500,
|
| 263 |
+
306,
|
| 264 |
+
790,
|
| 265 |
+
323
|
| 266 |
+
],
|
| 267 |
+
"page_idx": 1
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "text",
|
| 271 |
+
"text": "Predicting scene compliant trajectories for multiple agents remains an open question due to its complexity. Early work leverages hand-crafted interaction models, such as social forces [17] and energy functions [43]. These handcrafted functions require manual tuning and have difficulties modeling highly complicated and nonlinear interactions. In contrast, learning-based methods achieve better accuracy by learning interactions from realistic driving data: [2, 16] utilize social pooling mechanisms to capture social influences from neighbor agents to predict interactive pedestrian trajectories in crowded scenes; [3, 4, 31, 35] build a graph neural network (GNN) to learn the agent-to-agent interactions; [22, 27, 32, 33, 38] leverage attention and transformer mechanisms to learn multi-agent interaction behaviors. In this work, we build a sparse graph with directed edges representing dependencies between agent nodes, but our approach differs from existing graph-based models in a few ways. First, it adopts explicit influencer-reactor relations and offers better interpretability in agent interactions. Second, M2I predicts scene compliant trajectories through marginal and conditional predictors to afford better computational efficiency. Third, it utilizes the future trajectory of influencer agents to predict conditional behaviors for the reactors for better accuracy. This also allows M2I to be used for counterfactual reasoning in simulation applications by varying influencer trajectories.",
|
| 272 |
+
"bbox": [
|
| 273 |
+
496,
|
| 274 |
+
327,
|
| 275 |
+
890,
|
| 276 |
+
718
|
| 277 |
+
],
|
| 278 |
+
"page_idx": 1
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "text",
|
| 282 |
+
"text": "Existing marginal prediction work produces scene compliant trajectories by leveraging an auxiliary collision loss [27] or a critic based on an inverse reinforcement learning framework [40] that discourages colliding trajectories. In this work, we focus on identifying agent relations explicitly as influencers and reactors to generate scene compliant predictions. Our work is relevant to [23, 25] that predicts interacting types before predicting scene compliant trajectories, but we further exploit the structure of the decoupled relations and the influence of low-level influencer trajectories, as opposed to only providing the high-level interaction labels as the input to the trajectory predictor.",
|
| 283 |
+
"bbox": [
|
| 284 |
+
496,
|
| 285 |
+
719,
|
| 286 |
+
890,
|
| 287 |
+
900
|
| 288 |
+
],
|
| 289 |
+
"page_idx": 1
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "image",
|
| 293 |
+
"img_path": "images/bde350c2e759f9ca68fd1011fc4b0a4c998661392e42715d04d8892255bb9058.jpg",
|
| 294 |
+
"image_caption": [
|
| 295 |
+
"Figure 2. Overview of M2I. The relation predictor predicts influencer-reactor relations for interacting agents. The marginal predictor generates marginal predictions for the influencer. The conditional predictor generates predictions for the reactor, conditioned on each influencer trajectory. The sample selector chooses a subset of representative joint samples as output."
|
| 296 |
+
],
|
| 297 |
+
"image_footnote": [],
|
| 298 |
+
"bbox": [
|
| 299 |
+
163,
|
| 300 |
+
90,
|
| 301 |
+
803,
|
| 302 |
+
220
|
| 303 |
+
],
|
| 304 |
+
"page_idx": 2
|
| 305 |
+
},
|
| 306 |
+
{
|
| 307 |
+
"type": "text",
|
| 308 |
+
"text": "2.2. Conditional Trajectory Prediction",
|
| 309 |
+
"text_level": 1,
|
| 310 |
+
"bbox": [
|
| 311 |
+
75,
|
| 312 |
+
285,
|
| 313 |
+
375,
|
| 314 |
+
301
|
| 315 |
+
],
|
| 316 |
+
"page_idx": 2
|
| 317 |
+
},
|
| 318 |
+
{
|
| 319 |
+
"type": "text",
|
| 320 |
+
"text": "Conditional prediction approaches study the correlations between future agent trajectories, by predicting trajectories conditioned on the future trajectory of another agent [20,35,39]. These approaches often rely on the future trajectory of the autonomous vehicle or a robot whose future plan is known to the predictor. Our work goes beyond by conditioning on the future trajectory of another agent to be predicted. Despite the prediction errors of the conditioned agent, we show that our model outperforms marginal predictors that do not account for the interactive correlations.",
|
| 321 |
+
"bbox": [
|
| 322 |
+
73,
|
| 323 |
+
309,
|
| 324 |
+
470,
|
| 325 |
+
459
|
| 326 |
+
],
|
| 327 |
+
"page_idx": 2
|
| 328 |
+
},
|
| 329 |
+
{
|
| 330 |
+
"type": "text",
|
| 331 |
+
"text": "3. Approach",
|
| 332 |
+
"text_level": 1,
|
| 333 |
+
"bbox": [
|
| 334 |
+
75,
|
| 335 |
+
468,
|
| 336 |
+
186,
|
| 337 |
+
484
|
| 338 |
+
],
|
| 339 |
+
"page_idx": 2
|
| 340 |
+
},
|
| 341 |
+
{
|
| 342 |
+
"type": "text",
|
| 343 |
+
"text": "In this section, we introduce a formal problem formulation and an overview of M2I, followed by detailed explanations of each model used in the approach.",
|
| 344 |
+
"bbox": [
|
| 345 |
+
75,
|
| 346 |
+
489,
|
| 347 |
+
468,
|
| 348 |
+
536
|
| 349 |
+
],
|
| 350 |
+
"page_idx": 2
|
| 351 |
+
},
|
| 352 |
+
{
|
| 353 |
+
"type": "text",
|
| 354 |
+
"text": "3.1. Problem Formulation",
|
| 355 |
+
"text_level": 1,
|
| 356 |
+
"bbox": [
|
| 357 |
+
75,
|
| 358 |
+
546,
|
| 359 |
+
279,
|
| 360 |
+
561
|
| 361 |
+
],
|
| 362 |
+
"page_idx": 2
|
| 363 |
+
},
|
| 364 |
+
{
|
| 365 |
+
"type": "text",
|
| 366 |
+
"text": "Given observed states $X = (M, S)$ , including the map states $M$ and the observed states $S$ of all agents in a scene, the goal is to predict the future states of the interacting agents $Y$ up to a finite horizon $T$ . We assume the interacting agents are pre-labeled in a given scene, which is available in common interactive prediction datasets such as [10, 45]. As the distribution over $Y$ is a joint distribution over multiple agents, we approximate it as the factorization over a marginal distribution and a conditional distribution:",
|
| 367 |
+
"bbox": [
|
| 368 |
+
75,
|
| 369 |
+
569,
|
| 370 |
+
468,
|
| 371 |
+
705
|
| 372 |
+
],
|
| 373 |
+
"page_idx": 2
|
| 374 |
+
},
|
| 375 |
+
{
|
| 376 |
+
"type": "equation",
|
| 377 |
+
"text": "\n$$\nP (Y | X) = P \\left(Y _ {I}, Y _ {R} | X\\right) \\approx P \\left(Y _ {I} | X\\right) P \\left(Y _ {R} | X, Y _ {I}\\right). \\tag {1}\n$$\n",
|
| 378 |
+
"text_format": "latex",
|
| 379 |
+
"bbox": [
|
| 380 |
+
88,
|
| 381 |
+
718,
|
| 382 |
+
468,
|
| 383 |
+
734
|
| 384 |
+
],
|
| 385 |
+
"page_idx": 2
|
| 386 |
+
},
|
| 387 |
+
{
|
| 388 |
+
"type": "text",
|
| 389 |
+
"text": "The factorization in Eq. (1) first assigns the interacting agents as the influencer $Y_{I}$ and the reactor $Y_{R}$ , and decouples the joint distribution as the marginal distribution over the influencer and the conditional distribution over the reactor. This factorization allows us to reduce the complexity of learning a joint distribution to learning more tractable distributions. In the case where two agents are not interacting, the factorization can be simplified as two marginal distributions:",
|
| 390 |
+
"bbox": [
|
| 391 |
+
75,
|
| 392 |
+
748,
|
| 393 |
+
470,
|
| 394 |
+
882
|
| 395 |
+
],
|
| 396 |
+
"page_idx": 2
|
| 397 |
+
},
|
| 398 |
+
{
|
| 399 |
+
"type": "equation",
|
| 400 |
+
"text": "\n$$\nP (Y | X) \\approx P \\left(Y _ {I} | X\\right) P \\left(Y _ {R} | X\\right), \\tag {2}\n$$\n",
|
| 401 |
+
"text_format": "latex",
|
| 402 |
+
"bbox": [
|
| 403 |
+
165,
|
| 404 |
+
883,
|
| 405 |
+
468,
|
| 406 |
+
901
|
| 407 |
+
],
|
| 408 |
+
"page_idx": 2
|
| 409 |
+
},
|
| 410 |
+
{
|
| 411 |
+
"type": "text",
|
| 412 |
+
"text": "where there is no conditional dependence between the agents. Such independence is presumed by many marginal prediction models that predict the marginal distribution without considering other agents in the scene.",
|
| 413 |
+
"bbox": [
|
| 414 |
+
496,
|
| 415 |
+
285,
|
| 416 |
+
890,
|
| 417 |
+
345
|
| 418 |
+
],
|
| 419 |
+
"page_idx": 2
|
| 420 |
+
},
|
| 421 |
+
{
|
| 422 |
+
"type": "text",
|
| 423 |
+
"text": "We focus on two interactive agents in this paper and aim to tackle the pairwise interactive trajectory prediction problem proposed by [10]. For scenarios involving more than two interactive agents, our approach can be modified by predicting the relations over all the agents and chaining multiple marginal and conditional distributions together, assuming no loopy influence:",
|
| 424 |
+
"bbox": [
|
| 425 |
+
496,
|
| 426 |
+
345,
|
| 427 |
+
890,
|
| 428 |
+
450
|
| 429 |
+
],
|
| 430 |
+
"page_idx": 2
|
| 431 |
+
},
|
| 432 |
+
{
|
| 433 |
+
"type": "equation",
|
| 434 |
+
"text": "\n$$\nP _ {N > 2} (Y | X) \\approx \\prod_ {i = 1} ^ {N} P \\left(Y _ {i} | X, \\mathbf {Y} _ {i} ^ {\\inf }\\right), \\tag {3}\n$$\n",
|
| 435 |
+
"text_format": "latex",
|
| 436 |
+
"bbox": [
|
| 437 |
+
578,
|
| 438 |
+
455,
|
| 439 |
+
890,
|
| 440 |
+
496
|
| 441 |
+
],
|
| 442 |
+
"page_idx": 2
|
| 443 |
+
},
|
| 444 |
+
{
|
| 445 |
+
"type": "text",
|
| 446 |
+
"text": "where $N$ is the number of total interactive agents, and $\\mathbf{Y}_i^{\\mathrm{inf}}$ is the set of influencer agents for agent $i$ predicted by the relation predictor. We refer to examples of multi-agent relation predictions in Appendix C.",
|
| 447 |
+
"bbox": [
|
| 448 |
+
496,
|
| 449 |
+
502,
|
| 450 |
+
890,
|
| 451 |
+
564
|
| 452 |
+
],
|
| 453 |
+
"page_idx": 2
|
| 454 |
+
},
|
| 455 |
+
{
|
| 456 |
+
"type": "text",
|
| 457 |
+
"text": "3.2. Model Overview",
|
| 458 |
+
"text_level": 1,
|
| 459 |
+
"bbox": [
|
| 460 |
+
500,
|
| 461 |
+
571,
|
| 462 |
+
663,
|
| 463 |
+
587
|
| 464 |
+
],
|
| 465 |
+
"page_idx": 2
|
| 466 |
+
},
|
| 467 |
+
{
|
| 468 |
+
"type": "text",
|
| 469 |
+
"text": "Our proposed approach M2I is summarized in Fig. 2. It includes a relation predictor to predict the influencer and the reactor in a scene, a marginal predictor to predict future trajectories of the influencer, a conditional predictor to predict future trajectories of the reactor conditioned on the future trajectory of the influencer, and a sample selector to select a set of representative joint prediction samples. Although M2I includes three different learned models, they share the same encoder-decoder structure and adopt the same context encoder to learn context information, as illustrated in Fig. 3. The conditional predictor takes an augmented scene context input that includes the influencer future trajectory to learn reactive behaviors for the reactor. In the following, we introduce each model with more details.",
|
| 470 |
+
"bbox": [
|
| 471 |
+
496,
|
| 472 |
+
595,
|
| 473 |
+
890,
|
| 474 |
+
806
|
| 475 |
+
],
|
| 476 |
+
"page_idx": 2
|
| 477 |
+
},
|
| 478 |
+
{
|
| 479 |
+
"type": "text",
|
| 480 |
+
"text": "3.3. Relation Predictor",
|
| 481 |
+
"text_level": 1,
|
| 482 |
+
"bbox": [
|
| 483 |
+
500,
|
| 484 |
+
816,
|
| 485 |
+
679,
|
| 486 |
+
830
|
| 487 |
+
],
|
| 488 |
+
"page_idx": 2
|
| 489 |
+
},
|
| 490 |
+
{
|
| 491 |
+
"type": "text",
|
| 492 |
+
"text": "We propose a relation predictor to classify whether an interacting agent is an influencer or a reactor, based on the pass yield relation between two agents. Similar to [23], we assume three types of relations: PASS, YIELD, and NONE,",
|
| 493 |
+
"bbox": [
|
| 494 |
+
496,
|
| 495 |
+
840,
|
| 496 |
+
890,
|
| 497 |
+
900
|
| 498 |
+
],
|
| 499 |
+
"page_idx": 2
|
| 500 |
+
},
|
| 501 |
+
{
|
| 502 |
+
"type": "image",
|
| 503 |
+
"img_path": "images/10b0a7af778dd024f23823746282b28fefee20165dee6b88159956f5db989d7b.jpg",
|
| 504 |
+
"image_caption": [
|
| 505 |
+
"Figure 3. M2I includes three models that share the same context encoder. The relation predictor includes a relation prediction head to predict distribution over relation types. The marginal predictor adopts a trajectory prediction head to produce multi-modal prediction samples. The conditional trajectory predictor takes an augmented scene context input as the influencer future trajectory."
|
| 506 |
+
],
|
| 507 |
+
"image_footnote": [],
|
| 508 |
+
"bbox": [
|
| 509 |
+
183,
|
| 510 |
+
89,
|
| 511 |
+
787,
|
| 512 |
+
265
|
| 513 |
+
],
|
| 514 |
+
"page_idx": 3
|
| 515 |
+
},
|
| 516 |
+
{
|
| 517 |
+
"type": "text",
|
| 518 |
+
"text": "and determine the relation using the following heuristics. Given two agent future trajectories $y_{1}$ and $y_{2}$ with $T$ steps, we first compute the closest spatial distance between two agents to determine whether a pass yield relation exists:",
|
| 519 |
+
"bbox": [
|
| 520 |
+
75,
|
| 521 |
+
323,
|
| 522 |
+
470,
|
| 523 |
+
385
|
| 524 |
+
],
|
| 525 |
+
"page_idx": 3
|
| 526 |
+
},
|
| 527 |
+
{
|
| 528 |
+
"type": "equation",
|
| 529 |
+
"text": "\n$$\nd _ {I} = \\min _ {\\tau_ {1} = 1} ^ {T} \\min _ {\\tau_ {2} = 1} ^ {T} | | y _ {1} ^ {\\tau_ {1}} - y _ {2} ^ {\\tau_ {2}} | | _ {2}. \\tag {4}\n$$\n",
|
| 530 |
+
"text_format": "latex",
|
| 531 |
+
"bbox": [
|
| 532 |
+
145,
|
| 533 |
+
396,
|
| 534 |
+
468,
|
| 535 |
+
417
|
| 536 |
+
],
|
| 537 |
+
"page_idx": 3
|
| 538 |
+
},
|
| 539 |
+
{
|
| 540 |
+
"type": "text",
|
| 541 |
+
"text": "If $d_I > \\epsilon_d$ , which is a dynamic threshold depending on the agent size, the agents never get too close to each other and thus we label the relation type as none. Otherwise, we obtain the time step from each agent at which they reach the closest spatial distance, such that:",
|
| 542 |
+
"bbox": [
|
| 543 |
+
75,
|
| 544 |
+
428,
|
| 545 |
+
470,
|
| 546 |
+
503
|
| 547 |
+
],
|
| 548 |
+
"page_idx": 3
|
| 549 |
+
},
|
| 550 |
+
{
|
| 551 |
+
"type": "equation",
|
| 552 |
+
"text": "\n$$\nt _ {1} = \\arg \\min _ {\\tau_ {1} = 1} ^ {T} \\min _ {\\tau_ {2} = 1} ^ {T} \\| y _ {1} ^ {\\tau_ {1}} - y _ {2} ^ {\\tau_ {2}} \\| _ {2}, \\tag {5}\n$$\n",
|
| 553 |
+
"text_format": "latex",
|
| 554 |
+
"bbox": [
|
| 555 |
+
120,
|
| 556 |
+
515,
|
| 557 |
+
468,
|
| 558 |
+
535
|
| 559 |
+
],
|
| 560 |
+
"page_idx": 3
|
| 561 |
+
},
|
| 562 |
+
{
|
| 563 |
+
"type": "equation",
|
| 564 |
+
"text": "\n$$\nt _ {2} = \\arg \\min _ {\\tau_ {2} = 1} ^ {T} \\min _ {\\tau_ {1} = 1} ^ {T} \\| y _ {1} ^ {\\tau_ {1}} - y _ {2} ^ {\\tau_ {2}} \\| _ {2}. \\tag {6}\n$$\n",
|
| 565 |
+
"text_format": "latex",
|
| 566 |
+
"bbox": [
|
| 567 |
+
122,
|
| 568 |
+
536,
|
| 569 |
+
468,
|
| 570 |
+
555
|
| 571 |
+
],
|
| 572 |
+
"page_idx": 3
|
| 573 |
+
},
|
| 574 |
+
{
|
| 575 |
+
"type": "text",
|
| 576 |
+
"text": "When $t_1 > t_2$ , we define that agent 1 yields to agent 2, as it takes longer for agent 1 to reach the interaction point. Otherwise, we define that agent 1 passes agent 2.",
|
| 577 |
+
"bbox": [
|
| 578 |
+
75,
|
| 579 |
+
566,
|
| 580 |
+
468,
|
| 581 |
+
612
|
| 582 |
+
],
|
| 583 |
+
"page_idx": 3
|
| 584 |
+
},
|
| 585 |
+
{
|
| 586 |
+
"type": "text",
|
| 587 |
+
"text": "After labeling the training data with three interaction types, we propose an encoder-decoder-based model to classify an input scenario into a distribution over these types. As shown in Fig. 3, the relation predictor model consists of a context encoder that extracts the context information, including the observed states of the interacting agents and nearby agents and map coordinates, into a hidden vector, as well as a relation prediction head that outputs the probability over each relation type. There is a rich set of literature on learning context information from a traffic scene, such as [7, 12, 14, 28]. Our model could utilize any existing context encoder thanks to its modular design, and we defer a detailed explanation of our choice in Sec. 4. The relation prediction head consists of one layer of multi-layer perceptron (MLP) to output the probability logits over each relation.",
|
| 588 |
+
"bbox": [
|
| 589 |
+
75,
|
| 590 |
+
614,
|
| 591 |
+
468,
|
| 592 |
+
852
|
| 593 |
+
],
|
| 594 |
+
"page_idx": 3
|
| 595 |
+
},
|
| 596 |
+
{
|
| 597 |
+
"type": "text",
|
| 598 |
+
"text": "The loss to train the relation predictor is defined as:",
|
| 599 |
+
"bbox": [
|
| 600 |
+
96,
|
| 601 |
+
856,
|
| 602 |
+
437,
|
| 603 |
+
871
|
| 604 |
+
],
|
| 605 |
+
"page_idx": 3
|
| 606 |
+
},
|
| 607 |
+
{
|
| 608 |
+
"type": "equation",
|
| 609 |
+
"text": "\n$$\n\\mathcal {L} _ {\\text {r e l a t i o n}} = \\mathcal {L} _ {c e} (R, \\hat {R}), \\tag {7}\n$$\n",
|
| 610 |
+
"text_format": "latex",
|
| 611 |
+
"bbox": [
|
| 612 |
+
197,
|
| 613 |
+
883,
|
| 614 |
+
468,
|
| 615 |
+
901
|
| 616 |
+
],
|
| 617 |
+
"page_idx": 3
|
| 618 |
+
},
|
| 619 |
+
{
|
| 620 |
+
"type": "text",
|
| 621 |
+
"text": "where $\\mathcal{L}_{ce}$ is the cross entropy loss, $R$ is the predicted relation distribution, and $\\hat{R}$ is the ground truth relation.",
|
| 622 |
+
"bbox": [
|
| 623 |
+
498,
|
| 624 |
+
324,
|
| 625 |
+
890,
|
| 626 |
+
353
|
| 627 |
+
],
|
| 628 |
+
"page_idx": 3
|
| 629 |
+
},
|
| 630 |
+
{
|
| 631 |
+
"type": "text",
|
| 632 |
+
"text": "Given the predicted relation, we can assign each agent as an influencer or a reactor. If the relation is none, both agents are influencer, such that their future behaviors are independent of each other, as in Eq. (2). If the relation is agent 1 yielding to agent 2, we assign agent 1 as the reactor and agent 2 as the influencer. If the relation is agent 1 passing agent 2, we flip the influencer and reactor labels.",
|
| 633 |
+
"bbox": [
|
| 634 |
+
496,
|
| 635 |
+
354,
|
| 636 |
+
890,
|
| 637 |
+
460
|
| 638 |
+
],
|
| 639 |
+
"page_idx": 3
|
| 640 |
+
},
|
| 641 |
+
{
|
| 642 |
+
"type": "text",
|
| 643 |
+
"text": "3.4. Marginal Trajectory Predictor",
|
| 644 |
+
"text_level": 1,
|
| 645 |
+
"bbox": [
|
| 646 |
+
498,
|
| 647 |
+
472,
|
| 648 |
+
769,
|
| 649 |
+
488
|
| 650 |
+
],
|
| 651 |
+
"page_idx": 3
|
| 652 |
+
},
|
| 653 |
+
{
|
| 654 |
+
"type": "text",
|
| 655 |
+
"text": "We propose a marginal trajectory predictor for the influencer based on an encoder-decoder structure, as shown in Fig. 3, which is widely adopted in the trajectory prediction literature [10, 14, 46]. The predictor utilizes the same context encoder as in Sec. 3.3, and generates a set of prediction samples associated with confidence scores using a trajectory prediction head. Although our approach can take an arbitrary prediction head, we focus on an anchor-free goal-based prediction head because of its outstanding performance in trajectory prediction benchmarks, and defer a detailed explanation in Sec. 4.",
|
| 656 |
+
"bbox": [
|
| 657 |
+
496,
|
| 658 |
+
496,
|
| 659 |
+
890,
|
| 660 |
+
662
|
| 661 |
+
],
|
| 662 |
+
"page_idx": 3
|
| 663 |
+
},
|
| 664 |
+
{
|
| 665 |
+
"type": "text",
|
| 666 |
+
"text": "3.5. Conditional Trajectory Predictor",
|
| 667 |
+
"text_level": 1,
|
| 668 |
+
"bbox": [
|
| 669 |
+
498,
|
| 670 |
+
674,
|
| 671 |
+
790,
|
| 672 |
+
690
|
| 673 |
+
],
|
| 674 |
+
"page_idx": 3
|
| 675 |
+
},
|
| 676 |
+
{
|
| 677 |
+
"type": "text",
|
| 678 |
+
"text": "The conditional trajectory predictor is similar to the marginal predictor, except that it takes an augmented scene context that includes the future trajectory of the influencer, as shown in Fig. 3. This allows the features of the influencer future trajectory to be extracted and learned in the same way as other context features. The encoded scene feature is used by the trajectory prediction head, which shares the same model as in the marginal predictor, to produce multimodal prediction samples.",
|
| 679 |
+
"bbox": [
|
| 680 |
+
496,
|
| 681 |
+
698,
|
| 682 |
+
890,
|
| 683 |
+
834
|
| 684 |
+
],
|
| 685 |
+
"page_idx": 3
|
| 686 |
+
},
|
| 687 |
+
{
|
| 688 |
+
"type": "text",
|
| 689 |
+
"text": "3.6. Sample Selector",
|
| 690 |
+
"text_level": 1,
|
| 691 |
+
"bbox": [
|
| 692 |
+
500,
|
| 693 |
+
845,
|
| 694 |
+
660,
|
| 695 |
+
861
|
| 696 |
+
],
|
| 697 |
+
"page_idx": 3
|
| 698 |
+
},
|
| 699 |
+
{
|
| 700 |
+
"type": "text",
|
| 701 |
+
"text": "Given the predicted relations of the influencer and the reactor, we predict $N$ samples with confidence scores (or",
|
| 702 |
+
"bbox": [
|
| 703 |
+
500,
|
| 704 |
+
869,
|
| 705 |
+
890,
|
| 706 |
+
900
|
| 707 |
+
],
|
| 708 |
+
"page_idx": 3
|
| 709 |
+
},
|
| 710 |
+
{
|
| 711 |
+
"type": "text",
|
| 712 |
+
"text": "probabilities) for the influencer using the marginal predictor, and for each influencer sample, we predict $N$ samples for the reactor using the conditional predictor. The number of joint samples is thus $N^2$ , and the probability of each joint sample is a product of the marginal probability and the conditional probability. We further reduce the size of the joint samples to $K$ as evaluating each prediction sample for downstream tasks such as risk assessment can be expensive [41]. In M2I, we select the $K$ samples from $N^2$ candidates with the highest joint likelihoods.",
|
| 713 |
+
"bbox": [
|
| 714 |
+
75,
|
| 715 |
+
90,
|
| 716 |
+
472,
|
| 717 |
+
243
|
| 718 |
+
],
|
| 719 |
+
"page_idx": 4
|
| 720 |
+
},
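The selection step can be sketched as follows, assuming the N marginal probabilities and the N x N conditional probabilities are available as arrays (names and shapes are assumptions for illustration).

```python
import numpy as np

def select_top_k_joint(p_influencer, p_reactor_given_inf, k=6):
    """p_influencer: (N,) marginal probabilities of the influencer samples.
    p_reactor_given_inf: (N, N) conditional probabilities; row i holds the
    reactor samples generated for influencer sample i.
    Returns the k most likely (influencer_idx, reactor_idx) pairs and their
    joint probabilities."""
    joint = p_influencer[:, None] * p_reactor_given_inf      # (N, N) joint probabilities
    top = np.argsort(joint, axis=None)[::-1][:k]             # flat indices of the top-k pairs
    inf_idx, rea_idx = np.unravel_index(top, joint.shape)
    return list(zip(inf_idx.tolist(), rea_idx.tolist())), joint[inf_idx, rea_idx]
```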
|
| 721 |
+
{
|
| 722 |
+
"type": "text",
|
| 723 |
+
"text": "3.7. Inference",
|
| 724 |
+
"text_level": 1,
|
| 725 |
+
"bbox": [
|
| 726 |
+
76,
|
| 727 |
+
253,
|
| 728 |
+
187,
|
| 729 |
+
268
|
| 730 |
+
],
|
| 731 |
+
"page_idx": 4
|
| 732 |
+
},
|
| 733 |
+
{
|
| 734 |
+
"type": "text",
|
| 735 |
+
"text": "At inference time, we generate the joint predictions following the procedure illustrated in Fig. 2. First, we call the relation predictor and choose the interaction relation with the highest probability. Second, for the predicted influencer, we generate $N$ trajectory samples using the marginal predictor. Third, for each influencer sample, we generate $N$ samples for the predicted reactor using the conditional predictor. Fourth, we use the sample selector to select $K$ representative samples from $N^2$ candidates. In the case where the predicted relation is none, we use the marginal predictor for both agents to obtain $N^2$ trajectory pairs, and follow the same sample selection step.",
|
| 736 |
+
"bbox": [
|
| 737 |
+
75,
|
| 738 |
+
276,
|
| 739 |
+
472,
|
| 740 |
+
458
|
| 741 |
+
],
|
| 742 |
+
"page_idx": 4
|
| 743 |
+
},
|
| 744 |
+
{
|
| 745 |
+
"type": "text",
|
| 746 |
+
"text": "4. Experiments",
|
| 747 |
+
"text_level": 1,
|
| 748 |
+
"bbox": [
|
| 749 |
+
76,
|
| 750 |
+
473,
|
| 751 |
+
209,
|
| 752 |
+
489
|
| 753 |
+
],
|
| 754 |
+
"page_idx": 4
|
| 755 |
+
},
|
| 756 |
+
{
|
| 757 |
+
"type": "text",
|
| 758 |
+
"text": "In this section, we introduce the dataset benchmark and details of the model, followed by a series of experiments to demonstrate the effectiveness of M2I.",
|
| 759 |
+
"bbox": [
|
| 760 |
+
75,
|
| 761 |
+
500,
|
| 762 |
+
470,
|
| 763 |
+
544
|
| 764 |
+
],
|
| 765 |
+
"page_idx": 4
|
| 766 |
+
},
|
| 767 |
+
{
|
| 768 |
+
"type": "text",
|
| 769 |
+
"text": "4.1. Dataset",
|
| 770 |
+
"text_level": 1,
|
| 771 |
+
"bbox": [
|
| 772 |
+
76,
|
| 773 |
+
555,
|
| 774 |
+
171,
|
| 775 |
+
569
|
| 776 |
+
],
|
| 777 |
+
"page_idx": 4
|
| 778 |
+
},
|
| 779 |
+
{
|
| 780 |
+
"type": "text",
|
| 781 |
+
"text": "We train and validate M2I in the Waymo Open Motion Dataset (WOMD), a large-scale driving dataset collected from realistic traffic scenarios. We focus on the interactive prediction task to predict the joint future trajectories of two interacting agents for the next 8 seconds with 80 time steps, given the observations, including 1.1 seconds of agent states with 11 time steps that may include missing observations and the map state. The dataset includes 204,166 scenarios in the training set and 43,479 examples in the validation set. The dataset provides labels on which agents are likely to interact, yet it does not specify how they interact. During training, we pre-label the interaction type (yield, pass, or none) of the interacting agents according to Sec. 3.3.",
|
| 782 |
+
"bbox": [
|
| 783 |
+
75,
|
| 784 |
+
579,
|
| 785 |
+
472,
|
| 786 |
+
776
|
| 787 |
+
],
|
| 788 |
+
"page_idx": 4
|
| 789 |
+
},
|
| 790 |
+
{
|
| 791 |
+
"type": "text",
|
| 792 |
+
"text": "4.2. Metrics",
|
| 793 |
+
"text_level": 1,
|
| 794 |
+
"bbox": [
|
| 795 |
+
76,
|
| 796 |
+
786,
|
| 797 |
+
173,
|
| 798 |
+
800
|
| 799 |
+
],
|
| 800 |
+
"page_idx": 4
|
| 801 |
+
},
|
| 802 |
+
{
|
| 803 |
+
"type": "text",
|
| 804 |
+
"text": "We follow the WOMD benchmark by using the following metrics: minADE measures the average displacement error between the ground truth future joint trajectory and the closest predicted sample out of $K = 6$ joint samples. This metric is widely adopted since [16] to measure the prediction error against a multi-modal distribution. minFDE",
|
| 805 |
+
"bbox": [
|
| 806 |
+
75,
|
| 807 |
+
809,
|
| 808 |
+
470,
|
| 809 |
+
902
|
| 810 |
+
],
|
| 811 |
+
"page_idx": 4
|
| 812 |
+
},
|
| 813 |
+
{
|
| 814 |
+
"type": "text",
|
| 815 |
+
"text": "measures the final displacement error between the ground truth end positions in the joint trajectory and the closest predicted end positions from $K$ joint samples. Miss rate (MR) measures the percentage of none of the $K$ joint prediction samples are within a given lateral and longitudinal threshold of the ground truth trajectory. The threshold depends on the initial velocity of the predicted agents. More details are described in [10]. Overlap rate (OR) measures the level of scene compliance as the percentage of the predicted trajectory of any agent overlapping with the predicted trajectories of other agents. This metric only considers the most likely joint prediction sample. A lower overlap rate indicates the predictions are more scene compliant. In this paper, we slightly modify the metric definition compared to the original version of WOMD, which considers the overlapping among all objects including the ones not predicted, so that we can measure directly the overlapping between predicted agents. Mean average precision (mAP) measures the area under the precision-recall curve of the prediction samples given their confidence scores. Compared to minADE/minFDE metrics that are only measured against the best sample regardless of its score, mAP measures the quality of confidence score and penalizes false positive predictions [10]. It is the official ranking metric used by WOMD benchmark and we refer to [10] for the implementation.",
|
| 816 |
+
"bbox": [
|
| 817 |
+
496,
|
| 818 |
+
90,
|
| 819 |
+
893,
|
| 820 |
+
470
|
| 821 |
+
],
|
| 822 |
+
"page_idx": 4
|
| 823 |
+
},
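As an illustration of the displacement metrics above, a simplified joint minADE/minFDE computation might look like the following; the array shapes are assumptions, and the official implementation in [10] additionally handles validity masks and the velocity-dependent miss-rate thresholds, which are omitted here.

```python
import numpy as np

def joint_min_ade_fde(pred, gt):
    """pred: (K, A, T, 2) joint prediction samples for A agents over T steps.
    gt: (A, T, 2) ground truth joint trajectory.
    Returns (minADE, minFDE) over the K joint samples, averaging over agents."""
    err = np.linalg.norm(pred - gt[None], axis=-1)   # (K, A, T) per-step displacement errors
    ade = err.mean(axis=(1, 2))                      # (K,) average error of each joint sample
    fde = err[:, :, -1].mean(axis=1)                 # (K,) final-step error of each joint sample
    return ade.min(), fde.min()
```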
|
| 824 |
+
{
|
| 825 |
+
"type": "text",
|
| 826 |
+
"text": "4.3. Model Details",
|
| 827 |
+
"text_level": 1,
|
| 828 |
+
"bbox": [
|
| 829 |
+
500,
|
| 830 |
+
479,
|
| 831 |
+
643,
|
| 832 |
+
494
|
| 833 |
+
],
|
| 834 |
+
"page_idx": 4
|
| 835 |
+
},
|
| 836 |
+
{
|
| 837 |
+
"type": "text",
|
| 838 |
+
"text": "We present the detailed implementation of our model and training procedure in the following sections.",
|
| 839 |
+
"bbox": [
|
| 840 |
+
500,
|
| 841 |
+
503,
|
| 842 |
+
890,
|
| 843 |
+
535
|
| 844 |
+
],
|
| 845 |
+
"page_idx": 4
|
| 846 |
+
},
|
| 847 |
+
{
|
| 848 |
+
"type": "text",
|
| 849 |
+
"text": "4.3.1 Context Encoder",
|
| 850 |
+
"text_level": 1,
|
| 851 |
+
"bbox": [
|
| 852 |
+
500,
|
| 853 |
+
550,
|
| 854 |
+
671,
|
| 855 |
+
564
|
| 856 |
+
],
|
| 857 |
+
"page_idx": 4
|
| 858 |
+
},
|
| 859 |
+
{
|
| 860 |
+
"type": "text",
|
| 861 |
+
"text": "The context encoder leverages both vectorized and rasterized representations to encode traffic context. Vectorized representation takes the traffic context, including observed agent states and map states, as vectors. It is efficient at covering a large spatial space. Rasterized representation draws traffic context on a single image with multiple channels and excels at capturing geometrical information. Both representations have achieved top performance in trajectory prediction benchmarks such as Argoverse and WOMD [6, 10, 12, 14, 15].",
|
| 862 |
+
"bbox": [
|
| 863 |
+
496,
|
| 864 |
+
566,
|
| 865 |
+
890,
|
| 866 |
+
718
|
| 867 |
+
],
|
| 868 |
+
"page_idx": 4
|
| 869 |
+
},
|
| 870 |
+
{
|
| 871 |
+
"type": "text",
|
| 872 |
+
"text": "In M2I, we use the best of both worlds. First, we leverage a vector encoder based on VectorNet [12] that takes observed agent trajectories and lane segments as a set of polylines. Each polyline is a set of vectors that connect neighboring points together. For each polyline, the vector encoder runs an MLP to encode the feature of vectors within the polyline and a graph neural network to encode their dependencies followed by a max-pooling layer to summarize the feature of all the vectors. The polyline features, including agent polyline features and map polyline features, are processed by cross attention to obtain the final agent feature that includes information on the map and nearby agents. We",
|
| 873 |
+
"bbox": [
|
| 874 |
+
496,
|
| 875 |
+
719,
|
| 876 |
+
893,
|
| 877 |
+
901
|
| 878 |
+
],
|
| 879 |
+
"page_idx": 4
|
| 880 |
+
},
|
| 881 |
+
{
|
| 882 |
+
"type": "text",
|
| 883 |
+
"text": "refer to [12] for detailed implementations.",
|
| 884 |
+
"bbox": [
|
| 885 |
+
76,
|
| 886 |
+
90,
|
| 887 |
+
354,
|
| 888 |
+
104
|
| 889 |
+
],
|
| 890 |
+
"page_idx": 5
|
| 891 |
+
},
|
| 892 |
+
{
|
| 893 |
+
"type": "text",
|
| 894 |
+
"text": "In addition to encoding the vectorized feature, we utilize a second encoder to learn features from a rasterized representation. Following [14], we first rasterize the input states into an image with 60 channels, including the position of the agents at each past time frame with the map information. The size of the image is $224 \\times 224$ and each pixel represents an area of $1\\mathrm{m} \\times 1\\mathrm{m}$ . We run a pre-trained VGG16 [36] model as the encoder to obtain the rasterized feature. The output of the context encoder is a concatenation of the vectorized feature and the rasterized feature.",
|
| 895 |
+
"bbox": [
|
| 896 |
+
75,
|
| 897 |
+
106,
|
| 898 |
+
467,
|
| 899 |
+
255
|
| 900 |
+
],
|
| 901 |
+
"page_idx": 5
|
| 902 |
+
},
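A simplified sketch of this rasterization step, assuming one occupancy channel per past time step on a 224 x 224 grid at 1 m per pixel centered on the scene of interest; the map channels and the exact 60-channel layout from [14] are omitted, and all names are assumptions.

```python
import numpy as np

def rasterize_history(agent_xy, center, num_steps=11, size=224, res=1.0):
    """agent_xy: (num_agents, num_steps, 2) observed positions in meters.
    Returns a (num_steps, size, size) occupancy raster with one channel per
    past time step; the full encoder adds map channels for 60 channels total."""
    img = np.zeros((num_steps, size, size), dtype=np.float32)
    half = size * res / 2.0
    for t in range(num_steps):
        for a in range(agent_xy.shape[0]):
            x, y = agent_xy[a, t] - center
            col = int((x + half) / res)
            row = int((y + half) / res)
            if 0 <= row < size and 0 <= col < size:
                img[t, row, col] = 1.0
    return img
```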
|
| 903 |
+
{
|
| 904 |
+
"type": "text",
|
| 905 |
+
"text": "Conditional Context Encoder The context encoder in the conditional trajectory predictor processes the additional influencer future trajectory in the following ways. First, the future trajectory is added to the vectorized representation as an extra vector when running VectorNet. In parallel, we create extra 80 channels on the rasterized representation and draw the $(x,y)$ positions over 80 time steps in the next 8 seconds. We run the pre-trained VGG16 model to encode the augmented image, and combine the output feature with the vectorized feature as the final output.",
|
| 906 |
+
"bbox": [
|
| 907 |
+
75,
|
| 908 |
+
257,
|
| 909 |
+
467,
|
| 910 |
+
407
|
| 911 |
+
],
|
| 912 |
+
"page_idx": 5
|
| 913 |
+
},
|
| 914 |
+
{
|
| 915 |
+
"type": "text",
|
| 916 |
+
"text": "4.3.2 Relation Prediction Head",
|
| 917 |
+
"text_level": 1,
|
| 918 |
+
"bbox": [
|
| 919 |
+
76,
|
| 920 |
+
421,
|
| 921 |
+
307,
|
| 922 |
+
434
|
| 923 |
+
],
|
| 924 |
+
"page_idx": 5
|
| 925 |
+
},
|
| 926 |
+
{
|
| 927 |
+
"type": "text",
|
| 928 |
+
"text": "The relation prediction head has one layer of MLP with one fully connected layer for classification. The MLP has a hidden size of 128, followed by a layer normalization layer and a ReLU activation layer. The output is the logits over three types of relations, as described in Sec. 3.3.",
|
| 929 |
+
"bbox": [
|
| 930 |
+
75,
|
| 931 |
+
441,
|
| 932 |
+
467,
|
| 933 |
+
516
|
| 934 |
+
],
|
| 935 |
+
"page_idx": 5
|
| 936 |
+
},
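A minimal PyTorch-style sketch of this head together with the cross-entropy objective of Eq. (7); the 128-dimensional input feature size mirrors the hidden size stated above, while the class and variable names are assumptions.

```python
import torch.nn as nn

class RelationPredictionHead(nn.Module):
    """One-layer MLP (hidden size 128, LayerNorm + ReLU) followed by a fully
    connected layer producing logits over the three relation types."""
    def __init__(self, feat_dim=128, hidden=128, num_relations=3):
        super().__init__()
        self.mlp = nn.Sequential(nn.Linear(feat_dim, hidden),
                                 nn.LayerNorm(hidden),
                                 nn.ReLU())
        self.classifier = nn.Linear(hidden, num_relations)

    def forward(self, context_feature):                     # (B, feat_dim)
        return self.classifier(self.mlp(context_feature))   # (B, 3) relation logits

# Training objective of Eq. (7):
# loss = nn.CrossEntropyLoss()(head(context_feature), relation_label)
```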
|
| 937 |
+
{
|
| 938 |
+
"type": "text",
|
| 939 |
+
"text": "4.3.3 Trajectory Prediction Head",
|
| 940 |
+
"text_level": 1,
|
| 941 |
+
"bbox": [
|
| 942 |
+
76,
|
| 943 |
+
530,
|
| 944 |
+
321,
|
| 945 |
+
544
|
| 946 |
+
],
|
| 947 |
+
"page_idx": 5
|
| 948 |
+
},
|
| 949 |
+
{
|
| 950 |
+
"type": "text",
|
| 951 |
+
"text": "The trajectory prediction head adopts DenseTNT [15] to generate multi-modal future predictions for its outstanding performance in the marginal prediction benchmarks. It first predicts the distribution of the agent goals as a heatmap, through a lane scoring module that identifies likely lanes to follow, a feature encoding module that uses the attention mechanism to extract features between goals and lanes, and a probability estimation module that predicts the likelihood of goals. Next, the prediction head regresses the full trajectory over the prediction horizon conditioned on the goal. The prediction head can be combined with the context encoder and trained end-to-end.",
|
| 952 |
+
"bbox": [
|
| 953 |
+
75,
|
| 954 |
+
550,
|
| 955 |
+
467,
|
| 956 |
+
729
|
| 957 |
+
],
|
| 958 |
+
"page_idx": 5
|
| 959 |
+
},
|
| 960 |
+
{
|
| 961 |
+
"type": "text",
|
| 962 |
+
"text": "4.3.4 Training Details",
|
| 963 |
+
"text_level": 1,
|
| 964 |
+
"bbox": [
|
| 965 |
+
76,
|
| 966 |
+
744,
|
| 967 |
+
243,
|
| 968 |
+
758
|
| 969 |
+
],
|
| 970 |
+
"page_idx": 5
|
| 971 |
+
},
|
| 972 |
+
{
|
| 973 |
+
"type": "text",
|
| 974 |
+
"text": "At training time, we train each model, including the relation predictor, marginal predictor, and conditional predictor, separately. Each model is trained on the training set from WOMD with a batch size of 64 for 30 epochs on 8 Nvidia RTX 3080 GPUs. The data is batched randomly. We use an Adam optimizer and a learning rate scheduler that decays the learning rate by $30\\%$ every 5 epochs, with an initial value of 1e-3. The hidden size in the model is 128, if not specified. We observe consistent performance",
|
| 975 |
+
"bbox": [
|
| 976 |
+
75,
|
| 977 |
+
763,
|
| 978 |
+
467,
|
| 979 |
+
900
|
| 980 |
+
],
|
| 981 |
+
"page_idx": 5
|
| 982 |
+
},
|
| 983 |
+
{
|
| 984 |
+
"type": "text",
|
| 985 |
+
"text": "over different learning rates and batch sizes. When training the conditional predictor, we use the teacher forcing technique by providing the ground truth future trajectory of the influencer agent.",
|
| 986 |
+
"bbox": [
|
| 987 |
+
496,
|
| 988 |
+
90,
|
| 989 |
+
890,
|
| 990 |
+
151
|
| 991 |
+
],
|
| 992 |
+
"page_idx": 5
|
| 993 |
+
},
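A sketch of the optimization setup described above (Adam, initial learning rate 1e-3, a 30% decay every 5 epochs, 30 epochs, batch size 64) with teacher forcing for the conditional predictor; the model, dataloader, and loss function objects are assumptions.

```python
import torch

# model, train_loader (batch size 64), and loss_fn are assumed to be defined.
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
# A 30% decay every 5 epochs corresponds to multiplying the rate by 0.7.
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.7)

for epoch in range(30):
    for batch in train_loader:
        # Teacher forcing: the conditional context is augmented with the
        # ground-truth influencer future instead of a predicted trajectory.
        pred = model(batch["context"], influencer_future=batch["influencer_future_gt"])
        loss = loss_fn(pred, batch["reactor_future_gt"])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    scheduler.step()
```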
|
| 994 |
+
{
|
| 995 |
+
"type": "text",
|
| 996 |
+
"text": "4.4. Quantitative Results",
|
| 997 |
+
"text_level": 1,
|
| 998 |
+
"bbox": [
|
| 999 |
+
500,
|
| 1000 |
+
155,
|
| 1001 |
+
692,
|
| 1002 |
+
171
|
| 1003 |
+
],
|
| 1004 |
+
"page_idx": 5
|
| 1005 |
+
},
|
| 1006 |
+
{
|
| 1007 |
+
"type": "text",
|
| 1008 |
+
"text": "In Tab. 1, we compare our model with the following baselines, including the top ranked published models on the WOMD interaction prediction challenge leaderboard [1]: Waymo LSTM Baseline [10] is the official baseline provided by the benchmark. It leverages an LSTM encoder to encode observed agent trajectories, and an MLP-based prediction head to generate multiple samples. Waymo Full Baseline [10] is an extended version of the Waymo LSTM Baseline, by leveraging a set of auxiliary encoders to encode context information. SceneTransformer [32] is a transformer-based model that leverages attention to combine features across road graphs and agent interactions both spatially and temporally. The model achieves state-of-the-art performance in the WOMD benchmark in both the marginal prediction task and the interactive prediction task. HeatIRm4 [30] models the agent interaction as a directed edge feature graph and leverages an attention network to extract interaction features. It was the winner of the 2021 WOMD challenge. $\\mathbf{AIR}^2$ [42] adopts a marginal anchorbased model using a raster representation. The model generates joint predictions by combining marginal predictions from each agent. It achieved the top performance at the WOMD challenge. Baseline Marginal is our baseline model that leverages the same marginal predictor as M2I to generate $N$ marginal prediction samples for both agents, without considering their future interactions. When combining the marginal predictions into joint predictions, we take the top $K$ marginal pairs out of $N^2$ options given their joint probabilities as the product of marginal probabilities. This is a common practice to combine marginal predictions into joint predictions, as in [4, 10]. Baseline Joint is our baseline model that jointly predicts the goals and trajectories for both interacting agents, using the same context encoder and the trajectory prediction head as in M2I. As the joint goal space grows exponentially with the number of agents, we can only afford a small number of goal candidates for each agent. To ease the computational complexity, we leverage a marginal predictor to predict the top 80 goals for each agent and obtain $80\\times 80$ goal pairs for joint goal and trajectory prediction. As a result, this baseline tradeoffs prediction accuracy with computational feasibility by using a reduced set of goals.",
|
| 1009 |
+
"bbox": [
|
| 1010 |
+
496,
|
| 1011 |
+
175,
|
| 1012 |
+
890,
|
| 1013 |
+
810
|
| 1014 |
+
],
|
| 1015 |
+
"page_idx": 5
|
| 1016 |
+
},
|
| 1017 |
+
{
|
| 1018 |
+
"type": "text",
|
| 1019 |
+
"text": "4.4.1 Validation Set",
|
| 1020 |
+
"text_level": 1,
|
| 1021 |
+
"bbox": [
|
| 1022 |
+
500,
|
| 1023 |
+
820,
|
| 1024 |
+
651,
|
| 1025 |
+
833
|
| 1026 |
+
],
|
| 1027 |
+
"page_idx": 5
|
| 1028 |
+
},
|
| 1029 |
+
{
|
| 1030 |
+
"type": "text",
|
| 1031 |
+
"text": "We present the results in the interactive validation set in the top half of Tab. 1, where the baseline results are reported as in [10,32]. Our model M2I outperforms both Waymo baselines in terms of all metrics. Compared to the current state-",
|
| 1032 |
+
"bbox": [
|
| 1033 |
+
496,
|
| 1034 |
+
839,
|
| 1035 |
+
890,
|
| 1036 |
+
900
|
| 1037 |
+
],
|
| 1038 |
+
"page_idx": 5
|
| 1039 |
+
},
|
| 1040 |
+
{
|
| 1041 |
+
"type": "table",
|
| 1042 |
+
"img_path": "images/a36c54a4c4f6f1edf1d7893d2c8d35fc3c6eda3c7c3394cc3e2deb9a472880c4.jpg",
|
| 1043 |
+
"table_caption": [],
|
| 1044 |
+
"table_footnote": [],
|
| 1045 |
+
"table_body": "<table><tr><td rowspan=\"2\">Set</td><td rowspan=\"2\">Model</td><td colspan=\"3\">Vehicle (8s)</td><td colspan=\"3\">Pedestrian (8s)</td><td colspan=\"3\">Cyclist (8s)</td><td>All (8s)</td></tr><tr><td>mFDE ↓</td><td>MR ↓</td><td>mAP ↑</td><td>mFDE ↓</td><td>MR ↓</td><td>mAP ↑</td><td>mFDE ↓</td><td>MR ↓</td><td>mAP ↑</td><td>mAP ↑</td></tr><tr><td rowspan=\"6\">Val.</td><td>Waymo LSTM Baseline [10]</td><td>-</td><td>0.88</td><td>0.01</td><td>-</td><td>0.93</td><td>0.02</td><td>-</td><td>0.98</td><td>0.00</td><td>0.01</td></tr><tr><td>Waymo Full Baseline [10]</td><td>6.07</td><td>0.66</td><td>0.08</td><td>4.20</td><td>1.00</td><td>0.00</td><td>6.46</td><td>0.83</td><td>0.01</td><td>0.03</td></tr><tr><td>SceneTransformer [32]</td><td>3.99</td><td>0.49</td><td>0.11</td><td>3.15</td><td>0.62</td><td>0.06</td><td>4.69</td><td>0.71</td><td>0.04</td><td>0.07</td></tr><tr><td>Baseline Marginal</td><td>6.26</td><td>0.60</td><td>0.16</td><td>3.59</td><td>0.63</td><td>0.04</td><td>6.47</td><td>0.76</td><td>0.03</td><td>0.07</td></tr><tr><td>Baseline Joint</td><td>11.31</td><td>0.64</td><td>0.14</td><td>3.44</td><td>0.93</td><td>0.01</td><td>7.16</td><td>0.82</td><td>0.01</td><td>0.05</td></tr><tr><td>M2I</td><td>5.49</td><td>0.55</td><td>0.18</td><td>3.61</td><td>0.60</td><td>0.06</td><td>6.26</td><td>0.73</td><td>0.04</td><td>0.09</td></tr><tr><td rowspan=\"5\">Test</td><td>Waymo LSTM Baseline [10]</td><td>12.40</td><td>0.87</td><td>0.01</td><td>6.85</td><td>0.92</td><td>0.00</td><td>10.84</td><td>0.97</td><td>0.00</td><td>0.00</td></tr><tr><td>HeatIRm4 [30]</td><td>7.20</td><td>0.80</td><td>0.07</td><td>4.06</td><td>0.80</td><td>0.05</td><td>6.69</td><td>0.85</td><td>0.01</td><td>0.04</td></tr><tr><td>AIR² [42]</td><td>5.00</td><td>0.64</td><td>0.10</td><td>3.68</td><td>0.71</td><td>0.04</td><td>5.47</td><td>0.81</td><td>0.04</td><td>0.05</td></tr><tr><td>SceneTransformer [32]</td><td>4.08</td><td>0.50</td><td>0.10</td><td>3.19</td><td>0.62</td><td>0.05</td><td>4.65</td><td>0.70</td><td>0.04</td><td>0.06</td></tr><tr><td>M2I</td><td>5.65</td><td>0.57</td><td>0.16</td><td>3.73</td><td>0.60</td><td>0.06</td><td>6.16</td><td>0.74</td><td>0.03</td><td>0.08</td></tr></table>",
|
| 1046 |
+
"bbox": [
|
| 1047 |
+
88,
|
| 1048 |
+
88,
|
| 1049 |
+
883,
|
| 1050 |
+
277
|
| 1051 |
+
],
|
| 1052 |
+
"page_idx": 6
|
| 1053 |
+
},
|
| 1054 |
+
{
|
| 1055 |
+
"type": "text",
|
| 1056 |
+
"text": "of-the-art model SceneTransformer, M2I achieves a better $mAP$ , the official ranking metric, over vehicles, and a better miss rate over pedestrians. Although M2I has higher minFDE errors, it has improved the mAP over all agents (the most right column) by a large margin, meaning our model generates a more accurate distribution using its predicted confidence scores and outputs fewer false positive predictions. In addition, as our proposed approach does not assume a specific prediction model, it could leverage SceneTransformer as the context encoder to achieve better minFDE, and we defer it as future work. When compared with our own baselines that share the same context encoder and prediction head, M2I outperforms the marginal predictor, which assumes independence between two agents, and a joint predictor, which only affords a small set of goal candidates due to computational constraints.",
|
| 1057 |
+
"bbox": [
|
| 1058 |
+
75,
|
| 1059 |
+
356,
|
| 1060 |
+
472,
|
| 1061 |
+
598
|
| 1062 |
+
],
|
| 1063 |
+
"page_idx": 6
|
| 1064 |
+
},
|
| 1065 |
+
{
|
| 1066 |
+
"type": "text",
|
| 1067 |
+
"text": "4.4.2 Testing Set",
|
| 1068 |
+
"text_level": 1,
|
| 1069 |
+
"bbox": [
|
| 1070 |
+
76,
|
| 1071 |
+
604,
|
| 1072 |
+
209,
|
| 1073 |
+
619
|
| 1074 |
+
],
|
| 1075 |
+
"page_idx": 6
|
| 1076 |
+
},
|
| 1077 |
+
{
|
| 1078 |
+
"type": "text",
|
| 1079 |
+
"text": "We show the results in the interactive test set in the bottom half of Tab. 1. For a fair comparison, we use the numbers reported on the official benchmark website [1] and only include the published models. Similar to the observations from the validation set, we observe that M2I improves mAP metrics by a large margin, compared to past WOMD interaction prediction challenge winners [30,42] and the existing state-of-the-art model [32].",
|
| 1080 |
+
"bbox": [
|
| 1081 |
+
75,
|
| 1082 |
+
625,
|
| 1083 |
+
468,
|
| 1084 |
+
744
|
| 1085 |
+
],
|
| 1086 |
+
"page_idx": 6
|
| 1087 |
+
},
|
| 1088 |
+
{
|
| 1089 |
+
"type": "text",
|
| 1090 |
+
"text": "4.5. Ablation Study",
|
| 1091 |
+
"text_level": 1,
|
| 1092 |
+
"bbox": [
|
| 1093 |
+
76,
|
| 1094 |
+
755,
|
| 1095 |
+
230,
|
| 1096 |
+
771
|
| 1097 |
+
],
|
| 1098 |
+
"page_idx": 6
|
| 1099 |
+
},
|
| 1100 |
+
{
|
| 1101 |
+
"type": "text",
|
| 1102 |
+
"text": "We present ablation studies on the relation predictor, conditional predictor, and generalization to other predictors.",
|
| 1103 |
+
"bbox": [
|
| 1104 |
+
76,
|
| 1105 |
+
775,
|
| 1106 |
+
468,
|
| 1107 |
+
806
|
| 1108 |
+
],
|
| 1109 |
+
"page_idx": 6
|
| 1110 |
+
},
|
| 1111 |
+
{
|
| 1112 |
+
"type": "text",
|
| 1113 |
+
"text": "4.5.1 Relation Prediction",
|
| 1114 |
+
"text_level": 1,
|
| 1115 |
+
"bbox": [
|
| 1116 |
+
76,
|
| 1117 |
+
819,
|
| 1118 |
+
266,
|
| 1119 |
+
833
|
| 1120 |
+
],
|
| 1121 |
+
"page_idx": 6
|
| 1122 |
+
},
|
| 1123 |
+
{
|
| 1124 |
+
"type": "text",
|
| 1125 |
+
"text": "We measure the performance of our relation predictor on the validation dataset and observe an accuracy of $90.09\\%$ . We verify the significance of an accurate relation predictor by comparing the performance of vehicle trajectory predictions",
|
| 1126 |
+
"bbox": [
|
| 1127 |
+
76,
|
| 1128 |
+
839,
|
| 1129 |
+
468,
|
| 1130 |
+
901
|
| 1131 |
+
],
|
| 1132 |
+
"page_idx": 6
|
| 1133 |
+
},
|
| 1134 |
+
{
|
| 1135 |
+
"type": "table",
|
| 1136 |
+
"img_path": "images/f5ebbcb926cfd74473356078dbc74ce36898dacbd64cf41df4111db49a8d2b2d.jpg",
|
| 1137 |
+
"table_caption": [
|
| 1138 |
+
"Table 1. Joint metrics on the interactive validation and test set. The best performed metrics are bolded and the grey cells indicate the ranking metric used by the WOMD benchmark. M2I outperforms both Waymo baselines and challenge winners. Compared to the current state-of-the-art model SceneTransformer, it improves the mAP metric by a large margin over vehicles and all agents, demonstrating its advantage in learning a more accurate probability distribution and producing fewer false positive predictions."
|
| 1139 |
+
],
|
| 1140 |
+
"table_footnote": [],
|
| 1141 |
+
"table_body": "<table><tr><td>Model</td><td>minADE ↓</td><td>minFDE ↓</td><td>MR ↓</td><td>mAP ↑</td></tr><tr><td>M2I Marginal</td><td>1.70</td><td>3.45</td><td>0.23</td><td>0.30</td></tr><tr><td>M2I Conditional GT</td><td>1.46</td><td>2.43</td><td>0.12</td><td>0.41</td></tr><tr><td>M2I Conditional P1</td><td>1.75</td><td>3.49</td><td>0.25</td><td>0.26</td></tr></table>",
|
| 1142 |
+
"bbox": [
|
| 1143 |
+
500,
|
| 1144 |
+
356,
|
| 1145 |
+
890,
|
| 1146 |
+
425
|
| 1147 |
+
],
|
| 1148 |
+
"page_idx": 6
|
| 1149 |
+
},
|
| 1150 |
+
{
|
| 1151 |
+
"type": "text",
|
| 1152 |
+
"text": "Table 2. Comparison between the marginal predictor and the conditional predictor over marginal metrics for vehicle reactors at 8s.",
|
| 1153 |
+
"bbox": [
|
| 1154 |
+
498,
|
| 1155 |
+
434,
|
| 1156 |
+
890,
|
| 1157 |
+
464
|
| 1158 |
+
],
|
| 1159 |
+
"page_idx": 6
|
| 1160 |
+
},
|
| 1161 |
+
{
|
| 1162 |
+
"type": "text",
|
| 1163 |
+
"text": "using the predicted relations and using the ground truth relations, and observe a gap of $3.05\\%$ in terms of mAP at 8s.",
|
| 1164 |
+
"bbox": [
|
| 1165 |
+
498,
|
| 1166 |
+
474,
|
| 1167 |
+
890,
|
| 1168 |
+
506
|
| 1169 |
+
],
|
| 1170 |
+
"page_idx": 6
|
| 1171 |
+
},
|
| 1172 |
+
{
|
| 1173 |
+
"type": "text",
|
| 1174 |
+
"text": "4.5.2 Conditional Prediction",
|
| 1175 |
+
"text_level": 1,
|
| 1176 |
+
"bbox": [
|
| 1177 |
+
498,
|
| 1178 |
+
517,
|
| 1179 |
+
714,
|
| 1180 |
+
531
|
| 1181 |
+
],
|
| 1182 |
+
"page_idx": 6
|
| 1183 |
+
},
|
| 1184 |
+
{
|
| 1185 |
+
"type": "text",
|
| 1186 |
+
"text": "We validate the effectiveness of our conditional predictor by comparing its performance against the marginal predictor (M2IMarginal) for vehicle reactor trajectory prediction. The results are summarized in Tab. 2. When the conditional predictor takes the ground truth future trajectory of the influencer agent (c.f. M2I Conditional GT), it generates predictions for the reactor agent with better performance across all metrics. This validates our hypothesis on the dependence between the influencer trajectory and the reactor trajectory. As the ground truth trajectories are not available at inference time, we present the prediction results when the conditional predictor takes the best predicted influencer trajectory as M2I Conditional P1. It is not surprising to see that the performance is inferior to the marginal predictor results, due to errors in influencer prediction. However, as we show in Tab. 1, our model is able to outperform the marginal baseline model by including more than one sample from the influencer and selecting the most likely joint samples.",
|
| 1187 |
+
"bbox": [
|
| 1188 |
+
496,
|
| 1189 |
+
537,
|
| 1190 |
+
890,
|
| 1191 |
+
811
|
| 1192 |
+
],
|
| 1193 |
+
"page_idx": 6
|
| 1194 |
+
},
|
| 1195 |
+
{
|
| 1196 |
+
"type": "text",
|
| 1197 |
+
"text": "4.5.3 Generalizing to Other Predictors",
|
| 1198 |
+
"text_level": 1,
|
| 1199 |
+
"bbox": [
|
| 1200 |
+
498,
|
| 1201 |
+
819,
|
| 1202 |
+
784,
|
| 1203 |
+
835
|
| 1204 |
+
],
|
| 1205 |
+
"page_idx": 6
|
| 1206 |
+
},
|
| 1207 |
+
{
|
| 1208 |
+
"type": "text",
|
| 1209 |
+
"text": "We demonstrate that our proposed approach can be extended to other existing predictor models to validate its generalizability. In this experiment, we replace the context encoder with VectorNet [12] and the prediction head with",
|
| 1210 |
+
"bbox": [
|
| 1211 |
+
496,
|
| 1212 |
+
839,
|
| 1213 |
+
890,
|
| 1214 |
+
901
|
| 1215 |
+
],
|
| 1216 |
+
"page_idx": 6
|
| 1217 |
+
},
|
| 1218 |
+
{
|
| 1219 |
+
"type": "image",
|
| 1220 |
+
"img_path": "images/0f2a1b716a2e7bdc6bae55ae634d2a8fc3ec97755510dfa7154794537c595e6c.jpg",
|
| 1221 |
+
"image_caption": [
|
| 1222 |
+
"Figure 4. Example prediction using Baseline Marginal (left) and M2I (right). The marginal predictor produces overlapping and inaccurate predictions. M2I successfully identifies the influencer and reactor (the predicted relation type is annotated next to the current position of each agent) in a challenging interactive scene and achieves better prediction accuracy and scene compliance."
|
| 1223 |
+
],
|
| 1224 |
+
"image_footnote": [],
|
| 1225 |
+
"bbox": [
|
| 1226 |
+
148,
|
| 1227 |
+
95,
|
| 1228 |
+
488,
|
| 1229 |
+
281
|
| 1230 |
+
],
|
| 1231 |
+
"page_idx": 7
|
| 1232 |
+
},
|
| 1233 |
+
{
|
| 1234 |
+
"type": "image",
|
| 1235 |
+
"img_path": "images/d094c14edfe5400d71daf66f05e905a91a40051bb171769aa1761ce9d7447a0a.jpg",
|
| 1236 |
+
"image_caption": [],
|
| 1237 |
+
"image_footnote": [],
|
| 1238 |
+
"bbox": [
|
| 1239 |
+
501,
|
| 1240 |
+
95,
|
| 1241 |
+
843,
|
| 1242 |
+
281
|
| 1243 |
+
],
|
| 1244 |
+
"page_idx": 7
|
| 1245 |
+
},
|
| 1246 |
+
{
|
| 1247 |
+
"type": "table",
|
| 1248 |
+
"img_path": "images/fd91fb40026c8297481a5c8fa6300508a5ea9fb30ee4c1f79163a532e0cef1ef.jpg",
|
| 1249 |
+
"table_caption": [],
|
| 1250 |
+
"table_footnote": [],
|
| 1251 |
+
"table_body": "<table><tr><td>Model</td><td>minADE ↓</td><td>minFDE ↓</td><td>OR ↓</td><td>mAP ↑</td></tr><tr><td>TNT Marginal</td><td>3.43</td><td>8.72</td><td>0.42</td><td>0.10</td></tr><tr><td>TNT Joint</td><td>5.30</td><td>14.07</td><td>0.34</td><td>0.13</td></tr><tr><td>TNT M2I</td><td>3.38</td><td>8.46</td><td>0.20</td><td>0.14</td></tr></table>",
|
| 1252 |
+
"bbox": [
|
| 1253 |
+
94,
|
| 1254 |
+
349,
|
| 1255 |
+
452,
|
| 1256 |
+
417
|
| 1257 |
+
],
|
| 1258 |
+
"page_idx": 7
|
| 1259 |
+
},
|
| 1260 |
+
{
|
| 1261 |
+
"type": "text",
|
| 1262 |
+
"text": "Table 3. Joint metrics on the interactive validation set for vehicles at 8s. We replace the context encoder and the prediction head in M2I and baselines with a different model. We observe a similar trend in performance improvement, especially over OR and mAP, which validates the generalizability of our proposed approach.",
|
| 1263 |
+
"bbox": [
|
| 1264 |
+
75,
|
| 1265 |
+
428,
|
| 1266 |
+
470,
|
| 1267 |
+
500
|
| 1268 |
+
],
|
| 1269 |
+
"page_idx": 7
|
| 1270 |
+
},
|
| 1271 |
+
{
|
| 1272 |
+
"type": "text",
|
| 1273 |
+
"text": "TNT [46], which is an anchor-based goal-conditioned prediction model, and obtain a variant of M2I named TNT M2I. We compare this variant with a marginal predictor baseline (TNT Marginal) and a joint predictor baseline (TNT Joint) using the same VectorNet and TNT backbones.",
|
| 1274 |
+
"bbox": [
|
| 1275 |
+
75,
|
| 1276 |
+
510,
|
| 1277 |
+
468,
|
| 1278 |
+
585
|
| 1279 |
+
],
|
| 1280 |
+
"page_idx": 7
|
| 1281 |
+
},
|
| 1282 |
+
{
|
| 1283 |
+
"type": "text",
|
| 1284 |
+
"text": "The results, summarized in Tab. 3, show that our approach consistently improves all metrics, especially OR and mAP, by a large margin when using a different predictor model. The improvements indicate that our proposed approach generalizes to other predictors and generates scene compliant and accurate future trajectories.",
|
| 1285 |
+
"bbox": [
|
| 1286 |
+
75,
|
| 1287 |
+
585,
|
| 1288 |
+
470,
|
| 1289 |
+
676
|
| 1290 |
+
],
|
| 1291 |
+
"page_idx": 7
|
| 1292 |
+
},
|
| 1293 |
+
{
|
| 1294 |
+
"type": "text",
|
| 1295 |
+
"text": "4.6. Qualitative Results",
|
| 1296 |
+
"text_level": 1,
|
| 1297 |
+
"bbox": [
|
| 1298 |
+
76,
|
| 1299 |
+
686,
|
| 1300 |
+
261,
|
| 1301 |
+
702
|
| 1302 |
+
],
|
| 1303 |
+
"page_idx": 7
|
| 1304 |
+
},
|
| 1305 |
+
{
|
| 1306 |
+
"type": "text",
|
| 1307 |
+
"text": "We present a challenging interactive scenario<sup>1</sup> in Fig. 4, and visualize the most likely prediction sample from a marginal baseline and M2I. In this scenario, the red agent is yielding to the blue agent who is making a U-turn. The marginal predictor on the left fails to capture the interaction and predicts overlapping trajectories. On the other hand, M2I successfully identifies the underlying interaction relation, and predicts an accurate trajectory for the influencer and an accurate reactor trajectory that reacts to the predicted influencer trajectory. As a result, M2I achieves better prediction accuracy and scene compliance.",
|
| 1308 |
+
"bbox": [
|
| 1309 |
+
75,
|
| 1310 |
+
710,
|
| 1311 |
+
470,
|
| 1312 |
+
878
|
| 1313 |
+
],
|
| 1314 |
+
"page_idx": 7
|
| 1315 |
+
},
|
| 1316 |
+
{
|
| 1317 |
+
"type": "text",
|
| 1318 |
+
"text": "5. Conclusion",
|
| 1319 |
+
"text_level": 1,
|
| 1320 |
+
"bbox": [
|
| 1321 |
+
500,
|
| 1322 |
+
351,
|
| 1323 |
+
619,
|
| 1324 |
+
367
|
| 1325 |
+
],
|
| 1326 |
+
"page_idx": 7
|
| 1327 |
+
},
|
| 1328 |
+
{
|
| 1329 |
+
"type": "text",
|
| 1330 |
+
"text": "In conclusion, we propose a simple but effective joint prediction framework M2I through marginal and conditional predictors, by exploiting the factorized relations between interacting agents. M2I uses a modular encoder-decoder architecture, allowing it to choose from a variety of context encoders and prediction heads. Experiments on the interactive Waymo Open Motion Dataset benchmark show that our framework achieves state-of-the-art performance. In the ablation study, we show the generalization of our framework using a different predictor model.",
|
| 1331 |
+
"bbox": [
|
| 1332 |
+
496,
|
| 1333 |
+
377,
|
| 1334 |
+
893,
|
| 1335 |
+
529
|
| 1336 |
+
],
|
| 1337 |
+
"page_idx": 7
|
| 1338 |
+
},
|
| 1339 |
+
{
|
| 1340 |
+
"type": "text",
|
| 1341 |
+
"text": "Limitations We identify the following limitations. First, there exists a gap when comparing our model to the state-of-the-art in terms of the minFDE metric, indicating that our approach still has room for improvement. Thanks to its modular design, we plan to extend M2I to use SceneTransformer [32] as the context encoder and fill the gap. Second, the performance of M2I heavily depends on the size of interactive training data, especially when training the relation predictor and the conditional trajectory predictor. Looking at Tab. 1, we see that our approach improves the mAP metrics by a large margin on vehicles because of sufficient vehicle interactions in the training data, but the improvement is more negligible over the other two types due to lack of interactive scenarios involving pedestrians and cyclists. Finally, M2I assumes no mutual influence between interacting agents, allowing it to decouple joint agent distributions into marginal and conditional distributions. While we have observed an obvious influencer according to our heuristics in almost all the interactive scenarios in the Waymo Open Motion Dataset, we defer predicting for more complicated scenarios involving mutual influence (and loopy influence for more than two agents) as future work.",
|
| 1342 |
+
"bbox": [
|
| 1343 |
+
496,
|
| 1344 |
+
542,
|
| 1345 |
+
893,
|
| 1346 |
+
875
|
| 1347 |
+
],
|
| 1348 |
+
"page_idx": 7
|
| 1349 |
+
},
|
| 1350 |
+
{
|
| 1351 |
+
"type": "page_footnote",
|
| 1352 |
+
"text": "1More examples can be found in Appendix B.",
|
| 1353 |
+
"bbox": [
|
| 1354 |
+
94,
|
| 1355 |
+
886,
|
| 1356 |
+
339,
|
| 1357 |
+
901
|
| 1358 |
+
],
|
| 1359 |
+
"page_idx": 7
|
| 1360 |
+
},
|
| 1361 |
+
{
|
| 1362 |
+
"type": "text",
|
| 1363 |
+
"text": "References",
|
| 1364 |
+
"text_level": 1,
|
| 1365 |
+
"bbox": [
|
| 1366 |
+
78,
|
| 1367 |
+
89,
|
| 1368 |
+
173,
|
| 1369 |
+
104
|
| 1370 |
+
],
|
| 1371 |
+
"page_idx": 8
|
| 1372 |
+
},
|
| 1373 |
+
{
|
| 1374 |
+
"type": "list",
|
| 1375 |
+
"sub_type": "ref_text",
|
| 1376 |
+
"list_items": [
|
| 1377 |
+
"[1] Waymo open motion dataset interaction prediction. https://waymo.com/open/challenges/2021/interaction-prediction/, 2021. [Online; Accessed November 16th 2021]. 6, 7",
|
| 1378 |
+
"[2] Alexandre Alahi, Kratarth Goel, Vignesh Ramanathan, Alexandre Robicquet, Li Fei-Fei, and Silvio Savarese. Social LSTM: Human trajectory prediction in crowded spaces. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 961–971, 2016. 2",
|
| 1379 |
+
"[3] Sergio Casas, Cole Gulino, Renjie Liao, and Raquel Urtasun. SpAGNN: Spatially-aware graph neural networks for relational behavior forecasting from sensor data. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages 9491-9497. IEEE, 2020. 2",
|
| 1380 |
+
"[4] Sergio Casas, Cole Gulino, Simon Suo, Katie Luo, Renjie Liao, and Raquel Urtasun. Implicit latent variable model for scene-consistent motion forecasting. In Proceedings of the European Conference on Computer Vision (ECCV). Springer, 2020. 2, 6",
|
| 1381 |
+
"[5] Yuning Chai, Benjamin Sapp, Mayank Bansal, and Dragomir Anguelov. MultiPath: Multiple probabilistic anchor trajectory hypotheses for behavior prediction. In Conference on Robot Learning (CoRL), 2019. 1, 2",
|
| 1382 |
+
"[6] Ming-Fang Chang, John Lambert, Patsorn Sangkloy, Jagjeet Singh, Slawomir Bak, Andrew Hartnett, De Wang, Peter Carr, Simon Lucey, Deva Ramanan, et al. Argoverse: 3d tracking and forecasting with rich maps. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8748-8757, 2019. 2, 5",
|
| 1383 |
+
"[7] Henggang Cui, Vladan Radosavljevic, Fang-Chieh Chou, Tsung-Han Lin, Thi Nguyen, Tzu-Kuo Huang, Jeff Schneider, and Nemanja Djuric. Multimodal trajectory predictions for autonomous driving using deep convolutional networks. In 2019 International Conference on Robotics and Automation (ICRA), pages 2090-2096. IEEE, 2019. 4",
|
| 1384 |
+
"[8] Shengzhe Dai, Zhiheng Li, Li Li, Nanning Zheng, and Shuofeng Wang. A flexible and explainable vehicle motion prediction and inference framework combining semi-supervised aog and st-lstm. IEEE Transactions on Intelligent Transportation Systems, 2020. 2",
|
| 1385 |
+
"[9] Nachiket Deo and Mohan M Trivedi. Multi-modal trajectory prediction of surrounding vehicles with maneuver based lstms. In 2018 IEEE Intelligent Vehicles Symposium (IV), pages 1179-1184. IEEE, 2018. 2",
|
| 1386 |
+
"[10] Scott Ettinger, Shuyang Cheng, Benjamin Caine, Chenxi Liu, Hang Zhao, Sabeek Pradhan, Yuning Chai, Ben Sapp, Charles R Qi, Yin Zhou, et al. Large scale interactive motion forecasting for autonomous driving: The waymo open motion dataset. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9710-9719, 2021. 2, 3, 4, 5, 6, 7",
|
| 1387 |
+
"[11] Liangji Fang, Qinhong Jiang, Jianping Shi, and Bolei Zhou. TPNet: Trajectory proposal network for motion prediction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6797-6806, 2020. 2"
|
| 1388 |
+
],
|
| 1389 |
+
"bbox": [
|
| 1390 |
+
78,
|
| 1391 |
+
116,
|
| 1392 |
+
470,
|
| 1393 |
+
900
|
| 1394 |
+
],
|
| 1395 |
+
"page_idx": 8
|
| 1396 |
+
},
|
| 1397 |
+
{
|
| 1398 |
+
"type": "list",
|
| 1399 |
+
"sub_type": "ref_text",
|
| 1400 |
+
"list_items": [
|
| 1401 |
+
"[12] Jiyang Gao, Chen Sun, Hang Zhao, Yi Shen, Dragomir Anguelov, Congcong Li, and Cordelia Schmid. VectorNet: Encoding hd maps and agent dynamics from vectorized representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11525-11533, 2020. 1, 4, 5, 6, 7",
|
| 1402 |
+
"[13] Thomas Gilles, Stefano Sabatini, Dzmitry Tsishkou, Bogdan Stanciulescu, and Fabien Moutarde. GOHOME: Graph-oriented heatmap output for future motion estimation. arXiv preprint arXiv:2109.01827, 2021. 2",
|
| 1403 |
+
"[14] Thomas Gilles, Stefano Sabatini, Dzmitry Tsishkou, Bogdan Stanciulescu, and Fabien Moutarde. HOME: Heatmap output for future motion estimation. In IEEE International Intelligent Transportation Systems Conference (ITSC), pages 500-507. IEEE, 2021. 4, 5, 6",
|
| 1404 |
+
"[15] Junru Gu, Chen Sun, and Hang Zhao. DenseTNT: End-to-end trajectory prediction from dense goal sets. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15303-15312, 2021. 1, 2, 5, 6, 11",
|
| 1405 |
+
"[16] Agrim Gupta, Justin Johnson, Li Fei-Fei, Silvio Savarese, and Alexandre Alahi. Social GAN: Socially acceptable trajectories with generative adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2255–2264, 2018. 2, 5",
|
| 1406 |
+
"[17] Dirk Helbing and Peter Molnar. Social force model for pedestrian dynamics. Physical review E, 51(5):4282, 1995. 2",
|
| 1407 |
+
"[18] Xin Huang, Stephen G McGill, Jonathan A DeCastro, Luke Fletcher, John J Leonard, Brian C Williams, and Guy Rosman. DiversityGAN: Diversity-aware vehicle motion prediction via latent semantic sampling. IEEE Robotics and Automation Letters, 5(4):5089-5096, 2020. 2",
|
| 1408 |
+
"[19] Xin Huang, Guy Rosman, Igor Gilitschenski, Ashkan Jasour, Stephen G McGill, John J Leonard, and Brian C Williams. HYPER: Learned hybrid trajectory prediction via factored inference and adaptive sampling. In International Conference on Robotics and Automation (ICRA), 2022. 2",
|
| 1409 |
+
"[20] Siddhesh Khandelwal, William Qi, Jagjeet Singh, Andrew Hartnett, and Deva Ramanan. What-if motion prediction for autonomous driving. arXiv preprint arXiv:2008.10587, 2020.3",
|
| 1410 |
+
"[21] ByeoungDo Kim, Seong Hyeon Park, Seokhwan Lee, Elbek Khoshimjonov, Dongsuk Kum, Junsoo Kim, Jeong Soo Kim, and Jun Won Choi. LaPred: Lane-aware prediction of multimodal future trajectories of dynamic agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14636-14645, 2021. 2",
|
| 1411 |
+
"[22] Vineet Kosaraju, Amir Sadeghian, Roberto Martin-Martin, Ian Reid, Hamid Rezatofighi, and Silvio Savarese. SocialBiGAT: Multimodal trajectory forecasting using bicycle-gan and graph attention networks. Advances in Neural Information Processing Systems, 32, 2019. 2",
|
| 1412 |
+
"[23] Sumit Kumar, Yiming Gu, Jerrick Hoang, Galen Clark Haynes, and Micol Marchetti-Bowick. Interaction-based trajectory prediction over a hybrid traffic graph. In 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 5530-5535. IEEE, 2020. 2, 3"
|
| 1413 |
+
],
|
| 1414 |
+
"bbox": [
|
| 1415 |
+
501,
|
| 1416 |
+
92,
|
| 1417 |
+
893,
|
| 1418 |
+
900
|
| 1419 |
+
],
|
| 1420 |
+
"page_idx": 8
|
| 1421 |
+
},
|
| 1422 |
+
{
|
| 1423 |
+
"type": "list",
|
| 1424 |
+
"sub_type": "ref_text",
|
| 1425 |
+
"list_items": [
|
| 1426 |
+
"[24] Yen-Ling Kuo, Xin Huang, Andrei Barbu, Stephen G McGill, Boris Katz, John J Leonard, and Guy Rosman. Trajectory prediction with linguistic representations. In International Conference on Robotics and Automation (ICRA), 2022. 2",
|
| 1427 |
+
"[25] Donsuk Lee, Yiming Gu, Jerrick Hoang, and Micol Marchetti-Bowick. Joint interaction and trajectory prediction for autonomous driving using graph neural networks. arXiv preprint arXiv:1912.07882, 2019. 2",
|
| 1428 |
+
"[26] Namhoon Lee, Wongun Choi, Paul Vernaza, Christopher B Choy, Philip HS Torr, and Manmohan Chandraker. DESIRE: Distant future prediction in dynamic scenes with interacting agents. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 336-345, 2017. 1, 2",
|
| 1429 |
+
"[27] Lingyun Luke Li, Bin Yang, Ming Liang, Wenyuan Zeng, Mengye Ren, Sean Segal, and Raquel Urtasun. End-to-end contextual perception and prediction with interaction transformer. In 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 5784-5791. IEEE, 2020. 2",
|
| 1430 |
+
"[28] Ming Liang, Bin Yang, Rui Hu, Yun Chen, Renjie Liao, Song Feng, and Raquel Urtasun. Learning lane graph representations for motion forecasting. In European Conference on Computer Vision, pages 541-556. Springer, 2020. 1, 4",
|
| 1431 |
+
"[29] Karttikeya Mangalam, Harshayu Girase, Shreyas Agarwal, Kuan-Hui Lee, Ehsan Adeli, Jitendra Malik, and Adrien Gaidon. It is not the journey but the destination: Endpoint conditioned trajectory prediction. In Proceedings of the European Conference on Computer Vision (ECCV), August 2020. 2",
|
| 1432 |
+
"[30] Xiaoyu Mo, Zhiyu Huang, and Chen Lv. Multi-modal interactive agent trajectory prediction using heterogeneous edge-enhanced graph attention network. Workshop on Autonomous Driving, CVPR, 2021. 6, 7",
|
| 1433 |
+
"[31] Abduallah Mohamed, Kun Qian, Mohamed Elhoseiny, and Christian Claudel. Social-STGCNN: A social spatiotemporal graph convolutional neural network for human trajectory prediction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14424-14432, 2020. 2",
|
| 1434 |
+
"[32] Jiquan Ngiam, Benjamin Caine, Vijay Vasudevan, Zhengdong Zhang, Hao-Tien Lewis Chiang, Jeffrey Ling, Rebecca Roelofs, Alex Bewley, Chenxi Liu, Ashish Venugopal, et al. Scene Transformer: A unified architecture for predicting multiple agent trajectories. In International Conference on Learning Representations (ICLR), 2022. 2, 6, 7, 8",
|
| 1435 |
+
"[33] Kamra Nitin, Zhu Hao, Trivedi Dweep, Zhang Ming, and Liu Yan. Multi-agent trajectory prediction with fuzzy query attention. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2",
|
| 1436 |
+
"[34] Nicholas Rhinehart, Rowan McAllister, Kris Kitani, and Sergey Levine. PRECOG: Prediction conditioned on goals in visual multi-agent settings. In Proceedings of the IEEE International Conference on Computer Vision, pages 2821-2830, 2019. 2",
|
| 1437 |
+
"[35] Tim Salzmann, Boris Ivanovic, Punarjay Chakravarty, and Marco Pavone. Trajectory: Multi-agent generative trajec"
|
| 1438 |
+
],
|
| 1439 |
+
"bbox": [
|
| 1440 |
+
78,
|
| 1441 |
+
90,
|
| 1442 |
+
468,
|
| 1443 |
+
901
|
| 1444 |
+
],
|
| 1445 |
+
"page_idx": 9
|
| 1446 |
+
},
|
| 1447 |
+
{
|
| 1448 |
+
"type": "list",
|
| 1449 |
+
"sub_type": "ref_text",
|
| 1450 |
+
"list_items": [
|
| 1451 |
+
"tory forecasting with heterogeneous data for control. In Proceedings of the European Conference on Computer Vision (ECCV). Springer, 2020. 2, 3",
|
| 1452 |
+
"[36] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 6",
|
| 1453 |
+
"[37] Haoran Song, Di Luan, Wenchao Ding, Michael Y Wang, and Qifeng Chen. Learning to predict vehicle trajectories with model-based planning. In Conference on Robot Learning (CoRL), 2021. 2",
|
| 1454 |
+
"[38] Charlie Tang and Russ R Salakhutdinov. Multiple futures prediction. In Advances in Neural Information Processing Systems, pages 15424-15434, 2019. 1, 2",
|
| 1455 |
+
"[39] Ekaterina Tolstaya, Reza Mahjourian, Carlton Downey, Balakrishnan Vadarajan, Benjamin Sapp, and Dragomir Anguelov. Identifying driver interactions via conditional behavior prediction. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 3473-3479. IEEE, 2021. 2, 3",
|
| 1456 |
+
"[40] T. van der Heiden, N. S. Nagaraja, C. Weiss, and E. Gavves. SafeCritic: Collision-aware trajectory prediction. In *British Machine Vision Conference Workshop*, 2019. 2",
|
| 1457 |
+
"[41] Allen Wang, Xin Huang, Ashkan Jasour, and Brian Williams. Fast risk assessment for autonomous vehicles using learned models of agent futures. In Robotics: Science and Systems, 2020. 5",
|
| 1458 |
+
"[42] David Wu and Yunan Wu. $\\mathrm{Air}^2$ for interaction prediction. Workshop on Autonomous Driving, CVPR, 2021. 6, 7",
|
| 1459 |
+
"[43] Kota Yamaguchi, Alexander C Berg, Luis E Ortiz, and Tamara L Berg. Who are you with and where are you going? In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1345-1352, 2011. 2",
|
| 1460 |
+
"[44] Ye Yuan and Kris Kitani. Diverse trajectory forecasting with determinantal point processes. In International Conference on Learning Representations (ICLR), 2020. 2",
|
| 1461 |
+
"[45] Wei Zhan, Liting Sun, Di Wang, Haojie Shi, Aubrey Clausse, Maximilian Naumann, Julius Kummerle, Hendrik Konigshof, Christoph Stiller, Arnaud de La Fortelle, et al. Interaction dataset: An international, adversarial and cooperative motion dataset in interactive driving scenarios with semantic maps. arXiv preprint arXiv:1910.03088, 2019. 3",
|
| 1462 |
+
"[46] Hang Zhao, Jiyang Gao, Tian Lan, Chen Sun, Benjamin Sapp, Balakrishnan Varadarajan, Yue Shen, Yi Shen, Yuning Chai, Cordelia Schmid, et al. TNT: Target-driven trajectory prediction. In Conference on Robot Learning (CoRL), 2020. 2, 4, 8",
|
| 1463 |
+
"[47] Tianyang Zhao, Yifei Xu, Mathew Monfort, Wongun Choi, Chris Baker, Yibiao Zhao, Yizhou Wang, and Ying Nian Wu. Multi-agent tensor fusion for contextual trajectory prediction. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 12126-12134, 2019. 2"
|
| 1464 |
+
],
|
| 1465 |
+
"bbox": [
|
| 1466 |
+
503,
|
| 1467 |
+
92,
|
| 1468 |
+
890,
|
| 1469 |
+
825
|
| 1470 |
+
],
|
| 1471 |
+
"page_idx": 9
|
| 1472 |
+
},
|
| 1473 |
+
{
|
| 1474 |
+
"type": "text",
|
| 1475 |
+
"text": "Appendix",
|
| 1476 |
+
"text_level": 1,
|
| 1477 |
+
"bbox": [
|
| 1478 |
+
76,
|
| 1479 |
+
90,
|
| 1480 |
+
163,
|
| 1481 |
+
107
|
| 1482 |
+
],
|
| 1483 |
+
"page_idx": 10
|
| 1484 |
+
},
|
| 1485 |
+
{
|
| 1486 |
+
"type": "text",
|
| 1487 |
+
"text": "A. Additional Experiment Details",
|
| 1488 |
+
"text_level": 1,
|
| 1489 |
+
"bbox": [
|
| 1490 |
+
76,
|
| 1491 |
+
113,
|
| 1492 |
+
339,
|
| 1493 |
+
130
|
| 1494 |
+
],
|
| 1495 |
+
"page_idx": 10
|
| 1496 |
+
},
|
| 1497 |
+
{
|
| 1498 |
+
"type": "text",
|
| 1499 |
+
"text": "In this section, we introduce additional details on filtering interactive training data, training the baseline joint predictor, and training by agent types.",
|
| 1500 |
+
"bbox": [
|
| 1501 |
+
76,
|
| 1502 |
+
137,
|
| 1503 |
+
468,
|
| 1504 |
+
184
|
| 1505 |
+
],
|
| 1506 |
+
"page_idx": 10
|
| 1507 |
+
},
|
| 1508 |
+
{
|
| 1509 |
+
"type": "text",
|
| 1510 |
+
"text": "A.1 Filtering Interactive Training Data",
|
| 1511 |
+
"text_level": 1,
|
| 1512 |
+
"bbox": [
|
| 1513 |
+
76,
|
| 1514 |
+
200,
|
| 1515 |
+
366,
|
| 1516 |
+
215
|
| 1517 |
+
],
|
| 1518 |
+
"page_idx": 10
|
| 1519 |
+
},
|
| 1520 |
+
{
|
| 1521 |
+
"type": "text",
|
| 1522 |
+
"text": "The Waymo Open Motion Dataset only provides interactive scenarios in its validation set and testing set. To filter the interactive scenario in the training set, we implement a script to identify scenarios that include 2 interacting agents based on the objects_of_interest mask provided in the data. The script is provided in the source code.",
|
| 1523 |
+
"bbox": [
|
| 1524 |
+
76,
|
| 1525 |
+
223,
|
| 1526 |
+
468,
|
| 1527 |
+
315
|
| 1528 |
+
],
|
| 1529 |
+
"page_idx": 10
|
| 1530 |
+
},
|
| 1531 |
+
{
|
| 1532 |
+
"type": "text",
|
| 1533 |
+
"text": "A.2 Baseline Joint Predictor",
|
| 1534 |
+
"text_level": 1,
|
| 1535 |
+
"bbox": [
|
| 1536 |
+
76,
|
| 1537 |
+
332,
|
| 1538 |
+
290,
|
| 1539 |
+
347
|
| 1540 |
+
],
|
| 1541 |
+
"page_idx": 10
|
| 1542 |
+
},
|
| 1543 |
+
{
|
| 1544 |
+
"type": "text",
|
| 1545 |
+
"text": "We train the Baseline Joint predictor described in Sec. 4.4 as follows. First, we predict the distribution of goals for each interacting agent as a heatmap, according to [15]. Second, we select the top 80 goals based on the predicted probability for each agent. Third, we combine the selected goals into 6400 goal pairs and run each goal pair feature, including $(x,y)$ positions for both goals, through a 2-layer MLP with a hidden size of 128 followed by a normalization layer and a ReLU activation layer. Fourth, we run a fully connected layer to predict the probability logit for each goal pair, and train the joint goal prediction model through the following loss:",
|
| 1546 |
+
"bbox": [
|
| 1547 |
+
76,
|
| 1548 |
+
354,
|
| 1549 |
+
468,
|
| 1550 |
+
534
|
| 1551 |
+
],
|
| 1552 |
+
"page_idx": 10
|
| 1553 |
+
},
|
| 1554 |
+
{
|
| 1555 |
+
"type": "equation",
|
| 1556 |
+
"text": "\n$$\n\\mathcal {L} _ {J} = \\mathcal {L} _ {c e} (J, \\hat {J}), \\tag {8}\n$$\n",
|
| 1557 |
+
"text_format": "latex",
|
| 1558 |
+
"bbox": [
|
| 1559 |
+
215,
|
| 1560 |
+
535,
|
| 1561 |
+
468,
|
| 1562 |
+
551
|
| 1563 |
+
],
|
| 1564 |
+
"page_idx": 10
|
| 1565 |
+
},
|
| 1566 |
+
{
|
| 1567 |
+
"type": "text",
|
| 1568 |
+
"text": "where $\\mathcal{L}_{ce}$ is the cross entropy loss, $J$ is the predicted goal pair distribution, and $\\hat{J}$ is the index of the goal pair out of all candidates that is the closest to the ground truth goal pair in terms of Euclidean distance. Given the predicted goal pairs, we train the trajectory completion model to regress the full trajectories of both interacting agents following the same procedure in [15].",
|
| 1569 |
+
"bbox": [
|
| 1570 |
+
76,
|
| 1571 |
+
556,
|
| 1572 |
+
468,
|
| 1573 |
+
662
|
| 1574 |
+
],
|
| 1575 |
+
"page_idx": 10
|
| 1576 |
+
},
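The goal-pair scoring in this baseline can be sketched as follows, assuming 80 predicted goals per agent, a 2-layer MLP with hidden size 128 over the concatenated (x, y) coordinates of each pair, and the cross-entropy target of Eq. (8) at the index of the pair closest to the ground truth; the class and variable names are assumptions.

```python
import torch
import torch.nn as nn

class GoalPairScorer(nn.Module):
    """Scores each of the 80 x 80 = 6400 goal pairs from their (x, y) coordinates."""
    def __init__(self, hidden=128):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(4, hidden), nn.LayerNorm(hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.LayerNorm(hidden), nn.ReLU(),
        )
        self.out = nn.Linear(hidden, 1)

    def forward(self, goals_a, goals_b):   # each: (80, 2) goal coordinates
        n_a, n_b = goals_a.shape[0], goals_b.shape[0]
        pairs = torch.cat([goals_a[:, None, :].expand(-1, n_b, -1),
                           goals_b[None, :, :].expand(n_a, -1, -1)], dim=-1)
        return self.out(self.mlp(pairs.reshape(-1, 4))).squeeze(-1)  # (6400,) logits

# Eq. (8): cross entropy between the predicted pair distribution and the index
# of the candidate pair closest to the ground-truth goal pair:
# loss = nn.CrossEntropyLoss()(logits.unsqueeze(0), closest_pair_index)  # index: shape (1,)
```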
|
| 1577 |
+
{
|
| 1578 |
+
"type": "text",
|
| 1579 |
+
"text": "A.3 Training by Agent Types",
|
| 1580 |
+
"text_level": 1,
|
| 1581 |
+
"bbox": [
|
| 1582 |
+
76,
|
| 1583 |
+
680,
|
| 1584 |
+
294,
|
| 1585 |
+
696
|
| 1586 |
+
],
|
| 1587 |
+
"page_idx": 10
|
| 1588 |
+
},
|
| 1589 |
+
{
|
| 1590 |
+
"type": "text",
|
| 1591 |
+
"text": "The Waymo Open Motion Dataset consists of three types of agents to predict: vehicles, pedestrians, and cyclists. As each agent type has different behavior models and the distribution is unbalanced among types (e.g. vehicle types account for $78\\%$ of the training data), we train the marginal trajectory predictor and the conditional trajectory predictor for each agent type separately. We observe that the prediction performance over pedestrians and cyclists improves by a large margin, compared to training a single model for all agents.",
|
| 1592 |
+
"bbox": [
|
| 1593 |
+
76,
|
| 1594 |
+
703,
|
| 1595 |
+
468,
|
| 1596 |
+
854
|
| 1597 |
+
],
|
| 1598 |
+
"page_idx": 10
|
| 1599 |
+
},
|
| 1600 |
+
{
|
| 1601 |
+
"type": "text",
|
| 1602 |
+
"text": "For the same reason, we train four relation predictors for vehicle-vehicle interactions, vehicle-pedestrian interactions, vehicle-cyclist interactions, and interactions that",
|
| 1603 |
+
"bbox": [
|
| 1604 |
+
76,
|
| 1605 |
+
854,
|
| 1606 |
+
468,
|
| 1607 |
+
900
|
| 1608 |
+
],
|
| 1609 |
+
"page_idx": 10
|
| 1610 |
+
},
|
| 1611 |
+
{
|
| 1612 |
+
"type": "text",
|
| 1613 |
+
"text": "cover the remaining agent pair types, including cyclist-pedestrian, cyclist-cyclist, pedestrian-pedestrian.",
|
| 1614 |
+
"bbox": [
|
| 1615 |
+
498,
|
| 1616 |
+
90,
|
| 1617 |
+
890,
|
| 1618 |
+
122
|
| 1619 |
+
],
|
| 1620 |
+
"page_idx": 10
|
| 1621 |
+
},
|
| 1622 |
+
{
|
| 1623 |
+
"type": "text",
|
| 1624 |
+
"text": "B. Additional Qualitative Examples",
|
| 1625 |
+
"text_level": 1,
|
| 1626 |
+
"bbox": [
|
| 1627 |
+
500,
|
| 1628 |
+
130,
|
| 1629 |
+
777,
|
| 1630 |
+
147
|
| 1631 |
+
],
|
| 1632 |
+
"page_idx": 10
|
| 1633 |
+
},
|
| 1634 |
+
{
|
| 1635 |
+
"type": "text",
|
| 1636 |
+
"text": "We present additional representative examples in a variety of interaction settings to showcase the advantage of M2I over the marginal baseline.",
|
| 1637 |
+
"bbox": [
|
| 1638 |
+
500,
|
| 1639 |
+
154,
|
| 1640 |
+
893,
|
| 1641 |
+
199
|
| 1642 |
+
],
|
| 1643 |
+
"page_idx": 10
|
| 1644 |
+
},
|
| 1645 |
+
{
|
| 1646 |
+
"type": "text",
|
| 1647 |
+
"text": "B.1 Influencer Overtakes Reactor",
|
| 1648 |
+
"text_level": 1,
|
| 1649 |
+
"bbox": [
|
| 1650 |
+
500,
|
| 1651 |
+
217,
|
| 1652 |
+
751,
|
| 1653 |
+
233
|
| 1654 |
+
],
|
| 1655 |
+
"page_idx": 10
|
| 1656 |
+
},
|
| 1657 |
+
{
|
| 1658 |
+
"type": "text",
|
| 1659 |
+
"text": "In Fig. 5, we present three examples in which the influencer overtakes the reactor. In each example, M2I successfully predicts the correct relation type and improves prediction accuracy and scene compliance, while the marginal predictor predicts overlapping trajectories without considering the future interaction between agents.",
|
| 1660 |
+
"bbox": [
|
| 1661 |
+
500,
|
| 1662 |
+
241,
|
| 1663 |
+
893,
|
| 1664 |
+
332
|
| 1665 |
+
],
|
| 1666 |
+
"page_idx": 10
|
| 1667 |
+
},
|
| 1668 |
+
{
|
| 1669 |
+
"type": "text",
|
| 1670 |
+
"text": "B.2 Reactor Yields to Influencer before Turning",
|
| 1671 |
+
"text_level": 1,
|
| 1672 |
+
"bbox": [
|
| 1673 |
+
500,
|
| 1674 |
+
349,
|
| 1675 |
+
848,
|
| 1676 |
+
366
|
| 1677 |
+
],
|
| 1678 |
+
"page_idx": 10
|
| 1679 |
+
},
|
| 1680 |
+
{
|
| 1681 |
+
"type": "text",
|
| 1682 |
+
"text": "In Fig. 6, we present three examples in which the reactor waits for the influencer to pass before turning. In each example, M2I successfully predicts the correct relation type and the accurate reactive trajectories for the reactor. On the other hand, the marginal predictor ignores the interaction and results in less accurate predictions.",
|
| 1683 |
+
"bbox": [
|
| 1684 |
+
500,
|
| 1685 |
+
375,
|
| 1686 |
+
890,
|
| 1687 |
+
465
|
| 1688 |
+
],
|
| 1689 |
+
"page_idx": 10
|
| 1690 |
+
},
|
| 1691 |
+
{
|
| 1692 |
+
"type": "text",
|
| 1693 |
+
"text": "B.3 Reactor Merges behind Influencer",
|
| 1694 |
+
"text_level": 1,
|
| 1695 |
+
"bbox": [
|
| 1696 |
+
500,
|
| 1697 |
+
482,
|
| 1698 |
+
782,
|
| 1699 |
+
497
|
| 1700 |
+
],
|
| 1701 |
+
"page_idx": 10
|
| 1702 |
+
},
|
| 1703 |
+
{
|
| 1704 |
+
"type": "text",
|
| 1705 |
+
"text": "In Fig. 7, we present two examples in which the reactor merges behind the influencer after the influencer passes. In each example, M2I successfully predicts the correct relation type and the accurate reactor trajectories that follow the influencer, while the marginal predictor fails to account for the interaction and predicts trajectories far away from the ground truth.",
|
| 1706 |
+
"bbox": [
|
| 1707 |
+
500,
|
| 1708 |
+
506,
|
| 1709 |
+
890,
|
| 1710 |
+
612
|
| 1711 |
+
],
|
| 1712 |
+
"page_idx": 10
|
| 1713 |
+
},
|
| 1714 |
+
{
|
| 1715 |
+
"type": "text",
|
| 1716 |
+
"text": "C. Multi-Agent Generalization",
|
| 1717 |
+
"text_level": 1,
|
| 1718 |
+
"bbox": [
|
| 1719 |
+
500,
|
| 1720 |
+
621,
|
| 1721 |
+
740,
|
| 1722 |
+
638
|
| 1723 |
+
],
|
| 1724 |
+
"page_idx": 10
|
| 1725 |
+
},
|
| 1726 |
+
{
|
| 1727 |
+
"type": "text",
|
| 1728 |
+
"text": "We present a qualitative analysis on applying M2I to multi-agent scenarios involving more than two agents. In Fig. 8, we show two examples in which M2I provides scene compliant relation predictions in crowded traffic. Given the relation predictions, it is straightforward to predict the agent trajectories through marginal and conditional predictors, as in Eq. (3).",
|
| 1729 |
+
"bbox": [
|
| 1730 |
+
500,
|
| 1731 |
+
645,
|
| 1732 |
+
890,
|
| 1733 |
+
751
|
| 1734 |
+
],
|
| 1735 |
+
"page_idx": 10
|
| 1736 |
+
},
|
| 1737 |
+
{
|
| 1738 |
+
"type": "image",
|
| 1739 |
+
"img_path": "images/8b72cb1ef5eafb00eeeb8babf7daa649e5eac10d56d71f1d9f3a37d5be5fabae.jpg",
|
| 1740 |
+
"image_caption": [],
|
| 1741 |
+
"image_footnote": [],
|
| 1742 |
+
"bbox": [
|
| 1743 |
+
76,
|
| 1744 |
+
151,
|
| 1745 |
+
491,
|
| 1746 |
+
275
|
| 1747 |
+
],
|
| 1748 |
+
"page_idx": 11
|
| 1749 |
+
},
|
| 1750 |
+
{
|
| 1751 |
+
"type": "image",
|
| 1752 |
+
"img_path": "images/0517a2a85fb7278130d1d8c8a7936963ed7cdac9b7f2b71b23071fc41459014c.jpg",
|
| 1753 |
+
"image_caption": [],
|
| 1754 |
+
"image_footnote": [],
|
| 1755 |
+
"bbox": [
|
| 1756 |
+
491,
|
| 1757 |
+
151,
|
| 1758 |
+
903,
|
| 1759 |
+
275
|
| 1760 |
+
],
|
| 1761 |
+
"page_idx": 11
|
| 1762 |
+
},
|
| 1763 |
+
{
|
| 1764 |
+
"type": "image",
|
| 1765 |
+
"img_path": "images/82c7e7e4571f12f1b274a96cd6cb5ac9b7972a0ee3fcd3386843e916d702e959.jpg",
|
| 1766 |
+
"image_caption": [],
|
| 1767 |
+
"image_footnote": [],
|
| 1768 |
+
"bbox": [
|
| 1769 |
+
78,
|
| 1770 |
+
275,
|
| 1771 |
+
490,
|
| 1772 |
+
411
|
| 1773 |
+
],
|
| 1774 |
+
"page_idx": 11
|
| 1775 |
+
},
|
| 1776 |
+
{
|
| 1777 |
+
"type": "image",
|
| 1778 |
+
"img_path": "images/9450d9e1ea03f7bef7bca3ae0cb79b38b6f4a9fd5496e1a6dca2d537cb7299c9.jpg",
|
| 1779 |
+
"image_caption": [],
|
| 1780 |
+
"image_footnote": [],
|
| 1781 |
+
"bbox": [
|
| 1782 |
+
491,
|
| 1783 |
+
275,
|
| 1784 |
+
903,
|
| 1785 |
+
411
|
| 1786 |
+
],
|
| 1787 |
+
"page_idx": 11
|
| 1788 |
+
},
|
| 1789 |
+
{
|
| 1790 |
+
"type": "image",
|
| 1791 |
+
"img_path": "images/2b8ca4bd365484638507bac381f8441faa3bb4cb0e269d10f9b66c7a85041baf.jpg",
|
| 1792 |
+
"image_caption": [],
|
| 1793 |
+
"image_footnote": [],
|
| 1794 |
+
"bbox": [
|
| 1795 |
+
78,
|
| 1796 |
+
411,
|
| 1797 |
+
488,
|
| 1798 |
+
712
|
| 1799 |
+
],
|
| 1800 |
+
"page_idx": 11
|
| 1801 |
+
},
|
| 1802 |
+
{
|
| 1803 |
+
"type": "image",
|
| 1804 |
+
"img_path": "images/171430dbf3f6c709ab26ad2cdab814aac9a27addbd786f4834a42b116e43d9ea.jpg",
|
| 1805 |
+
"image_caption": [],
|
| 1806 |
+
"image_footnote": [],
|
| 1807 |
+
"bbox": [
|
| 1808 |
+
491,
|
| 1809 |
+
411,
|
| 1810 |
+
903,
|
| 1811 |
+
712
|
| 1812 |
+
],
|
| 1813 |
+
"page_idx": 11
|
| 1814 |
+
},
|
| 1815 |
+
{
|
| 1816 |
+
"type": "image",
|
| 1817 |
+
"img_path": "images/22945496ed2a9663c47a1337bb95f0b41b567eefb53badcf0f1983d2a19c2d7c.jpg",
|
| 1818 |
+
"image_caption": [
|
| 1819 |
+
"Figure 5. Influencer overtakes reactor. In each example, M2I (right column) successfully predicts the correct relation type and improves prediction accuracy and scene compliance, while the marginal predictor (left column) predicts overlapping trajectories without considering the future interaction between agents."
|
| 1820 |
+
],
|
| 1821 |
+
"image_footnote": [],
|
| 1822 |
+
"bbox": [
|
| 1823 |
+
361,
|
| 1824 |
+
722,
|
| 1825 |
+
609,
|
| 1826 |
+
770
|
| 1827 |
+
],
|
| 1828 |
+
"page_idx": 11
|
| 1829 |
+
},
|
| 1830 |
+
{
|
| 1831 |
+
"type": "image",
|
| 1832 |
+
"img_path": "images/f46f436e354b5c8243cd9c83d9f60f9f0ebed13a15d4dcb0f281fb63c993f5b8.jpg",
|
| 1833 |
+
"image_caption": [],
|
| 1834 |
+
"image_footnote": [],
|
| 1835 |
+
"bbox": [
|
| 1836 |
+
80,
|
| 1837 |
+
89,
|
| 1838 |
+
488,
|
| 1839 |
+
205
|
| 1840 |
+
],
|
| 1841 |
+
"page_idx": 12
|
| 1842 |
+
},
|
| 1843 |
+
{
|
| 1844 |
+
"type": "image",
|
| 1845 |
+
"img_path": "images/f41b31a31e66ca0f6259067b8fb738cb8f1bed475616d51b766ded7a60ad91e9.jpg",
|
| 1846 |
+
"image_caption": [],
|
| 1847 |
+
"image_footnote": [],
|
| 1848 |
+
"bbox": [
|
| 1849 |
+
491,
|
| 1850 |
+
90,
|
| 1851 |
+
901,
|
| 1852 |
+
205
|
| 1853 |
+
],
|
| 1854 |
+
"page_idx": 12
|
| 1855 |
+
},
|
| 1856 |
+
{
|
| 1857 |
+
"type": "image",
|
| 1858 |
+
"img_path": "images/9f944f01256e53c0554e3574faa0039ebc2d9f252a300770bc73a0651b4a7ae4.jpg",
|
| 1859 |
+
"image_caption": [],
|
| 1860 |
+
"image_footnote": [],
|
| 1861 |
+
"bbox": [
|
| 1862 |
+
80,
|
| 1863 |
+
207,
|
| 1864 |
+
488,
|
| 1865 |
+
483
|
| 1866 |
+
],
|
| 1867 |
+
"page_idx": 12
|
| 1868 |
+
},
|
| 1869 |
+
{
|
| 1870 |
+
"type": "image",
|
| 1871 |
+
"img_path": "images/a0f8073cbfcf9a263553bd460bc71c6d2f364183bc3f301512d54a8334e27495.jpg",
|
| 1872 |
+
"image_caption": [],
|
| 1873 |
+
"image_footnote": [],
|
| 1874 |
+
"bbox": [
|
| 1875 |
+
491,
|
| 1876 |
+
207,
|
| 1877 |
+
901,
|
| 1878 |
+
483
|
| 1879 |
+
],
|
| 1880 |
+
"page_idx": 12
|
| 1881 |
+
},
|
| 1882 |
+
{
|
| 1883 |
+
"type": "image",
|
| 1884 |
+
"img_path": "images/e341cc2a710210b54721a4ba26cfce62d28834553735830e82ac32fe8b4e318a.jpg",
|
| 1885 |
+
"image_caption": [],
|
| 1886 |
+
"image_footnote": [],
|
| 1887 |
+
"bbox": [
|
| 1888 |
+
80,
|
| 1889 |
+
486,
|
| 1890 |
+
488,
|
| 1891 |
+
777
|
| 1892 |
+
],
|
| 1893 |
+
"page_idx": 12
|
| 1894 |
+
},
|
| 1895 |
+
{
|
| 1896 |
+
"type": "image",
|
| 1897 |
+
"img_path": "images/69784a8ef8ab3a400113e163fa0972bd1bc9c048a58615ae718db6bca0a18be1.jpg",
|
| 1898 |
+
"image_caption": [],
|
| 1899 |
+
"image_footnote": [],
|
| 1900 |
+
"bbox": [
|
| 1901 |
+
491,
|
| 1902 |
+
486,
|
| 1903 |
+
901,
|
| 1904 |
+
777
|
| 1905 |
+
],
|
| 1906 |
+
"page_idx": 12
|
| 1907 |
+
},
|
| 1908 |
+
{
|
| 1909 |
+
"type": "image",
|
| 1910 |
+
"img_path": "images/4abc6b57f2b3da55cd6f176ea02422f0967b3417ce6a2598595779216bd60453.jpg",
|
| 1911 |
+
"image_caption": [
|
| 1912 |
+
"Figure 6. Reactor yields to influencer before turning. In each example, M2I (right column) successfully predicts the correct relation type and the accurate reactive trajectories for the reactor. On the other hand, the marginal predictor (left column) ignores the interaction and results in less accurate predictions."
|
| 1913 |
+
],
|
| 1914 |
+
"image_footnote": [],
|
| 1915 |
+
"bbox": [
|
| 1916 |
+
361,
|
| 1917 |
+
787,
|
| 1918 |
+
609,
|
| 1919 |
+
834
|
| 1920 |
+
],
|
| 1921 |
+
"page_idx": 12
|
| 1922 |
+
},
|
| 1923 |
+
{
|
| 1924 |
+
"type": "image",
|
| 1925 |
+
"img_path": "images/2e68b5c5e86f7534dcd77ef5d6f3a18337fa4d69aebda7b0af6c1c7531baa4b3.jpg",
|
| 1926 |
+
"image_caption": [],
|
| 1927 |
+
"image_footnote": [],
|
| 1928 |
+
"bbox": [
|
| 1929 |
+
76,
|
| 1930 |
+
157,
|
| 1931 |
+
491,
|
| 1932 |
+
477
|
| 1933 |
+
],
|
| 1934 |
+
"page_idx": 13
|
| 1935 |
+
},
|
| 1936 |
+
{
|
| 1937 |
+
"type": "image",
|
| 1938 |
+
"img_path": "images/5c3e78fd922836bc5652a6db2e8a3a96cf612066e40e2013a3f97695ce5dea9b.jpg",
|
| 1939 |
+
"image_caption": [],
|
| 1940 |
+
"image_footnote": [],
|
| 1941 |
+
"bbox": [
|
| 1942 |
+
491,
|
| 1943 |
+
157,
|
| 1944 |
+
903,
|
| 1945 |
+
477
|
| 1946 |
+
],
|
| 1947 |
+
"page_idx": 13
|
| 1948 |
+
},
|
| 1949 |
+
{
|
| 1950 |
+
"type": "image",
|
| 1951 |
+
"img_path": "images/325d90e4712ae49b827a56c3e77a5b8599dc08e2cf36376909ad685f50313c43.jpg",
|
| 1952 |
+
"image_caption": [],
|
| 1953 |
+
"image_footnote": [],
|
| 1954 |
+
"bbox": [
|
| 1955 |
+
78,
|
| 1956 |
+
477,
|
| 1957 |
+
490,
|
| 1958 |
+
705
|
| 1959 |
+
],
|
| 1960 |
+
"page_idx": 13
|
| 1961 |
+
},
|
| 1962 |
+
{
|
| 1963 |
+
"type": "image",
|
| 1964 |
+
"img_path": "images/7c5cbafdab0d0457593e4eb7e02f18a43f4a18f11140f58ba97a28939637e2fd.jpg",
|
| 1965 |
+
"image_caption": [],
|
| 1966 |
+
"image_footnote": [],
|
| 1967 |
+
"bbox": [
|
| 1968 |
+
491,
|
| 1969 |
+
477,
|
| 1970 |
+
903,
|
| 1971 |
+
705
|
| 1972 |
+
],
|
| 1973 |
+
"page_idx": 13
|
| 1974 |
+
},
|
| 1975 |
+
{
|
| 1976 |
+
"type": "image",
|
| 1977 |
+
"img_path": "images/6f1e5747c42e277bae3e729e992925cc0132871c08b4cde93cfcdab52f8b417e.jpg",
|
| 1978 |
+
"image_caption": [
|
| 1979 |
+
"Figure 7. Reactor merges behind influencer. In each example, M2I (right column) successfully predicts the correct relation type and the accurate reactor trajectories that follow the influencer, while the marginal predictor (left column) fails to account for the interaction and predicts trajectories far away from the ground truth."
|
| 1980 |
+
],
|
| 1981 |
+
"image_footnote": [],
|
| 1982 |
+
"bbox": [
|
| 1983 |
+
361,
|
| 1984 |
+
715,
|
| 1985 |
+
609,
|
| 1986 |
+
762
|
| 1987 |
+
],
|
| 1988 |
+
"page_idx": 13
|
| 1989 |
+
},
|
| 1990 |
+
{
|
| 1991 |
+
"type": "image",
|
| 1992 |
+
"img_path": "images/d65bf0c5d365f597319de091abb631dda3eeab226baaea2f9011fa72c05ad5f5.jpg",
|
| 1993 |
+
"image_caption": [],
|
| 1994 |
+
"image_footnote": [],
|
| 1995 |
+
"bbox": [
|
| 1996 |
+
151,
|
| 1997 |
+
114,
|
| 1998 |
+
823,
|
| 1999 |
+
444
|
| 2000 |
+
],
|
| 2001 |
+
"page_idx": 14
|
| 2002 |
+
},
|
| 2003 |
+
{
|
| 2004 |
+
"type": "image",
|
| 2005 |
+
"img_path": "images/addc1c8425f0032726abbf2df5a208724568259f3dddf77d85fa011782c799e2.jpg",
|
| 2006 |
+
"image_caption": [
|
| 2007 |
+
"Figure 8. Examples of M2I providing scene compliant relation predictions in complex multi-agent scenarios."
|
| 2008 |
+
],
|
| 2009 |
+
"image_footnote": [],
|
| 2010 |
+
"bbox": [
|
| 2011 |
+
151,
|
| 2012 |
+
446,
|
| 2013 |
+
823,
|
| 2014 |
+
844
|
| 2015 |
+
],
|
| 2016 |
+
"page_idx": 14
|
| 2017 |
+
}
|
| 2018 |
+
]
|
2202.11xxx/2202.11884/edf7b602-b852-4c85-9e17-0cbd193b4c28_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.11xxx/2202.11884/edf7b602-b852-4c85-9e17-0cbd193b4c28_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:277eb501ffb0b9456ee67c0081d846f490554506643db00db6b560724b56e409
|
| 3 |
+
size 4799163
|
2202.11xxx/2202.11884/full.md
ADDED
|
@@ -0,0 +1,397 @@
|
| 1 |
+
# M2I: From Factored Marginal Trajectory Prediction to Interactive Prediction
|
| 2 |
+
|
| 3 |
+
Qiao Sun$^{1*}$, Xin Huang$^{2*†}$, Junru Gu$^{1}$, Brian C. Williams$^{2}$, Hang Zhao$^{1\ddagger}$
|
| 4 |
+
|
| 5 |
+
$^{1}$ IIIS, Tsinghua University; $^{2}$ CSAIL, Massachusetts Institute of Technology
|
| 6 |
+
|
| 7 |
+
# Abstract
|
| 8 |
+
|
| 9 |
+
Predicting future motions of road participants is an important task for driving autonomously in urban scenes. Existing models excel at predicting marginal trajectories for single agents, yet it remains an open question to jointly predict scene compliant trajectories over multiple agents. The challenge is due to exponentially increasing prediction space as a function of the number of agents. In this work, we exploit the underlying relations between interacting agents and decouple the joint prediction problem into marginal prediction problems. Our proposed approach M2I first classifies interacting agents as pairs of influencers and reactors, and then leverages a marginal prediction model and a conditional prediction model to predict trajectories for the influencers and reactors, respectively. The predictions from interacting agents are combined and selected according to their joint likelihoods. Experiments show that our simple but effective approach achieves state-of-the-art performance on the Waymo Open Motion Dataset interactive prediction benchmark.
|
| 10 |
+
|
| 11 |
+
# 1. Introduction
|
| 12 |
+
|
| 13 |
+
Trajectory prediction is widely used by intelligent driving systems to infer future motions of nearby agents and identify risky scenarios to enable safe driving. Recent advances have shown great success in predicting accurate trajectories by learning from real-world driving examples. Many existing trajectory prediction works [5, 12, 15, 26, 28, 38] focus on generating marginal prediction samples of future trajectories over individual agents, failing to reason about their interactions in the future. As a result, the prediction samples over multiple agents may overlap with each other and result in sub-optimal performance.
|
| 14 |
+
|
| 15 |
+
We present a motivating example in Fig. 1, in which a marginal predictor produces a set of prediction samples separately for two interacting agents, as visualized in the top left figure. While the predictions for each agent are
|
| 16 |
+
|
| 17 |
+

|
| 18 |
+
Joint Prediction using Traditional Marginal Predictors
|
| 19 |
+
|
| 20 |
+

|
| 21 |
+
Joint Prediction using M2I
|
| 22 |
+
Figure 1. A motivating example of M2I. Top: Traditional marginal predictor often produces scene inconsistent trajectory predictions that collide with each other. Even for non-colliding predictions, it ignores the potential interaction between agent futures and may predict unrealistic behaviors. Bottom: Our proposed approach M2I predicts scene compliant trajectories by first identifying an influencer reactor pair in the scene. It then predicts marginal trajectories for the influencer and reactive trajectories for the reactor.
|
| 23 |
+
|
| 24 |
+
reasonable without considering the presence of the other, some trajectory pairs will collide when considering them jointly. For instance, it is unlikely that the red agent turns left while the blue agent goes forward, as indicated in the top middle example in Fig. 1. Therefore, it is necessary to predict scene compliant trajectories with the existence of multiple agents to support better prediction accuracy.
|
| 25 |
+
|
| 26 |
+
To generate scene compliant trajectories, one can learn a joint predictor to predict trajectories in a joint space over multiple agents; however, it suffers from an exponentially increasing prediction space as the number of agents increases. As investigated by [15], while it is feasible to predict a set of goals for a single agent, the goal space increases exponentially with the number of agents and becomes unmanageable for even two agents with a few hundred goal candidates for each agent. A more computationally efficient alternative to producing scene compliant trajectories is to post-process marginal prediction samples by pruning colliding ones; however, such an ad-hoc approach fails to take into account potential agent interactions in the future and may ignore other conflicts which are hard to prune by heuristics. For instance, although the prediction sample in the top right figure in Fig. 1 is collision-free, the red agent may slow down when turning left to keep a safe distance from the blue agent. Such interactive behavior is hard for a marginal predictor to capture, as it is unaware of the
|
| 27 |
+
|
| 28 |
+
future behavior of the other agents in the scene.
|
| 29 |
+
|
| 30 |
+
In this paper, we propose M2I that leverages marginal and conditional trajectory predictors to efficiently predict scene compliant multi-agent trajectories, by approximating the joint distribution as a product of a marginal distribution and a conditional distribution. The factorization assumes two types of agents: the influencer that behaves independently without considering the other agents, and the reactor that reacts to the behavior of the influencer. This assumption is inspired by the recent study on the underlying correlations between interactive agent trajectories [39]. Under the assumption, we leverage a standard marginal predictor to generate prediction samples for the influencer, and a conditional predictor to roll out future trajectories for the reactor conditioned on the future trajectory of the influencer. The advantage of our proposed approach M2I is illustrated in the bottom figures in Fig. 1, in which we first predict the relations of the interactive agents. Given the relations, we predict the future trajectories of the influencer and then predict reactive behaviors of the reactor conditioned on each influencer prediction. As causality in driving interaction remains an open problem [39], we pre-label the influencer-reactor relation based on a heuristic, and propose a relation predictor to classify interactive relations at inference time.
|
| 31 |
+
|
| 32 |
+
Our contributions are three-fold. First, we propose a simple but effective framework M2I that leverages marginal and conditional predictors to generate accurate and scene compliant multi-agent trajectories. The framework does not assume a specific predictor structure, allowing it to be adopted by a wide range of backbone prediction models. Second, we propose a relation predictor that infers high-level relations among interactive agents to decouple the prediction space. Third, we demonstrate our framework using a goal-conditioned prediction model. Experiments show that M2I achieves state-of-the-art performance on the Waymo Open Motion Dataset interactive prediction benchmark.
|
| 33 |
+
|
| 34 |
+
# 2. Related Work
|
| 35 |
+
|
| 36 |
+
Trajectory prediction for traffic agents has been studied extensively in recent years. Due to uncertainty in human intent, the future trajectories are probabilistic and multimodal. To handle the multi-modality problem, [5, 35] propose models that output behavior predictions as Gaussian mixture models (GMMs), in which each mixture component represents a single modality. Instead of parameterizing the prediction distribution, generative models, such as generative adversarial models (GANs) [16, 18, 47] and (conditional) variational autoencoders (VAEs) [26, 29, 35, 44], produce trajectory samples to approximate the distribution space. These generative models suffer from sample inefficiency and require many samples to cover diverse driving scenarios [18].
|
| 37 |
+
|
| 38 |
+
More recently, a family of models has been proposed to
|
| 39 |
+
|
| 40 |
+
improve prediction accuracy and coverage by first predicting high-level intentions, such as goal targets [11, 13, 15, 29, 34, 46], lanes to follow [21, 37], and maneuver actions [8, 9, 19, 24], before predicting low-level trajectories conditioned on the intention. Such models demonstrate great success in predicting accurate trajectories for single agents in popular trajectory prediction benchmarks, such as Argoverse [6] and the Waymo Open Motion Dataset [10]. While our proposed approach M2I can use an arbitrary prediction model, we choose to adopt an anchor-free goal-based predictor [15] because of its outstanding performance.
|
| 41 |
+
|
| 42 |
+
In the rest of the section, we introduce the literature closely related to our approach, on interactive trajectory prediction and conditional trajectory prediction.
|
| 43 |
+
|
| 44 |
+
# 2.1. Interactive Trajectory Prediction
|
| 45 |
+
|
| 46 |
+
Predicting scene compliant trajectories for multiple agents remains an open question due to its complexity. Early work leverages hand-crafted interaction models, such as social forces [17] and energy functions [43]. These handcrafted functions require manual tuning and have difficulties modeling highly complicated and nonlinear interactions. In contrast, learning-based methods achieve better accuracy by learning interactions from realistic driving data: [2, 16] utilize social pooling mechanisms to capture social influences from neighbor agents to predict interactive pedestrian trajectories in crowded scenes; [3, 4, 31, 35] build a graph neural network (GNN) to learn the agent-to-agent interactions; [22, 27, 32, 33, 38] leverage attention and transformer mechanisms to learn multi-agent interaction behaviors. In this work, we build a sparse graph with directed edges representing dependencies between agent nodes, but our approach differs from existing graph-based models in a few ways. First, it adopts explicit influencer-reactor relations and offers better interpretability in agent interactions. Second, M2I predicts scene compliant trajectories through marginal and conditional predictors to afford better computational efficiency. Third, it utilizes the future trajectory of influencer agents to predict conditional behaviors for the reactors for better accuracy. This also allows M2I to be used for counterfactual reasoning in simulation applications by varying influencer trajectories.
|
| 47 |
+
|
| 48 |
+
Existing marginal prediction work produces scene compliant trajectories by leveraging an auxiliary collision loss [27] or a critic based on an inverse reinforcement learning framework [40] that discourages colliding trajectories. In this work, we focus on identifying agent relations explicitly as influencers and reactors to generate scene compliant predictions. Our work is related to [23, 25], which predict interaction types before predicting scene compliant trajectories, but we further exploit the structure of the decoupled relations and the influence of low-level influencer trajectories, as opposed to only providing the high-level interaction labels as the input to the trajectory predictor.
|
| 49 |
+
|
| 50 |
+

|
| 51 |
+
Figure 2. Overview of M2I. The relation predictor predicts influencer-reactor relations for interacting agents. The marginal predictor generates marginal predictions for the influencer. The conditional predictor generates predictions for the reactor, conditioned on each influencer trajectory. The sample selector chooses a subset of representative joint samples as output.
|
| 52 |
+
|
| 53 |
+
# 2.2. Conditional Trajectory Prediction
|
| 54 |
+
|
| 55 |
+
Conditional prediction approaches study the correlations between future agent trajectories, by predicting trajectories conditioned on the future trajectory of another agent [20,35,39]. These approaches often rely on the future trajectory of the autonomous vehicle or a robot whose future plan is known to the predictor. Our work goes beyond by conditioning on the future trajectory of another agent to be predicted. Despite the prediction errors of the conditioned agent, we show that our model outperforms marginal predictors that do not account for the interactive correlations.
|
| 56 |
+
|
| 57 |
+
# 3. Approach
|
| 58 |
+
|
| 59 |
+
In this section, we introduce a formal problem formulation and an overview of M2I, followed by detailed explanations of each model used in the approach.
|
| 60 |
+
|
| 61 |
+
# 3.1. Problem Formulation
|
| 62 |
+
|
| 63 |
+
Given observed states $X = (M, S)$ , including the map states $M$ and the observed states $S$ of all agents in a scene, the goal is to predict the future states of the interacting agents $Y$ up to a finite horizon $T$ . We assume the interacting agents are pre-labeled in a given scene, which is available in common interactive prediction datasets such as [10, 45]. As the distribution over $Y$ is a joint distribution over multiple agents, we approximate it as the factorization over a marginal distribution and a conditional distribution:
|
| 64 |
+
|
| 65 |
+
$$
|
| 66 |
+
P (Y | X) = P \left(Y _ {I}, Y _ {R} | X\right) \approx P \left(Y _ {I} | X\right) P \left(Y _ {R} | X, Y _ {I}\right). \tag {1}
|
| 67 |
+
$$
|
| 68 |
+
|
| 69 |
+
The factorization in Eq. (1) first assigns the interacting agents as the influencer $Y_{I}$ and the reactor $Y_{R}$ , and decouples the joint distribution as the marginal distribution over the influencer and the conditional distribution over the reactor. This factorization allows us to reduce the complexity of learning a joint distribution to learning more tractable distributions. In the case where two agents are not interacting, the factorization can be simplified as two marginal distributions:
|
| 70 |
+
|
| 71 |
+
$$
|
| 72 |
+
P (Y | X) \approx P \left(Y _ {I} | X\right) P \left(Y _ {R} | X\right), \tag {2}
|
| 73 |
+
$$
|
| 74 |
+
|
| 75 |
+
where there is no conditional dependence between the agents. Such independence is presumed by many marginal prediction models that predict the marginal distribution without considering other agents in the scene.
|
| 76 |
+
|
| 77 |
+
We focus on two interactive agents in this paper and aim to tackle the pairwise interactive trajectory prediction problem proposed by [10]. For scenarios involving more than two interactive agents, our approach can be modified by predicting the relations over all the agents and chaining multiple marginal and conditional distributions together, assuming no loopy influence:
|
| 78 |
+
|
| 79 |
+
$$
|
| 80 |
+
P _ {N > 2} (Y | X) \approx \prod_ {i = 1} ^ {N} P \left(Y _ {i} | X, \mathbf {Y} _ {i} ^ {\inf }\right), \tag {3}
|
| 81 |
+
$$
|
| 82 |
+
|
| 83 |
+
where $N$ is the number of total interactive agents, and $\mathbf{Y}_i^{\mathrm{inf}}$ is the set of influencer agents for agent $i$ predicted by the relation predictor. We refer to examples of multi-agent relation predictions in Appendix C.
|
| 84 |
+
|
| 85 |
+
# 3.2. Model Overview
|
| 86 |
+
|
| 87 |
+
Our proposed approach M2I is summarized in Fig. 2. It includes a relation predictor to predict the influencer and the reactor in a scene, a marginal predictor to predict future trajectories of the influencer, a conditional predictor to predict future trajectories of the reactor conditioned on the future trajectory of the influencer, and a sample selector to select a set of representative joint prediction samples. Although M2I includes three different learned models, they share the same encoder-decoder structure and adopt the same context encoder to learn context information, as illustrated in Fig. 3. The conditional predictor takes an augmented scene context input that includes the influencer future trajectory to learn reactive behaviors for the reactor. In the following, we introduce each model with more details.
|
| 88 |
+
|
| 89 |
+
# 3.3. Relation Predictor
|
| 90 |
+
|
| 91 |
+
We propose a relation predictor to classify whether an interacting agent is an influencer or a reactor, based on the pass yield relation between two agents. Similar to [23], we assume three types of relations: PASS, YIELD, and NONE,
|
| 92 |
+
|
| 93 |
+

|
| 94 |
+
Figure 3. M2I includes three models that share the same context encoder. The relation predictor includes a relation prediction head to predict distribution over relation types. The marginal predictor adopts a trajectory prediction head to produce multi-modal prediction samples. The conditional trajectory predictor takes an augmented scene context input as the influencer future trajectory.
|
| 95 |
+
|
| 96 |
+
and determine the relation using the following heuristics. Given two agent future trajectories $y_{1}$ and $y_{2}$ with $T$ steps, we first compute the closest spatial distance between two agents to determine whether a pass yield relation exists:
|
| 97 |
+
|
| 98 |
+
$$
|
| 99 |
+
d _ {I} = \min _ {\tau_ {1} = 1} ^ {T} \min _ {\tau_ {2} = 1} ^ {T} | | y _ {1} ^ {\tau_ {1}} - y _ {2} ^ {\tau_ {2}} | | _ {2}. \tag {4}
|
| 100 |
+
$$
|
| 101 |
+
|
| 102 |
+
If $d_I > \epsilon_d$, where $\epsilon_d$ is a dynamic threshold depending on the agent size, the agents never get too close to each other and thus we label the relation type as NONE. Otherwise, we obtain the time step at which each agent reaches the closest spatial distance, such that:
|
| 103 |
+
|
| 104 |
+
$$
|
| 105 |
+
t _ {1} = \arg \min _ {\tau_ {1} = 1} ^ {T} \min _ {\tau_ {2} = 1} ^ {T} \| y _ {1} ^ {\tau_ {1}} - y _ {2} ^ {\tau_ {2}} \| _ {2}, \tag {5}
|
| 106 |
+
$$
|
| 107 |
+
|
| 108 |
+
$$
|
| 109 |
+
t _ {2} = \arg \min _ {\tau_ {2} = 1} ^ {T} \min _ {\tau_ {1} = 1} ^ {T} \| y _ {1} ^ {\tau_ {1}} - y _ {2} ^ {\tau_ {2}} \| _ {2}. \tag {6}
|
| 110 |
+
$$
|
| 111 |
+
|
| 112 |
+
When $t_1 > t_2$ , we define that agent 1 yields to agent 2, as it takes longer for agent 1 to reach the interaction point. Otherwise, we define that agent 1 passes agent 2.
|
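As a concrete illustration, the labeling heuristic in Eqs. (4)-(6) can be sketched in a few lines of NumPy. The trajectory shapes and the constant distance threshold below are assumptions made for illustration; the paper uses a dynamic threshold based on agent size.

```python
import numpy as np

def label_relation(y1, y2, eps_d=2.0):
    """Label the relation between two agents from their future trajectories.

    y1, y2: (T, 2) arrays of future (x, y) positions.
    eps_d:  distance threshold; a fixed value here is an assumption, the
            paper uses a dynamic threshold depending on the agent size.
    Returns "NONE", "AGENT1_YIELDS" (agent 1 is the reactor), or
    "AGENT1_PASSES" (agent 1 is the influencer).
    """
    # Pairwise distances over all time-step combinations, Eq. (4).
    dists = np.linalg.norm(y1[:, None, :] - y2[None, :, :], axis=-1)  # (T, T)
    if dists.min() > eps_d:
        return "NONE"
    # Time steps at which the closest distance is reached, Eqs. (5)-(6).
    t1, t2 = np.unravel_index(dists.argmin(), dists.shape)
    return "AGENT1_YIELDS" if t1 > t2 else "AGENT1_PASSES"
```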
| 113 |
+
|
| 114 |
+
After labeling the training data with three interaction types, we propose an encoder-decoder-based model to classify an input scenario into a distribution over these types. As shown in Fig. 3, the relation predictor model consists of a context encoder that extracts the context information, including the observed states of the interacting agents and nearby agents and map coordinates, into a hidden vector, as well as a relation prediction head that outputs the probability over each relation type. There is a rich set of literature on learning context information from a traffic scene, such as [7, 12, 14, 28]. Our model could utilize any existing context encoder thanks to its modular design, and we defer a detailed explanation of our choice in Sec. 4. The relation prediction head consists of one layer of multi-layer perceptron (MLP) to output the probability logits over each relation.
|
| 115 |
+
|
| 116 |
+
The loss to train the relation predictor is defined as:
|
| 117 |
+
|
| 118 |
+
$$
|
| 119 |
+
\mathcal {L} _ {\text {r e l a t i o n}} = \mathcal {L} _ {c e} (R, \hat {R}), \tag {7}
|
| 120 |
+
$$
|
| 121 |
+
|
| 122 |
+
where $\mathcal{L}_{ce}$ is the cross entropy loss, $R$ is the predicted relation distribution, and $\hat{R}$ is the ground truth relation.
|
| 123 |
+
|
| 124 |
+
Given the predicted relation, we can assign each agent as an influencer or a reactor. If the relation is NONE, both agents are treated as influencers, such that their future behaviors are independent of each other, as in Eq. (2). If the relation is agent 1 yielding to agent 2, we assign agent 1 as the reactor and agent 2 as the influencer. If the relation is agent 1 passing agent 2, we flip the influencer and reactor labels.
|
| 125 |
+
|
| 126 |
+
# 3.4. Marginal Trajectory Predictor
|
| 127 |
+
|
| 128 |
+
We propose a marginal trajectory predictor for the influencer based on an encoder-decoder structure, as shown in Fig. 3, which is widely adopted in the trajectory prediction literature [10, 14, 46]. The predictor utilizes the same context encoder as in Sec. 3.3, and generates a set of prediction samples associated with confidence scores using a trajectory prediction head. Although our approach can take an arbitrary prediction head, we focus on an anchor-free goal-based prediction head because of its outstanding performance in trajectory prediction benchmarks, and defer a detailed explanation in Sec. 4.
|
| 129 |
+
|
| 130 |
+
# 3.5. Conditional Trajectory Predictor
|
| 131 |
+
|
| 132 |
+
The conditional trajectory predictor is similar to the marginal predictor, except that it takes an augmented scene context that includes the future trajectory of the influencer, as shown in Fig. 3. This allows the features of the influencer future trajectory to be extracted and learned in the same way as other context features. The encoded scene feature is used by the trajectory prediction head, which shares the same model as in the marginal predictor, to produce multimodal prediction samples.
|
| 133 |
+
|
| 134 |
+
# 3.6. Sample Selector
|
| 135 |
+
|
| 136 |
+
Given the predicted relations of the influencer and the reactor, we predict $N$ samples with confidence scores (or
|
| 137 |
+
|
| 138 |
+
probabilities) for the influencer using the marginal predictor, and for each influencer sample, we predict $N$ samples for the reactor using the conditional predictor. The number of joint samples is thus $N^2$ , and the probability of each joint sample is a product of the marginal probability and the conditional probability. We further reduce the size of the joint samples to $K$ as evaluating each prediction sample for downstream tasks such as risk assessment can be expensive [41]. In M2I, we select the $K$ samples from $N^2$ candidates with the highest joint likelihoods.
|
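A minimal sketch of this selection step, assuming the marginal and conditional samples are already available as arrays; the shapes and names are illustrative, not the authors' interfaces.

```python
import numpy as np

def select_joint_samples(infl_trajs, infl_probs, react_trajs, react_probs, K=6):
    """Select the K most likely joint samples out of N*N candidates.

    infl_trajs:  (N, T, 2) influencer samples, infl_probs: (N,) scores.
    react_trajs: (N, N, T, 2) reactor samples, react_trajs[i, j] conditioned
                 on influencer sample i, react_probs: (N, N) scores.
    """
    joint_probs = infl_probs[:, None] * react_probs            # (N, N) joint likelihoods
    top = np.argsort(joint_probs, axis=None)[::-1][:K]          # top-K flat indices
    selected = []
    for idx in top:
        i, j = np.unravel_index(idx, joint_probs.shape)
        selected.append((infl_trajs[i], react_trajs[i, j], joint_probs[i, j]))
    return selected
```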
| 139 |
+
|
| 140 |
+
# 3.7. Inference
|
| 141 |
+
|
| 142 |
+
At inference time, we generate the joint predictions following the procedure illustrated in Fig. 2. First, we call the relation predictor and choose the interaction relation with the highest probability. Second, for the predicted influencer, we generate $N$ trajectory samples using the marginal predictor. Third, for each influencer sample, we generate $N$ samples for the predicted reactor using the conditional predictor. Fourth, we use the sample selector to select $K$ representative samples from $N^2$ candidates. In the case where the predicted relation is none, we use the marginal predictor for both agents to obtain $N^2$ trajectory pairs, and follow the same sample selection step.
|
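The four-step procedure can be summarized by the sketch below; the three model callables and their signatures are assumptions made for illustration, not the actual interfaces used in the paper.

```python
def m2i_inference(ctx, relation_model, marginal_model, conditional_model, N=6, K=6):
    """Sketch of M2I inference: relation -> influencer samples -> reactor
    samples conditioned on each influencer sample -> top-K joint samples."""
    relation, influencer, reactor = relation_model(ctx)               # step 1
    infl_samples = marginal_model(ctx, influencer, n=N)               # step 2: [(traj, prob)]
    joint = []
    for infl_traj, p_i in infl_samples:                                # step 3
        if relation == "NONE":
            react_samples = marginal_model(ctx, reactor, n=N)
        else:
            react_samples = conditional_model(ctx, reactor, infl_traj, n=N)
        joint += [((infl_traj, r_traj), p_i * p_r) for r_traj, p_r in react_samples]
    joint.sort(key=lambda s: s[1], reverse=True)                       # step 4
    return joint[:K]
```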
| 143 |
+
|
| 144 |
+
# 4. Experiments
|
| 145 |
+
|
| 146 |
+
In this section, we introduce the dataset benchmark and details of the model, followed by a series of experiments to demonstrate the effectiveness of M2I.
|
| 147 |
+
|
| 148 |
+
# 4.1. Dataset
|
| 149 |
+
|
| 150 |
+
We train and validate M2I in the Waymo Open Motion Dataset (WOMD), a large-scale driving dataset collected from realistic traffic scenarios. We focus on the interactive prediction task to predict the joint future trajectories of two interacting agents for the next 8 seconds with 80 time steps, given the observations, including 1.1 seconds of agent states with 11 time steps that may include missing observations and the map state. The dataset includes 204,166 scenarios in the training set and 43,479 examples in the validation set. The dataset provides labels on which agents are likely to interact, yet it does not specify how they interact. During training, we pre-label the interaction type (yield, pass, or none) of the interacting agents according to Sec. 3.3.
|
| 151 |
+
|
| 152 |
+
# 4.2. Metrics
|
| 153 |
+
|
| 154 |
+
We follow the WOMD benchmark by using the following metrics: minADE measures the average displacement error between the ground truth future joint trajectory and the closest predicted sample out of $K = 6$ joint samples. This metric has been widely adopted since [16] to measure the prediction error against a multi-modal distribution. minFDE
|
| 155 |
+
|
| 156 |
+
measures the final displacement error between the ground truth end positions in the joint trajectory and the closest predicted end positions from the $K$ joint samples. Miss rate (MR) measures the percentage of scenarios in which none of the $K$ joint prediction samples is within a given lateral and longitudinal threshold of the ground truth trajectory. The threshold depends on the initial velocity of the predicted agents; more details are described in [10]. Overlap rate (OR) measures the level of scene compliance as the percentage of cases where the predicted trajectory of any agent overlaps with the predicted trajectories of other agents. This metric only considers the most likely joint prediction sample. A lower overlap rate indicates that the predictions are more scene compliant. In this paper, we slightly modify the metric definition compared to the original WOMD version, which considers overlaps among all objects including those not being predicted, so that we directly measure the overlap between the predicted agents. Mean average precision (mAP) measures the area under the precision-recall curve of the prediction samples given their confidence scores. Compared to the minADE/minFDE metrics, which are only measured against the best sample regardless of its score, mAP measures the quality of the confidence scores and penalizes false positive predictions [10]. It is the official ranking metric used by the WOMD benchmark and we refer to [10] for the implementation.
|
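For reference, the joint minADE/minFDE metrics described above can be sketched as follows. This is a simplified illustration; the official WOMD implementation additionally handles velocity-dependent miss thresholds and per-type breakdowns.

```python
import numpy as np

def joint_min_ade_fde(pred, gt):
    """pred: (K, A, T, 2) joint samples over A agents; gt: (A, T, 2)."""
    err = np.linalg.norm(pred - gt[None], axis=-1)      # (K, A, T) displacement errors
    min_ade = err.mean(axis=(1, 2)).min()               # average over agents and time, best sample
    min_fde = err[:, :, -1].mean(axis=1).min()          # final step only, best sample
    return min_ade, min_fde
```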
| 157 |
+
|
| 158 |
+
# 4.3. Model Details
|
| 159 |
+
|
| 160 |
+
We present the detailed implementation of our model and training procedure in the following sections.
|
| 161 |
+
|
| 162 |
+
# 4.3.1 Context Encoder
|
| 163 |
+
|
| 164 |
+
The context encoder leverages both vectorized and rasterized representations to encode traffic context. Vectorized representation takes the traffic context, including observed agent states and map states, as vectors. It is efficient at covering a large spatial space. Rasterized representation draws traffic context on a single image with multiple channels and excels at capturing geometrical information. Both representations have achieved top performance in trajectory prediction benchmarks such as Argoverse and WOMD [6, 10, 12, 14, 15].
|
| 165 |
+
|
| 166 |
+
In M2I, we use the best of both worlds. First, we leverage a vector encoder based on VectorNet [12] that takes observed agent trajectories and lane segments as a set of polylines. Each polyline is a set of vectors that connect neighboring points together. For each polyline, the vector encoder runs an MLP to encode the feature of vectors within the polyline and a graph neural network to encode their dependencies followed by a max-pooling layer to summarize the feature of all the vectors. The polyline features, including agent polyline features and map polyline features, are processed by cross attention to obtain the final agent feature that includes information on the map and nearby agents. We
|
| 167 |
+
|
| 168 |
+
refer to [12] for detailed implementations.
|
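A rough sketch of a single VectorNet-style polyline encoder as described above (an MLP over the vectors of a polyline followed by max-pooling); the feature sizes are assumptions and the cross-attention stage that mixes agent and map polylines is omitted.

```python
import torch
import torch.nn as nn

class PolylineEncoder(nn.Module):
    """Encode one polyline (a set of vectors) into a single feature vector."""
    def __init__(self, vec_dim=8, hidden=128):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(vec_dim, hidden),
            nn.LayerNorm(hidden),
            nn.ReLU(),
        )

    def forward(self, vectors):              # vectors: (num_vectors, vec_dim)
        feats = self.mlp(vectors)             # (num_vectors, hidden)
        return feats.max(dim=0).values        # max-pool to one polyline feature
```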
| 169 |
+
|
| 170 |
+
In addition to encoding the vectorized feature, we utilize a second encoder to learn features from a rasterized representation. Following [14], we first rasterize the input states into an image with 60 channels, encoding the positions of the agents at each past time step together with the map information. The size of the image is $224 \times 224$ and each pixel represents an area of $1\mathrm{m} \times 1\mathrm{m}$. We run a pre-trained VGG16 [36] model as the encoder to obtain the rasterized feature. The output of the context encoder is a concatenation of the vectorized feature and the rasterized feature.
|
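A simplified sketch of the rasterization step for one channel, assuming a 224x224 grid at 1 m per pixel centered on the target agent; the full 60-channel raster with map layers is omitted.

```python
import numpy as np

def rasterize_positions(agent_xy, center, size=224, res=1.0):
    """Draw agent (x, y) positions into one channel of the raster image."""
    img = np.zeros((size, size), dtype=np.float32)
    px = ((agent_xy - center) / res + size / 2).astype(int)   # world -> pixel coords
    valid = (px >= 0).all(axis=1) & (px < size).all(axis=1)
    img[px[valid, 1], px[valid, 0]] = 1.0                      # row = y, col = x
    return img
```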
| 171 |
+
|
| 172 |
+
**Conditional Context Encoder** The context encoder in the conditional trajectory predictor processes the additional influencer future trajectory in the following ways. First, the future trajectory is added to the vectorized representation as an extra vector when running VectorNet. In parallel, we add 80 extra channels to the rasterized representation and draw the $(x,y)$ positions over 80 time steps in the next 8 seconds. We run the pre-trained VGG16 model to encode the augmented image, and combine the output feature with the vectorized feature as the final output.
|
| 173 |
+
|
| 174 |
+
# 4.3.2 Relation Prediction Head
|
| 175 |
+
|
| 176 |
+
The relation prediction head has one layer of MLP with one fully connected layer for classification. The MLP has a hidden size of 128, followed by a layer normalization layer and a ReLU activation layer. The output is the logits over three types of relations, as described in Sec. 3.3.
|
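The head described above maps the encoded context feature to logits over the three relation types; a sketch in PyTorch, where the context feature dimension is an assumption.

```python
import torch.nn as nn

class RelationHead(nn.Module):
    """One MLP layer (hidden size 128, LayerNorm, ReLU) plus a linear classifier."""
    def __init__(self, ctx_dim=128, hidden=128, num_relations=3):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(ctx_dim, hidden),
            nn.LayerNorm(hidden),
            nn.ReLU(),
        )
        self.classifier = nn.Linear(hidden, num_relations)

    def forward(self, ctx_feature):
        return self.classifier(self.mlp(ctx_feature))   # logits over PASS/YIELD/NONE
```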
| 177 |
+
|
| 178 |
+
# 4.3.3 Trajectory Prediction Head
|
| 179 |
+
|
| 180 |
+
The trajectory prediction head adopts DenseTNT [15] to generate multi-modal future predictions for its outstanding performance in the marginal prediction benchmarks. It first predicts the distribution of the agent goals as a heatmap, through a lane scoring module that identifies likely lanes to follow, a feature encoding module that uses the attention mechanism to extract features between goals and lanes, and a probability estimation module that predicts the likelihood of goals. Next, the prediction head regresses the full trajectory over the prediction horizon conditioned on the goal. The prediction head can be combined with the context encoder and trained end-to-end.
|
| 181 |
+
|
| 182 |
+
# 4.3.4 Training Details
|
| 183 |
+
|
| 184 |
+
At training time, we train each model, including the relation predictor, marginal predictor, and conditional predictor, separately. Each model is trained on the training set from WOMD with a batch size of 64 for 30 epochs on 8 Nvidia RTX 3080 GPUs. The data is batched randomly. We use an Adam optimizer and a learning rate scheduler that decays the learning rate by $30\%$ every 5 epochs, with an initial value of 1e-3. The hidden size in the model is 128, if not specified. We observe consistent performance
|
| 185 |
+
|
| 186 |
+
over different learning rates and batch sizes. When training the conditional predictor, we use the teacher forcing technique by providing the ground truth future trajectory of the influencer agent.
|
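The optimization setup described above (Adam, initial learning rate 1e-3, decayed by 30% every 5 epochs for 30 epochs) corresponds to the following sketch; the predictor and the data loop are stubbed with placeholders.

```python
import torch
import torch.nn as nn

model = nn.Linear(128, 128)   # placeholder for the actual predictor
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=5, gamma=0.7)  # -30% every 5 epochs

for epoch in range(30):
    # ... one pass over the WOMD training split with batch size 64 ...
    scheduler.step()
```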
| 187 |
+
|
| 188 |
+
# 4.4. Quantitative Results
|
| 189 |
+
|
| 190 |
+
In Tab. 1, we compare our model with the following baselines, including the top-ranked published models on the WOMD interaction prediction challenge leaderboard [1]: Waymo LSTM Baseline [10] is the official baseline provided by the benchmark. It leverages an LSTM encoder to encode observed agent trajectories, and an MLP-based prediction head to generate multiple samples. Waymo Full Baseline [10] is an extended version of the Waymo LSTM Baseline that leverages a set of auxiliary encoders to encode context information. SceneTransformer [32] is a transformer-based model that leverages attention to combine features across road graphs and agent interactions both spatially and temporally. The model achieves state-of-the-art performance in the WOMD benchmark in both the marginal prediction task and the interactive prediction task. HeatIRm4 [30] models the agent interaction as a directed edge feature graph and leverages an attention network to extract interaction features. It was the winner of the 2021 WOMD challenge. $\mathbf{AIR}^2$ [42] adopts a marginal anchor-based model using a raster representation. The model generates joint predictions by combining marginal predictions from each agent. It achieved the top performance at the WOMD challenge. Baseline Marginal is our baseline model that leverages the same marginal predictor as M2I to generate $N$ marginal prediction samples for both agents, without considering their future interactions. When combining the marginal predictions into joint predictions, we take the top $K$ marginal pairs out of $N^2$ options according to their joint probabilities, computed as the product of the marginal probabilities. This is a common practice to combine marginal predictions into joint predictions, as in [4, 10]. Baseline Joint is our baseline model that jointly predicts the goals and trajectories for both interacting agents, using the same context encoder and the trajectory prediction head as in M2I. As the joint goal space grows exponentially with the number of agents, we can only afford a small number of goal candidates for each agent. To ease the computational complexity, we leverage a marginal predictor to predict the top 80 goals for each agent and obtain $80\times 80$ goal pairs for joint goal and trajectory prediction. As a result, this baseline trades off prediction accuracy against computational feasibility by using a reduced set of goals.
|
| 191 |
+
|
| 192 |
+
# 4.4.1 Validation Set
|
| 193 |
+
|
| 194 |
+
We present the results in the interactive validation set in the top half of Tab. 1, where the baseline results are reported as in [10,32]. Our model M2I outperforms both Waymo baselines in terms of all metrics. Compared to the current state-
|
| 195 |
+
|
| 196 |
+
<table><tr><td rowspan="2">Set</td><td rowspan="2">Model</td><td colspan="3">Vehicle (8s)</td><td colspan="3">Pedestrian (8s)</td><td colspan="3">Cyclist (8s)</td><td>All (8s)</td></tr><tr><td>mFDE ↓</td><td>MR ↓</td><td>mAP ↑</td><td>mFDE ↓</td><td>MR ↓</td><td>mAP ↑</td><td>mFDE ↓</td><td>MR ↓</td><td>mAP ↑</td><td>mAP ↑</td></tr><tr><td rowspan="6">Val.</td><td>Waymo LSTM Baseline [10]</td><td>-</td><td>0.88</td><td>0.01</td><td>-</td><td>0.93</td><td>0.02</td><td>-</td><td>0.98</td><td>0.00</td><td>0.01</td></tr><tr><td>Waymo Full Baseline [10]</td><td>6.07</td><td>0.66</td><td>0.08</td><td>4.20</td><td>1.00</td><td>0.00</td><td>6.46</td><td>0.83</td><td>0.01</td><td>0.03</td></tr><tr><td>SceneTransformer [32]</td><td>3.99</td><td>0.49</td><td>0.11</td><td>3.15</td><td>0.62</td><td>0.06</td><td>4.69</td><td>0.71</td><td>0.04</td><td>0.07</td></tr><tr><td>Baseline Marginal</td><td>6.26</td><td>0.60</td><td>0.16</td><td>3.59</td><td>0.63</td><td>0.04</td><td>6.47</td><td>0.76</td><td>0.03</td><td>0.07</td></tr><tr><td>Baseline Joint</td><td>11.31</td><td>0.64</td><td>0.14</td><td>3.44</td><td>0.93</td><td>0.01</td><td>7.16</td><td>0.82</td><td>0.01</td><td>0.05</td></tr><tr><td>M2I</td><td>5.49</td><td>0.55</td><td>0.18</td><td>3.61</td><td>0.60</td><td>0.06</td><td>6.26</td><td>0.73</td><td>0.04</td><td>0.09</td></tr><tr><td rowspan="5">Test</td><td>Waymo LSTM Baseline [10]</td><td>12.40</td><td>0.87</td><td>0.01</td><td>6.85</td><td>0.92</td><td>0.00</td><td>10.84</td><td>0.97</td><td>0.00</td><td>0.00</td></tr><tr><td>HeatIRm4 [30]</td><td>7.20</td><td>0.80</td><td>0.07</td><td>4.06</td><td>0.80</td><td>0.05</td><td>6.69</td><td>0.85</td><td>0.01</td><td>0.04</td></tr><tr><td>AIR² [42]</td><td>5.00</td><td>0.64</td><td>0.10</td><td>3.68</td><td>0.71</td><td>0.04</td><td>5.47</td><td>0.81</td><td>0.04</td><td>0.05</td></tr><tr><td>SceneTransformer [32]</td><td>4.08</td><td>0.50</td><td>0.10</td><td>3.19</td><td>0.62</td><td>0.05</td><td>4.65</td><td>0.70</td><td>0.04</td><td>0.06</td></tr><tr><td>M2I</td><td>5.65</td><td>0.57</td><td>0.16</td><td>3.73</td><td>0.60</td><td>0.06</td><td>6.16</td><td>0.74</td><td>0.03</td><td>0.08</td></tr></table>
|
| 197 |
+
|
| 198 |
+
of-the-art model SceneTransformer, M2I achieves a better mAP, the official ranking metric, over vehicles, and a better miss rate over pedestrians. Although M2I has higher minFDE errors, it improves the mAP over all agents (the rightmost column) by a large margin, meaning our model generates a more accurate distribution using its predicted confidence scores and outputs fewer false positive predictions. In addition, as our proposed approach does not assume a specific prediction model, it could leverage SceneTransformer as the context encoder to achieve better minFDE, and we leave this as future work. When compared with our own baselines that share the same context encoder and prediction head, M2I outperforms the marginal predictor, which assumes independence between two agents, and a joint predictor, which only affords a small set of goal candidates due to computational constraints.
|
| 199 |
+
|
| 200 |
+
# 4.4.2 Testing Set
|
| 201 |
+
|
| 202 |
+
We show the results in the interactive test set in the bottom half of Tab. 1. For a fair comparison, we use the numbers reported on the official benchmark website [1] and only include the published models. Similar to the observations from the validation set, we observe that M2I improves mAP metrics by a large margin, compared to past WOMD interaction prediction challenge winners [30,42] and the existing state-of-the-art model [32].
|
| 203 |
+
|
| 204 |
+
# 4.5. Ablation Study
|
| 205 |
+
|
| 206 |
+
We present ablation studies on the relation predictor, conditional predictor, and generalization to other predictors.
|
| 207 |
+
|
| 208 |
+
# 4.5.1 Relation Prediction
|
| 209 |
+
|
| 210 |
+
We measure the performance of our relation predictor on the validation dataset and observe an accuracy of $90.09\%$ . We verify the significance of an accurate relation predictor by comparing the performance of vehicle trajectory predictions
|
| 211 |
+
|
| 212 |
+
Table 1. Joint metrics on the interactive validation and test set. The best performed metrics are bolded and the grey cells indicate the ranking metric used by the WOMD benchmark. M2I outperforms both Waymo baselines and challenge winners. Compared to the current state-of-the-art model SceneTransformer, it improves the mAP metric by a large margin over vehicles and all agents, demonstrating its advantage in learning a more accurate probability distribution and producing fewer false positive predictions.
|
| 213 |
+
|
| 214 |
+
<table><tr><td>Model</td><td>minADE ↓</td><td>minFDE ↓</td><td>MR ↓</td><td>mAP ↑</td></tr><tr><td>M2I Marginal</td><td>1.70</td><td>3.45</td><td>0.23</td><td>0.30</td></tr><tr><td>M2I Conditional GT</td><td>1.46</td><td>2.43</td><td>0.12</td><td>0.41</td></tr><tr><td>M2I Conditional P1</td><td>1.75</td><td>3.49</td><td>0.25</td><td>0.26</td></tr></table>
|
| 215 |
+
|
| 216 |
+
Table 2. Comparison between the marginal predictor and the conditional predictor over marginal metrics for vehicle reactors at 8s.
|
| 217 |
+
|
| 218 |
+
using the predicted relations and using the ground truth relations, and observe a gap of $3.05\%$ in terms of mAP at 8s.
|
| 219 |
+
|
| 220 |
+
# 4.5.2 Conditional Prediction
|
| 221 |
+
|
| 222 |
+
We validate the effectiveness of our conditional predictor by comparing its performance against the marginal predictor (M2I Marginal) for vehicle reactor trajectory prediction. The results are summarized in Tab. 2. When the conditional predictor takes the ground truth future trajectory of the influencer agent (cf. M2I Conditional GT), it generates predictions for the reactor agent with better performance across all metrics. This validates our hypothesis on the dependence between the influencer trajectory and the reactor trajectory. As the ground truth trajectories are not available at inference time, we present the prediction results when the conditional predictor takes the best predicted influencer trajectory as M2I Conditional P1. It is not surprising to see that the performance is inferior to the marginal predictor results, due to errors in influencer prediction. However, as we show in Tab. 1, our model is able to outperform the marginal baseline model by including more than one sample from the influencer and selecting the most likely joint samples.
|
| 223 |
+
|
| 224 |
+
# 4.5.3 Generalizing to Other Predictors
|
| 225 |
+
|
| 226 |
+
We demonstrate that our proposed approach can be extended to other existing predictor models to validate its generalizability. In this experiment, we replace the context encoder with VectorNet [12] and the prediction head with
|
| 227 |
+
|
| 228 |
+

|
| 229 |
+
Figure 4. Example prediction using Baseline Marginal (left) and M2I (right). The marginal predictor produces overlapping and inaccurate predictions. M2I successfully identifies the influencer and reactor (the predicted relation type is annotated next to the current position of each agent) in a challenging interactive scene and achieves better prediction accuracy and scene compliance.
|
| 230 |
+
|
| 231 |
+

|
| 232 |
+
|
| 233 |
+
<table><tr><td>Model</td><td>minADE ↓</td><td>minFDE ↓</td><td>OR ↓</td><td>mAP ↑</td></tr><tr><td>TNT Marginal</td><td>3.43</td><td>8.72</td><td>0.42</td><td>0.10</td></tr><tr><td>TNT Joint</td><td>5.30</td><td>14.07</td><td>0.34</td><td>0.13</td></tr><tr><td>TNT M2I</td><td>3.38</td><td>8.46</td><td>0.20</td><td>0.14</td></tr></table>
|
| 234 |
+
|
| 235 |
+
Table 3. Joint metrics on the interactive validation set for vehicles at 8s. We replace the context encoder and the prediction head in M2I and baselines with a different model. We observe a similar trend in performance improvement, especially over OR and mAP, which validates the generalizability of our proposed approach.
|
| 236 |
+
|
| 237 |
+
TNT [46], which is an anchor-based goal-conditioned prediction model, and obtain a variant of M2I named TNT M2I. We compare this variant with a marginal predictor baseline (TNT Marginal) and a joint predictor baseline (TNT Joint) using the same VectorNet and TNT backbones.
|
| 238 |
+
|
| 239 |
+
The results, summarized in Tab. 3, show that our approach consistently improves all metrics, especially OR and mAP, by a large margin when using a different predictor model. The improvements indicate that our proposed approach generalizes to other predictors and generates scene compliant and accurate future trajectories.
|
| 240 |
+
|
| 241 |
+
# 4.6. Qualitative Results
|
| 242 |
+
|
| 243 |
+
We present a challenging interactive scenario<sup>1</sup> in Fig. 4, and visualize the most likely prediction sample from a marginal baseline and M2I. In this scenario, the red agent is yielding to the blue agent who is making a U-turn. The marginal predictor on the left fails to capture the interaction and predicts overlapping trajectories. On the other hand, M2I successfully identifies the underlying interaction relation, and predicts an accurate trajectory for the influencer and an accurate reactor trajectory that reacts to the predicted influencer trajectory. As a result, M2I achieves better prediction accuracy and scene compliance.
# 5. Conclusion
In conclusion, we propose a simple but effective joint prediction framework M2I through marginal and conditional predictors, by exploiting the factorized relations between interacting agents. M2I uses a modular encoder-decoder architecture, allowing it to choose from a variety of context encoders and prediction heads. Experiments on the interactive Waymo Open Motion Dataset benchmark show that our framework achieves state-of-the-art performance. In the ablation study, we show the generalization of our framework using a different predictor model.
Limitations We identify the following limitations. First, there exists a gap between our model and the state of the art in terms of the minFDE metric, indicating that our approach still has room for improvement. Thanks to its modular design, we plan to extend M2I to use SceneTransformer [32] as the context encoder to close this gap. Second, the performance of M2I depends heavily on the amount of interactive training data, especially when training the relation predictor and the conditional trajectory predictor. Looking at Tab. 1, we see that our approach improves the mAP metrics by a large margin on vehicles because of the sufficient vehicle interactions in the training data, but the improvement is smaller for the other two types due to the lack of interactive scenarios involving pedestrians and cyclists. Finally, M2I assumes no mutual influence between interacting agents, allowing it to decouple joint agent distributions into marginal and conditional distributions. While we observed an obvious influencer, according to our heuristics, in almost all interactive scenarios in the Waymo Open Motion Dataset, we leave prediction in more complicated scenarios involving mutual influence (and loopy influence among more than two agents) to future work.
# References
[1] Waymo open motion dataset interaction prediction. https://waymo.com/open/challenges/2021/interaction-prediction/, 2021. [Online; Accessed November 16th 2021]. 6, 7
[2] Alexandre Alahi, Kratarth Goel, Vignesh Ramanathan, Alexandre Robicquet, Li Fei-Fei, and Silvio Savarese. Social LSTM: Human trajectory prediction in crowded spaces. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 961–971, 2016. 2
[3] Sergio Casas, Cole Gulino, Renjie Liao, and Raquel Urtasun. SpAGNN: Spatially-aware graph neural networks for relational behavior forecasting from sensor data. In 2020 IEEE International Conference on Robotics and Automation (ICRA), pages 9491-9497. IEEE, 2020. 2
[4] Sergio Casas, Cole Gulino, Simon Suo, Katie Luo, Renjie Liao, and Raquel Urtasun. Implicit latent variable model for scene-consistent motion forecasting. In Proceedings of the European Conference on Computer Vision (ECCV). Springer, 2020. 2, 6
[5] Yuning Chai, Benjamin Sapp, Mayank Bansal, and Dragomir Anguelov. MultiPath: Multiple probabilistic anchor trajectory hypotheses for behavior prediction. In Conference on Robot Learning (CoRL), 2019. 1, 2
[6] Ming-Fang Chang, John Lambert, Patsorn Sangkloy, Jagjeet Singh, Slawomir Bak, Andrew Hartnett, De Wang, Peter Carr, Simon Lucey, Deva Ramanan, et al. Argoverse: 3d tracking and forecasting with rich maps. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 8748-8757, 2019. 2, 5
[7] Henggang Cui, Vladan Radosavljevic, Fang-Chieh Chou, Tsung-Han Lin, Thi Nguyen, Tzu-Kuo Huang, Jeff Schneider, and Nemanja Djuric. Multimodal trajectory predictions for autonomous driving using deep convolutional networks. In 2019 International Conference on Robotics and Automation (ICRA), pages 2090-2096. IEEE, 2019. 4
[8] Shengzhe Dai, Zhiheng Li, Li Li, Nanning Zheng, and Shuofeng Wang. A flexible and explainable vehicle motion prediction and inference framework combining semi-supervised aog and st-lstm. IEEE Transactions on Intelligent Transportation Systems, 2020. 2
[9] Nachiket Deo and Mohan M Trivedi. Multi-modal trajectory prediction of surrounding vehicles with maneuver based lstms. In 2018 IEEE Intelligent Vehicles Symposium (IV), pages 1179-1184. IEEE, 2018. 2
[10] Scott Ettinger, Shuyang Cheng, Benjamin Caine, Chenxi Liu, Hang Zhao, Sabeek Pradhan, Yuning Chai, Ben Sapp, Charles R Qi, Yin Zhou, et al. Large scale interactive motion forecasting for autonomous driving: The waymo open motion dataset. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 9710-9719, 2021. 2, 3, 4, 5, 6, 7
[11] Liangji Fang, Qinhong Jiang, Jianping Shi, and Bolei Zhou. TPNet: Trajectory proposal network for motion prediction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 6797-6806, 2020. 2
[12] Jiyang Gao, Chen Sun, Hang Zhao, Yi Shen, Dragomir Anguelov, Congcong Li, and Cordelia Schmid. VectorNet: Encoding hd maps and agent dynamics from vectorized representation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 11525-11533, 2020. 1, 4, 5, 6, 7
[13] Thomas Gilles, Stefano Sabatini, Dzmitry Tsishkou, Bogdan Stanciulescu, and Fabien Moutarde. GOHOME: Graph-oriented heatmap output for future motion estimation. arXiv preprint arXiv:2109.01827, 2021. 2
[14] Thomas Gilles, Stefano Sabatini, Dzmitry Tsishkou, Bogdan Stanciulescu, and Fabien Moutarde. HOME: Heatmap output for future motion estimation. In IEEE International Intelligent Transportation Systems Conference (ITSC), pages 500-507. IEEE, 2021. 4, 5, 6
[15] Junru Gu, Chen Sun, and Hang Zhao. DenseTNT: End-to-end trajectory prediction from dense goal sets. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 15303-15312, 2021. 1, 2, 5, 6, 11
[16] Agrim Gupta, Justin Johnson, Li Fei-Fei, Silvio Savarese, and Alexandre Alahi. Social GAN: Socially acceptable trajectories with generative adversarial networks. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 2255–2264, 2018. 2, 5
[17] Dirk Helbing and Peter Molnar. Social force model for pedestrian dynamics. Physical review E, 51(5):4282, 1995. 2
[18] Xin Huang, Stephen G McGill, Jonathan A DeCastro, Luke Fletcher, John J Leonard, Brian C Williams, and Guy Rosman. DiversityGAN: Diversity-aware vehicle motion prediction via latent semantic sampling. IEEE Robotics and Automation Letters, 5(4):5089-5096, 2020. 2
[19] Xin Huang, Guy Rosman, Igor Gilitschenski, Ashkan Jasour, Stephen G McGill, John J Leonard, and Brian C Williams. HYPER: Learned hybrid trajectory prediction via factored inference and adaptive sampling. In International Conference on Robotics and Automation (ICRA), 2022. 2
[20] Siddhesh Khandelwal, William Qi, Jagjeet Singh, Andrew Hartnett, and Deva Ramanan. What-if motion prediction for autonomous driving. arXiv preprint arXiv:2008.10587, 2020. 3
[21] ByeoungDo Kim, Seong Hyeon Park, Seokhwan Lee, Elbek Khoshimjonov, Dongsuk Kum, Junsoo Kim, Jeong Soo Kim, and Jun Won Choi. LaPred: Lane-aware prediction of multimodal future trajectories of dynamic agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14636-14645, 2021. 2
[22] Vineet Kosaraju, Amir Sadeghian, Roberto Martin-Martin, Ian Reid, Hamid Rezatofighi, and Silvio Savarese. SocialBiGAT: Multimodal trajectory forecasting using bicycle-gan and graph attention networks. Advances in Neural Information Processing Systems, 32, 2019. 2
[23] Sumit Kumar, Yiming Gu, Jerrick Hoang, Galen Clark Haynes, and Micol Marchetti-Bowick. Interaction-based trajectory prediction over a hybrid traffic graph. In 2021 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 5530-5535. IEEE, 2020. 2, 3
[24] Yen-Ling Kuo, Xin Huang, Andrei Barbu, Stephen G McGill, Boris Katz, John J Leonard, and Guy Rosman. Trajectory prediction with linguistic representations. In International Conference on Robotics and Automation (ICRA), 2022. 2
[25] Donsuk Lee, Yiming Gu, Jerrick Hoang, and Micol Marchetti-Bowick. Joint interaction and trajectory prediction for autonomous driving using graph neural networks. arXiv preprint arXiv:1912.07882, 2019. 2
[26] Namhoon Lee, Wongun Choi, Paul Vernaza, Christopher B Choy, Philip HS Torr, and Manmohan Chandraker. DESIRE: Distant future prediction in dynamic scenes with interacting agents. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 336-345, 2017. 1, 2
[27] Lingyun Luke Li, Bin Yang, Ming Liang, Wenyuan Zeng, Mengye Ren, Sean Segal, and Raquel Urtasun. End-to-end contextual perception and prediction with interaction transformer. In 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS), pages 5784-5791. IEEE, 2020. 2
[28] Ming Liang, Bin Yang, Rui Hu, Yun Chen, Renjie Liao, Song Feng, and Raquel Urtasun. Learning lane graph representations for motion forecasting. In European Conference on Computer Vision, pages 541-556. Springer, 2020. 1, 4
[29] Karttikeya Mangalam, Harshayu Girase, Shreyas Agarwal, Kuan-Hui Lee, Ehsan Adeli, Jitendra Malik, and Adrien Gaidon. It is not the journey but the destination: Endpoint conditioned trajectory prediction. In Proceedings of the European Conference on Computer Vision (ECCV), August 2020. 2
[30] Xiaoyu Mo, Zhiyu Huang, and Chen Lv. Multi-modal interactive agent trajectory prediction using heterogeneous edge-enhanced graph attention network. Workshop on Autonomous Driving, CVPR, 2021. 6, 7
[31] Abduallah Mohamed, Kun Qian, Mohamed Elhoseiny, and Christian Claudel. Social-STGCNN: A social spatiotemporal graph convolutional neural network for human trajectory prediction. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 14424-14432, 2020. 2
[32] Jiquan Ngiam, Benjamin Caine, Vijay Vasudevan, Zhengdong Zhang, Hao-Tien Lewis Chiang, Jeffrey Ling, Rebecca Roelofs, Alex Bewley, Chenxi Liu, Ashish Venugopal, et al. Scene Transformer: A unified architecture for predicting multiple agent trajectories. In International Conference on Learning Representations (ICLR), 2022. 2, 6, 7, 8
[33] Nitin Kamra, Hao Zhu, Dweep Trivedi, Ming Zhang, and Yan Liu. Multi-agent trajectory prediction with fuzzy query attention. In Advances in Neural Information Processing Systems (NeurIPS), 2020. 2
[34] Nicholas Rhinehart, Rowan McAllister, Kris Kitani, and Sergey Levine. PRECOG: Prediction conditioned on goals in visual multi-agent settings. In Proceedings of the IEEE International Conference on Computer Vision, pages 2821-2830, 2019. 2
[35] Tim Salzmann, Boris Ivanovic, Punarjay Chakravarty, and Marco Pavone. Trajectron++: Multi-agent generative trajectory forecasting with heterogeneous data for control. In Proceedings of the European Conference on Computer Vision (ECCV). Springer, 2020. 2, 3
[36] Karen Simonyan and Andrew Zisserman. Very deep convolutional networks for large-scale image recognition. arXiv preprint arXiv:1409.1556, 2014. 6
[37] Haoran Song, Di Luan, Wenchao Ding, Michael Y Wang, and Qifeng Chen. Learning to predict vehicle trajectories with model-based planning. In Conference on Robot Learning (CoRL), 2021. 2
[38] Charlie Tang and Russ R Salakhutdinov. Multiple futures prediction. In Advances in Neural Information Processing Systems, pages 15424-15434, 2019. 1, 2
[39] Ekaterina Tolstaya, Reza Mahjourian, Carlton Downey, Balakrishnan Vadarajan, Benjamin Sapp, and Dragomir Anguelov. Identifying driver interactions via conditional behavior prediction. In 2021 IEEE International Conference on Robotics and Automation (ICRA), pages 3473-3479. IEEE, 2021. 2, 3
[40] T. van der Heiden, N. S. Nagaraja, C. Weiss, and E. Gavves. SafeCritic: Collision-aware trajectory prediction. In British Machine Vision Conference Workshop, 2019. 2
[41] Allen Wang, Xin Huang, Ashkan Jasour, and Brian Williams. Fast risk assessment for autonomous vehicles using learned models of agent futures. In Robotics: Science and Systems, 2020. 5
[42] David Wu and Yunan Wu. $\mathrm{Air}^2$ for interaction prediction. Workshop on Autonomous Driving, CVPR, 2021. 6, 7
[43] Kota Yamaguchi, Alexander C Berg, Luis E Ortiz, and Tamara L Berg. Who are you with and where are you going? In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1345-1352, 2011. 2
[44] Ye Yuan and Kris Kitani. Diverse trajectory forecasting with determinantal point processes. In International Conference on Learning Representations (ICLR), 2020. 2
[45] Wei Zhan, Liting Sun, Di Wang, Haojie Shi, Aubrey Clausse, Maximilian Naumann, Julius Kummerle, Hendrik Konigshof, Christoph Stiller, Arnaud de La Fortelle, et al. Interaction dataset: An international, adversarial and cooperative motion dataset in interactive driving scenarios with semantic maps. arXiv preprint arXiv:1910.03088, 2019. 3
[46] Hang Zhao, Jiyang Gao, Tian Lan, Chen Sun, Benjamin Sapp, Balakrishnan Varadarajan, Yue Shen, Yi Shen, Yuning Chai, Cordelia Schmid, et al. TNT: Target-driven trajectory prediction. In Conference on Robot Learning (CoRL), 2020. 2, 4, 8
[47] Tianyang Zhao, Yifei Xu, Mathew Monfort, Wongun Choi, Chris Baker, Yibiao Zhao, Yizhou Wang, and Ying Nian Wu. Multi-agent tensor fusion for contextual trajectory prediction. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 12126-12134, 2019. 2
# Appendix
# A. Additional Experiment Details
In this section, we introduce additional details on filtering interactive training data, training the baseline joint predictor, and training by agent types.
# A.1 Filtering Interactive Training Data
The Waymo Open Motion Dataset only provides interactive scenarios in its validation and testing sets. To filter interactive scenarios in the training set, we implement a script that identifies scenarios containing two interacting agents based on the objects_of_interest mask provided in the data. The script is provided in the source code.
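As a rough illustration of this filtering step (not the authors' released script), the sketch below assumes the `waymo_open_dataset` `scenario_pb2` protos and TFRecord shards of the training split; the file pattern, function name, and the exactly-two-agents check are our own assumptions.

```python
# Hypothetical sketch of filtering interactive training scenarios; the released
# script may differ. Assumes the waymo-open-dataset package and TensorFlow.
import glob
import tensorflow as tf
from waymo_open_dataset.protos import scenario_pb2

def find_interactive_scenarios(tfrecord_glob):
    """Yield IDs of scenarios whose objects_of_interest mask marks two interacting agents."""
    for shard in sorted(glob.glob(tfrecord_glob)):
        for record in tf.data.TFRecordDataset(shard):
            scenario = scenario_pb2.Scenario.FromString(record.numpy())
            # Keep scenarios that flag exactly two interacting agents.
            if len(scenario.objects_of_interest) == 2:
                yield scenario.scenario_id

if __name__ == "__main__":
    ids = list(find_interactive_scenarios("training/training.tfrecord-*"))
    print(f"{len(ids)} interactive training scenarios found")
```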
# A.2 Baseline Joint Predictor
We train the Baseline Joint predictor described in Sec. 4.4 as follows. First, we predict the distribution of goals for each interacting agent as a heatmap, according to [15]. Second, we select the top 80 goals based on the predicted probability for each agent. Third, we combine the selected goals into 6400 goal pairs and run each goal pair feature, including $(x,y)$ positions for both goals, through a 2-layer MLP with a hidden size of 128 followed by a normalization layer and a ReLU activation layer. Fourth, we run a fully connected layer to predict the probability logit for each goal pair, and train the joint goal prediction model through the following loss:
$$
\mathcal{L}_{J} = \mathcal{L}_{ce}(J, \hat{J}), \tag{8}
$$
where $\mathcal{L}_{ce}$ is the cross entropy loss, $J$ is the predicted goal pair distribution, and $\hat{J}$ is the index of the goal pair out of all candidates that is the closest to the ground truth goal pair in terms of Euclidean distance. Given the predicted goal pairs, we train the trajectory completion model to regress the full trajectories of both interacting agents following the same procedure in [15].
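To make the goal-pair scoring concrete, the following is a minimal PyTorch-style sketch of the joint goal prediction head described above (top-K goal selection, a pairwise MLP with a normalization layer and ReLU, one logit per pair, and the cross-entropy loss of Eq. (8)). The module and tensor names are our own illustrative choices, not the released implementation.

```python
# Illustrative sketch of the Baseline Joint goal-pair scoring head; shapes and
# names are assumptions, not the paper's released code.
import torch
import torch.nn as nn
import torch.nn.functional as F

class JointGoalScorer(nn.Module):
    def __init__(self, top_k=80, hidden=128):
        super().__init__()
        self.top_k = top_k
        # 2-layer MLP over the (x, y) positions of both goals in a pair,
        # followed by a normalization layer and a ReLU activation.
        self.mlp = nn.Sequential(
            nn.Linear(4, hidden), nn.ReLU(),
            nn.Linear(hidden, hidden), nn.LayerNorm(hidden), nn.ReLU(),
        )
        self.logit = nn.Linear(hidden, 1)  # probability logit per goal pair

    def forward(self, goals_a, probs_a, goals_b, probs_b):
        # goals_*: [G, 2] candidate goals per agent, probs_*: [G] goal probabilities.
        top_a = goals_a[probs_a.topk(self.top_k).indices]   # [K, 2]
        top_b = goals_b[probs_b.topk(self.top_k).indices]   # [K, 2]
        pairs = torch.cat(
            [top_a[:, None, :].expand(-1, self.top_k, -1),
             top_b[None, :, :].expand(self.top_k, -1, -1)], dim=-1
        ).reshape(-1, 4)                                     # [K*K, 4] goal pairs
        return self.logit(self.mlp(pairs)).squeeze(-1)       # [K*K] logits

def joint_goal_loss(logits, gt_pair_index):
    # Cross entropy against the index of the candidate pair closest to the
    # ground truth goal pair (Eq. (8)); gt_pair_index is a scalar LongTensor.
    return F.cross_entropy(logits.unsqueeze(0), gt_pair_index.view(1))
```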
# A.3 Training by Agent Types
The Waymo Open Motion Dataset consists of three types of agents to predict: vehicles, pedestrians, and cyclists. As each agent type has different behavior models and the distribution is unbalanced among types (e.g. vehicle types account for $78\%$ of the training data), we train the marginal trajectory predictor and the conditional trajectory predictor for each agent type separately. We observe that the prediction performance over pedestrians and cyclists improves by a large margin, compared to training a single model for all agents.
For the same reason, we train four relation predictors for vehicle-vehicle interactions, vehicle-pedestrian interactions, vehicle-cyclist interactions, and interactions that cover the remaining agent pair types, including cyclist-pedestrian, cyclist-cyclist, and pedestrian-pedestrian interactions.
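As a small illustration of this grouping (the bucket names and function are ours, not the paper's code), one might route an interacting agent pair to its relation predictor as follows:

```python
# Hypothetical routing of an agent pair to one of the four relation predictors
# described above; the keys and type strings are our own assumptions.
VEHICLE, PEDESTRIAN, CYCLIST = "vehicle", "pedestrian", "cyclist"

def relation_predictor_key(type_a, type_b):
    pair = frozenset((type_a, type_b))
    if pair == {VEHICLE}:
        return "vehicle-vehicle"
    if pair == {VEHICLE, PEDESTRIAN}:
        return "vehicle-pedestrian"
    if pair == {VEHICLE, CYCLIST}:
        return "vehicle-cyclist"
    # Remaining pair types (cyclist-pedestrian, cyclist-cyclist,
    # pedestrian-pedestrian) share a single predictor.
    return "other"
```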
# B. Additional Qualitative Examples
We present additional representative examples in a variety of interaction settings to showcase the advantage of M2I over the marginal baseline.
# B.1 Influencer Overtakes Reactor
In Fig. 5, we present three examples in which the influencer overtakes the reactor. In each example, M2I successfully predicts the correct relation type and improves prediction accuracy and scene compliance, while the marginal predictor predicts overlapping trajectories without considering the future interaction between agents.
# B.2 Reactor Yields to Influencer before Turning
In Fig. 6, we present three examples in which the reactor waits for the influencer to pass before turning. In each example, M2I successfully predicts the correct relation type and the accurate reactive trajectories for the reactor. On the other hand, the marginal predictor ignores the interaction and results in less accurate predictions.
# B.3 Reactor Merges behind Influencer
In Fig. 7, we present two examples in which the reactor merges behind the influencer after the influencer passes. In each example, M2I successfully predicts the correct relation type and the accurate reactor trajectories that follow the influencer, while the marginal predictor fails to account for the interaction and predicts trajectories far away from the ground truth.
# C. Multi-Agent Generalization
We present a qualitative analysis on applying M2I to multi-agent scenarios involving more than two agents. In Fig. 8, we show two examples in which M2I provides scene compliant relation predictions in crowded traffic. Given the relation predictions, it is straightforward to predict the agent trajectories through marginal and conditional predictors, as in Eq. (3).







Figure 5. Influencer overtakes reactor. In each example, M2I (right column) successfully predicts the correct relation type and improves prediction accuracy and scene compliance, while the marginal predictor (left column) predicts overlapping trajectories without considering the future interaction between agents.







Figure 6. Reactor yields to influencer before turning. In each example, M2I (right column) successfully predicts the correct relation type and the accurate reactive trajectories for the reactor. On the other hand, the marginal predictor (left column) ignores the interaction and results in less accurate predictions.





Figure 7. Reactor merges behind influencer. In each example, M2I (right column) successfully predicts the correct relation type and the accurate reactor trajectories that follow the influencer, while the marginal predictor (left column) fails to account for the interaction and predicts trajectories far away from the ground truth.


Figure 8. Examples of M2I providing scene compliant relation predictions in complex multi-agent scenarios.
2202.11xxx/2202.11884/images.zip
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:b619bedfd0b41f187961f920a1e67ab9c27d002bf72fb1c8f2e14b17379508a7
size 989802
2202.11xxx/2202.11884/layout.json
ADDED
The diff for this file is too large to render.
See raw diff
2202.11xxx/2202.11907/13c6c70f-eb0a-4855-9c93-c58436de7c44_content_list.json
ADDED
@@ -0,0 +1,1155 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Uncertainty-driven Planner for Exploration and Navigation",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
151,
|
| 8 |
+
88,
|
| 9 |
+
834,
|
| 10 |
+
112
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Georgios Georgakis<sup>1</sup>, Bernadette Bucher<sup>1</sup>, Anton Arapin<sup>2</sup>, Karl Schmeckpeper<sup>1</sup>, Nikolai Matni<sup>1</sup>, and Kostas Daniilidis<sup>1</sup>",
|
| 17 |
+
"bbox": [
|
| 18 |
+
194,
|
| 19 |
+
132,
|
| 20 |
+
792,
|
| 21 |
+
165
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Abstract—We consider the problems of exploration and point-goal navigation in previously unseen environments, where the spatial complexity of indoor scenes and partial observability constitute these tasks challenging. We argue that learning occupancy priors over indoor maps provides significant advantages towards addressing these problems. To this end, we present a novel planning framework that first learns to generate occupancy maps beyond the field-of-view of the agent, and second leverages the model uncertainty over the generated areas to formulate path selection policies for each task of interest. For point-goal navigation the policy chooses paths with an upper confidence bound policy for efficient and traversable paths, while for exploration the policy maximizes model uncertainty over candidate paths. We perform experiments in the visually realistic environments of Matterport3D using the Habitat simulator and demonstrate: 1) Improved results on exploration and map quality metrics over competitive methods, and 2) The effectiveness of our planning module when paired with the state-of-the-art DD-PPO method for the point-goal navigation task.",
|
| 28 |
+
"bbox": [
|
| 29 |
+
81,
|
| 30 |
+
202,
|
| 31 |
+
488,
|
| 32 |
+
443
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "I. INTRODUCTION",
|
| 39 |
+
"text_level": 1,
|
| 40 |
+
"bbox": [
|
| 41 |
+
218,
|
| 42 |
+
454,
|
| 43 |
+
352,
|
| 44 |
+
467
|
| 45 |
+
],
|
| 46 |
+
"page_idx": 0
|
| 47 |
+
},
|
| 48 |
+
{
|
| 49 |
+
"type": "text",
|
| 50 |
+
"text": "A major prerequisite towards true autonomy is the ability to navigate and explore novel environments. This problem is usually studied in the context of specific tasks such as reaching a specified point goal [1], finding a semantic target [2], or covering as much area as possible while building a map. Each of these tasks has its own idiosyncrasies, but all of them represent examples where one must often reason beyond what is currently observed and incorporate the uncertainty over the inferred information into the decision making process. For example, in point-goal navigation it is important to predict whether a certain path can lead to a dead-end. Likewise, in exploration strong confidence over a particular region's representation may prompt the agent to visit new areas of the map.",
|
| 51 |
+
"bbox": [
|
| 52 |
+
81,
|
| 53 |
+
473,
|
| 54 |
+
488,
|
| 55 |
+
684
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "We investigate the tasks of point-goal navigation and exploration, and propose a planning module that leverages contextual occupancy priors. These priors are learned by a",
|
| 62 |
+
"bbox": [
|
| 63 |
+
81,
|
| 64 |
+
685,
|
| 65 |
+
488,
|
| 66 |
+
731
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "Research was sponsored by the Army Research Office and was accomplished under Grant Number W911NF-20-1-0080. The views and conclusions contained in this document are those of the authors and should not be interpreted as representing the official policies, either expressed or implied, of the Army Research Office or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for Government purposes notwithstanding any copyright notation herein. Further support was provided by the following grants: NSF IIS 1703319, NSF MRI 1626008, NSF TRIPODS 1934960, NSF CPS 2038873, ARL DCIST CRA W911NF-17-2-0181, ONR N00014-17-1-2093, the DARPA-SRC C-BRIC, CAREER award ECCS-2045834, and a Google Research Scholar award.",
|
| 73 |
+
"bbox": [
|
| 74 |
+
81,
|
| 75 |
+
742,
|
| 76 |
+
488,
|
| 77 |
+
866
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "list",
|
| 83 |
+
"sub_type": "text",
|
| 84 |
+
"list_items": [
|
| 85 |
+
"<sup>1</sup>GRASP Laboratory, Department of Computer and Information Science, University of Pennsylvania, Philadelphia, PA 19104. ggeorgqak@seas.upenn.edu",
|
| 86 |
+
"$^{2}$ Department of Computer Science, The University of Chicago, Chicago, IL, 60637. aarapin@uchicago.edu"
|
| 87 |
+
],
|
| 88 |
+
"bbox": [
|
| 89 |
+
81,
|
| 90 |
+
867,
|
| 91 |
+
488,
|
| 92 |
+
926
|
| 93 |
+
],
|
| 94 |
+
"page_idx": 0
|
| 95 |
+
},
|
| 96 |
+
{
|
| 97 |
+
"type": "text",
|
| 98 |
+
"text": "map predictor module that is trained to estimate occupancy values outside the field-of-view of the agent. Using the epistemic (model) uncertainty associated with these predictions we define objectives for path selection for each task of interest. Earlier work in this field focused mainly on learning how to actively control the agent for the purpose of reducing the uncertainty over the map [3] (Active SLAM), without considering navigation tasks in the process, while methods that did consider navigation often operated in relatively simple environments of artificially placed cylindrical obstacles [4], [5].",
|
| 99 |
+
"bbox": [
|
| 100 |
+
504,
|
| 101 |
+
202,
|
| 102 |
+
913,
|
| 103 |
+
366
|
| 104 |
+
],
|
| 105 |
+
"page_idx": 0
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"type": "text",
|
| 109 |
+
"text": "With the recent introduction of realistic and visually complex environments serving as navigation benchmarks [6], [7], the focus shifted on learning-based end-to-end approaches [8], [9], [10]. While end-to-end formulations that map pixels directly to actions are attractive in terms of their simplicity, they require very large quantities of training data. For instance, DD-PPO [10] needs 2.5 billion frames of experience to reach its state-of-the-art performance on Gibson [7]. On the other hand, modular approaches [11], [12], [13] are able to encode prior information into explicit map representations and are thus much more sample efficient. Our method falls into the latter category, but differs from other approaches by its use of the uncertainty over predictions outside the field-of-view of the agent during the planning stage. In contrast to [13], [12] this allows our method more flexibility when defining goal selection objectives, and does not require re-training between different tasks.",
|
| 110 |
+
"bbox": [
|
| 111 |
+
504,
|
| 112 |
+
367,
|
| 113 |
+
911,
|
| 114 |
+
625
|
| 115 |
+
],
|
| 116 |
+
"page_idx": 0
|
| 117 |
+
},
|
| 118 |
+
{
|
| 119 |
+
"type": "text",
|
| 120 |
+
"text": "In this paper, we introduce Uncertainty-driven Planner for Exploration and Navigation (UPEN), in which we propose a planning algorithm that is informed by predictions over unobserved areas. Through this spatial prediction approach our model learns layout patterns that can guide a planner towards preferable paths in unknown environments. More specifically, we first train an ensemble of occupancy map predictor models by learning to hallucinate top-down occupancy regions from unobserved areas. Then, a Rapidly Exploring Random-Trees [14] (RRT) algorithm generates a set of candidate paths. We select paths from these candidates using epistemic (model) uncertainty associated with a path traversibility estimate as measured by the disagreement of ensemble models [15], [16], and we choose appropriate short-term goals based on the task of interest. Our contributions are as follows:",
|
| 121 |
+
"bbox": [
|
| 122 |
+
504,
|
| 123 |
+
625,
|
| 124 |
+
913,
|
| 125 |
+
864
|
| 126 |
+
],
|
| 127 |
+
"page_idx": 0
|
| 128 |
+
},
|
| 129 |
+
{
|
| 130 |
+
"type": "text",
|
| 131 |
+
"text": "- We propose UPEN, a novel planning framework that leverages learned layout priors and formulates uncertainty-based objectives for path selection in exploration and navigation tasks.",
|
| 132 |
+
"bbox": [
|
| 133 |
+
522,
|
| 134 |
+
866,
|
| 135 |
+
911,
|
| 136 |
+
926
|
| 137 |
+
],
|
| 138 |
+
"page_idx": 0
|
| 139 |
+
},
|
| 140 |
+
{
|
| 141 |
+
"type": "aside_text",
|
| 142 |
+
"text": "arXiv:2202.11907v1 [cs.RO] 24 Feb 2022",
|
| 143 |
+
"bbox": [
|
| 144 |
+
22,
|
| 145 |
+
263,
|
| 146 |
+
57,
|
| 147 |
+
705
|
| 148 |
+
],
|
| 149 |
+
"page_idx": 0
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"type": "image",
|
| 153 |
+
"img_path": "images/3101d742b01884f030f2e6a318fa21e8680f97ec696e0a5c3d93f96c382dc13f.jpg",
|
| 154 |
+
"image_caption": [
|
| 155 |
+
"Fig. 1: Occupancy map prediction (blue-occupied, green-free) and uncertainty estimation for a time-step $t$ . The egocentric depth observation is first ground-projected and passed through an ensemble $f$ of encoder-decoder models that each infers information in unobserved areas $(\\hat{m}_t)$ . Each $\\hat{m}_t$ is then registered to a separate global map $M_t$ . The final occupancy probabilities and model uncertainty are given by the mean and variance over the set of global maps."
|
| 156 |
+
],
|
| 157 |
+
"image_footnote": [],
|
| 158 |
+
"bbox": [
|
| 159 |
+
145,
|
| 160 |
+
65,
|
| 161 |
+
851,
|
| 162 |
+
215
|
| 163 |
+
],
|
| 164 |
+
"page_idx": 1
|
| 165 |
+
},
|
| 166 |
+
{
|
| 167 |
+
"type": "list",
|
| 168 |
+
"sub_type": "text",
|
| 169 |
+
"list_items": [
|
| 170 |
+
"- We show improved exploration results over competitive methods on the Matterport3D [17] dataset.",
|
| 171 |
+
"- We demonstrate the effectiveness of our planner when used to complement existing end-to-end methods on the point-goal navigation task."
|
| 172 |
+
],
|
| 173 |
+
"bbox": [
|
| 174 |
+
99,
|
| 175 |
+
304,
|
| 176 |
+
486,
|
| 177 |
+
378
|
| 178 |
+
],
|
| 179 |
+
"page_idx": 1
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "text",
|
| 183 |
+
"text": "II. RELATED WORK",
|
| 184 |
+
"text_level": 1,
|
| 185 |
+
"bbox": [
|
| 186 |
+
212,
|
| 187 |
+
391,
|
| 188 |
+
359,
|
| 189 |
+
404
|
| 190 |
+
],
|
| 191 |
+
"page_idx": 1
|
| 192 |
+
},
|
| 193 |
+
{
|
| 194 |
+
"type": "text",
|
| 195 |
+
"text": "a) Navigation approaches: Traditional approaches to visual navigation focus on building a 3D metric map of the environment [18], [3] before using that representation for any downstream navigation tasks, which does not lend itself favourably for task-driven learnable representations that can capture contextual cues. The recent introduction of large-scale indoor environments and simulators [7], [17], [6] has fuelled a slew of learning based methods for indoor navigation tasks [1] such as point-goal [10], [19], [20], [21], [22], object-goal [23], [24], [25], [26], [27], and image-goal [8], [28], [29]. Modular approaches which incorporate explicit or learned map representations [11], [23], [25] have shown to outperform end-to-end methods on tasks such as object-goal, however, this is not currently the case for the point-goal [10], [20] task. In our work, we demonstrate how an uncertainty-driven planning module can favourably complement DD-PPO [10], a competitive method on point-goal navigation, and show increased performance in challenging episodes.",
|
| 196 |
+
"bbox": [
|
| 197 |
+
81,
|
| 198 |
+
412,
|
| 199 |
+
488,
|
| 200 |
+
684
|
| 201 |
+
],
|
| 202 |
+
"page_idx": 1
|
| 203 |
+
},
|
| 204 |
+
{
|
| 205 |
+
"type": "text",
|
| 206 |
+
"text": "b) Exploration methods for navigation: A considerable amount of work was also devoted to planning efficient paths during map building, generally referred to as Active SLAM [30], [31], [32], [33], [34], [35]. For example, [32], [35] define information gain objectives based on the estimated uncertainty over the map in order to decide future actions, while [33] investigates different uncertainty measures. Recent methods focus on learning policies for efficient exploration either through coverage [9], [13], [36], [37] or map accuracy [12] reward functions. Furthermore, several works have gone beyond traditional mapping, and sought to predict maps for unseen regions [12], [38], [24], [27], [39] which further increased robustness in the decision making process. Our approach leverages the uncertainty over predicted occupancy maps for unobserved areas and shows its effectiveness on exploring a novel environment.",
|
| 207 |
+
"bbox": [
|
| 208 |
+
81,
|
| 209 |
+
685,
|
| 210 |
+
488,
|
| 211 |
+
926
|
| 212 |
+
],
|
| 213 |
+
"page_idx": 1
|
| 214 |
+
},
|
| 215 |
+
{
|
| 216 |
+
"type": "text",
|
| 217 |
+
"text": "c) Uncertainty estimation: To navigate in partially observed maps, uncertainty has been estimated across nodes in a path [4], [40], via the marginal probability of landmarks [5], and with the variance of model predictions across predicted maps [24], [41]. Furthermore, uncertainty-aware mapping has been shown to be effective in unknown and highly risky environments [42], [43]. In our work, we use uncertainty differently for exploration and point goal navigation. In exploration, we estimate uncertainty over a predicted occupancy map via the variance between models in an ensemble. This variance across the ensemble specifically estimates model (epistemic) uncertainty [44], [45]. We select paths by maximizing epistemic uncertainty as a proxy for maximizing information gain following prior work in exploration [16], [24]. In point goal navigation, we compute traversability scores for candidate paths using an ensemble of map predictors and compute uncertainty with respect to these traversability scores using the variance over the scores given by each model in the ensemble. We use this uncertainty regarding path traversability to construct an upper confidence bound policy for path selection to balance exploration and exploitation in point goal navigation [46], [47], [48], [24].",
|
| 218 |
+
"bbox": [
|
| 219 |
+
504,
|
| 220 |
+
304,
|
| 221 |
+
913,
|
| 222 |
+
636
|
| 223 |
+
],
|
| 224 |
+
"page_idx": 1
|
| 225 |
+
},
|
| 226 |
+
{
|
| 227 |
+
"type": "text",
|
| 228 |
+
"text": "III. APPROACH",
|
| 229 |
+
"text_level": 1,
|
| 230 |
+
"bbox": [
|
| 231 |
+
651,
|
| 232 |
+
642,
|
| 233 |
+
764,
|
| 234 |
+
655
|
| 235 |
+
],
|
| 236 |
+
"page_idx": 1
|
| 237 |
+
},
|
| 238 |
+
{
|
| 239 |
+
"type": "text",
|
| 240 |
+
"text": "We present an uncertainty-driven planning module for exploration and point-goal navigation tasks, which benefits from a learned occupancy map predictor module. Our approach takes as input the agent's egocentric depth observation and learns to predict regions of the occupancy map that are outside of the agent's field-of-view. Then it uses the uncertainty over those predictions to decide on a set of candidate paths generated by RRT. We define a separate policy to select a short-term goal along a path for each task of interest. In exploration we maximize uncertainty over the candidate paths, while for point-goal navigation we choose paths with an upper confidence bound policy for efficient and traversable paths. Finally, a local policy (DD-PPO [10]) predicts navigation actions to reach the short-term goal.",
|
| 241 |
+
"bbox": [
|
| 242 |
+
504,
|
| 243 |
+
660,
|
| 244 |
+
913,
|
| 245 |
+
871
|
| 246 |
+
],
|
| 247 |
+
"page_idx": 1
|
| 248 |
+
},
|
| 249 |
+
{
|
| 250 |
+
"type": "text",
|
| 251 |
+
"text": "A. Occupancy Map Prediction",
|
| 252 |
+
"text_level": 1,
|
| 253 |
+
"bbox": [
|
| 254 |
+
504,
|
| 255 |
+
878,
|
| 256 |
+
718,
|
| 257 |
+
891
|
| 258 |
+
],
|
| 259 |
+
"page_idx": 1
|
| 260 |
+
},
|
| 261 |
+
{
|
| 262 |
+
"type": "text",
|
| 263 |
+
"text": "The first component in our planning module aims to capture layout priors in indoor environments. Such information",
|
| 264 |
+
"bbox": [
|
| 265 |
+
504,
|
| 266 |
+
896,
|
| 267 |
+
911,
|
| 268 |
+
926
|
| 269 |
+
],
|
| 270 |
+
"page_idx": 1
|
| 271 |
+
},
|
| 272 |
+
{
|
| 273 |
+
"type": "image",
|
| 274 |
+
"img_path": "images/8a0481f2d9b3787ae919dd4731e301ffbd62a007165109aff10b37f51da5f317.jpg",
|
| 275 |
+
"image_caption": [
|
| 276 |
+
"Fig. 2: Examples of path selections for exploration (top row) and point-goal navigation (bottom-row) tasks. Given the model uncertainty and occupancy probabilities we first generate a set of paths which are evaluated either with an exploration objective (section III-B) or an upper confidence bound objective (section III-C). The agent position is denoted as a dark green dot, the goal is shown as magenta, and red dots signify short-term goals."
|
| 277 |
+
],
|
| 278 |
+
"image_footnote": [],
|
| 279 |
+
"bbox": [
|
| 280 |
+
143,
|
| 281 |
+
65,
|
| 282 |
+
854,
|
| 283 |
+
308
|
| 284 |
+
],
|
| 285 |
+
"page_idx": 2
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"type": "text",
|
| 289 |
+
"text": "can lead to a more intelligent decision making process for a downstream navigation task. Following the recent success of [12], [24] we formulate the occupancy map prediction as a semantic segmentation problem. Our model takes as input a depth image $D_{t}$ at time-step $t$ which is ground projected to an egocentric grid $m_t' \\in \\mathbb{R}^{|C| \\times h \\times w}$ , where $C$ is the set of classes containing unknown, occupied, and free, and $h, w$ are the dimensions of the local grid. The ground projection is carried out by first using the camera intrinsic parameters to unproject $D_{t}$ to a 3D point cloud and then map each 3D point to the $h \\times w$ grid coordinates: $x' = \\lfloor \\frac{x}{r} \\rfloor + \\frac{w - 1}{2}$ , $z' = \\lfloor \\frac{z}{r} \\rfloor + \\frac{h - 1}{2}$ , where $x', z'$ are the grid coordinates, $x, z$ are the 3D points, and $r$ is the grid cell size. Since the agent has a limited field of view, $m_t'$ represents a local incomplete top-down occupancy grid of the area surrounding the agent. Our objective is to predict the missing values and produce the complete local occupancy map $\\hat{m}_t \\in \\mathbb{R}^{|C| \\times h \\times w}$ . To do so, we pass $m_t'$ through an encoder-decoder UNet [49] model $f$ that outputs a prediction for each grid location over the set of classes $C$ . The model $f$ is trained with a pixel-wise cross-entropy loss:",
|
| 290 |
+
"bbox": [
|
| 291 |
+
84,
|
| 292 |
+
395,
|
| 293 |
+
488,
|
| 294 |
+
713
|
| 295 |
+
],
|
| 296 |
+
"page_idx": 2
|
| 297 |
+
},
|
| 298 |
+
{
|
| 299 |
+
"type": "equation",
|
| 300 |
+
"text": "\n$$\nL = - \\frac {1}{K} \\sum_ {k} ^ {K} \\sum_ {c} ^ {C} m _ {k, c} \\log \\hat {m} _ {k, c} \\tag {1}\n$$\n",
|
| 301 |
+
"text_format": "latex",
|
| 302 |
+
"bbox": [
|
| 303 |
+
176,
|
| 304 |
+
723,
|
| 305 |
+
488,
|
| 306 |
+
765
|
| 307 |
+
],
|
| 308 |
+
"page_idx": 2
|
| 309 |
+
},
|
| 310 |
+
{
|
| 311 |
+
"type": "text",
|
| 312 |
+
"text": "where $K = h \\times w$ corresponds to the number of cells in the local grid and $m_{k,c}$ is the ground-truth label for pixel $k$ . The ground-truth occupancy is generated by ground-projecting the available semantic information of the 3D scenes. To ensure diversity in the training examples, we sample training pairs across shortest paths between two randomly selected locations in a scene, where $m_t'$ can contain a variable number of ground-projected depth images. Unlike [12] we do not use the RGB images during training, as we have found that the aforementioned sampling strategy is sufficient for the model",
|
| 313 |
+
"bbox": [
|
| 314 |
+
81,
|
| 315 |
+
775,
|
| 316 |
+
490,
|
| 317 |
+
926
|
| 318 |
+
],
|
| 319 |
+
"page_idx": 2
|
| 320 |
+
},
|
| 321 |
+
{
|
| 322 |
+
"type": "text",
|
| 323 |
+
"text": "to converge. This enables us to define a smaller and less memory intensive model $f$ .",
|
| 324 |
+
"bbox": [
|
| 325 |
+
504,
|
| 326 |
+
395,
|
| 327 |
+
911,
|
| 328 |
+
425
|
| 329 |
+
],
|
| 330 |
+
"page_idx": 2
|
| 331 |
+
},
|
| 332 |
+
{
|
| 333 |
+
"type": "text",
|
| 334 |
+
"text": "During a navigation episode, we maintain a global map $M_t \\in \\mathbb{R}^{|C| \\times H \\times W}$ . Since $f$ predicts a probability distribution over the classes for each grid location, we register $\\hat{m}_t$ by updating $M_t$ using Bayes Theorem. The global map $M_t$ is initialized with a uniform prior probability distribution across all classes.",
|
| 335 |
+
"bbox": [
|
| 336 |
+
504,
|
| 337 |
+
426,
|
| 338 |
+
913,
|
| 339 |
+
513
|
| 340 |
+
],
|
| 341 |
+
"page_idx": 2
|
| 342 |
+
},
|
| 343 |
+
{
|
| 344 |
+
"type": "text",
|
| 345 |
+
"text": "B. Exploration Policy",
|
| 346 |
+
"text_level": 1,
|
| 347 |
+
"bbox": [
|
| 348 |
+
506,
|
| 349 |
+
527,
|
| 350 |
+
660,
|
| 351 |
+
542
|
| 352 |
+
],
|
| 353 |
+
"page_idx": 2
|
| 354 |
+
},
|
| 355 |
+
{
|
| 356 |
+
"type": "text",
|
| 357 |
+
"text": "The main goal of exploration task is to maximize map coverage which requires navigating to new map regions around obstacles. To this end, we propose selecting paths using uncertainty of our map predictions as an objective in our planning algorithm. We are explicitly minimizing map uncertainty by collecting observations to improve the predicted global map $M_{t}$ . Implicitly map coverage is maximized by minimizing map uncertainty because high coverage is required for predicting an accurate map with low uncertainty.",
|
| 358 |
+
"bbox": [
|
| 359 |
+
504,
|
| 360 |
+
547,
|
| 361 |
+
913,
|
| 362 |
+
683
|
| 363 |
+
],
|
| 364 |
+
"page_idx": 2
|
| 365 |
+
},
|
| 366 |
+
{
|
| 367 |
+
"type": "text",
|
| 368 |
+
"text": "We use the epistemic (model) uncertainty as an objective for exploration [45], [44], [16], [24]. In order to estimate epistemic uncertainty, we construct $f$ as an ensemble of $N$ occupancy prediction models defined over the parameters $\\{\\theta_1,\\dots,\\theta_N\\}$ . Variance between models in the ensemble comes from different random weight initializations in each network [16]. Our model estimates the true probability distribution $P(m_t|m_t')$ by averaging over sampled model weights, $P(m_t|m_t') \\approx \\mathbb{E}_{\\theta \\sim q(\\theta)}f(m_t';\\theta) \\approx \\frac{1}{N}\\sum_{i = 1}^{N}f(m_t';\\theta_i)$ where the parameters $\\theta$ are random variables sampled from the distribution $q(\\theta)[50]$ , [51]. Then, following prior work [15], [16], [24], the epistemic uncertainty can be approximated from the variance between the outputs of the models in the ensemble, $\\mathrm{Var}f(m_t';\\theta)$ .",
|
| 369 |
+
"bbox": [
|
| 370 |
+
504,
|
| 371 |
+
684,
|
| 372 |
+
913,
|
| 373 |
+
896
|
| 374 |
+
],
|
| 375 |
+
"page_idx": 2
|
| 376 |
+
},
|
| 377 |
+
{
|
| 378 |
+
"type": "text",
|
| 379 |
+
"text": "For path planning during exploration, our proposed objective can be used with any planner which generates a set $S$",
|
| 380 |
+
"bbox": [
|
| 381 |
+
504,
|
| 382 |
+
896,
|
| 383 |
+
911,
|
| 384 |
+
926
|
| 385 |
+
],
|
| 386 |
+
"page_idx": 2
|
| 387 |
+
},
|
| 388 |
+
{
|
| 389 |
+
"type": "text",
|
| 390 |
+
"text": "of candidate paths. Each path $s \\in S$ can be expressed as a subset of grid locations in our map. Each of these grid locations $k$ has an associated uncertainty estimate given by the variance between model predictions in our ensemble. We specify this uncertainty map as $u_{k} \\coloneqq \\operatorname{Var} f(m_{t}^{\\prime};\\theta) \\in \\mathbb{R}^{1 \\times h \\times w}$ . We use this map to score each path $s$ and construct the objective",
|
| 391 |
+
"bbox": [
|
| 392 |
+
81,
|
| 393 |
+
65,
|
| 394 |
+
488,
|
| 395 |
+
170
|
| 396 |
+
],
|
| 397 |
+
"page_idx": 3
|
| 398 |
+
},
|
| 399 |
+
{
|
| 400 |
+
"type": "equation",
|
| 401 |
+
"text": "\n$$\n\\underset {s \\in S} {\\arg \\max } \\frac {1}{| s |} \\sum_ {k \\in s} u _ {k} \\tag {2}\n$$\n",
|
| 402 |
+
"text_format": "latex",
|
| 403 |
+
"bbox": [
|
| 404 |
+
220,
|
| 405 |
+
170,
|
| 406 |
+
488,
|
| 407 |
+
204
|
| 408 |
+
],
|
| 409 |
+
"page_idx": 3
|
| 410 |
+
},
|
| 411 |
+
{
|
| 412 |
+
"type": "text",
|
| 413 |
+
"text": "which selects the path with the maximum average epistemic uncertainty on the traversed grid.",
|
| 414 |
+
"bbox": [
|
| 415 |
+
81,
|
| 416 |
+
210,
|
| 417 |
+
488,
|
| 418 |
+
239
|
| 419 |
+
],
|
| 420 |
+
"page_idx": 3
|
| 421 |
+
},
|
| 422 |
+
{
|
| 423 |
+
"type": "text",
|
| 424 |
+
"text": "In this work, we incorporate our uncertainty-based objective in RRT to plan to explore. We expand RRT for a set number of iterations, which generates candidate paths in random directions. We select between these paths using our objective from equation 2. In practice, equation 2 is evaluated over the accumulated global map $M_{t}$ . Figure 1 shows the occupancy map prediction and the uncertainty estimation process using the ensemble $f$ , while Figure 2 (top row) shows an example of path selection using the exploration objective.",
|
| 425 |
+
"bbox": [
|
| 426 |
+
81,
|
| 427 |
+
241,
|
| 428 |
+
488,
|
| 429 |
+
390
|
| 430 |
+
],
|
| 431 |
+
"page_idx": 3
|
| 432 |
+
},
|
| 433 |
+
{
|
| 434 |
+
"type": "text",
|
| 435 |
+
"text": "C. Point-goal Policy",
|
| 436 |
+
"text_level": 1,
|
| 437 |
+
"bbox": [
|
| 438 |
+
83,
|
| 439 |
+
400,
|
| 440 |
+
227,
|
| 441 |
+
415
|
| 442 |
+
],
|
| 443 |
+
"page_idx": 3
|
| 444 |
+
},
|
| 445 |
+
{
|
| 446 |
+
"type": "text",
|
| 447 |
+
"text": "In the problem of point-goal navigation, the objective is to efficiently navigate past obstacles to a given goal location from a starting position. We again use RRT as a planner which generates a set of paths $S$ between the agent's current location and the goal location. Thus, the primary objective when we select a path from these candidates to traverse is for the path to be unobstructed. Given a predicted occupancy map from model $i$ in our ensemble and a candidate path $s \\in S$ generated by our planner, we evaluate whether or not the path is obstructed by taking the maximum probability of occupancy in any grid cell $k$ along each path. Specifically,",
|
| 448 |
+
"bbox": [
|
| 449 |
+
81,
|
| 450 |
+
419,
|
| 451 |
+
488,
|
| 452 |
+
585
|
| 453 |
+
],
|
| 454 |
+
"page_idx": 3
|
| 455 |
+
},
|
| 456 |
+
{
|
| 457 |
+
"type": "equation",
|
| 458 |
+
"text": "\n$$\np _ {i, s} = \\max _ {k \\in s} \\left(\\hat {m} _ {k, o c c} ^ {i} | _ {k \\in s}\\right) \\tag {3}\n$$\n",
|
| 459 |
+
"text_format": "latex",
|
| 460 |
+
"bbox": [
|
| 461 |
+
199,
|
| 462 |
+
592,
|
| 463 |
+
488,
|
| 464 |
+
616
|
| 465 |
+
],
|
| 466 |
+
"page_idx": 3
|
| 467 |
+
},
|
| 468 |
+
{
|
| 469 |
+
"type": "text",
|
| 470 |
+
"text": "where $\\hat{m}_{k,occ}^i|_{k\\in s}$ is the map of occupancy probabilities defined on the subset of grid cells $k\\in s$ predicted by model $i$ in the ensemble $f$ . Choosing the path $s\\in S$ by minimizing $p_{i,s}$ chooses the path we think most likely to be unobstructed. We can minimize this likelihood by selecting $\\arg \\min_{s\\in S}\\mu_s$ where $\\mu_s\\coloneqq \\frac{1}{N}\\sum_{i = 1}^{N}p_{i,s}$ . However, we note that there may be multiple unobstructed candidate paths generated by our planner. We differentiate between these in our selection by adding a term $d_{s}$ to our objective to incentivize selecting shorter paths. Furthermore, as an agent navigates to a goal, it makes map predictions using its accumulated observations along the way. Therefore, to improve navigation performance we can incorporate an exploration component in our navigation objective to incentivize choosing paths where it can gain the most information regarding efficient traversability.",
|
| 471 |
+
"bbox": [
|
| 472 |
+
81,
|
| 473 |
+
623,
|
| 474 |
+
488,
|
| 475 |
+
849
|
| 476 |
+
],
|
| 477 |
+
"page_idx": 3
|
| 478 |
+
},
|
| 479 |
+
{
|
| 480 |
+
"type": "text",
|
| 481 |
+
"text": "We estimate uncertainty associated with efficient traversability of a particular path $s$ for our exploration objective. Since there is zero uncertainty associated with path lengths $d_{s}$ , we design our exploration objective to maximize information gain for path traversability. We",
|
| 482 |
+
"bbox": [
|
| 483 |
+
81,
|
| 484 |
+
851,
|
| 485 |
+
488,
|
| 486 |
+
926
|
| 487 |
+
],
|
| 488 |
+
"page_idx": 3
|
| 489 |
+
},
|
| 490 |
+
{
|
| 491 |
+
"type": "text",
|
| 492 |
+
"text": "denote $P_{s_{NT}}(m_t|m_t')$ as the probability the path $s$ is not traversable ( $NT$ ) estimated by $\\mu_s$ . We recall that $\\mu_s$ is computed by averaging traversability scores over an ensemble of models. We compute the variance of these scores $\\mathrm{Var}_{i\\in N}p_{i,s}$ to estimate uncertainty of our model approximating $P_{s_{NT}}(m_t|m_t')$ .",
|
| 493 |
+
"bbox": [
|
| 494 |
+
504,
|
| 495 |
+
65,
|
| 496 |
+
911,
|
| 497 |
+
157
|
| 498 |
+
],
|
| 499 |
+
"page_idx": 3
|
| 500 |
+
},
|
| 501 |
+
{
|
| 502 |
+
"type": "text",
|
| 503 |
+
"text": "We combine exploration and exploitation in our full objective using an upper confidence bound policy [47], [46], [48], [24]. Our objective for efficient traversable paths is specified as",
|
| 504 |
+
"bbox": [
|
| 505 |
+
504,
|
| 506 |
+
156,
|
| 507 |
+
911,
|
| 508 |
+
214
|
| 509 |
+
],
|
| 510 |
+
"page_idx": 3
|
| 511 |
+
},
|
| 512 |
+
{
|
| 513 |
+
"type": "equation",
|
| 514 |
+
"text": "\n$$\n\\underset {s \\in S} {\\arg \\min } P _ {s _ {N T}} \\left(m _ {t} \\mid m _ {t} ^ {\\prime}\\right) + d _ {s} \\tag {4}\n$$\n",
|
| 515 |
+
"text_format": "latex",
|
| 516 |
+
"bbox": [
|
| 517 |
+
614,
|
| 518 |
+
215,
|
| 519 |
+
911,
|
| 520 |
+
241
|
| 521 |
+
],
|
| 522 |
+
"page_idx": 3
|
| 523 |
+
},
|
| 524 |
+
{
|
| 525 |
+
"type": "text",
|
| 526 |
+
"text": "and can be reconstructed as a maximization problem $\\arg \\max_{s\\in S} - P_{s_{NT}}(m_t|m_t^{\\prime}) - d_s$ . We denote $\\sigma_{s}\\coloneqq$ $\\sqrt{\\operatorname{Var}_{i\\in N}p_{i,s}}$ and observe the upper bound",
|
| 527 |
+
"bbox": [
|
| 528 |
+
504,
|
| 529 |
+
246,
|
| 530 |
+
911,
|
| 531 |
+
292
|
| 532 |
+
],
|
| 533 |
+
"page_idx": 3
|
| 534 |
+
},
|
| 535 |
+
{
|
| 536 |
+
"type": "equation",
|
| 537 |
+
"text": "\n$$\n- P _ {s _ {N T}} \\left(m _ {t} \\mid m _ {t} ^ {\\prime}\\right) - d _ {s} \\leq - \\mu_ {s} + \\alpha_ {1} \\sigma_ {s} - d _ {s} \\tag {5}\n$$\n",
|
| 538 |
+
"text_format": "latex",
|
| 539 |
+
"bbox": [
|
| 540 |
+
566,
|
| 541 |
+
299,
|
| 542 |
+
911,
|
| 543 |
+
315
|
| 544 |
+
],
|
| 545 |
+
"page_idx": 3
|
| 546 |
+
},
|
| 547 |
+
{
|
| 548 |
+
"type": "text",
|
| 549 |
+
"text": "holds with some fixed but unknown probability where $\\alpha_{1}$ is a constant hyperparameter. Using our upper bound to estimate $-P_{s_{NT}}(m_t|D_t)$ , our full objective function as a minimization problem is",
|
| 550 |
+
"bbox": [
|
| 551 |
+
504,
|
| 552 |
+
321,
|
| 553 |
+
911,
|
| 554 |
+
382
|
| 555 |
+
],
|
| 556 |
+
"page_idx": 3
|
| 557 |
+
},
|
| 558 |
+
{
|
| 559 |
+
"type": "equation",
|
| 560 |
+
"text": "\n$$\n\\underset {s} {\\arg \\min } \\mu_ {s} - \\alpha_ {1} \\sigma_ {s} + \\alpha_ {2} d _ {s} \\tag {6}\n$$\n",
|
| 561 |
+
"text_format": "latex",
|
| 562 |
+
"bbox": [
|
| 563 |
+
617,
|
| 564 |
+
391,
|
| 565 |
+
911,
|
| 566 |
+
414
|
| 567 |
+
],
|
| 568 |
+
"page_idx": 3
|
| 569 |
+
},
|
| 570 |
+
{
|
| 571 |
+
"type": "text",
|
| 572 |
+
"text": "where $\\alpha_{2}$ is a hyperparameter weighting the contribution of path length. Similarly to our exploration policy, in practice, equation 6 is evaluated over the accumulated global map $M_{t}$ . Figure 2 (bottom row) illustrates path selection using our objective during a point-goal episode.",
|
| 573 |
+
"bbox": [
|
| 574 |
+
504,
|
| 575 |
+
419,
|
| 576 |
+
911,
|
| 577 |
+
494
|
| 578 |
+
],
|
| 579 |
+
"page_idx": 3
|
| 580 |
+
},
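A minimal sketch of the path selection rule in Eq. 6, assuming the path_statistics helper from the earlier snippet and path lengths d_s already normalized to a comparable scale; the default weights are the paper's reported values of 0.1 and 0.5.

```python
import numpy as np

def select_path(ensemble_maps, candidate_paths, path_lengths,
                alpha1=0.1, alpha2=0.5):
    """Return the index of the path minimizing mu_s - alpha1*sigma_s + alpha2*d_s."""
    best_idx, best_score = None, np.inf
    for idx, (cells, d_s) in enumerate(zip(candidate_paths, path_lengths)):
        mu_s, sigma_s = path_statistics(ensemble_maps, cells)
        score = mu_s - alpha1 * sigma_s + alpha2 * d_s
        if score < best_score:
            best_idx, best_score = idx, score
    return best_idx
```

Setting alpha1 = 0 removes the exploration bonus and yields a purely exploitative selection over the ensemble mean.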
|
| 581 |
+
{
|
| 582 |
+
"type": "text",
|
| 583 |
+
"text": "IV. EXPERIMENTS",
|
| 584 |
+
"text_level": 1,
|
| 585 |
+
"bbox": [
|
| 586 |
+
640,
|
| 587 |
+
503,
|
| 588 |
+
774,
|
| 589 |
+
516
|
| 590 |
+
],
|
| 591 |
+
"page_idx": 3
|
| 592 |
+
},
|
| 593 |
+
{
|
| 594 |
+
"type": "text",
|
| 595 |
+
"text": "Our experiments are conducted on the Matterport3D (MP3D) [17] dataset using the Habitat [6] simulator. We follow the standard train/val/test environments split of MP3D which contains overall 90 reconstructions of realistic indoor scenes. The splits are disjoint, therefore all evaluations are conducted in novel scenes where the occupancy map predictor model has not seen during training. Our observation space consists of $256 \\times 256$ depth images, while the action space contains four actions: MOVE_FORWARD by $25cm$ , TURN_LEFT and TURN_RIGHT by $10^{\\circ}$ and STOP.",
|
| 596 |
+
"bbox": [
|
| 597 |
+
504,
|
| 598 |
+
522,
|
| 599 |
+
911,
|
| 600 |
+
672
|
| 601 |
+
],
|
| 602 |
+
"page_idx": 3
|
| 603 |
+
},
|
| 604 |
+
{
|
| 605 |
+
"type": "text",
|
| 606 |
+
"text": "We perform two key experiments. First, we compare to other state-of-the-art methods on the task of exploration using both coverage and map accuracy metrics (sec. IV-B). Second we evaluate on the point-goal navigation task and demonstrate increased performance when DD-PPO [10] is complemented with our planning strategy (sec. IV-C).",
|
| 607 |
+
"bbox": [
|
| 608 |
+
504,
|
| 609 |
+
672,
|
| 610 |
+
911,
|
| 611 |
+
763
|
| 612 |
+
],
|
| 613 |
+
"page_idx": 3
|
| 614 |
+
},
|
| 615 |
+
{
|
| 616 |
+
"type": "text",
|
| 617 |
+
"text": "A. Implementation Details",
|
| 618 |
+
"text_level": 1,
|
| 619 |
+
"bbox": [
|
| 620 |
+
504,
|
| 621 |
+
772,
|
| 622 |
+
689,
|
| 623 |
+
786
|
| 624 |
+
],
|
| 625 |
+
"page_idx": 3
|
| 626 |
+
},
|
| 627 |
+
{
|
| 628 |
+
"type": "text",
|
| 629 |
+
"text": "The Unet [49] model used for the occupancy map prediction has four encoder and four decoder convolutional blocks with skip connections and it is combined with a ResNet18 [53] for feature extraction. We use Pytorch [54] and train using the Adam optimizer with a learning rate of 0.0002. The grid dimensions are $h = w = 160$ for local, and $H = W = 768$ for global, while each cell in the grid is $5cm \\times 5cm$ . For the path generation process, we run the RRT every 30 navigation steps for exploration and",
|
| 630 |
+
"bbox": [
|
| 631 |
+
504,
|
| 632 |
+
790,
|
| 633 |
+
911,
|
| 634 |
+
926
|
| 635 |
+
],
|
| 636 |
+
"page_idx": 3
|
| 637 |
+
},
|
| 638 |
+
{
|
| 639 |
+
"type": "image",
|
| 640 |
+
"img_path": "images/a372c4f3ab0ac9f203050a7d39e3e86a5fc0a914b1cf933d9976b7c6c72e7bf0.jpg",
|
| 641 |
+
"image_caption": [],
|
| 642 |
+
"image_footnote": [],
|
| 643 |
+
"bbox": [
|
| 644 |
+
125,
|
| 645 |
+
66,
|
| 646 |
+
250,
|
| 647 |
+
162
|
| 648 |
+
],
|
| 649 |
+
"page_idx": 4
|
| 650 |
+
},
|
| 651 |
+
{
|
| 652 |
+
"type": "image",
|
| 653 |
+
"img_path": "images/b77a6008e2ed91126ddde9f6fab51a19b97d1d1870dffbf5687ecc2ac88e8fd6.jpg",
|
| 654 |
+
"image_caption": [],
|
| 655 |
+
"image_footnote": [],
|
| 656 |
+
"bbox": [
|
| 657 |
+
250,
|
| 658 |
+
66,
|
| 659 |
+
372,
|
| 660 |
+
162
|
| 661 |
+
],
|
| 662 |
+
"page_idx": 4
|
| 663 |
+
},
|
| 664 |
+
{
|
| 665 |
+
"type": "image",
|
| 666 |
+
"img_path": "images/890902a0f6efbdcd4e70d2197d5e38b12be61a1c6cc9bb06f17fe70c86e631cb.jpg",
|
| 667 |
+
"image_caption": [],
|
| 668 |
+
"image_footnote": [],
|
| 669 |
+
"bbox": [
|
| 670 |
+
374,
|
| 671 |
+
66,
|
| 672 |
+
496,
|
| 673 |
+
162
|
| 674 |
+
],
|
| 675 |
+
"page_idx": 4
|
| 676 |
+
},
|
| 677 |
+
{
|
| 678 |
+
"type": "image",
|
| 679 |
+
"img_path": "images/02144e7909218df3dbd7fc95d2a30a38660de281472282f79f99ee928da3dc6d.jpg",
|
| 680 |
+
"image_caption": [],
|
| 681 |
+
"image_footnote": [],
|
| 682 |
+
"bbox": [
|
| 683 |
+
500,
|
| 684 |
+
66,
|
| 685 |
+
620,
|
| 686 |
+
162
|
| 687 |
+
],
|
| 688 |
+
"page_idx": 4
|
| 689 |
+
},
|
| 690 |
+
{
|
| 691 |
+
"type": "image",
|
| 692 |
+
"img_path": "images/c757ae17cb792015851f42525623500bffb1d0562f6fdd1ad8759d3a366df4c8.jpg",
|
| 693 |
+
"image_caption": [],
|
| 694 |
+
"image_footnote": [],
|
| 695 |
+
"bbox": [
|
| 696 |
+
622,
|
| 697 |
+
66,
|
| 698 |
+
746,
|
| 699 |
+
162
|
| 700 |
+
],
|
| 701 |
+
"page_idx": 4
|
| 702 |
+
},
|
| 703 |
+
{
|
| 704 |
+
"type": "image",
|
| 705 |
+
"img_path": "images/b64a561d4491f7249a0ad29056b3677c1cfd4b522e7ccd251149c4dec02bf6ea.jpg",
|
| 706 |
+
"image_caption": [
|
| 707 |
+
"Fig. 3: Exploration example with $T = 1000$ showing the trajectory followed by our agent (red line). The top row shows RGB images observed by the agent. The ground-truth map is visualized in the bottom right corner."
|
| 708 |
+
],
|
| 709 |
+
"image_footnote": [],
|
| 710 |
+
"bbox": [
|
| 711 |
+
125,
|
| 712 |
+
164,
|
| 713 |
+
250,
|
| 714 |
+
258
|
| 715 |
+
],
|
| 716 |
+
"page_idx": 4
|
| 717 |
+
},
|
| 718 |
+
{
|
| 719 |
+
"type": "image",
|
| 720 |
+
"img_path": "images/ba7fa3580010ebdd959a20679d6ebcefc6ca36e2e2620864046bfd32b650f950.jpg",
|
| 721 |
+
"image_caption": [],
|
| 722 |
+
"image_footnote": [],
|
| 723 |
+
"bbox": [
|
| 724 |
+
250,
|
| 725 |
+
164,
|
| 726 |
+
372,
|
| 727 |
+
258
|
| 728 |
+
],
|
| 729 |
+
"page_idx": 4
|
| 730 |
+
},
|
| 731 |
+
{
|
| 732 |
+
"type": "image",
|
| 733 |
+
"img_path": "images/8c727e33dd42f0c4b6bad23e83f198c49a4a229f17a1434a5ade826e3a75fdfc.jpg",
|
| 734 |
+
"image_caption": [
|
| 735 |
+
"time"
|
| 736 |
+
],
|
| 737 |
+
"image_footnote": [],
|
| 738 |
+
"bbox": [
|
| 739 |
+
374,
|
| 740 |
+
164,
|
| 741 |
+
496,
|
| 742 |
+
258
|
| 743 |
+
],
|
| 744 |
+
"page_idx": 4
|
| 745 |
+
},
|
| 746 |
+
{
|
| 747 |
+
"type": "image",
|
| 748 |
+
"img_path": "images/78f8150b3c4cb97e8b51dad90e43a45d143b7f684bb25a3d568db023a24f48ae.jpg",
|
| 749 |
+
"image_caption": [],
|
| 750 |
+
"image_footnote": [],
|
| 751 |
+
"bbox": [
|
| 752 |
+
500,
|
| 753 |
+
164,
|
| 754 |
+
622,
|
| 755 |
+
258
|
| 756 |
+
],
|
| 757 |
+
"page_idx": 4
|
| 758 |
+
},
|
| 759 |
+
{
|
| 760 |
+
"type": "image",
|
| 761 |
+
"img_path": "images/485747c95b8cd817bb35d2888feb402c511c742e6a38129ed36b76faba50e40b.jpg",
|
| 762 |
+
"image_caption": [],
|
| 763 |
+
"image_footnote": [],
|
| 764 |
+
"bbox": [
|
| 765 |
+
624,
|
| 766 |
+
164,
|
| 767 |
+
746,
|
| 768 |
+
258
|
| 769 |
+
],
|
| 770 |
+
"page_idx": 4
|
| 771 |
+
},
|
| 772 |
+
{
|
| 773 |
+
"type": "image",
|
| 774 |
+
"img_path": "images/99634cafb76a86d0259e197514c7bfc84b33cafbd1dafd76b640833b8188bcd4.jpg",
|
| 775 |
+
"image_caption": [],
|
| 776 |
+
"image_footnote": [],
|
| 777 |
+
"bbox": [
|
| 778 |
+
748,
|
| 779 |
+
164,
|
| 780 |
+
870,
|
| 781 |
+
258
|
| 782 |
+
],
|
| 783 |
+
"page_idx": 4
|
| 784 |
+
},
|
| 785 |
+
{
|
| 786 |
+
"type": "table",
|
| 787 |
+
"img_path": "images/303445e7d97413b66ac6e852833e7fd3b11c1b41ff844cde9b5eb90e89626d2c.jpg",
|
| 788 |
+
"table_caption": [],
|
| 789 |
+
"table_footnote": [],
|
| 790 |
+
"table_body": "<table><tr><td></td><td colspan=\"2\">Noisy</td><td colspan=\"2\">Noise-free</td></tr><tr><td>Method</td><td>Map Acc (m2)</td><td>IoU (%)</td><td>Map Acc (m2)</td><td>IoU (%)</td></tr><tr><td>ANS(depth) [12]</td><td>72.5</td><td>26.0</td><td>85.9</td><td>34.0</td></tr><tr><td>OccAnt(depth) w/o AR [12]</td><td>92.7</td><td>29.0</td><td>104.7</td><td>38.0</td></tr><tr><td>OccAnt(depth) [12]</td><td>94.1</td><td>33.0</td><td>96.5</td><td>35.0</td></tr><tr><td>FBE [52] + DD-PPO [10]</td><td>100.9</td><td>28.7</td><td>120.2</td><td>44.7</td></tr><tr><td>UPEN + DD-PPO [10]</td><td>110.3</td><td>25.8</td><td>141.6</td><td>45.6</td></tr></table>",
|
| 791 |
+
"bbox": [
|
| 792 |
+
88,
|
| 793 |
+
319,
|
| 794 |
+
558,
|
| 795 |
+
397
|
| 796 |
+
],
|
| 797 |
+
"page_idx": 4
|
| 798 |
+
},
|
| 799 |
+
{
|
| 800 |
+
"type": "table",
|
| 801 |
+
"img_path": "images/7ae12a13a1464674b7128204f1b4061d5df096342ff8338c270bb151937c66e9.jpg",
|
| 802 |
+
"table_caption": [
|
| 803 |
+
"TABLE I: Exploration results on MP3D test scenes evaluating map quality at $\\mathrm{T} = {500}$ . The \"w/o AR\" refers to the baseline that is trained without the anticipation reward in [12]."
|
| 804 |
+
],
|
| 805 |
+
"table_footnote": [],
|
| 806 |
+
"table_body": "<table><tr><td></td><td>Cov (m2)</td><td>Cov (%)</td></tr><tr><td>ANS(rgb) [13]</td><td>73.28</td><td>52.1</td></tr><tr><td>FBE [52] + DD-PPO [10]</td><td>85.3</td><td>53.0</td></tr><tr><td>UPEN + DD-PPO [10]</td><td>113.0</td><td>67.9</td></tr></table>",
|
| 807 |
+
"bbox": [
|
| 808 |
+
593,
|
| 809 |
+
340,
|
| 810 |
+
895,
|
| 811 |
+
391
|
| 812 |
+
],
|
| 813 |
+
"page_idx": 4
|
| 814 |
+
},
|
| 815 |
+
{
|
| 816 |
+
"type": "text",
|
| 817 |
+
"text": "TABLE II: Exploration results on MP3D test scenes evaluating area coverage at $\\mathrm{T} = {1000}$ .",
|
| 818 |
+
"bbox": [
|
| 819 |
+
576,
|
| 820 |
+
396,
|
| 821 |
+
911,
|
| 822 |
+
426
|
| 823 |
+
],
|
| 824 |
+
"page_idx": 4
|
| 825 |
+
},
|
| 826 |
+
{
|
| 827 |
+
"type": "text",
|
| 828 |
+
"text": "20 for point-goal. The RRT is set to generate a maximum of 10 paths every run, with a goal sampling rate of $20\\%$ . Finally, the RRT expands new nodes with a distance of 5 pixels at a time. A single step in a navigation episode requires 0.37s on average that includes map prediction and registration, planning using RRT, and DD-PPO. The timing was performed on a laptop using i7 CPU @ 2.20GHz and a GTX1060 GPU. All experiments are with ensemble size of 4. We provide code and trained models: https://github.com/ggeorgak11/UPEN.",
|
| 829 |
+
"bbox": [
|
| 830 |
+
81,
|
| 831 |
+
478,
|
| 832 |
+
488,
|
| 833 |
+
630
|
| 834 |
+
],
|
| 835 |
+
"page_idx": 4
|
| 836 |
+
},
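For reference, the reported settings can be collected into a single configuration object; this is only a restatement of the numbers above, and the field names are ours.

```python
from dataclasses import dataclass

@dataclass
class UPENConfig:
    # Occupancy map grids (each cell is 5cm x 5cm)
    local_grid: int = 160          # h = w
    global_grid: int = 768         # H = W
    cell_size_m: float = 0.05
    # Training
    learning_rate: float = 2e-4    # Adam
    ensemble_size: int = 4
    # RRT planning
    rrt_interval_exploration: int = 30   # replan every 30 steps
    rrt_interval_pointgoal: int = 20
    rrt_max_paths: int = 10
    rrt_goal_sampling_rate: float = 0.2
    rrt_step_pixels: int = 5
    # Eq. 6 weights
    alpha1: float = 0.1
    alpha2: float = 0.5
```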
|
| 837 |
+
{
|
| 838 |
+
"type": "text",
|
| 839 |
+
"text": "B. Exploration",
|
| 840 |
+
"text_level": 1,
|
| 841 |
+
"bbox": [
|
| 842 |
+
83,
|
| 843 |
+
636,
|
| 844 |
+
187,
|
| 845 |
+
650
|
| 846 |
+
],
|
| 847 |
+
"page_idx": 4
|
| 848 |
+
},
|
| 849 |
+
{
|
| 850 |
+
"type": "text",
|
| 851 |
+
"text": "The setup from [12] is followed for this experiment, where the objective is to cover as much area as possible given a limited time budget $T = 1000$ . Unless stated otherwise, the evaluation is conducted with simulated noise following the noise models from [13], [12]. We use the following metrics: 1) Map Accuracy $(m^2)$ : as defined in [12] the area in the predicted occupancy map that matches the ground-truth map. 2) IoU (%): the intersection over union of the predicted map and the ground-truth. 3) Cov $(m^2)$ : the actual area covered by the agent. 4) Cov (%): ratio of covered area to max scene coverage. We note that the two coverage metrics are computed on a map containing only ground-projections of depth observations. Our method is validated against the competitive approaches of Occupancy Anticipation [12] (OccAnt) and Active Neural SLAM [13] (ANS), which are modular approaches with mapper components. Both use reinforcement learning to train goal selection policies optimized over map accuracy and coverage respectively. Furthermore,",
|
| 852 |
+
"bbox": [
|
| 853 |
+
81,
|
| 854 |
+
654,
|
| 855 |
+
488,
|
| 856 |
+
926
|
| 857 |
+
],
|
| 858 |
+
"page_idx": 4
|
| 859 |
+
},
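A small sketch of the IoU metric described above, assuming both the predicted and ground-truth maps are binary occupancy masks of the same shape; this is not taken from the paper's evaluation code.

```python
import numpy as np

def occupancy_iou(pred_occ, gt_occ):
    """Intersection-over-union of two binary occupancy masks of shape (H, W)."""
    pred_occ = pred_occ.astype(bool)
    gt_occ = gt_occ.astype(bool)
    intersection = np.logical_and(pred_occ, gt_occ).sum()
    union = np.logical_or(pred_occ, gt_occ).sum()
    return float(intersection) / float(union) if union > 0 else 0.0
```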
|
| 860 |
+
{
|
| 861 |
+
"type": "text",
|
| 862 |
+
"text": "we compare against the classical method of Frontier-based Exploration [52] (FBE). Since both UPEN and FBE are combined with DD-PPO and use the same predicted maps, this comparison directly validates our exploration objective.",
|
| 863 |
+
"bbox": [
|
| 864 |
+
504,
|
| 865 |
+
478,
|
| 866 |
+
911,
|
| 867 |
+
537
|
| 868 |
+
],
|
| 869 |
+
"page_idx": 4
|
| 870 |
+
},
|
| 871 |
+
{
|
| 872 |
+
"type": "text",
|
| 873 |
+
"text": "We report two key results. First, in Table I our method outperforms all baselines in the noise-free case in both Map Accuracy and IoU. In fact, we show $21.4m^2$ and $36.9m^2$ improvement over FBE and OccAnt respectively on the Map Accuracy metric. In the noisy case even though we still surpass all baselines on Map Accuracy, our performance drops significantly in both metrics. In addition, the Map Accuracy increasing while IoU drops is attributed to increased map coverage with reduced accuracy. This is not surprising since unlike OccAnt and Neural SLAM we are not using a pose estimator. Second, in Table II we demonstrate superior performance on coverage metrics with a margin of $27.7m^2$ from FBE and $39.7m^2$ from ANS. This suggests that our method is more efficient when exploring a novel scene, thus validating our uncertainty-based exploration policy. Figure 3 shows an exploration episode.",
|
| 874 |
+
"bbox": [
|
| 875 |
+
504,
|
| 876 |
+
539,
|
| 877 |
+
913,
|
| 878 |
+
780
|
| 879 |
+
],
|
| 880 |
+
"page_idx": 4
|
| 881 |
+
},
|
| 882 |
+
{
|
| 883 |
+
"type": "text",
|
| 884 |
+
"text": "C. Point-goal Navigation",
|
| 885 |
+
"text_level": 1,
|
| 886 |
+
"bbox": [
|
| 887 |
+
506,
|
| 888 |
+
787,
|
| 889 |
+
681,
|
| 890 |
+
801
|
| 891 |
+
],
|
| 892 |
+
"page_idx": 4
|
| 893 |
+
},
|
| 894 |
+
{
|
| 895 |
+
"type": "text",
|
| 896 |
+
"text": "We evaluate the performance of our uncertainty-driven planner when used to augment DD-PPO [10] against its vanilla version. DD-PPO is currently one of the best performing methods on point-goal navigation, achieving $97\\%$ SPL on the Gibson [7] validation set as shown in [10]. We follow the point-goal task setup from [1] where given a target coordinate the agent needs to navigate to that target and stop within a $0.2m$ radius. The agent is given a time-budget of",
|
| 897 |
+
"bbox": [
|
| 898 |
+
504,
|
| 899 |
+
805,
|
| 900 |
+
913,
|
| 901 |
+
926
|
| 902 |
+
],
|
| 903 |
+
"page_idx": 4
|
| 904 |
+
},
|
| 905 |
+
{
|
| 906 |
+
"type": "table",
|
| 907 |
+
"img_path": "images/1e58351d0d1071cd2e8c070af7b6ecec1b13ee7f6c965cc1b51c5d426ab8ef9e.jpg",
|
| 908 |
+
"table_caption": [],
|
| 909 |
+
"table_footnote": [],
|
| 910 |
+
"table_body": "<table><tr><td>Dataset</td><td colspan=\"2\">MP3D Val</td><td colspan=\"2\">MP3D Test</td><td colspan=\"2\">MP3D Val-Hard</td></tr><tr><td>Method</td><td>Success (%)</td><td>SPL (%)</td><td>Success (%)</td><td>SPL (%)</td><td>Success (%)</td><td>SPL (%)</td></tr><tr><td>DD-PPO [10]</td><td>47.8</td><td>38.7</td><td>37.3</td><td>30.2</td><td>38.0</td><td>28.1</td></tr><tr><td>UPEN-Occ + DD-PPO [10]</td><td>43.8</td><td>30.2</td><td>36.3</td><td>25.3</td><td>42.3</td><td>26.9</td></tr><tr><td>UPEN-Greedy + DD-PPO [10]</td><td>48.9</td><td>36.0</td><td>37.5</td><td>28.1</td><td>43.0</td><td>28.8</td></tr><tr><td>UPEN + DD-PPO [10]</td><td>49.8</td><td>36.9</td><td>40.8</td><td>30.7</td><td>45.7</td><td>31.6</td></tr></table>",
|
| 911 |
+
"bbox": [
|
| 912 |
+
176,
|
| 913 |
+
66,
|
| 914 |
+
820,
|
| 915 |
+
140
|
| 916 |
+
],
|
| 917 |
+
"page_idx": 5
|
| 918 |
+
},
|
| 919 |
+
{
|
| 920 |
+
"type": "table",
|
| 921 |
+
"img_path": "images/ba7e618afd55730d7ba3ab747d74a21dd078d558c890edd45d6ed832811609e9.jpg",
|
| 922 |
+
"table_caption": [
|
| 923 |
+
"TABLE III: Point-goal navigation results of our method against the vanilla DD-PPO[10]. \"Occ\" signifies a policy that uses only occupancy predictions, while \"Greedy\" refers to a policy taking into consideration path length without uncertainty."
|
| 924 |
+
],
|
| 925 |
+
"table_footnote": [],
|
| 926 |
+
"table_body": "<table><tr><td></td><td>Avg GD (m)</td><td>Avg GEDR</td><td>Min GEDR</td></tr><tr><td>Gibson Val</td><td>5.88</td><td>1.37</td><td>1.00</td></tr><tr><td>MP3D Val</td><td>11.14</td><td>1.40</td><td>1.00</td></tr><tr><td>MP3D Test</td><td>13.23</td><td>1.42</td><td>1.00</td></tr><tr><td>MP3D Val-Hard</td><td>8.28</td><td>3.19</td><td>2.50</td></tr></table>",
|
| 927 |
+
"bbox": [
|
| 928 |
+
106,
|
| 929 |
+
200,
|
| 930 |
+
467,
|
| 931 |
+
262
|
| 932 |
+
],
|
| 933 |
+
"page_idx": 5
|
| 934 |
+
},
|
| 935 |
+
{
|
| 936 |
+
"type": "text",
|
| 937 |
+
"text": "TABLE IV: Geodesic distance (GD) and geodesic to Euclidean distance ratio (GEDR) between different evaluation sets for point-goal navigation.",
|
| 938 |
+
"bbox": [
|
| 939 |
+
81,
|
| 940 |
+
267,
|
| 941 |
+
488,
|
| 942 |
+
313
|
| 943 |
+
],
|
| 944 |
+
"page_idx": 5
|
| 945 |
+
},
|
| 946 |
+
{
|
| 947 |
+
"type": "image",
|
| 948 |
+
"img_path": "images/cfed539bff34b3f87750d8ac61fda9ec84335ac17442410e8d478156ee718684.jpg",
|
| 949 |
+
"image_caption": [
|
| 950 |
+
"Fig. 4: Point-goal navigation examples from the MP3D Val-Hard set where the vanilla DD-PPO [10] fails to reach the target while our method is successful."
|
| 951 |
+
],
|
| 952 |
+
"image_footnote": [],
|
| 953 |
+
"bbox": [
|
| 954 |
+
174,
|
| 955 |
+
330,
|
| 956 |
+
400,
|
| 957 |
+
613
|
| 958 |
+
],
|
| 959 |
+
"page_idx": 5
|
| 960 |
+
},
|
| 961 |
+
{
|
| 962 |
+
"type": "text",
|
| 963 |
+
"text": "$T = 500$ steps to reach the target. For evaluation we use the standard metrics [1]: Success: percentage of successful episodes, and SPL: success rate normalized by path length. For this experiment we assume noise-free poses are provided by the simulator. To combine DD-PPO with our planner, we set the current short-term goal estimated by our approach as the target that DD-PPO needs to reach. For the vanilla DD-PPO we use the final target location in each test episode.",
|
| 964 |
+
"bbox": [
|
| 965 |
+
81,
|
| 966 |
+
698,
|
| 967 |
+
488,
|
| 968 |
+
819
|
| 969 |
+
],
|
| 970 |
+
"page_idx": 5
|
| 971 |
+
},
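The SPL metric follows the standard definition from [1] (success weighted by the ratio of shortest-path length to the path actually taken); a minimal sketch, with variable names of our choosing:

```python
def spl(successes, shortest_dists, actual_dists):
    """Success weighted by Path Length (SPL), averaged over episodes.

    successes:      iterable of 0/1 episode outcomes.
    shortest_dists: geodesic shortest-path distance per episode.
    actual_dists:   length of the path actually taken by the agent.
    """
    total = 0.0
    for s, l, p in zip(successes, shortest_dists, actual_dists):
        total += s * l / max(p, l)
    return total / len(successes)
```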
|
| 972 |
+
{
|
| 973 |
+
"type": "text",
|
| 974 |
+
"text": "DD-PPO essentially solves Gibson point-goal navigation task so we turn our attention to MP3D where DD-PPO has lower performance due to the episodes having larger average geodesic distance (GD) to goal. However, we noticed that the average geodesic to euclidean distance ratio (GEDR) in MP3D is still low (a GEDR of 1 means there is a straight line path between the starting position and the goal).",
|
| 975 |
+
"bbox": [
|
| 976 |
+
81,
|
| 977 |
+
820,
|
| 978 |
+
488,
|
| 979 |
+
928
|
| 980 |
+
],
|
| 981 |
+
"page_idx": 5
|
| 982 |
+
},
|
| 983 |
+
{
|
| 984 |
+
"type": "text",
|
| 985 |
+
"text": "In order to demonstrate the effectiveness of our proposed method, we generated a new evaluation set (MP3D Val-Hard) with minimum GEDR=2.5. This created episodes which frequently involve sharp u-turns and multiple obstacles along the shortest path. Table IV illustrates episode statistics between different evaluation sets<sup>1</sup>. In addition to MP3D Val-Hard, we also test our method on the publicly available sets of MP3D Val and MP3D Test. We note that MP3D Val-Hard was generated using the same random procedure as its publicly available counterparts.",
|
| 986 |
+
"bbox": [
|
| 987 |
+
504,
|
| 988 |
+
205,
|
| 989 |
+
911,
|
| 990 |
+
357
|
| 991 |
+
],
|
| 992 |
+
"page_idx": 5
|
| 993 |
+
},
|
| 994 |
+
{
|
| 995 |
+
"type": "text",
|
| 996 |
+
"text": "We define two variations of our method in order to demonstrate the usefulness of our uncertainty estimation by choosing different values for the $\\alpha_{1}$ and $\\alpha_{2}$ parameters of Eq. 6 from section III-C. First, $UPEN-Occ + DD-PPO$ ( $\\alpha_{1} = 0$ , $\\alpha_{2} = 0$ ) considers only the occupancy probabilities when estimating the traversability difficulty of a path, while $UPEN-Greedy + DD-PPO$ ( $\\alpha_{1} = 0$ , $\\alpha = 0.5$ ) considers the path length and not the uncertainty. Our default method $UPEN + DD-PPO$ uses $\\alpha_{1} = 0.1$ and $\\alpha_{2} = 0.5$ .",
|
| 997 |
+
"bbox": [
|
| 998 |
+
504,
|
| 999 |
+
358,
|
| 1000 |
+
913,
|
| 1001 |
+
494
|
| 1002 |
+
],
|
| 1003 |
+
"page_idx": 5
|
| 1004 |
+
},
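The three variants therefore differ only in the (alpha1, alpha2) weights of Eq. 6; a compact summary for readability, with labels as in Table III:

```python
# (alpha1, alpha2) settings for the variants evaluated in Table III.
VARIANTS = {
    "UPEN-Occ + DD-PPO":    (0.0, 0.0),  # occupancy probabilities only
    "UPEN-Greedy + DD-PPO": (0.0, 0.5),  # adds path length, no uncertainty
    "UPEN + DD-PPO":        (0.1, 0.5),  # full objective (default)
}
```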
|
| 1005 |
+
{
|
| 1006 |
+
"type": "text",
|
| 1007 |
+
"text": "The results are illustrated in Table III. We outperform all baselines in all evaluation sets with regards to Success. The largest gap in performance is observed in the MP3D Val-Hard set which contains episodes with much higher average GEDR that the other sets. This suggests that our method is able to follow more complicated paths by choosing short-term goals, in contrast to the vanilla DD-PPO which has to negotiate narrow passages and sharp turns only from egocentric observations. Regarding SPL, our performance gains are not as pronounced as in Success, since our policy frequently prefers paths with lower traversability difficulty in favor of shortest paths, to ensure higher success probability.",
|
| 1008 |
+
"bbox": [
|
| 1009 |
+
504,
|
| 1010 |
+
494,
|
| 1011 |
+
913,
|
| 1012 |
+
676
|
| 1013 |
+
],
|
| 1014 |
+
"page_idx": 5
|
| 1015 |
+
},
|
| 1016 |
+
{
|
| 1017 |
+
"type": "text",
|
| 1018 |
+
"text": "V. CONCLUSION",
|
| 1019 |
+
"text_level": 1,
|
| 1020 |
+
"bbox": [
|
| 1021 |
+
648,
|
| 1022 |
+
686,
|
| 1023 |
+
769,
|
| 1024 |
+
699
|
| 1025 |
+
],
|
| 1026 |
+
"page_idx": 5
|
| 1027 |
+
},
|
| 1028 |
+
{
|
| 1029 |
+
"type": "text",
|
| 1030 |
+
"text": "We introduced a novel uncertainty-driven planner for exploration and navigation tasks in previously unseen environments. The planner leverages an occupancy map predictor that hallucinates map regions outside the field of view of the agent and uses its predictions to formulate uncertainty based objectives. Our experiments on exploration suggests that our method is more efficient in covering unknown areas. In terms of point-goal navigation, we showed how DD-PPO [10] augmented with our method outperforms its vanilla version. This suggests that end-to-end navigation methods can benefit from employing an uncertainty-driven planner, especially in difficult episodes.",
|
| 1031 |
+
"bbox": [
|
| 1032 |
+
504,
|
| 1033 |
+
707,
|
| 1034 |
+
913,
|
| 1035 |
+
888
|
| 1036 |
+
],
|
| 1037 |
+
"page_idx": 5
|
| 1038 |
+
},
|
| 1039 |
+
{
|
| 1040 |
+
"type": "page_footnote",
|
| 1041 |
+
"text": "<sup>1</sup>The Gibson val, MP3D val, and MP3D test sets were downloaded from https://github.com/facebookresearch/habitat-lab before 09/09/2021.",
|
| 1042 |
+
"bbox": [
|
| 1043 |
+
504,
|
| 1044 |
+
901,
|
| 1045 |
+
911,
|
| 1046 |
+
926
|
| 1047 |
+
],
|
| 1048 |
+
"page_idx": 5
|
| 1049 |
+
},
|
| 1050 |
+
{
|
| 1051 |
+
"type": "text",
|
| 1052 |
+
"text": "REFERENCES",
|
| 1053 |
+
"text_level": 1,
|
| 1054 |
+
"bbox": [
|
| 1055 |
+
238,
|
| 1056 |
+
66,
|
| 1057 |
+
333,
|
| 1058 |
+
78
|
| 1059 |
+
],
|
| 1060 |
+
"page_idx": 6
|
| 1061 |
+
},
|
| 1062 |
+
{
|
| 1063 |
+
"type": "list",
|
| 1064 |
+
"sub_type": "ref_text",
|
| 1065 |
+
"list_items": [
|
| 1066 |
+
"[1] P. Anderson, A. Chang, D. S. Chaplot, A. Dosovitskiy, S. Gupta, V. Koltun, J. Kosecka, J. Malik, R. Mottaghi, M. Savva, et al., \"On evaluation of embodied navigation agents,\" arXiv preprint arXiv:1807.06757, 2018.",
|
| 1067 |
+
"[2] D. Batra, A. Gokaslan, A. Kembhavi, O. Maksymets, R. Mottaghi, M. Savva, A. Toshev, and E. Wijmans, \"Objectnav revisited: On evaluation of embodied agents navigating to objects,\" arXiv preprint arXiv:2006.13171, 2020.",
|
| 1068 |
+
"[3] C. Cadena, L. Carlone, H. Carrillo, Y. Latif, D. Scaramuzza, J. Neira, I. Reid, and J. J. Leonard, \"Past, present, and future of simultaneous localization and mapping: Toward the robust-perception age,\" IEEE Transactions on robotics, vol. 32, no. 6, pp. 1309-1332, 2016.",
|
| 1069 |
+
"[4] N. A. Melchior and R. Simmons, \"Particle rt for path planning with uncertainty,\" in Proceedings 2007 IEEE International Conference on Robotics and Automation. IEEE, 2007, pp. 1617-1624.",
|
| 1070 |
+
"[5] K. Ok, S. Ansari, B. Gallagher, W. Sica, F. Dellaert, and M. Stilman, \"Path planning with uncertainty: Voronoi uncertainty fields,\" in 2013 IEEE International Conference on Robotics and Automation. IEEE, 2013, pp. 4596-4601.",
|
| 1071 |
+
"[6] M. Savva, A. Kadian, O. Maksymets, Y. Zhao, E. Wijmans, B. Jain, J. Straub, J. Liu, V. Koltun, J. Malik, et al., \"Habitat: A platform for embodied ai research,\" in Proceedings of the IEEE International Conference on Computer Vision, 2019, pp. 9339-9347.",
|
| 1072 |
+
"[7] F. Xia, A. R. Zamir, Z. He, A. Sax, J. Malik, and S. Savarese, \"Gibson env: Real-world perception for embodied agents,\" in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 9068-9079.",
|
| 1073 |
+
"[8] Y. Zhu, R. Mottaghi, E. Kolve, J. J. Lim, A. Gupta, L. Fei-Fei, and A. Farhadi, \"Target-driven visual navigation in indoor scenes using deep reinforcement learning,\" in 2017 IEEE international conference on robotics and automation (ICRA). IEEE, 2017, pp. 3357-3364.",
|
| 1074 |
+
"[9] T. Chen, S. Gupta, and A. Gupta, “Learning exploration policies for navigation,” 7th International Conference on Learning Representations, ICLR 2019, 2019.",
|
| 1075 |
+
"[10] E. Wijmans, A. Kadian, A. Morcos, S. Lee, I. Essa, D. Parikh, M. Savva, and D. Batra, “Dd-ppy: Learning near-perfect pointgoal navigators from 2.5 billion frames,” arXiv, pp. arXiv-1911, 2019.",
|
| 1076 |
+
"[11] S. Gupta, J. Davidson, S. Levine, R. Sukthankar, and J. Malik, \"Cognitive mapping and planning for visual navigation,\" in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2017, pp. 2616-2625.",
|
| 1077 |
+
"[12] S. K. Ramakrishnan, Z. Al-Halah, and K. Grauman, \"Occupancy anticipation for efficient exploration and navigation,\" European Conference on Computer Vision, pp. 400-418, 2020.",
|
| 1078 |
+
"[13] D. S. Chaplot, D. Gandhi, S. Gupta, A. Gupta, and R. Salakhutdinov, \"Learning to explore using active neural slam,\" International Conference on Learning Representations, 2020.",
|
| 1079 |
+
"[14] S. M. LaValle et al., \"Rapidly-exploring random trees: A new tool for path planning,\" 1998.",
|
| 1080 |
+
"[15] H. S. Seung, M. Opper, and H. Sompolinsky, “Query by committee,” in Proceedings of the fifth annual workshop on Computational learning theory, 1992, pp. 287–294.",
|
| 1081 |
+
"[16] D. Pathak, D. Gandhi, and A. Gupta, \"Self-Supervised Exploration via Disagreement,\" ICML, 2019.",
|
| 1082 |
+
"[17] A. Chang, A. Dai, T. Funkhouser, M. Halber, M. Niessner, M. Savva, S. Song, A. Zeng, and Y. Zhang, \"Matterport3d: Learning from rgb-d data in indoor environments,\" 2017 International Conference on 3D Vision (3DV), IEEE, 2017.",
|
| 1083 |
+
"[18] J. Fuentes-Pacheco, J. Ruiz-Ascencio, and J. M. Rendon-Mancha, “Visual simultaneous localization and mapping: a survey,” Artificial intelligence review, vol. 43, no. 1, pp. 55–81, 2015.",
|
| 1084 |
+
"[19] M. Savva, A. X. Chang, A. Dosovitskiy, T. Funkhouser, and V. Koltun, “Minos: Multimodal indoor simulator for navigation in complex environments,” arXiv preprint arXiv:1712.03931, 2017.",
|
| 1085 |
+
"[20] X. Zhao, H. Agrawal, D. Batra, and A. Schwing, “The surprising effectiveness of visual odometry techniques for embodied pointgoal navigation,” arXiv preprint arXiv:2108.11550, 2021.",
|
| 1086 |
+
"[21] P. Karkus, S. Cai, and D. Hsu, \"Differentiable slam-net: Learning particle slam for visual navigation,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021, pp. 2815-2825.",
|
| 1087 |
+
"[22] D. Mishkin, A. Dosovitskiy, and V. Koltun, \"Benchmarking classic and learned navigation in complex 3d environments,\" arXiv preprint arXiv:1901.10915, 2019."
|
| 1088 |
+
],
|
| 1089 |
+
"bbox": [
|
| 1090 |
+
86,
|
| 1091 |
+
87,
|
| 1092 |
+
488,
|
| 1093 |
+
924
|
| 1094 |
+
],
|
| 1095 |
+
"page_idx": 6
|
| 1096 |
+
},
|
| 1097 |
+
{
|
| 1098 |
+
"type": "list",
|
| 1099 |
+
"sub_type": "ref_text",
|
| 1100 |
+
"list_items": [
|
| 1101 |
+
"[23] D. S. Chaplot, D. Gandhi, A. Gupta, and R. Salakhutdinov, “Object goal navigation using goal-oriented semantic exploration,” Advances in Neural Information Processing Systems 33, 2020.",
|
| 1102 |
+
"[24] G. Georgakis, B. Bucher, K. Schmeckpeper, S. Singh, and K. Dani-ilidis, \"Learning to map for active semantic goal navigation,\" arXiv preprint arXiv:2106.15648, 2021.",
|
| 1103 |
+
"[25] G. Georgakis, Y. Li, and J. Kosecka, “Simultaneous mapping and target driven navigation,” arXiv preprint arXiv:1911.07980, 2019.",
|
| 1104 |
+
"[26] A. Mousavian, A. Toshev, M. Fiser, J. Košecka, A. Wahid, and J. Davidson, \"Visual representations for semantic target driven navigation,\" in 2019 International Conference on Robotics and Automation (ICRA). IEEE, 2019, pp. 8846-8852.",
|
| 1105 |
+
"[27] Y. Liang, B. Chen, and S. Song, \"SSCNav: Confidence-aware semantic scene completion for visual semantic navigation,\" International Conference on Robotics and Automation (ICRA), 2021.",
|
| 1106 |
+
"[28] D. S. Chaplot, R. Salakhutdinov, A. Gupta, and S. Gupta, “Neural topological slam for visual navigation,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp. 12875–12884.",
|
| 1107 |
+
"[29] O. Kwon, N. Kim, Y. Choi, H. Yoo, J. Park, and S. Oh, \"Visual graph memory with unsupervised representation for visual navigation.\"",
|
| 1108 |
+
"[30] H. J. S. Feder, J. J. Leonard, and C. M. Smith, \"Adaptive mobile robot navigation and mapping,\" The International Journal of Robotics Research, vol. 18, no. 7, pp. 650-668, 1999.",
|
| 1109 |
+
"[31] T. Kollar and N. Roy, “Trajectory optimization using reinforcement learning for map exploration,” The International Journal of Robotics Research, vol. 27, no. 2, pp. 175–196, 2008.",
|
| 1110 |
+
"[32] L. Carlone, J. Du, M. K. Ng, B. Bona, and M. Indri, \"Active slam and exploration with particle filters using kullback-leibler divergence,\" Journal of Intelligent & Robotic Systems, vol. 75, no. 2, pp. 291-311, 2014.",
|
| 1111 |
+
"[33] H. Carrillo, I. Reid, and J. A. Castellanos, \"On the comparison of uncertainty criteria for active slam,\" in 2012 IEEE International Conference on Robotics and Automation. IEEE, 2012, pp. 2080-2087.",
|
| 1112 |
+
"[34] J.-L. Blanco, J.-A. Fernandez-Madrigal, and J. González, “A novel measure of uncertainty for mobile robot slam with rao—blackwellized particle filters,” The International Journal of Robotics Research, vol. 27, no. 1, pp. 73-89, 2008.",
|
| 1113 |
+
"[35] C. Stachniss, G. Grisetti, and W. Burgard, \"Information gain-based exploration using rao-blackwellized particle filters,\" in Robotics: Science and systems, vol. 2, 2005, pp. 65-72.",
|
| 1114 |
+
"[36] K. Fang, A. Toshev, L. Fei-Fei, and S. Savarese, \"Scene memory transformer for embodied agents in long-horizon tasks,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 538-547.",
|
| 1115 |
+
"[37] J. Zhang, L. Tai, M. Liu, J. Boedecker, and W. Burgard, “Neural slam: Learning to explore with external memory,” arXiv preprint arXiv:1706.09520, 2017.",
|
| 1116 |
+
"[38] M. Narasimhan, E. Wijmans, X. Chen, T. Darrell, D. Batra, D. Parikh, and A. Singh, \"Seeing the un-scene: Learning amodal semantic maps for room navigation,\" European Conference on Computer Vision. Springer, Cham, 2020.",
|
| 1117 |
+
"[39] Y. Katsumata, A. Taniguchi, L. El Hafi, Y. Hagiwara, and T. Taniguchi, \"Spcomapgan: Spatial concept formation-based semantic mapping with generative adversarial networks,\" in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2020, pp. 7927-7934.",
|
| 1118 |
+
"[40] E. Beeching, J. Dibangoye, O. Simonin, and C. Wolf, “Learning to plan with uncertain topological maps,” in Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part III 16. Springer, 2020, pp. 473–490.",
|
| 1119 |
+
"[41] K. Katyal, K. Popek, C. Paxton, P. Burlina, and G. D. Hager, \"Uncertainty-aware occupancy map prediction using generative networks for robot navigation,\" in 2019 International Conference on Robotics and Automation (ICRA), 2019, pp. 5453-5459.",
|
| 1120 |
+
"[42] D. D. Fan, K. Otsu, Y. Kubo, A. Dixit, J. Burdick, and A.-A. Agha-Mohammadi, \"Step: Stochastic traversability evaluation and planning for risk-aware off-road navigation,\" in Robotics: Science and Systems. RSS Foundation, 2021, pp. 1-21.",
|
| 1121 |
+
"[43] É. Pairet, J. D. Hernández, M. Carreras, Y. Petillot, and M. Lahijanian, \"Online mapping and motion planning under uncertainty for safe navigation in unknown environments,\" IEEE Transactions on Automation Science and Engineering, 2021."
|
| 1122 |
+
],
|
| 1123 |
+
"bbox": [
|
| 1124 |
+
509,
|
| 1125 |
+
66,
|
| 1126 |
+
911,
|
| 1127 |
+
925
|
| 1128 |
+
],
|
| 1129 |
+
"page_idx": 6
|
| 1130 |
+
},
|
| 1131 |
+
{
|
| 1132 |
+
"type": "list",
|
| 1133 |
+
"sub_type": "ref_text",
|
| 1134 |
+
"list_items": [
|
| 1135 |
+
"[44] Y. Gal, “Uncertainty in deep learning,” Ph.D. dissertation, University of Cambridge, 2016.",
|
| 1136 |
+
"[45] A. Kendall and Y. Gal, \"What uncertainties do we need in bayesian deep learning for computer vision?\" in Advances in neural information processing systems, 2017, pp. 5574-5584.",
|
| 1137 |
+
"[46] M. G. Azar, I. Osband, and R. Munos, “Minimax regret bounds for reinforcement learning,” in International Conference on Machine Learning. PMLR, 2017, pp. 263–272.",
|
| 1138 |
+
"[47] P. Auer, N. Cesa-Bianchi, and P. Fischer, \"Finite-time analysis of the multiarmed bandit problem,\" Machine learning, vol. 47, no. 2, pp. 235-256, 2002.",
|
| 1139 |
+
"[48] R. Y. Chen, S. Sidor, P. Abbeel, and J. Schulman, \"UCB exploration via q-ensembles,\" arXiv preprint arXiv:1706.01502, 2017.",
|
| 1140 |
+
"[49] O. Ronneberger, P. Fischer, and T. Brox, \"U-net: Convolutional networks for biomedical image segmentation,\" in International Conference on Medical image computing and computer-assisted intervention. Springer, 2015, pp. 234-241.",
|
| 1141 |
+
"[50] B. Lakshminarayanan, A. Pritzel, and C. Blundell, \"Simple and scalable predictive uncertainty estimation using deep ensembles,\" Advances in Neural Information Processing Systems 30, 2017.",
|
| 1142 |
+
"[51] Y. Gal, R. Islam, and Z. Ghahramani, “Deep bayesian active learning with image data,” in International Conference on Machine Learning. PMLR, 2017, pp. 1183–1192.",
|
| 1143 |
+
"[52] B. Yamauchi, “A frontier-based approach for autonomous exploration,” in Proceedings 1997 IEEE International Symposium on Computational Intelligence in Robotics and Automation CIRA '97. Towards New Computational Principles for Robotics and Automation'. IEEE, 1997, pp. 146-151.",
|
| 1144 |
+
"[53] K. He, X. Zhang, S. Ren, and J. Sun, \"Deep residual learning for image recognition,\" in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770-778.",
|
| 1145 |
+
"[54] A. Paszke, S. Gross, S. Chintala, G. Chanan, E. Yang, Z. DeVito, Z. Lin, A. Desmaison, L. Antiga, and A. Lerer, \"Automatic differentiation in pytorch,\" 2017."
|
| 1146 |
+
],
|
| 1147 |
+
"bbox": [
|
| 1148 |
+
84,
|
| 1149 |
+
66,
|
| 1150 |
+
488,
|
| 1151 |
+
453
|
| 1152 |
+
],
|
| 1153 |
+
"page_idx": 7
|
| 1154 |
+
}
|
| 1155 |
+
]
|
2202.11xxx/2202.11907/13c6c70f-eb0a-4855-9c93-c58436de7c44_model.json
ADDED
|
@@ -0,0 +1,1745 @@
|
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "aside_text",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.023,
|
| 7 |
+
0.264,
|
| 8 |
+
0.058,
|
| 9 |
+
0.707
|
| 10 |
+
],
|
| 11 |
+
"angle": 270,
|
| 12 |
+
"content": "arXiv:2202.11907v1 [cs.RO] 24 Feb 2022"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "title",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.153,
|
| 18 |
+
0.089,
|
| 19 |
+
0.836,
|
| 20 |
+
0.113
|
| 21 |
+
],
|
| 22 |
+
"angle": 0,
|
| 23 |
+
"content": "Uncertainty-driven Planner for Exploration and Navigation"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.196,
|
| 29 |
+
0.133,
|
| 30 |
+
0.794,
|
| 31 |
+
0.166
|
| 32 |
+
],
|
| 33 |
+
"angle": 0,
|
| 34 |
+
"content": "Georgios Georgakis<sup>1</sup>, Bernadette Bucher<sup>1</sup>, Anton Arapin<sup>2</sup>, Karl Schmeckpeper<sup>1</sup>, Nikolai Matni<sup>1</sup>, and Kostas Daniilidis<sup>1</sup>"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.082,
|
| 40 |
+
0.203,
|
| 41 |
+
0.49,
|
| 42 |
+
0.444
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "Abstract—We consider the problems of exploration and point-goal navigation in previously unseen environments, where the spatial complexity of indoor scenes and partial observability constitute these tasks challenging. We argue that learning occupancy priors over indoor maps provides significant advantages towards addressing these problems. To this end, we present a novel planning framework that first learns to generate occupancy maps beyond the field-of-view of the agent, and second leverages the model uncertainty over the generated areas to formulate path selection policies for each task of interest. For point-goal navigation the policy chooses paths with an upper confidence bound policy for efficient and traversable paths, while for exploration the policy maximizes model uncertainty over candidate paths. We perform experiments in the visually realistic environments of Matterport3D using the Habitat simulator and demonstrate: 1) Improved results on exploration and map quality metrics over competitive methods, and 2) The effectiveness of our planning module when paired with the state-of-the-art DD-PPO method for the point-goal navigation task."
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "title",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.22,
|
| 51 |
+
0.455,
|
| 52 |
+
0.353,
|
| 53 |
+
0.468
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": "I. INTRODUCTION"
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.082,
|
| 62 |
+
0.474,
|
| 63 |
+
0.49,
|
| 64 |
+
0.685
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "A major prerequisite towards true autonomy is the ability to navigate and explore novel environments. This problem is usually studied in the context of specific tasks such as reaching a specified point goal [1], finding a semantic target [2], or covering as much area as possible while building a map. Each of these tasks has its own idiosyncrasies, but all of them represent examples where one must often reason beyond what is currently observed and incorporate the uncertainty over the inferred information into the decision making process. For example, in point-goal navigation it is important to predict whether a certain path can lead to a dead-end. Likewise, in exploration strong confidence over a particular region's representation may prompt the agent to visit new areas of the map."
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.082,
|
| 73 |
+
0.686,
|
| 74 |
+
0.49,
|
| 75 |
+
0.732
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": "We investigate the tasks of point-goal navigation and exploration, and propose a planning module that leverages contextual occupancy priors. These priors are learned by a"
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.082,
|
| 84 |
+
0.743,
|
| 85 |
+
0.49,
|
| 86 |
+
0.867
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "Research was sponsored by the Army Research Office and was accomplished under Grant Number W911NF-20-1-0080. The views and conclusions contained in this document are those of the authors and should not be interpreted as representing the official policies, either expressed or implied, of the Army Research Office or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for Government purposes notwithstanding any copyright notation herein. Further support was provided by the following grants: NSF IIS 1703319, NSF MRI 1626008, NSF TRIPODS 1934960, NSF CPS 2038873, ARL DCIST CRA W911NF-17-2-0181, ONR N00014-17-1-2093, the DARPA-SRC C-BRIC, CAREER award ECCS-2045834, and a Google Research Scholar award."
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.082,
|
| 95 |
+
0.868,
|
| 96 |
+
0.489,
|
| 97 |
+
0.902
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": "<sup>1</sup>GRASP Laboratory, Department of Computer and Information Science, University of Pennsylvania, Philadelphia, PA 19104. ggeorgqak@seas.upenn.edu"
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.082,
|
| 106 |
+
0.903,
|
| 107 |
+
0.489,
|
| 108 |
+
0.927
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "\\(^{2}\\)Department of Computer Science, The University of Chicago, Chicago, IL, 60637. aarapin@uchicago.edu"
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "list",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.082,
|
| 117 |
+
0.868,
|
| 118 |
+
0.489,
|
| 119 |
+
0.927
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": null
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"bbox": [
|
| 127 |
+
0.505,
|
| 128 |
+
0.203,
|
| 129 |
+
0.914,
|
| 130 |
+
0.367
|
| 131 |
+
],
|
| 132 |
+
"angle": 0,
|
| 133 |
+
"content": "map predictor module that is trained to estimate occupancy values outside the field-of-view of the agent. Using the epistemic (model) uncertainty associated with these predictions we define objectives for path selection for each task of interest. Earlier work in this field focused mainly on learning how to actively control the agent for the purpose of reducing the uncertainty over the map [3] (Active SLAM), without considering navigation tasks in the process, while methods that did consider navigation often operated in relatively simple environments of artificially placed cylindrical obstacles [4], [5]."
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"bbox": [
|
| 138 |
+
0.505,
|
| 139 |
+
0.368,
|
| 140 |
+
0.913,
|
| 141 |
+
0.625
|
| 142 |
+
],
|
| 143 |
+
"angle": 0,
|
| 144 |
+
"content": "With the recent introduction of realistic and visually complex environments serving as navigation benchmarks [6], [7], the focus shifted on learning-based end-to-end approaches [8], [9], [10]. While end-to-end formulations that map pixels directly to actions are attractive in terms of their simplicity, they require very large quantities of training data. For instance, DD-PPO [10] needs 2.5 billion frames of experience to reach its state-of-the-art performance on Gibson [7]. On the other hand, modular approaches [11], [12], [13] are able to encode prior information into explicit map representations and are thus much more sample efficient. Our method falls into the latter category, but differs from other approaches by its use of the uncertainty over predictions outside the field-of-view of the agent during the planning stage. In contrast to [13], [12] this allows our method more flexibility when defining goal selection objectives, and does not require re-training between different tasks."
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "text",
|
| 148 |
+
"bbox": [
|
| 149 |
+
0.505,
|
| 150 |
+
0.625,
|
| 151 |
+
0.914,
|
| 152 |
+
0.865
|
| 153 |
+
],
|
| 154 |
+
"angle": 0,
|
| 155 |
+
"content": "In this paper, we introduce Uncertainty-driven Planner for Exploration and Navigation (UPEN), in which we propose a planning algorithm that is informed by predictions over unobserved areas. Through this spatial prediction approach our model learns layout patterns that can guide a planner towards preferable paths in unknown environments. More specifically, we first train an ensemble of occupancy map predictor models by learning to hallucinate top-down occupancy regions from unobserved areas. Then, a Rapidly Exploring Random-Trees [14] (RRT) algorithm generates a set of candidate paths. We select paths from these candidates using epistemic (model) uncertainty associated with a path traversibility estimate as measured by the disagreement of ensemble models [15], [16], and we choose appropriate short-term goals based on the task of interest. Our contributions are as follows:"
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"type": "text",
|
| 159 |
+
"bbox": [
|
| 160 |
+
0.523,
|
| 161 |
+
0.867,
|
| 162 |
+
0.913,
|
| 163 |
+
0.927
|
| 164 |
+
],
|
| 165 |
+
"angle": 0,
|
| 166 |
+
"content": "- We propose UPEN, a novel planning framework that leverages learned layout priors and formulates uncertainty-based objectives for path selection in exploration and navigation tasks."
|
| 167 |
+
}
|
| 168 |
+
],
|
| 169 |
+
[
|
| 170 |
+
{
|
| 171 |
+
"type": "image",
|
| 172 |
+
"bbox": [
|
| 173 |
+
0.146,
|
| 174 |
+
0.066,
|
| 175 |
+
0.852,
|
| 176 |
+
0.216
|
| 177 |
+
],
|
| 178 |
+
"angle": 0,
|
| 179 |
+
"content": null
|
| 180 |
+
},
|
| 181 |
+
{
|
| 182 |
+
"type": "image_caption",
|
| 183 |
+
"bbox": [
|
| 184 |
+
0.082,
|
| 185 |
+
0.221,
|
| 186 |
+
0.916,
|
| 187 |
+
0.285
|
| 188 |
+
],
|
| 189 |
+
"angle": 0,
|
| 190 |
+
"content": "Fig. 1: Occupancy map prediction (blue-occupied, green-free) and uncertainty estimation for a time-step \\( t \\). The egocentric depth observation is first ground-projected and passed through an ensemble \\( f \\) of encoder-decoder models that each infers information in unobserved areas \\( (\\hat{m}_t) \\). Each \\( \\hat{m}_t \\) is then registered to a separate global map \\( M_t \\). The final occupancy probabilities and model uncertainty are given by the mean and variance over the set of global maps."
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"type": "text",
|
| 194 |
+
"bbox": [
|
| 195 |
+
0.1,
|
| 196 |
+
0.305,
|
| 197 |
+
0.488,
|
| 198 |
+
0.334
|
| 199 |
+
],
|
| 200 |
+
"angle": 0,
|
| 201 |
+
"content": "- We show improved exploration results over competitive methods on the Matterport3D [17] dataset."
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"type": "text",
|
| 205 |
+
"bbox": [
|
| 206 |
+
0.1,
|
| 207 |
+
0.334,
|
| 208 |
+
0.488,
|
| 209 |
+
0.38
|
| 210 |
+
],
|
| 211 |
+
"angle": 0,
|
| 212 |
+
"content": "- We demonstrate the effectiveness of our planner when used to complement existing end-to-end methods on the point-goal navigation task."
|
| 213 |
+
},
|
| 214 |
+
{
|
| 215 |
+
"type": "list",
|
| 216 |
+
"bbox": [
|
| 217 |
+
0.1,
|
| 218 |
+
0.305,
|
| 219 |
+
0.488,
|
| 220 |
+
0.38
|
| 221 |
+
],
|
| 222 |
+
"angle": 0,
|
| 223 |
+
"content": null
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"type": "title",
|
| 227 |
+
"bbox": [
|
| 228 |
+
0.214,
|
| 229 |
+
0.392,
|
| 230 |
+
0.361,
|
| 231 |
+
0.405
|
| 232 |
+
],
|
| 233 |
+
"angle": 0,
|
| 234 |
+
"content": "II. RELATED WORK"
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "text",
|
| 238 |
+
"bbox": [
|
| 239 |
+
0.082,
|
| 240 |
+
0.413,
|
| 241 |
+
0.49,
|
| 242 |
+
0.685
|
| 243 |
+
],
|
| 244 |
+
"angle": 0,
|
| 245 |
+
"content": "a) Navigation approaches: Traditional approaches to visual navigation focus on building a 3D metric map of the environment [18], [3] before using that representation for any downstream navigation tasks, which does not lend itself favourably for task-driven learnable representations that can capture contextual cues. The recent introduction of large-scale indoor environments and simulators [7], [17], [6] has fuelled a slew of learning based methods for indoor navigation tasks [1] such as point-goal [10], [19], [20], [21], [22], object-goal [23], [24], [25], [26], [27], and image-goal [8], [28], [29]. Modular approaches which incorporate explicit or learned map representations [11], [23], [25] have shown to outperform end-to-end methods on tasks such as object-goal, however, this is not currently the case for the point-goal [10], [20] task. In our work, we demonstrate how an uncertainty-driven planning module can favourably complement DD-PPO [10], a competitive method on point-goal navigation, and show increased performance in challenging episodes."
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "text",
|
| 249 |
+
"bbox": [
|
| 250 |
+
0.082,
|
| 251 |
+
0.686,
|
| 252 |
+
0.49,
|
| 253 |
+
0.927
|
| 254 |
+
],
|
| 255 |
+
"angle": 0,
|
| 256 |
+
"content": "b) Exploration methods for navigation: A considerable amount of work was also devoted to planning efficient paths during map building, generally referred to as Active SLAM [30], [31], [32], [33], [34], [35]. For example, [32], [35] define information gain objectives based on the estimated uncertainty over the map in order to decide future actions, while [33] investigates different uncertainty measures. Recent methods focus on learning policies for efficient exploration either through coverage [9], [13], [36], [37] or map accuracy [12] reward functions. Furthermore, several works have gone beyond traditional mapping, and sought to predict maps for unseen regions [12], [38], [24], [27], [39] which further increased robustness in the decision making process. Our approach leverages the uncertainty over predicted occupancy maps for unobserved areas and shows its effectiveness on exploring a novel environment."
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "text",
|
| 260 |
+
"bbox": [
|
| 261 |
+
0.506,
|
| 262 |
+
0.305,
|
| 263 |
+
0.915,
|
| 264 |
+
0.637
|
| 265 |
+
],
|
| 266 |
+
"angle": 0,
|
| 267 |
+
"content": "c) Uncertainty estimation: To navigate in partially observed maps, uncertainty has been estimated across nodes in a path [4], [40], via the marginal probability of landmarks [5], and with the variance of model predictions across predicted maps [24], [41]. Furthermore, uncertainty-aware mapping has been shown to be effective in unknown and highly risky environments [42], [43]. In our work, we use uncertainty differently for exploration and point goal navigation. In exploration, we estimate uncertainty over a predicted occupancy map via the variance between models in an ensemble. This variance across the ensemble specifically estimates model (epistemic) uncertainty [44], [45]. We select paths by maximizing epistemic uncertainty as a proxy for maximizing information gain following prior work in exploration [16], [24]. In point goal navigation, we compute traversability scores for candidate paths using an ensemble of map predictors and compute uncertainty with respect to these traversability scores using the variance over the scores given by each model in the ensemble. We use this uncertainty regarding path traversability to construct an upper confidence bound policy for path selection to balance exploration and exploitation in point goal navigation [46], [47], [48], [24]."
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "title",
|
| 271 |
+
"bbox": [
|
| 272 |
+
0.653,
|
| 273 |
+
0.643,
|
| 274 |
+
0.766,
|
| 275 |
+
0.656
|
| 276 |
+
],
|
| 277 |
+
"angle": 0,
|
| 278 |
+
"content": "III. APPROACH"
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "text",
|
| 282 |
+
"bbox": [
|
| 283 |
+
0.506,
|
| 284 |
+
0.661,
|
| 285 |
+
0.914,
|
| 286 |
+
0.872
|
| 287 |
+
],
|
| 288 |
+
"angle": 0,
|
| 289 |
+
"content": "We present an uncertainty-driven planning module for exploration and point-goal navigation tasks, which benefits from a learned occupancy map predictor module. Our approach takes as input the agent's egocentric depth observation and learns to predict regions of the occupancy map that are outside of the agent's field-of-view. Then it uses the uncertainty over those predictions to decide on a set of candidate paths generated by RRT. We define a separate policy to select a short-term goal along a path for each task of interest. In exploration we maximize uncertainty over the candidate paths, while for point-goal navigation we choose paths with an upper confidence bound policy for efficient and traversable paths. Finally, a local policy (DD-PPO [10]) predicts navigation actions to reach the short-term goal."
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "title",
|
| 293 |
+
"bbox": [
|
| 294 |
+
0.506,
|
| 295 |
+
0.879,
|
| 296 |
+
0.719,
|
| 297 |
+
0.892
|
| 298 |
+
],
|
| 299 |
+
"angle": 0,
|
| 300 |
+
"content": "A. Occupancy Map Prediction"
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "text",
|
| 304 |
+
"bbox": [
|
| 305 |
+
0.506,
|
| 306 |
+
0.897,
|
| 307 |
+
0.913,
|
| 308 |
+
0.927
|
| 309 |
+
],
|
| 310 |
+
"angle": 0,
|
| 311 |
+
"content": "The first component in our planning module aims to capture layout priors in indoor environments. Such information"
|
| 312 |
+
}
|
| 313 |
+
],
|
| 314 |
+
[
|
| 315 |
+
{
|
| 316 |
+
"type": "image",
|
| 317 |
+
"bbox": [
|
| 318 |
+
0.145,
|
| 319 |
+
0.066,
|
| 320 |
+
0.855,
|
| 321 |
+
0.309
|
| 322 |
+
],
|
| 323 |
+
"angle": 0,
|
| 324 |
+
"content": null
|
| 325 |
+
},
|
| 326 |
+
{
|
| 327 |
+
"type": "image_caption",
|
| 328 |
+
"bbox": [
|
| 329 |
+
0.082,
|
| 330 |
+
0.312,
|
| 331 |
+
0.914,
|
| 332 |
+
0.375
|
| 333 |
+
],
|
| 334 |
+
"angle": 0,
|
| 335 |
+
"content": "Fig. 2: Examples of path selections for exploration (top row) and point-goal navigation (bottom-row) tasks. Given the model uncertainty and occupancy probabilities we first generate a set of paths which are evaluated either with an exploration objective (section III-B) or an upper confidence bound objective (section III-C). The agent position is denoted as a dark green dot, the goal is shown as magenta, and red dots signify short-term goals."
|
| 336 |
+
},
|
| 337 |
+
{
|
| 338 |
+
"type": "text",
|
| 339 |
+
"bbox": [
|
| 340 |
+
0.086,
|
| 341 |
+
0.396,
|
| 342 |
+
0.49,
|
| 343 |
+
0.714
|
| 344 |
+
],
|
| 345 |
+
"angle": 0,
|
| 346 |
+
"content": "can lead to a more intelligent decision making process for a downstream navigation task. Following the recent success of [12], [24] we formulate the occupancy map prediction as a semantic segmentation problem. Our model takes as input a depth image \\( D_{t} \\) at time-step \\( t \\) which is ground projected to an egocentric grid \\( m_t' \\in \\mathbb{R}^{|C| \\times h \\times w} \\), where \\( C \\) is the set of classes containing unknown, occupied, and free, and \\( h, w \\) are the dimensions of the local grid. The ground projection is carried out by first using the camera intrinsic parameters to unproject \\( D_{t} \\) to a 3D point cloud and then map each 3D point to the \\( h \\times w \\) grid coordinates: \\( x' = \\lfloor \\frac{x}{r} \\rfloor + \\frac{w - 1}{2} \\), \\( z' = \\lfloor \\frac{z}{r} \\rfloor + \\frac{h - 1}{2} \\), where \\( x', z' \\) are the grid coordinates, \\( x, z \\) are the 3D points, and \\( r \\) is the grid cell size. Since the agent has a limited field of view, \\( m_t' \\) represents a local incomplete top-down occupancy grid of the area surrounding the agent. Our objective is to predict the missing values and produce the complete local occupancy map \\( \\hat{m}_t \\in \\mathbb{R}^{|C| \\times h \\times w} \\). To do so, we pass \\( m_t' \\) through an encoder-decoder UNet [49] model \\( f \\) that outputs a prediction for each grid location over the set of classes \\( C \\). The model \\( f \\) is trained with a pixel-wise cross-entropy loss:"
|
| 347 |
+
},
|
| 348 |
+
{
|
| 349 |
+
"type": "equation",
|
| 350 |
+
"bbox": [
|
| 351 |
+
0.178,
|
| 352 |
+
0.724,
|
| 353 |
+
0.49,
|
| 354 |
+
0.766
|
| 355 |
+
],
|
| 356 |
+
"angle": 0,
|
| 357 |
+
"content": "\\[\nL = - \\frac {1}{K} \\sum_ {k} ^ {K} \\sum_ {c} ^ {C} m _ {k, c} \\log \\hat {m} _ {k, c} \\tag {1}\n\\]"
|
| 358 |
+
},
|
| 359 |
+
{
|
| 360 |
+
"type": "text",
|
| 361 |
+
"bbox": [
|
| 362 |
+
0.082,
|
| 363 |
+
0.776,
|
| 364 |
+
0.491,
|
| 365 |
+
0.927
|
| 366 |
+
],
|
| 367 |
+
"angle": 0,
|
| 368 |
+
"content": "where \\( K = h \\times w \\) corresponds to the number of cells in the local grid and \\( m_{k,c} \\) is the ground-truth label for pixel \\( k \\). The ground-truth occupancy is generated by ground-projecting the available semantic information of the 3D scenes. To ensure diversity in the training examples, we sample training pairs across shortest paths between two randomly selected locations in a scene, where \\( m_t' \\) can contain a variable number of ground-projected depth images. Unlike [12] we do not use the RGB images during training, as we have found that the aforementioned sampling strategy is sufficient for the model"
|
| 369 |
+
},
|
| 370 |
+
{
|
| 371 |
+
"type": "text",
|
| 372 |
+
"bbox": [
|
| 373 |
+
0.506,
|
| 374 |
+
0.396,
|
| 375 |
+
0.913,
|
| 376 |
+
0.426
|
| 377 |
+
],
|
| 378 |
+
"angle": 0,
|
| 379 |
+
"content": "to converge. This enables us to define a smaller and less memory intensive model \\( f \\)."
|
| 380 |
+
},
|
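As an illustration of the ground-projection step and the pixel-wise cross-entropy loss of Eq. 1, a minimal sketch is given below. The camera intrinsics, camera height, and obstacle height band are placeholder values, and this is not the authors' released implementation.

```python
# Minimal sketch of the depth ground-projection and the pixel-wise
# cross-entropy loss of Eq. 1. The intrinsics (fx, fy, cx, cy), camera height,
# and the obstacle height band are illustrative placeholders.
import numpy as np
import torch.nn.functional as F

def ground_project(depth, fx, fy, cx, cy, r=0.05, h=160, w=160, cam_height=1.25):
    """Bin a depth image into an egocentric (|C|=3, h, w) grid: unknown/occupied/free."""
    H, W = depth.shape
    u, v = np.meshgrid(np.arange(W), np.arange(H))
    z = depth                              # forward distance
    x = (u - cx) * z / fx                  # lateral offset
    y = (v - cy) * z / fy                  # vertical offset (points down in the camera frame)
    height = cam_height - y                # approximate height above the floor
    gx = np.floor(x / r).astype(int) + (w - 1) // 2
    gz = np.floor(z / r).astype(int) + (h - 1) // 2
    grid = np.zeros((3, h, w), dtype=np.float32)
    grid[0] = 1.0                          # every cell starts as "unknown"
    valid = (z > 0) & (gx >= 0) & (gx < w) & (gz >= 0) & (gz < h)
    occupied = valid & (height > 0.2) & (height < 1.5)   # placeholder obstacle band
    free = valid & ~occupied
    grid[:, gz[occupied], gx[occupied]] = np.array([0.0, 1.0, 0.0])[:, None]
    grid[:, gz[free], gx[free]] = np.array([0.0, 0.0, 1.0])[:, None]
    return grid

def occupancy_loss(logits, target):
    """Eq. 1: logits (B, |C|, h, w), target (B, h, w) integer class labels."""
    return F.cross_entropy(logits, target)
```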
| 381 |
+
{
|
| 382 |
+
"type": "text",
|
| 383 |
+
"bbox": [
|
| 384 |
+
0.506,
|
| 385 |
+
0.427,
|
| 386 |
+
0.914,
|
| 387 |
+
0.515
|
| 388 |
+
],
|
| 389 |
+
"angle": 0,
|
| 390 |
+
"content": "During a navigation episode, we maintain a global map \\( M_t \\in \\mathbb{R}^{|C| \\times H \\times W} \\). Since \\( f \\) predicts a probability distribution over the classes for each grid location, we register \\( \\hat{m}_t \\) by updating \\( M_t \\) using Bayes Theorem. The global map \\( M_t \\) is initialized with a uniform prior probability distribution across all classes."
|
| 391 |
+
},
|
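A minimal sketch of the per-cell Bayes update used for this registration, assuming the egocentric prediction has already been warped into global coordinates (the pose-based warping itself is omitted):

```python
# Minimal sketch of the Bayes update used for map registration. `warped_pred`
# is the local prediction already placed in global coordinates, with a uniform
# distribution wherever the prediction says nothing about a cell.
import numpy as np

def bayes_update(global_map, warped_pred):
    posterior = global_map * warped_pred                 # element-wise Bayes numerator per cell
    posterior /= posterior.sum(axis=0, keepdims=True)    # renormalise over the |C| classes
    return posterior

# The global map starts from a uniform prior over the classes.
C, H, W = 3, 768, 768
M = np.full((C, H, W), 1.0 / C, dtype=np.float32)
```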
| 392 |
+
{
|
| 393 |
+
"type": "title",
|
| 394 |
+
"bbox": [
|
| 395 |
+
0.507,
|
| 396 |
+
0.528,
|
| 397 |
+
0.661,
|
| 398 |
+
0.544
|
| 399 |
+
],
|
| 400 |
+
"angle": 0,
|
| 401 |
+
"content": "B. Exploration Policy"
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"type": "text",
|
| 405 |
+
"bbox": [
|
| 406 |
+
0.505,
|
| 407 |
+
0.548,
|
| 408 |
+
0.915,
|
| 409 |
+
0.684
|
| 410 |
+
],
|
| 411 |
+
"angle": 0,
|
| 412 |
+
"content": "The main goal of exploration task is to maximize map coverage which requires navigating to new map regions around obstacles. To this end, we propose selecting paths using uncertainty of our map predictions as an objective in our planning algorithm. We are explicitly minimizing map uncertainty by collecting observations to improve the predicted global map \\( M_{t} \\). Implicitly map coverage is maximized by minimizing map uncertainty because high coverage is required for predicting an accurate map with low uncertainty."
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"type": "text",
|
| 416 |
+
"bbox": [
|
| 417 |
+
0.506,
|
| 418 |
+
0.685,
|
| 419 |
+
0.914,
|
| 420 |
+
0.897
|
| 421 |
+
],
|
| 422 |
+
"angle": 0,
|
| 423 |
+
"content": "We use the epistemic (model) uncertainty as an objective for exploration [45], [44], [16], [24]. In order to estimate epistemic uncertainty, we construct \\( f \\) as an ensemble of \\( N \\) occupancy prediction models defined over the parameters \\( \\{\\theta_1,\\dots,\\theta_N\\} \\). Variance between models in the ensemble comes from different random weight initializations in each network [16]. Our model estimates the true probability distribution \\( P(m_t|m_t') \\) by averaging over sampled model weights, \\( P(m_t|m_t') \\approx \\mathbb{E}_{\\theta \\sim q(\\theta)}f(m_t';\\theta) \\approx \\frac{1}{N}\\sum_{i = 1}^{N}f(m_t';\\theta_i) \\) where the parameters \\( \\theta \\) are random variables sampled from the distribution \\( q(\\theta)[50] \\), [51]. Then, following prior work [15], [16], [24], the epistemic uncertainty can be approximated from the variance between the outputs of the models in the ensemble, \\( \\mathrm{Var}f(m_t';\\theta) \\)."
|
| 424 |
+
},
|
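A minimal sketch of the ensemble mean and variance computation, assuming `models` is a list of trained occupancy predictors that return per-class logits:

```python
# Minimal sketch of the ensemble prediction: the mean approximates P(m_t | m_t')
# and the per-cell variance serves as epistemic uncertainty. `models` is an
# assumed list of trained predictors returning logits of shape (1, |C|, h, w).
import torch

@torch.no_grad()
def ensemble_predict(models, m_prime):
    probs = torch.stack([torch.softmax(f(m_prime), dim=1) for f in models])  # (N, 1, |C|, h, w)
    mean = probs.mean(dim=0)    # occupancy probabilities, approx. P(m_t | m_t')
    var = probs.var(dim=0)      # per-class epistemic uncertainty
    u = var[:, 1]               # e.g. variance of the "occupied" channel as the map u_k
    return mean, u
```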
| 425 |
+
{
|
| 426 |
+
"type": "text",
|
| 427 |
+
"bbox": [
|
| 428 |
+
0.506,
|
| 429 |
+
0.897,
|
| 430 |
+
0.913,
|
| 431 |
+
0.927
|
| 432 |
+
],
|
| 433 |
+
"angle": 0,
|
| 434 |
+
"content": "For path planning during exploration, our proposed objective can be used with any planner which generates a set \\( S \\)"
|
| 435 |
+
}
|
| 436 |
+
],
|
| 437 |
+
[
|
| 438 |
+
{
|
| 439 |
+
"type": "text",
|
| 440 |
+
"bbox": [
|
| 441 |
+
0.082,
|
| 442 |
+
0.066,
|
| 443 |
+
0.49,
|
| 444 |
+
0.171
|
| 445 |
+
],
|
| 446 |
+
"angle": 0,
|
| 447 |
+
"content": "of candidate paths. Each path \\( s \\in S \\) can be expressed as a subset of grid locations in our map. Each of these grid locations \\( k \\) has an associated uncertainty estimate given by the variance between model predictions in our ensemble. We specify this uncertainty map as \\( u_{k} \\coloneqq \\operatorname{Var} f(m_{t}^{\\prime};\\theta) \\in \\mathbb{R}^{1 \\times h \\times w} \\). We use this map to score each path \\( s \\) and construct the objective"
|
| 448 |
+
},
|
| 449 |
+
{
|
| 450 |
+
"type": "equation",
|
| 451 |
+
"bbox": [
|
| 452 |
+
0.222,
|
| 453 |
+
0.171,
|
| 454 |
+
0.489,
|
| 455 |
+
0.205
|
| 456 |
+
],
|
| 457 |
+
"angle": 0,
|
| 458 |
+
"content": "\\[\n\\underset {s \\in S} {\\arg \\max } \\frac {1}{| s |} \\sum_ {k \\in s} u _ {k} \\tag {2}\n\\]"
|
| 459 |
+
},
|
| 460 |
+
{
|
| 461 |
+
"type": "text",
|
| 462 |
+
"bbox": [
|
| 463 |
+
0.082,
|
| 464 |
+
0.211,
|
| 465 |
+
0.489,
|
| 466 |
+
0.24
|
| 467 |
+
],
|
| 468 |
+
"angle": 0,
|
| 469 |
+
"content": "which selects the path with the maximum average epistemic uncertainty on the traversed grid."
|
| 470 |
+
},
|
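Equation 2 amounts to ranking candidate paths by the mean per-cell uncertainty they cross; a minimal sketch, assuming each path is given as a list of grid cells:

```python
# Minimal sketch of the exploration objective in Eq. 2: pick the candidate path
# with the highest mean per-cell epistemic uncertainty. `paths` is an assumed
# list of (row, col) cell sequences produced by the RRT planner.
import numpy as np

def select_exploration_path(paths, uncertainty_map):
    def mean_uncertainty(path):
        rows, cols = map(np.array, zip(*path))
        return uncertainty_map[rows, cols].mean()
    return max(paths, key=mean_uncertainty)
```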
| 471 |
+
{
|
| 472 |
+
"type": "text",
|
| 473 |
+
"bbox": [
|
| 474 |
+
0.082,
|
| 475 |
+
0.242,
|
| 476 |
+
0.489,
|
| 477 |
+
0.391
|
| 478 |
+
],
|
| 479 |
+
"angle": 0,
|
| 480 |
+
"content": "In this work, we incorporate our uncertainty-based objective in RRT to plan to explore. We expand RRT for a set number of iterations, which generates candidate paths in random directions. We select between these paths using our objective from equation 2. In practice, equation 2 is evaluated over the accumulated global map \\( M_{t} \\). Figure 1 shows the occupancy map prediction and the uncertainty estimation process using the ensemble \\( f \\), while Figure 2 (top row) shows an example of path selection using the exploration objective."
|
| 481 |
+
},
|
| 482 |
+
{
|
| 483 |
+
"type": "title",
|
| 484 |
+
"bbox": [
|
| 485 |
+
0.084,
|
| 486 |
+
0.401,
|
| 487 |
+
0.228,
|
| 488 |
+
0.416
|
| 489 |
+
],
|
| 490 |
+
"angle": 0,
|
| 491 |
+
"content": "C. Point-goal Policy"
|
| 492 |
+
},
|
| 493 |
+
{
|
| 494 |
+
"type": "text",
|
| 495 |
+
"bbox": [
|
| 496 |
+
0.082,
|
| 497 |
+
0.42,
|
| 498 |
+
0.49,
|
| 499 |
+
0.587
|
| 500 |
+
],
|
| 501 |
+
"angle": 0,
|
| 502 |
+
"content": "In the problem of point-goal navigation, the objective is to efficiently navigate past obstacles to a given goal location from a starting position. We again use RRT as a planner which generates a set of paths \\( S \\) between the agent's current location and the goal location. Thus, the primary objective when we select a path from these candidates to traverse is for the path to be unobstructed. Given a predicted occupancy map from model \\( i \\) in our ensemble and a candidate path \\( s \\in S \\) generated by our planner, we evaluate whether or not the path is obstructed by taking the maximum probability of occupancy in any grid cell \\( k \\) along each path. Specifically,"
|
| 503 |
+
},
|
| 504 |
+
{
|
| 505 |
+
"type": "equation",
|
| 506 |
+
"bbox": [
|
| 507 |
+
0.2,
|
| 508 |
+
0.593,
|
| 509 |
+
0.489,
|
| 510 |
+
0.617
|
| 511 |
+
],
|
| 512 |
+
"angle": 0,
|
| 513 |
+
"content": "\\[\np _ {i, s} = \\max _ {k \\in s} \\left(\\hat {m} _ {k, o c c} ^ {i} | _ {k \\in s}\\right) \\tag {3}\n\\]"
|
| 514 |
+
},
|
| 515 |
+
{
|
| 516 |
+
"type": "text",
|
| 517 |
+
"bbox": [
|
| 518 |
+
0.082,
|
| 519 |
+
0.624,
|
| 520 |
+
0.49,
|
| 521 |
+
0.851
|
| 522 |
+
],
|
| 523 |
+
"angle": 0,
|
| 524 |
+
"content": "where \\(\\hat{m}_{k,occ}^i|_{k\\in s}\\) is the map of occupancy probabilities defined on the subset of grid cells \\(k\\in s\\) predicted by model \\(i\\) in the ensemble \\(f\\). Choosing the path \\(s\\in S\\) by minimizing \\(p_{i,s}\\) chooses the path we think most likely to be unobstructed. We can minimize this likelihood by selecting \\(\\arg \\min_{s\\in S}\\mu_s\\) where \\(\\mu_s\\coloneqq \\frac{1}{N}\\sum_{i = 1}^{N}p_{i,s}\\). However, we note that there may be multiple unobstructed candidate paths generated by our planner. We differentiate between these in our selection by adding a term \\(d_{s}\\) to our objective to incentivize selecting shorter paths. Furthermore, as an agent navigates to a goal, it makes map predictions using its accumulated observations along the way. Therefore, to improve navigation performance we can incorporate an exploration component in our navigation objective to incentivize choosing paths where it can gain the most information regarding efficient traversability."
|
| 525 |
+
},
|
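A minimal sketch of the per-path score of Eq. 3 and the ensemble statistics derived from it, assuming each ensemble member's occupied-class probabilities are stacked into one array:

```python
# Minimal sketch of Eq. 3 and of the ensemble statistics used later:
# p_{i,s} is the maximum occupied-class probability along path s under model i,
# mu_s its mean over the ensemble, and sigma_s its standard deviation.
import numpy as np

def path_occupancy_scores(path, ensemble_occ_maps):
    """path: list of (row, col); ensemble_occ_maps: (N, H, W) occupied-class probabilities."""
    rows, cols = map(np.array, zip(*path))
    p_is = ensemble_occ_maps[:, rows, cols].max(axis=1)   # p_{i,s} for each of the N models
    return p_is, p_is.mean(), p_is.std()
```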
| 526 |
+
{
|
| 527 |
+
"type": "text",
|
| 528 |
+
"bbox": [
|
| 529 |
+
0.082,
|
| 530 |
+
0.852,
|
| 531 |
+
0.49,
|
| 532 |
+
0.927
|
| 533 |
+
],
|
| 534 |
+
"angle": 0,
|
| 535 |
+
"content": "We estimate uncertainty associated with efficient traversability of a particular path \\( s \\) for our exploration objective. Since there is zero uncertainty associated with path lengths \\( d_{s} \\), we design our exploration objective to maximize information gain for path traversability. We"
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"type": "text",
|
| 539 |
+
"bbox": [
|
| 540 |
+
0.505,
|
| 541 |
+
0.066,
|
| 542 |
+
0.913,
|
| 543 |
+
0.158
|
| 544 |
+
],
|
| 545 |
+
"angle": 0,
|
| 546 |
+
"content": "denote \\( P_{s_{NT}}(m_t|m_t') \\) as the probability the path \\( s \\) is not traversable (\\( NT \\)) estimated by \\( \\mu_s \\). We recall that \\( \\mu_s \\) is computed by averaging traversability scores over an ensemble of models. We compute the variance of these scores \\( \\mathrm{Var}_{i\\in N}p_{i,s} \\) to estimate uncertainty of our model approximating \\( P_{s_{NT}}(m_t|m_t') \\)."
|
| 547 |
+
},
|
| 548 |
+
{
|
| 549 |
+
"type": "text",
|
| 550 |
+
"bbox": [
|
| 551 |
+
0.506,
|
| 552 |
+
0.157,
|
| 553 |
+
0.913,
|
| 554 |
+
0.215
|
| 555 |
+
],
|
| 556 |
+
"angle": 0,
|
| 557 |
+
"content": "We combine exploration and exploitation in our full objective using an upper confidence bound policy [47], [46], [48], [24]. Our objective for efficient traversable paths is specified as"
|
| 558 |
+
},
|
| 559 |
+
{
|
| 560 |
+
"type": "equation",
|
| 561 |
+
"bbox": [
|
| 562 |
+
0.616,
|
| 563 |
+
0.217,
|
| 564 |
+
0.913,
|
| 565 |
+
0.242
|
| 566 |
+
],
|
| 567 |
+
"angle": 0,
|
| 568 |
+
"content": "\\[\n\\underset {s \\in S} {\\arg \\min } P _ {s _ {N T}} \\left(m _ {t} \\mid m _ {t} ^ {\\prime}\\right) + d _ {s} \\tag {4}\n\\]"
|
| 569 |
+
},
|
| 570 |
+
{
|
| 571 |
+
"type": "text",
|
| 572 |
+
"bbox": [
|
| 573 |
+
0.506,
|
| 574 |
+
0.247,
|
| 575 |
+
0.913,
|
| 576 |
+
0.293
|
| 577 |
+
],
|
| 578 |
+
"angle": 0,
|
| 579 |
+
"content": "and can be reconstructed as a maximization problem \\(\\arg \\max_{s\\in S} - P_{s_{NT}}(m_t|m_t^{\\prime}) - d_s\\) . We denote \\(\\sigma_{s}\\coloneqq\\) \\(\\sqrt{\\operatorname{Var}_{i\\in N}p_{i,s}}\\) and observe the upper bound"
|
| 580 |
+
},
|
| 581 |
+
{
|
| 582 |
+
"type": "equation",
|
| 583 |
+
"bbox": [
|
| 584 |
+
0.567,
|
| 585 |
+
0.3,
|
| 586 |
+
0.913,
|
| 587 |
+
0.316
|
| 588 |
+
],
|
| 589 |
+
"angle": 0,
|
| 590 |
+
"content": "\\[\n- P _ {s _ {N T}} \\left(m _ {t} \\mid m _ {t} ^ {\\prime}\\right) - d _ {s} \\leq - \\mu_ {s} + \\alpha_ {1} \\sigma_ {s} - d _ {s} \\tag {5}\n\\]"
|
| 591 |
+
},
|
| 592 |
+
{
|
| 593 |
+
"type": "text",
|
| 594 |
+
"bbox": [
|
| 595 |
+
0.505,
|
| 596 |
+
0.323,
|
| 597 |
+
0.913,
|
| 598 |
+
0.383
|
| 599 |
+
],
|
| 600 |
+
"angle": 0,
|
| 601 |
+
"content": "holds with some fixed but unknown probability where \\(\\alpha_{1}\\) is a constant hyperparameter. Using our upper bound to estimate \\(-P_{s_{NT}}(m_t|D_t)\\), our full objective function as a minimization problem is"
|
| 602 |
+
},
|
| 603 |
+
{
|
| 604 |
+
"type": "equation",
|
| 605 |
+
"bbox": [
|
| 606 |
+
0.618,
|
| 607 |
+
0.392,
|
| 608 |
+
0.913,
|
| 609 |
+
0.415
|
| 610 |
+
],
|
| 611 |
+
"angle": 0,
|
| 612 |
+
"content": "\\[\n\\underset {s} {\\arg \\min } \\mu_ {s} - \\alpha_ {1} \\sigma_ {s} + \\alpha_ {2} d _ {s} \\tag {6}\n\\]"
|
| 613 |
+
},
|
| 614 |
+
{
|
| 615 |
+
"type": "text",
|
| 616 |
+
"bbox": [
|
| 617 |
+
0.506,
|
| 618 |
+
0.42,
|
| 619 |
+
0.913,
|
| 620 |
+
0.496
|
| 621 |
+
],
|
| 622 |
+
"angle": 0,
|
| 623 |
+
"content": "where \\(\\alpha_{2}\\) is a hyperparameter weighting the contribution of path length. Similarly to our exploration policy, in practice, equation 6 is evaluated over the accumulated global map \\(M_{t}\\). Figure 2 (bottom row) illustrates path selection using our objective during a point-goal episode."
|
| 624 |
+
},
|
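Putting the pieces together, the path selection of Eq. 6 can be sketched as below; `path_occupancy_scores` is the hypothetical helper from the earlier sketch, and the default weights follow the values reported in section IV-C:

```python
# Minimal sketch of the point-goal objective in Eq. 6 (upper confidence bound
# style path selection). alpha1/alpha2 follow the defaults reported in the
# paper; path_occupancy_scores is the hypothetical helper sketched earlier.
import numpy as np

def select_pointgoal_path(paths, ensemble_occ_maps, alpha1=0.1, alpha2=0.5, cell_size=0.05):
    def objective(path):
        _, mu_s, sigma_s = path_occupancy_scores(path, ensemble_occ_maps)
        d_s = len(path) * cell_size      # simple path-length term in metres
        return mu_s - alpha1 * sigma_s + alpha2 * d_s
    return min(paths, key=objective)
```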
| 625 |
+
{
|
| 626 |
+
"type": "title",
|
| 627 |
+
"bbox": [
|
| 628 |
+
0.642,
|
| 629 |
+
0.504,
|
| 630 |
+
0.776,
|
| 631 |
+
0.517
|
| 632 |
+
],
|
| 633 |
+
"angle": 0,
|
| 634 |
+
"content": "IV. EXPERIMENTS"
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"type": "text",
|
| 638 |
+
"bbox": [
|
| 639 |
+
0.505,
|
| 640 |
+
0.523,
|
| 641 |
+
0.913,
|
| 642 |
+
0.674
|
| 643 |
+
],
|
| 644 |
+
"angle": 0,
|
| 645 |
+
"content": "Our experiments are conducted on the Matterport3D (MP3D) [17] dataset using the Habitat [6] simulator. We follow the standard train/val/test environments split of MP3D which contains overall 90 reconstructions of realistic indoor scenes. The splits are disjoint, therefore all evaluations are conducted in novel scenes where the occupancy map predictor model has not seen during training. Our observation space consists of \\(256 \\times 256\\) depth images, while the action space contains four actions: MOVE_FORWARD by \\(25cm\\), TURN_LEFT and TURN_RIGHT by \\(10^{\\circ}\\) and STOP."
|
| 646 |
+
},
|
| 647 |
+
{
|
| 648 |
+
"type": "text",
|
| 649 |
+
"bbox": [
|
| 650 |
+
0.505,
|
| 651 |
+
0.674,
|
| 652 |
+
0.913,
|
| 653 |
+
0.765
|
| 654 |
+
],
|
| 655 |
+
"angle": 0,
|
| 656 |
+
"content": "We perform two key experiments. First, we compare to other state-of-the-art methods on the task of exploration using both coverage and map accuracy metrics (sec. IV-B). Second we evaluate on the point-goal navigation task and demonstrate increased performance when DD-PPO [10] is complemented with our planning strategy (sec. IV-C)."
|
| 657 |
+
},
|
| 658 |
+
{
|
| 659 |
+
"type": "title",
|
| 660 |
+
"bbox": [
|
| 661 |
+
0.506,
|
| 662 |
+
0.773,
|
| 663 |
+
0.691,
|
| 664 |
+
0.787
|
| 665 |
+
],
|
| 666 |
+
"angle": 0,
|
| 667 |
+
"content": "A. Implementation Details"
|
| 668 |
+
},
|
| 669 |
+
{
|
| 670 |
+
"type": "text",
|
| 671 |
+
"bbox": [
|
| 672 |
+
0.505,
|
| 673 |
+
0.791,
|
| 674 |
+
0.913,
|
| 675 |
+
0.927
|
| 676 |
+
],
|
| 677 |
+
"angle": 0,
|
| 678 |
+
"content": "The Unet [49] model used for the occupancy map prediction has four encoder and four decoder convolutional blocks with skip connections and it is combined with a ResNet18 [53] for feature extraction. We use Pytorch [54] and train using the Adam optimizer with a learning rate of 0.0002. The grid dimensions are \\( h = w = 160 \\) for local, and \\( H = W = 768 \\) for global, while each cell in the grid is \\( 5cm \\times 5cm \\). For the path generation process, we run the RRT every 30 navigation steps for exploration and"
|
| 679 |
+
}
|
| 680 |
+
],
|
| 681 |
+
[
|
| 682 |
+
{
|
| 683 |
+
"type": "image",
|
| 684 |
+
"bbox": [
|
| 685 |
+
0.127,
|
| 686 |
+
0.068,
|
| 687 |
+
0.251,
|
| 688 |
+
0.164
|
| 689 |
+
],
|
| 690 |
+
"angle": 0,
|
| 691 |
+
"content": null
|
| 692 |
+
},
|
| 693 |
+
{
|
| 694 |
+
"type": "image",
|
| 695 |
+
"bbox": [
|
| 696 |
+
0.251,
|
| 697 |
+
0.068,
|
| 698 |
+
0.374,
|
| 699 |
+
0.164
|
| 700 |
+
],
|
| 701 |
+
"angle": 0,
|
| 702 |
+
"content": null
|
| 703 |
+
},
|
| 704 |
+
{
|
| 705 |
+
"type": "image",
|
| 706 |
+
"bbox": [
|
| 707 |
+
0.375,
|
| 708 |
+
0.068,
|
| 709 |
+
0.498,
|
| 710 |
+
0.164
|
| 711 |
+
],
|
| 712 |
+
"angle": 0,
|
| 713 |
+
"content": null
|
| 714 |
+
},
|
| 715 |
+
{
|
| 716 |
+
"type": "image",
|
| 717 |
+
"bbox": [
|
| 718 |
+
0.5,
|
| 719 |
+
0.068,
|
| 720 |
+
0.622,
|
| 721 |
+
0.164
|
| 722 |
+
],
|
| 723 |
+
"angle": 0,
|
| 724 |
+
"content": null
|
| 725 |
+
},
|
| 726 |
+
{
|
| 727 |
+
"type": "image",
|
| 728 |
+
"bbox": [
|
| 729 |
+
0.624,
|
| 730 |
+
0.068,
|
| 731 |
+
0.747,
|
| 732 |
+
0.164
|
| 733 |
+
],
|
| 734 |
+
"angle": 0,
|
| 735 |
+
"content": null
|
| 736 |
+
},
|
| 737 |
+
{
|
| 738 |
+
"type": "image",
|
| 739 |
+
"bbox": [
|
| 740 |
+
0.127,
|
| 741 |
+
0.165,
|
| 742 |
+
0.25,
|
| 743 |
+
0.26
|
| 744 |
+
],
|
| 745 |
+
"angle": 0,
|
| 746 |
+
"content": null
|
| 747 |
+
},
|
| 748 |
+
{
|
| 749 |
+
"type": "image",
|
| 750 |
+
"bbox": [
|
| 751 |
+
0.251,
|
| 752 |
+
0.165,
|
| 753 |
+
0.374,
|
| 754 |
+
0.26
|
| 755 |
+
],
|
| 756 |
+
"angle": 0,
|
| 757 |
+
"content": null
|
| 758 |
+
},
|
| 759 |
+
{
|
| 760 |
+
"type": "image",
|
| 761 |
+
"bbox": [
|
| 762 |
+
0.375,
|
| 763 |
+
0.165,
|
| 764 |
+
0.498,
|
| 765 |
+
0.26
|
| 766 |
+
],
|
| 767 |
+
"angle": 0,
|
| 768 |
+
"content": null
|
| 769 |
+
},
|
| 770 |
+
{
|
| 771 |
+
"type": "image",
|
| 772 |
+
"bbox": [
|
| 773 |
+
0.5,
|
| 774 |
+
0.165,
|
| 775 |
+
0.623,
|
| 776 |
+
0.26
|
| 777 |
+
],
|
| 778 |
+
"angle": 0,
|
| 779 |
+
"content": null
|
| 780 |
+
},
|
| 781 |
+
{
|
| 782 |
+
"type": "image",
|
| 783 |
+
"bbox": [
|
| 784 |
+
0.625,
|
| 785 |
+
0.165,
|
| 786 |
+
0.748,
|
| 787 |
+
0.26
|
| 788 |
+
],
|
| 789 |
+
"angle": 0,
|
| 790 |
+
"content": null
|
| 791 |
+
},
|
| 792 |
+
{
|
| 793 |
+
"type": "image",
|
| 794 |
+
"bbox": [
|
| 795 |
+
0.749,
|
| 796 |
+
0.165,
|
| 797 |
+
0.872,
|
| 798 |
+
0.26
|
| 799 |
+
],
|
| 800 |
+
"angle": 0,
|
| 801 |
+
"content": null
|
| 802 |
+
},
|
| 803 |
+
{
|
| 804 |
+
"type": "image_caption",
|
| 805 |
+
"bbox": [
|
| 806 |
+
0.417,
|
| 807 |
+
0.265,
|
| 808 |
+
0.46,
|
| 809 |
+
0.278
|
| 810 |
+
],
|
| 811 |
+
"angle": 0,
|
| 812 |
+
"content": "time"
|
| 813 |
+
},
|
| 814 |
+
{
|
| 815 |
+
"type": "image_caption",
|
| 816 |
+
"bbox": [
|
| 817 |
+
0.082,
|
| 818 |
+
0.283,
|
| 819 |
+
0.911,
|
| 820 |
+
0.314
|
| 821 |
+
],
|
| 822 |
+
"angle": 0,
|
| 823 |
+
"content": "Fig. 3: Exploration example with \\( T = 1000 \\) showing the trajectory followed by our agent (red line). The top row shows RGB images observed by the agent. The ground-truth map is visualized in the bottom right corner."
|
| 824 |
+
},
|
| 825 |
+
{
|
| 826 |
+
"type": "table",
|
| 827 |
+
"bbox": [
|
| 828 |
+
0.089,
|
| 829 |
+
0.32,
|
| 830 |
+
0.559,
|
| 831 |
+
0.398
|
| 832 |
+
],
|
| 833 |
+
"angle": 0,
|
| 834 |
+
"content": "<table><tr><td></td><td colspan=\"2\">Noisy</td><td colspan=\"2\">Noise-free</td></tr><tr><td>Method</td><td>Map Acc (m2)</td><td>IoU (%)</td><td>Map Acc (m2)</td><td>IoU (%)</td></tr><tr><td>ANS(depth) [12]</td><td>72.5</td><td>26.0</td><td>85.9</td><td>34.0</td></tr><tr><td>OccAnt(depth) w/o AR [12]</td><td>92.7</td><td>29.0</td><td>104.7</td><td>38.0</td></tr><tr><td>OccAnt(depth) [12]</td><td>94.1</td><td>33.0</td><td>96.5</td><td>35.0</td></tr><tr><td>FBE [52] + DD-PPO [10]</td><td>100.9</td><td>28.7</td><td>120.2</td><td>44.7</td></tr><tr><td>UPEN + DD-PPO [10]</td><td>110.3</td><td>25.8</td><td>141.6</td><td>45.6</td></tr></table>"
|
| 835 |
+
},
|
| 836 |
+
{
|
| 837 |
+
"type": "table_caption",
|
| 838 |
+
"bbox": [
|
| 839 |
+
0.082,
|
| 840 |
+
0.404,
|
| 841 |
+
0.544,
|
| 842 |
+
0.449
|
| 843 |
+
],
|
| 844 |
+
"angle": 0,
|
| 845 |
+
"content": "TABLE I: Exploration results on MP3D test scenes evaluating map quality at \\( \\mathrm{T} = {500} \\) . The \"w/o AR\" refers to the baseline that is trained without the anticipation reward in [12]."
|
| 846 |
+
},
|
| 847 |
+
{
|
| 848 |
+
"type": "table",
|
| 849 |
+
"bbox": [
|
| 850 |
+
0.594,
|
| 851 |
+
0.341,
|
| 852 |
+
0.897,
|
| 853 |
+
0.392
|
| 854 |
+
],
|
| 855 |
+
"angle": 0,
|
| 856 |
+
"content": "<table><tr><td></td><td>Cov (m2)</td><td>Cov (%)</td></tr><tr><td>ANS(rgb) [13]</td><td>73.28</td><td>52.1</td></tr><tr><td>FBE [52] + DD-PPO [10]</td><td>85.3</td><td>53.0</td></tr><tr><td>UPEN + DD-PPO [10]</td><td>113.0</td><td>67.9</td></tr></table>"
|
| 857 |
+
},
|
| 858 |
+
{
|
| 859 |
+
"type": "table_caption",
|
| 860 |
+
"bbox": [
|
| 861 |
+
0.577,
|
| 862 |
+
0.397,
|
| 863 |
+
0.913,
|
| 864 |
+
0.428
|
| 865 |
+
],
|
| 866 |
+
"angle": 0,
|
| 867 |
+
"content": "TABLE II: Exploration results on MP3D test scenes evaluating area coverage at \\( \\mathrm{T} = {1000} \\) ."
|
| 868 |
+
},
|
| 869 |
+
{
|
| 870 |
+
"type": "text",
|
| 871 |
+
"bbox": [
|
| 872 |
+
0.082,
|
| 873 |
+
0.479,
|
| 874 |
+
0.49,
|
| 875 |
+
0.631
|
| 876 |
+
],
|
| 877 |
+
"angle": 0,
|
| 878 |
+
"content": "20 for point-goal. The RRT is set to generate a maximum of 10 paths every run, with a goal sampling rate of \\(20\\%\\). Finally, the RRT expands new nodes with a distance of 5 pixels at a time. A single step in a navigation episode requires 0.37s on average that includes map prediction and registration, planning using RRT, and DD-PPO. The timing was performed on a laptop using i7 CPU @ 2.20GHz and a GTX1060 GPU. All experiments are with ensemble size of 4. We provide code and trained models: https://github.com/ggeorgak11/UPEN."
|
| 879 |
+
},
|
| 880 |
+
{
|
| 881 |
+
"type": "title",
|
| 882 |
+
"bbox": [
|
| 883 |
+
0.084,
|
| 884 |
+
0.637,
|
| 885 |
+
0.189,
|
| 886 |
+
0.651
|
| 887 |
+
],
|
| 888 |
+
"angle": 0,
|
| 889 |
+
"content": "B. Exploration"
|
| 890 |
+
},
|
| 891 |
+
{
|
| 892 |
+
"type": "text",
|
| 893 |
+
"bbox": [
|
| 894 |
+
0.082,
|
| 895 |
+
0.655,
|
| 896 |
+
0.49,
|
| 897 |
+
0.927
|
| 898 |
+
],
|
| 899 |
+
"angle": 0,
|
| 900 |
+
"content": "The setup from [12] is followed for this experiment, where the objective is to cover as much area as possible given a limited time budget \\( T = 1000 \\). Unless stated otherwise, the evaluation is conducted with simulated noise following the noise models from [13], [12]. We use the following metrics: 1) Map Accuracy \\( (m^2) \\): as defined in [12] the area in the predicted occupancy map that matches the ground-truth map. 2) IoU (%): the intersection over union of the predicted map and the ground-truth. 3) Cov \\( (m^2) \\): the actual area covered by the agent. 4) Cov (%): ratio of covered area to max scene coverage. We note that the two coverage metrics are computed on a map containing only ground-projections of depth observations. Our method is validated against the competitive approaches of Occupancy Anticipation [12] (OccAnt) and Active Neural SLAM [13] (ANS), which are modular approaches with mapper components. Both use reinforcement learning to train goal selection policies optimized over map accuracy and coverage respectively. Furthermore,"
|
| 901 |
+
},
|
| 902 |
+
{
|
| 903 |
+
"type": "text",
|
| 904 |
+
"bbox": [
|
| 905 |
+
0.505,
|
| 906 |
+
0.479,
|
| 907 |
+
0.913,
|
| 908 |
+
0.539
|
| 909 |
+
],
|
| 910 |
+
"angle": 0,
|
| 911 |
+
"content": "we compare against the classical method of Frontier-based Exploration [52] (FBE). Since both UPEN and FBE are combined with DD-PPO and use the same predicted maps, this comparison directly validates our exploration objective."
|
| 912 |
+
},
|
| 913 |
+
{
|
| 914 |
+
"type": "text",
|
| 915 |
+
"bbox": [
|
| 916 |
+
0.505,
|
| 917 |
+
0.54,
|
| 918 |
+
0.914,
|
| 919 |
+
0.781
|
| 920 |
+
],
|
| 921 |
+
"angle": 0,
|
| 922 |
+
"content": "We report two key results. First, in Table I our method outperforms all baselines in the noise-free case in both Map Accuracy and IoU. In fact, we show \\(21.4m^2\\) and \\(36.9m^2\\) improvement over FBE and OccAnt respectively on the Map Accuracy metric. In the noisy case even though we still surpass all baselines on Map Accuracy, our performance drops significantly in both metrics. In addition, the Map Accuracy increasing while IoU drops is attributed to increased map coverage with reduced accuracy. This is not surprising since unlike OccAnt and Neural SLAM we are not using a pose estimator. Second, in Table II we demonstrate superior performance on coverage metrics with a margin of \\(27.7m^2\\) from FBE and \\(39.7m^2\\) from ANS. This suggests that our method is more efficient when exploring a novel scene, thus validating our uncertainty-based exploration policy. Figure 3 shows an exploration episode."
|
| 923 |
+
},
|
| 924 |
+
{
|
| 925 |
+
"type": "title",
|
| 926 |
+
"bbox": [
|
| 927 |
+
0.507,
|
| 928 |
+
0.788,
|
| 929 |
+
0.682,
|
| 930 |
+
0.802
|
| 931 |
+
],
|
| 932 |
+
"angle": 0,
|
| 933 |
+
"content": "C. Point-goal Navigation"
|
| 934 |
+
},
|
| 935 |
+
{
|
| 936 |
+
"type": "text",
|
| 937 |
+
"bbox": [
|
| 938 |
+
0.505,
|
| 939 |
+
0.806,
|
| 940 |
+
0.914,
|
| 941 |
+
0.927
|
| 942 |
+
],
|
| 943 |
+
"angle": 0,
|
| 944 |
+
"content": "We evaluate the performance of our uncertainty-driven planner when used to augment DD-PPO [10] against its vanilla version. DD-PPO is currently one of the best performing methods on point-goal navigation, achieving \\(97\\%\\) SPL on the Gibson [7] validation set as shown in [10]. We follow the point-goal task setup from [1] where given a target coordinate the agent needs to navigate to that target and stop within a \\(0.2m\\) radius. The agent is given a time-budget of"
|
| 945 |
+
}
|
| 946 |
+
],
|
| 947 |
+
[
|
| 948 |
+
{
|
| 949 |
+
"type": "table",
|
| 950 |
+
"bbox": [
|
| 951 |
+
0.178,
|
| 952 |
+
0.068,
|
| 953 |
+
0.821,
|
| 954 |
+
0.141
|
| 955 |
+
],
|
| 956 |
+
"angle": 0,
|
| 957 |
+
"content": "<table><tr><td>Dataset</td><td colspan=\"2\">MP3D Val</td><td colspan=\"2\">MP3D Test</td><td colspan=\"2\">MP3D Val-Hard</td></tr><tr><td>Method</td><td>Success (%)</td><td>SPL (%)</td><td>Success (%)</td><td>SPL (%)</td><td>Success (%)</td><td>SPL (%)</td></tr><tr><td>DD-PPO [10]</td><td>47.8</td><td>38.7</td><td>37.3</td><td>30.2</td><td>38.0</td><td>28.1</td></tr><tr><td>UPEN-Occ + DD-PPO [10]</td><td>43.8</td><td>30.2</td><td>36.3</td><td>25.3</td><td>42.3</td><td>26.9</td></tr><tr><td>UPEN-Greedy + DD-PPO [10]</td><td>48.9</td><td>36.0</td><td>37.5</td><td>28.1</td><td>43.0</td><td>28.8</td></tr><tr><td>UPEN + DD-PPO [10]</td><td>49.8</td><td>36.9</td><td>40.8</td><td>30.7</td><td>45.7</td><td>31.6</td></tr></table>"
|
| 958 |
+
},
|
| 959 |
+
{
|
| 960 |
+
"type": "table_caption",
|
| 961 |
+
"bbox": [
|
| 962 |
+
0.082,
|
| 963 |
+
0.154,
|
| 964 |
+
0.913,
|
| 965 |
+
0.186
|
| 966 |
+
],
|
| 967 |
+
"angle": 0,
|
| 968 |
+
"content": "TABLE III: Point-goal navigation results of our method against the vanilla DD-PPO[10]. \"Occ\" signifies a policy that uses only occupancy predictions, while \"Greedy\" refers to a policy taking into consideration path length without uncertainty."
|
| 969 |
+
},
|
| 970 |
+
{
|
| 971 |
+
"type": "table",
|
| 972 |
+
"bbox": [
|
| 973 |
+
0.107,
|
| 974 |
+
0.202,
|
| 975 |
+
0.468,
|
| 976 |
+
0.263
|
| 977 |
+
],
|
| 978 |
+
"angle": 0,
|
| 979 |
+
"content": "<table><tr><td></td><td>Avg GD (m)</td><td>Avg GEDR</td><td>Min GEDR</td></tr><tr><td>Gibson Val</td><td>5.88</td><td>1.37</td><td>1.00</td></tr><tr><td>MP3D Val</td><td>11.14</td><td>1.40</td><td>1.00</td></tr><tr><td>MP3D Test</td><td>13.23</td><td>1.42</td><td>1.00</td></tr><tr><td>MP3D Val-Hard</td><td>8.28</td><td>3.19</td><td>2.50</td></tr></table>"
|
| 980 |
+
},
|
| 981 |
+
{
|
| 982 |
+
"type": "table_caption",
|
| 983 |
+
"bbox": [
|
| 984 |
+
0.082,
|
| 985 |
+
0.268,
|
| 986 |
+
0.49,
|
| 987 |
+
0.314
|
| 988 |
+
],
|
| 989 |
+
"angle": 0,
|
| 990 |
+
"content": "TABLE IV: Geodesic distance (GD) and geodesic to Euclidean distance ratio (GEDR) between different evaluation sets for point-goal navigation."
|
| 991 |
+
},
|
| 992 |
+
{
|
| 993 |
+
"type": "image",
|
| 994 |
+
"bbox": [
|
| 995 |
+
0.176,
|
| 996 |
+
0.332,
|
| 997 |
+
0.401,
|
| 998 |
+
0.614
|
| 999 |
+
],
|
| 1000 |
+
"angle": 0,
|
| 1001 |
+
"content": null
|
| 1002 |
+
},
|
| 1003 |
+
{
|
| 1004 |
+
"type": "image_caption",
|
| 1005 |
+
"bbox": [
|
| 1006 |
+
0.082,
|
| 1007 |
+
0.618,
|
| 1008 |
+
0.49,
|
| 1009 |
+
0.664
|
| 1010 |
+
],
|
| 1011 |
+
"angle": 0,
|
| 1012 |
+
"content": "Fig. 4: Point-goal navigation examples from the MP3D Val-Hard set where the vanilla DD-PPO [10] fails to reach the target while our method is successful."
|
| 1013 |
+
},
|
| 1014 |
+
{
|
| 1015 |
+
"type": "text",
|
| 1016 |
+
"bbox": [
|
| 1017 |
+
0.082,
|
| 1018 |
+
0.699,
|
| 1019 |
+
0.489,
|
| 1020 |
+
0.82
|
| 1021 |
+
],
|
| 1022 |
+
"angle": 0,
|
| 1023 |
+
"content": "\\(T = 500\\) steps to reach the target. For evaluation we use the standard metrics [1]: Success: percentage of successful episodes, and SPL: success rate normalized by path length. For this experiment we assume noise-free poses are provided by the simulator. To combine DD-PPO with our planner, we set the current short-term goal estimated by our approach as the target that DD-PPO needs to reach. For the vanilla DD-PPO we use the final target location in each test episode."
|
| 1024 |
+
},
|
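For reference, the Success and SPL metrics of [1] can be computed as in the following sketch; per-episode success flags, shortest geodesic path lengths, and the lengths actually travelled by the agent are assumed to be available:

```python
# Minimal sketch of the Success and SPL metrics from [1]: SPL weights each
# successful episode by the ratio of the shortest (geodesic) path length to
# the path length the agent actually travelled.
import numpy as np

def success_and_spl(successes, shortest_lengths, agent_lengths):
    """successes: per-episode 0/1 flags; lengths in metres."""
    s = np.asarray(successes, dtype=float)
    l = np.asarray(shortest_lengths, dtype=float)
    p = np.asarray(agent_lengths, dtype=float)
    spl = np.mean(s * l / np.maximum(p, l))
    return s.mean(), spl
```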
| 1025 |
+
{
|
| 1026 |
+
"type": "text",
|
| 1027 |
+
"bbox": [
|
| 1028 |
+
0.082,
|
| 1029 |
+
0.821,
|
| 1030 |
+
0.49,
|
| 1031 |
+
0.929
|
| 1032 |
+
],
|
| 1033 |
+
"angle": 0,
|
| 1034 |
+
"content": "DD-PPO essentially solves Gibson point-goal navigation task so we turn our attention to MP3D where DD-PPO has lower performance due to the episodes having larger average geodesic distance (GD) to goal. However, we noticed that the average geodesic to euclidean distance ratio (GEDR) in MP3D is still low (a GEDR of 1 means there is a straight line path between the starting position and the goal)."
|
| 1035 |
+
},
|
| 1036 |
+
{
|
| 1037 |
+
"type": "text",
|
| 1038 |
+
"bbox": [
|
| 1039 |
+
0.505,
|
| 1040 |
+
0.207,
|
| 1041 |
+
0.913,
|
| 1042 |
+
0.358
|
| 1043 |
+
],
|
| 1044 |
+
"angle": 0,
|
| 1045 |
+
"content": "In order to demonstrate the effectiveness of our proposed method, we generated a new evaluation set (MP3D Val-Hard) with minimum GEDR=2.5. This created episodes which frequently involve sharp u-turns and multiple obstacles along the shortest path. Table IV illustrates episode statistics between different evaluation sets<sup>1</sup>. In addition to MP3D Val-Hard, we also test our method on the publicly available sets of MP3D Val and MP3D Test. We note that MP3D Val-Hard was generated using the same random procedure as its publicly available counterparts."
|
| 1046 |
+
},
|
| 1047 |
+
{
|
| 1048 |
+
"type": "text",
|
| 1049 |
+
"bbox": [
|
| 1050 |
+
0.506,
|
| 1051 |
+
0.359,
|
| 1052 |
+
0.914,
|
| 1053 |
+
0.495
|
| 1054 |
+
],
|
| 1055 |
+
"angle": 0,
|
| 1056 |
+
"content": "We define two variations of our method in order to demonstrate the usefulness of our uncertainty estimation by choosing different values for the \\(\\alpha_{1}\\) and \\(\\alpha_{2}\\) parameters of Eq. 6 from section III-C. First, \\(UPEN-Occ + DD-PPO\\) (\\(\\alpha_{1} = 0\\), \\(\\alpha_{2} = 0\\)) considers only the occupancy probabilities when estimating the traversability difficulty of a path, while \\(UPEN-Greedy + DD-PPO\\) (\\(\\alpha_{1} = 0\\), \\(\\alpha = 0.5\\)) considers the path length and not the uncertainty. Our default method \\(UPEN + DD-PPO\\) uses \\(\\alpha_{1} = 0.1\\) and \\(\\alpha_{2} = 0.5\\)."
|
| 1057 |
+
},
|
| 1058 |
+
{
|
| 1059 |
+
"type": "text",
|
| 1060 |
+
"bbox": [
|
| 1061 |
+
0.505,
|
| 1062 |
+
0.495,
|
| 1063 |
+
0.914,
|
| 1064 |
+
0.678
|
| 1065 |
+
],
|
| 1066 |
+
"angle": 0,
|
| 1067 |
+
"content": "The results are illustrated in Table III. We outperform all baselines in all evaluation sets with regards to Success. The largest gap in performance is observed in the MP3D Val-Hard set which contains episodes with much higher average GEDR that the other sets. This suggests that our method is able to follow more complicated paths by choosing short-term goals, in contrast to the vanilla DD-PPO which has to negotiate narrow passages and sharp turns only from egocentric observations. Regarding SPL, our performance gains are not as pronounced as in Success, since our policy frequently prefers paths with lower traversability difficulty in favor of shortest paths, to ensure higher success probability."
|
| 1068 |
+
},
|
| 1069 |
+
{
|
| 1070 |
+
"type": "title",
|
| 1071 |
+
"bbox": [
|
| 1072 |
+
0.649,
|
| 1073 |
+
0.688,
|
| 1074 |
+
0.771,
|
| 1075 |
+
0.7
|
| 1076 |
+
],
|
| 1077 |
+
"angle": 0,
|
| 1078 |
+
"content": "V. CONCLUSION"
|
| 1079 |
+
},
|
| 1080 |
+
{
|
| 1081 |
+
"type": "text",
|
| 1082 |
+
"bbox": [
|
| 1083 |
+
0.505,
|
| 1084 |
+
0.708,
|
| 1085 |
+
0.914,
|
| 1086 |
+
0.889
|
| 1087 |
+
],
|
| 1088 |
+
"angle": 0,
|
| 1089 |
+
"content": "We introduced a novel uncertainty-driven planner for exploration and navigation tasks in previously unseen environments. The planner leverages an occupancy map predictor that hallucinates map regions outside the field of view of the agent and uses its predictions to formulate uncertainty based objectives. Our experiments on exploration suggests that our method is more efficient in covering unknown areas. In terms of point-goal navigation, we showed how DD-PPO [10] augmented with our method outperforms its vanilla version. This suggests that end-to-end navigation methods can benefit from employing an uncertainty-driven planner, especially in difficult episodes."
|
| 1090 |
+
},
|
| 1091 |
+
{
|
| 1092 |
+
"type": "page_footnote",
|
| 1093 |
+
"bbox": [
|
| 1094 |
+
0.506,
|
| 1095 |
+
0.902,
|
| 1096 |
+
0.913,
|
| 1097 |
+
0.927
|
| 1098 |
+
],
|
| 1099 |
+
"angle": 0,
|
| 1100 |
+
"content": "<sup>1</sup>The Gibson val, MP3D val, and MP3D test sets were downloaded from https://github.com/facebookresearch/habitat-lab before 09/09/2021."
|
| 1101 |
+
}
|
| 1102 |
+
],
|
| 1103 |
+
[
|
| 1104 |
+
{
|
| 1105 |
+
"type": "title",
|
| 1106 |
+
"bbox": [
|
| 1107 |
+
0.24,
|
| 1108 |
+
0.068,
|
| 1109 |
+
0.334,
|
| 1110 |
+
0.079
|
| 1111 |
+
],
|
| 1112 |
+
"angle": 0,
|
| 1113 |
+
"content": "REFERENCES"
|
| 1114 |
+
},
|
| 1115 |
+
{
|
| 1116 |
+
"type": "ref_text",
|
| 1117 |
+
"bbox": [
|
| 1118 |
+
0.093,
|
| 1119 |
+
0.088,
|
| 1120 |
+
0.489,
|
| 1121 |
+
0.132
|
| 1122 |
+
],
|
| 1123 |
+
"angle": 0,
|
| 1124 |
+
"content": "[1] P. Anderson, A. Chang, D. S. Chaplot, A. Dosovitskiy, S. Gupta, V. Koltun, J. Kosecka, J. Malik, R. Mottaghi, M. Savva, et al., \"On evaluation of embodied navigation agents,\" arXiv preprint arXiv:1807.06757, 2018."
|
| 1125 |
+
},
|
| 1126 |
+
{
|
| 1127 |
+
"type": "ref_text",
|
| 1128 |
+
"bbox": [
|
| 1129 |
+
0.093,
|
| 1130 |
+
0.134,
|
| 1131 |
+
0.489,
|
| 1132 |
+
0.178
|
| 1133 |
+
],
|
| 1134 |
+
"angle": 0,
|
| 1135 |
+
"content": "[2] D. Batra, A. Gokaslan, A. Kembhavi, O. Maksymets, R. Mottaghi, M. Savva, A. Toshev, and E. Wijmans, \"Objectnav revisited: On evaluation of embodied agents navigating to objects,\" arXiv preprint arXiv:2006.13171, 2020."
|
| 1136 |
+
},
|
| 1137 |
+
{
|
| 1138 |
+
"type": "ref_text",
|
| 1139 |
+
"bbox": [
|
| 1140 |
+
0.093,
|
| 1141 |
+
0.179,
|
| 1142 |
+
0.489,
|
| 1143 |
+
0.223
|
| 1144 |
+
],
|
| 1145 |
+
"angle": 0,
|
| 1146 |
+
"content": "[3] C. Cadena, L. Carlone, H. Carrillo, Y. Latif, D. Scaramuzza, J. Neira, I. Reid, and J. J. Leonard, \"Past, present, and future of simultaneous localization and mapping: Toward the robust-perception age,\" IEEE Transactions on robotics, vol. 32, no. 6, pp. 1309-1332, 2016."
|
| 1147 |
+
},
|
| 1148 |
+
{
|
| 1149 |
+
"type": "ref_text",
|
| 1150 |
+
"bbox": [
|
| 1151 |
+
0.093,
|
| 1152 |
+
0.224,
|
| 1153 |
+
0.489,
|
| 1154 |
+
0.258
|
| 1155 |
+
],
|
| 1156 |
+
"angle": 0,
|
| 1157 |
+
"content": "[4] N. A. Melchior and R. Simmons, \"Particle rt for path planning with uncertainty,\" in Proceedings 2007 IEEE International Conference on Robotics and Automation. IEEE, 2007, pp. 1617-1624."
|
| 1158 |
+
},
|
| 1159 |
+
{
|
| 1160 |
+
"type": "ref_text",
|
| 1161 |
+
"bbox": [
|
| 1162 |
+
0.093,
|
| 1163 |
+
0.259,
|
| 1164 |
+
0.489,
|
| 1165 |
+
0.303
|
| 1166 |
+
],
|
| 1167 |
+
"angle": 0,
|
| 1168 |
+
"content": "[5] K. Ok, S. Ansari, B. Gallagher, W. Sica, F. Dellaert, and M. Stilman, \"Path planning with uncertainty: Voronoi uncertainty fields,\" in 2013 IEEE International Conference on Robotics and Automation. IEEE, 2013, pp. 4596-4601."
|
| 1169 |
+
},
|
| 1170 |
+
{
|
| 1171 |
+
"type": "ref_text",
|
| 1172 |
+
"bbox": [
|
| 1173 |
+
0.093,
|
| 1174 |
+
0.304,
|
| 1175 |
+
0.489,
|
| 1176 |
+
0.349
|
| 1177 |
+
],
|
| 1178 |
+
"angle": 0,
|
| 1179 |
+
"content": "[6] M. Savva, A. Kadian, O. Maksymets, Y. Zhao, E. Wijmans, B. Jain, J. Straub, J. Liu, V. Koltun, J. Malik, et al., \"Habitat: A platform for embodied ai research,\" in Proceedings of the IEEE International Conference on Computer Vision, 2019, pp. 9339-9347."
|
| 1180 |
+
},
|
| 1181 |
+
{
|
| 1182 |
+
"type": "ref_text",
|
| 1183 |
+
"bbox": [
|
| 1184 |
+
0.093,
|
| 1185 |
+
0.349,
|
| 1186 |
+
0.489,
|
| 1187 |
+
0.394
|
| 1188 |
+
],
|
| 1189 |
+
"angle": 0,
|
| 1190 |
+
"content": "[7] F. Xia, A. R. Zamir, Z. He, A. Sax, J. Malik, and S. Savarese, \"Gibson env: Real-world perception for embodied agents,\" in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 9068-9079."
|
| 1191 |
+
},
|
| 1192 |
+
{
|
| 1193 |
+
"type": "ref_text",
|
| 1194 |
+
"bbox": [
|
| 1195 |
+
0.093,
|
| 1196 |
+
0.395,
|
| 1197 |
+
0.489,
|
| 1198 |
+
0.439
|
| 1199 |
+
],
|
| 1200 |
+
"angle": 0,
|
| 1201 |
+
"content": "[8] Y. Zhu, R. Mottaghi, E. Kolve, J. J. Lim, A. Gupta, L. Fei-Fei, and A. Farhadi, \"Target-driven visual navigation in indoor scenes using deep reinforcement learning,\" in 2017 IEEE international conference on robotics and automation (ICRA). IEEE, 2017, pp. 3357-3364."
|
| 1202 |
+
},
|
| 1203 |
+
{
|
| 1204 |
+
"type": "ref_text",
|
| 1205 |
+
"bbox": [
|
| 1206 |
+
0.093,
|
| 1207 |
+
0.439,
|
| 1208 |
+
0.489,
|
| 1209 |
+
0.472
|
| 1210 |
+
],
|
| 1211 |
+
"angle": 0,
|
| 1212 |
+
"content": "[9] T. Chen, S. Gupta, and A. Gupta, “Learning exploration policies for navigation,” 7th International Conference on Learning Representations, ICLR 2019, 2019."
|
| 1213 |
+
},
|
| 1214 |
+
{
|
| 1215 |
+
"type": "ref_text",
|
| 1216 |
+
"bbox": [
|
| 1217 |
+
0.088,
|
| 1218 |
+
0.473,
|
| 1219 |
+
0.489,
|
| 1220 |
+
0.507
|
| 1221 |
+
],
|
| 1222 |
+
"angle": 0,
|
| 1223 |
+
"content": "[10] E. Wijmans, A. Kadian, A. Morcos, S. Lee, I. Essa, D. Parikh, M. Savva, and D. Batra, “Dd-ppy: Learning near-perfect pointgoal navigators from 2.5 billion frames,” arXiv, pp. arXiv-1911, 2019."
|
| 1224 |
+
},
|
| 1225 |
+
{
|
| 1226 |
+
"type": "ref_text",
|
| 1227 |
+
"bbox": [
|
| 1228 |
+
0.088,
|
| 1229 |
+
0.508,
|
| 1230 |
+
0.49,
|
| 1231 |
+
0.552
|
| 1232 |
+
],
|
| 1233 |
+
"angle": 0,
|
| 1234 |
+
"content": "[11] S. Gupta, J. Davidson, S. Levine, R. Sukthankar, and J. Malik, \"Cognitive mapping and planning for visual navigation,\" in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2017, pp. 2616-2625."
|
| 1235 |
+
},
|
| 1236 |
+
{
|
| 1237 |
+
"type": "ref_text",
|
| 1238 |
+
"bbox": [
|
| 1239 |
+
0.088,
|
| 1240 |
+
0.553,
|
| 1241 |
+
0.489,
|
| 1242 |
+
0.586
|
| 1243 |
+
],
|
| 1244 |
+
"angle": 0,
|
| 1245 |
+
"content": "[12] S. K. Ramakrishnan, Z. Al-Halah, and K. Grauman, \"Occupancy anticipation for efficient exploration and navigation,\" European Conference on Computer Vision, pp. 400-418, 2020."
|
| 1246 |
+
},
|
| 1247 |
+
{
|
| 1248 |
+
"type": "ref_text",
|
| 1249 |
+
"bbox": [
|
| 1250 |
+
0.088,
|
| 1251 |
+
0.587,
|
| 1252 |
+
0.489,
|
| 1253 |
+
0.62
|
| 1254 |
+
],
|
| 1255 |
+
"angle": 0,
|
| 1256 |
+
"content": "[13] D. S. Chaplot, D. Gandhi, S. Gupta, A. Gupta, and R. Salakhutdinov, \"Learning to explore using active neural slam,\" International Conference on Learning Representations, 2020."
|
| 1257 |
+
},
|
| 1258 |
+
{
|
| 1259 |
+
"type": "ref_text",
|
| 1260 |
+
"bbox": [
|
| 1261 |
+
0.088,
|
| 1262 |
+
0.621,
|
| 1263 |
+
0.489,
|
| 1264 |
+
0.643
|
| 1265 |
+
],
|
| 1266 |
+
"angle": 0,
|
| 1267 |
+
"content": "[14] S. M. LaValle et al., \"Rapidly-exploring random trees: A new tool for path planning,\" 1998."
|
| 1268 |
+
},
|
| 1269 |
+
{
|
| 1270 |
+
"type": "ref_text",
|
| 1271 |
+
"bbox": [
|
| 1272 |
+
0.088,
|
| 1273 |
+
0.643,
|
| 1274 |
+
0.489,
|
| 1275 |
+
0.677
|
| 1276 |
+
],
|
| 1277 |
+
"angle": 0,
|
| 1278 |
+
"content": "[15] H. S. Seung, M. Opper, and H. Sompolinsky, “Query by committee,” in Proceedings of the fifth annual workshop on Computational learning theory, 1992, pp. 287–294."
|
| 1279 |
+
},
|
| 1280 |
+
{
|
| 1281 |
+
"type": "ref_text",
|
| 1282 |
+
"bbox": [
|
| 1283 |
+
0.088,
|
| 1284 |
+
0.678,
|
| 1285 |
+
0.489,
|
| 1286 |
+
0.699
|
| 1287 |
+
],
|
| 1288 |
+
"angle": 0,
|
| 1289 |
+
"content": "[16] D. Pathak, D. Gandhi, and A. Gupta, \"Self-Supervised Exploration via Disagreement,\" ICML, 2019."
|
| 1290 |
+
},
|
| 1291 |
+
{
|
| 1292 |
+
"type": "ref_text",
|
| 1293 |
+
"bbox": [
|
| 1294 |
+
0.088,
|
| 1295 |
+
0.7,
|
| 1296 |
+
0.489,
|
| 1297 |
+
0.744
|
| 1298 |
+
],
|
| 1299 |
+
"angle": 0,
|
| 1300 |
+
"content": "[17] A. Chang, A. Dai, T. Funkhouser, M. Halber, M. Niessner, M. Savva, S. Song, A. Zeng, and Y. Zhang, \"Matterport3d: Learning from rgb-d data in indoor environments,\" 2017 International Conference on 3D Vision (3DV), IEEE, 2017."
|
| 1301 |
+
},
|
| 1302 |
+
{
|
| 1303 |
+
"type": "ref_text",
|
| 1304 |
+
"bbox": [
|
| 1305 |
+
0.088,
|
| 1306 |
+
0.745,
|
| 1307 |
+
0.489,
|
| 1308 |
+
0.779
|
| 1309 |
+
],
|
| 1310 |
+
"angle": 0,
|
| 1311 |
+
"content": "[18] J. Fuentes-Pacheco, J. Ruiz-Ascencio, and J. M. Rendon-Mancha, “Visual simultaneous localization and mapping: a survey,” Artificial intelligence review, vol. 43, no. 1, pp. 55–81, 2015."
|
| 1312 |
+
},
|
| 1313 |
+
{
|
| 1314 |
+
"type": "ref_text",
|
| 1315 |
+
"bbox": [
|
| 1316 |
+
0.088,
|
| 1317 |
+
0.78,
|
| 1318 |
+
0.489,
|
| 1319 |
+
0.813
|
| 1320 |
+
],
|
| 1321 |
+
"angle": 0,
|
| 1322 |
+
"content": "[19] M. Savva, A. X. Chang, A. Dosovitskiy, T. Funkhouser, and V. Koltun, “Minos: Multimodal indoor simulator for navigation in complex environments,” arXiv preprint arXiv:1712.03931, 2017."
|
| 1323 |
+
},
|
| 1324 |
+
{
|
| 1325 |
+
"type": "ref_text",
|
| 1326 |
+
"bbox": [
|
| 1327 |
+
0.088,
|
| 1328 |
+
0.814,
|
| 1329 |
+
0.489,
|
| 1330 |
+
0.847
|
| 1331 |
+
],
|
| 1332 |
+
"angle": 0,
|
| 1333 |
+
"content": "[20] X. Zhao, H. Agrawal, D. Batra, and A. Schwing, “The surprising effectiveness of visual odometry techniques for embodied pointgoal navigation,” arXiv preprint arXiv:2108.11550, 2021."
|
| 1334 |
+
},
|
| 1335 |
+
{
|
| 1336 |
+
"type": "ref_text",
|
| 1337 |
+
"bbox": [
|
| 1338 |
+
0.088,
|
| 1339 |
+
0.848,
|
| 1340 |
+
0.489,
|
| 1341 |
+
0.891
|
| 1342 |
+
],
|
| 1343 |
+
"angle": 0,
|
| 1344 |
+
"content": "[21] P. Karkus, S. Cai, and D. Hsu, \"Differentiable slam-net: Learning particle slam for visual navigation,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021, pp. 2815-2825."
|
| 1345 |
+
},
|
| 1346 |
+
{
|
| 1347 |
+
"type": "ref_text",
|
| 1348 |
+
"bbox": [
|
| 1349 |
+
0.088,
|
| 1350 |
+
0.892,
|
| 1351 |
+
0.489,
|
| 1352 |
+
0.925
|
| 1353 |
+
],
|
| 1354 |
+
"angle": 0,
|
| 1355 |
+
"content": "[22] D. Mishkin, A. Dosovitskiy, and V. Koltun, \"Benchmarking classic and learned navigation in complex 3d environments,\" arXiv preprint arXiv:1901.10915, 2019."
|
| 1356 |
+
},
|
| 1357 |
+
{
|
| 1358 |
+
"type": "list",
|
| 1359 |
+
"bbox": [
|
| 1360 |
+
0.088,
|
| 1361 |
+
0.088,
|
| 1362 |
+
0.49,
|
| 1363 |
+
0.925
|
| 1364 |
+
],
|
| 1365 |
+
"angle": 0,
|
| 1366 |
+
"content": null
|
| 1367 |
+
},
|
| 1368 |
+
{
|
| 1369 |
+
"type": "ref_text",
|
| 1370 |
+
"bbox": [
|
| 1371 |
+
0.511,
|
| 1372 |
+
0.068,
|
| 1373 |
+
0.912,
|
| 1374 |
+
0.103
|
| 1375 |
+
],
|
| 1376 |
+
"angle": 0,
|
| 1377 |
+
"content": "[23] D. S. Chaplot, D. Gandhi, A. Gupta, and R. Salakhutdinov, “Object goal navigation using goal-oriented semantic exploration,” Advances in Neural Information Processing Systems 33, 2020."
|
| 1378 |
+
},
|
| 1379 |
+
{
|
| 1380 |
+
"type": "ref_text",
|
| 1381 |
+
"bbox": [
|
| 1382 |
+
0.511,
|
| 1383 |
+
0.104,
|
| 1384 |
+
0.912,
|
| 1385 |
+
0.138
|
| 1386 |
+
],
|
| 1387 |
+
"angle": 0,
|
| 1388 |
+
"content": "[24] G. Georgakis, B. Bucher, K. Schmeckpeper, S. Singh, and K. Dani-ilidis, \"Learning to map for active semantic goal navigation,\" arXiv preprint arXiv:2106.15648, 2021."
|
| 1389 |
+
},
|
| 1390 |
+
{
|
| 1391 |
+
"type": "ref_text",
|
| 1392 |
+
"bbox": [
|
| 1393 |
+
0.511,
|
| 1394 |
+
0.14,
|
| 1395 |
+
0.912,
|
| 1396 |
+
0.162
|
| 1397 |
+
],
|
| 1398 |
+
"angle": 0,
|
| 1399 |
+
"content": "[25] G. Georgakis, Y. Li, and J. Kosecka, “Simultaneous mapping and target driven navigation,” arXiv preprint arXiv:1911.07980, 2019."
|
| 1400 |
+
},
|
| 1401 |
+
{
|
| 1402 |
+
"type": "ref_text",
|
| 1403 |
+
"bbox": [
|
| 1404 |
+
0.511,
|
| 1405 |
+
0.163,
|
| 1406 |
+
0.912,
|
| 1407 |
+
0.208
|
| 1408 |
+
],
|
| 1409 |
+
"angle": 0,
|
| 1410 |
+
"content": "[26] A. Mousavian, A. Toshev, M. Fiser, J. Košecka, A. Wahid, and J. Davidson, \"Visual representations for semantic target driven navigation,\" in 2019 International Conference on Robotics and Automation (ICRA). IEEE, 2019, pp. 8846-8852."
|
| 1411 |
+
},
|
| 1412 |
+
{
|
| 1413 |
+
"type": "ref_text",
|
| 1414 |
+
"bbox": [
|
| 1415 |
+
0.511,
|
| 1416 |
+
0.21,
|
| 1417 |
+
0.912,
|
| 1418 |
+
0.242
|
| 1419 |
+
],
|
| 1420 |
+
"angle": 0,
|
| 1421 |
+
"content": "[27] Y. Liang, B. Chen, and S. Song, \"SSCNav: Confidence-aware semantic scene completion for visual semantic navigation,\" International Conference on Robotics and Automation (ICRA), 2021."
|
| 1422 |
+
},
|
| 1423 |
+
{
|
| 1424 |
+
"type": "ref_text",
|
| 1425 |
+
"bbox": [
|
| 1426 |
+
0.511,
|
| 1427 |
+
0.243,
|
| 1428 |
+
0.912,
|
| 1429 |
+
0.289
|
| 1430 |
+
],
|
| 1431 |
+
"angle": 0,
|
| 1432 |
+
"content": "[28] D. S. Chaplot, R. Salakhutdinov, A. Gupta, and S. Gupta, “Neural topological slam for visual navigation,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp. 12875–12884."
|
| 1433 |
+
},
|
| 1434 |
+
{
|
| 1435 |
+
"type": "ref_text",
|
| 1436 |
+
"bbox": [
|
| 1437 |
+
0.511,
|
| 1438 |
+
0.29,
|
| 1439 |
+
0.912,
|
| 1440 |
+
0.313
|
| 1441 |
+
],
|
| 1442 |
+
"angle": 0,
|
| 1443 |
+
"content": "[29] O. Kwon, N. Kim, Y. Choi, H. Yoo, J. Park, and S. Oh, \"Visual graph memory with unsupervised representation for visual navigation.\""
|
| 1444 |
+
},
|
| 1445 |
+
{
|
| 1446 |
+
"type": "ref_text",
|
| 1447 |
+
"bbox": [
|
| 1448 |
+
0.511,
|
| 1449 |
+
0.314,
|
| 1450 |
+
0.912,
|
| 1451 |
+
0.348
|
| 1452 |
+
],
|
| 1453 |
+
"angle": 0,
|
| 1454 |
+
"content": "[30] H. J. S. Feder, J. J. Leonard, and C. M. Smith, \"Adaptive mobile robot navigation and mapping,\" The International Journal of Robotics Research, vol. 18, no. 7, pp. 650-668, 1999."
|
| 1455 |
+
},
|
| 1456 |
+
{
|
| 1457 |
+
"type": "ref_text",
|
| 1458 |
+
"bbox": [
|
| 1459 |
+
0.511,
|
| 1460 |
+
0.349,
|
| 1461 |
+
0.912,
|
| 1462 |
+
0.382
|
| 1463 |
+
],
|
| 1464 |
+
"angle": 0,
|
| 1465 |
+
"content": "[31] T. Kollar and N. Roy, “Trajectory optimization using reinforcement learning for map exploration,” The International Journal of Robotics Research, vol. 27, no. 2, pp. 175–196, 2008."
|
| 1466 |
+
},
|
| 1467 |
+
{
|
| 1468 |
+
"type": "ref_text",
|
| 1469 |
+
"bbox": [
|
| 1470 |
+
0.511,
|
| 1471 |
+
0.383,
|
| 1472 |
+
0.912,
|
| 1473 |
+
0.428
|
| 1474 |
+
],
|
| 1475 |
+
"angle": 0,
|
| 1476 |
+
"content": "[32] L. Carlone, J. Du, M. K. Ng, B. Bona, and M. Indri, \"Active slam and exploration with particle filters using kullback-leibler divergence,\" Journal of Intelligent & Robotic Systems, vol. 75, no. 2, pp. 291-311, 2014."
|
| 1477 |
+
},
|
| 1478 |
+
{
|
| 1479 |
+
"type": "ref_text",
|
| 1480 |
+
"bbox": [
|
| 1481 |
+
0.511,
|
| 1482 |
+
0.43,
|
| 1483 |
+
0.912,
|
| 1484 |
+
0.473
|
| 1485 |
+
],
|
| 1486 |
+
"angle": 0,
|
| 1487 |
+
"content": "[33] H. Carrillo, I. Reid, and J. A. Castellanos, \"On the comparison of uncertainty criteria for active slam,\" in 2012 IEEE International Conference on Robotics and Automation. IEEE, 2012, pp. 2080-2087."
|
| 1488 |
+
},
|
| 1489 |
+
{
|
| 1490 |
+
"type": "ref_text",
|
| 1491 |
+
"bbox": [
|
| 1492 |
+
0.511,
|
| 1493 |
+
0.476,
|
| 1494 |
+
0.912,
|
| 1495 |
+
0.522
|
| 1496 |
+
],
|
| 1497 |
+
"angle": 0,
|
| 1498 |
+
"content": "[34] J.-L. Blanco, J.-A. Fernandez-Madrigal, and J. González, “A novel measure of uncertainty for mobile robot slam with rao—blackwellized particle filters,” The International Journal of Robotics Research, vol. 27, no. 1, pp. 73-89, 2008."
|
| 1499 |
+
},
|
| 1500 |
+
{
|
| 1501 |
+
"type": "ref_text",
|
| 1502 |
+
"bbox": [
|
| 1503 |
+
0.511,
|
| 1504 |
+
0.523,
|
| 1505 |
+
0.912,
|
| 1506 |
+
0.556
|
| 1507 |
+
],
|
| 1508 |
+
"angle": 0,
|
| 1509 |
+
"content": "[35] C. Stachniss, G. Grisetti, and W. Burgard, \"Information gain-based exploration using rao-blackwellized particle filters,\" in Robotics: Science and systems, vol. 2, 2005, pp. 65-72."
|
| 1510 |
+
},
|
| 1511 |
+
{
|
| 1512 |
+
"type": "ref_text",
|
| 1513 |
+
"bbox": [
|
| 1514 |
+
0.511,
|
| 1515 |
+
0.557,
|
| 1516 |
+
0.912,
|
| 1517 |
+
0.603
|
| 1518 |
+
],
|
| 1519 |
+
"angle": 0,
|
| 1520 |
+
"content": "[36] K. Fang, A. Toshev, L. Fei-Fei, and S. Savarese, \"Scene memory transformer for embodied agents in long-horizon tasks,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 538-547."
|
| 1521 |
+
},
|
| 1522 |
+
{
|
| 1523 |
+
"type": "ref_text",
|
| 1524 |
+
"bbox": [
|
| 1525 |
+
0.511,
|
| 1526 |
+
0.604,
|
| 1527 |
+
0.912,
|
| 1528 |
+
0.637
|
| 1529 |
+
],
|
| 1530 |
+
"angle": 0,
|
| 1531 |
+
"content": "[37] J. Zhang, L. Tai, M. Liu, J. Boedecker, and W. Burgard, “Neural slam: Learning to explore with external memory,” arXiv preprint arXiv:1706.09520, 2017."
|
| 1532 |
+
},
|
| 1533 |
+
{
|
| 1534 |
+
"type": "ref_text",
|
| 1535 |
+
"bbox": [
|
| 1536 |
+
0.511,
|
| 1537 |
+
0.638,
|
| 1538 |
+
0.912,
|
| 1539 |
+
0.684
|
| 1540 |
+
],
|
| 1541 |
+
"angle": 0,
|
| 1542 |
+
"content": "[38] M. Narasimhan, E. Wijmans, X. Chen, T. Darrell, D. Batra, D. Parikh, and A. Singh, \"Seeing the un-scene: Learning amodal semantic maps for room navigation,\" European Conference on Computer Vision. Springer, Cham, 2020."
|
| 1543 |
+
},
|
| 1544 |
+
{
|
| 1545 |
+
"type": "ref_text",
|
| 1546 |
+
"bbox": [
|
| 1547 |
+
0.511,
|
| 1548 |
+
0.685,
|
| 1549 |
+
0.912,
|
| 1550 |
+
0.74
|
| 1551 |
+
],
|
| 1552 |
+
"angle": 0,
|
| 1553 |
+
"content": "[39] Y. Katsumata, A. Taniguchi, L. El Hafi, Y. Hagiwara, and T. Taniguchi, \"Spcomapgan: Spatial concept formation-based semantic mapping with generative adversarial networks,\" in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2020, pp. 7927-7934."
|
| 1554 |
+
},
|
| 1555 |
+
{
|
| 1556 |
+
"type": "ref_text",
|
| 1557 |
+
"bbox": [
|
| 1558 |
+
0.511,
|
| 1559 |
+
0.742,
|
| 1560 |
+
0.912,
|
| 1561 |
+
0.788
|
| 1562 |
+
],
|
| 1563 |
+
"angle": 0,
|
| 1564 |
+
"content": "[40] E. Beeching, J. Dibangoye, O. Simonin, and C. Wolf, “Learning to plan with uncertain topological maps,” in Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part III 16. Springer, 2020, pp. 473–490."
|
| 1565 |
+
},
|
| 1566 |
+
{
|
| 1567 |
+
"type": "ref_text",
|
| 1568 |
+
"bbox": [
|
| 1569 |
+
0.511,
|
| 1570 |
+
0.789,
|
| 1571 |
+
0.912,
|
| 1572 |
+
0.834
|
| 1573 |
+
],
|
| 1574 |
+
"angle": 0,
|
| 1575 |
+
"content": "[41] K. Katyal, K. Popek, C. Paxton, P. Burlina, and G. D. Hager, \"Uncertainty-aware occupancy map prediction using generative networks for robot navigation,\" in 2019 International Conference on Robotics and Automation (ICRA), 2019, pp. 5453-5459."
|
| 1576 |
+
},
|
| 1577 |
+
{
|
| 1578 |
+
"type": "ref_text",
|
| 1579 |
+
"bbox": [
|
| 1580 |
+
0.511,
|
| 1581 |
+
0.835,
|
| 1582 |
+
0.912,
|
| 1583 |
+
0.88
|
| 1584 |
+
],
|
| 1585 |
+
"angle": 0,
|
| 1586 |
+
"content": "[42] D. D. Fan, K. Otsu, Y. Kubo, A. Dixit, J. Burdick, and A.-A. Agha-Mohammadi, \"Step: Stochastic traversability evaluation and planning for risk-aware off-road navigation,\" in Robotics: Science and Systems. RSS Foundation, 2021, pp. 1-21."
|
| 1587 |
+
},
|
| 1588 |
+
{
|
| 1589 |
+
"type": "ref_text",
|
| 1590 |
+
"bbox": [
|
| 1591 |
+
0.511,
|
| 1592 |
+
0.88,
|
| 1593 |
+
0.912,
|
| 1594 |
+
0.926
|
| 1595 |
+
],
|
| 1596 |
+
"angle": 0,
|
| 1597 |
+
"content": "[43] É. Pairet, J. D. Hernández, M. Carreras, Y. Petillot, and M. Lahijanian, \"Online mapping and motion planning under uncertainty for safe navigation in unknown environments,\" IEEE Transactions on Automation Science and Engineering, 2021."
|
| 1598 |
+
},
|
| 1599 |
+
{
|
| 1600 |
+
"type": "list",
|
| 1601 |
+
"bbox": [
|
| 1602 |
+
0.511,
|
| 1603 |
+
0.068,
|
| 1604 |
+
0.912,
|
| 1605 |
+
0.926
|
| 1606 |
+
],
|
| 1607 |
+
"angle": 0,
|
| 1608 |
+
"content": null
|
| 1609 |
+
}
|
| 1610 |
+
],
|
| 1611 |
+
[
|
| 1612 |
+
{
|
| 1613 |
+
"type": "ref_text",
|
| 1614 |
+
"bbox": [
|
| 1615 |
+
0.085,
|
| 1616 |
+
0.068,
|
| 1617 |
+
0.49,
|
| 1618 |
+
0.091
|
| 1619 |
+
],
|
| 1620 |
+
"angle": 0,
|
| 1621 |
+
"content": "[44] Y. Gal, “Uncertainty in deep learning,” Ph.D. dissertation, University of Cambridge, 2016."
|
| 1622 |
+
},
|
| 1623 |
+
{
|
| 1624 |
+
"type": "ref_text",
|
| 1625 |
+
"bbox": [
|
| 1626 |
+
0.085,
|
| 1627 |
+
0.092,
|
| 1628 |
+
0.49,
|
| 1629 |
+
0.125
|
| 1630 |
+
],
|
| 1631 |
+
"angle": 0,
|
| 1632 |
+
"content": "[45] A. Kendall and Y. Gal, \"What uncertainties do we need in bayesian deep learning for computer vision?\" in Advances in neural information processing systems, 2017, pp. 5574-5584."
|
| 1633 |
+
},
|
| 1634 |
+
{
|
| 1635 |
+
"type": "ref_text",
|
| 1636 |
+
"bbox": [
|
| 1637 |
+
0.085,
|
| 1638 |
+
0.126,
|
| 1639 |
+
0.49,
|
| 1640 |
+
0.159
|
| 1641 |
+
],
|
| 1642 |
+
"angle": 0,
|
| 1643 |
+
"content": "[46] M. G. Azar, I. Osband, and R. Munos, “Minimax regret bounds for reinforcement learning,” in International Conference on Machine Learning. PMLR, 2017, pp. 263–272."
|
| 1644 |
+
},
|
| 1645 |
+
{
|
| 1646 |
+
"type": "ref_text",
|
| 1647 |
+
"bbox": [
|
| 1648 |
+
0.085,
|
| 1649 |
+
0.16,
|
| 1650 |
+
0.49,
|
| 1651 |
+
0.193
|
| 1652 |
+
],
|
| 1653 |
+
"angle": 0,
|
| 1654 |
+
"content": "[47] P. Auer, N. Cesa-Bianchi, and P. Fischer, \"Finite-time analysis of the multiarmed bandit problem,\" Machine learning, vol. 47, no. 2, pp. 235-256, 2002."
|
| 1655 |
+
},
|
| 1656 |
+
{
|
| 1657 |
+
"type": "ref_text",
|
| 1658 |
+
"bbox": [
|
| 1659 |
+
0.085,
|
| 1660 |
+
0.194,
|
| 1661 |
+
0.49,
|
| 1662 |
+
0.216
|
| 1663 |
+
],
|
| 1664 |
+
"angle": 0,
|
| 1665 |
+
"content": "[48] R. Y. Chen, S. Sidor, P. Abbeel, and J. Schulman, \"UCB exploration via q-ensembles,\" arXiv preprint arXiv:1706.01502, 2017."
|
| 1666 |
+
},
|
| 1667 |
+
{
|
| 1668 |
+
"type": "ref_text",
|
| 1669 |
+
"bbox": [
|
| 1670 |
+
0.085,
|
| 1671 |
+
0.217,
|
| 1672 |
+
0.49,
|
| 1673 |
+
0.261
|
| 1674 |
+
],
|
| 1675 |
+
"angle": 0,
|
| 1676 |
+
"content": "[49] O. Ronneberger, P. Fischer, and T. Brox, \"U-net: Convolutional networks for biomedical image segmentation,\" in International Conference on Medical image computing and computer-assisted intervention. Springer, 2015, pp. 234-241."
|
| 1677 |
+
},
|
| 1678 |
+
{
|
| 1679 |
+
"type": "ref_text",
|
| 1680 |
+
"bbox": [
|
| 1681 |
+
0.085,
|
| 1682 |
+
0.262,
|
| 1683 |
+
0.49,
|
| 1684 |
+
0.295
|
| 1685 |
+
],
|
| 1686 |
+
"angle": 0,
|
| 1687 |
+
"content": "[50] B. Lakshminarayanan, A. Pritzel, and C. Blundell, \"Simple and scalable predictive uncertainty estimation using deep ensembles,\" Advances in Neural Information Processing Systems 30, 2017."
|
| 1688 |
+
},
|
| 1689 |
+
{
|
| 1690 |
+
"type": "ref_text",
|
| 1691 |
+
"bbox": [
|
| 1692 |
+
0.085,
|
| 1693 |
+
0.296,
|
| 1694 |
+
0.49,
|
| 1695 |
+
0.328
|
| 1696 |
+
],
|
| 1697 |
+
"angle": 0,
|
| 1698 |
+
"content": "[51] Y. Gal, R. Islam, and Z. Ghahramani, “Deep bayesian active learning with image data,” in International Conference on Machine Learning. PMLR, 2017, pp. 1183–1192."
|
| 1699 |
+
},
|
| 1700 |
+
{
|
| 1701 |
+
"type": "ref_text",
|
| 1702 |
+
"bbox": [
|
| 1703 |
+
0.085,
|
| 1704 |
+
0.33,
|
| 1705 |
+
0.49,
|
| 1706 |
+
0.385
|
| 1707 |
+
],
|
| 1708 |
+
"angle": 0,
|
| 1709 |
+
"content": "[52] B. Yamauchi, “A frontier-based approach for autonomous exploration,” in Proceedings 1997 IEEE International Symposium on Computational Intelligence in Robotics and Automation CIRA '97. Towards New Computational Principles for Robotics and Automation'. IEEE, 1997, pp. 146-151."
|
| 1710 |
+
},
|
| 1711 |
+
{
|
| 1712 |
+
"type": "ref_text",
|
| 1713 |
+
"bbox": [
|
| 1714 |
+
0.085,
|
| 1715 |
+
0.386,
|
| 1716 |
+
0.49,
|
| 1717 |
+
0.42
|
| 1718 |
+
],
|
| 1719 |
+
"angle": 0,
|
| 1720 |
+
"content": "[53] K. He, X. Zhang, S. Ren, and J. Sun, \"Deep residual learning for image recognition,\" in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770-778."
|
| 1721 |
+
},
|
| 1722 |
+
{
|
| 1723 |
+
"type": "ref_text",
|
| 1724 |
+
"bbox": [
|
| 1725 |
+
0.085,
|
| 1726 |
+
0.421,
|
| 1727 |
+
0.49,
|
| 1728 |
+
0.454
|
| 1729 |
+
],
|
| 1730 |
+
"angle": 0,
|
| 1731 |
+
"content": "[54] A. Paszke, S. Gross, S. Chintala, G. Chanan, E. Yang, Z. DeVito, Z. Lin, A. Desmaison, L. Antiga, and A. Lerer, \"Automatic differentiation in pytorch,\" 2017."
|
| 1732 |
+
},
|
| 1733 |
+
{
|
| 1734 |
+
"type": "list",
|
| 1735 |
+
"bbox": [
|
| 1736 |
+
0.085,
|
| 1737 |
+
0.068,
|
| 1738 |
+
0.49,
|
| 1739 |
+
0.454
|
| 1740 |
+
],
|
| 1741 |
+
"angle": 0,
|
| 1742 |
+
"content": null
|
| 1743 |
+
}
|
| 1744 |
+
]
|
| 1745 |
+
]
|
2202.11xxx/2202.11907/13c6c70f-eb0a-4855-9c93-c58436de7c44_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:74711b0a378db93aaba47f943132feee98a01ca90a46743be6d9129ca75563a2
|
| 3 |
+
size 3759944
|
2202.11xxx/2202.11907/full.md
ADDED
|
@@ -0,0 +1,253 @@
| 1 |
+
# Uncertainty-driven Planner for Exploration and Navigation
|
| 2 |
+
|
| 3 |
+
Georgios Georgakis<sup>1</sup>, Bernadette Bucher<sup>1</sup>, Anton Arapin<sup>2</sup>, Karl Schmeckpeper<sup>1</sup>, Nikolai Matni<sup>1</sup>, and Kostas Daniilidis<sup>1</sup>
|
| 4 |
+
|
| 5 |
+
Abstract—We consider the problems of exploration and point-goal navigation in previously unseen environments, where the spatial complexity of indoor scenes and partial observability make these tasks challenging. We argue that learning occupancy priors over indoor maps provides significant advantages towards addressing these problems. To this end, we present a novel planning framework that first learns to generate occupancy maps beyond the field-of-view of the agent, and second leverages the model uncertainty over the generated areas to formulate path selection policies for each task of interest. For point-goal navigation the policy chooses paths with an upper confidence bound policy for efficient and traversable paths, while for exploration the policy maximizes model uncertainty over candidate paths. We perform experiments in the visually realistic environments of Matterport3D using the Habitat simulator and demonstrate: 1) Improved results on exploration and map quality metrics over competitive methods, and 2) The effectiveness of our planning module when paired with the state-of-the-art DD-PPO method for the point-goal navigation task.
|
| 6 |
+
|
| 7 |
+
# I. INTRODUCTION
|
| 8 |
+
|
| 9 |
+
A major prerequisite towards true autonomy is the ability to navigate and explore novel environments. This problem is usually studied in the context of specific tasks such as reaching a specified point goal [1], finding a semantic target [2], or covering as much area as possible while building a map. Each of these tasks has its own idiosyncrasies, but all of them represent examples where one must often reason beyond what is currently observed and incorporate the uncertainty over the inferred information into the decision making process. For example, in point-goal navigation it is important to predict whether a certain path can lead to a dead-end. Likewise, in exploration strong confidence over a particular region's representation may prompt the agent to visit new areas of the map.
|
| 10 |
+
|
| 11 |
+
We investigate the tasks of point-goal navigation and exploration, and propose a planning module that leverages contextual occupancy priors. These priors are learned by a
|
| 12 |
+
|
| 13 |
+
Research was sponsored by the Army Research Office and was accomplished under Grant Number W911NF-20-1-0080. The views and conclusions contained in this document are those of the authors and should not be interpreted as representing the official policies, either expressed or implied, of the Army Research Office or the U.S. Government. The U.S. Government is authorized to reproduce and distribute reprints for Government purposes notwithstanding any copyright notation herein. Further support was provided by the following grants: NSF IIS 1703319, NSF MRI 1626008, NSF TRIPODS 1934960, NSF CPS 2038873, ARL DCIST CRA W911NF-17-2-0181, ONR N00014-17-1-2093, the DARPA-SRC C-BRIC, CAREER award ECCS-2045834, and a Google Research Scholar award.
|
| 14 |
+
|
| 15 |
+
<sup>1</sup>GRASP Laboratory, Department of Computer and Information Science, University of Pennsylvania, Philadelphia, PA 19104. ggeorgqak@seas.upenn.edu
|
| 16 |
+
$^{2}$ Department of Computer Science, The University of Chicago, Chicago, IL, 60637. aarapin@uchicago.edu
|
| 17 |
+
|
| 18 |
+
map predictor module that is trained to estimate occupancy values outside the field-of-view of the agent. Using the epistemic (model) uncertainty associated with these predictions we define objectives for path selection for each task of interest. Earlier work in this field focused mainly on learning how to actively control the agent for the purpose of reducing the uncertainty over the map [3] (Active SLAM), without considering navigation tasks in the process, while methods that did consider navigation often operated in relatively simple environments of artificially placed cylindrical obstacles [4], [5].
|
| 19 |
+
|
| 20 |
+
With the recent introduction of realistic and visually complex environments serving as navigation benchmarks [6], [7], the focus shifted on learning-based end-to-end approaches [8], [9], [10]. While end-to-end formulations that map pixels directly to actions are attractive in terms of their simplicity, they require very large quantities of training data. For instance, DD-PPO [10] needs 2.5 billion frames of experience to reach its state-of-the-art performance on Gibson [7]. On the other hand, modular approaches [11], [12], [13] are able to encode prior information into explicit map representations and are thus much more sample efficient. Our method falls into the latter category, but differs from other approaches by its use of the uncertainty over predictions outside the field-of-view of the agent during the planning stage. In contrast to [13], [12] this allows our method more flexibility when defining goal selection objectives, and does not require re-training between different tasks.
|
| 21 |
+
|
| 22 |
+
In this paper, we introduce Uncertainty-driven Planner for Exploration and Navigation (UPEN), in which we propose a planning algorithm that is informed by predictions over unobserved areas. Through this spatial prediction approach our model learns layout patterns that can guide a planner towards preferable paths in unknown environments. More specifically, we first train an ensemble of occupancy map predictor models by learning to hallucinate top-down occupancy regions from unobserved areas. Then, a Rapidly Exploring Random-Trees [14] (RRT) algorithm generates a set of candidate paths. We select paths from these candidates using epistemic (model) uncertainty associated with a path traversibility estimate as measured by the disagreement of ensemble models [15], [16], and we choose appropriate short-term goals based on the task of interest. Our contributions are as follows:
|
| 23 |
+
|
| 24 |
+
- We propose UPEN, a novel planning framework that leverages learned layout priors and formulates uncertainty-based objectives for path selection in exploration and navigation tasks.
|
| 25 |
+
|
| 26 |
+

|
| 27 |
+
Fig. 1: Occupancy map prediction (blue-occupied, green-free) and uncertainty estimation for a time-step $t$ . The egocentric depth observation is first ground-projected and passed through an ensemble $f$ of encoder-decoder models that each infers information in unobserved areas $(\hat{m}_t)$ . Each $\hat{m}_t$ is then registered to a separate global map $M_t$ . The final occupancy probabilities and model uncertainty are given by the mean and variance over the set of global maps.
|
| 28 |
+
|
| 29 |
+
- We show improved exploration results over competitive methods on the Matterport3D [17] dataset.
|
| 30 |
+
- We demonstrate the effectiveness of our planner when used to complement existing end-to-end methods on the point-goal navigation task.
|
| 31 |
+
|
| 32 |
+
# II. RELATED WORK
|
| 33 |
+
|
| 34 |
+
a) Navigation approaches: Traditional approaches to visual navigation focus on building a 3D metric map of the environment [18], [3] before using that representation for any downstream navigation tasks, which does not lend itself well to task-driven learnable representations that can capture contextual cues. The recent introduction of large-scale indoor environments and simulators [7], [17], [6] has fuelled a slew of learning based methods for indoor navigation tasks [1] such as point-goal [10], [19], [20], [21], [22], object-goal [23], [24], [25], [26], [27], and image-goal [8], [28], [29]. Modular approaches which incorporate explicit or learned map representations [11], [23], [25] have been shown to outperform end-to-end methods on tasks such as object-goal; however, this is not currently the case for the point-goal [10], [20] task. In our work, we demonstrate how an uncertainty-driven planning module can favourably complement DD-PPO [10], a competitive method on point-goal navigation, and show increased performance in challenging episodes.
|
| 35 |
+
|
| 36 |
+
b) Exploration methods for navigation: A considerable amount of work was also devoted to planning efficient paths during map building, generally referred to as Active SLAM [30], [31], [32], [33], [34], [35]. For example, [32], [35] define information gain objectives based on the estimated uncertainty over the map in order to decide future actions, while [33] investigates different uncertainty measures. Recent methods focus on learning policies for efficient exploration either through coverage [9], [13], [36], [37] or map accuracy [12] reward functions. Furthermore, several works have gone beyond traditional mapping, and sought to predict maps for unseen regions [12], [38], [24], [27], [39] which further increased robustness in the decision making process. Our approach leverages the uncertainty over predicted occupancy maps for unobserved areas and shows its effectiveness on exploring a novel environment.
|
| 37 |
+
|
| 38 |
+
c) Uncertainty estimation: To navigate in partially observed maps, uncertainty has been estimated across nodes in a path [4], [40], via the marginal probability of landmarks [5], and with the variance of model predictions across predicted maps [24], [41]. Furthermore, uncertainty-aware mapping has been shown to be effective in unknown and highly risky environments [42], [43]. In our work, we use uncertainty differently for exploration and point goal navigation. In exploration, we estimate uncertainty over a predicted occupancy map via the variance between models in an ensemble. This variance across the ensemble specifically estimates model (epistemic) uncertainty [44], [45]. We select paths by maximizing epistemic uncertainty as a proxy for maximizing information gain following prior work in exploration [16], [24]. In point goal navigation, we compute traversability scores for candidate paths using an ensemble of map predictors and compute uncertainty with respect to these traversability scores using the variance over the scores given by each model in the ensemble. We use this uncertainty regarding path traversability to construct an upper confidence bound policy for path selection to balance exploration and exploitation in point goal navigation [46], [47], [48], [24].
|
| 39 |
+
|
| 40 |
+
# III. APPROACH
|
| 41 |
+
|
| 42 |
+
We present an uncertainty-driven planning module for exploration and point-goal navigation tasks, which benefits from a learned occupancy map predictor module. Our approach takes as input the agent's egocentric depth observation and learns to predict regions of the occupancy map that are outside of the agent's field-of-view. Then it uses the uncertainty over those predictions to decide on a set of candidate paths generated by RRT. We define a separate policy to select a short-term goal along a path for each task of interest. In exploration we maximize uncertainty over the candidate paths, while for point-goal navigation we choose paths with an upper confidence bound policy for efficient and traversable paths. Finally, a local policy (DD-PPO [10]) predicts navigation actions to reach the short-term goal.
|
| 43 |
+
|
| 44 |
+
# A. Occupancy Map Prediction
|
| 45 |
+
|
| 46 |
+
The first component in our planning module aims to capture layout priors in indoor environments. Such information
|
| 47 |
+
|
| 48 |
+

|
| 49 |
+
Fig. 2: Examples of path selections for exploration (top row) and point-goal navigation (bottom-row) tasks. Given the model uncertainty and occupancy probabilities we first generate a set of paths which are evaluated either with an exploration objective (section III-B) or an upper confidence bound objective (section III-C). The agent position is denoted as a dark green dot, the goal is shown as magenta, and red dots signify short-term goals.
|
| 50 |
+
|
| 51 |
+
can lead to a more intelligent decision making process for a downstream navigation task. Following the recent success of [12], [24] we formulate the occupancy map prediction as a semantic segmentation problem. Our model takes as input a depth image $D_{t}$ at time-step $t$ which is ground projected to an egocentric grid $m_t' \in \mathbb{R}^{|C| \times h \times w}$ , where $C$ is the set of classes containing unknown, occupied, and free, and $h, w$ are the dimensions of the local grid. The ground projection is carried out by first using the camera intrinsic parameters to unproject $D_{t}$ to a 3D point cloud and then map each 3D point to the $h \times w$ grid coordinates: $x' = \lfloor \frac{x}{r} \rfloor + \frac{w - 1}{2}$ , $z' = \lfloor \frac{z}{r} \rfloor + \frac{h - 1}{2}$ , where $x', z'$ are the grid coordinates, $x, z$ are the 3D points, and $r$ is the grid cell size. Since the agent has a limited field of view, $m_t'$ represents a local incomplete top-down occupancy grid of the area surrounding the agent. Our objective is to predict the missing values and produce the complete local occupancy map $\hat{m}_t \in \mathbb{R}^{|C| \times h \times w}$ . To do so, we pass $m_t'$ through an encoder-decoder UNet [49] model $f$ that outputs a prediction for each grid location over the set of classes $C$ . The model $f$ is trained with a pixel-wise cross-entropy loss:
|
| 52 |
+
|
| 53 |
+
$$
|
| 54 |
+
L = -\frac{1}{K}\sum_{k=1}^{K}\sum_{c \in C} m_{k,c}\log \hat{m}_{k,c} \tag{1}
|
| 55 |
+
$$
|
| 56 |
+
|
| 57 |
+
where $K = h \times w$ corresponds to the number of cells in the local grid and $m_{k,c}$ is the ground-truth label for pixel $k$ . The ground-truth occupancy is generated by ground-projecting the available semantic information of the 3D scenes. To ensure diversity in the training examples, we sample training pairs across shortest paths between two randomly selected locations in a scene, where $m_t'$ can contain a variable number of ground-projected depth images. Unlike [12] we do not use the RGB images during training, as we have found that the aforementioned sampling strategy is sufficient for the model
|
| 58 |
+
|
| 59 |
+
to converge. This enables us to define a smaller and less memory intensive model $f$ .
|
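To make the ground projection above concrete, here is a minimal sketch (not the authors' implementation). It assumes a pinhole camera with known intrinsics $(f_x, f_y, c_x, c_y)$ and uses a simple height threshold to split points into occupied and free; in the paper the training labels instead come from the scenes' semantic annotations, and the function name and default values are illustrative only.

```python
import numpy as np

def ground_project(depth, fx, fy, cx, cy, h=160, w=160, r=0.05,
                   cam_height=1.25, height_thresh=0.2):
    """Project a depth image (meters) into an egocentric |C| x h x w grid.

    Channel order (illustrative): 0 = unknown, 1 = occupied, 2 = free.
    r is the cell size (5cm in the paper); cam_height and height_thresh are
    assumed values used here only to split floor from obstacles.
    """
    H, W = depth.shape
    vs, us = np.meshgrid(np.arange(H), np.arange(W), indexing="ij")
    z = depth.reshape(-1)                    # forward distance from the camera
    x = (us.reshape(-1) - cx) * z / fx       # lateral offset
    y = (vs.reshape(-1) - cy) * z / fy       # vertical offset (down is positive)
    valid = z > 0
    x, y, z = x[valid], y[valid], z[valid]

    # Grid coordinates from the text: x' = floor(x/r)+(w-1)/2, z' = floor(z/r)+(h-1)/2
    xp = np.floor(x / r).astype(int) + (w - 1) // 2
    zp = np.floor(z / r).astype(int) + (h - 1) // 2
    keep = (xp >= 0) & (xp < w) & (zp >= 0) & (zp < h)
    xp, zp, y = xp[keep], zp[keep], y[keep]

    height_above_floor = cam_height - y      # camera is cam_height above the floor
    occ = height_above_floor > height_thresh

    grid = np.zeros((3, h, w), dtype=np.float32)
    grid[0] = 1.0                            # every cell starts as unknown
    grid[:, zp[~occ], xp[~occ]] = np.array([[0.], [0.], [1.]])  # free cells
    grid[:, zp[occ], xp[occ]] = np.array([[0.], [1.], [0.]])    # occupied wins if both hit a cell
    return grid
```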
| 60 |
+
|
| 61 |
+
During a navigation episode, we maintain a global map $M_t \in \mathbb{R}^{|C| \times H \times W}$ . Since $f$ predicts a probability distribution over the classes for each grid location, we register $\hat{m}_t$ by updating $M_t$ using Bayes Theorem. The global map $M_t$ is initialized with a uniform prior probability distribution across all classes.
|
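The Bayesian registration of each local prediction into the global map can be sketched per cell as follows. This assumes the local prediction has already been warped into the global frame (pose handling is omitted) and simply multiplies the running per-class probabilities by the new prediction and renormalizes; names are illustrative.

```python
import numpy as np

def register_local_prediction(global_map, local_pred_in_global, observed_mask, eps=1e-6):
    """Per-cell Bayesian update of the global map (|C| x H x W).

    global_map           -- current per-class probabilities, initialised uniform (1/|C|).
    local_pred_in_global -- predicted class probabilities warped to the global frame.
    observed_mask        -- boolean H x W mask of cells covered by the local prediction.
    """
    posterior = global_map.copy()
    # P(c | old evidence, new prediction) is proportional to P(c | old evidence) * P_pred(c)
    fused = global_map[:, observed_mask] * local_pred_in_global[:, observed_mask]
    fused /= (fused.sum(axis=0, keepdims=True) + eps)
    posterior[:, observed_mask] = fused
    return posterior

# Usage: M = np.full((3, 768, 768), 1.0 / 3)  # uniform prior over {unknown, occupied, free}
```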
| 62 |
+
|
| 63 |
+
# B. Exploration Policy
|
| 64 |
+
|
| 65 |
+
The main goal of the exploration task is to maximize map coverage, which requires navigating to new map regions around obstacles. To this end, we propose selecting paths using the uncertainty of our map predictions as an objective in our planning algorithm. We explicitly minimize map uncertainty by collecting observations to improve the predicted global map $M_{t}$. Implicitly, map coverage is maximized by minimizing map uncertainty, because high coverage is required for predicting an accurate map with low uncertainty.
|
| 66 |
+
|
| 67 |
+
We use the epistemic (model) uncertainty as an objective for exploration [45], [44], [16], [24]. In order to estimate epistemic uncertainty, we construct $f$ as an ensemble of $N$ occupancy prediction models defined over the parameters $\{\theta_1,\dots,\theta_N\}$. Variance between models in the ensemble comes from different random weight initializations in each network [16]. Our model estimates the true probability distribution $P(m_t|m_t')$ by averaging over sampled model weights, $P(m_t|m_t') \approx \mathbb{E}_{\theta \sim q(\theta)}f(m_t';\theta) \approx \frac{1}{N}\sum_{i = 1}^{N}f(m_t';\theta_i)$, where the parameters $\theta$ are random variables sampled from the distribution $q(\theta)$ [50], [51]. Then, following prior work [15], [16], [24], the epistemic uncertainty can be approximated from the variance between the outputs of the models in the ensemble, $\mathrm{Var}\,f(m_t';\theta)$.
|
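A minimal sketch of the ensemble mean and epistemic uncertainty described above, assuming `models` is a list of $N$ trained predictors that each return per-class logits for the same input grid; reducing the per-class variance to a single value per cell (here by averaging over classes) is an assumption about how $\mathrm{Var}\,f$ is aggregated.

```python
import torch

@torch.no_grad()
def ensemble_predict(models, m_prime):
    """Mean prediction and per-cell epistemic uncertainty from an ensemble.

    m_prime -- ground-projected input grid, shape (1, |C|, h, w).
    Returns (mean_probs, uncertainty) with shapes (|C|, h, w) and (h, w).
    """
    probs = torch.stack([torch.softmax(f(m_prime), dim=1)[0] for f in models])  # (N, |C|, h, w)
    mean_probs = probs.mean(dim=0)            # approximates P(m_t | m_t')
    # Epistemic uncertainty: variance across ensemble members, reduced over classes.
    uncertainty = probs.var(dim=0).mean(dim=0)
    return mean_probs, uncertainty
```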
| 68 |
+
|
| 69 |
+
For path planning during exploration, our proposed objective can be used with any planner which generates a set $S$
|
| 70 |
+
|
| 71 |
+
of candidate paths. Each path $s \in S$ can be expressed as a subset of grid locations in our map. Each of these grid locations $k$ has an associated uncertainty estimate given by the variance between model predictions in our ensemble. We specify this uncertainty map as $u_{k} \coloneqq \operatorname{Var} f(m_{t}^{\prime};\theta) \in \mathbb{R}^{1 \times h \times w}$ . We use this map to score each path $s$ and construct the objective
|
| 72 |
+
|
| 73 |
+
$$
|
| 74 |
+
\underset{s \in S}{\arg\max}\; \frac{1}{|s|}\sum_{k \in s} u_{k} \tag{2}
|
| 75 |
+
$$
|
| 76 |
+
|
| 77 |
+
which selects the path with the maximum average epistemic uncertainty on the traversed grid.
|
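With the uncertainty map in hand, the selection rule of equation 2 is only a few lines; `paths` is assumed to be a list of candidate paths, each a list of (row, col) cells on the global grid, e.g. as produced by the planner.

```python
import numpy as np

def select_exploration_path(paths, uncertainty_map):
    """Pick the candidate path with maximum mean epistemic uncertainty (Eq. 2)."""
    def score(path):
        cells = np.array(path)                       # (len(s), 2) array of (row, col)
        return uncertainty_map[cells[:, 0], cells[:, 1]].mean()
    return max(paths, key=score)
```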
| 78 |
+
|
| 79 |
+
In this work, we incorporate our uncertainty-based objective in RRT to plan to explore. We expand RRT for a set number of iterations, which generates candidate paths in random directions. We select between these paths using our objective from equation 2. In practice, equation 2 is evaluated over the accumulated global map $M_{t}$ . Figure 1 shows the occupancy map prediction and the uncertainty estimation process using the ensemble $f$ , while Figure 2 (top row) shows an example of path selection using the exploration objective.
|
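For illustration, a toy RRT that produces such candidate paths on an occupancy-probability grid is sketched below. It checks collisions only at sampled nodes and is not the authors' planner; the hyperparameters mirror those reported later in the implementation details, but everything else (names, sampling scheme) is an assumption.

```python
import numpy as np

def rrt_candidate_paths(occ_prob, start, goal=None, max_paths=10, iters=2000,
                        step=5, goal_sample_rate=0.2, occ_thresh=0.5, rng=None):
    """Grow a small RRT from `start` (row, col) on a 2-D occupancy-probability grid.

    Paths back to the root are recorded whenever a node reaches `goal`, or at
    fixed intervals when goal=None (exploration), up to `max_paths` candidates.
    """
    rng = rng or np.random.default_rng()
    H, W = occ_prob.shape
    nodes, parents, paths = [np.array(start, dtype=float)], [-1], []

    def backtrack(i):
        path = []
        while i != -1:
            path.append(tuple(nodes[i].astype(int)))
            i = parents[i]
        return path[::-1]

    for it in range(1, iters + 1):
        if goal is not None and rng.random() < goal_sample_rate:
            sample = np.array(goal, dtype=float)      # bias sampling toward the goal
        else:
            sample = rng.uniform([0, 0], [H - 1, W - 1])
        near = min(range(len(nodes)), key=lambda i: np.linalg.norm(nodes[i] - sample))
        direction = sample - nodes[near]
        dist = np.linalg.norm(direction)
        if dist < 1e-6:
            continue
        new = nodes[near] + direction / dist * min(step, dist)
        r, c = int(new[0]), int(new[1])
        if not (0 <= r < H and 0 <= c < W) or occ_prob[r, c] > occ_thresh:
            continue                                  # skip likely-occupied cells
        nodes.append(new)
        parents.append(near)
        reached = goal is not None and np.linalg.norm(new - np.array(goal)) <= step
        if reached or (goal is None and it % (iters // max_paths) == 0):
            paths.append(backtrack(len(nodes) - 1))
            if len(paths) >= max_paths:
                break
    return paths
```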
| 80 |
+
|
| 81 |
+
# C. Point-goal Policy
|
| 82 |
+
|
| 83 |
+
In the problem of point-goal navigation, the objective is to efficiently navigate past obstacles to a given goal location from a starting position. We again use RRT as a planner which generates a set of paths $S$ between the agent's current location and the goal location. Thus, the primary objective when we select a path from these candidates to traverse is for the path to be unobstructed. Given a predicted occupancy map from model $i$ in our ensemble and a candidate path $s \in S$ generated by our planner, we evaluate whether or not the path is obstructed by taking the maximum probability of occupancy in any grid cell $k$ along each path. Specifically,
|
| 84 |
+
|
| 85 |
+
$$
|
| 86 |
+
p_{i,s} = \max_{k \in s}\left(\hat{m}^{i}_{k,occ}\big|_{k \in s}\right) \tag{3}
|
| 87 |
+
$$
|
| 88 |
+
|
| 89 |
+
where $\hat{m}_{k,occ}^i|_{k\in s}$ is the map of occupancy probabilities defined on the subset of grid cells $k\in s$ predicted by model $i$ in the ensemble $f$ . Choosing the path $s\in S$ by minimizing $p_{i,s}$ chooses the path we think most likely to be unobstructed. We can minimize this likelihood by selecting $\arg \min_{s\in S}\mu_s$ where $\mu_s\coloneqq \frac{1}{N}\sum_{i = 1}^{N}p_{i,s}$ . However, we note that there may be multiple unobstructed candidate paths generated by our planner. We differentiate between these in our selection by adding a term $d_{s}$ to our objective to incentivize selecting shorter paths. Furthermore, as an agent navigates to a goal, it makes map predictions using its accumulated observations along the way. Therefore, to improve navigation performance we can incorporate an exploration component in our navigation objective to incentivize choosing paths where it can gain the most information regarding efficient traversability.
|
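The per-path statistics $p_{i,s}$, $\mu_s$, and the ensemble disagreement $\sigma_s$ used below can be sketched as follows, assuming `ensemble_occ_maps` holds the occupied-class probability map of each ensemble member registered in the global frame; names are illustrative.

```python
import numpy as np

def path_traversability_stats(path, ensemble_occ_maps):
    """Compute mu_s and sigma_s for one candidate path s (Eq. 3 and following).

    path              -- list of (row, col) grid cells along the path s.
    ensemble_occ_maps -- list of H x W occupancy probabilities, one per model i.
    """
    cells = np.array(path)
    # p_{i,s}: maximum probability of occupancy along the path, per ensemble member.
    p = np.array([occ[cells[:, 0], cells[:, 1]].max() for occ in ensemble_occ_maps])
    mu_s = p.mean()       # estimated probability that the path is not traversable
    sigma_s = p.std()     # disagreement between members, i.e. epistemic uncertainty
    return mu_s, sigma_s
```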
| 90 |
+
|
| 91 |
+
We estimate uncertainty associated with efficient traversability of a particular path $s$ for our exploration objective. Since there is zero uncertainty associated with path lengths $d_{s}$ , we design our exploration objective to maximize information gain for path traversability. We
|
| 92 |
+
|
| 93 |
+
denote $P_{s_{NT}}(m_t|m_t')$ as the probability the path $s$ is not traversable ( $NT$ ) estimated by $\mu_s$ . We recall that $\mu_s$ is computed by averaging traversability scores over an ensemble of models. We compute the variance of these scores $\mathrm{Var}_{i\in N}p_{i,s}$ to estimate uncertainty of our model approximating $P_{s_{NT}}(m_t|m_t')$ .
|
| 94 |
+
|
| 95 |
+
We combine exploration and exploitation in our full objective using an upper confidence bound policy [47], [46], [48], [24]. Our objective for efficient traversable paths is specified as
|
| 96 |
+
|
| 97 |
+
$$
|
| 98 |
+
\underset{s \in S}{\arg\min}\; P_{s_{NT}}\left(m_{t} \mid m_{t}^{\prime}\right) + d_{s} \tag{4}
|
| 99 |
+
$$
|
| 100 |
+
|
| 101 |
+
and can equivalently be recast as the maximization problem $\arg \max_{s\in S} - P_{s_{NT}}(m_t|m_t^{\prime}) - d_s$. We denote $\sigma_{s} \coloneqq \sqrt{\operatorname{Var}_{i\in N}\, p_{i,s}}$ and observe that the upper bound
|
| 102 |
+
|
| 103 |
+
$$
|
| 104 |
+
-P_{s_{NT}}\left(m_{t} \mid m_{t}^{\prime}\right) - d_{s} \leq -\mu_{s} + \alpha_{1}\sigma_{s} - d_{s} \tag{5}
|
| 105 |
+
$$
|
| 106 |
+
|
| 107 |
+
holds with some fixed but unknown probability, where $\alpha_{1}$ is a constant hyperparameter. Using this upper bound to estimate $-P_{s_{NT}}(m_t|m_t^{\prime})$, our full objective function, written as a minimization problem, is
|
| 108 |
+
|
| 109 |
+
$$
|
| 110 |
+
\underset{s}{\arg\min}\; \mu_{s} - \alpha_{1}\sigma_{s} + \alpha_{2} d_{s} \tag{6}
|
| 111 |
+
$$
|
| 112 |
+
|
| 113 |
+
where $\alpha_{2}$ is a hyperparameter weighting the contribution of path length. Similarly to our exploration policy, in practice, equation 6 is evaluated over the accumulated global map $M_{t}$ . Figure 2 (bottom row) illustrates path selection using our objective during a point-goal episode.
|
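Putting the pieces together, the path selection of equation 6 can be sketched as below, reusing `path_traversability_stats` from the earlier sketch. Expressing $d_s$ as the metric path length is an assumption, since the text only states that it weights path length.

```python
import numpy as np

def select_pointgoal_path(paths, ensemble_occ_maps, alpha1=0.1, alpha2=0.5, cell_size=0.05):
    """Select the path minimizing mu_s - alpha1 * sigma_s + alpha2 * d_s (Eq. 6)."""
    best, best_score = None, np.inf
    for path in paths:
        mu_s, sigma_s = path_traversability_stats(path, ensemble_occ_maps)
        d_s = (len(path) - 1) * cell_size        # path length in meters (illustrative choice)
        score = mu_s - alpha1 * sigma_s + alpha2 * d_s
        if score < best_score:
            best, best_score = path, score
    return best
```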
| 114 |
+
|
| 115 |
+
# IV. EXPERIMENTS
|
| 116 |
+
|
| 117 |
+
Our experiments are conducted on the Matterport3D (MP3D) [17] dataset using the Habitat [6] simulator. We follow the standard train/val/test environments split of MP3D which contains overall 90 reconstructions of realistic indoor scenes. The splits are disjoint, therefore all evaluations are conducted in novel scenes where the occupancy map predictor model has not seen during training. Our observation space consists of $256 \times 256$ depth images, while the action space contains four actions: MOVE_FORWARD by $25cm$ , TURN_LEFT and TURN_RIGHT by $10^{\circ}$ and STOP.
|
| 118 |
+
|
| 119 |
+
We perform two key experiments. First, we compare to other state-of-the-art methods on the task of exploration using both coverage and map accuracy metrics (sec. IV-B). Second we evaluate on the point-goal navigation task and demonstrate increased performance when DD-PPO [10] is complemented with our planning strategy (sec. IV-C).
|
| 120 |
+
|
| 121 |
+
# A. Implementation Details
|
| 122 |
+
|
| 123 |
+
The Unet [49] model used for the occupancy map prediction has four encoder and four decoder convolutional blocks with skip connections and it is combined with a ResNet18 [53] for feature extraction. We use Pytorch [54] and train using the Adam optimizer with a learning rate of 0.0002. The grid dimensions are $h = w = 160$ for local, and $H = W = 768$ for global, while each cell in the grid is $5cm \times 5cm$ . For the path generation process, we run the RRT every 30 navigation steps for exploration and
|
| 124 |
+
|
| 125 |
+

|
| 126 |
+
|
| 127 |
+

|
| 128 |
+
|
| 129 |
+

|
| 130 |
+
|
| 131 |
+

|
| 132 |
+
|
| 133 |
+

|
| 134 |
+
|
| 135 |
+

|
| 136 |
+
Fig. 3: Exploration example with $T = 1000$ showing the trajectory followed by our agent (red line). The top row shows RGB images observed by the agent. The ground-truth map is visualized in the bottom right corner.
|
| 137 |
+
|
| 138 |
+

|
| 139 |
+
|
| 140 |
+

|
| 141 |
+
|
| 142 |
+
|
| 143 |
+

|
| 144 |
+
|
| 145 |
+

|
| 146 |
+
|
| 147 |
+

|
| 148 |
+
|
| 149 |
+
<table><tr><td></td><td colspan="2">Noisy</td><td colspan="2">Noise-free</td></tr><tr><td>Method</td><td>Map Acc (m2)</td><td>IoU (%)</td><td>Map Acc (m2)</td><td>IoU (%)</td></tr><tr><td>ANS(depth) [12]</td><td>72.5</td><td>26.0</td><td>85.9</td><td>34.0</td></tr><tr><td>OccAnt(depth) w/o AR [12]</td><td>92.7</td><td>29.0</td><td>104.7</td><td>38.0</td></tr><tr><td>OccAnt(depth) [12]</td><td>94.1</td><td>33.0</td><td>96.5</td><td>35.0</td></tr><tr><td>FBE [52] + DD-PPO [10]</td><td>100.9</td><td>28.7</td><td>120.2</td><td>44.7</td></tr><tr><td>UPEN + DD-PPO [10]</td><td>110.3</td><td>25.8</td><td>141.6</td><td>45.6</td></tr></table>
|
| 150 |
+
|
| 151 |
+
TABLE I: Exploration results on MP3D test scenes evaluating map quality at $\mathrm{T} = {500}$ . The "w/o AR" refers to the baseline that is trained without the anticipation reward in [12].
|
| 152 |
+
|
| 153 |
+
<table><tr><td></td><td>Cov (m2)</td><td>Cov (%)</td></tr><tr><td>ANS(rgb) [13]</td><td>73.28</td><td>52.1</td></tr><tr><td>FBE [52] + DD-PPO [10]</td><td>85.3</td><td>53.0</td></tr><tr><td>UPEN + DD-PPO [10]</td><td>113.0</td><td>67.9</td></tr></table>
|
| 154 |
+
|
| 155 |
+
TABLE II: Exploration results on MP3D test scenes evaluating area coverage at $\mathrm{T} = {1000}$ .
|
| 156 |
+
|
| 157 |
+
20 for point-goal. The RRT is set to generate a maximum of 10 paths per run, with a goal sampling rate of $20\%$. Finally, the RRT expands new nodes with a distance of 5 pixels at a time. A single step in a navigation episode requires 0.37s on average, which includes map prediction and registration, planning using RRT, and DD-PPO. The timing was performed on a laptop with an i7 CPU @ 2.20GHz and a GTX1060 GPU. All experiments use an ensemble size of 4. We provide code and trained models: https://github.com/ggeorgak11/UPEN.
|
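For reference, the hyperparameters reported in this section can be gathered into a single configuration; the key names are illustrative, the values are as stated in the text.

```python
# Hyperparameters as reported in this section (names are illustrative).
UPEN_CONFIG = {
    "local_grid": (160, 160),        # h x w
    "global_grid": (768, 768),       # H x W
    "cell_size_m": 0.05,             # 5cm x 5cm cells
    "ensemble_size": 4,
    "optimizer": "Adam",
    "learning_rate": 2e-4,
    "rrt_interval_steps": {"exploration": 30, "pointgoal": 20},
    "rrt_max_paths": 10,
    "rrt_goal_sample_rate": 0.2,
    "rrt_step_px": 5,
}
```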
| 158 |
+
|
| 159 |
+
# B. Exploration
|
| 160 |
+
|
| 161 |
+
The setup from [12] is followed for this experiment, where the objective is to cover as much area as possible given a limited time budget $T = 1000$ . Unless stated otherwise, the evaluation is conducted with simulated noise following the noise models from [13], [12]. We use the following metrics: 1) Map Accuracy $(m^2)$ : as defined in [12] the area in the predicted occupancy map that matches the ground-truth map. 2) IoU (%): the intersection over union of the predicted map and the ground-truth. 3) Cov $(m^2)$ : the actual area covered by the agent. 4) Cov (%): ratio of covered area to max scene coverage. We note that the two coverage metrics are computed on a map containing only ground-projections of depth observations. Our method is validated against the competitive approaches of Occupancy Anticipation [12] (OccAnt) and Active Neural SLAM [13] (ANS), which are modular approaches with mapper components. Both use reinforcement learning to train goal selection policies optimized over map accuracy and coverage respectively. Furthermore,
|
| 162 |
+
|
| 163 |
+
we compare against the classical method of Frontier-based Exploration [52] (FBE). Since both UPEN and FBE are combined with DD-PPO and use the same predicted maps, this comparison directly validates our exploration objective.
|
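As a reference for the map-quality metrics defined above, a simple sketch follows, assuming categorical predicted and ground-truth grids at the 5cm resolution; averaging the IoU over the occupied and free classes is an assumption about how that metric is aggregated.

```python
import numpy as np

def map_quality_metrics(pred_map, gt_map, cell_area_m2=0.05 * 0.05, classes=(1, 2)):
    """Map Accuracy (m^2) and mean IoU (%) between categorical predicted and GT maps.

    pred_map, gt_map -- H x W integer grids (e.g. 0 = unknown, 1 = occupied, 2 = free).
    """
    map_acc_m2 = float((pred_map == gt_map).sum()) * cell_area_m2
    ious = []
    for c in classes:
        inter = np.logical_and(pred_map == c, gt_map == c).sum()
        union = np.logical_or(pred_map == c, gt_map == c).sum()
        if union > 0:
            ious.append(inter / union)
    return map_acc_m2, 100.0 * float(np.mean(ious))
```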
| 164 |
+
|
| 165 |
+
We report two key results. First, in Table I our method outperforms all baselines in the noise-free case in both Map Accuracy and IoU. In fact, we show $21.4m^2$ and $36.9m^2$ improvement over FBE and OccAnt respectively on the Map Accuracy metric. In the noisy case, even though we still surpass all baselines on Map Accuracy, our performance drops significantly in both metrics. In addition, the fact that Map Accuracy increases while IoU drops is attributed to increased map coverage with reduced accuracy. This is not surprising, since unlike OccAnt and Active Neural SLAM we do not use a pose estimator. Second, in Table II we demonstrate superior performance on coverage metrics with a margin of $27.7m^2$ over FBE and $39.7m^2$ over ANS. This suggests that our method is more efficient when exploring a novel scene, thus validating our uncertainty-based exploration policy. Figure 3 shows an exploration episode.
|
| 166 |
+
|
| 167 |
+
# C. Point-goal Navigation
|
| 168 |
+
|
| 169 |
+
We evaluate the performance of our uncertainty-driven planner when used to augment DD-PPO [10] against its vanilla version. DD-PPO is currently one of the best performing methods on point-goal navigation, achieving $97\%$ SPL on the Gibson [7] validation set as shown in [10]. We follow the point-goal task setup from [1] where given a target coordinate the agent needs to navigate to that target and stop within a $0.2m$ radius. The agent is given a time-budget of
|
| 170 |
+
|
| 171 |
+
<table><tr><td>Dataset</td><td colspan="2">MP3D Val</td><td colspan="2">MP3D Test</td><td colspan="2">MP3D Val-Hard</td></tr><tr><td>Method</td><td>Success (%)</td><td>SPL (%)</td><td>Success (%)</td><td>SPL (%)</td><td>Success (%)</td><td>SPL (%)</td></tr><tr><td>DD-PPO [10]</td><td>47.8</td><td>38.7</td><td>37.3</td><td>30.2</td><td>38.0</td><td>28.1</td></tr><tr><td>UPEN-Occ + DD-PPO [10]</td><td>43.8</td><td>30.2</td><td>36.3</td><td>25.3</td><td>42.3</td><td>26.9</td></tr><tr><td>UPEN-Greedy + DD-PPO [10]</td><td>48.9</td><td>36.0</td><td>37.5</td><td>28.1</td><td>43.0</td><td>28.8</td></tr><tr><td>UPEN + DD-PPO [10]</td><td>49.8</td><td>36.9</td><td>40.8</td><td>30.7</td><td>45.7</td><td>31.6</td></tr></table>
|
| 172 |
+
|
| 173 |
+
TABLE III: Point-goal navigation results of our method against the vanilla DD-PPO[10]. "Occ" signifies a policy that uses only occupancy predictions, while "Greedy" refers to a policy taking into consideration path length without uncertainty.
|
| 174 |
+
|
| 175 |
+
<table><tr><td></td><td>Avg GD (m)</td><td>Avg GEDR</td><td>Min GEDR</td></tr><tr><td>Gibson Val</td><td>5.88</td><td>1.37</td><td>1.00</td></tr><tr><td>MP3D Val</td><td>11.14</td><td>1.40</td><td>1.00</td></tr><tr><td>MP3D Test</td><td>13.23</td><td>1.42</td><td>1.00</td></tr><tr><td>MP3D Val-Hard</td><td>8.28</td><td>3.19</td><td>2.50</td></tr></table>
|
| 176 |
+
|
| 177 |
+
TABLE IV: Geodesic distance (GD) and geodesic to Euclidean distance ratio (GEDR) between different evaluation sets for point-goal navigation.
|
| 178 |
+
|
| 179 |
+

|
| 180 |
+
Fig. 4: Point-goal navigation examples from the MP3D Val-Hard set where the vanilla DD-PPO [10] fails to reach the target while our method is successful.
|
| 181 |
+
|
| 182 |
+
$T = 500$ steps to reach the target. For evaluation we use the standard metrics [1]: Success: percentage of successful episodes, and SPL: success rate normalized by path length. For this experiment we assume noise-free poses are provided by the simulator. To combine DD-PPO with our planner, we set the current short-term goal estimated by our approach as the target that DD-PPO needs to reach. For the vanilla DD-PPO we use the final target location in each test episode.
|
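Success and SPL follow the standard definitions from [1]; a compact sketch, with the episode field names being illustrative:

```python
def success_and_spl(episodes):
    """Success (%) and SPL (%) over a list of episodes.

    Each episode is a dict with keys: 'success' (0 or 1), 'shortest_path' (geodesic
    distance from start to goal, meters) and 'agent_path' (length actually traveled).
    """
    n = len(episodes)
    success = sum(e["success"] for e in episodes) / n
    spl = sum(
        e["success"] * e["shortest_path"] / max(e["agent_path"], e["shortest_path"])
        for e in episodes
    ) / n
    return 100.0 * success, 100.0 * spl
```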
| 183 |
+
|
| 184 |
+
DD-PPO essentially solves the Gibson point-goal navigation task, so we turn our attention to MP3D, where DD-PPO has lower performance due to the episodes having a larger average geodesic distance (GD) to the goal. However, we noticed that the average geodesic-to-Euclidean distance ratio (GEDR) in MP3D is still low (a GEDR of 1 means there is a straight-line path between the starting position and the goal).
|
| 185 |
+
|
| 186 |
+
In order to demonstrate the effectiveness of our proposed method, we generated a new evaluation set (MP3D Val-Hard) with minimum GEDR=2.5. This created episodes which frequently involve sharp u-turns and multiple obstacles along the shortest path. Table IV illustrates episode statistics between different evaluation sets<sup>1</sup>. In addition to MP3D Val-Hard, we also test our method on the publicly available sets of MP3D Val and MP3D Test. We note that MP3D Val-Hard was generated using the same random procedure as its publicly available counterparts.
|
| 187 |
+
|
| 188 |
+
We define two variations of our method in order to demonstrate the usefulness of our uncertainty estimation by choosing different values for the $\alpha_{1}$ and $\alpha_{2}$ parameters of Eq. 6 from section III-C. First, $UPEN-Occ + DD-PPO$ ($\alpha_{1} = 0$, $\alpha_{2} = 0$) considers only the occupancy probabilities when estimating the traversability difficulty of a path, while $UPEN-Greedy + DD-PPO$ ($\alpha_{1} = 0$, $\alpha_{2} = 0.5$) considers the path length but not the uncertainty. Our default method $UPEN + DD-PPO$ uses $\alpha_{1} = 0.1$ and $\alpha_{2} = 0.5$.
|
| 189 |
+
|
| 190 |
+
The results are illustrated in Table III. We outperform all baselines in all evaluation sets with regard to Success. The largest gap in performance is observed in the MP3D Val-Hard set, which contains episodes with much higher average GEDR than the other sets. This suggests that our method is able to follow more complicated paths by choosing short-term goals, in contrast to the vanilla DD-PPO which has to negotiate narrow passages and sharp turns only from egocentric observations. Regarding SPL, our performance gains are not as pronounced as in Success, since our policy frequently prefers paths with lower traversability difficulty over shorter paths, to ensure a higher success probability.
|
| 191 |
+
|
| 192 |
+
# V. CONCLUSION
|
| 193 |
+
|
| 194 |
+
We introduced a novel uncertainty-driven planner for exploration and navigation tasks in previously unseen environments. The planner leverages an occupancy map predictor that hallucinates map regions outside the field of view of the agent and uses its predictions to formulate uncertainty-based objectives. Our experiments on exploration suggest that our method is more efficient in covering unknown areas. In terms of point-goal navigation, we showed how DD-PPO [10] augmented with our method outperforms its vanilla version. This suggests that end-to-end navigation methods can benefit from employing an uncertainty-driven planner, especially in difficult episodes.
|
| 195 |
+
|
| 196 |
+
# REFERENCES
|
| 197 |
+
|
| 198 |
+
[1] P. Anderson, A. Chang, D. S. Chaplot, A. Dosovitskiy, S. Gupta, V. Koltun, J. Kosecka, J. Malik, R. Mottaghi, M. Savva, et al., "On evaluation of embodied navigation agents," arXiv preprint arXiv:1807.06757, 2018.
|
| 199 |
+
[2] D. Batra, A. Gokaslan, A. Kembhavi, O. Maksymets, R. Mottaghi, M. Savva, A. Toshev, and E. Wijmans, "Objectnav revisited: On evaluation of embodied agents navigating to objects," arXiv preprint arXiv:2006.13171, 2020.
|
| 200 |
+
[3] C. Cadena, L. Carlone, H. Carrillo, Y. Latif, D. Scaramuzza, J. Neira, I. Reid, and J. J. Leonard, "Past, present, and future of simultaneous localization and mapping: Toward the robust-perception age," IEEE Transactions on robotics, vol. 32, no. 6, pp. 1309-1332, 2016.
|
| 201 |
+
[4] N. A. Melchior and R. Simmons, "Particle RRT for path planning with uncertainty," in Proceedings 2007 IEEE International Conference on Robotics and Automation. IEEE, 2007, pp. 1617-1624.
|
| 202 |
+
[5] K. Ok, S. Ansari, B. Gallagher, W. Sica, F. Dellaert, and M. Stilman, "Path planning with uncertainty: Voronoi uncertainty fields," in 2013 IEEE International Conference on Robotics and Automation. IEEE, 2013, pp. 4596-4601.
|
| 203 |
+
[6] M. Savva, A. Kadian, O. Maksymets, Y. Zhao, E. Wijmans, B. Jain, J. Straub, J. Liu, V. Koltun, J. Malik, et al., "Habitat: A platform for embodied ai research," in Proceedings of the IEEE International Conference on Computer Vision, 2019, pp. 9339-9347.
|
| 204 |
+
[7] F. Xia, A. R. Zamir, Z. He, A. Sax, J. Malik, and S. Savarese, "Gibson env: Real-world perception for embodied agents," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 9068-9079.
|
| 205 |
+
[8] Y. Zhu, R. Mottaghi, E. Kolve, J. J. Lim, A. Gupta, L. Fei-Fei, and A. Farhadi, "Target-driven visual navigation in indoor scenes using deep reinforcement learning," in 2017 IEEE international conference on robotics and automation (ICRA). IEEE, 2017, pp. 3357-3364.
|
| 206 |
+
[9] T. Chen, S. Gupta, and A. Gupta, “Learning exploration policies for navigation,” 7th International Conference on Learning Representations, ICLR 2019, 2019.
|
| 207 |
+
[10] E. Wijmans, A. Kadian, A. Morcos, S. Lee, I. Essa, D. Parikh, M. Savva, and D. Batra, “DD-PPO: Learning near-perfect pointgoal navigators from 2.5 billion frames,” arXiv preprint arXiv:1911.00357, 2019.
|
| 208 |
+
[11] S. Gupta, J. Davidson, S. Levine, R. Sukthankar, and J. Malik, "Cognitive mapping and planning for visual navigation," in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2017, pp. 2616-2625.
|
| 209 |
+
[12] S. K. Ramakrishnan, Z. Al-Halah, and K. Grauman, "Occupancy anticipation for efficient exploration and navigation," European Conference on Computer Vision, pp. 400-418, 2020.
|
| 210 |
+
[13] D. S. Chaplot, D. Gandhi, S. Gupta, A. Gupta, and R. Salakhutdinov, "Learning to explore using active neural slam," International Conference on Learning Representations, 2020.
|
| 211 |
+
[14] S. M. LaValle et al., "Rapidly-exploring random trees: A new tool for path planning," 1998.
|
| 212 |
+
[15] H. S. Seung, M. Opper, and H. Sompolinsky, “Query by committee,” in Proceedings of the fifth annual workshop on Computational learning theory, 1992, pp. 287–294.
|
| 213 |
+
[16] D. Pathak, D. Gandhi, and A. Gupta, "Self-Supervised Exploration via Disagreement," ICML, 2019.
|
| 214 |
+
[17] A. Chang, A. Dai, T. Funkhouser, M. Halber, M. Niessner, M. Savva, S. Song, A. Zeng, and Y. Zhang, "Matterport3d: Learning from rgb-d data in indoor environments," 2017 International Conference on 3D Vision (3DV), IEEE, 2017.
|
| 215 |
+
[18] J. Fuentes-Pacheco, J. Ruiz-Ascencio, and J. M. Rendon-Mancha, “Visual simultaneous localization and mapping: a survey,” Artificial intelligence review, vol. 43, no. 1, pp. 55–81, 2015.
|
| 216 |
+
[19] M. Savva, A. X. Chang, A. Dosovitskiy, T. Funkhouser, and V. Koltun, “Minos: Multimodal indoor simulator for navigation in complex environments,” arXiv preprint arXiv:1712.03931, 2017.
|
| 217 |
+
[20] X. Zhao, H. Agrawal, D. Batra, and A. Schwing, “The surprising effectiveness of visual odometry techniques for embodied pointgoal navigation,” arXiv preprint arXiv:2108.11550, 2021.
|
| 218 |
+
[21] P. Karkus, S. Cai, and D. Hsu, "Differentiable slam-net: Learning particle slam for visual navigation," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021, pp. 2815-2825.
|
| 219 |
+
[22] D. Mishkin, A. Dosovitskiy, and V. Koltun, "Benchmarking classic and learned navigation in complex 3d environments," arXiv preprint arXiv:1901.10915, 2019.
|
| 220 |
+
|
| 221 |
+
[23] D. S. Chaplot, D. Gandhi, A. Gupta, and R. Salakhutdinov, “Object goal navigation using goal-oriented semantic exploration,” Advances in Neural Information Processing Systems 33, 2020.
|
| 222 |
+
[24] G. Georgakis, B. Bucher, K. Schmeckpeper, S. Singh, and K. Daniilidis, "Learning to map for active semantic goal navigation," arXiv preprint arXiv:2106.15648, 2021.
|
| 223 |
+
[25] G. Georgakis, Y. Li, and J. Kosecka, “Simultaneous mapping and target driven navigation,” arXiv preprint arXiv:1911.07980, 2019.
|
| 224 |
+
[26] A. Mousavian, A. Toshev, M. Fiser, J. Košecka, A. Wahid, and J. Davidson, "Visual representations for semantic target driven navigation," in 2019 International Conference on Robotics and Automation (ICRA). IEEE, 2019, pp. 8846-8852.
|
| 225 |
+
[27] Y. Liang, B. Chen, and S. Song, "SSCNav: Confidence-aware semantic scene completion for visual semantic navigation," International Conference on Robotics and Automation (ICRA), 2021.
|
| 226 |
+
[28] D. S. Chaplot, R. Salakhutdinov, A. Gupta, and S. Gupta, “Neural topological slam for visual navigation,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp. 12875–12884.
|
| 227 |
+
[29] O. Kwon, N. Kim, Y. Choi, H. Yoo, J. Park, and S. Oh, "Visual graph memory with unsupervised representation for visual navigation."
|
| 228 |
+
[30] H. J. S. Feder, J. J. Leonard, and C. M. Smith, "Adaptive mobile robot navigation and mapping," The International Journal of Robotics Research, vol. 18, no. 7, pp. 650-668, 1999.
|
| 229 |
+
[31] T. Kollar and N. Roy, “Trajectory optimization using reinforcement learning for map exploration,” The International Journal of Robotics Research, vol. 27, no. 2, pp. 175–196, 2008.
|
| 230 |
+
[32] L. Carlone, J. Du, M. K. Ng, B. Bona, and M. Indri, "Active slam and exploration with particle filters using kullback-leibler divergence," Journal of Intelligent & Robotic Systems, vol. 75, no. 2, pp. 291-311, 2014.
|
| 231 |
+
[33] H. Carrillo, I. Reid, and J. A. Castellanos, "On the comparison of uncertainty criteria for active slam," in 2012 IEEE International Conference on Robotics and Automation. IEEE, 2012, pp. 2080-2087.
|
| 232 |
+
[34] J.-L. Blanco, J.-A. Fernandez-Madrigal, and J. González, “A novel measure of uncertainty for mobile robot slam with rao-blackwellized particle filters,” The International Journal of Robotics Research, vol. 27, no. 1, pp. 73-89, 2008.
|
| 233 |
+
[35] C. Stachniss, G. Grisetti, and W. Burgard, "Information gain-based exploration using rao-blackwellized particle filters," in Robotics: Science and systems, vol. 2, 2005, pp. 65-72.
|
| 234 |
+
[36] K. Fang, A. Toshev, L. Fei-Fei, and S. Savarese, "Scene memory transformer for embodied agents in long-horizon tasks," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2019, pp. 538-547.
|
| 235 |
+
[37] J. Zhang, L. Tai, M. Liu, J. Boedecker, and W. Burgard, “Neural slam: Learning to explore with external memory,” arXiv preprint arXiv:1706.09520, 2017.
|
| 236 |
+
[38] M. Narasimhan, E. Wijmans, X. Chen, T. Darrell, D. Batra, D. Parikh, and A. Singh, "Seeing the un-scene: Learning amodal semantic maps for room navigation," European Conference on Computer Vision. Springer, Cham, 2020.
|
| 237 |
+
[39] Y. Katsumata, A. Taniguchi, L. El Hafi, Y. Hagiwara, and T. Taniguchi, "Spcomapgan: Spatial concept formation-based semantic mapping with generative adversarial networks," in 2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS). IEEE, 2020, pp. 7927-7934.
|
| 238 |
+
[40] E. Beeching, J. Dibangoye, O. Simonin, and C. Wolf, “Learning to plan with uncertain topological maps,” in Computer Vision-ECCV 2020: 16th European Conference, Glasgow, UK, August 23–28, 2020, Proceedings, Part III 16. Springer, 2020, pp. 473–490.
|
| 239 |
+
[41] K. Katyal, K. Popek, C. Paxton, P. Burlina, and G. D. Hager, "Uncertainty-aware occupancy map prediction using generative networks for robot navigation," in 2019 International Conference on Robotics and Automation (ICRA), 2019, pp. 5453-5459.
|
| 240 |
+
[42] D. D. Fan, K. Otsu, Y. Kubo, A. Dixit, J. Burdick, and A.-A. Agha-Mohammadi, "Step: Stochastic traversability evaluation and planning for risk-aware off-road navigation," in Robotics: Science and Systems. RSS Foundation, 2021, pp. 1-21.
|
| 241 |
+
[43] É. Pairet, J. D. Hernández, M. Carreras, Y. Petillot, and M. Lahijanian, "Online mapping and motion planning under uncertainty for safe navigation in unknown environments," IEEE Transactions on Automation Science and Engineering, 2021.
|
| 242 |
+
|
| 243 |
+
[44] Y. Gal, “Uncertainty in deep learning,” Ph.D. dissertation, University of Cambridge, 2016.
|
| 244 |
+
[45] A. Kendall and Y. Gal, "What uncertainties do we need in bayesian deep learning for computer vision?" in Advances in neural information processing systems, 2017, pp. 5574-5584.
|
| 245 |
+
[46] M. G. Azar, I. Osband, and R. Munos, “Minimax regret bounds for reinforcement learning,” in International Conference on Machine Learning. PMLR, 2017, pp. 263–272.
|
| 246 |
+
[47] P. Auer, N. Cesa-Bianchi, and P. Fischer, "Finite-time analysis of the multiarmed bandit problem," Machine learning, vol. 47, no. 2, pp. 235-256, 2002.
|
| 247 |
+
[48] R. Y. Chen, S. Sidor, P. Abbeel, and J. Schulman, "UCB exploration via Q-ensembles," arXiv preprint arXiv:1706.01502, 2017.
|
| 248 |
+
[49] O. Ronneberger, P. Fischer, and T. Brox, "U-net: Convolutional networks for biomedical image segmentation," in International Conference on Medical image computing and computer-assisted intervention. Springer, 2015, pp. 234-241.
|
| 249 |
+
[50] B. Lakshminarayanan, A. Pritzel, and C. Blundell, "Simple and scalable predictive uncertainty estimation using deep ensembles," Advances in Neural Information Processing Systems 30, 2017.
|
| 250 |
+
[51] Y. Gal, R. Islam, and Z. Ghahramani, “Deep bayesian active learning with image data,” in International Conference on Machine Learning. PMLR, 2017, pp. 1183–1192.
|
| 251 |
+
[52] B. Yamauchi, “A frontier-based approach for autonomous exploration,” in Proceedings 1997 IEEE International Symposium on Computational Intelligence in Robotics and Automation CIRA '97. Towards New Computational Principles for Robotics and Automation'. IEEE, 1997, pp. 146-151.
|
| 252 |
+
[53] K. He, X. Zhang, S. Ren, and J. Sun, "Deep residual learning for image recognition," in Proceedings of the IEEE conference on computer vision and pattern recognition, 2016, pp. 770-778.
|
| 253 |
+
[54] A. Paszke, S. Gross, S. Chintala, G. Chanan, E. Yang, Z. DeVito, Z. Lin, A. Desmaison, L. Antiga, and A. Lerer, "Automatic differentiation in pytorch," 2017.
|
2202.11xxx/2202.11907/images.zip
ADDED
|
@@ -0,0 +1,3 @@
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c44d11a983966b3133b64a57897e1b15aa2fb850b2e8f558a331f2575d3eef80
|
| 3 |
+
size 366792
|
2202.11xxx/2202.11907/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.11xxx/2202.11911/9037d395-d951-4662-9f75-505b7890fe99_content_list.json
ADDED
|
@@ -0,0 +1,1571 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "When Transformer Meets Robotic Grasping: Exploits Context for Efficient Grasp Detection",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
122,
|
| 8 |
+
70,
|
| 9 |
+
872,
|
| 10 |
+
140
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Shaochen Wang, Zhangli Zhou, and Zhen Kan, Senior Member, IEEE",
|
| 17 |
+
"bbox": [
|
| 18 |
+
233,
|
| 19 |
+
147,
|
| 20 |
+
754,
|
| 21 |
+
164
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Abstract—In this paper, we present a transformer-based architecture, namely TF-Grasp, for robotic grasp detection. The developed TF-Grasp framework has two elaborate designs making it well suitable for visual grasping tasks. The first key design is that we adopt the local window attention to capture local contextual information and detailed features of graspable objects. Then, we apply the cross window attention to model the long-term dependencies between distant pixels. Object knowledge, environmental configuration, and relationships between different visual entities are aggregated for subsequent grasp detection. The second key design is that we build a hierarchical encoder-decoder architecture with skip-connections, delivering shallow features from the encoder to decoder to enable a multi-scale feature fusion. Due to the powerful attention mechanism, TF-Grasp can simultaneously obtain the local information (i.e., the contours of objects), and model long-term connections such as the relationships between distinct visual concepts in clutter. Extensive computational experiments demonstrate that TF-Grasp achieves competitive results versus state-of-art grasping convolutional models and attains a higher accuracy of $97.99\\%$ and $94.6\\%$ on Cornell and Jacquard grasping datasets, respectively. Real-world experiments using a 7DoF Franka Emika Panda robot also demonstrate its capability of grasping unseen objects in a variety of scenarios. The code is available at https://github.com/WangShaoSUN/grasp-transformer.",
|
| 28 |
+
"bbox": [
|
| 29 |
+
73,
|
| 30 |
+
220,
|
| 31 |
+
491,
|
| 32 |
+
536
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Index Terms—Vision Transformer, Grasp Detection, Robotic Grasping.",
|
| 39 |
+
"bbox": [
|
| 40 |
+
73,
|
| 41 |
+
542,
|
| 42 |
+
488,
|
| 43 |
+
570
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "I. INTRODUCTION",
|
| 50 |
+
"text_level": 1,
|
| 51 |
+
"bbox": [
|
| 52 |
+
204,
|
| 53 |
+
590,
|
| 54 |
+
361,
|
| 55 |
+
606
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "DATA-driven methodologies such as deep learning have become the mainstream methods for robotic visual sensing tasks such as indoor localization [1], trajectory prediction [2], and robotic manipulation [3], [4], since they require less handcrafted feature engineering and can be extended to many complex tasks. In recent years, as visual sensing is increasingly being used in manufacturing, industry, and medical care, growing research is devoted to developing advanced robot's perception abilities. A typical application of visual sensing is the robotic grasp detection, where the images of objects are used to infer the grasping pose. Considering a grasping task of manipulating a wide diversity of objects, to find",
|
| 62 |
+
"bbox": [
|
| 63 |
+
73,
|
| 64 |
+
612,
|
| 65 |
+
491,
|
| 66 |
+
794
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "Manuscript received February 23, 2022; revised April 25, 2022; accepted June 20, 2022. This letter was recommended for publication by Markus Vincze upon evaluation of the Associate Editor and Reviewers' comments. This work was supported in part by the National Natural Science Foundation of China under Grant U2013601, and Grant 62173314. (Corresponding author: Zhen Kan.)",
|
| 73 |
+
"bbox": [
|
| 74 |
+
73,
|
| 75 |
+
806,
|
| 76 |
+
491,
|
| 77 |
+
875
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "text",
|
| 83 |
+
"text": "Shaochen Wang, Zhangli Zhou, and Zhen Kan are with the Department of Automation, University of Science and Technology of China, Hefei 230026, China, (e-mail: samwang@mail.ustc.edu.cn; zzl1215@mail.ustc.edu.cn; zkan@ustc.edu.cn.) An extended version is available at https://arxiv.org/abs/2202.11911.",
|
| 84 |
+
"bbox": [
|
| 85 |
+
73,
|
| 86 |
+
875,
|
| 87 |
+
491,
|
| 88 |
+
931
|
| 89 |
+
],
|
| 90 |
+
"page_idx": 0
|
| 91 |
+
},
|
| 92 |
+
{
|
| 93 |
+
"type": "text",
|
| 94 |
+
"text": "Digital Object Identifier (DOI): see top of this page.",
|
| 95 |
+
"bbox": [
|
| 96 |
+
88,
|
| 97 |
+
931,
|
| 98 |
+
375,
|
| 99 |
+
944
|
| 100 |
+
],
|
| 101 |
+
"page_idx": 0
|
| 102 |
+
},
|
| 103 |
+
{
|
| 104 |
+
"type": "text",
|
| 105 |
+
"text": "the graspable regions, the robots have to concentrate on not only partial geometric information but also the entire visual appearance of the object. Particularly in unstructured and cluttered environments, dealing with variations in shape and position (e.g., occlusion) and also the spatial relationship with other objects are critical to the performance of grasp detection. Therefore, this work is particularly motivated to investigate grasp detection that takes into account both local neighbor pixels and long-distance relationships in spatial dimensions.",
|
| 106 |
+
"bbox": [
|
| 107 |
+
501,
|
| 108 |
+
219,
|
| 109 |
+
921,
|
| 110 |
+
354
|
| 111 |
+
],
|
| 112 |
+
"page_idx": 0
|
| 113 |
+
},
|
| 114 |
+
{
|
| 115 |
+
"type": "text",
|
| 116 |
+
"text": "Most modern grasp detectors [3], [5] are based on convolutional neural networks (CNNs) which emerge as the de facto standard for processing visual robotic grasping. However, current CNNs are composed of individual convolution kernels, which are more inclined to concentrate on local level information. Also, the convolution kernels in a layer of CNN are viewed as independent counterparts without mutual information fusion. Generally, to maintain a large receptive field, CNNs have to repeatedly stack convolutional layers, which reduce the spatial resolution and inevitably results in the loss of global details and degraded performance.",
|
| 117 |
+
"bbox": [
|
| 118 |
+
501,
|
| 119 |
+
356,
|
| 120 |
+
921,
|
| 121 |
+
521
|
| 122 |
+
],
|
| 123 |
+
"page_idx": 0
|
| 124 |
+
},
|
| 125 |
+
{
|
| 126 |
+
"type": "text",
|
| 127 |
+
"text": "Recently, as a novel approach to handle natural language processing and computer vision, the transformer [6], [7], [8] demonstrates remarkable success. The widely adopted attention mechanisms [6] of transformers in sequence modeling provide an elegant resolution that can better convey the fusion of information across global sequences. In fact, as robots are deployed in more and more diverse applications such as industrial assembly lines and smart home, the sensing capacity of robotic systems needs to be enriched, not only in local regions, but also in global interaction. Especially when robots frequently interact with objects in the environment, the awareness of global attention is particularly important with respect to safety and reliability. However, most vision transformers are designed for image classification on natural images processing tasks. Few of them are specifically built for robotic tasks.",
|
| 128 |
+
"bbox": [
|
| 129 |
+
501,
|
| 130 |
+
522,
|
| 131 |
+
921,
|
| 132 |
+
762
|
| 133 |
+
],
|
| 134 |
+
"page_idx": 0
|
| 135 |
+
},
|
| 136 |
+
{
|
| 137 |
+
"type": "text",
|
| 138 |
+
"text": "In this paper, we present a transformer-based visual grasp detection framework, namely TF-Grasp, which leverages the fact that the attention can better aggregate information across the entire input sequences to obtain an improved global representation. More specifically, the information within independent image patches is bridged via self-attention and the encoder in our framework captures these multi-scale low-level features. The decoder incorporates the high-level features through long-range spatial dependencies to construct the final grasping pose. We provide detailed empirical evidence to show that our grasping transformer performs reasonably well on popular grasping testbeds, e.g., Cornell and Jacquard grasping",
|
| 139 |
+
"bbox": [
|
| 140 |
+
501,
|
| 141 |
+
763,
|
| 142 |
+
921,
|
| 143 |
+
946
|
| 144 |
+
],
|
| 145 |
+
"page_idx": 0
|
| 146 |
+
},
|
| 147 |
+
{
|
| 148 |
+
"type": "header",
|
| 149 |
+
"text": "IEEE ROBOTICS AND AUTOMATION LETTERS. PREPRINT VERSION. JUNE 2022",
|
| 150 |
+
"bbox": [
|
| 151 |
+
76,
|
| 152 |
+
29,
|
| 153 |
+
501,
|
| 154 |
+
40
|
| 155 |
+
],
|
| 156 |
+
"page_idx": 0
|
| 157 |
+
},
|
| 158 |
+
{
|
| 159 |
+
"type": "page_number",
|
| 160 |
+
"text": "1",
|
| 161 |
+
"bbox": [
|
| 162 |
+
911,
|
| 163 |
+
30,
|
| 164 |
+
919,
|
| 165 |
+
40
|
| 166 |
+
],
|
| 167 |
+
"page_idx": 0
|
| 168 |
+
},
|
| 169 |
+
{
|
| 170 |
+
"type": "aside_text",
|
| 171 |
+
"text": "arXiv:2202.11911v3 [cs.RO] 13 Sep 2022",
|
| 172 |
+
"bbox": [
|
| 173 |
+
22,
|
| 174 |
+
263,
|
| 175 |
+
60,
|
| 176 |
+
705
|
| 177 |
+
],
|
| 178 |
+
"page_idx": 0
|
| 179 |
+
},
|
| 180 |
+
{
|
| 181 |
+
"type": "text",
|
| 182 |
+
"text": "datasets. The experimental results demonstrate that the transformer architecture plays an integral role in generating appropriate grasping poses by learning local and global features from different parts of each object. The vision transformer-based grasp detection works well on the real robotic system and shows promising generalization to unseen objects. In addition, our TF-Grasp can generate the required grasping poses for parallel grippers in a single forward pass of the network.",
|
| 183 |
+
"bbox": [
|
| 184 |
+
78,
|
| 185 |
+
69,
|
| 186 |
+
488,
|
| 187 |
+
203
|
| 188 |
+
],
|
| 189 |
+
"page_idx": 1
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"type": "text",
|
| 193 |
+
"text": "In a nutshell, the contributions of this paper are threefold:",
|
| 194 |
+
"bbox": [
|
| 195 |
+
78,
|
| 196 |
+
205,
|
| 197 |
+
488,
|
| 198 |
+
233
|
| 199 |
+
],
|
| 200 |
+
"page_idx": 1
|
| 201 |
+
},
|
| 202 |
+
{
|
| 203 |
+
"type": "list",
|
| 204 |
+
"sub_type": "text",
|
| 205 |
+
"list_items": [
|
| 206 |
+
"- This work presents a novel and neat transformer architecture for visual robotic grasping tasks. To the best of our knowledge, it is one of the first attempts considering vision transformers in grasp detection tasks.",
|
| 207 |
+
"- We consider simultaneous fusion of local and global features and redesign the classical ViT framework for robotic visual sensing tasks.",
|
| 208 |
+
"- Exhaustive experiments are conducted to show the advantages of the transformer-based robotic perception framework. The experimental results demonstrate that our model achieves improved performance on popular grasping datasets compared to the state-of-the-art methods. We further show that our grasping transformer can generate appropriate grasping poses for known or unknown objects in either single or cluttered environments."
|
| 209 |
+
],
|
| 210 |
+
"bbox": [
|
| 211 |
+
94,
|
| 212 |
+
237,
|
| 213 |
+
488,
|
| 214 |
+
462
|
| 215 |
+
],
|
| 216 |
+
"page_idx": 1
|
| 217 |
+
},
|
| 218 |
+
{
|
| 219 |
+
"type": "text",
|
| 220 |
+
"text": "II. RELATED WORK",
|
| 221 |
+
"text_level": 1,
|
| 222 |
+
"bbox": [
|
| 223 |
+
215,
|
| 224 |
+
481,
|
| 225 |
+
352,
|
| 226 |
+
493
|
| 227 |
+
],
|
| 228 |
+
"page_idx": 1
|
| 229 |
+
},
|
| 230 |
+
{
|
| 231 |
+
"type": "text",
|
| 232 |
+
"text": "This section reviews recent advances in the field of robotic grasping and briefly describes the progress of transformers in different areas.",
|
| 233 |
+
"bbox": [
|
| 234 |
+
78,
|
| 235 |
+
500,
|
| 236 |
+
488,
|
| 237 |
+
542
|
| 238 |
+
],
|
| 239 |
+
"page_idx": 1
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"type": "text",
|
| 243 |
+
"text": "A. Grasp Detection",
|
| 244 |
+
"text_level": 1,
|
| 245 |
+
"bbox": [
|
| 246 |
+
78,
|
| 247 |
+
564,
|
| 248 |
+
210,
|
| 249 |
+
577
|
| 250 |
+
],
|
| 251 |
+
"page_idx": 1
|
| 252 |
+
},
|
| 253 |
+
{
|
| 254 |
+
"type": "text",
|
| 255 |
+
"text": "The ability to locate the object position and determine the appropriate grasping pose is crucial to stable and robust robotic grasping. Grasp detection, as the name implies, uses the image captured from the camera to infer the grasping pose for the robot manipulator. Using geometry-driven methods, earlier works [9], [10] mainly focus on analyzing the contours of objects to identify grasping points. A common assumption in these methods is that the geometric model of the object is always available. However, preparing the CAD models for graspable objects is time-consuming and impractical for real-time implementation. Recently, deep learning based methods have been successfully applied in visual grasping tasks [3], [5], [11], [12], [13]. The work of [14] is one of the earliest works that introduces deep neural networks to grasp detection via a two-stage strategy where the first stage finds exhaustive possible grasping candidates and the second stage evaluates the quality of these grasp candidates to identify the best one. However, due to numerous grasping proposals, the method in [14] suffers from relatively slow speed. Many recent works utilize convolutional neural networks to generate bounding box proposals to estimate the grasp pose of objects. Redmon et al. [5] employed an Alexnet-like CNN architecture to regress grasping poses. Kumra et al. [3] explored the use of ResNet-50 as a backbone to incorporate multimodal",
|
| 256 |
+
"bbox": [
|
| 257 |
+
78,
|
| 258 |
+
583,
|
| 259 |
+
488,
|
| 260 |
+
943
|
| 261 |
+
],
|
| 262 |
+
"page_idx": 1
|
| 263 |
+
},
|
| 264 |
+
{
|
| 265 |
+
"type": "text",
|
| 266 |
+
"text": "including depth and RGB information to further improve the grasp performance. Besides, CNN-based grasp quality networks [15], [16] were proposed to evaluate and predict the robustness of grasp candidates. In the same line, GG-CNN [17] developed a fully convolutional neural network to perform grasp detection, which provides a lightweight and real-time solution for visual grasping. Currently, most of the existing grasp detection methods are still heavily inspired by computer vision techniques such as object recognition, object detection, etc. In contrast to classical visual problems where the detected objects are usually well-defined instances in the scene, in grasp detection, the grasp configuration to be generated is continuous, which implies an infinite number of possible grasp options. This places significant challenges in feature extraction to identify a valid grasp configuration from all possible candidates. We argue that the loss of long-term dependencies in feature extraction is a major drawback of current CNNs based grasp detection methods.",
|
| 267 |
+
"bbox": [
|
| 268 |
+
506,
|
| 269 |
+
69,
|
| 270 |
+
919,
|
| 271 |
+
340
|
| 272 |
+
],
|
| 273 |
+
"page_idx": 1
|
| 274 |
+
},
|
| 275 |
+
{
|
| 276 |
+
"type": "text",
|
| 277 |
+
"text": "B. Transformer",
|
| 278 |
+
"text_level": 1,
|
| 279 |
+
"bbox": [
|
| 280 |
+
508,
|
| 281 |
+
383,
|
| 282 |
+
612,
|
| 283 |
+
397
|
| 284 |
+
],
|
| 285 |
+
"page_idx": 1
|
| 286 |
+
},
|
| 287 |
+
{
|
| 288 |
+
"type": "text",
|
| 289 |
+
"text": "Transformer [6] first emerged in machine translation and is rapidly establishing itself as a new paradigm in natural language processing due to its potential to model global information, which learns the high quality features by considering the whole context. Thanks to its excellent global representation and friendly parallel computation, the transformer is competitive in long sequences modeling and gradually replaces RNNs and CNNs.",
|
| 290 |
+
"bbox": [
|
| 291 |
+
506,
|
| 292 |
+
411,
|
| 293 |
+
919,
|
| 294 |
+
530
|
| 295 |
+
],
|
| 296 |
+
"page_idx": 1
|
| 297 |
+
},
|
| 298 |
+
{
|
| 299 |
+
"type": "text",
|
| 300 |
+
"text": "Motivated by the remarkable success of transformers achieved in natural language processing, more and more researchers are interested in the employment of attention mechanisms in visual tasks. At present, the transformer has been successfully applied to image classification, object detection, and segmentation tasks. However, there still exist many challenges. First, visual signals and word tokens are very different on many scales. Second, the high dimension of pixel-level information may introduce significant computational complexity.",
|
| 301 |
+
"bbox": [
|
| 302 |
+
506,
|
| 303 |
+
535,
|
| 304 |
+
919,
|
| 305 |
+
685
|
| 306 |
+
],
|
| 307 |
+
"page_idx": 1
|
| 308 |
+
},
|
| 309 |
+
{
|
| 310 |
+
"type": "text",
|
| 311 |
+
"text": "More recently, ViT [7] was presented as a transformer model to tackle natural images recognition, which splits the image into non-overlapping patches. The authors in [8] proposed a hierarchical ViT called Swin-Transformer by calculating the local self-attention with shifted windows. In contrast to the quadratic computation complexity of self-attention in ViT, Swin-Transformer achieves a linear complexity. Inspired by this fashion, many researchers have tried to apply transformer to other fields. For example, TransUNet [18] combines transformer and Unet [19] for medical image diagnosis. Nevertheless, how to exploit the strengths of attention to aggregate information from entire inputs has not been investigated in the task of visual grasp detection. Unlike prior works, we design a transformer based encoder-decoder architecture to predict the grasp posture in an end-to-end manner. It is shown that our method achieves higher grasp success than the state-of-the-art CNNs counterparts.",
|
| 312 |
+
"bbox": [
|
| 313 |
+
506,
|
| 314 |
+
688,
|
| 315 |
+
919,
|
| 316 |
+
943
|
| 317 |
+
],
|
| 318 |
+
"page_idx": 1
|
| 319 |
+
},
|
| 320 |
+
{
|
| 321 |
+
"type": "page_number",
|
| 322 |
+
"text": "2",
|
| 323 |
+
"bbox": [
|
| 324 |
+
76,
|
| 325 |
+
31,
|
| 326 |
+
84,
|
| 327 |
+
39
|
| 328 |
+
],
|
| 329 |
+
"page_idx": 1
|
| 330 |
+
},
|
| 331 |
+
{
|
| 332 |
+
"type": "header",
|
| 333 |
+
"text": "IEEE ROBOTICS AND AUTOMATION LETTERS. PREPRINT VERSION. JUNE 2022",
|
| 334 |
+
"bbox": [
|
| 335 |
+
496,
|
| 336 |
+
30,
|
| 337 |
+
919,
|
| 338 |
+
40
|
| 339 |
+
],
|
| 340 |
+
"page_idx": 1
|
| 341 |
+
},
|
| 342 |
+
{
|
| 343 |
+
"type": "image",
|
| 344 |
+
"img_path": "images/228c85b3826ed6b1e7ebc41b9b50406218033be10de1d47ee93cb91541d84974.jpg",
|
| 345 |
+
"image_caption": [
|
| 346 |
+
"Fig. 1. Overview of the TF-grasp model. Our model takes as input the image captured by the camera mounted on the end-effector of the manipulator and generates a pixel-level grasp representation."
|
| 347 |
+
],
|
| 348 |
+
"image_footnote": [],
|
| 349 |
+
"bbox": [
|
| 350 |
+
117,
|
| 351 |
+
71,
|
| 352 |
+
452,
|
| 353 |
+
407
|
| 354 |
+
],
|
| 355 |
+
"page_idx": 2
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"type": "text",
|
| 359 |
+
"text": "III. METHOD",
|
| 360 |
+
"text_level": 1,
|
| 361 |
+
"bbox": [
|
| 362 |
+
233,
|
| 363 |
+
486,
|
| 364 |
+
333,
|
| 365 |
+
500
|
| 366 |
+
],
|
| 367 |
+
"page_idx": 2
|
| 368 |
+
},
|
| 369 |
+
{
|
| 370 |
+
"type": "text",
|
| 371 |
+
"text": "Grasp Representation. The autonomous visual grasping tasks generally start from collecting visual images of the object by sensory input, which will then be processed to generate an effective grasp configuration to maximise the probability of grasp success. Considering a parallel-plate gripper, the grasp representation $g$ [20] is formulated as a 5-dimensional tuple:",
|
| 372 |
+
"bbox": [
|
| 373 |
+
73,
|
| 374 |
+
503,
|
| 375 |
+
490,
|
| 376 |
+
595
|
| 377 |
+
],
|
| 378 |
+
"page_idx": 2
|
| 379 |
+
},
|
| 380 |
+
{
|
| 381 |
+
"type": "equation",
|
| 382 |
+
"text": "\n$$\ng = \\{x, y, \\theta , w, h \\} \\tag {1}\n$$\n",
|
| 383 |
+
"text_format": "latex",
|
| 384 |
+
"bbox": [
|
| 385 |
+
218,
|
| 386 |
+
603,
|
| 387 |
+
488,
|
| 388 |
+
619
|
| 389 |
+
],
|
| 390 |
+
"page_idx": 2
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"type": "text",
|
| 394 |
+
"text": "where $(x,y)$ are the center coordinates of the grasp rectangle, $(w,h)$ denote the width and height of the grasp rectangle, and $\\theta$ is the orientation of the grasp rectangle with respect to the horizontal axis. Given a gripper with known dimensions, a simplified representation can be expressed as $g = (p,\\phi ,w)$ where $p = (x,y)$ , $\\phi$ indicates the orientation angle of gripper and $w$ denotes the opening distance of gripper, respectively.",
|
| 395 |
+
"bbox": [
|
| 396 |
+
73,
|
| 397 |
+
626,
|
| 398 |
+
488,
|
| 399 |
+
731
|
| 400 |
+
],
|
| 401 |
+
"page_idx": 2
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"type": "text",
|
| 405 |
+
"text": "To facilitate grasping, we follow the setting in [17] to represent the grasp in 2-D image space as",
|
| 406 |
+
"bbox": [
|
| 407 |
+
73,
|
| 408 |
+
732,
|
| 409 |
+
488,
|
| 410 |
+
762
|
| 411 |
+
],
|
| 412 |
+
"page_idx": 2
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"type": "equation",
|
| 416 |
+
"text": "\n$$\nG = \\{Q, W, \\Theta \\} \\in \\mathbb {R} ^ {3 \\times W \\times H}, \\tag {2}\n$$\n",
|
| 417 |
+
"text_format": "latex",
|
| 418 |
+
"bbox": [
|
| 419 |
+
181,
|
| 420 |
+
768,
|
| 421 |
+
488,
|
| 422 |
+
786
|
| 423 |
+
],
|
| 424 |
+
"page_idx": 2
|
| 425 |
+
},
|
| 426 |
+
{
|
| 427 |
+
"type": "text",
|
| 428 |
+
"text": "where the grasp quality $Q$ measures the grasp success of each pixel, and $W$ and $\\Theta$ are the gripper width and orientation angle maps. The value of each pixel in $W$ and $\\Theta$ represents the corresponding width and angle of gripper at that position during the grasping.",
|
| 429 |
+
"bbox": [
|
| 430 |
+
73,
|
| 431 |
+
792,
|
| 432 |
+
488,
|
| 433 |
+
868
|
| 434 |
+
],
|
| 435 |
+
"page_idx": 2
|
| 436 |
+
},
|
| 437 |
+
{
|
| 438 |
+
"type": "text",
|
| 439 |
+
"text": "Consequently, in the developed TF-Grasp, the grasp detection task boils down to three sub-tasks, namely the problems of predicting grasping position, angle, and width.",
|
| 440 |
+
"bbox": [
|
| 441 |
+
73,
|
| 442 |
+
869,
|
| 443 |
+
488,
|
| 444 |
+
914
|
| 445 |
+
],
|
| 446 |
+
"page_idx": 2
|
| 447 |
+
},
|
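The pixel-wise representation in Eq. (2) above can be made concrete with a small sketch. The following is an illustrative example only (not part of the extracted paper; the function name, map shapes, and the disc radius are assumptions) that rasterizes one grasp label into the quality, width, and angle maps used as training targets.

```python
import numpy as np

def grasp_maps_from_label(x, y, theta, width, H=224, W=224, radius=3):
    """Rasterize one grasp label (x, y, theta, width) into pixel-wise
    quality, angle, and width maps of shape (H, W), as in Eq. (2).
    A small disc around the grasp center is marked as graspable;
    the radius is an illustrative choice, not a value from the paper."""
    Q = np.zeros((H, W), dtype=np.float32)      # grasp quality in [0, 1]
    Theta = np.zeros((H, W), dtype=np.float32)  # gripper angle per pixel
    Wmap = np.zeros((H, W), dtype=np.float32)   # gripper width per pixel

    ys, xs = np.ogrid[:H, :W]
    mask = (xs - x) ** 2 + (ys - y) ** 2 <= radius ** 2
    Q[mask] = 1.0
    Theta[mask] = theta
    Wmap[mask] = width
    return np.stack([Q, Wmap, Theta])           # shape (3, H, W)

maps = grasp_maps_from_label(x=112, y=96, theta=0.4, width=30.0)
print(maps.shape)  # (3, 224, 224)
```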
| 448 |
+
{
|
| 449 |
+
"type": "text",
|
| 450 |
+
"text": "Grasp Transformer Overview. A deep motivation of this work is that the treatment of robot perception in complex,",
|
| 451 |
+
"bbox": [
|
| 452 |
+
73,
|
| 453 |
+
914,
|
| 454 |
+
491,
|
| 455 |
+
946
|
| 456 |
+
],
|
| 457 |
+
"page_idx": 2
|
| 458 |
+
},
|
| 459 |
+
{
|
| 460 |
+
"type": "text",
|
| 461 |
+
"text": "dynamic robotic tasks should be global and holistic with information mutual fusion. Specifically, the grasping model can be formulated into an encoder-decoder architecture with a U-shaped structure, as detailed in Fig. 1. The encoder branch aggregates the entire visual input, mutually fuses features by using attention blocks, and then extracts the specific features that are useful for visual robotic grasping. During the decoder process, the model incorporates features delivered via skip-connections and performs a pixel-level grasp prediction by up-sampling. More concretely, the attention modules in the decoder enable more comprehensive processing of local and long-range information, allowing for better multi-scale feature fusion. Each pixel in the prediction heatmap is correlated with the final location and orientation of the end-effector.",
|
| 462 |
+
"bbox": [
|
| 463 |
+
501,
|
| 464 |
+
68,
|
| 465 |
+
921,
|
| 466 |
+
280
|
| 467 |
+
],
|
| 468 |
+
"page_idx": 2
|
| 469 |
+
},
|
| 470 |
+
{
|
| 471 |
+
"type": "text",
|
| 472 |
+
"text": "To bridge the domain gaps between the transformer and visual robotic grasping tasks, we have carefully designed our grasping transformer in the following aspects for improved grasp detection. (a) Cascade Design. Different from the classic ViT architecture, we adapt a cascaded encoder-decoder structure. The encoder utilizes self-attention to learn a contextual representation that facilitates grasping and the decoder makes use of the extracted features to perform a pixel-level grasp prediction. (b) Local and Global balance. We utilize the swim attention layer to achieve a trade-off between global and local information for better scene perception. Window attention performs local feature extraction and the shifted-window attention allows cross window interactions to globally focus on more diverse regions. (c) Feature Fusion. The feature representations at different stages are connected by skip-connections for a multi-scale feature fusion, which acquire both rich semantic and detailed features. (d) Lightweight Design. It is essential for robots to account for efficiency and the real-time performance. We utilize shifted attention blocks and a slimming design for our grasping transformer to reach an ideal trade-off between the performance and speed.",
|
| 473 |
+
"bbox": [
|
| 474 |
+
501,
|
| 475 |
+
280,
|
| 476 |
+
921,
|
| 477 |
+
598
|
| 478 |
+
],
|
| 479 |
+
"page_idx": 2
|
| 480 |
+
},
|
| 481 |
+
{
|
| 482 |
+
"type": "text",
|
| 483 |
+
"text": "Grasp Transformer Encoder. Before being fed into the encoder, the image is first passed through patch partition layer and is then cut into non-overlapping patches. Each patch is treated as a word token in the text. For example, a 2D image $I \\in \\mathbb{R}^{W \\times H \\times C}$ is split into fixed-size patches $x \\in \\mathbb{R}^{N \\times (P \\times P \\times C)}$ , where $(H, W)$ denote the height and width of the original image, $C$ represents the channel of the image, $P$ is the shape size of each image patch, and $N = H \\times W / P^2$ refers to the number of image patches. Then token-based representations can be obtained by passing the images patches into a projection layer.",
|
| 484 |
+
"bbox": [
|
| 485 |
+
503,
|
| 486 |
+
597,
|
| 487 |
+
921,
|
| 488 |
+
763
|
| 489 |
+
],
|
| 490 |
+
"page_idx": 2
|
| 491 |
+
},
|
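The patch-partition step described above can be sketched as follows. This is an assumed minimal implementation (module name, embedding dimension, and patch size are illustrative, with P = 4 matching the value reported later in the implementation details), using a convolution with kernel P and stride P to produce N = HW/P^2 tokens.

```python
import torch
import torch.nn as nn

class PatchPartition(nn.Module):
    """Split an image into non-overlapping P x P patches and project each
    patch to a token embedding, implemented as a conv with kernel P and
    stride P. Dimensions here are illustrative assumptions."""
    def __init__(self, in_ch=3, embed_dim=96, patch=4):
        super().__init__()
        self.proj = nn.Conv2d(in_ch, embed_dim, kernel_size=patch, stride=patch)

    def forward(self, img):                  # img: (B, C, H, W)
        x = self.proj(img)                   # (B, embed_dim, H/P, W/P)
        return x.flatten(2).transpose(1, 2)  # (B, N, embed_dim), N = HW / P^2

tokens = PatchPartition()(torch.randn(1, 3, 224, 224))
print(tokens.shape)  # torch.Size([1, 3136, 96])
```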
| 492 |
+
{
|
| 493 |
+
"type": "text",
|
| 494 |
+
"text": "The encoder is composed by stacking identical transformer blocks. Attentions in the transformer block build long-distance interactions across distant pixels and attend on these positions in the embedding space. At the top of the encoder is a bottleneck block attached to the decoder. The fundamental element in our grasping transformer framework is the multi-head self-attention. The input feature $\\mathbf{X}$ is linearly transformed to derive the query $Q$ , key $K$ , and value $V$ , which are defined as follows:",
|
| 495 |
+
"bbox": [
|
| 496 |
+
501,
|
| 497 |
+
763,
|
| 498 |
+
921,
|
| 499 |
+
897
|
| 500 |
+
],
|
| 501 |
+
"page_idx": 2
|
| 502 |
+
},
|
| 503 |
+
{
|
| 504 |
+
"type": "equation",
|
| 505 |
+
"text": "\n$$\nQ = X W _ {Q}, K = X W _ {K}, V = X W _ {V}, \\tag {3}\n$$\n",
|
| 506 |
+
"text_format": "latex",
|
| 507 |
+
"bbox": [
|
| 508 |
+
583,
|
| 509 |
+
906,
|
| 510 |
+
919,
|
| 511 |
+
922
|
| 512 |
+
],
|
| 513 |
+
"page_idx": 2
|
| 514 |
+
},
|
| 515 |
+
{
|
| 516 |
+
"type": "text",
|
| 517 |
+
"text": "where $W_{Q}, W_{K}, W_{V}$ are linear projection matrices. Next, we",
|
| 518 |
+
"bbox": [
|
| 519 |
+
503,
|
| 520 |
+
928,
|
| 521 |
+
921,
|
| 522 |
+
945
|
| 523 |
+
],
|
| 524 |
+
"page_idx": 2
|
| 525 |
+
},
|
| 526 |
+
{
|
| 527 |
+
"type": "header",
|
| 528 |
+
"text": "WANG et al.: WHEN TRANSFORMER MEETS ROBOTIC GRASPING",
|
| 529 |
+
"bbox": [
|
| 530 |
+
76,
|
| 531 |
+
29,
|
| 532 |
+
426,
|
| 533 |
+
40
|
| 534 |
+
],
|
| 535 |
+
"page_idx": 2
|
| 536 |
+
},
|
| 537 |
+
{
|
| 538 |
+
"type": "page_number",
|
| 539 |
+
"text": "3",
|
| 540 |
+
"bbox": [
|
| 541 |
+
911,
|
| 542 |
+
30,
|
| 543 |
+
919,
|
| 544 |
+
40
|
| 545 |
+
],
|
| 546 |
+
"page_idx": 2
|
| 547 |
+
},
|
| 548 |
+
{
|
| 549 |
+
"type": "image",
|
| 550 |
+
"img_path": "images/42fc0c37297069791a7fa9a49b3d7318823dfcc7e3ce5e8ff443080bba8a358f.jpg",
|
| 551 |
+
"image_caption": [
|
| 552 |
+
"Fig. 2. The architecture of our transformer block."
|
| 553 |
+
],
|
| 554 |
+
"image_footnote": [],
|
| 555 |
+
"bbox": [
|
| 556 |
+
133,
|
| 557 |
+
70,
|
| 558 |
+
434,
|
| 559 |
+
258
|
| 560 |
+
],
|
| 561 |
+
"page_idx": 3
|
| 562 |
+
},
|
| 563 |
+
{
|
| 564 |
+
"type": "text",
|
| 565 |
+
"text": "compute the similarity between the query and key by using the dot product to obtain the attention,",
|
| 566 |
+
"bbox": [
|
| 567 |
+
73,
|
| 568 |
+
311,
|
| 569 |
+
491,
|
| 570 |
+
340
|
| 571 |
+
],
|
| 572 |
+
"page_idx": 3
|
| 573 |
+
},
|
| 574 |
+
{
|
| 575 |
+
"type": "equation",
|
| 576 |
+
"text": "\n$$\n\\operatorname{Attention}(Q, K, V) = \\operatorname{SoftMax}\\left(\\frac{Q K^{T}}{\\sqrt{d}} + B\\right) V \\tag{4}\n$$\n",
|
| 577 |
+
"text_format": "latex",
|
| 578 |
+
"bbox": [
|
| 579 |
+
124,
|
| 580 |
+
345,
|
| 581 |
+
491,
|
| 582 |
+
380
|
| 583 |
+
],
|
| 584 |
+
"page_idx": 3
|
| 585 |
+
},
|
| 586 |
+
{
|
| 587 |
+
"type": "text",
|
| 588 |
+
"text": "where $\\sqrt{d}$ is the scaling factor and $B$ is the learnable relative position encoding.",
|
| 589 |
+
"bbox": [
|
| 590 |
+
73,
|
| 591 |
+
387,
|
| 592 |
+
490,
|
| 593 |
+
417
|
| 594 |
+
],
|
| 595 |
+
"page_idx": 3
|
| 596 |
+
},
|
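Eqs. (3) and (4) above can be illustrated with a single-head sketch. The class below is an assumption for illustration only; the actual model uses multi-head attention inside shifted windows, and its relative position bias is indexed per relative offset rather than stored as one dense matrix.

```python
import torch
import torch.nn as nn

class SingleHeadAttention(nn.Module):
    """Single-head version of Eqs. (3)-(4): Q = XW_Q, K = XW_K, V = XW_V,
    Attention = SoftMax(QK^T / sqrt(d) + B) V, with B a learnable bias."""
    def __init__(self, dim, tokens):
        super().__init__()
        self.q = nn.Linear(dim, dim, bias=False)
        self.k = nn.Linear(dim, dim, bias=False)
        self.v = nn.Linear(dim, dim, bias=False)
        self.bias = nn.Parameter(torch.zeros(tokens, tokens))  # B
        self.scale = dim ** -0.5                                # 1 / sqrt(d)

    def forward(self, x):                       # x: (B, N, dim)
        q, k, v = self.q(x), self.k(x), self.v(x)
        attn = torch.softmax(q @ k.transpose(-2, -1) * self.scale + self.bias, dim=-1)
        return attn @ v

y = SingleHeadAttention(dim=96, tokens=49)(torch.randn(2, 49, 96))
print(y.shape)  # torch.Size([2, 49, 96]); 49 tokens = a 7 x 7 window
```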
| 597 |
+
{
|
| 598 |
+
"type": "text",
|
| 599 |
+
"text": "The computational complexity of self-attention grows quadratically with respect to the image size. To achieve computational efficiency, we leverage the advantages of CNNs and transformer and adopt the swim-transformer block [8] in our framework. The swim-transformer layer consists of two parts: local attention and global attention. Within the local attention, the calculation of self-attention is restricted to local regions where images patches are divided into non-overlapping local windows. Cross-window attention introduces connections between neighbors by sliding non-overlapping windows. The structure of swim-transformer block is presented in Fig. 2 which is composed of MLP, Layer Norm, window-based MSA and shifted-window MSA. The computation procedure of swim-transformer block is represented as follows:",
|
| 600 |
+
"bbox": [
|
| 601 |
+
73,
|
| 602 |
+
417,
|
| 603 |
+
491,
|
| 604 |
+
630
|
| 605 |
+
],
|
| 606 |
+
"page_idx": 3
|
| 607 |
+
},
|
| 608 |
+
{
|
| 609 |
+
"type": "equation",
|
| 610 |
+
"text": "\n$$\n\\begin{array}{l} \\hat{\\mathbf{x}}^{l} = \\operatorname{W-MSA}\\left(\\operatorname{LN}\\left(\\mathbf{x}^{l-1}\\right)\\right) + \\mathbf{x}^{l-1}, \\\\ \\mathbf{x}^{l} = \\operatorname{MLP}\\left(\\operatorname{LN}\\left(\\hat{\\mathbf{x}}^{l}\\right)\\right) + \\hat{\\mathbf{x}}^{l}, \\\\ \\hat{\\mathbf{x}}^{l+1} = \\operatorname{SW-MSA}\\left(\\operatorname{LN}\\left(\\mathbf{x}^{l}\\right)\\right) + \\mathbf{x}^{l}, \\\\ \\mathbf{x}^{l+1} = \\operatorname{MLP}\\left(\\operatorname{LN}\\left(\\hat{\\mathbf{x}}^{l+1}\\right)\\right) + \\hat{\\mathbf{x}}^{l+1} \\end{array} \\tag{5}\n$$\n",
|
| 611 |
+
"text_format": "latex",
|
| 612 |
+
"bbox": [
|
| 613 |
+
153,
|
| 614 |
+
633,
|
| 615 |
+
490,
|
| 616 |
+
713
|
| 617 |
+
],
|
| 618 |
+
"page_idx": 3
|
| 619 |
+
},
|
| 620 |
+
{
|
| 621 |
+
"type": "text",
|
| 622 |
+
"text": "where W-MSA and SW-MSA refer to the local window and global shifted window multi-head self-attention, respectively. $\\mathbf{x}^{l - 1}$ denotes the feature of output from the previous layer. Then, the features will be sent into the window attention, W-MSA. There is a layer norm before both MLP and attention layer, and residual connections are applied to these modules. Between every two swim transformer blocks, there exists a patch merging operation that reduces the resolution of feature maps. The patch merging layer builds a hierarchical representation by gradually merging consecutive neighboring patches between successive transformer layers.",
|
| 623 |
+
"bbox": [
|
| 624 |
+
73,
|
| 625 |
+
717,
|
| 626 |
+
491,
|
| 627 |
+
883
|
| 628 |
+
],
|
| 629 |
+
"page_idx": 3
|
| 630 |
+
},
|
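The data flow of Eq. (5), pre-norm attention and MLP sub-blocks with residual connections that alternate W-MSA and SW-MSA, can be sketched as below. The attention and MLP modules are passed in as placeholders, so this only mirrors the equation rather than reproducing the authors' implementation.

```python
import torch
import torch.nn as nn

class SwinStylePair(nn.Module):
    """Data flow of Eq. (5): two pre-norm blocks with residual connections,
    the first using window attention (W-MSA) and the second shifted-window
    attention (SW-MSA). The sub-modules are supplied by the caller."""
    def __init__(self, dim, w_msa, sw_msa, mlp1, mlp2):
        super().__init__()
        self.n1, self.n2 = nn.LayerNorm(dim), nn.LayerNorm(dim)
        self.n3, self.n4 = nn.LayerNorm(dim), nn.LayerNorm(dim)
        self.w_msa, self.sw_msa, self.mlp1, self.mlp2 = w_msa, sw_msa, mlp1, mlp2

    def forward(self, x):
        x = self.w_msa(self.n1(x)) + x    # x_hat^l
        x = self.mlp1(self.n2(x)) + x     # x^l
        x = self.sw_msa(self.n3(x)) + x   # x_hat^{l+1}
        x = self.mlp2(self.n4(x)) + x     # x^{l+1}
        return x

# Placeholder sub-modules only check that the residual plumbing runs.
out = SwinStylePair(96, nn.Identity(), nn.Identity(), nn.Identity(), nn.Identity())(torch.randn(2, 49, 96))
print(out.shape)  # torch.Size([2, 49, 96])
```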
| 631 |
+
{
|
| 632 |
+
"type": "text",
|
| 633 |
+
"text": "Grasp Transformer Decoder. The decoder generates an executable grasping configuration that allows the end-effector to move to the corresponding positions. We transform the planar grasp detection problem into a pixel-level prediction.",
|
| 634 |
+
"bbox": [
|
| 635 |
+
73,
|
| 636 |
+
883,
|
| 637 |
+
491,
|
| 638 |
+
946
|
| 639 |
+
],
|
| 640 |
+
"page_idx": 3
|
| 641 |
+
},
|
| 642 |
+
{
|
| 643 |
+
"type": "text",
|
| 644 |
+
"text": "Three grasping heads are attached in parallel to the top of the decoder, including a grasp confidence head $Q$ , a gripper angle head $\\Theta$ , and a gripper width head $W$ . The output of each head is a heat map with the same size as the input visual image. The grasp confidence head outputs a value between 0 and 1, which indicates the probability of the successful grasping at each pixel point. Likewise, the gripper width and angle heads output the width and rotation angle of the gripper when grasping at the corresponding point in the image, respectively. We treat the grasping posture estimation as a regression problem and use our transformer model to learn a mapping $F: I \\to \\tilde{G}$ by minimizing the distances between the predicted grasping heatmaps $\\tilde{G}(Q, W, \\Theta)$ and the ground truth, where $I$ is the input data. The loss function is defined as follows:",
|
| 645 |
+
"bbox": [
|
| 646 |
+
501,
|
| 647 |
+
68,
|
| 648 |
+
921,
|
| 649 |
+
280
|
| 650 |
+
],
|
| 651 |
+
"page_idx": 3
|
| 652 |
+
},
|
| 653 |
+
{
|
| 654 |
+
"type": "equation",
|
| 655 |
+
"text": "\n$$\n\\mathcal {L} = \\sum_ {i} ^ {N} \\sum_ {m \\in \\{Q, W, \\Theta \\}} \\| \\tilde {G} _ {i} ^ {m} - L _ {i} ^ {m} \\| ^ {2} \\tag {6}\n$$\n",
|
| 656 |
+
"text_format": "latex",
|
| 657 |
+
"bbox": [
|
| 658 |
+
594,
|
| 659 |
+
286,
|
| 660 |
+
919,
|
| 661 |
+
330
|
| 662 |
+
],
|
| 663 |
+
"page_idx": 3
|
| 664 |
+
},
|
| 665 |
+
{
|
| 666 |
+
"type": "text",
|
| 667 |
+
"text": "where $N$ is the number of samples and $L_{i}$ is the corresponding label.",
|
| 668 |
+
"bbox": [
|
| 669 |
+
503,
|
| 670 |
+
335,
|
| 671 |
+
919,
|
| 672 |
+
364
|
| 673 |
+
],
|
| 674 |
+
"page_idx": 3
|
| 675 |
+
},
|
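A minimal sketch of the loss in Eq. (6), assuming the predictions and labels for the three heads are given as dictionaries of (B, H, W) tensors; the key names are invented for illustration.

```python
import torch

def grasp_loss(pred, target):
    """Eq. (6): summed squared error over the quality, width, and angle heads."""
    return sum(torch.sum((pred[k] - target[k]) ** 2)
               for k in ("quality", "width", "angle"))

p = {k: torch.rand(4, 224, 224) for k in ("quality", "width", "angle")}
t = {k: torch.rand(4, 224, 224) for k in ("quality", "width", "angle")}
print(grasp_loss(p, t).item())
```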
| 676 |
+
{
|
| 677 |
+
"type": "text",
|
| 678 |
+
"text": "The ultimate grasp location is the position with the highest grasp confidence by retrieving the grasp quality heatmap, defined as:",
|
| 679 |
+
"bbox": [
|
| 680 |
+
503,
|
| 681 |
+
366,
|
| 682 |
+
919,
|
| 683 |
+
410
|
| 684 |
+
],
|
| 685 |
+
"page_idx": 3
|
| 686 |
+
},
|
| 687 |
+
{
|
| 688 |
+
"type": "equation",
|
| 689 |
+
"text": "\n$$\n\\mathcal{G}_{pos}^{*} = \\operatorname{argmax}_{pos} Q, \\tag{7}\n$$\n",
|
| 690 |
+
"text_format": "latex",
|
| 691 |
+
"bbox": [
|
| 692 |
+
633,
|
| 693 |
+
412,
|
| 694 |
+
919,
|
| 695 |
+
429
|
| 696 |
+
],
|
| 697 |
+
"page_idx": 3
|
| 698 |
+
},
|
| 699 |
+
{
|
| 700 |
+
"type": "text",
|
| 701 |
+
"text": "where $Q$ is the grasp confidence map. Afterward, we extract the predicted angle $\\theta$ and width $w$ of the corresponding position from the angle and width heatmaps.",
|
| 702 |
+
"bbox": [
|
| 703 |
+
501,
|
| 704 |
+
436,
|
| 705 |
+
919,
|
| 706 |
+
481
|
| 707 |
+
],
|
| 708 |
+
"page_idx": 3
|
| 709 |
+
},
|
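The retrieval step in Eq. (7), together with reading the angle and width at the selected pixel, can be sketched as follows; this is an illustrative helper, not the authors' code.

```python
import numpy as np

def decode_grasp(quality, angle, width):
    """Eq. (7): pick the pixel with the highest grasp confidence and read
    the predicted angle and width at that location. Inputs are (H, W)
    arrays; in practice the quality map is often smoothed first
    (a common choice, not stated in this excerpt)."""
    row, col = np.unravel_index(np.argmax(quality), quality.shape)
    return (row, col), angle[row, col], width[row, col]

q, a, w = np.random.rand(224, 224), np.random.rand(224, 224), np.random.rand(224, 224)
print(decode_grasp(q, a, w))
```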
| 710 |
+
{
|
| 711 |
+
"type": "text",
|
| 712 |
+
"text": "In our grasp detection decoder, we also adopt swim transformer block to reduce the computational complexity. Swin attention aggregates multi-scale features and builds a hierarchical representation. And skip-connections merge the features learned at these different stages for further fusion to produce a better grasp posture. Analogous to U-net [19], skip-connections are implemented by concatenating features from the $i$ -th layer of the encoder directly into the layer $i$ -th in the decoder. In the decoding phase, following the patch expanding layer, the concatenated features are taken as input to the next attention block stage. Simultaneously, we can learn the relationship between the fused features where the features in the encoder can be used as queries and keys to interact with the counterparts in the decoder for self-attention computing.",
|
| 713 |
+
"bbox": [
|
| 714 |
+
501,
|
| 715 |
+
482,
|
| 716 |
+
921,
|
| 717 |
+
691
|
| 718 |
+
],
|
| 719 |
+
"page_idx": 3
|
| 720 |
+
},
|
| 721 |
+
{
|
| 722 |
+
"type": "text",
|
| 723 |
+
"text": "A benefit of our pixel-level grasp representation is that only a single forward propagation is required to obtain the best grasp postures within the global visual scene, avoiding the need to generate multiple grasp candidates and saving the computation expense.",
|
| 724 |
+
"bbox": [
|
| 725 |
+
501,
|
| 726 |
+
693,
|
| 727 |
+
921,
|
| 728 |
+
768
|
| 729 |
+
],
|
| 730 |
+
"page_idx": 3
|
| 731 |
+
},
|
| 732 |
+
{
|
| 733 |
+
"type": "text",
|
| 734 |
+
"text": "IV. EXPERIMENTS",
|
| 735 |
+
"text_level": 1,
|
| 736 |
+
"bbox": [
|
| 737 |
+
643,
|
| 738 |
+
786,
|
| 739 |
+
781,
|
| 740 |
+
799
|
| 741 |
+
],
|
| 742 |
+
"page_idx": 3
|
| 743 |
+
},
|
| 744 |
+
{
|
| 745 |
+
"type": "text",
|
| 746 |
+
"text": "In this section, extensive experiments are carried out to validate the performance of the proposed TF-Grasp method. We verify the performance of TF-Grasp on two popular grasping datasets and then evaluate its effectiveness on a real Franka Panda robotic manipulator.",
|
| 747 |
+
"bbox": [
|
| 748 |
+
501,
|
| 749 |
+
806,
|
| 750 |
+
919,
|
| 751 |
+
880
|
| 752 |
+
],
|
| 753 |
+
"page_idx": 3
|
| 754 |
+
},
|
| 755 |
+
{
|
| 756 |
+
"type": "text",
|
| 757 |
+
"text": "The goal of this section tends to answer the following questions:",
|
| 758 |
+
"bbox": [
|
| 759 |
+
503,
|
| 760 |
+
881,
|
| 761 |
+
919,
|
| 762 |
+
911
|
| 763 |
+
],
|
| 764 |
+
"page_idx": 3
|
| 765 |
+
},
|
| 766 |
+
{
|
| 767 |
+
"type": "text",
|
| 768 |
+
"text": "- Is the transformer-based grasp detection model better than CNN-based models?",
|
| 769 |
+
"bbox": [
|
| 770 |
+
519,
|
| 771 |
+
914,
|
| 772 |
+
919,
|
| 773 |
+
943
|
| 774 |
+
],
|
| 775 |
+
"page_idx": 3
|
| 776 |
+
},
|
| 777 |
+
{
|
| 778 |
+
"type": "page_number",
|
| 779 |
+
"text": "4",
|
| 780 |
+
"bbox": [
|
| 781 |
+
76,
|
| 782 |
+
31,
|
| 783 |
+
86,
|
| 784 |
+
39
|
| 785 |
+
],
|
| 786 |
+
"page_idx": 3
|
| 787 |
+
},
|
| 788 |
+
{
|
| 789 |
+
"type": "header",
|
| 790 |
+
"text": "IEEE ROBOTICS AND AUTOMATION LETTERS. PREPRINT VERSION. JUNE 2022",
|
| 791 |
+
"bbox": [
|
| 792 |
+
496,
|
| 793 |
+
29,
|
| 794 |
+
919,
|
| 795 |
+
40
|
| 796 |
+
],
|
| 797 |
+
"page_idx": 3
|
| 798 |
+
},
|
| 799 |
+
{
|
| 800 |
+
"type": "image",
|
| 801 |
+
"img_path": "images/0582bfc584f37c6eeef228f621cb4ebf1c2042322a7b7ce2afdd1225d2dda0f7.jpg",
|
| 802 |
+
"image_caption": [],
|
| 803 |
+
"image_footnote": [],
|
| 804 |
+
"bbox": [
|
| 805 |
+
84,
|
| 806 |
+
70,
|
| 807 |
+
189,
|
| 808 |
+
151
|
| 809 |
+
],
|
| 810 |
+
"page_idx": 4
|
| 811 |
+
},
|
| 812 |
+
{
|
| 813 |
+
"type": "image",
|
| 814 |
+
"img_path": "images/c358db5a7e04807132f07d97f83c0ec16130b86d5b177ac2f5fc1dbbedb6d391.jpg",
|
| 815 |
+
"image_caption": [],
|
| 816 |
+
"image_footnote": [],
|
| 817 |
+
"bbox": [
|
| 818 |
+
196,
|
| 819 |
+
70,
|
| 820 |
+
303,
|
| 821 |
+
151
|
| 822 |
+
],
|
| 823 |
+
"page_idx": 4
|
| 824 |
+
},
|
| 825 |
+
{
|
| 826 |
+
"type": "image",
|
| 827 |
+
"img_path": "images/aa43bb63c295a26678e48824719115643525c54440bff1eb858a984cc352e121.jpg",
|
| 828 |
+
"image_caption": [],
|
| 829 |
+
"image_footnote": [],
|
| 830 |
+
"bbox": [
|
| 831 |
+
310,
|
| 832 |
+
70,
|
| 833 |
+
418,
|
| 834 |
+
151
|
| 835 |
+
],
|
| 836 |
+
"page_idx": 4
|
| 837 |
+
},
|
| 838 |
+
{
|
| 839 |
+
"type": "image",
|
| 840 |
+
"img_path": "images/f20303f29e7d68bb4fd7610e8c8b57b5a61c80e464e67e0ae9c2a5ab191a1782.jpg",
|
| 841 |
+
"image_caption": [],
|
| 842 |
+
"image_footnote": [],
|
| 843 |
+
"bbox": [
|
| 844 |
+
424,
|
| 845 |
+
70,
|
| 846 |
+
531,
|
| 847 |
+
151
|
| 848 |
+
],
|
| 849 |
+
"page_idx": 4
|
| 850 |
+
},
|
| 851 |
+
{
|
| 852 |
+
"type": "image",
|
| 853 |
+
"img_path": "images/3e5fa0d507c199916d25603cf15ed4991e6d3cf42c3487d4395c5b9cb1585f3c.jpg",
|
| 854 |
+
"image_caption": [],
|
| 855 |
+
"image_footnote": [],
|
| 856 |
+
"bbox": [
|
| 857 |
+
537,
|
| 858 |
+
70,
|
| 859 |
+
643,
|
| 860 |
+
151
|
| 861 |
+
],
|
| 862 |
+
"page_idx": 4
|
| 863 |
+
},
|
| 864 |
+
{
|
| 865 |
+
"type": "image",
|
| 866 |
+
"img_path": "images/a58d21f542940f3d94083b095c62ea52f768e10deac23591f599ced17fe0a77e.jpg",
|
| 867 |
+
"image_caption": [],
|
| 868 |
+
"image_footnote": [],
|
| 869 |
+
"bbox": [
|
| 870 |
+
651,
|
| 871 |
+
70,
|
| 872 |
+
758,
|
| 873 |
+
151
|
| 874 |
+
],
|
| 875 |
+
"page_idx": 4
|
| 876 |
+
},
|
| 877 |
+
{
|
| 878 |
+
"type": "image",
|
| 879 |
+
"img_path": "images/6ed5489324d5271172dc388abf1559c8386801023a00c9b2d8e9b6587907e7b7.jpg",
|
| 880 |
+
"image_caption": [],
|
| 881 |
+
"image_footnote": [],
|
| 882 |
+
"bbox": [
|
| 883 |
+
764,
|
| 884 |
+
70,
|
| 885 |
+
870,
|
| 886 |
+
151
|
| 887 |
+
],
|
| 888 |
+
"page_idx": 4
|
| 889 |
+
},
|
| 890 |
+
{
|
| 891 |
+
"type": "image",
|
| 892 |
+
"img_path": "images/6007e61d135617063e76237e987b2a75b94ec7fa16c734311d02aa46a9a05399.jpg",
|
| 893 |
+
"image_caption": [
|
| 894 |
+
"Fig. 3. The visualized attention heatmaps learned by our method, which show that our transformer model can learn the concepts beneficial for grasping."
|
| 895 |
+
],
|
| 896 |
+
"image_footnote": [],
|
| 897 |
+
"bbox": [
|
| 898 |
+
84,
|
| 899 |
+
165,
|
| 900 |
+
189,
|
| 901 |
+
247
|
| 902 |
+
],
|
| 903 |
+
"page_idx": 4
|
| 904 |
+
},
|
| 905 |
+
{
|
| 906 |
+
"type": "image",
|
| 907 |
+
"img_path": "images/cd45955d651fdea8def9915dd0fc9e5f5dbd0fcd0e0d3f4298a1a18a6dd21476.jpg",
|
| 908 |
+
"image_caption": [],
|
| 909 |
+
"image_footnote": [],
|
| 910 |
+
"bbox": [
|
| 911 |
+
197,
|
| 912 |
+
165,
|
| 913 |
+
303,
|
| 914 |
+
247
|
| 915 |
+
],
|
| 916 |
+
"page_idx": 4
|
| 917 |
+
},
|
| 918 |
+
{
|
| 919 |
+
"type": "image",
|
| 920 |
+
"img_path": "images/a510e113bd107df70c8dec4f10dbc0f342ab07e5be461412cadf3d9b2966ab11.jpg",
|
| 921 |
+
"image_caption": [],
|
| 922 |
+
"image_footnote": [],
|
| 923 |
+
"bbox": [
|
| 924 |
+
310,
|
| 925 |
+
165,
|
| 926 |
+
416,
|
| 927 |
+
247
|
| 928 |
+
],
|
| 929 |
+
"page_idx": 4
|
| 930 |
+
},
|
| 931 |
+
{
|
| 932 |
+
"type": "image",
|
| 933 |
+
"img_path": "images/b370f394fe8ab9a61b48d5b562da092a0cc12e9b7e464205d507749ddd35c26c.jpg",
|
| 934 |
+
"image_caption": [],
|
| 935 |
+
"image_footnote": [],
|
| 936 |
+
"bbox": [
|
| 937 |
+
424,
|
| 938 |
+
166,
|
| 939 |
+
531,
|
| 940 |
+
247
|
| 941 |
+
],
|
| 942 |
+
"page_idx": 4
|
| 943 |
+
},
|
| 944 |
+
{
|
| 945 |
+
"type": "image",
|
| 946 |
+
"img_path": "images/e2ce556d39855f8c3a172c420a5d29367777ba86dd7c538d6ebb2b173adf5e29.jpg",
|
| 947 |
+
"image_caption": [],
|
| 948 |
+
"image_footnote": [],
|
| 949 |
+
"bbox": [
|
| 950 |
+
537,
|
| 951 |
+
166,
|
| 952 |
+
643,
|
| 953 |
+
247
|
| 954 |
+
],
|
| 955 |
+
"page_idx": 4
|
| 956 |
+
},
|
| 957 |
+
{
|
| 958 |
+
"type": "image",
|
| 959 |
+
"img_path": "images/18555efc00025965a0999303a537f1fd8a55c1ae1e2a0161c1ec151fa768ec31.jpg",
|
| 960 |
+
"image_caption": [],
|
| 961 |
+
"image_footnote": [],
|
| 962 |
+
"bbox": [
|
| 963 |
+
651,
|
| 964 |
+
166,
|
| 965 |
+
756,
|
| 966 |
+
247
|
| 967 |
+
],
|
| 968 |
+
"page_idx": 4
|
| 969 |
+
},
|
| 970 |
+
{
|
| 971 |
+
"type": "image",
|
| 972 |
+
"img_path": "images/017f87b2b2198540951485e2cc1bfcedaa2f359a5843c428de40dec3a6204aef.jpg",
|
| 973 |
+
"image_caption": [],
|
| 974 |
+
"image_footnote": [],
|
| 975 |
+
"bbox": [
|
| 976 |
+
764,
|
| 977 |
+
166,
|
| 978 |
+
870,
|
| 979 |
+
247
|
| 980 |
+
],
|
| 981 |
+
"page_idx": 4
|
| 982 |
+
},
|
| 983 |
+
{
|
| 984 |
+
"type": "text",
|
| 985 |
+
"text": "- If true, what makes the transformer-based grasp detection model outperforming others?",
|
| 986 |
+
"bbox": [
|
| 987 |
+
89,
|
| 988 |
+
301,
|
| 989 |
+
491,
|
| 990 |
+
330
|
| 991 |
+
],
|
| 992 |
+
"page_idx": 4
|
| 993 |
+
},
|
| 994 |
+
{
|
| 995 |
+
"type": "text",
|
| 996 |
+
"text": "A. Datasets and Experiment Setup",
|
| 997 |
+
"text_level": 1,
|
| 998 |
+
"bbox": [
|
| 999 |
+
73,
|
| 1000 |
+
351,
|
| 1001 |
+
313,
|
| 1002 |
+
367
|
| 1003 |
+
],
|
| 1004 |
+
"page_idx": 4
|
| 1005 |
+
},
|
| 1006 |
+
{
|
| 1007 |
+
"type": "text",
|
| 1008 |
+
"text": "The Cornell grasping data [14] is a multi-object dataset that contains 885 images. The resolution of each image is $640 \\times 480$ . The whole dataset is relatively small and we use various data augmentation techniques such as rotation, zooms, and random cropping to avoid overfitting. We then validate the performance of TF-Grasp on the Jacquard dataset [21] which is generated in a simulator via CAD models. The Jacquard dataset is fairly large, containing over 50k images of 11k object categories, and there are over 1 million annotated grasp labels.",
|
| 1009 |
+
"bbox": [
|
| 1010 |
+
73,
|
| 1011 |
+
371,
|
| 1012 |
+
490,
|
| 1013 |
+
518
|
| 1014 |
+
],
|
| 1015 |
+
"page_idx": 4
|
| 1016 |
+
},
|
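As an illustration of the kind of augmentation pipeline this implies, the following is a minimal sketch using torchvision. The rotation range, crop scale, and output size here are assumptions for illustration, not the values reported by the authors, and in grasp detection the grasp rectangles must be transformed with the same parameters as the image, which this off-the-shelf image-only pipeline does not do.

```python
import torchvision.transforms as T

# Illustrative image-side augmentation only (assumed parameters, not the paper's):
# a random in-plane rotation plus a random zoom/crop back to the network input size.
augment = T.Compose([
    T.RandomRotation(degrees=180),
    T.RandomResizedCrop(size=224, scale=(0.8, 1.0)),
])
```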
| 1017 |
+
{
|
| 1018 |
+
"type": "text",
|
| 1019 |
+
"text": "Evaluation Metric. A predicted grasp is regarded as correct if the following conditions are satisfied.",
|
| 1020 |
+
"bbox": [
|
| 1021 |
+
73,
|
| 1022 |
+
522,
|
| 1023 |
+
488,
|
| 1024 |
+
551
|
| 1025 |
+
],
|
| 1026 |
+
"page_idx": 4
|
| 1027 |
+
},
|
| 1028 |
+
{
|
| 1029 |
+
"type": "list",
|
| 1030 |
+
"sub_type": "text",
|
| 1031 |
+
"list_items": [
|
| 1032 |
+
"i) The discrepancy between the predicted grasping angle and the ground truth is within $30^{\\circ}$ .",
|
| 1033 |
+
"ii) The Jaccard index defined in Eq. (8) is greater than 0.25."
|
| 1034 |
+
],
|
| 1035 |
+
"bbox": [
|
| 1036 |
+
73,
|
| 1037 |
+
551,
|
| 1038 |
+
488,
|
| 1039 |
+
598
|
| 1040 |
+
],
|
| 1041 |
+
"page_idx": 4
|
| 1042 |
+
},
|
| 1043 |
+
{
|
| 1044 |
+
"type": "equation",
|
| 1045 |
+
"text": "\n$$\nJ \\left(\\mathcal {R} ^ {*}, \\mathcal {R}\\right) = \\frac {\\left| \\mathcal {R} ^ {*} \\cap \\mathcal {R} \\right|}{\\left| \\mathcal {R} ^ {*} \\cup \\mathcal {R} \\right|} \\tag {8}\n$$\n",
|
| 1046 |
+
"text_format": "latex",
|
| 1047 |
+
"bbox": [
|
| 1048 |
+
200,
|
| 1049 |
+
604,
|
| 1050 |
+
488,
|
| 1051 |
+
637
|
| 1052 |
+
],
|
| 1053 |
+
"page_idx": 4
|
| 1054 |
+
},
|
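A minimal sketch of how these two conditions can be checked is given below, assuming the grasp rectangles have already been rasterized into binary masks; the function names and the mask-based overlap computation are illustrative, not the authors' evaluation code.

```python
import numpy as np

def jaccard_index(pred_mask: np.ndarray, gt_mask: np.ndarray) -> float:
    """Eq. (8): |R* intersect R| / |R* union R| for two binary rectangle masks."""
    union = np.logical_or(pred_mask, gt_mask).sum()
    if union == 0:
        return 0.0
    return float(np.logical_and(pred_mask, gt_mask).sum()) / float(union)

def is_correct_grasp(pred_angle_deg, gt_angle_deg, pred_mask, gt_mask) -> bool:
    # i) angle discrepancy within 30 degrees (grasp angles are 180-degree periodic)
    diff = abs(pred_angle_deg - gt_angle_deg) % 180.0
    angle_ok = min(diff, 180.0 - diff) <= 30.0
    # ii) Jaccard index greater than 0.25
    return angle_ok and jaccard_index(pred_mask, gt_mask) > 0.25
```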
| 1055 |
+
{
|
| 1056 |
+
"type": "text",
|
| 1057 |
+
"text": "TF-Grasp takes a $224 \\times 224$ image as input and outputs three pixel-wise maps with the same resolution as the input. The input is normalized by subtracting its mean and dividing the standard deviation. We follow the common strategy to train the grasp transformer. Both the encoder and decoder contain four swim-attention blocks and each consists of 1, 2, 4, 8 attention heads. The window size is 7. At each training step, a batch of samples is randomly sampled from the training set and we use the ground truth as the target values to train our neural network. Concretely, we utilize the mean squared error as the loss function and apply AdamW [28] as the optimizer. The default size of batch size is set to 64. The patch partition layer is implemented by convolutions with kernels of $p \\times p$ and a stride $p$ . In our implementation, $p$ is set to 4. In order to preserve a one-to-one mapping of the angle $\\Theta$ between $[- \\frac{\\pi}{2}, \\frac{\\pi}{2}]$ , we decode the learning of angle into two components, $\\sin(2\\Theta)$ and $\\cos(2\\Theta)$ . In this way, the final angle is obtained by $\\arctan \\left(\\frac{\\sin 2\\Theta}{\\cos 2\\Theta}\\right)/2$ . TF-Grasp is implemented by PyTorch, and the entire grasp detection system is running on the Ubuntu 18.04 desktop with Intel Core i9 CPU and NVIDIA 3090 GPU.",
|
| 1058 |
+
"bbox": [
|
| 1059 |
+
73,
|
| 1060 |
+
642,
|
| 1061 |
+
491,
|
| 1062 |
+
945
|
| 1063 |
+
],
|
| 1064 |
+
"page_idx": 4
|
| 1065 |
+
},
|
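The sin/cos angle encoding and its inverse can be written compactly; a small PyTorch sketch follows. Tensor names are illustrative, and atan2 is used for numerical robustness, which agrees with the arctan ratio above wherever cos(2*theta) is nonzero.

```python
import torch

def encode_angle(theta: torch.Tensor):
    """Map an angle in [-pi/2, pi/2] to the pair (sin 2*theta, cos 2*theta)."""
    return torch.sin(2.0 * theta), torch.cos(2.0 * theta)

def decode_angle(sin_2theta: torch.Tensor, cos_2theta: torch.Tensor) -> torch.Tensor:
    """Recover theta = arctan(sin 2*theta / cos 2*theta) / 2 from the two maps."""
    return 0.5 * torch.atan2(sin_2theta, cos_2theta)
```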
| 1066 |
+
{
|
| 1067 |
+
"type": "table",
|
| 1068 |
+
"img_path": "images/5b38207399e7dff0d76aeb21b9bb36019eecd6193874ea9cc7e7b371b3f5efd3.jpg",
|
| 1069 |
+
"table_caption": [
|
| 1070 |
+
"TABLEI THE ACCURACY ON CORNELL GRASPING DATASET."
|
| 1071 |
+
],
|
| 1072 |
+
"table_footnote": [],
|
| 1073 |
+
"table_body": "<table><tr><td rowspan=\"2\">Method</td><td rowspan=\"2\">Input</td><td colspan=\"2\">Accuracy(%)</td><td rowspan=\"2\">Time (ms)</td></tr><tr><td>IW</td><td>OW</td></tr><tr><td>Fast Search [20]</td><td>RGB-D</td><td>60.5</td><td>58.3</td><td>5000</td></tr><tr><td>GG-CNN [17]</td><td>D</td><td>73.0</td><td>69.0</td><td>19</td></tr><tr><td>SAE [14]</td><td>RGB-D</td><td>73.9</td><td>75.6</td><td>1350</td></tr><tr><td>Two-stage closed-loop [22]</td><td>RGB-D</td><td>85.3</td><td>-</td><td>140</td></tr><tr><td>AlexNet, MultiGrasp [5]</td><td>RGB-D</td><td>88.0</td><td>87.1</td><td>76</td></tr><tr><td>STEM-CaRFs [23]</td><td>RGB-D</td><td>88.2</td><td>87.5</td><td>-</td></tr><tr><td>GRPN [24]</td><td>RGB</td><td>88.7</td><td>-</td><td>200</td></tr><tr><td>ResNet-50x2 [3]</td><td>RGB-D</td><td>89.2</td><td>88.9</td><td>103</td></tr><tr><td>GraspNet [12]</td><td>RGB-D</td><td>90.2</td><td>90.6</td><td>24</td></tr><tr><td>ZF-net [25]</td><td>RGB-D</td><td>93.2</td><td>89.1</td><td>-</td></tr><tr><td>E2E-net [26]</td><td>RGB</td><td>98.2</td><td>-</td><td>63</td></tr><tr><td>GR-ConvNet [27]</td><td>D</td><td>93.2</td><td>94.3</td><td>19</td></tr><tr><td>GR-ConvNet [27]</td><td>RGB</td><td>96.6</td><td>95.5</td><td>19</td></tr><tr><td>GR-ConvNet [27]</td><td>RGB-D</td><td>97.7</td><td>96.6</td><td>20</td></tr><tr><td rowspan=\"3\">TF-Grasp</td><td>D</td><td>95.2</td><td>94.9</td><td>41.1</td></tr><tr><td>RGB</td><td>96.78</td><td>95.0</td><td>41.3</td></tr><tr><td>RGB-D</td><td>97.99</td><td>96.7</td><td>41.6</td></tr></table>",
|
| 1074 |
+
"bbox": [
|
| 1075 |
+
506,
|
| 1076 |
+
334,
|
| 1077 |
+
921,
|
| 1078 |
+
588
|
| 1079 |
+
],
|
| 1080 |
+
"page_idx": 4
|
| 1081 |
+
},
|
| 1082 |
+
{
|
| 1083 |
+
"type": "text",
|
| 1084 |
+
"text": "B. Experimental Results and Analysis",
|
| 1085 |
+
"text_level": 1,
|
| 1086 |
+
"bbox": [
|
| 1087 |
+
504,
|
| 1088 |
+
617,
|
| 1089 |
+
764,
|
| 1090 |
+
633
|
| 1091 |
+
],
|
| 1092 |
+
"page_idx": 4
|
| 1093 |
+
},
|
| 1094 |
+
{
|
| 1095 |
+
"type": "text",
|
| 1096 |
+
"text": "To show its effectiveness, our approach is compared with a number of baselines under the same experimental conditions, i.e., evaluation metric. The results of image-wise (IW) and object-wise (OW) settings in the public Cornell grasping dataset are present in Table I. Since the Cornell dataset is relatively small, we follow the setting of previous works [3], [5], [14] by adopting a five-fold cross-validation. Also, to make the comparison fair and comprehensive, the input modalities and running time are considered. For all compared baselines, we use the data reported in their original papers. Taking as input only the depth information, our TF-Grasp achieves an accuracy of $95.2\\%$ which is competitive to the state-of-the-art. When using both depth and RGB data, our model obtains $97.99\\%$ accuracy. For Table II, we use $90\\%$ data of the Jacquard dataset as the training set and the remaining $10\\%$ as the validation set. In addition, our model takes about 41ms to process a single image using the Intel Core i9-10900X CPU processor, which is competitive with the state-of-art approaches and basically meets the real-time requirements. The transformer grasping model exhibits a better accuracy",
|
| 1097 |
+
"bbox": [
|
| 1098 |
+
501,
|
| 1099 |
+
642,
|
| 1100 |
+
921,
|
| 1101 |
+
945
|
| 1102 |
+
],
|
| 1103 |
+
"page_idx": 4
|
| 1104 |
+
},
|
| 1105 |
+
{
|
| 1106 |
+
"type": "header",
|
| 1107 |
+
"text": "WANG et al.: WHEN TRANSFORMER MEETS ROBOTIC GRASPING",
|
| 1108 |
+
"bbox": [
|
| 1109 |
+
76,
|
| 1110 |
+
29,
|
| 1111 |
+
426,
|
| 1112 |
+
40
|
| 1113 |
+
],
|
| 1114 |
+
"page_idx": 4
|
| 1115 |
+
},
|
| 1116 |
+
{
|
| 1117 |
+
"type": "page_number",
|
| 1118 |
+
"text": "5",
|
| 1119 |
+
"bbox": [
|
| 1120 |
+
911,
|
| 1121 |
+
30,
|
| 1122 |
+
919,
|
| 1123 |
+
40
|
| 1124 |
+
],
|
| 1125 |
+
"page_idx": 4
|
| 1126 |
+
},
|
| 1127 |
+
{
|
| 1128 |
+
"type": "table",
|
| 1129 |
+
"img_path": "images/6fb0d06885f5c9b2637d297c75b2fbc22821acb0afdd7fdda6a5f6cc8fe0b808.jpg",
|
| 1130 |
+
"table_caption": [
|
| 1131 |
+
"TABLE II THE ACCURACY ON JACQUARD GRASPING DATASET."
|
| 1132 |
+
],
|
| 1133 |
+
"table_footnote": [],
|
| 1134 |
+
"table_body": "<table><tr><td>Authors</td><td>Method</td><td>Input</td><td>Accuracy (%)</td></tr><tr><td>Depierre [21]</td><td>Jacquard</td><td>RGB-D</td><td>74.2</td></tr><tr><td>Morrison [17]</td><td>GG-CNN2</td><td>D</td><td>84</td></tr><tr><td>Zhou [29]</td><td>FCGN, ResNet-101</td><td>RGB</td><td>91.8</td></tr><tr><td>Alexandre [16]</td><td>GQ-STN</td><td>D</td><td>70.8</td></tr><tr><td>Zhang [11]</td><td>ROI-GD</td><td>RGB</td><td>90.4</td></tr><tr><td>Stefan [26]</td><td>Det Seg</td><td>RGB</td><td>92.59</td></tr><tr><td>Stefan [26]</td><td>Det Seg Refine</td><td>RGB</td><td>92.95</td></tr><tr><td>Kumra [27]</td><td>GR-ConvNet</td><td>D</td><td>93.7</td></tr><tr><td>Kumra [27]</td><td>GR-ConvNet</td><td>RGB</td><td>91.8</td></tr><tr><td>Kumra [27]</td><td>GR-ConvNet</td><td>RGB-D</td><td>94.6</td></tr><tr><td rowspan=\"3\">Our</td><td>TF-Grasp</td><td>D</td><td>93.1</td></tr><tr><td>TF-Grasp</td><td>RGB</td><td>93.57</td></tr><tr><td>TF-Grasp</td><td>RGB-D</td><td>94.6</td></tr></table>",
|
| 1135 |
+
"bbox": [
|
| 1136 |
+
88,
|
| 1137 |
+
104,
|
| 1138 |
+
473,
|
| 1139 |
+
267
|
| 1140 |
+
],
|
| 1141 |
+
"page_idx": 5
|
| 1142 |
+
},
|
| 1143 |
+
{
|
| 1144 |
+
"type": "text",
|
| 1145 |
+
"text": "on both datasets compared to conventional CNN models. Our proposed approach achieves a higher accuracy of $94.6\\%$ which is on-par or superior to previous methods. The results on the Cornell and Jacquard datasets all indicate that the model with the attention mechanism is more suitable for visual grasping tasks.",
|
| 1146 |
+
"bbox": [
|
| 1147 |
+
78,
|
| 1148 |
+
306,
|
| 1149 |
+
488,
|
| 1150 |
+
395
|
| 1151 |
+
],
|
| 1152 |
+
"page_idx": 5
|
| 1153 |
+
},
|
| 1154 |
+
{
|
| 1155 |
+
"type": "text",
|
| 1156 |
+
"text": "Despite the fact that our model is trained on a single object dataset, it can be well adapted to multi-object environments with the help of attention mechanisms. In addition, to evaluate the advantages of the transformer versus CNNs for visual grasping tasks, we use the original convolution layers, residual layers, and our transformer as feature extractors to test detection accuracy on different objects on the Cornell dataset. We apply an object-wise split to the Cornell dataset and Fig. 5 shows the detection accuracy of objects not seen during the training phase. All objects are subsets of the Cornell dataset and are evaluated 5 times. All models shown in Fig. 5 employ an encoder-decoder architecture with 4 stages in order to guarantee a fair comparison, where the original-conv is a fully convolutional neural network and resnet-conv is to replace the original convolution layer with the residual block. The result of different models is shown in Fig. 5. Note that the transformer outperforms original convolutions on all selected objects and is marginally better or on-par with the residual network.",
|
| 1157 |
+
"bbox": [
|
| 1158 |
+
78,
|
| 1159 |
+
398,
|
| 1160 |
+
488,
|
| 1161 |
+
684
|
| 1162 |
+
],
|
| 1163 |
+
"page_idx": 5
|
| 1164 |
+
},
|
| 1165 |
+
{
|
| 1166 |
+
"type": "text",
|
| 1167 |
+
"text": "These results demonstrate that the transformer improves robotic grasp detection. We conjecture that prior methods that rely on local operations of the convolution layers might ignore the dependencies between long-range pixels. Instead, our approach leverages the attention mechanism to exploit both local and global information and integrates features that are useful for grasping. To better demonstrate whether the transformer-based grasping model can model the relationships between objects and across the scene, we present the multi-object grasping results and grasping quality heatmaps of the transformer and CNN in Fig. 4. Our aim is to verify that the transformer is preferred over CNN for visual grasping tasks and is better at capturing global and local information. From Fig. 4, we can see that the grasp rectangles predicted by CNN have the right grasp position in most cases, but the predicted gripper angle and width are often not appropriate. In some cases, CNN even generates grasping rectangles in the",
|
| 1168 |
+
"bbox": [
|
| 1169 |
+
78,
|
| 1170 |
+
688,
|
| 1171 |
+
488,
|
| 1172 |
+
943
|
| 1173 |
+
],
|
| 1174 |
+
"page_idx": 5
|
| 1175 |
+
},
|
| 1176 |
+
{
|
| 1177 |
+
"type": "text",
|
| 1178 |
+
"text": "background. With the attention mechanism, our transformer-based model is able to clearly identify the objects from the background. In the second row of Fig. 4, the grasping quality images show that the CNN-based approach can not identify the graspable area and consider the entire region of objects as a graspable zone with high success probabilities. Instead, as shown in the fourth row of Fig. 4, the transformer-based model is prone to capture the area that is easy to grasp due to its larger receptive field. For each attention block, the attention operation establishes the inter-element relationships through self-attention, and the subsequent multilayer-perceptron (MLP) module further models the inherent relation between each element. The layer normalization and residual connections that interleave these two operations keep the training stable and efficient. In contrast, in CNN, the receptive field of each convolutional kernel is limited. To build a larger receptive field, the model often needs to repeatedly stack convolutional layers to gain global and semantically rich features. However, such a method in general results in the loss of detailed feature information such as the position and shape information of objects that are essential for grasping tasks. Therefore, we exploit a transformer-based model which can better capture not only the global information but also detailed features (e.g., the position and shape information).",
|
| 1179 |
+
"bbox": [
|
| 1180 |
+
508,
|
| 1181 |
+
69,
|
| 1182 |
+
919,
|
| 1183 |
+
430
|
| 1184 |
+
],
|
| 1185 |
+
"page_idx": 5
|
| 1186 |
+
},
|
| 1187 |
+
{
|
| 1188 |
+
"type": "image",
|
| 1189 |
+
"img_path": "images/761a099d4341cbb5f509cb053d09a7efd5ccceb21a4c4f6f3ccc3eb3c688fae4.jpg",
|
| 1190 |
+
"image_caption": [
|
| 1191 |
+
"C. Visualization Analysis",
|
| 1192 |
+
"Fig. 5. The accuracy of different models as feature extractors on selected objects."
|
| 1193 |
+
],
|
| 1194 |
+
"image_footnote": [],
|
| 1195 |
+
"bbox": [
|
| 1196 |
+
570,
|
| 1197 |
+
494,
|
| 1198 |
+
856,
|
| 1199 |
+
659
|
| 1200 |
+
],
|
| 1201 |
+
"page_idx": 5
|
| 1202 |
+
},
|
| 1203 |
+
{
|
| 1204 |
+
"type": "text",
|
| 1205 |
+
"text": "To clarify why the transformer architecture is helpful for grasp detection tasks, we visualize the heatmaps of attention maps, detailed in Fig. 3. From these heat maps, we can discover that the self-attention modules can readily learn the area that is easy for grasping, such as the edges of objects, ignore irrelevant details, and pay more attention on the contour and shape of the objects. Meanwhile, the model focuses on more general characteristics rather than individual features. For example, for the chairs shown in Fig. 3, our method evaluates the edge of the chairs with a higher grasp quality. We further provide more concrete examples of real-world grasping, and the experimental results show that the attention mechanism is more likely to achieve a better understanding of the grasping scenario, generate more accurate grasping rectangles, and work well on both household and novel objects.",
|
| 1206 |
+
"bbox": [
|
| 1207 |
+
508,
|
| 1208 |
+
718,
|
| 1209 |
+
919,
|
| 1210 |
+
943
|
| 1211 |
+
],
|
| 1212 |
+
"page_idx": 5
|
| 1213 |
+
},
|
| 1214 |
+
{
|
| 1215 |
+
"type": "page_number",
|
| 1216 |
+
"text": "6",
|
| 1217 |
+
"bbox": [
|
| 1218 |
+
76,
|
| 1219 |
+
31,
|
| 1220 |
+
86,
|
| 1221 |
+
39
|
| 1222 |
+
],
|
| 1223 |
+
"page_idx": 5
|
| 1224 |
+
},
|
| 1225 |
+
{
|
| 1226 |
+
"type": "header",
|
| 1227 |
+
"text": "IEEE ROBOTICS AND AUTOMATION LETTERS. PREPRINT VERSION. JUNE 2022",
|
| 1228 |
+
"bbox": [
|
| 1229 |
+
496,
|
| 1230 |
+
30,
|
| 1231 |
+
919,
|
| 1232 |
+
40
|
| 1233 |
+
],
|
| 1234 |
+
"page_idx": 5
|
| 1235 |
+
},
|
| 1236 |
+
{
|
| 1237 |
+
"type": "image",
|
| 1238 |
+
"img_path": "images/01e44a4d008dea06d9308de9255807cc5e6af36e629a7724652614dca0a886a1.jpg",
|
| 1239 |
+
"image_caption": [
|
| 1240 |
+
"(a) Samples of generated rectangles predicted by CNN"
|
| 1241 |
+
],
|
| 1242 |
+
"image_footnote": [],
|
| 1243 |
+
"bbox": [
|
| 1244 |
+
130,
|
| 1245 |
+
70,
|
| 1246 |
+
867,
|
| 1247 |
+
138
|
| 1248 |
+
],
|
| 1249 |
+
"page_idx": 6
|
| 1250 |
+
},
|
| 1251 |
+
{
|
| 1252 |
+
"type": "image",
|
| 1253 |
+
"img_path": "images/f5c28d2878aa34d008ef29dec6fcff465e5c0e6fb2bc25baa948af5a400b00e2.jpg",
|
| 1254 |
+
"image_caption": [
|
| 1255 |
+
"(b) Predicted grasp quality heatmaps by CNN"
|
| 1256 |
+
],
|
| 1257 |
+
"image_footnote": [],
|
| 1258 |
+
"bbox": [
|
| 1259 |
+
129,
|
| 1260 |
+
166,
|
| 1261 |
+
867,
|
| 1262 |
+
224
|
| 1263 |
+
],
|
| 1264 |
+
"page_idx": 6
|
| 1265 |
+
},
|
| 1266 |
+
{
|
| 1267 |
+
"type": "image",
|
| 1268 |
+
"img_path": "images/e8fb35f59661e2125f94d260416433f51b66e88f603b3043407554e450e38eaf.jpg",
|
| 1269 |
+
"image_caption": [
|
| 1270 |
+
"(c) Samples of generated rectangles predicted by Transformer"
|
| 1271 |
+
],
|
| 1272 |
+
"image_footnote": [],
|
| 1273 |
+
"bbox": [
|
| 1274 |
+
130,
|
| 1275 |
+
255,
|
| 1276 |
+
867,
|
| 1277 |
+
323
|
| 1278 |
+
],
|
| 1279 |
+
"page_idx": 6
|
| 1280 |
+
},
|
| 1281 |
+
{
|
| 1282 |
+
"type": "image",
|
| 1283 |
+
"img_path": "images/e788d1f0db33e44bb3e6a0c4df34152707f628d114b6f889abf2665b4f19ab2f.jpg",
|
| 1284 |
+
"image_caption": [
|
| 1285 |
+
"(d) Predicted grasp quality heatmaps by Transformer",
|
| 1286 |
+
"Fig. 4. Visualization comparison of the CNN and transformer-based grasping models."
|
| 1287 |
+
],
|
| 1288 |
+
"image_footnote": [],
|
| 1289 |
+
"bbox": [
|
| 1290 |
+
130,
|
| 1291 |
+
352,
|
| 1292 |
+
867,
|
| 1293 |
+
411
|
| 1294 |
+
],
|
| 1295 |
+
"page_idx": 6
|
| 1296 |
+
},
|
| 1297 |
+
{
|
| 1298 |
+
"type": "table",
|
| 1299 |
+
"img_path": "images/73f0146a9e6e21910dfe00a852399bd54af8b482403ee224a5d2fe992f7bc335.jpg",
|
| 1300 |
+
"table_caption": [
|
| 1301 |
+
"TABLE III COMPARISON BETWEEN USING AND NOT USING SKIP-CONNECTIONS"
|
| 1302 |
+
],
|
| 1303 |
+
"table_footnote": [],
|
| 1304 |
+
"table_body": "<table><tr><td colspan=\"3\">The accuracy on Cornell Grasping Results</td></tr><tr><td></td><td>With Skip-connections</td><td>Without Skip-connections</td></tr><tr><td>RGB</td><td>96.78%</td><td>95.7%</td></tr><tr><td>Depth</td><td>95.2%</td><td>94.3%</td></tr><tr><td>RGB+Depth</td><td>97.99%</td><td>96.1%</td></tr><tr><td colspan=\"3\">The accuracy on Jacquard Grasping Results</td></tr><tr><td></td><td>With Skip-connections</td><td>Without Skip-connections</td></tr><tr><td>RGB</td><td>93.57%</td><td>92.4%</td></tr><tr><td>Depth</td><td>93.1%</td><td>91.8%</td></tr><tr><td>RGB+Depth</td><td>94.6%</td><td>93.27%</td></tr></table>",
|
| 1305 |
+
"bbox": [
|
| 1306 |
+
76,
|
| 1307 |
+
513,
|
| 1308 |
+
486,
|
| 1309 |
+
648
|
| 1310 |
+
],
|
| 1311 |
+
"page_idx": 6
|
| 1312 |
+
},
|
| 1313 |
+
{
|
| 1314 |
+
"type": "text",
|
| 1315 |
+
"text": "In Fig. 6, we illustrate a pick-and-place task based on our TF-Grasp on the Franka manipulator. Our grasp detection system works well for novel objects that have not been seen during training procedure and also locates graspable objects in cluttered environments.",
|
| 1316 |
+
"bbox": [
|
| 1317 |
+
73,
|
| 1318 |
+
674,
|
| 1319 |
+
490,
|
| 1320 |
+
748
|
| 1321 |
+
],
|
| 1322 |
+
"page_idx": 6
|
| 1323 |
+
},
|
| 1324 |
+
{
|
| 1325 |
+
"type": "text",
|
| 1326 |
+
"text": "In conclusion, the visualization results indicate that our TF-Grasp can produce a more general and robust prediction, which contributes to improving the detection accuracy.",
|
| 1327 |
+
"bbox": [
|
| 1328 |
+
73,
|
| 1329 |
+
750,
|
| 1330 |
+
491,
|
| 1331 |
+
796
|
| 1332 |
+
],
|
| 1333 |
+
"page_idx": 6
|
| 1334 |
+
},
|
| 1335 |
+
{
|
| 1336 |
+
"type": "text",
|
| 1337 |
+
"text": "D. Ablation Studies",
|
| 1338 |
+
"text_level": 1,
|
| 1339 |
+
"bbox": [
|
| 1340 |
+
75,
|
| 1341 |
+
818,
|
| 1342 |
+
212,
|
| 1343 |
+
830
|
| 1344 |
+
],
|
| 1345 |
+
"page_idx": 6
|
| 1346 |
+
},
|
| 1347 |
+
{
|
| 1348 |
+
"type": "text",
|
| 1349 |
+
"text": "To understand the role of skip-connections in our transformer model on the visual grasping problems, we conduct experiments on the Cornell and Jacquard grasping datasets with and without skip-connections using our transformer, respectively. The detailed experimental results are shown in Table III. The use of skip-connections is better than not using skip-connections in all input modes. The attention mechanism",
|
| 1350 |
+
"bbox": [
|
| 1351 |
+
73,
|
| 1352 |
+
838,
|
| 1353 |
+
490,
|
| 1354 |
+
945
|
| 1355 |
+
],
|
| 1356 |
+
"page_idx": 6
|
| 1357 |
+
},
|
| 1358 |
+
{
|
| 1359 |
+
"type": "text",
|
| 1360 |
+
"text": "in the transformer builds inter-relationships in each layer, incorporates global features, and achieves promising results. Through skip-connections, the multi-scale representations at different stages are further fused globally. The empirical evidence shows that these further refinement and contextual features contribute to the quality of final grasp prediction.",
|
| 1361 |
+
"bbox": [
|
| 1362 |
+
503,
|
| 1363 |
+
479,
|
| 1364 |
+
919,
|
| 1365 |
+
570
|
| 1366 |
+
],
|
| 1367 |
+
"page_idx": 6
|
| 1368 |
+
},
|
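The pattern being ablated here is the standard U-Net-style fusion of same-scale encoder features into the decoder. A toy PyTorch sketch is given below; the convolutional placeholders stand in for the swin-attention blocks and the channel sizes are arbitrary, so it is not the TF-Grasp architecture itself, only the skip-connection idea.

```python
import torch
import torch.nn as nn

class TinyEncoderDecoder(nn.Module):
    """Toy encoder-decoder: decoder stages optionally fuse same-scale encoder features."""
    def __init__(self, use_skips: bool = True):
        super().__init__()
        self.use_skips = use_skips
        self.enc1 = nn.Conv2d(1, 16, 3, stride=2, padding=1)    # 1/2 resolution
        self.enc2 = nn.Conv2d(16, 32, 3, stride=2, padding=1)   # 1/4 resolution
        self.up1 = nn.ConvTranspose2d(32, 16, 2, stride=2)      # back to 1/2
        self.dec1 = nn.Conv2d(32 if use_skips else 16, 16, 3, padding=1)
        self.up0 = nn.ConvTranspose2d(16, 8, 2, stride=2)       # back to full resolution
        self.head = nn.Conv2d(8, 1, 1)                          # e.g. a grasp quality map

    def forward(self, x):
        e1 = torch.relu(self.enc1(x))
        e2 = torch.relu(self.enc2(e1))
        d1 = torch.relu(self.up1(e2))
        if self.use_skips:
            d1 = torch.cat([d1, e1], dim=1)  # skip-connection: concatenate encoder feature
        d1 = torch.relu(self.dec1(d1))
        return self.head(torch.relu(self.up0(d1)))
```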
| 1369 |
+
{
|
| 1370 |
+
"type": "text",
|
| 1371 |
+
"text": "E. Grasping in Real World Scenarios",
|
| 1372 |
+
"text_level": 1,
|
| 1373 |
+
"bbox": [
|
| 1374 |
+
504,
|
| 1375 |
+
602,
|
| 1376 |
+
763,
|
| 1377 |
+
617
|
| 1378 |
+
],
|
| 1379 |
+
"page_idx": 6
|
| 1380 |
+
},
|
| 1381 |
+
{
|
| 1382 |
+
"type": "text",
|
| 1383 |
+
"text": "Physical Setting. The Franka Panda robot manipulation and the RealSense D435 RGB-D camera are used in our physical experiment. The camera is attached to the end-effector to keep a good visual coverage of graspable objects. In each grasp attempt, our TF-Grasp receives the visual signals from the depth camera mounted on the robot end-effector and outputs an optimal grasping posture. Next, the end-effector approaches the optimal target grasping posture based on the trajectory planned by a motion planning method, and then closes the gripper. Such a transformer-based grasp detection system can be easily adapted to other hardware platforms. During the grasp process, the raw depth sensor is filled with a portion of missing pixels that have NaN values. We generate the mask of NaN values, normalize the depth image, and apply cv2.inpaint [30] for further depth completion.",
|
| 1384 |
+
"bbox": [
|
| 1385 |
+
501,
|
| 1386 |
+
626,
|
| 1387 |
+
919,
|
| 1388 |
+
851
|
| 1389 |
+
],
|
| 1390 |
+
"page_idx": 6
|
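A minimal sketch of this masking-and-inpainting step with OpenCV is shown below. The 8-bit normalization and the inpainting radius are assumptions for illustration (cv2.inpaint operates on 8-bit images), and the authors' exact preprocessing may differ.

```python
import cv2
import numpy as np

def complete_depth(depth: np.ndarray) -> np.ndarray:
    """Fill NaN holes in a raw depth image via OpenCV inpainting (illustrative sketch)."""
    mask = (~np.isfinite(depth)).astype(np.uint8)          # 1 where depth is missing
    valid = np.where(np.isfinite(depth), depth, 0.0)
    d_min, d_max = valid[mask == 0].min(), valid[mask == 0].max()
    scale = max(d_max - d_min, 1e-6)
    # cv2.inpaint expects an 8-bit image, so normalize to [0, 255] first.
    depth_u8 = np.clip((valid - d_min) / scale * 255.0, 0, 255).astype(np.uint8)
    filled_u8 = cv2.inpaint(depth_u8, mask, 3, cv2.INPAINT_NS)
    return filled_u8.astype(np.float32) / 255.0 * scale + d_min
```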
| 1391 |
+
},
|
| 1392 |
+
{
|
| 1393 |
+
"type": "text",
|
| 1394 |
+
"text": "We perform a total of 165 grasping attempts, of which the robot performs successful grasp 152 times, achieving a success rate of $92.1\\%$ . Table IV lists the results of learning-based methods on real robot grasping. These results indicate that the transformer-based grasp detection system also behaves well on real robots.",
|
| 1395 |
+
"bbox": [
|
| 1396 |
+
501,
|
| 1397 |
+
854,
|
| 1398 |
+
921,
|
| 1399 |
+
944
|
| 1400 |
+
],
|
| 1401 |
+
"page_idx": 6
|
| 1402 |
+
},
|
| 1403 |
+
{
|
| 1404 |
+
"type": "header",
|
| 1405 |
+
"text": "WANG et al.: WHEN TRANSFORMER MEETS ROBOTIC GRASPING",
|
| 1406 |
+
"bbox": [
|
| 1407 |
+
76,
|
| 1408 |
+
29,
|
| 1409 |
+
428,
|
| 1410 |
+
40
|
| 1411 |
+
],
|
| 1412 |
+
"page_idx": 6
|
| 1413 |
+
},
|
| 1414 |
+
{
|
| 1415 |
+
"type": "page_number",
|
| 1416 |
+
"text": "7",
|
| 1417 |
+
"bbox": [
|
| 1418 |
+
911,
|
| 1419 |
+
30,
|
| 1420 |
+
919,
|
| 1421 |
+
40
|
| 1422 |
+
],
|
| 1423 |
+
"page_idx": 6
|
| 1424 |
+
},
|
| 1425 |
+
{
|
| 1426 |
+
"type": "image",
|
| 1427 |
+
"img_path": "images/fada870a953965bac0a7503ce4ae62b5635a0eb4abc046d5526cdb018ea416c4.jpg",
|
| 1428 |
+
"image_caption": [
|
| 1429 |
+
"Fig. 6. Screenshots of physical grasping in clutter."
|
| 1430 |
+
],
|
| 1431 |
+
"image_footnote": [],
|
| 1432 |
+
"bbox": [
|
| 1433 |
+
112,
|
| 1434 |
+
66,
|
| 1435 |
+
883,
|
| 1436 |
+
164
|
| 1437 |
+
],
|
| 1438 |
+
"page_idx": 7
|
| 1439 |
+
},
|
| 1440 |
+
{
|
| 1441 |
+
"type": "table",
|
| 1442 |
+
"img_path": "images/0cdb0a3b728bd6800a294085b9cbfa66f738caee1df43bbfab16bfec44ee2ccc.jpg",
|
| 1443 |
+
"table_caption": [
|
| 1444 |
+
"TABLE IV THE RESULTS FOR PHYSICAL SETUP."
|
| 1445 |
+
],
|
| 1446 |
+
"table_footnote": [],
|
| 1447 |
+
"table_body": "<table><tr><td>Authors</td><td>Physical grasp</td><td>Success rate (%)</td></tr><tr><td>Lenz [14]</td><td>89/100</td><td>89%</td></tr><tr><td>Pinto [31]</td><td>109/150</td><td>73%</td></tr><tr><td>Morrison [17]</td><td>110/120</td><td>92%</td></tr><tr><td>Chu [32]</td><td>89/100</td><td>89%</td></tr><tr><td>TF-Grasp(Ours)</td><td>152/165</td><td>92.1%</td></tr></table>",
|
| 1448 |
+
"bbox": [
|
| 1449 |
+
122,
|
| 1450 |
+
247,
|
| 1451 |
+
439,
|
| 1452 |
+
320
|
| 1453 |
+
],
|
| 1454 |
+
"page_idx": 7
|
| 1455 |
+
},
|
| 1456 |
+
{
|
| 1457 |
+
"type": "text",
|
| 1458 |
+
"text": "V. DISCUSSION AND CONCLUSION",
|
| 1459 |
+
"text_level": 1,
|
| 1460 |
+
"bbox": [
|
| 1461 |
+
158,
|
| 1462 |
+
354,
|
| 1463 |
+
406,
|
| 1464 |
+
368
|
| 1465 |
+
],
|
| 1466 |
+
"page_idx": 7
|
| 1467 |
+
},
|
| 1468 |
+
{
|
| 1469 |
+
"type": "text",
|
| 1470 |
+
"text": "In this work, we develop a novel architecture for visual grasping. Although CNN and its variants are still the dominant models in visual robotic grasping, we show the powerful potential of transformers in grasp detection. Compared with CNN-based counterparts, the transformer-based grasp detection models are better at capturing global dependencies and learning powerful feature representation. The results show that our proposed approach outperforms original CNN-based models. The contexts can be better represented by attention propagation. Nevertheless, the current approach is limited to the parallel gripper. Future research will focus on developing a universal transformer-based grasp detection method for other types of grippers, such as the five finger dexterous hand.",
|
| 1471 |
+
"bbox": [
|
| 1472 |
+
73,
|
| 1473 |
+
376,
|
| 1474 |
+
490,
|
| 1475 |
+
571
|
| 1476 |
+
],
|
| 1477 |
+
"page_idx": 7
|
| 1478 |
+
},
|
| 1479 |
+
{
|
| 1480 |
+
"type": "text",
|
| 1481 |
+
"text": "REFERENCES",
|
| 1482 |
+
"text_level": 1,
|
| 1483 |
+
"bbox": [
|
| 1484 |
+
235,
|
| 1485 |
+
590,
|
| 1486 |
+
330,
|
| 1487 |
+
604
|
| 1488 |
+
],
|
| 1489 |
+
"page_idx": 7
|
| 1490 |
+
},
|
| 1491 |
+
{
|
| 1492 |
+
"type": "list",
|
| 1493 |
+
"sub_type": "ref_text",
|
| 1494 |
+
"list_items": [
|
| 1495 |
+
"[1] J. Song, M. Patel, and M. Ghaffari, “Fusing convolutional neural network and geometric constraint for image-based indoor localization,” IEEE Robotics Autom. Lett., vol. 7, no. 2, pp. 1674–1681, 2022.",
|
| 1496 |
+
"[2] D. Zhao and J. Oh, \"Noticing motion patterns: A temporal cnn with a novel convolution operator for human trajectory prediction,\" IEEE Robotics Autom. Lett., vol. 6, no. 2, pp. 628-634, 2021.",
|
| 1497 |
+
"[3] S. Kumra and C. Kanan, \"Robotic grasp detection using deep convolutional neural networks,\" in Proc. IEEE/RSJ Int. Conf. Intell. Robots Syst, 2017, pp. 769-776.",
|
| 1498 |
+
"[4] X. Zhu, Y. Zhou, Y. Fan, and M. Tomizuka, \"Learn to grasp with less supervision: A data-efficient maximum likelihood grasp sampling loss,\" arXiv preprint arXiv:2110.01379, 2021.",
|
| 1499 |
+
"[5] J. Redmon and A. Angelova, \"Real-time grasp detection using convolutional neural networks,\" in Proc. IEEE Int. Conf. Robot. Autom., 2015, pp. 1316-1322.",
|
| 1500 |
+
"[6] A. V. et al., \"Attention is all you need,\" in Annual Conference on Neural Inform. Processing Sys. 2017, December 4-9, 2017, Long Beach, CA, USA, 2017, pp. 5998-6008.",
|
| 1501 |
+
"[7] A. D. et al., \"An image is worth 16x16 words: Transformers for image recognition at scale,\" in Proc. Int. Conf. Learn. Represent. OpenReview.net, 2021.",
|
| 1502 |
+
"[8] L. Z. et al., \"Swin transformer: Hierarchical vision transformer using shifted windows,\" in Proc. IEEE Int. Conf. Comput. Vision, 2021, pp. 10012-10022.",
|
| 1503 |
+
"[9] R. M. Murray, Z. Li, and S. S. Sastry, A mathematical introduction to robotic manipulation. Boca Raton, FL, USA: CRC, 1994, 2017.",
|
| 1504 |
+
"[10] A. Bicchi and V. Kumar, “Robotic grasping and contact: A review,” in Proc. IEEE Int. Conf. Robot. Autom., San Francisco, CA, USA, Apr. 2000, pp. 348–353."
|
| 1505 |
+
],
|
| 1506 |
+
"bbox": [
|
| 1507 |
+
76,
|
| 1508 |
+
613,
|
| 1509 |
+
491,
|
| 1510 |
+
944
|
| 1511 |
+
],
|
| 1512 |
+
"page_idx": 7
|
| 1513 |
+
},
|
| 1514 |
+
{
|
| 1515 |
+
"type": "list",
|
| 1516 |
+
"sub_type": "ref_text",
|
| 1517 |
+
"list_items": [
|
| 1518 |
+
"[11] H. Zhang, X. Lan, S. Bai, X. Zhou, Z. Tian, and N. Zheng, “Roi-based robotic grasp detection for object overlapping scenes,” in Proc. IEEE Int. Conf. Intell. Robots Syst., 2019, pp. 4768–4775.",
|
| 1519 |
+
"[12] U. Asif, J. Tang, and S. Harrer, \"Graspnet: An efficient convolutional neural network for real-time grasp detection for low-powered devices,\" in IJCAI, vol. 7, 2018, pp. 4875-4882.",
|
| 1520 |
+
"[13] X. Zhu, L. Sun, Y. Fan, and M. Tomizuka, “6-dof contrastive grasp proposal network,” in Proc. IEEE Int. Conf. Robot.Automat., 2021, pp. 6371–6377.",
|
| 1521 |
+
"[14] I. Lenz, H. Lee, and A. Saxena, \"Deep learning for detecting robotic grasps,\" Int. J. Robotics Res., vol. 34, no. 4-5, pp. 705-724, 2015.",
|
| 1522 |
+
"[15] J. M. et al., \"Dex-net 2.0: Deep learning to plan robust grasps with synthetic point clouds and analytic grasp metrics,\" in Robotics: Science and Systems XIII, Massachusetts Institute of Technology, Cambridge, Massachusetts, USA, July 12-16, 2017, 2017.",
|
| 1523 |
+
"[16] A. Gariépy, J.-C. Ruel, B. Chaib-Draa, and P. Giguere, “Gq-stn: Optimizing one-shot grasp detection based on robustness classifier,” in Proc. IEEE/RSJ Int. Conf. Intell. Robots Syst., 2019, pp. 3996–4003.",
|
| 1524 |
+
"[17] D. Morrison, P. Corke, and J. Leitner, “Learning robust, real-time, reactive robotic grasping,” Int. J. Robotics Res., vol. 39, no. 2-3, pp. 183���201, 2020.",
|
| 1525 |
+
"[18] C. J. et al., \"Transunet: Transformers make strong encoders for medical image segmentation,\" arXiv preprint arXiv:2102.04306, 2021.",
|
| 1526 |
+
"[19] O. Ronneberger, P. Fischer, and T. Brox, “U-net: Convolutional networks for biomedical image segmentation,” in Proc. Int. Conf. on Medical image computing and computer-assisted intervention. Springer, 2015, pp. 234–241.",
|
| 1527 |
+
"[20] Y. Jiang, S. Moseson, and A. Saxena, \"Efficient grasping from rgbd images: Learning using a new rectangle representation,\" in Proc. IEEE Int. Conf. Robot. Automat., 2011, pp. 3304-3311.",
|
| 1528 |
+
"[21] A. Depierre, E. Dellandrea, and L. Chen, \"Jacquard: A large scale dataset for robotic grasp detection,\" in Proc. IEEE/RSJ Int. Conf. Intell. Robots Syst., 2018, pp. 3511-3516.",
|
| 1529 |
+
"[22] Z. Wang, Z. Li, B. Wang, and H. Liu, \"Robot grasp detection using multimodal deep convolutional neural networks,\" Advances in Mechanical Engineering, vol. 8, no. 9, p. 1687814016668077, 2016.",
|
| 1530 |
+
"[23] U. Asif, M. Bennamoun, and F. A. Sohel, \"Rgb-d object recognition and grasp detection using hierarchical cascaded forests,\" IEEE Trans. on Robotics, vol. 33, no. 3, pp. 547-564, 2017.",
|
| 1531 |
+
"[24] H. Karaoguz and P. Jensfelt, \"Object detection approach for robot grasp detection,\" in Proc. IEEE Int. Conf. Robot.Automat., 2019, pp. 4953-4959.",
|
| 1532 |
+
"[25] D. Guo, F. Sun, H. Liu, T. Kong, B. Fang, and N. Xi, “A hybrid deep architecture for robotic grasp detection,” in Proc. IEEE Int. Conf. Robot.Automat., 2017, pp. 1609-1614.",
|
| 1533 |
+
"[26] S. Ainetter and F. Fraundorfer, \"End-to-end trainable deep neural network for robotic grasp detection and semantic segmentation from rgb,\" in Proc. IEEE Int. Conf. Robot.Automat. IEEE, 2021, pp. 13452-13458.",
|
| 1534 |
+
"[27] S. Kumra, S. Joshi, and F. Sahin, “Antipodal robotic grasping using generative residual convolutional neural network,” in Proc. IEEE Int. Conf. Intell. Robots Syst. IEEE, pp. 9626–9633.",
|
| 1535 |
+
"[28] I. Loshchilov and F. Hutter, \"Decoupled weight decay regularization,\" in Proc. Int. Conf. Learn. Represent., 2018.",
|
| 1536 |
+
"[29] X. Zhou, X. Lan, H. Zhang, Z. Tian, Y. Zhang, and N. Zheng, “Fully convolutional grasp detection network with oriented anchor box,” in Proc. IEEE Int. Conf. Intell. Robots Syst., 2018, pp. 7223-7230.",
|
| 1537 |
+
"[30] G. Bradski, “The opencv library.” Dr. Dobb's Journal: Software Tools for the Professional Programmer, vol. 25, no. 11, pp. 120-123, 2000.",
|
| 1538 |
+
"[31] L. Pinto and A. Gupta, \"Supersizing self-supervision: Learning to grasp from 50k tries and 700 robot hours,\" in Proc. IEEE Int. Conf. Robot. Autom., 2016, pp. 3406-3413.",
|
| 1539 |
+
"[32] F.-J. Chu, R. Xu, and P. A. Vela, “Real-world multiobject, multigrasp detection,” IEEE Robotics Autom. Lett., vol. 3, no. 4, pp. 3355–3362, 2018."
|
| 1540 |
+
],
|
| 1541 |
+
"bbox": [
|
| 1542 |
+
506,
|
| 1543 |
+
215,
|
| 1544 |
+
921,
|
| 1545 |
+
940
|
| 1546 |
+
],
|
| 1547 |
+
"page_idx": 7
|
| 1548 |
+
},
|
| 1549 |
+
{
|
| 1550 |
+
"type": "page_number",
|
| 1551 |
+
"text": "8",
|
| 1552 |
+
"bbox": [
|
| 1553 |
+
76,
|
| 1554 |
+
31,
|
| 1555 |
+
86,
|
| 1556 |
+
39
|
| 1557 |
+
],
|
| 1558 |
+
"page_idx": 7
|
| 1559 |
+
},
|
| 1560 |
+
{
|
| 1561 |
+
"type": "header",
|
| 1562 |
+
"text": "IEEE ROBOTICS AND AUTOMATION LETTERS. PREPRINT VERSION. JUNE 2022",
|
| 1563 |
+
"bbox": [
|
| 1564 |
+
496,
|
| 1565 |
+
29,
|
| 1566 |
+
919,
|
| 1567 |
+
40
|
| 1568 |
+
],
|
| 1569 |
+
"page_idx": 7
|
| 1570 |
+
}
|
| 1571 |
+
]
|
2202.11xxx/2202.11911/9037d395-d951-4662-9f75-505b7890fe99_model.json
ADDED
|
@@ -0,0 +1,2009 @@
|
| 1 |
+
[
|
| 2 |
+
[
|
| 3 |
+
{
|
| 4 |
+
"type": "header",
|
| 5 |
+
"bbox": [
|
| 6 |
+
0.077,
|
| 7 |
+
0.03,
|
| 8 |
+
0.502,
|
| 9 |
+
0.041
|
| 10 |
+
],
|
| 11 |
+
"angle": 0,
|
| 12 |
+
"content": "IEEE ROBOTICS AND AUTOMATION LETTERS. PREPRINT VERSION. JUNE 2022"
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "page_number",
|
| 16 |
+
"bbox": [
|
| 17 |
+
0.912,
|
| 18 |
+
0.031,
|
| 19 |
+
0.921,
|
| 20 |
+
0.041
|
| 21 |
+
],
|
| 22 |
+
"angle": 0,
|
| 23 |
+
"content": "1"
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "aside_text",
|
| 27 |
+
"bbox": [
|
| 28 |
+
0.023,
|
| 29 |
+
0.264,
|
| 30 |
+
0.061,
|
| 31 |
+
0.707
|
| 32 |
+
],
|
| 33 |
+
"angle": 270,
|
| 34 |
+
"content": "arXiv:2202.11911v3 [cs.RO] 13 Sep 2022"
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "title",
|
| 38 |
+
"bbox": [
|
| 39 |
+
0.124,
|
| 40 |
+
0.071,
|
| 41 |
+
0.873,
|
| 42 |
+
0.141
|
| 43 |
+
],
|
| 44 |
+
"angle": 0,
|
| 45 |
+
"content": "When Transformer Meets Robotic Grasping: Exploits Context for Efficient Grasp Detection"
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"bbox": [
|
| 50 |
+
0.235,
|
| 51 |
+
0.148,
|
| 52 |
+
0.756,
|
| 53 |
+
0.165
|
| 54 |
+
],
|
| 55 |
+
"angle": 0,
|
| 56 |
+
"content": "Shaochen Wang, Zhangli Zhou, and Zhen Kan, Senior Member, IEEE"
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"bbox": [
|
| 61 |
+
0.075,
|
| 62 |
+
0.221,
|
| 63 |
+
0.493,
|
| 64 |
+
0.537
|
| 65 |
+
],
|
| 66 |
+
"angle": 0,
|
| 67 |
+
"content": "Abstract—In this paper, we present a transformer-based architecture, namely TF-Grasp, for robotic grasp detection. The developed TF-Grasp framework has two elaborate designs making it well suitable for visual grasping tasks. The first key design is that we adopt the local window attention to capture local contextual information and detailed features of graspable objects. Then, we apply the cross window attention to model the long-term dependencies between distant pixels. Object knowledge, environmental configuration, and relationships between different visual entities are aggregated for subsequent grasp detection. The second key design is that we build a hierarchical encoder-decoder architecture with skip-connections, delivering shallow features from the encoder to decoder to enable a multi-scale feature fusion. Due to the powerful attention mechanism, TF-Grasp can simultaneously obtain the local information (i.e., the contours of objects), and model long-term connections such as the relationships between distinct visual concepts in clutter. Extensive computational experiments demonstrate that TF-Grasp achieves competitive results versus state-of-art grasping convolutional models and attains a higher accuracy of \\(97.99\\%\\) and \\(94.6\\%\\) on Cornell and Jacquard grasping datasets, respectively. Real-world experiments using a 7DoF Franka Emika Panda robot also demonstrate its capability of grasping unseen objects in a variety of scenarios. The code is available at https://github.com/WangShaoSUN/grasp-transformer."
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"bbox": [
|
| 72 |
+
0.075,
|
| 73 |
+
0.543,
|
| 74 |
+
0.49,
|
| 75 |
+
0.571
|
| 76 |
+
],
|
| 77 |
+
"angle": 0,
|
| 78 |
+
"content": "Index Terms—Vision Transformer, Grasp Detection, Robotic Grasping."
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "title",
|
| 82 |
+
"bbox": [
|
| 83 |
+
0.205,
|
| 84 |
+
0.592,
|
| 85 |
+
0.362,
|
| 86 |
+
0.607
|
| 87 |
+
],
|
| 88 |
+
"angle": 0,
|
| 89 |
+
"content": "I. INTRODUCTION"
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"bbox": [
|
| 94 |
+
0.074,
|
| 95 |
+
0.613,
|
| 96 |
+
0.493,
|
| 97 |
+
0.795
|
| 98 |
+
],
|
| 99 |
+
"angle": 0,
|
| 100 |
+
"content": "DATA-driven methodologies such as deep learning have become the mainstream methods for robotic visual sensing tasks such as indoor localization [1], trajectory prediction [2], and robotic manipulation [3], [4], since they require less handcrafted feature engineering and can be extended to many complex tasks. In recent years, as visual sensing is increasingly being used in manufacturing, industry, and medical care, growing research is devoted to developing advanced robot's perception abilities. A typical application of visual sensing is the robotic grasp detection, where the images of objects are used to infer the grasping pose. Considering a grasping task of manipulating a wide diversity of objects, to find"
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"bbox": [
|
| 105 |
+
0.074,
|
| 106 |
+
0.807,
|
| 107 |
+
0.493,
|
| 108 |
+
0.875
|
| 109 |
+
],
|
| 110 |
+
"angle": 0,
|
| 111 |
+
"content": "Manuscript received February 23, 2022; revised April 25, 2022; accepted June 20, 2022. This letter was recommended for publication by Markus Vincze upon evaluation of the Associate Editor and Reviewers' comments. This work was supported in part by the National Natural Science Foundation of China under Grant U2013601, and Grant 62173314. (Corresponding author: Zhen Kan.)"
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"bbox": [
|
| 116 |
+
0.074,
|
| 117 |
+
0.875,
|
| 118 |
+
0.493,
|
| 119 |
+
0.933
|
| 120 |
+
],
|
| 121 |
+
"angle": 0,
|
| 122 |
+
"content": "Shaochen Wang, Zhangli Zhou, and Zhen Kan are with the Department of Automation, University of Science and Technology of China, Hefei 230026, China, (e-mail: samwang@mail.ustc.edu.cn; zzl1215@mail.ustc.edu.cn; zkan@ustc.edu.cn.) An extended version is available at https://arxiv.org/abs/2202.11911."
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"bbox": [
|
| 127 |
+
0.089,
|
| 128 |
+
0.933,
|
| 129 |
+
0.376,
|
| 130 |
+
0.945
|
| 131 |
+
],
|
| 132 |
+
"angle": 0,
|
| 133 |
+
"content": "Digital Object Identifier (DOI): see top of this page."
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"bbox": [
|
| 138 |
+
0.503,
|
| 139 |
+
0.22,
|
| 140 |
+
0.923,
|
| 141 |
+
0.356
|
| 142 |
+
],
|
| 143 |
+
"angle": 0,
|
| 144 |
+
"content": "the graspable regions, the robots have to concentrate on not only partial geometric information but also the entire visual appearance of the object. Particularly in unstructured and cluttered environments, dealing with variations in shape and position (e.g., occlusion) and also the spatial relationship with other objects are critical to the performance of grasp detection. Therefore, this work is particularly motivated to investigate grasp detection that takes into account both local neighbor pixels and long-distance relationships in spatial dimensions."
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "text",
|
| 148 |
+
"bbox": [
|
| 149 |
+
0.503,
|
| 150 |
+
0.357,
|
| 151 |
+
0.923,
|
| 152 |
+
0.522
|
| 153 |
+
],
|
| 154 |
+
"angle": 0,
|
| 155 |
+
"content": "Most modern grasp detectors [3], [5] are based on convolutional neural networks (CNNs) which emerge as the de facto standard for processing visual robotic grasping. However, current CNNs are composed of individual convolution kernels, which are more inclined to concentrate on local level information. Also, the convolution kernels in a layer of CNN are viewed as independent counterparts without mutual information fusion. Generally, to maintain a large receptive field, CNNs have to repeatedly stack convolutional layers, which reduce the spatial resolution and inevitably results in the loss of global details and degraded performance."
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"type": "text",
|
| 159 |
+
"bbox": [
|
| 160 |
+
0.503,
|
| 161 |
+
0.523,
|
| 162 |
+
0.923,
|
| 163 |
+
0.763
|
| 164 |
+
],
|
| 165 |
+
"angle": 0,
|
| 166 |
+
"content": "Recently, as a novel approach to handle natural language processing and computer vision, the transformer [6], [7], [8] demonstrates remarkable success. The widely adopted attention mechanisms [6] of transformers in sequence modeling provide an elegant resolution that can better convey the fusion of information across global sequences. In fact, as robots are deployed in more and more diverse applications such as industrial assembly lines and smart home, the sensing capacity of robotic systems needs to be enriched, not only in local regions, but also in global interaction. Especially when robots frequently interact with objects in the environment, the awareness of global attention is particularly important with respect to safety and reliability. However, most vision transformers are designed for image classification on natural images processing tasks. Few of them are specifically built for robotic tasks."
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"type": "text",
|
| 170 |
+
"bbox": [
|
| 171 |
+
0.503,
|
| 172 |
+
0.765,
|
| 173 |
+
0.923,
|
| 174 |
+
0.947
|
| 175 |
+
],
|
| 176 |
+
"angle": 0,
|
| 177 |
+
"content": "In this paper, we present a transformer-based visual grasp detection framework, namely TF-Grasp, which leverages the fact that the attention can better aggregate information across the entire input sequences to obtain an improved global representation. More specifically, the information within independent image patches is bridged via self-attention and the encoder in our framework captures these multi-scale low-level features. The decoder incorporates the high-level features through long-range spatial dependencies to construct the final grasping pose. We provide detailed empirical evidence to show that our grasping transformer performs reasonably well on popular grasping testbeds, e.g., Cornell and Jacquard grasping"
|
| 178 |
+
}
|
| 179 |
+
],
|
| 180 |
+
[
|
| 181 |
+
{
|
| 182 |
+
"type": "page_number",
|
| 183 |
+
"bbox": [
|
| 184 |
+
0.078,
|
| 185 |
+
0.032,
|
| 186 |
+
0.086,
|
| 187 |
+
0.04
|
| 188 |
+
],
|
| 189 |
+
"angle": 0,
|
| 190 |
+
"content": "2"
|
| 191 |
+
},
|
| 192 |
+
{
|
| 193 |
+
"type": "header",
|
| 194 |
+
"bbox": [
|
| 195 |
+
0.498,
|
| 196 |
+
0.031,
|
| 197 |
+
0.921,
|
| 198 |
+
0.041
|
| 199 |
+
],
|
| 200 |
+
"angle": 0,
|
| 201 |
+
"content": "IEEE ROBOTICS AND AUTOMATION LETTERS. PREPRINT VERSION. JUNE 2022"
|
| 202 |
+
},
|
| 203 |
+
{
|
| 204 |
+
"type": "text",
|
| 205 |
+
"bbox": [
|
| 206 |
+
0.079,
|
| 207 |
+
0.07,
|
| 208 |
+
0.49,
|
| 209 |
+
0.204
|
| 210 |
+
],
|
| 211 |
+
"angle": 0,
|
| 212 |
+
"content": "datasets. The experimental results demonstrate that the transformer architecture plays an integral role in generating appropriate grasping poses by learning local and global features from different parts of each object. The vision transformer-based grasp detection works well on the real robotic system and shows promising generalization to unseen objects. In addition, our TF-Grasp can generate the required grasping poses for parallel grippers in a single forward pass of the network."
|
| 213 |
+
},
|
| 214 |
+
{
|
| 215 |
+
"type": "text",
|
| 216 |
+
"bbox": [
|
| 217 |
+
0.079,
|
| 218 |
+
0.206,
|
| 219 |
+
0.49,
|
| 220 |
+
0.234
|
| 221 |
+
],
|
| 222 |
+
"angle": 0,
|
| 223 |
+
"content": "In a nutshell, the contributions of this paper can be summarised in three folds:"
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"type": "text",
|
| 227 |
+
"bbox": [
|
| 228 |
+
0.096,
|
| 229 |
+
0.238,
|
| 230 |
+
0.49,
|
| 231 |
+
0.298
|
| 232 |
+
],
|
| 233 |
+
"angle": 0,
|
| 234 |
+
"content": "- This work presents a novel and neat transformer architecture for visual robotic grasping tasks. To the best of our knowledge, it is one of the first attempts considering vision transformers in grasp detection tasks."
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "text",
|
| 238 |
+
"bbox": [
|
| 239 |
+
0.096,
|
| 240 |
+
0.299,
|
| 241 |
+
0.49,
|
| 242 |
+
0.343
|
| 243 |
+
],
|
| 244 |
+
"angle": 0,
|
| 245 |
+
"content": "- We consider simultaneous fusion of local and global features and redesign the classical ViT framework for robotic visual sensing tasks."
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "text",
|
| 249 |
+
"bbox": [
|
| 250 |
+
0.096,
|
| 251 |
+
0.345,
|
| 252 |
+
0.49,
|
| 253 |
+
0.463
|
| 254 |
+
],
|
| 255 |
+
"angle": 0,
|
| 256 |
+
"content": "- Exhaustive experiments are conducted to show the advantages of the transformer-based robotic perception framework. The experimental results demonstrate that our model achieves improved performance on popular grasping datasets compared to the state-of-the-art methods. We further show that our grasping transformer can generate appropriate grasping poses for known or unknown objects in either single or cluttered environments."
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "list",
|
| 260 |
+
"bbox": [
|
| 261 |
+
0.096,
|
| 262 |
+
0.238,
|
| 263 |
+
0.49,
|
| 264 |
+
0.463
|
| 265 |
+
],
|
| 266 |
+
"angle": 0,
|
| 267 |
+
"content": null
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "title",
|
| 271 |
+
"bbox": [
|
| 272 |
+
0.216,
|
| 273 |
+
0.482,
|
| 274 |
+
0.353,
|
| 275 |
+
0.494
|
| 276 |
+
],
|
| 277 |
+
"angle": 0,
|
| 278 |
+
"content": "II. RELATED WORK"
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "text",
|
| 282 |
+
"bbox": [
|
| 283 |
+
0.079,
|
| 284 |
+
0.5,
|
| 285 |
+
0.49,
|
| 286 |
+
0.543
|
| 287 |
+
],
|
| 288 |
+
"angle": 0,
|
| 289 |
+
"content": "This section reviews recent advances in the field of robotic grasping and briefly describes the progress of transformers in different areas."
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "title",
|
| 293 |
+
"bbox": [
|
| 294 |
+
0.079,
|
| 295 |
+
0.565,
|
| 296 |
+
0.211,
|
| 297 |
+
0.578
|
| 298 |
+
],
|
| 299 |
+
"angle": 0,
|
| 300 |
+
"content": "A. Grasp Detection"
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "text",
|
| 304 |
+
"bbox": [
|
| 305 |
+
0.079,
|
| 306 |
+
0.584,
|
| 307 |
+
0.49,
|
| 308 |
+
0.944
|
| 309 |
+
],
|
| 310 |
+
"angle": 0,
|
| 311 |
+
"content": "The ability to locate the object position and determine the appropriate grasping pose is crucial to stable and robust robotic grasping. Grasp detection, as the name implies, uses the image captured from the camera to infer the grasping pose for the robot manipulator. Using geometry-driven methods, earlier works [9], [10] mainly focus on analyzing the contours of objects to identify grasping points. A common assumption in these methods is that the geometric model of the object is always available. However, preparing the CAD models for graspable objects is time-consuming and impractical for real-time implementation. Recently, deep learning based methods have been successfully applied in visual grasping tasks [3], [5], [11], [12], [13]. The work of [14] is one of the earliest works that introduces deep neural networks to grasp detection via a two-stage strategy where the first stage finds exhaustive possible grasping candidates and the second stage evaluates the quality of these grasp candidates to identify the best one. However, due to numerous grasping proposals, the method in [14] suffers from relatively slow speed. Many recent works utilize convolutional neural networks to generate bounding box proposals to estimate the grasp pose of objects. Redmon et al. [5] employed an Alexnet-like CNN architecture to regress grasping poses. Kumra et al. [3] explored the use of ResNet-50 as a backbone to incorporate multimodal"
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"bbox": [
|
| 316 |
+
0.508,
|
| 317 |
+
0.07,
|
| 318 |
+
0.92,
|
| 319 |
+
0.341
|
| 320 |
+
],
|
| 321 |
+
"angle": 0,
|
| 322 |
+
"content": "including depth and RGB information to further improve the grasp performance. Besides, CNN-based grasp quality networks [15], [16] were proposed to evaluate and predict the robustness of grasp candidates. In the same line, GG-CNN [17] developed a fully convolutional neural network to perform grasp detection, which provides a lightweight and real-time solution for visual grasping. Currently, most of the existing grasp detection methods are still heavily inspired by computer vision techniques such as object recognition, object detection, etc. In contrast to classical visual problems where the detected objects are usually well-defined instances in the scene, in grasp detection, the grasp configuration to be generated is continuous, which implies an infinite number of possible grasp options. This places significant challenges in feature extraction to identify a valid grasp configuration from all possible candidates. We argue that the loss of long-term dependencies in feature extraction is a major drawback of current CNNs based grasp detection methods."
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "title",
|
| 326 |
+
"bbox": [
|
| 327 |
+
0.509,
|
| 328 |
+
0.384,
|
| 329 |
+
0.613,
|
| 330 |
+
0.398
|
| 331 |
+
],
|
| 332 |
+
"angle": 0,
|
| 333 |
+
"content": "B. Transformer"
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "text",
|
| 337 |
+
"bbox": [
|
| 338 |
+
0.508,
|
| 339 |
+
0.412,
|
| 340 |
+
0.92,
|
| 341 |
+
0.531
|
| 342 |
+
],
|
| 343 |
+
"angle": 0,
|
| 344 |
+
"content": "Transformer [6] first emerged in machine translation and is rapidly establishing itself as a new paradigm in natural language processing due to its potential to model global information, which learns the high quality features by considering the whole context. Thanks to its excellent global representation and friendly parallel computation, the transformer is competitive in long sequences modeling and gradually replaces RNNs and CNNs."
|
| 345 |
+
},
|
| 346 |
+
{
|
| 347 |
+
"type": "text",
|
| 348 |
+
"bbox": [
|
| 349 |
+
0.508,
|
| 350 |
+
0.536,
|
| 351 |
+
0.92,
|
| 352 |
+
0.686
|
| 353 |
+
],
|
| 354 |
+
"angle": 0,
|
| 355 |
+
"content": "Motivated by the remarkable success of transformers achieved in natural language processing, more and more researchers are interested in the employment of attention mechanisms in visual tasks. At present, the transformer has been successfully applied to image classification, object detection, and segmentation tasks. However, there still exist many challenges. First, visual signals and word tokens are very different on many scales. Second, the high dimension of pixel-level information may introduce significant computational complexity."
|
| 356 |
+
},
|
| 357 |
+
{
|
| 358 |
+
"type": "text",
|
| 359 |
+
"bbox": [
|
| 360 |
+
0.508,
|
| 361 |
+
0.689,
|
| 362 |
+
0.92,
|
| 363 |
+
0.944
|
| 364 |
+
],
|
| 365 |
+
"angle": 0,
|
| 366 |
+
"content": "More recently, ViT [7] was presented as a transformer model to tackle natural images recognition, which splits the image into non-overlapping patches. The authors in [8] proposed a hierarchical ViT called Swin-Transformer by calculating the local self-attention with shifted windows. In contrast to the quadratic computation complexity of self-attention in ViT, Swin-Transformer achieves a linear complexity. Inspired by this fashion, many researchers have tried to apply transformer to other fields. For example, TransUNet [18] combines transformer and Unet [19] for medical image diagnosis. Nevertheless, how to exploit the strengths of attention to aggregate information from entire inputs has not been investigated in the task of visual grasp detection. Unlike prior works, we design a transformer based encoder-decoder architecture to predict the grasp posture in an end-to-end manner. It is shown that our method achieves higher grasp success than the state-of-the-art CNNs counterparts."
|
| 367 |
+
}
|
| 368 |
+
],
|
| 369 |
+
[
|
| 370 |
+
{
|
| 371 |
+
"type": "header",
|
| 372 |
+
"bbox": [
|
| 373 |
+
0.077,
|
| 374 |
+
0.03,
|
| 375 |
+
0.428,
|
| 376 |
+
0.041
|
| 377 |
+
],
|
| 378 |
+
"angle": 0,
|
| 379 |
+
"content": "WANG et al.: WHEN TRANSFORMER MEETS ROBOTIC GRASPING"
|
| 380 |
+
},
|
| 381 |
+
{
|
| 382 |
+
"type": "page_number",
|
| 383 |
+
"bbox": [
|
| 384 |
+
0.912,
|
| 385 |
+
0.031,
|
| 386 |
+
0.92,
|
| 387 |
+
0.041
|
| 388 |
+
],
|
| 389 |
+
"angle": 0,
|
| 390 |
+
"content": "3"
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"type": "image",
|
| 394 |
+
"bbox": [
|
| 395 |
+
0.118,
|
| 396 |
+
0.073,
|
| 397 |
+
0.454,
|
| 398 |
+
0.409
|
| 399 |
+
],
|
| 400 |
+
"angle": 0,
|
| 401 |
+
"content": null
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"type": "image_caption",
|
| 405 |
+
"bbox": [
|
| 406 |
+
0.074,
|
| 407 |
+
0.425,
|
| 408 |
+
0.493,
|
| 409 |
+
0.464
|
| 410 |
+
],
|
| 411 |
+
"angle": 0,
|
| 412 |
+
"content": "Fig. 1. Overview of the TF-grasp model. Our model takes as input the image captured by the camera mounted on the end-effector of the manipulator and generates a pixel-level grasp representation."
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"type": "title",
|
| 416 |
+
"bbox": [
|
| 417 |
+
0.234,
|
| 418 |
+
0.487,
|
| 419 |
+
0.334,
|
| 420 |
+
0.5
|
| 421 |
+
],
|
| 422 |
+
"angle": 0,
|
| 423 |
+
"content": "III. METHOD"
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"type": "text",
|
| 427 |
+
"bbox": [
|
| 428 |
+
0.074,
|
| 429 |
+
0.505,
|
| 430 |
+
0.491,
|
| 431 |
+
0.597
|
| 432 |
+
],
|
| 433 |
+
"angle": 0,
|
| 434 |
+
"content": "Grasp Representation. The autonomous visual grasping tasks generally start from collecting visual images of the object by sensory input, which will then be processed to generate an effective grasp configuration to maximise the probability of grasp success. Considering a parallel-plate gripper, the grasp representation \\( g \\) [20] is formulated as a 5-dimensional tuple:"
|
| 435 |
+
},
|
| 436 |
+
{
|
| 437 |
+
"type": "equation",
|
| 438 |
+
"bbox": [
|
| 439 |
+
0.219,
|
| 440 |
+
0.604,
|
| 441 |
+
0.49,
|
| 442 |
+
0.621
|
| 443 |
+
],
|
| 444 |
+
"angle": 0,
|
| 445 |
+
"content": "\\[\ng = \\{x, y, \\theta , w, h \\} \\tag {1}\n\\]"
|
| 446 |
+
},
|
| 447 |
+
{
|
| 448 |
+
"type": "text",
|
| 449 |
+
"bbox": [
|
| 450 |
+
0.075,
|
| 451 |
+
0.627,
|
| 452 |
+
0.49,
|
| 453 |
+
0.732
|
| 454 |
+
],
|
| 455 |
+
"angle": 0,
|
| 456 |
+
"content": "where \\((x,y)\\) are the center coordinates of the grasp rectangle, \\((w,h)\\) denote the width and height of the grasp rectangle, and \\(\\theta\\) is the orientation of the grasp rectangle with respect to the horizontal axis. Given a gripper with known dimensions, a simplified representation can be expressed as \\(g = (p,\\phi ,w)\\) where \\(p = (x,y)\\), \\(\\phi\\) indicates the orientation angle of gripper and \\(w\\) denotes the opening distance of gripper, respectively."
|
| 457 |
+
},
|
| 458 |
+
{
|
| 459 |
+
"type": "text",
|
| 460 |
+
"bbox": [
|
| 461 |
+
0.074,
|
| 462 |
+
0.733,
|
| 463 |
+
0.49,
|
| 464 |
+
0.763
|
| 465 |
+
],
|
| 466 |
+
"angle": 0,
|
| 467 |
+
"content": "To facilitate grasping, we follow the setting in [17] to represent the grasp in 2-D image space as"
|
| 468 |
+
},
|
| 469 |
+
{
|
| 470 |
+
"type": "equation",
|
| 471 |
+
"bbox": [
|
| 472 |
+
0.182,
|
| 473 |
+
0.769,
|
| 474 |
+
0.49,
|
| 475 |
+
0.787
|
| 476 |
+
],
|
| 477 |
+
"angle": 0,
|
| 478 |
+
"content": "\\[\nG = \\{Q, W, \\Theta \\} \\in \\mathbb {R} ^ {3 \\times W \\times H}, \\tag {2}\n\\]"
|
| 479 |
+
},
|
| 480 |
+
{
|
| 481 |
+
"type": "text",
|
| 482 |
+
"bbox": [
|
| 483 |
+
0.074,
|
| 484 |
+
0.794,
|
| 485 |
+
0.49,
|
| 486 |
+
0.869
|
| 487 |
+
],
|
| 488 |
+
"angle": 0,
|
| 489 |
+
"content": "where the grasp quality \\( Q \\) measures the grasp success of each pixel, and \\( W \\) and \\( \\Theta \\) are the gripper width and orientation angle maps. The value of each pixel in \\( W \\) and \\( \\Theta \\) represents the corresponding width and angle of gripper at that position during the grasping."
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"type": "text",
|
| 493 |
+
"bbox": [
|
| 494 |
+
0.074,
|
| 495 |
+
0.87,
|
| 496 |
+
0.49,
|
| 497 |
+
0.915
|
| 498 |
+
],
|
| 499 |
+
"angle": 0,
|
| 500 |
+
"content": "Consequently, in the developed TF-Grasp, the grasp detection task boils down to three sub-tasks, namely the problems of predicting grasping position, angle, and width."
|
| 501 |
+
},
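A minimal sketch (not the released TF-Grasp code) of how the pixel-wise maps of Eq. (2) can be filled from a single annotated rectangle of Eq. (1); the axis-aligned filling of the region around the grasp center and all array shapes are simplifying assumptions for illustration only.

```python
import numpy as np

H_img, W_img = 224, 224
Q = np.zeros((H_img, W_img), dtype=np.float32)      # grasp quality map
Theta = np.zeros((H_img, W_img), dtype=np.float32)  # gripper angle map
Wd = np.zeros((H_img, W_img), dtype=np.float32)     # gripper width map

def add_grasp_label(x, y, theta, w, h):
    """Mark the pixels covered by one grasp rectangle g = {x, y, theta, w, h}."""
    y0, y1 = max(0, int(y - h / 2)), min(H_img, int(y + h / 2))
    x0, x1 = max(0, int(x - w / 2)), min(W_img, int(x + w / 2))
    Q[y0:y1, x0:x1] = 1.0        # these pixels admit a successful grasp
    Theta[y0:y1, x0:x1] = theta  # gripper orientation at these pixels
    Wd[y0:y1, x0:x1] = w         # gripper opening width at these pixels

add_grasp_label(x=112, y=96, theta=0.3, w=40, h=20)  # hypothetical annotation
```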
|
| 502 |
+
{
|
| 503 |
+
"type": "text",
|
| 504 |
+
"bbox": [
|
| 505 |
+
0.075,
|
| 506 |
+
0.915,
|
| 507 |
+
0.492,
|
| 508 |
+
0.947
|
| 509 |
+
],
|
| 510 |
+
"angle": 0,
|
| 511 |
+
"content": "Grasp Transformer Overview. A deep motivation of this work is that the treatment of robot perception in complex,"
|
| 512 |
+
},
|
| 513 |
+
{
|
| 514 |
+
"type": "text",
|
| 515 |
+
"bbox": [
|
| 516 |
+
0.503,
|
| 517 |
+
0.069,
|
| 518 |
+
0.923,
|
| 519 |
+
0.281
|
| 520 |
+
],
|
| 521 |
+
"angle": 0,
|
| 522 |
+
"content": "dynamic robotic tasks should be global and holistic with information mutual fusion. Specifically, the grasping model can be formulated into an encoder-decoder architecture with a U-shaped structure, as detailed in Fig. 1. The encoder branch aggregates the entire visual input, mutually fuses features by using attention blocks, and then extracts the specific features that are useful for visual robotic grasping. During the decoder process, the model incorporates features delivered via skip-connections and performs a pixel-level grasp prediction by up-sampling. More concretely, the attention modules in the decoder enable more comprehensive processing of local and long-range information, allowing for better multi-scale feature fusion. Each pixel in the prediction heatmap is correlated with the final location and orientation of the end-effector."
|
| 523 |
+
},
|
| 524 |
+
{
|
| 525 |
+
"type": "text",
|
| 526 |
+
"bbox": [
|
| 527 |
+
0.503,
|
| 528 |
+
0.281,
|
| 529 |
+
0.923,
|
| 530 |
+
0.599
|
| 531 |
+
],
|
| 532 |
+
"angle": 0,
|
| 533 |
+
"content": "To bridge the domain gaps between the transformer and visual robotic grasping tasks, we have carefully designed our grasping transformer in the following aspects for improved grasp detection. (a) Cascade Design. Different from the classic ViT architecture, we adapt a cascaded encoder-decoder structure. The encoder utilizes self-attention to learn a contextual representation that facilitates grasping and the decoder makes use of the extracted features to perform a pixel-level grasp prediction. (b) Local and Global balance. We utilize the swim attention layer to achieve a trade-off between global and local information for better scene perception. Window attention performs local feature extraction and the shifted-window attention allows cross window interactions to globally focus on more diverse regions. (c) Feature Fusion. The feature representations at different stages are connected by skip-connections for a multi-scale feature fusion, which acquire both rich semantic and detailed features. (d) Lightweight Design. It is essential for robots to account for efficiency and the real-time performance. We utilize shifted attention blocks and a slimming design for our grasping transformer to reach an ideal trade-off between the performance and speed."
|
| 534 |
+
},
|
| 535 |
+
{
|
| 536 |
+
"type": "text",
|
| 537 |
+
"bbox": [
|
| 538 |
+
0.504,
|
| 539 |
+
0.598,
|
| 540 |
+
0.923,
|
| 541 |
+
0.764
|
| 542 |
+
],
|
| 543 |
+
"angle": 0,
|
| 544 |
+
"content": "Grasp Transformer Encoder. Before being fed into the encoder, the image is first passed through patch partition layer and is then cut into non-overlapping patches. Each patch is treated as a word token in the text. For example, a 2D image \\( I \\in \\mathbb{R}^{W \\times H \\times C} \\) is split into fixed-size patches \\( x \\in \\mathbb{R}^{N \\times (P \\times P \\times C)} \\), where \\( (H, W) \\) denote the height and width of the original image, \\( C \\) represents the channel of the image, \\( P \\) is the shape size of each image patch, and \\( N = H \\times W / P^2 \\) refers to the number of image patches. Then token-based representations can be obtained by passing the images patches into a projection layer."
|
| 545 |
+
},
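As a rough sketch of the patch partition and projection step just described (an assumption about the implementation, consistent with the later statement that it is realized by a \(p \times p\) convolution of stride \(p\)):

```python
import torch
import torch.nn as nn

P, C, embed_dim = 4, 3, 96                    # patch size, channels, token width (illustrative)
patch_embed = nn.Conv2d(C, embed_dim, kernel_size=P, stride=P)

img = torch.randn(1, C, 224, 224)             # dummy RGB input
tokens = patch_embed(img)                     # (1, embed_dim, H/P, W/P)
tokens = tokens.flatten(2).transpose(1, 2)    # (1, N, embed_dim) with N = H*W / P**2
print(tokens.shape)                           # torch.Size([1, 3136, 96])
```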
|
| 546 |
+
{
|
| 547 |
+
"type": "text",
|
| 548 |
+
"bbox": [
|
| 549 |
+
0.503,
|
| 550 |
+
0.764,
|
| 551 |
+
0.922,
|
| 552 |
+
0.898
|
| 553 |
+
],
|
| 554 |
+
"angle": 0,
|
| 555 |
+
"content": "The encoder is composed by stacking identical transformer blocks. Attentions in the transformer block build long-distance interactions across distant pixels and attend on these positions in the embedding space. At the top of the encoder is a bottleneck block attached to the decoder. The fundamental element in our grasping transformer framework is the multi-head self-attention. The input feature \\(\\mathbf{X}\\) is linearly transformed to derive the query \\(Q\\), key \\(K\\), and value \\(V\\), which are defined as follows:"
|
| 556 |
+
},
|
| 557 |
+
{
|
| 558 |
+
"type": "equation",
|
| 559 |
+
"bbox": [
|
| 560 |
+
0.584,
|
| 561 |
+
0.907,
|
| 562 |
+
0.921,
|
| 563 |
+
0.924
|
| 564 |
+
],
|
| 565 |
+
"angle": 0,
|
| 566 |
+
"content": "\\[\nQ = X W _ {Q}, K = X W _ {K}, V = X W _ {V}, \\tag {3}\n\\]"
|
| 567 |
+
},
|
| 568 |
+
{
|
| 569 |
+
"type": "text",
|
| 570 |
+
"bbox": [
|
| 571 |
+
0.504,
|
| 572 |
+
0.929,
|
| 573 |
+
0.922,
|
| 574 |
+
0.946
|
| 575 |
+
],
|
| 576 |
+
"angle": 0,
|
| 577 |
+
"content": "where \\(W_{Q}, W_{K}, W_{V}\\) are linear projection matrices. Next, we"
|
| 578 |
+
}
|
| 579 |
+
],
|
| 580 |
+
[
|
| 581 |
+
{
|
| 582 |
+
"type": "page_number",
|
| 583 |
+
"bbox": [
|
| 584 |
+
0.078,
|
| 585 |
+
0.032,
|
| 586 |
+
0.087,
|
| 587 |
+
0.04
|
| 588 |
+
],
|
| 589 |
+
"angle": 0,
|
| 590 |
+
"content": "4"
|
| 591 |
+
},
|
| 592 |
+
{
|
| 593 |
+
"type": "header",
|
| 594 |
+
"bbox": [
|
| 595 |
+
0.497,
|
| 596 |
+
0.03,
|
| 597 |
+
0.92,
|
| 598 |
+
0.041
|
| 599 |
+
],
|
| 600 |
+
"angle": 0,
|
| 601 |
+
"content": "IEEE ROBOTICS AND AUTOMATION LETTERS. PREPRINT VERSION. JUNE 2022"
|
| 602 |
+
},
|
| 603 |
+
{
|
| 604 |
+
"type": "image",
|
| 605 |
+
"bbox": [
|
| 606 |
+
0.134,
|
| 607 |
+
0.071,
|
| 608 |
+
0.436,
|
| 609 |
+
0.26
|
| 610 |
+
],
|
| 611 |
+
"angle": 0,
|
| 612 |
+
"content": null
|
| 613 |
+
},
|
| 614 |
+
{
|
| 615 |
+
"type": "image_caption",
|
| 616 |
+
"bbox": [
|
| 617 |
+
0.075,
|
| 618 |
+
0.272,
|
| 619 |
+
0.356,
|
| 620 |
+
0.285
|
| 621 |
+
],
|
| 622 |
+
"angle": 0,
|
| 623 |
+
"content": "Fig. 2. The architecture of our transformer block."
|
| 624 |
+
},
|
| 625 |
+
{
|
| 626 |
+
"type": "text",
|
| 627 |
+
"bbox": [
|
| 628 |
+
0.074,
|
| 629 |
+
0.312,
|
| 630 |
+
0.492,
|
| 631 |
+
0.342
|
| 632 |
+
],
|
| 633 |
+
"angle": 0,
|
| 634 |
+
"content": "compute the similarity between the query and key by using the dot product to obtain the attention,"
|
| 635 |
+
},
|
| 636 |
+
{
|
| 637 |
+
"type": "equation",
|
| 638 |
+
"bbox": [
|
| 639 |
+
0.125,
|
| 640 |
+
0.347,
|
| 641 |
+
0.492,
|
| 642 |
+
0.381
|
| 643 |
+
],
|
| 644 |
+
"angle": 0,
|
| 645 |
+
"content": "\\[\n\\operatorname {A t t e n t i o n} (Q, K, V) = \\operatorname {S o f t M a x} \\left(\\frac {Q K ^ {T}}{\\sqrt {d}} + B\\right) V \\tag {4}\n\\]"
|
| 646 |
+
},
|
| 647 |
+
{
|
| 648 |
+
"type": "text",
|
| 649 |
+
"bbox": [
|
| 650 |
+
0.075,
|
| 651 |
+
0.388,
|
| 652 |
+
0.491,
|
| 653 |
+
0.419
|
| 654 |
+
],
|
| 655 |
+
"angle": 0,
|
| 656 |
+
"content": "where \\(\\sqrt{d}\\) is the scaling factor and \\(B\\) is the learnable relative position encoding."
|
| 657 |
+
},
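A minimal single-head sketch of Eqs. (3) and (4). Storing the relative position bias \(B\) as a dense learnable matrix over the tokens of one window is a simplification of the relative-position table used in Swin-style attention, and all dimensions are illustrative assumptions.

```python
import torch
import torch.nn as nn

class SimpleAttention(nn.Module):
    def __init__(self, dim, num_tokens):
        super().__init__()
        self.w_q = nn.Linear(dim, dim, bias=False)   # W_Q
        self.w_k = nn.Linear(dim, dim, bias=False)   # W_K
        self.w_v = nn.Linear(dim, dim, bias=False)   # W_V
        self.bias = nn.Parameter(torch.zeros(num_tokens, num_tokens))  # B
        self.scale = dim ** -0.5                     # 1 / sqrt(d)

    def forward(self, x):                            # x: (batch, N, dim)
        q, k, v = self.w_q(x), self.w_k(x), self.w_v(x)
        attn = torch.softmax(q @ k.transpose(-2, -1) * self.scale + self.bias, dim=-1)
        return attn @ v                              # Attention(Q, K, V)

out = SimpleAttention(dim=96, num_tokens=49)(torch.randn(2, 49, 96))  # 49 tokens = one 7x7 window
```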
|
| 658 |
+
{
|
| 659 |
+
"type": "text",
|
| 660 |
+
"bbox": [
|
| 661 |
+
0.074,
|
| 662 |
+
0.419,
|
| 663 |
+
0.492,
|
| 664 |
+
0.631
|
| 665 |
+
],
|
| 666 |
+
"angle": 0,
|
| 667 |
+
"content": "The computational complexity of self-attention grows quadratically with respect to the image size. To achieve computational efficiency, we leverage the advantages of CNNs and transformer and adopt the swim-transformer block [8] in our framework. The swim-transformer layer consists of two parts: local attention and global attention. Within the local attention, the calculation of self-attention is restricted to local regions where images patches are divided into non-overlapping local windows. Cross-window attention introduces connections between neighbors by sliding non-overlapping windows. The structure of swim-transformer block is presented in Fig. 2 which is composed of MLP, Layer Norm, window-based MSA and shifted-window MSA. The computation procedure of swim-transformer block is represented as follows:"
|
| 668 |
+
},
|
| 669 |
+
{
|
| 670 |
+
"type": "equation",
|
| 671 |
+
"bbox": [
|
| 672 |
+
0.154,
|
| 673 |
+
0.635,
|
| 674 |
+
0.491,
|
| 675 |
+
0.714
|
| 676 |
+
],
|
| 677 |
+
"angle": 0,
|
| 678 |
+
"content": "\\[\n\\begin{array}{l} \\hat {\\mathbf {x}} ^ {l} = \\operatorname {W - M S A} \\left(\\operatorname {L N} \\left(\\mathbf {x} ^ {l - 1}\\right)\\right) + \\mathbf {x} ^ {l - 1}, \\\\ \\mathbf {x} ^ {l} = \\operatorname {M L P} \\left(\\ln \\left(\\hat {\\mathbf {x}} ^ {l}\\right)\\right) + \\hat {\\mathbf {x}} ^ {l}, (5) \\\\ \\hat {\\mathbf {x}} ^ {l + 1} = \\operatorname {S W - M S A} \\left(\\operatorname {L N} \\left(\\mathbf {x} ^ {l}\\right)\\right) + \\mathbf {x} ^ {l}, (5) \\\\ \\mathbf {x} ^ {l + 1} = \\operatorname {M L P} \\left(\\operatorname {L N} \\left(\\hat {\\mathbf {x}} ^ {l + 1}\\right)\\right) + \\hat {\\mathbf {x}} ^ {l + 1} \\\\ \\end{array}\n\\]"
|
| 679 |
+
},
|
| 680 |
+
{
|
| 681 |
+
"type": "text",
|
| 682 |
+
"bbox": [
|
| 683 |
+
0.074,
|
| 684 |
+
0.718,
|
| 685 |
+
0.492,
|
| 686 |
+
0.884
|
| 687 |
+
],
|
| 688 |
+
"angle": 0,
|
| 689 |
+
"content": "where W-MSA and SW-MSA refer to the local window and global shifted window multi-head self-attention, respectively. \\(\\mathbf{x}^{l - 1}\\) denotes the feature of output from the previous layer. Then, the features will be sent into the window attention, W-MSA. There is a layer norm before both MLP and attention layer, and residual connections are applied to these modules. Between every two swim transformer blocks, there exists a patch merging operation that reduces the resolution of feature maps. The patch merging layer builds a hierarchical representation by gradually merging consecutive neighboring patches between successive transformer layers."
|
| 690 |
+
},
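A schematic sketch of the control flow in Eq. (5). A plain nn.MultiheadAttention stands in here for both W-MSA and SW-MSA (the real blocks restrict attention to 7x7 windows and cyclically shift them), so this is only an assumption-laden illustration of the LN / attention / MLP / residual ordering:

```python
import torch
import torch.nn as nn

class SwinPairSketch(nn.Module):
    def __init__(self, dim, heads=4, mlp_ratio=4):
        super().__init__()
        self.norm = nn.ModuleList([nn.LayerNorm(dim) for _ in range(4)])
        self.w_msa = nn.MultiheadAttention(dim, heads, batch_first=True)   # stand-in for W-MSA
        self.sw_msa = nn.MultiheadAttention(dim, heads, batch_first=True)  # stand-in for SW-MSA
        self.mlp = nn.ModuleList([
            nn.Sequential(nn.Linear(dim, dim * mlp_ratio), nn.GELU(),
                          nn.Linear(dim * mlp_ratio, dim)) for _ in range(2)])

    def forward(self, x):                          # x = x^{l-1}, shape (batch, N, dim)
        h = self.norm[0](x)
        x = self.w_msa(h, h, h)[0] + x             # x_hat^{l}
        x = self.mlp[0](self.norm[1](x)) + x       # x^{l}
        h = self.norm[2](x)
        x = self.sw_msa(h, h, h)[0] + x            # x_hat^{l+1}
        x = self.mlp[1](self.norm[3](x)) + x       # x^{l+1}
        return x

out = SwinPairSketch(dim=96)(torch.randn(2, 49, 96))
```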
|
| 691 |
+
{
|
| 692 |
+
"type": "text",
|
| 693 |
+
"bbox": [
|
| 694 |
+
0.074,
|
| 695 |
+
0.885,
|
| 696 |
+
0.492,
|
| 697 |
+
0.947
|
| 698 |
+
],
|
| 699 |
+
"angle": 0,
|
| 700 |
+
"content": "Grasp Transformer Decoder. The decoder generates an executable grasping configuration that allows the end-effector to move to the corresponding positions. We transform the planar grasp detection problem into a pixel-level prediction."
|
| 701 |
+
},
|
| 702 |
+
{
|
| 703 |
+
"type": "text",
|
| 704 |
+
"bbox": [
|
| 705 |
+
0.503,
|
| 706 |
+
0.069,
|
| 707 |
+
0.923,
|
| 708 |
+
0.281
|
| 709 |
+
],
|
| 710 |
+
"angle": 0,
|
| 711 |
+
"content": "Three grasping heads are attached in parallel to the top of the decoder, including a grasp confidence head \\( Q \\), a gripper angle head \\( \\Theta \\), and a gripper width head \\( W \\). The output of each head is a heat map with the same size as the input visual image. The grasp confidence head outputs a value between 0 and 1, which indicates the probability of the successful grasping at each pixel point. Likewise, the gripper width and angle heads output the width and rotation angle of the gripper when grasping at the corresponding point in the image, respectively. We treat the grasping posture estimation as a regression problem and use our transformer model to learn a mapping \\( F: I \\to \\tilde{G} \\) by minimizing the distances between the predicted grasping heatmaps \\( \\tilde{G}(Q, W, \\Theta) \\) and the ground truth, where \\( I \\) is the input data. The loss function is defined as follows:"
|
| 712 |
+
},
|
| 713 |
+
{
|
| 714 |
+
"type": "equation",
|
| 715 |
+
"bbox": [
|
| 716 |
+
0.596,
|
| 717 |
+
0.287,
|
| 718 |
+
0.921,
|
| 719 |
+
0.331
|
| 720 |
+
],
|
| 721 |
+
"angle": 0,
|
| 722 |
+
"content": "\\[\n\\mathcal {L} = \\sum_ {i} ^ {N} \\sum_ {m \\in \\{Q, W, \\Theta \\}} \\| \\tilde {G} _ {i} ^ {m} - L _ {i} ^ {m} \\| ^ {2} \\tag {6}\n\\]"
|
| 723 |
+
},
|
| 724 |
+
{
|
| 725 |
+
"type": "text",
|
| 726 |
+
"bbox": [
|
| 727 |
+
0.504,
|
| 728 |
+
0.337,
|
| 729 |
+
0.921,
|
| 730 |
+
0.366
|
| 731 |
+
],
|
| 732 |
+
"angle": 0,
|
| 733 |
+
"content": "where \\(N\\) is the number of sample size and \\(L_{i}\\) is the corresponding label."
|
| 734 |
+
},
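A brief sketch of the objective in Eq. (6), using the mean squared error mentioned later in the implementation details; the dictionary layout of the three heads is an assumption made for readability.

```python
import torch
import torch.nn.functional as F

def grasp_loss(pred, target):
    """pred/target: dicts of 'Q', 'W', 'Theta' heatmaps, each of shape (B, 1, H, W)."""
    return sum(F.mse_loss(pred[m], target[m]) for m in ("Q", "W", "Theta"))

B, H, W = 4, 224, 224
pred = {m: torch.rand(B, 1, H, W, requires_grad=True) for m in ("Q", "W", "Theta")}
target = {m: torch.rand(B, 1, H, W) for m in ("Q", "W", "Theta")}
grasp_loss(pred, target).backward()
```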
|
| 735 |
+
{
|
| 736 |
+
"type": "text",
|
| 737 |
+
"bbox": [
|
| 738 |
+
0.504,
|
| 739 |
+
0.367,
|
| 740 |
+
0.921,
|
| 741 |
+
0.411
|
| 742 |
+
],
|
| 743 |
+
"angle": 0,
|
| 744 |
+
"content": "The ultimate grasp location is the position with the highest grasp confidence by retrieving the grasp quality heatmap, defined as:"
|
| 745 |
+
},
|
| 746 |
+
{
|
| 747 |
+
"type": "equation",
|
| 748 |
+
"bbox": [
|
| 749 |
+
0.634,
|
| 750 |
+
0.413,
|
| 751 |
+
0.92,
|
| 752 |
+
0.43
|
| 753 |
+
],
|
| 754 |
+
"angle": 0,
|
| 755 |
+
"content": "\\[\n\\mathcal {G} _ {p o s} ^ {*} = \\operatorname {a r g m a x} _ {p o s} Q, \\tag {7}\n\\]"
|
| 756 |
+
},
|
| 757 |
+
{
|
| 758 |
+
"type": "text",
|
| 759 |
+
"bbox": [
|
| 760 |
+
0.503,
|
| 761 |
+
0.437,
|
| 762 |
+
0.921,
|
| 763 |
+
0.482
|
| 764 |
+
],
|
| 765 |
+
"angle": 0,
|
| 766 |
+
"content": "where \\(Q\\) is the grasp confidence map. Afterward, we extract the predicted angle \\(\\theta\\) and angle \\(w\\) of the corresponding position from the angle and width heatmaps."
|
| 767 |
+
},
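The decoding step of Eq. (7) reduces to an argmax over the quality map followed by two look-ups; a minimal sketch with illustrative arrays:

```python
import numpy as np

def decode_grasp(Q, Theta, W):
    """Q, Theta, W: (H, W) heatmaps produced by the three output heads."""
    row, col = np.unravel_index(np.argmax(Q), Q.shape)   # argmax_pos Q
    return (col, row), Theta[row, col], W[row, col]      # position, angle, width

Q = np.random.rand(224, 224)
Theta = np.random.uniform(-np.pi / 2, np.pi / 2, (224, 224))
W = np.random.rand(224, 224) * 150
pos, angle, width = decode_grasp(Q, Theta, W)
```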
|
| 768 |
+
{
|
| 769 |
+
"type": "text",
|
| 770 |
+
"bbox": [
|
| 771 |
+
0.503,
|
| 772 |
+
0.483,
|
| 773 |
+
0.922,
|
| 774 |
+
0.693
|
| 775 |
+
],
|
| 776 |
+
"angle": 0,
|
| 777 |
+
"content": "In our grasp detection decoder, we also adopt swim transformer block to reduce the computational complexity. Swin attention aggregates multi-scale features and builds a hierarchical representation. And skip-connections merge the features learned at these different stages for further fusion to produce a better grasp posture. Analogous to U-net [19], skip-connections are implemented by concatenating features from the \\(i\\)-th layer of the encoder directly into the layer \\(i\\)-th in the decoder. In the decoding phase, following the patch expanding layer, the concatenated features are taken as input to the next attention block stage. Simultaneously, we can learn the relationship between the fused features where the features in the encoder can be used as queries and keys to interact with the counterparts in the decoder for self-attention computing."
|
| 778 |
+
},
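A small sketch of the skip-connection fusion just described: the upsampled decoder tokens at stage i are concatenated with the encoder tokens of the same stage and projected back to the stage width. The linear projection and the token counts are assumptions for illustration.

```python
import torch
import torch.nn as nn

dim = 192
fuse = nn.Linear(2 * dim, dim)             # projection applied after concatenation

decoder_feat = torch.randn(1, 784, dim)    # tokens after a patch expanding step (stage i)
encoder_feat = torch.randn(1, 784, dim)    # skip features from encoder stage i
fused = fuse(torch.cat([decoder_feat, encoder_feat], dim=-1))   # (1, 784, dim)
```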
|
| 779 |
+
{
|
| 780 |
+
"type": "text",
|
| 781 |
+
"bbox": [
|
| 782 |
+
0.503,
|
| 783 |
+
0.694,
|
| 784 |
+
0.922,
|
| 785 |
+
0.769
|
| 786 |
+
],
|
| 787 |
+
"angle": 0,
|
| 788 |
+
"content": "A benefit of our pixel-level grasp representation is that only a single forward propagation is required to obtain the best grasp postures within the global visual scene, avoiding the need to generate multiple grasp candidates and saving the computation expense."
|
| 789 |
+
},
|
| 790 |
+
{
|
| 791 |
+
"type": "title",
|
| 792 |
+
"bbox": [
|
| 793 |
+
0.645,
|
| 794 |
+
0.787,
|
| 795 |
+
0.782,
|
| 796 |
+
0.8
|
| 797 |
+
],
|
| 798 |
+
"angle": 0,
|
| 799 |
+
"content": "IV. EXPERIMENTS"
|
| 800 |
+
},
|
| 801 |
+
{
|
| 802 |
+
"type": "text",
|
| 803 |
+
"bbox": [
|
| 804 |
+
0.503,
|
| 805 |
+
0.807,
|
| 806 |
+
0.921,
|
| 807 |
+
0.881
|
| 808 |
+
],
|
| 809 |
+
"angle": 0,
|
| 810 |
+
"content": "In this section, extensive experiments are carried out to validate the performance of the proposed TF-Grasp method. We verify the performance of TF-Grasp on two popular grasping datasets and then evaluate its effectiveness on a real Franka Panda robotic manipulator."
|
| 811 |
+
},
|
| 812 |
+
{
|
| 813 |
+
"type": "text",
|
| 814 |
+
"bbox": [
|
| 815 |
+
0.504,
|
| 816 |
+
0.882,
|
| 817 |
+
0.921,
|
| 818 |
+
0.912
|
| 819 |
+
],
|
| 820 |
+
"angle": 0,
|
| 821 |
+
"content": "The goal of this section tends to answer the following questions:"
|
| 822 |
+
},
|
| 823 |
+
{
|
| 824 |
+
"type": "text",
|
| 825 |
+
"bbox": [
|
| 826 |
+
0.521,
|
| 827 |
+
0.915,
|
| 828 |
+
0.921,
|
| 829 |
+
0.944
|
| 830 |
+
],
|
| 831 |
+
"angle": 0,
|
| 832 |
+
"content": "- Is the transformer-based grasp detection model better than CNN-based models?"
|
| 833 |
+
}
|
| 834 |
+
],
|
| 835 |
+
[
|
| 836 |
+
{
|
| 837 |
+
"type": "header",
|
| 838 |
+
"bbox": [
|
| 839 |
+
0.077,
|
| 840 |
+
0.03,
|
| 841 |
+
0.428,
|
| 842 |
+
0.041
|
| 843 |
+
],
|
| 844 |
+
"angle": 0,
|
| 845 |
+
"content": "WANG et al.: WHEN TRANSFORMER MEETS ROBOTIC GRASPING"
|
| 846 |
+
},
|
| 847 |
+
{
|
| 848 |
+
"type": "page_number",
|
| 849 |
+
"bbox": [
|
| 850 |
+
0.912,
|
| 851 |
+
0.031,
|
| 852 |
+
0.92,
|
| 853 |
+
0.041
|
| 854 |
+
],
|
| 855 |
+
"angle": 0,
|
| 856 |
+
"content": "5"
|
| 857 |
+
},
|
| 858 |
+
{
|
| 859 |
+
"type": "image",
|
| 860 |
+
"bbox": [
|
| 861 |
+
0.085,
|
| 862 |
+
0.071,
|
| 863 |
+
0.191,
|
| 864 |
+
0.152
|
| 865 |
+
],
|
| 866 |
+
"angle": 0,
|
| 867 |
+
"content": null
|
| 868 |
+
},
|
| 869 |
+
{
|
| 870 |
+
"type": "image",
|
| 871 |
+
"bbox": [
|
| 872 |
+
0.197,
|
| 873 |
+
0.071,
|
| 874 |
+
0.305,
|
| 875 |
+
0.152
|
| 876 |
+
],
|
| 877 |
+
"angle": 0,
|
| 878 |
+
"content": null
|
| 879 |
+
},
|
| 880 |
+
{
|
| 881 |
+
"type": "image",
|
| 882 |
+
"bbox": [
|
| 883 |
+
0.312,
|
| 884 |
+
0.071,
|
| 885 |
+
0.419,
|
| 886 |
+
0.152
|
| 887 |
+
],
|
| 888 |
+
"angle": 0,
|
| 889 |
+
"content": null
|
| 890 |
+
},
|
| 891 |
+
{
|
| 892 |
+
"type": "image",
|
| 893 |
+
"bbox": [
|
| 894 |
+
0.426,
|
| 895 |
+
0.071,
|
| 896 |
+
0.532,
|
| 897 |
+
0.152
|
| 898 |
+
],
|
| 899 |
+
"angle": 0,
|
| 900 |
+
"content": null
|
| 901 |
+
},
|
| 902 |
+
{
|
| 903 |
+
"type": "image",
|
| 904 |
+
"bbox": [
|
| 905 |
+
0.539,
|
| 906 |
+
0.071,
|
| 907 |
+
0.645,
|
| 908 |
+
0.152
|
| 909 |
+
],
|
| 910 |
+
"angle": 0,
|
| 911 |
+
"content": null
|
| 912 |
+
},
|
| 913 |
+
{
|
| 914 |
+
"type": "image",
|
| 915 |
+
"bbox": [
|
| 916 |
+
0.653,
|
| 917 |
+
0.071,
|
| 918 |
+
0.759,
|
| 919 |
+
0.152
|
| 920 |
+
],
|
| 921 |
+
"angle": 0,
|
| 922 |
+
"content": null
|
| 923 |
+
},
|
| 924 |
+
{
|
| 925 |
+
"type": "image",
|
| 926 |
+
"bbox": [
|
| 927 |
+
0.766,
|
| 928 |
+
0.071,
|
| 929 |
+
0.872,
|
| 930 |
+
0.152
|
| 931 |
+
],
|
| 932 |
+
"angle": 0,
|
| 933 |
+
"content": null
|
| 934 |
+
},
|
| 935 |
+
{
|
| 936 |
+
"type": "image",
|
| 937 |
+
"bbox": [
|
| 938 |
+
0.085,
|
| 939 |
+
0.166,
|
| 940 |
+
0.191,
|
| 941 |
+
0.248
|
| 942 |
+
],
|
| 943 |
+
"angle": 0,
|
| 944 |
+
"content": null
|
| 945 |
+
},
|
| 946 |
+
{
|
| 947 |
+
"type": "image",
|
| 948 |
+
"bbox": [
|
| 949 |
+
0.198,
|
| 950 |
+
0.166,
|
| 951 |
+
0.305,
|
| 952 |
+
0.248
|
| 953 |
+
],
|
| 954 |
+
"angle": 0,
|
| 955 |
+
"content": null
|
| 956 |
+
},
|
| 957 |
+
{
|
| 958 |
+
"type": "image",
|
| 959 |
+
"bbox": [
|
| 960 |
+
0.312,
|
| 961 |
+
0.166,
|
| 962 |
+
0.418,
|
| 963 |
+
0.248
|
| 964 |
+
],
|
| 965 |
+
"angle": 0,
|
| 966 |
+
"content": null
|
| 967 |
+
},
|
| 968 |
+
{
|
| 969 |
+
"type": "image",
|
| 970 |
+
"bbox": [
|
| 971 |
+
0.426,
|
| 972 |
+
0.167,
|
| 973 |
+
0.532,
|
| 974 |
+
0.248
|
| 975 |
+
],
|
| 976 |
+
"angle": 0,
|
| 977 |
+
"content": null
|
| 978 |
+
},
|
| 979 |
+
{
|
| 980 |
+
"type": "image",
|
| 981 |
+
"bbox": [
|
| 982 |
+
0.539,
|
| 983 |
+
0.167,
|
| 984 |
+
0.645,
|
| 985 |
+
0.248
|
| 986 |
+
],
|
| 987 |
+
"angle": 0,
|
| 988 |
+
"content": null
|
| 989 |
+
},
|
| 990 |
+
{
|
| 991 |
+
"type": "image",
|
| 992 |
+
"bbox": [
|
| 993 |
+
0.653,
|
| 994 |
+
0.167,
|
| 995 |
+
0.758,
|
| 996 |
+
0.248
|
| 997 |
+
],
|
| 998 |
+
"angle": 0,
|
| 999 |
+
"content": null
|
| 1000 |
+
},
|
| 1001 |
+
{
|
| 1002 |
+
"type": "image",
|
| 1003 |
+
"bbox": [
|
| 1004 |
+
0.766,
|
| 1005 |
+
0.167,
|
| 1006 |
+
0.872,
|
| 1007 |
+
0.248
|
| 1008 |
+
],
|
| 1009 |
+
"angle": 0,
|
| 1010 |
+
"content": null
|
| 1011 |
+
},
|
| 1012 |
+
{
|
| 1013 |
+
"type": "image_caption",
|
| 1014 |
+
"bbox": [
|
| 1015 |
+
0.075,
|
| 1016 |
+
0.262,
|
| 1017 |
+
0.904,
|
| 1018 |
+
0.276
|
| 1019 |
+
],
|
| 1020 |
+
"angle": 0,
|
| 1021 |
+
"content": "Fig. 3. The visualized attention heatmaps learned by our method, which show that our transformer model can learn the concepts beneficial for grasping."
|
| 1022 |
+
},
|
| 1023 |
+
{
|
| 1024 |
+
"type": "text",
|
| 1025 |
+
"bbox": [
|
| 1026 |
+
0.091,
|
| 1027 |
+
0.302,
|
| 1028 |
+
0.492,
|
| 1029 |
+
0.332
|
| 1030 |
+
],
|
| 1031 |
+
"angle": 0,
|
| 1032 |
+
"content": "- If true, what makes the transformer-based grasp detection model outperforming others?"
|
| 1033 |
+
},
|
| 1034 |
+
{
|
| 1035 |
+
"type": "title",
|
| 1036 |
+
"bbox": [
|
| 1037 |
+
0.075,
|
| 1038 |
+
0.352,
|
| 1039 |
+
0.314,
|
| 1040 |
+
0.368
|
| 1041 |
+
],
|
| 1042 |
+
"angle": 0,
|
| 1043 |
+
"content": "A. Datasets and Experiment Setup"
|
| 1044 |
+
},
|
| 1045 |
+
{
|
| 1046 |
+
"type": "text",
|
| 1047 |
+
"bbox": [
|
| 1048 |
+
0.074,
|
| 1049 |
+
0.372,
|
| 1050 |
+
0.491,
|
| 1051 |
+
0.52
|
| 1052 |
+
],
|
| 1053 |
+
"angle": 0,
|
| 1054 |
+
"content": "The Cornell grasping data [14] is a multi-object dataset that contains 885 images. The resolution of each image is \\(640 \\times 480\\). The whole dataset is relatively small and we use various data augmentation techniques such as rotation, zooms, and random cropping to avoid overfitting. We then validate the performance of TF-Grasp on the Jacquard dataset [21] which is generated in a simulator via CAD models. The Jacquard dataset is fairly large, containing over 50k images of 11k object categories, and there are over 1 million annotated grasp labels."
|
| 1055 |
+
},
|
| 1056 |
+
{
|
| 1057 |
+
"type": "text",
|
| 1058 |
+
"bbox": [
|
| 1059 |
+
0.075,
|
| 1060 |
+
0.523,
|
| 1061 |
+
0.49,
|
| 1062 |
+
0.553
|
| 1063 |
+
],
|
| 1064 |
+
"angle": 0,
|
| 1065 |
+
"content": "Evaluation Metric. A predicted grasp is regarded as correct if the following conditions are satisfied."
|
| 1066 |
+
},
|
| 1067 |
+
{
|
| 1068 |
+
"type": "text",
|
| 1069 |
+
"bbox": [
|
| 1070 |
+
0.075,
|
| 1071 |
+
0.553,
|
| 1072 |
+
0.49,
|
| 1073 |
+
0.583
|
| 1074 |
+
],
|
| 1075 |
+
"angle": 0,
|
| 1076 |
+
"content": "i) The discrepancy between the predicted grasping angle and the ground truth is within \\(30^{\\circ}\\)."
|
| 1077 |
+
},
|
| 1078 |
+
{
|
| 1079 |
+
"type": "text",
|
| 1080 |
+
"bbox": [
|
| 1081 |
+
0.091,
|
| 1082 |
+
0.584,
|
| 1083 |
+
0.49,
|
| 1084 |
+
0.599
|
| 1085 |
+
],
|
| 1086 |
+
"angle": 0,
|
| 1087 |
+
"content": "ii) The Jaccard index defined in Eq. (8) is greater than 0.25."
|
| 1088 |
+
},
|
| 1089 |
+
{
|
| 1090 |
+
"type": "list",
|
| 1091 |
+
"bbox": [
|
| 1092 |
+
0.075,
|
| 1093 |
+
0.553,
|
| 1094 |
+
0.49,
|
| 1095 |
+
0.599
|
| 1096 |
+
],
|
| 1097 |
+
"angle": 0,
|
| 1098 |
+
"content": null
|
| 1099 |
+
},
|
| 1100 |
+
{
|
| 1101 |
+
"type": "equation",
|
| 1102 |
+
"bbox": [
|
| 1103 |
+
0.202,
|
| 1104 |
+
0.605,
|
| 1105 |
+
0.49,
|
| 1106 |
+
0.638
|
| 1107 |
+
],
|
| 1108 |
+
"angle": 0,
|
| 1109 |
+
"content": "\\[\nJ \\left(\\mathcal {R} ^ {*}, \\mathcal {R}\\right) = \\frac {\\left| \\mathcal {R} ^ {*} \\cap \\mathcal {R} \\right|}{\\left| \\mathcal {R} ^ {*} \\cup \\mathcal {R} \\right|} \\tag {8}\n\\]"
|
| 1110 |
+
},
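A sketch of how the two correctness conditions can be checked for a pair of grasp rectangles; shapely is assumed available for the polygon intersection and union of Eq. (8), and angle wrap-around is ignored for brevity.

```python
import numpy as np
from shapely.geometry import Polygon

def rect_polygon(x, y, theta, w, h):
    """Shapely polygon of a rotated grasp rectangle centered at (x, y)."""
    c, s = np.cos(theta), np.sin(theta)
    R = np.array([[c, -s], [s, c]])
    corners = [(-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2), (-w / 2, h / 2)]
    return Polygon([tuple(np.array([x, y]) + R @ np.array(p)) for p in corners])

def is_correct(pred, gt, angle_tol=np.deg2rad(30), iou_thresh=0.25):
    """pred, gt: grasp tuples (x, y, theta, w, h)."""
    if abs(pred[2] - gt[2]) > angle_tol:            # condition (i): angle error below 30 degrees
        return False
    p, g = rect_polygon(*pred), rect_polygon(*gt)
    iou = p.intersection(g).area / p.union(g).area  # Jaccard index, Eq. (8)
    return iou > iou_thresh                         # condition (ii): IoU above 0.25

print(is_correct((100, 100, 0.1, 40, 20), (102, 98, 0.2, 42, 22)))
```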
|
| 1111 |
+
{
|
| 1112 |
+
"type": "text",
|
| 1113 |
+
"bbox": [
|
| 1114 |
+
0.074,
|
| 1115 |
+
0.643,
|
| 1116 |
+
0.493,
|
| 1117 |
+
0.946
|
| 1118 |
+
],
|
| 1119 |
+
"angle": 0,
|
| 1120 |
+
"content": "TF-Grasp takes a \\(224 \\times 224\\) image as input and outputs three pixel-wise maps with the same resolution as the input. The input is normalized by subtracting its mean and dividing the standard deviation. We follow the common strategy to train the grasp transformer. Both the encoder and decoder contain four swim-attention blocks and each consists of 1, 2, 4, 8 attention heads. The window size is 7. At each training step, a batch of samples is randomly sampled from the training set and we use the ground truth as the target values to train our neural network. Concretely, we utilize the mean squared error as the loss function and apply AdamW [28] as the optimizer. The default size of batch size is set to 64. The patch partition layer is implemented by convolutions with kernels of \\(p \\times p\\) and a stride \\(p\\). In our implementation, \\(p\\) is set to 4. In order to preserve a one-to-one mapping of the angle \\(\\Theta\\) between \\([- \\frac{\\pi}{2}, \\frac{\\pi}{2}]\\), we decode the learning of angle into two components, \\(\\sin(2\\Theta)\\) and \\(\\cos(2\\Theta)\\). In this way, the final angle is obtained by \\(\\arctan \\left(\\frac{\\sin 2\\Theta}{\\cos 2\\Theta}\\right)/2\\). TF-Grasp is implemented by PyTorch, and the entire grasp detection system is running on the Ubuntu 18.04 desktop with Intel Core i9 CPU and NVIDIA 3090 GPU."
|
| 1121 |
+
},
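A small sketch of the angle encoding described above: regressing sin(2Θ) and cos(2Θ) keeps the mapping of Θ within [-π/2, π/2] one-to-one, and the angle is recovered with atan2 (a numerically safer form of the stated arctan of the ratio).

```python
import numpy as np

def encode_angle(theta):
    return np.sin(2 * theta), np.cos(2 * theta)

def decode_angle(sin2t, cos2t):
    # arctan(sin 2Theta / cos 2Theta) / 2, computed with atan2 for stability
    return np.arctan2(sin2t, cos2t) / 2.0

theta = 1.2                                # radians, inside [-pi/2, pi/2]
print(decode_angle(*encode_angle(theta)))  # recovers ~1.2
```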
|
| 1122 |
+
{
|
| 1123 |
+
"type": "table_caption",
|
| 1124 |
+
"bbox": [
|
| 1125 |
+
0.57,
|
| 1126 |
+
0.303,
|
| 1127 |
+
0.857,
|
| 1128 |
+
0.327
|
| 1129 |
+
],
|
| 1130 |
+
"angle": 0,
|
| 1131 |
+
"content": "TABLEI THE ACCURACY ON CORNELL GRASPING DATASET."
|
| 1132 |
+
},
|
| 1133 |
+
{
|
| 1134 |
+
"type": "table",
|
| 1135 |
+
"bbox": [
|
| 1136 |
+
0.508,
|
| 1137 |
+
0.335,
|
| 1138 |
+
0.922,
|
| 1139 |
+
0.589
|
| 1140 |
+
],
|
| 1141 |
+
"angle": 0,
|
| 1142 |
+
"content": "<table><tr><td rowspan=\"2\">Method</td><td rowspan=\"2\">Input</td><td colspan=\"2\">Accuracy(%)</td><td rowspan=\"2\">Time (ms)</td></tr><tr><td>IW</td><td>OW</td></tr><tr><td>Fast Search [20]</td><td>RGB-D</td><td>60.5</td><td>58.3</td><td>5000</td></tr><tr><td>GG-CNN [17]</td><td>D</td><td>73.0</td><td>69.0</td><td>19</td></tr><tr><td>SAE [14]</td><td>RGB-D</td><td>73.9</td><td>75.6</td><td>1350</td></tr><tr><td>Two-stage closed-loop [22]</td><td>RGB-D</td><td>85.3</td><td>-</td><td>140</td></tr><tr><td>AlexNet, MultiGrasp [5]</td><td>RGB-D</td><td>88.0</td><td>87.1</td><td>76</td></tr><tr><td>STEM-CaRFs [23]</td><td>RGB-D</td><td>88.2</td><td>87.5</td><td>-</td></tr><tr><td>GRPN [24]</td><td>RGB</td><td>88.7</td><td>-</td><td>200</td></tr><tr><td>ResNet-50x2 [3]</td><td>RGB-D</td><td>89.2</td><td>88.9</td><td>103</td></tr><tr><td>GraspNet [12]</td><td>RGB-D</td><td>90.2</td><td>90.6</td><td>24</td></tr><tr><td>ZF-net [25]</td><td>RGB-D</td><td>93.2</td><td>89.1</td><td>-</td></tr><tr><td>E2E-net [26]</td><td>RGB</td><td>98.2</td><td>-</td><td>63</td></tr><tr><td>GR-ConvNet [27]</td><td>D</td><td>93.2</td><td>94.3</td><td>19</td></tr><tr><td>GR-ConvNet [27]</td><td>RGB</td><td>96.6</td><td>95.5</td><td>19</td></tr><tr><td>GR-ConvNet [27]</td><td>RGB-D</td><td>97.7</td><td>96.6</td><td>20</td></tr><tr><td rowspan=\"3\">TF-Grasp</td><td>D</td><td>95.2</td><td>94.9</td><td>41.1</td></tr><tr><td>RGB</td><td>96.78</td><td>95.0</td><td>41.3</td></tr><tr><td>RGB-D</td><td>97.99</td><td>96.7</td><td>41.6</td></tr></table>"
|
| 1143 |
+
},
|
| 1144 |
+
{
|
| 1145 |
+
"type": "title",
|
| 1146 |
+
"bbox": [
|
| 1147 |
+
0.505,
|
| 1148 |
+
0.618,
|
| 1149 |
+
0.766,
|
| 1150 |
+
0.634
|
| 1151 |
+
],
|
| 1152 |
+
"angle": 0,
|
| 1153 |
+
"content": "B. Experimental Results and Analysis"
|
| 1154 |
+
},
|
| 1155 |
+
{
|
| 1156 |
+
"type": "text",
|
| 1157 |
+
"bbox": [
|
| 1158 |
+
0.503,
|
| 1159 |
+
0.643,
|
| 1160 |
+
0.922,
|
| 1161 |
+
0.946
|
| 1162 |
+
],
|
| 1163 |
+
"angle": 0,
|
| 1164 |
+
"content": "To show its effectiveness, our approach is compared with a number of baselines under the same experimental conditions, i.e., evaluation metric. The results of image-wise (IW) and object-wise (OW) settings in the public Cornell grasping dataset are present in Table I. Since the Cornell dataset is relatively small, we follow the setting of previous works [3], [5], [14] by adopting a five-fold cross-validation. Also, to make the comparison fair and comprehensive, the input modalities and running time are considered. For all compared baselines, we use the data reported in their original papers. Taking as input only the depth information, our TF-Grasp achieves an accuracy of \\(95.2\\%\\) which is competitive to the state-of-the-art. When using both depth and RGB data, our model obtains \\(97.99\\%\\) accuracy. For Table II, we use \\(90\\%\\) data of the Jacquard dataset as the training set and the remaining \\(10\\%\\) as the validation set. In addition, our model takes about 41ms to process a single image using the Intel Core i9-10900X CPU processor, which is competitive with the state-of-art approaches and basically meets the real-time requirements. The transformer grasping model exhibits a better accuracy"
|
| 1165 |
+
}
|
| 1166 |
+
],
|
| 1167 |
+
[
|
| 1168 |
+
{
|
| 1169 |
+
"type": "page_number",
|
| 1170 |
+
"bbox": [
|
| 1171 |
+
0.078,
|
| 1172 |
+
0.032,
|
| 1173 |
+
0.087,
|
| 1174 |
+
0.04
|
| 1175 |
+
],
|
| 1176 |
+
"angle": 0,
|
| 1177 |
+
"content": "6"
|
| 1178 |
+
},
|
| 1179 |
+
{
|
| 1180 |
+
"type": "header",
|
| 1181 |
+
"bbox": [
|
| 1182 |
+
0.498,
|
| 1183 |
+
0.031,
|
| 1184 |
+
0.92,
|
| 1185 |
+
0.041
|
| 1186 |
+
],
|
| 1187 |
+
"angle": 0,
|
| 1188 |
+
"content": "IEEE ROBOTICS AND AUTOMATION LETTERS. PREPRINT VERSION. JUNE 2022"
|
| 1189 |
+
},
|
| 1190 |
+
{
|
| 1191 |
+
"type": "table_caption",
|
| 1192 |
+
"bbox": [
|
| 1193 |
+
0.139,
|
| 1194 |
+
0.073,
|
| 1195 |
+
0.429,
|
| 1196 |
+
0.094
|
| 1197 |
+
],
|
| 1198 |
+
"angle": 0,
|
| 1199 |
+
"content": "TABLE II THE ACCURACY ON JACQUARD GRASPING DATASET."
|
| 1200 |
+
},
|
| 1201 |
+
{
|
| 1202 |
+
"type": "table",
|
| 1203 |
+
"bbox": [
|
| 1204 |
+
0.089,
|
| 1205 |
+
0.105,
|
| 1206 |
+
0.474,
|
| 1207 |
+
0.268
|
| 1208 |
+
],
|
| 1209 |
+
"angle": 0,
|
| 1210 |
+
"content": "<table><tr><td>Authors</td><td>Method</td><td>Input</td><td>Accuracy (%)</td></tr><tr><td>Depierre [21]</td><td>Jacquard</td><td>RGB-D</td><td>74.2</td></tr><tr><td>Morrison [17]</td><td>GG-CNN2</td><td>D</td><td>84</td></tr><tr><td>Zhou [29]</td><td>FCGN, ResNet-101</td><td>RGB</td><td>91.8</td></tr><tr><td>Alexandre [16]</td><td>GQ-STN</td><td>D</td><td>70.8</td></tr><tr><td>Zhang [11]</td><td>ROI-GD</td><td>RGB</td><td>90.4</td></tr><tr><td>Stefan [26]</td><td>Det Seg</td><td>RGB</td><td>92.59</td></tr><tr><td>Stefan [26]</td><td>Det Seg Refine</td><td>RGB</td><td>92.95</td></tr><tr><td>Kumra [27]</td><td>GR-ConvNet</td><td>D</td><td>93.7</td></tr><tr><td>Kumra [27]</td><td>GR-ConvNet</td><td>RGB</td><td>91.8</td></tr><tr><td>Kumra [27]</td><td>GR-ConvNet</td><td>RGB-D</td><td>94.6</td></tr><tr><td rowspan=\"3\">Our</td><td>TF-Grasp</td><td>D</td><td>93.1</td></tr><tr><td>TF-Grasp</td><td>RGB</td><td>93.57</td></tr><tr><td>TF-Grasp</td><td>RGB-D</td><td>94.6</td></tr></table>"
|
| 1211 |
+
},
|
| 1212 |
+
{
|
| 1213 |
+
"type": "text",
|
| 1214 |
+
"bbox": [
|
| 1215 |
+
0.079,
|
| 1216 |
+
0.308,
|
| 1217 |
+
0.49,
|
| 1218 |
+
0.396
|
| 1219 |
+
],
|
| 1220 |
+
"angle": 0,
|
| 1221 |
+
"content": "on both datasets compared to conventional CNN models. Our proposed approach achieves a higher accuracy of \\(94.6\\%\\) which is on-par or superior to previous methods. The results on the Cornell and Jacquard datasets all indicate that the model with the attention mechanism is more suitable for visual grasping tasks."
|
| 1222 |
+
},
|
| 1223 |
+
{
|
| 1224 |
+
"type": "text",
|
| 1225 |
+
"bbox": [
|
| 1226 |
+
0.079,
|
| 1227 |
+
0.4,
|
| 1228 |
+
0.49,
|
| 1229 |
+
0.685
|
| 1230 |
+
],
|
| 1231 |
+
"angle": 0,
|
| 1232 |
+
"content": "Despite the fact that our model is trained on a single object dataset, it can be well adapted to multi-object environments with the help of attention mechanisms. In addition, to evaluate the advantages of the transformer versus CNNs for visual grasping tasks, we use the original convolution layers, residual layers, and our transformer as feature extractors to test detection accuracy on different objects on the Cornell dataset. We apply an object-wise split to the Cornell dataset and Fig. 5 shows the detection accuracy of objects not seen during the training phase. All objects are subsets of the Cornell dataset and are evaluated 5 times. All models shown in Fig. 5 employ an encoder-decoder architecture with 4 stages in order to guarantee a fair comparison, where the original-conv is a fully convolutional neural network and resnet-conv is to replace the original convolution layer with the residual block. The result of different models is shown in Fig. 5. Note that the transformer outperforms original convolutions on all selected objects and is marginally better or on-par with the residual network."
|
| 1233 |
+
},
|
| 1234 |
+
{
|
| 1235 |
+
"type": "text",
|
| 1236 |
+
"bbox": [
|
| 1237 |
+
0.079,
|
| 1238 |
+
0.689,
|
| 1239 |
+
0.49,
|
| 1240 |
+
0.944
|
| 1241 |
+
],
|
| 1242 |
+
"angle": 0,
|
| 1243 |
+
"content": "These results demonstrate that the transformer improves robotic grasp detection. We conjecture that prior methods that rely on local operations of the convolution layers might ignore the dependencies between long-range pixels. Instead, our approach leverages the attention mechanism to exploit both local and global information and integrates features that are useful for grasping. To better demonstrate whether the transformer-based grasping model can model the relationships between objects and across the scene, we present the multi-object grasping results and grasping quality heatmaps of the transformer and CNN in Fig. 4. Our aim is to verify that the transformer is preferred over CNN for visual grasping tasks and is better at capturing global and local information. From Fig. 4, we can see that the grasp rectangles predicted by CNN have the right grasp position in most cases, but the predicted gripper angle and width are often not appropriate. In some cases, CNN even generates grasping rectangles in the"
|
| 1244 |
+
},
|
| 1245 |
+
{
|
| 1246 |
+
"type": "text",
|
| 1247 |
+
"bbox": [
|
| 1248 |
+
0.509,
|
| 1249 |
+
0.07,
|
| 1250 |
+
0.92,
|
| 1251 |
+
0.431
|
| 1252 |
+
],
|
| 1253 |
+
"angle": 0,
|
| 1254 |
+
"content": "background. With the attention mechanism, our transformer-based model is able to clearly identify the objects from the background. In the second row of Fig. 4, the grasping quality images show that the CNN-based approach can not identify the graspable area and consider the entire region of objects as a graspable zone with high success probabilities. Instead, as shown in the fourth row of Fig. 4, the transformer-based model is prone to capture the area that is easy to grasp due to its larger receptive field. For each attention block, the attention operation establishes the inter-element relationships through self-attention, and the subsequent multilayer-perceptron (MLP) module further models the inherent relation between each element. The layer normalization and residual connections that interleave these two operations keep the training stable and efficient. In contrast, in CNN, the receptive field of each convolutional kernel is limited. To build a larger receptive field, the model often needs to repeatedly stack convolutional layers to gain global and semantically rich features. However, such a method in general results in the loss of detailed feature information such as the position and shape information of objects that are essential for grasping tasks. Therefore, we exploit a transformer-based model which can better capture not only the global information but also detailed features (e.g., the position and shape information)."
|
| 1255 |
+
},
|
| 1256 |
+
{
|
| 1257 |
+
"type": "image_caption",
|
| 1258 |
+
"bbox": [
|
| 1259 |
+
0.509,
|
| 1260 |
+
0.457,
|
| 1261 |
+
0.678,
|
| 1262 |
+
0.47
|
| 1263 |
+
],
|
| 1264 |
+
"angle": 0,
|
| 1265 |
+
"content": "C. Visualization Analysis"
|
| 1266 |
+
},
|
| 1267 |
+
{
|
| 1268 |
+
"type": "image",
|
| 1269 |
+
"bbox": [
|
| 1270 |
+
0.571,
|
| 1271 |
+
0.496,
|
| 1272 |
+
0.857,
|
| 1273 |
+
0.66
|
| 1274 |
+
],
|
| 1275 |
+
"angle": 0,
|
| 1276 |
+
"content": null
|
| 1277 |
+
},
|
| 1278 |
+
{
|
| 1279 |
+
"type": "image_caption",
|
| 1280 |
+
"bbox": [
|
| 1281 |
+
0.509,
|
| 1282 |
+
0.678,
|
| 1283 |
+
0.92,
|
| 1284 |
+
0.701
|
| 1285 |
+
],
|
| 1286 |
+
"angle": 0,
|
| 1287 |
+
"content": "Fig. 5. The accuracy of different models as feature extractors on selected objects."
|
| 1288 |
+
},
|
| 1289 |
+
{
|
| 1290 |
+
"type": "text",
|
| 1291 |
+
"bbox": [
|
| 1292 |
+
0.509,
|
| 1293 |
+
0.719,
|
| 1294 |
+
0.92,
|
| 1295 |
+
0.944
|
| 1296 |
+
],
|
| 1297 |
+
"angle": 0,
|
| 1298 |
+
"content": "To clarify why the transformer architecture is helpful for grasp detection tasks, we visualize the heatmaps of attention maps, detailed in Fig. 3. From these heat maps, we can discover that the self-attention modules can readily learn the area that is easy for grasping, such as the edges of objects, ignore irrelevant details, and pay more attention on the contour and shape of the objects. Meanwhile, the model focuses on more general characteristics rather than individual features. For example, for the chairs shown in Fig. 3, our method evaluates the edge of the chairs with a higher grasp quality. We further provide more concrete examples of real-world grasping, and the experimental results show that the attention mechanism is more likely to achieve a better understanding of the grasping scenario, generate more accurate grasping rectangles, and work well on both household and novel objects."
|
| 1299 |
+
}
|
| 1300 |
+
],
|
| 1301 |
+
[
|
| 1302 |
+
{
|
| 1303 |
+
"type": "header",
|
| 1304 |
+
"bbox": [
|
| 1305 |
+
0.077,
|
| 1306 |
+
0.03,
|
| 1307 |
+
0.429,
|
| 1308 |
+
0.041
|
| 1309 |
+
],
|
| 1310 |
+
"angle": 0,
|
| 1311 |
+
"content": "WANG et al.: WHEN TRANSFORMER MEETS ROBOTIC GRASPING"
|
| 1312 |
+
},
|
| 1313 |
+
{
|
| 1314 |
+
"type": "page_number",
|
| 1315 |
+
"bbox": [
|
| 1316 |
+
0.912,
|
| 1317 |
+
0.031,
|
| 1318 |
+
0.921,
|
| 1319 |
+
0.041
|
| 1320 |
+
],
|
| 1321 |
+
"angle": 0,
|
| 1322 |
+
"content": "7"
|
| 1323 |
+
},
|
| 1324 |
+
{
|
| 1325 |
+
"type": "image",
|
| 1326 |
+
"bbox": [
|
| 1327 |
+
0.131,
|
| 1328 |
+
0.071,
|
| 1329 |
+
0.868,
|
| 1330 |
+
0.139
|
| 1331 |
+
],
|
| 1332 |
+
"angle": 0,
|
| 1333 |
+
"content": null
|
| 1334 |
+
},
|
| 1335 |
+
{
|
| 1336 |
+
"type": "image_caption",
|
| 1337 |
+
"bbox": [
|
| 1338 |
+
0.349,
|
| 1339 |
+
0.143,
|
| 1340 |
+
0.648,
|
| 1341 |
+
0.157
|
| 1342 |
+
],
|
| 1343 |
+
"angle": 0,
|
| 1344 |
+
"content": "(a) Samples of generated rectangles predicted by CNN"
|
| 1345 |
+
},
|
| 1346 |
+
{
|
| 1347 |
+
"type": "image",
|
| 1348 |
+
"bbox": [
|
| 1349 |
+
0.13,
|
| 1350 |
+
0.167,
|
| 1351 |
+
0.868,
|
| 1352 |
+
0.226
|
| 1353 |
+
],
|
| 1354 |
+
"angle": 0,
|
| 1355 |
+
"content": null
|
| 1356 |
+
},
|
| 1357 |
+
{
|
| 1358 |
+
"type": "image_caption",
|
| 1359 |
+
"bbox": [
|
| 1360 |
+
0.374,
|
| 1361 |
+
0.232,
|
| 1362 |
+
0.625,
|
| 1363 |
+
0.246
|
| 1364 |
+
],
|
| 1365 |
+
"angle": 0,
|
| 1366 |
+
"content": "(b) Predicted grasp quality heatmaps by CNN"
|
| 1367 |
+
},
|
| 1368 |
+
{
|
| 1369 |
+
"type": "image",
|
| 1370 |
+
"bbox": [
|
| 1371 |
+
0.131,
|
| 1372 |
+
0.256,
|
| 1373 |
+
0.868,
|
| 1374 |
+
0.324
|
| 1375 |
+
],
|
| 1376 |
+
"angle": 0,
|
| 1377 |
+
"content": null
|
| 1378 |
+
},
|
| 1379 |
+
{
|
| 1380 |
+
"type": "image_caption",
|
| 1381 |
+
"bbox": [
|
| 1382 |
+
0.331,
|
| 1383 |
+
0.33,
|
| 1384 |
+
0.668,
|
| 1385 |
+
0.343
|
| 1386 |
+
],
|
| 1387 |
+
"angle": 0,
|
| 1388 |
+
"content": "(c) Samples of generated rectangles predicted by Transformer"
|
| 1389 |
+
},
|
| 1390 |
+
{
|
| 1391 |
+
"type": "image",
|
| 1392 |
+
"bbox": [
|
| 1393 |
+
0.131,
|
| 1394 |
+
0.353,
|
| 1395 |
+
0.868,
|
| 1396 |
+
0.412
|
| 1397 |
+
],
|
| 1398 |
+
"angle": 0,
|
| 1399 |
+
"content": null
|
| 1400 |
+
},
|
| 1401 |
+
{
|
| 1402 |
+
"type": "image_caption",
|
| 1403 |
+
"bbox": [
|
| 1404 |
+
0.354,
|
| 1405 |
+
0.418,
|
| 1406 |
+
0.644,
|
| 1407 |
+
0.431
|
| 1408 |
+
],
|
| 1409 |
+
"angle": 0,
|
| 1410 |
+
"content": "(d) Predicted grasp quality heatmaps by Transformer"
|
| 1411 |
+
},
|
| 1412 |
+
{
|
| 1413 |
+
"type": "image_caption",
|
| 1414 |
+
"bbox": [
|
| 1415 |
+
0.075,
|
| 1416 |
+
0.441,
|
| 1417 |
+
0.544,
|
| 1418 |
+
0.454
|
| 1419 |
+
],
|
| 1420 |
+
"angle": 0,
|
| 1421 |
+
"content": "Fig. 4. Visualization comparison of the CNN and transformer-based grasping models."
|
| 1422 |
+
},
|
| 1423 |
+
{
|
| 1424 |
+
"type": "table_caption",
|
| 1425 |
+
"bbox": [
|
| 1426 |
+
0.093,
|
| 1427 |
+
0.482,
|
| 1428 |
+
0.475,
|
| 1429 |
+
0.505
|
| 1430 |
+
],
|
| 1431 |
+
"angle": 0,
|
| 1432 |
+
"content": "TABLE III COMPARISON BETWEEN USING AND NOT USING SKIP-CONNECTIONS"
|
| 1433 |
+
},
|
| 1434 |
+
{
|
| 1435 |
+
"type": "table",
|
| 1436 |
+
"bbox": [
|
| 1437 |
+
0.078,
|
| 1438 |
+
0.514,
|
| 1439 |
+
0.487,
|
| 1440 |
+
0.649
|
| 1441 |
+
],
|
| 1442 |
+
"angle": 0,
|
| 1443 |
+
"content": "<table><tr><td colspan=\"3\">The accuracy on Cornell Grasping Results</td></tr><tr><td></td><td>With Skip-connections</td><td>Without Skip-connections</td></tr><tr><td>RGB</td><td>96.78%</td><td>95.7%</td></tr><tr><td>Depth</td><td>95.2%</td><td>94.3%</td></tr><tr><td>RGB+Depth</td><td>97.99%</td><td>96.1%</td></tr><tr><td colspan=\"3\">The accuracy on Jacquard Grasping Results</td></tr><tr><td></td><td>With Skip-connections</td><td>Without Skip-connections</td></tr><tr><td>RGB</td><td>93.57%</td><td>92.4%</td></tr><tr><td>Depth</td><td>93.1%</td><td>91.8%</td></tr><tr><td>RGB+Depth</td><td>94.6%</td><td>93.27%</td></tr></table>"
|
| 1444 |
+
},
|
| 1445 |
+
{
|
| 1446 |
+
"type": "text",
|
| 1447 |
+
"bbox": [
|
| 1448 |
+
0.074,
|
| 1449 |
+
0.675,
|
| 1450 |
+
0.491,
|
| 1451 |
+
0.749
|
| 1452 |
+
],
|
| 1453 |
+
"angle": 0,
|
| 1454 |
+
"content": "In Fig. 6, we illustrate a pick-and-place task based on our TF-Grasp on the Franka manipulator. Our grasp detection system works well for novel objects that have not been seen during training procedure and also locates graspable objects in cluttered environments."
|
| 1455 |
+
},
|
| 1456 |
+
{
|
| 1457 |
+
"type": "text",
|
| 1458 |
+
"bbox": [
|
| 1459 |
+
0.074,
|
| 1460 |
+
0.751,
|
| 1461 |
+
0.492,
|
| 1462 |
+
0.797
|
| 1463 |
+
],
|
| 1464 |
+
"angle": 0,
|
| 1465 |
+
"content": "In conclusion, the visualization results indicate that our TF-Grasp can produce a more general and robust prediction, which contributes to improving the detection accuracy."
|
| 1466 |
+
},
|
| 1467 |
+
{
|
| 1468 |
+
"type": "title",
|
| 1469 |
+
"bbox": [
|
| 1470 |
+
0.076,
|
| 1471 |
+
0.819,
|
| 1472 |
+
0.214,
|
| 1473 |
+
0.832
|
| 1474 |
+
],
|
| 1475 |
+
"angle": 0,
|
| 1476 |
+
"content": "D. Ablation Studies"
|
| 1477 |
+
},
|
| 1478 |
+
{
|
| 1479 |
+
"type": "text",
|
| 1480 |
+
"bbox": [
|
| 1481 |
+
0.074,
|
| 1482 |
+
0.839,
|
| 1483 |
+
0.491,
|
| 1484 |
+
0.946
|
| 1485 |
+
],
|
| 1486 |
+
"angle": 0,
|
| 1487 |
+
"content": "To understand the role of skip-connections in our transformer model on the visual grasping problems, we conduct experiments on the Cornell and Jacquard grasping datasets with and without skip-connections using our transformer, respectively. The detailed experimental results are shown in Table III. The use of skip-connections is better than not using skip-connections in all input modes. The attention mechanism"
|
| 1488 |
+
},
|
| 1489 |
+
{
|
| 1490 |
+
"type": "text",
|
| 1491 |
+
"bbox": [
|
| 1492 |
+
0.504,
|
| 1493 |
+
0.48,
|
| 1494 |
+
0.921,
|
| 1495 |
+
0.571
|
| 1496 |
+
],
|
| 1497 |
+
"angle": 0,
|
| 1498 |
+
"content": "in the transformer builds inter-relationships in each layer, incorporates global features, and achieves promising results. Through skip-connections, the multi-scale representations at different stages are further fused globally. The empirical evidence shows that these further refinement and contextual features contribute to the quality of final grasp prediction."
|
| 1499 |
+
},
|
| 1500 |
+
{
|
| 1501 |
+
"type": "title",
|
| 1502 |
+
"bbox": [
|
| 1503 |
+
0.505,
|
| 1504 |
+
0.603,
|
| 1505 |
+
0.764,
|
| 1506 |
+
0.618
|
| 1507 |
+
],
|
| 1508 |
+
"angle": 0,
|
| 1509 |
+
"content": "E. Grasping in Real World Scenarios"
|
| 1510 |
+
},
|
| 1511 |
+
{
|
| 1512 |
+
"type": "text",
|
| 1513 |
+
"bbox": [
|
| 1514 |
+
0.503,
|
| 1515 |
+
0.627,
|
| 1516 |
+
0.921,
|
| 1517 |
+
0.852
|
| 1518 |
+
],
|
| 1519 |
+
"angle": 0,
|
| 1520 |
+
"content": "Physical Setting. The Franka Panda robot manipulation and the RealSense D435 RGB-D camera are used in our physical experiment. The camera is attached to the end-effector to keep a good visual coverage of graspable objects. In each grasp attempt, our TF-Grasp receives the visual signals from the depth camera mounted on the robot end-effector and outputs an optimal grasping posture. Next, the end-effector approaches the optimal target grasping posture based on the trajectory planned by a motion planning method, and then closes the gripper. Such a transformer-based grasp detection system can be easily adapted to other hardware platforms. During the grasp process, the raw depth sensor is filled with a portion of missing pixels that have NaN values. We generate the mask of NaN values, normalize the depth image, and apply cv2.inpaint [30] for further depth completion."
|
| 1521 |
+
},
|
| 1522 |
+
{
|
| 1523 |
+
"type": "text",
|
| 1524 |
+
"bbox": [
|
| 1525 |
+
0.503,
|
| 1526 |
+
0.855,
|
| 1527 |
+
0.922,
|
| 1528 |
+
0.945
|
| 1529 |
+
],
|
| 1530 |
+
"angle": 0,
|
| 1531 |
+
"content": "We perform a total of 165 grasping attempts, of which the robot performs successful grasp 152 times, achieving a success rate of \\(92.1\\%\\). Table IV lists the results of learning-based methods on real robot grasping. These results indicate that the transformer-based grasp detection system also behaves well on real robots."
|
| 1532 |
+
}
|
| 1533 |
+
],
|
| 1534 |
+
[
|
| 1535 |
+
{
|
| 1536 |
+
"type": "page_number",
|
| 1537 |
+
"bbox": [
|
| 1538 |
+
0.078,
|
| 1539 |
+
0.032,
|
| 1540 |
+
0.087,
|
| 1541 |
+
0.04
|
| 1542 |
+
],
|
| 1543 |
+
"angle": 0,
|
| 1544 |
+
"content": "8"
|
| 1545 |
+
},
|
| 1546 |
+
{
|
| 1547 |
+
"type": "header",
|
| 1548 |
+
"bbox": [
|
| 1549 |
+
0.497,
|
| 1550 |
+
0.03,
|
| 1551 |
+
0.92,
|
| 1552 |
+
0.041
|
| 1553 |
+
],
|
| 1554 |
+
"angle": 0,
|
| 1555 |
+
"content": "IEEE ROBOTICS AND AUTOMATION LETTERS. PREPRINT VERSION. JUNE 2022"
|
| 1556 |
+
},
|
| 1557 |
+
{
|
| 1558 |
+
"type": "image",
|
| 1559 |
+
"bbox": [
|
| 1560 |
+
0.113,
|
| 1561 |
+
0.068,
|
| 1562 |
+
0.885,
|
| 1563 |
+
0.165
|
| 1564 |
+
],
|
| 1565 |
+
"angle": 0,
|
| 1566 |
+
"content": null
|
| 1567 |
+
},
|
| 1568 |
+
{
|
| 1569 |
+
"type": "image_caption",
|
| 1570 |
+
"bbox": [
|
| 1571 |
+
0.075,
|
| 1572 |
+
0.175,
|
| 1573 |
+
0.361,
|
| 1574 |
+
0.188
|
| 1575 |
+
],
|
| 1576 |
+
"angle": 0,
|
| 1577 |
+
"content": "Fig. 6. Screenshots of physical grasping in clutter."
|
| 1578 |
+
},
|
| 1579 |
+
{
|
| 1580 |
+
"type": "table_caption",
|
| 1581 |
+
"bbox": [
|
| 1582 |
+
0.182,
|
| 1583 |
+
0.216,
|
| 1584 |
+
0.386,
|
| 1585 |
+
0.239
|
| 1586 |
+
],
|
| 1587 |
+
"angle": 0,
|
| 1588 |
+
"content": "TABLE IV THE RESULTS FOR PHYSICAL SETUP."
|
| 1589 |
+
},
|
| 1590 |
+
{
|
| 1591 |
+
"type": "table",
|
| 1592 |
+
"bbox": [
|
| 1593 |
+
0.124,
|
| 1594 |
+
0.248,
|
| 1595 |
+
0.44,
|
| 1596 |
+
0.321
|
| 1597 |
+
],
|
| 1598 |
+
"angle": 0,
|
| 1599 |
+
"content": "<table><tr><td>Authors</td><td>Physical grasp</td><td>Success rate (%)</td></tr><tr><td>Lenz [14]</td><td>89/100</td><td>89%</td></tr><tr><td>Pinto [31]</td><td>109/150</td><td>73%</td></tr><tr><td>Morrison [17]</td><td>110/120</td><td>92%</td></tr><tr><td>Chu [32]</td><td>89/100</td><td>89%</td></tr><tr><td>TF-Grasp(Ours)</td><td>152/165</td><td>92.1%</td></tr></table>"
|
| 1600 |
+
},
|
| 1601 |
+
{
|
| 1602 |
+
"type": "title",
|
| 1603 |
+
"bbox": [
|
| 1604 |
+
0.16,
|
| 1605 |
+
0.356,
|
| 1606 |
+
0.407,
|
| 1607 |
+
0.369
|
| 1608 |
+
],
|
| 1609 |
+
"angle": 0,
|
| 1610 |
+
"content": "V. DISCUSSION AND CONCLUSION"
|
| 1611 |
+
},
|
| 1612 |
+
{
|
| 1613 |
+
"type": "text",
|
| 1614 |
+
"bbox": [
|
| 1615 |
+
0.074,
|
| 1616 |
+
0.377,
|
| 1617 |
+
0.491,
|
| 1618 |
+
0.572
|
| 1619 |
+
],
|
| 1620 |
+
"angle": 0,
|
| 1621 |
+
"content": "In this work, we develop a novel architecture for visual grasping. Although CNN and its variants are still the dominant models in visual robotic grasping, we show the powerful potential of transformers in grasp detection. Compared with CNN-based counterparts, the transformer-based grasp detection models are better at capturing global dependencies and learning powerful feature representation. The results show that our proposed approach outperforms original CNN-based models. The contexts can be better represented by attention propagation. Nevertheless, the current approach is limited to the parallel gripper. Future research will focus on developing a universal transformer-based grasp detection method for other types of grippers, such as the five finger dexterous hand."
|
| 1622 |
+
},
|
| 1623 |
+
{
|
| 1624 |
+
"type": "title",
|
| 1625 |
+
"bbox": [
|
| 1626 |
+
0.236,
|
| 1627 |
+
0.592,
|
| 1628 |
+
0.331,
|
| 1629 |
+
0.605
|
| 1630 |
+
],
|
| 1631 |
+
"angle": 0,
|
| 1632 |
+
"content": "REFERENCES"
|
| 1633 |
+
},
|
| 1634 |
+
{
|
| 1635 |
+
"type": "ref_text",
|
| 1636 |
+
"bbox": [
|
| 1637 |
+
0.085,
|
| 1638 |
+
0.614,
|
| 1639 |
+
0.492,
|
| 1640 |
+
0.648
|
| 1641 |
+
],
|
| 1642 |
+
"angle": 0,
|
| 1643 |
+
"content": "[1] J. Song, M. Patel, and M. Ghaffari, “Fusing convolutional neural network and geometric constraint for image-based indoor localization,” IEEE Robotics Autom. Lett., vol. 7, no. 2, pp. 1674–1681, 2022."
|
| 1644 |
+
},
|
| 1645 |
+
{
|
| 1646 |
+
"type": "ref_text",
|
| 1647 |
+
"bbox": [
|
| 1648 |
+
0.085,
|
| 1649 |
+
0.649,
|
| 1650 |
+
0.492,
|
| 1651 |
+
0.683
|
| 1652 |
+
],
|
| 1653 |
+
"angle": 0,
|
| 1654 |
+
"content": "[2] D. Zhao and J. Oh, \"Noticing motion patterns: A temporal cnn with a novel convolution operator for human trajectory prediction,\" IEEE Robotics Autom. Lett., vol. 6, no. 2, pp. 628-634, 2021."
|
| 1655 |
+
},
|
| 1656 |
+
{
|
| 1657 |
+
"type": "ref_text",
|
| 1658 |
+
"bbox": [
|
| 1659 |
+
0.086,
|
| 1660 |
+
0.684,
|
| 1661 |
+
0.492,
|
| 1662 |
+
0.717
|
| 1663 |
+
],
|
| 1664 |
+
"angle": 0,
|
| 1665 |
+
"content": "[3] S. Kumra and C. Kanan, \"Robotic grasp detection using deep convolutional neural networks,\" in Proc. IEEE/RSJ Int. Conf. Intell. Robots Syst, 2017, pp. 769-776."
|
| 1666 |
+
},
|
| 1667 |
+
{
|
| 1668 |
+
"type": "ref_text",
|
| 1669 |
+
"bbox": [
|
| 1670 |
+
0.086,
|
| 1671 |
+
0.718,
|
| 1672 |
+
0.492,
|
| 1673 |
+
0.751
|
| 1674 |
+
],
|
| 1675 |
+
"angle": 0,
|
| 1676 |
+
"content": "[4] X. Zhu, Y. Zhou, Y. Fan, and M. Tomizuka, \"Learn to grasp with less supervision: A data-efficient maximum likelihood grasp sampling loss,\" arXiv preprint arXiv:2110.01379, 2021."
|
| 1677 |
+
},
|
| 1678 |
+
{
|
| 1679 |
+
"type": "ref_text",
|
| 1680 |
+
"bbox": [
|
| 1681 |
+
0.086,
|
| 1682 |
+
0.752,
|
| 1683 |
+
0.492,
|
| 1684 |
+
0.785
|
| 1685 |
+
],
|
| 1686 |
+
"angle": 0,
|
| 1687 |
+
"content": "[5] J. Redmon and A. Angelova, \"Real-time grasp detection using convolutional neural networks,\" in Proc. IEEE Int. Conf. Robot. Autom., 2015, pp. 1316-1322."
|
| 1688 |
+
},
|
| 1689 |
+
{
|
| 1690 |
+
"type": "ref_text",
|
| 1691 |
+
"bbox": [
|
| 1692 |
+
0.086,
|
| 1693 |
+
0.786,
|
| 1694 |
+
0.492,
|
| 1695 |
+
0.819
|
| 1696 |
+
],
|
| 1697 |
+
"angle": 0,
|
| 1698 |
+
"content": "[6] A. V. et al., \"Attention is all you need,\" in Annual Conference on Neural Inform. Processing Sys. 2017, December 4-9, 2017, Long Beach, CA, USA, 2017, pp. 5998-6008."
|
| 1699 |
+
},
|
| 1700 |
+
{
|
| 1701 |
+
"type": "ref_text",
|
| 1702 |
+
"bbox": [
|
| 1703 |
+
0.086,
|
| 1704 |
+
0.82,
|
| 1705 |
+
0.492,
|
| 1706 |
+
0.853
|
| 1707 |
+
],
|
| 1708 |
+
"angle": 0,
|
| 1709 |
+
"content": "[7] A. D. et al., \"An image is worth 16x16 words: Transformers for image recognition at scale,\" in Proc. Int. Conf. Learn. Represent. OpenReview.net, 2021."
|
| 1710 |
+
},
|
| 1711 |
+
{
|
| 1712 |
+
"type": "ref_text",
|
| 1713 |
+
"bbox": [
|
| 1714 |
+
0.086,
|
| 1715 |
+
0.854,
|
| 1716 |
+
0.492,
|
| 1717 |
+
0.886
|
| 1718 |
+
],
|
| 1719 |
+
"angle": 0,
|
| 1720 |
+
"content": "[8] L. Z. et al., \"Swin transformer: Hierarchical vision transformer using shifted windows,\" in Proc. IEEE Int. Conf. Comput. Vision, 2021, pp. 10012-10022."
|
| 1721 |
+
},
|
| 1722 |
+
{
|
| 1723 |
+
"type": "ref_text",
|
| 1724 |
+
"bbox": [
|
| 1725 |
+
0.085,
|
| 1726 |
+
0.888,
|
| 1727 |
+
0.492,
|
| 1728 |
+
0.91
|
| 1729 |
+
],
|
| 1730 |
+
"angle": 0,
|
| 1731 |
+
"content": "[9] R. M. Murray, Z. Li, and S. S. Sastry, A mathematical introduction to robotic manipulation. Boca Raton, FL, USA: CRC, 1994, 2017."
|
| 1732 |
+
},
|
| 1733 |
+
{
|
| 1734 |
+
"type": "ref_text",
|
| 1735 |
+
"bbox": [
|
| 1736 |
+
0.078,
|
| 1737 |
+
0.91,
|
| 1738 |
+
0.492,
|
| 1739 |
+
0.945
|
| 1740 |
+
],
|
| 1741 |
+
"angle": 0,
|
| 1742 |
+
"content": "[10] A. Bicchi and V. Kumar, “Robotic grasping and contact: A review,” in Proc. IEEE Int. Conf. Robot. Autom., San Francisco, CA, USA, Apr. 2000, pp. 348–353."
|
| 1743 |
+
},
|
| 1744 |
+
{
|
| 1745 |
+
"type": "list",
|
| 1746 |
+
"bbox": [
|
| 1747 |
+
0.078,
|
| 1748 |
+
0.614,
|
| 1749 |
+
0.492,
|
| 1750 |
+
0.945
|
| 1751 |
+
],
|
| 1752 |
+
"angle": 0,
|
| 1753 |
+
"content": null
|
| 1754 |
+
},
|
| 1755 |
+
{
|
| 1756 |
+
"type": "ref_text",
|
| 1757 |
+
"bbox": [
|
| 1758 |
+
0.508,
|
| 1759 |
+
0.216,
|
| 1760 |
+
0.922,
|
| 1761 |
+
0.251
|
| 1762 |
+
],
|
| 1763 |
+
"angle": 0,
|
| 1764 |
+
"content": "[11] H. Zhang, X. Lan, S. Bai, X. Zhou, Z. Tian, and N. Zheng, “Roi-based robotic grasp detection for object overlapping scenes,” in Proc. IEEE Int. Conf. Intell. Robots Syst., 2019, pp. 4768–4775."
|
| 1765 |
+
},
|
| 1766 |
+
{
|
| 1767 |
+
"type": "ref_text",
|
| 1768 |
+
"bbox": [
|
| 1769 |
+
0.508,
|
| 1770 |
+
0.252,
|
| 1771 |
+
0.921,
|
| 1772 |
+
0.284
|
| 1773 |
+
],
|
| 1774 |
+
"angle": 0,
|
| 1775 |
+
"content": "[12] U. Asif, J. Tang, and S. Harrer, \"Graspnet: An efficient convolutional neural network for real-time grasp detection for low-powered devices,\" in IJCAI, vol. 7, 2018, pp. 4875-4882."
|
| 1776 |
+
},
|
| 1777 |
+
{
|
| 1778 |
+
"type": "ref_text",
|
| 1779 |
+
"bbox": [
|
| 1780 |
+
0.509,
|
| 1781 |
+
0.285,
|
| 1782 |
+
0.921,
|
| 1783 |
+
0.318
|
| 1784 |
+
],
|
| 1785 |
+
"angle": 0,
|
| 1786 |
+
"content": "[13] X. Zhu, L. Sun, Y. Fan, and M. Tomizuka, “6-dof contrastive grasp proposal network,” in Proc. IEEE Int. Conf. Robot.Automat., 2021, pp. 6371–6377."
|
| 1787 |
+
},
|
| 1788 |
+
{
|
| 1789 |
+
"type": "ref_text",
|
| 1790 |
+
"bbox": [
|
| 1791 |
+
0.509,
|
| 1792 |
+
0.319,
|
| 1793 |
+
0.921,
|
| 1794 |
+
0.342
|
| 1795 |
+
],
|
| 1796 |
+
"angle": 0,
|
| 1797 |
+
"content": "[14] I. Lenz, H. Lee, and A. Saxena, \"Deep learning for detecting robotic grasps,\" Int. J. Robotics Res., vol. 34, no. 4-5, pp. 705-724, 2015."
|
| 1798 |
+
},
|
| 1799 |
+
{
|
| 1800 |
+
"type": "ref_text",
|
| 1801 |
+
"bbox": [
|
| 1802 |
+
0.509,
|
| 1803 |
+
0.343,
|
| 1804 |
+
0.921,
|
| 1805 |
+
0.386
|
| 1806 |
+
],
|
| 1807 |
+
"angle": 0,
|
| 1808 |
+
"content": "[15] J. M. et al., \"Dex-net 2.0: Deep learning to plan robust grasps with synthetic point clouds and analytic grasp metrics,\" in Robotics: Science and Systems XIII, Massachusetts Institute of Technology, Cambridge, Massachusetts, USA, July 12-16, 2017, 2017."
|
| 1809 |
+
},
|
| 1810 |
+
{
|
| 1811 |
+
"type": "ref_text",
|
| 1812 |
+
"bbox": [
|
| 1813 |
+
0.509,
|
| 1814 |
+
0.387,
|
| 1815 |
+
0.92,
|
| 1816 |
+
0.421
|
| 1817 |
+
],
|
| 1818 |
+
"angle": 0,
|
| 1819 |
+
"content": "[16] A. Gariépy, J.-C. Ruel, B. Chaib-Draa, and P. Giguere, “Gq-stn: Optimizing one-shot grasp detection based on robustness classifier,” in Proc. IEEE/RSJ Int. Conf. Intell. Robots Syst., 2019, pp. 3996–4003."
|
| 1820 |
+
},
|
| 1821 |
+
{
|
| 1822 |
+
"type": "ref_text",
|
| 1823 |
+
"bbox": [
|
| 1824 |
+
0.509,
|
| 1825 |
+
0.422,
|
| 1826 |
+
0.92,
|
| 1827 |
+
0.453
|
| 1828 |
+
],
|
| 1829 |
+
"angle": 0,
|
| 1830 |
+
"content": "[17] D. Morrison, P. Corke, and J. Leitner, “Learning robust, real-time, reactive robotic grasping,” Int. J. Robotics Res., vol. 39, no. 2-3, pp. 183–201, 2020."
|
| 1831 |
+
},
|
| 1832 |
+
{
|
| 1833 |
+
"type": "ref_text",
|
| 1834 |
+
"bbox": [
|
| 1835 |
+
0.508,
|
| 1836 |
+
0.455,
|
| 1837 |
+
0.92,
|
| 1838 |
+
0.478
|
| 1839 |
+
],
|
| 1840 |
+
"angle": 0,
|
| 1841 |
+
"content": "[18] C. J. et al., \"Transunet: Transformers make strong encoders for medical image segmentation,\" arXiv preprint arXiv:2102.04306, 2021."
|
| 1842 |
+
},
|
| 1843 |
+
{
|
| 1844 |
+
"type": "ref_text",
|
| 1845 |
+
"bbox": [
|
| 1846 |
+
0.509,
|
| 1847 |
+
0.478,
|
| 1848 |
+
0.921,
|
| 1849 |
+
0.523
|
| 1850 |
+
],
|
| 1851 |
+
"angle": 0,
|
| 1852 |
+
"content": "[19] O. Ronneberger, P. Fischer, and T. Brox, “U-net: Convolutional networks for biomedical image segmentation,” in Proc. Int. Conf. on Medical image computing and computer-assisted intervention. Springer, 2015, pp. 234–241."
|
| 1853 |
+
},
|
| 1854 |
+
{
|
| 1855 |
+
"type": "ref_text",
|
| 1856 |
+
"bbox": [
|
| 1857 |
+
0.509,
|
| 1858 |
+
0.523,
|
| 1859 |
+
0.92,
|
| 1860 |
+
0.556
|
| 1861 |
+
],
|
| 1862 |
+
"angle": 0,
|
| 1863 |
+
"content": "[20] Y. Jiang, S. Moseson, and A. Saxena, \"Efficient grasping from rgbd images: Learning using a new rectangle representation,\" in Proc. IEEE Int. Conf. Robot. Automat., 2011, pp. 3304-3311."
|
| 1864 |
+
},
|
| 1865 |
+
{
|
| 1866 |
+
"type": "ref_text",
|
| 1867 |
+
"bbox": [
|
| 1868 |
+
0.509,
|
| 1869 |
+
0.557,
|
| 1870 |
+
0.921,
|
| 1871 |
+
0.59
|
| 1872 |
+
],
|
| 1873 |
+
"angle": 0,
|
| 1874 |
+
"content": "[21] A. Depierre, E. Dellandrea, and L. Chen, \"Jacquard: A large scale dataset for robotic grasp detection,\" in Proc. IEEE/RSJ Int. Conf. Intell. Robots Syst., 2018, pp. 3511-3516."
|
| 1875 |
+
},
|
| 1876 |
+
{
|
| 1877 |
+
"type": "ref_text",
|
| 1878 |
+
"bbox": [
|
| 1879 |
+
0.508,
|
| 1880 |
+
0.591,
|
| 1881 |
+
0.921,
|
| 1882 |
+
0.624
|
| 1883 |
+
],
|
| 1884 |
+
"angle": 0,
|
| 1885 |
+
"content": "[22] Z. Wang, Z. Li, B. Wang, and H. Liu, \"Robot grasp detection using multimodal deep convolutional neural networks,\" Advances in Mechanical Engineering, vol. 8, no. 9, p. 1687814016668077, 2016."
|
| 1886 |
+
},
|
| 1887 |
+
{
|
| 1888 |
+
"type": "ref_text",
|
| 1889 |
+
"bbox": [
|
| 1890 |
+
0.509,
|
| 1891 |
+
0.625,
|
| 1892 |
+
0.92,
|
| 1893 |
+
0.657
|
| 1894 |
+
],
|
| 1895 |
+
"angle": 0,
|
| 1896 |
+
"content": "[23] U. Asif, M. Bennamoun, and F. A. Sohel, \"Rgb-d object recognition and grasp detection using hierarchical cascaded forests,\" IEEE Trans. on Robotics, vol. 33, no. 3, pp. 547-564, 2017."
|
| 1897 |
+
},
|
| 1898 |
+
{
|
| 1899 |
+
"type": "ref_text",
|
| 1900 |
+
"bbox": [
|
| 1901 |
+
0.508,
|
| 1902 |
+
0.659,
|
| 1903 |
+
0.921,
|
| 1904 |
+
0.692
|
| 1905 |
+
],
|
| 1906 |
+
"angle": 0,
|
| 1907 |
+
"content": "[24] H. Karaoguz and P. Jensfelt, \"Object detection approach for robot grasp detection,\" in Proc. IEEE Int. Conf. Robot.Automat., 2019, pp. 4953-4959."
|
| 1908 |
+
},
|
| 1909 |
+
{
|
| 1910 |
+
"type": "ref_text",
|
| 1911 |
+
"bbox": [
|
| 1912 |
+
0.509,
|
| 1913 |
+
0.693,
|
| 1914 |
+
0.921,
|
| 1915 |
+
0.727
|
| 1916 |
+
],
|
| 1917 |
+
"angle": 0,
|
| 1918 |
+
"content": "[25] D. Guo, F. Sun, H. Liu, T. Kong, B. Fang, and N. Xi, “A hybrid deep architecture for robotic grasp detection,” in Proc. IEEE Int. Conf. Robot.Automat., 2017, pp. 1609-1614."
|
| 1919 |
+
},
|
| 1920 |
+
{
|
| 1921 |
+
"type": "ref_text",
|
| 1922 |
+
"bbox": [
|
| 1923 |
+
0.509,
|
| 1924 |
+
0.728,
|
| 1925 |
+
0.921,
|
| 1926 |
+
0.761
|
| 1927 |
+
],
|
| 1928 |
+
"angle": 0,
|
| 1929 |
+
"content": "[26] S. Ainetter and F. Fraundorfer, \"End-to-end trainable deep neural network for robotic grasp detection and semantic segmentation from rgb,\" in Proc. IEEE Int. Conf. Robot.Automat. IEEE, 2021, pp. 13452-13458."
|
| 1930 |
+
},
|
| 1931 |
+
{
|
| 1932 |
+
"type": "ref_text",
|
| 1933 |
+
"bbox": [
|
| 1934 |
+
0.508,
|
| 1935 |
+
0.761,
|
| 1936 |
+
0.92,
|
| 1937 |
+
0.794
|
| 1938 |
+
],
|
| 1939 |
+
"angle": 0,
|
| 1940 |
+
"content": "[27] S. Kumra, S. Joshi, and F. Sahin, “Antipodal robotic grasping using generative residual convolutional neural network,” in Proc. IEEE Int. Conf. Intell. Robots Syst. IEEE, pp. 9626–9633."
|
| 1941 |
+
},
|
| 1942 |
+
{
|
| 1943 |
+
"type": "ref_text",
|
| 1944 |
+
"bbox": [
|
| 1945 |
+
0.509,
|
| 1946 |
+
0.795,
|
| 1947 |
+
0.921,
|
| 1948 |
+
0.817
|
| 1949 |
+
],
|
| 1950 |
+
"angle": 0,
|
| 1951 |
+
"content": "[28] I. Loshchilov and F. Hutter, \"Decoupled weight decay regularization,\" in Proc. Int. Conf. Learn. Represent., 2018."
|
| 1952 |
+
},
|
| 1953 |
+
{
|
| 1954 |
+
"type": "ref_text",
|
| 1955 |
+
"bbox": [
|
| 1956 |
+
0.509,
|
| 1957 |
+
0.818,
|
| 1958 |
+
0.92,
|
| 1959 |
+
0.851
|
| 1960 |
+
],
|
| 1961 |
+
"angle": 0,
|
| 1962 |
+
"content": "[29] X. Zhou, X. Lan, H. Zhang, Z. Tian, Y. Zhang, and N. Zheng, “Fully convolutional grasp detection network with oriented anchor box,” in Proc. IEEE Int. Conf. Intell. Robots Syst., 2018, pp. 7223-7230."
|
| 1963 |
+
},
|
| 1964 |
+
{
|
| 1965 |
+
"type": "ref_text",
|
| 1966 |
+
"bbox": [
|
| 1967 |
+
0.509,
|
| 1968 |
+
0.852,
|
| 1969 |
+
0.921,
|
| 1970 |
+
0.874
|
| 1971 |
+
],
|
| 1972 |
+
"angle": 0,
|
| 1973 |
+
"content": "[30] G. Bradski, “The opencv library.” Dr. Dobb's Journal: Software Tools for the Professional Programmer, vol. 25, no. 11, pp. 120-123, 2000."
|
| 1974 |
+
},
|
| 1975 |
+
{
|
| 1976 |
+
"type": "ref_text",
|
| 1977 |
+
"bbox": [
|
| 1978 |
+
0.509,
|
| 1979 |
+
0.875,
|
| 1980 |
+
0.921,
|
| 1981 |
+
0.908
|
| 1982 |
+
],
|
| 1983 |
+
"angle": 0,
|
| 1984 |
+
"content": "[31] L. Pinto and A. Gupta, \"Supersizing self-supervision: Learning to grasp from 50k tries and 700 robot hours,\" in Proc. IEEE Int. Conf. Robot. Autom., 2016, pp. 3406-3413."
|
| 1985 |
+
},
|
| 1986 |
+
{
|
| 1987 |
+
"type": "ref_text",
|
| 1988 |
+
"bbox": [
|
| 1989 |
+
0.509,
|
| 1990 |
+
0.908,
|
| 1991 |
+
0.921,
|
| 1992 |
+
0.941
|
| 1993 |
+
],
|
| 1994 |
+
"angle": 0,
|
| 1995 |
+
"content": "[32] F.-J. Chu, R. Xu, and P. A. Vela, “Real-world multiobject, multigrasp detection,” IEEE Robotics Autom. Lett., vol. 3, no. 4, pp. 3355–3362, 2018."
|
| 1996 |
+
},
|
| 1997 |
+
{
|
| 1998 |
+
"type": "list",
|
| 1999 |
+
"bbox": [
|
| 2000 |
+
0.508,
|
| 2001 |
+
0.216,
|
| 2002 |
+
0.922,
|
| 2003 |
+
0.941
|
| 2004 |
+
],
|
| 2005 |
+
"angle": 0,
|
| 2006 |
+
"content": null
|
| 2007 |
+
}
|
| 2008 |
+
]
|
| 2009 |
+
]
|
2202.11xxx/2202.11911/9037d395-d951-4662-9f75-505b7890fe99_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:488390286a52e31e40b404b73d626eb0eaf71b9dd253e0a96f5c8dff68fa28e8
|
| 3 |
+
size 5495573
|
2202.11xxx/2202.11911/full.md
ADDED
|
@@ -0,0 +1,291 @@
|
| 1 |
+
# When Transformer Meets Robotic Grasping: Exploits Context for Efficient Grasp Detection
|
| 2 |
+
|
| 3 |
+
Shaochen Wang, Zhangli Zhou, and Zhen Kan, Senior Member, IEEE
|
| 4 |
+
|
| 5 |
+
Abstract—In this paper, we present a transformer-based architecture, namely TF-Grasp, for robotic grasp detection. The developed TF-Grasp framework has two elaborate designs making it well suited to visual grasping tasks. The first key design is that we adopt local window attention to capture local contextual information and detailed features of graspable objects. Then, we apply cross-window attention to model the long-term dependencies between distant pixels. Object knowledge, environmental configuration, and relationships between different visual entities are aggregated for subsequent grasp detection. The second key design is that we build a hierarchical encoder-decoder architecture with skip-connections, delivering shallow features from the encoder to the decoder to enable multi-scale feature fusion. Due to the powerful attention mechanism, TF-Grasp can simultaneously obtain local information (i.e., the contours of objects) and model long-term connections such as the relationships between distinct visual concepts in clutter. Extensive computational experiments demonstrate that TF-Grasp achieves competitive results versus state-of-the-art convolutional grasping models and attains accuracies of $97.99\%$ and $94.6\%$ on the Cornell and Jacquard grasping datasets, respectively. Real-world experiments using a 7DoF Franka Emika Panda robot also demonstrate its capability of grasping unseen objects in a variety of scenarios. The code is available at https://github.com/WangShaoSUN/grasp-transformer.
|
| 6 |
+
|
| 7 |
+
Index Terms—Vision Transformer, Grasp Detection, Robotic Grasping.
|
| 8 |
+
|
| 9 |
+
# I. INTRODUCTION
|
| 10 |
+
|
| 11 |
+
DATA-driven methodologies such as deep learning have become the mainstream methods for robotic visual sensing tasks such as indoor localization [1], trajectory prediction [2], and robotic manipulation [3], [4], since they require less handcrafted feature engineering and can be extended to many complex tasks. In recent years, as visual sensing is increasingly being used in manufacturing, industry, and medical care, a growing body of research is devoted to developing advanced robot perception abilities. A typical application of visual sensing is robotic grasp detection, where images of objects are used to infer the grasping pose. Considering a grasping task of manipulating a wide diversity of objects, to find
|
| 12 |
+
|
| 13 |
+
Manuscript received February 23, 2022; revised April 25, 2022; accepted June 20, 2022. This letter was recommended for publication by Markus Vincze upon evaluation of the Associate Editor and Reviewers' comments. This work was supported in part by the National Natural Science Foundation of China under Grant U2013601, and Grant 62173314. (Corresponding author: Zhen Kan.)
|
| 14 |
+
|
| 15 |
+
Shaochen Wang, Zhangli Zhou, and Zhen Kan are with the Department of Automation, University of Science and Technology of China, Hefei 230026, China, (e-mail: samwang@mail.ustc.edu.cn; zzl1215@mail.ustc.edu.cn; zkan@ustc.edu.cn.) An extended version is available at https://arxiv.org/abs/2202.11911.
|
| 16 |
+
|
| 17 |
+
Digital Object Identifier (DOI): see top of this page.
|
| 18 |
+
|
| 19 |
+
the graspable regions, the robots have to concentrate on not only partial geometric information but also the entire visual appearance of the object. Particularly in unstructured and cluttered environments, dealing with variations in shape and position (e.g., occlusion) as well as the spatial relationship with other objects is critical to the performance of grasp detection. Therefore, this work is particularly motivated to investigate grasp detection that takes into account both local neighboring pixels and long-distance relationships in the spatial dimensions.
|
| 20 |
+
|
| 21 |
+
Most modern grasp detectors [3], [5] are based on convolutional neural networks (CNNs), which have emerged as the de facto standard for processing visual robotic grasping. However, current CNNs are composed of individual convolution kernels, which are more inclined to concentrate on local-level information. Also, the convolution kernels in a CNN layer are viewed as independent counterparts without mutual information fusion. Generally, to maintain a large receptive field, CNNs have to repeatedly stack convolutional layers, which reduces the spatial resolution and inevitably results in the loss of global details and degraded performance.
|
| 22 |
+
|
| 23 |
+
Recently, as a novel approach to handle natural language processing and computer vision, the transformer [6], [7], [8] demonstrates remarkable success. The widely adopted attention mechanisms [6] of transformers in sequence modeling provide an elegant resolution that can better convey the fusion of information across global sequences. In fact, as robots are deployed in more and more diverse applications such as industrial assembly lines and smart home, the sensing capacity of robotic systems needs to be enriched, not only in local regions, but also in global interaction. Especially when robots frequently interact with objects in the environment, the awareness of global attention is particularly important with respect to safety and reliability. However, most vision transformers are designed for image classification on natural images processing tasks. Few of them are specifically built for robotic tasks.
|
| 24 |
+
|
| 25 |
+
In this paper, we present a transformer-based visual grasp detection framework, namely TF-Grasp, which leverages the fact that the attention can better aggregate information across the entire input sequences to obtain an improved global representation. More specifically, the information within independent image patches is bridged via self-attention and the encoder in our framework captures these multi-scale low-level features. The decoder incorporates the high-level features through long-range spatial dependencies to construct the final grasping pose. We provide detailed empirical evidence to show that our grasping transformer performs reasonably well on popular grasping testbeds, e.g., Cornell and Jacquard grasping
|
| 26 |
+
|
| 27 |
+
datasets. The experimental results demonstrate that the transformer architecture plays an integral role in generating appropriate grasping poses by learning local and global features from different parts of each object. The vision transformer-based grasp detection works well on the real robotic system and shows promising generalization to unseen objects. In addition, our TF-Grasp can generate the required grasping poses for parallel grippers in a single forward pass of the network.
|
| 28 |
+
|
| 29 |
+
In a nutshell, the contributions of this paper are threefold:
|
| 30 |
+
|
| 31 |
+
- This work presents a novel and neat transformer architecture for visual robotic grasping tasks. To the best of our knowledge, it is one of the first attempts considering vision transformers in grasp detection tasks.
|
| 32 |
+
- We consider simultaneous fusion of local and global features and redesign the classical ViT framework for robotic visual sensing tasks.
|
| 33 |
+
- Exhaustive experiments are conducted to show the advantages of the transformer-based robotic perception framework. The experimental results demonstrate that our model achieves improved performance on popular grasping datasets compared to the state-of-the-art methods. We further show that our grasping transformer can generate appropriate grasping poses for known or unknown objects in either single or cluttered environments.
|
| 34 |
+
|
| 35 |
+
# II. RELATED WORK
|
| 36 |
+
|
| 37 |
+
This section reviews recent advances in the field of robotic grasping and briefly describes the progress of transformers in different areas.
|
| 38 |
+
|
| 39 |
+
# A. Grasp Detection
|
| 40 |
+
|
| 41 |
+
The ability to locate the object position and determine the appropriate grasping pose is crucial to stable and robust robotic grasping. Grasp detection, as the name implies, uses the image captured from the camera to infer the grasping pose for the robot manipulator. Using geometry-driven methods, earlier works [9], [10] mainly focus on analyzing the contours of objects to identify grasping points. A common assumption in these methods is that the geometric model of the object is always available. However, preparing the CAD models for graspable objects is time-consuming and impractical for real-time implementation. Recently, deep learning based methods have been successfully applied in visual grasping tasks [3], [5], [11], [12], [13]. The work of [14] is one of the earliest works that introduces deep neural networks to grasp detection via a two-stage strategy where the first stage finds exhaustive possible grasping candidates and the second stage evaluates the quality of these grasp candidates to identify the best one. However, due to numerous grasping proposals, the method in [14] suffers from relatively slow speed. Many recent works utilize convolutional neural networks to generate bounding box proposals to estimate the grasp pose of objects. Redmon et al. [5] employed an Alexnet-like CNN architecture to regress grasping poses. Kumra et al. [3] explored the use of ResNet-50 as a backbone to incorporate multimodal
|
| 42 |
+
|
| 43 |
+
information, including depth and RGB data, to further improve the grasp performance. Besides, CNN-based grasp quality networks [15], [16] were proposed to evaluate and predict the robustness of grasp candidates. Along the same line, GG-CNN [17] developed a fully convolutional neural network to perform grasp detection, which provides a lightweight and real-time solution for visual grasping. Currently, most of the existing grasp detection methods are still heavily inspired by computer vision techniques such as object recognition, object detection, etc. In contrast to classical visual problems where the detected objects are usually well-defined instances in the scene, in grasp detection, the grasp configuration to be generated is continuous, which implies an infinite number of possible grasp options. This poses significant challenges for feature extraction to identify a valid grasp configuration from all possible candidates. We argue that the loss of long-term dependencies in feature extraction is a major drawback of current CNN-based grasp detection methods.
|
| 44 |
+
|
| 45 |
+
# B. Transformer
|
| 46 |
+
|
| 47 |
+
The transformer [6] first emerged in machine translation and is rapidly establishing itself as a new paradigm in natural language processing due to its ability to model global information and learn high-quality features by considering the whole context. Thanks to its excellent global representation and its amenability to parallel computation, the transformer is competitive in long-sequence modeling and is gradually replacing RNNs and CNNs.
|
| 48 |
+
|
| 49 |
+
Motivated by the remarkable success of transformers achieved in natural language processing, more and more researchers are interested in the employment of attention mechanisms in visual tasks. At present, the transformer has been successfully applied to image classification, object detection, and segmentation tasks. However, there still exist many challenges. First, visual signals and word tokens are very different on many scales. Second, the high dimension of pixel-level information may introduce significant computational complexity.
|
| 50 |
+
|
| 51 |
+
More recently, ViT [7] was presented as a transformer model to tackle natural image recognition, which splits the image into non-overlapping patches. The authors in [8] proposed a hierarchical ViT called Swin-Transformer by calculating the local self-attention with shifted windows. In contrast to the quadratic computation complexity of self-attention in ViT, Swin-Transformer achieves a linear complexity. Inspired by this line of work, many researchers have tried to apply transformers to other fields. For example, TransUNet [18] combines the transformer and Unet [19] for medical image diagnosis. Nevertheless, how to exploit the strengths of attention to aggregate information from entire inputs has not been investigated in the task of visual grasp detection. Unlike prior works, we design a transformer-based encoder-decoder architecture to predict the grasp posture in an end-to-end manner. It is shown that our method achieves higher grasp success than its state-of-the-art CNN counterparts.
|
| 52 |
+
|
| 53 |
+

|
| 54 |
+
Fig. 1. Overview of the TF-grasp model. Our model takes as input the image captured by the camera mounted on the end-effector of the manipulator and generates a pixel-level grasp representation.
|
| 55 |
+
|
| 56 |
+
# III. METHOD
|
| 57 |
+
|
| 58 |
+
Grasp Representation. Autonomous visual grasping tasks generally start from collecting images of the object via sensory input, which are then processed to generate an effective grasp configuration that maximizes the probability of grasp success. Considering a parallel-plate gripper, the grasp representation $g$ [20] is formulated as a 5-dimensional tuple:
|
| 59 |
+
|
| 60 |
+
$$
|
| 61 |
+
g = \{x, y, \theta , w, h \} \tag {1}
|
| 62 |
+
$$
|
| 63 |
+
|
| 64 |
+
where $(x,y)$ are the center coordinates of the grasp rectangle, $(w,h)$ denote the width and height of the grasp rectangle, and $\theta$ is the orientation of the grasp rectangle with respect to the horizontal axis. Given a gripper with known dimensions, a simplified representation can be expressed as $g = (p,\phi ,w)$ where $p = (x,y)$ , $\phi$ indicates the orientation angle of the gripper, and $w$ denotes the opening distance of the gripper.
|
| 65 |
+
|
| 66 |
+
To facilitate grasping, we follow the setting in [17] to represent the grasp in 2-D image space as
|
| 67 |
+
|
| 68 |
+
$$
|
| 69 |
+
G = \{Q, W, \Theta \} \in \mathbb {R} ^ {3 \times W \times H}, \tag {2}
|
| 70 |
+
$$
|
| 71 |
+
|
| 72 |
+
where the grasp quality $Q$ measures the grasp success of each pixel, and $W$ and $\Theta$ are the gripper width and orientation angle maps. The value of each pixel in $W$ and $\Theta$ represents the corresponding width and angle of gripper at that position during the grasping.
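For concreteness, the pixel-wise representation can be sketched as three stacked heatmaps (a minimal illustration in PyTorch; the variable names are assumptions, not taken from the released code):

```python
import torch

# Pixel-wise grasp representation G = {Q, W, Theta} in R^{3 x H x W} (Eq. 2).
# Each channel is a heatmap aligned with the input image.
H, W_img = 224, 224
G = torch.zeros(3, H, W_img)
quality = G[0]  # probability of grasp success when grasping at this pixel, in [0, 1]
width   = G[1]  # gripper opening width to use at this pixel
angle   = G[2]  # gripper orientation at this pixel, in [-pi/2, pi/2]
```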
|
| 73 |
+
|
| 74 |
+
Consequently, in the developed TF-Grasp, the grasp detection task boils down to three sub-tasks, namely the problems of predicting grasping position, angle, and width.
|
| 75 |
+
|
| 76 |
+
Grasp Transformer Overview. A deep motivation of this work is that the treatment of robot perception in complex,
|
| 77 |
+
|
| 78 |
+
dynamic robotic tasks should be global and holistic, with mutual information fusion. Specifically, the grasping model can be formulated as an encoder-decoder architecture with a U-shaped structure, as detailed in Fig. 1. The encoder branch aggregates the entire visual input, mutually fuses features by using attention blocks, and then extracts the specific features that are useful for visual robotic grasping. During decoding, the model incorporates features delivered via skip-connections and performs a pixel-level grasp prediction by up-sampling. More concretely, the attention modules in the decoder enable more comprehensive processing of local and long-range information, allowing for better multi-scale feature fusion. Each pixel in the prediction heatmap is correlated with the final location and orientation of the end-effector.
|
| 79 |
+
|
| 80 |
+
To bridge the domain gaps between the transformer and visual robotic grasping tasks, we have carefully designed our grasping transformer in the following aspects for improved grasp detection. (a) Cascade Design. Different from the classic ViT architecture, we adopt a cascaded encoder-decoder structure. The encoder utilizes self-attention to learn a contextual representation that facilitates grasping and the decoder makes use of the extracted features to perform a pixel-level grasp prediction. (b) Local and Global Balance. We utilize the Swin attention layer to achieve a trade-off between global and local information for better scene perception. Window attention performs local feature extraction and the shifted-window attention allows cross-window interactions to globally focus on more diverse regions. (c) Feature Fusion. The feature representations at different stages are connected by skip-connections for a multi-scale feature fusion, which acquires both rich semantic and detailed features. (d) Lightweight Design. It is essential for robots to account for efficiency and real-time performance. We utilize shifted attention blocks and a slim design for our grasping transformer to reach an ideal trade-off between performance and speed.
|
| 81 |
+
|
| 82 |
+
Grasp Transformer Encoder. Before being fed into the encoder, the image is first passed through a patch partition layer and is cut into non-overlapping patches. Each patch is treated as a word token in text. For example, a 2D image $I \in \mathbb{R}^{W \times H \times C}$ is split into fixed-size patches $x \in \mathbb{R}^{N \times (P \times P \times C)}$ , where $(H, W)$ denote the height and width of the original image, $C$ represents the number of channels of the image, $P$ is the side length of each image patch, and $N = H \times W / P^2$ refers to the number of image patches. Then token-based representations can be obtained by passing the image patches into a projection layer.
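The patch-partition and projection step can be sketched as a strided convolution, which is how it is described later in the implementation details (kernel size $p$ and stride $p$); the embedding dimension of 96 below is an illustrative assumption:

```python
import torch
import torch.nn as nn

class PatchPartition(nn.Module):
    """Split an image into non-overlapping p x p patches and project each patch
    to an embedding vector, realized as a convolution with kernel p and stride p."""
    def __init__(self, in_channels=3, embed_dim=96, p=4):
        super().__init__()
        self.proj = nn.Conv2d(in_channels, embed_dim, kernel_size=p, stride=p)

    def forward(self, img):                    # img: (B, C, H, W)
        x = self.proj(img)                     # (B, embed_dim, H/p, W/p)
        return x.flatten(2).transpose(1, 2)    # (B, N, embed_dim), N = H*W / p^2

# A 224 x 224 RGB image becomes 56 * 56 = 3136 patch tokens.
tokens = PatchPartition()(torch.randn(1, 3, 224, 224))
print(tokens.shape)  # torch.Size([1, 3136, 96])
```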
|
| 83 |
+
|
| 84 |
+
The encoder is composed of stacked identical transformer blocks. The attention layers in the transformer blocks build long-distance interactions across distant pixels and attend to these positions in the embedding space. At the top of the encoder is a bottleneck block attached to the decoder. The fundamental element in our grasping transformer framework is multi-head self-attention. The input feature $\mathbf{X}$ is linearly transformed to derive the query $Q$ , key $K$ , and value $V$ , which are defined as follows:
|
| 85 |
+
|
| 86 |
+
$$
|
| 87 |
+
Q = X W _ {Q}, K = X W _ {K}, V = X W _ {V}, \tag {3}
|
| 88 |
+
$$
|
| 89 |
+
|
| 90 |
+
where $W_{Q}, W_{K}, W_{V}$ are linear projection matrices. Next, we
|
| 91 |
+
|
| 92 |
+

|
| 93 |
+
Fig. 2. The architecture of our transformer block.
|
| 94 |
+
|
| 95 |
+
compute the similarity between the query and key by using the dot product to obtain the attention,
|
| 96 |
+
|
| 97 |
+
$$
|
| 98 |
+
\operatorname {Attention} (Q, K, V) = \operatorname {SoftMax} \left(\frac {Q K ^ {T}}{\sqrt {d}} + B\right) V \tag {4}
|
| 99 |
+
$$
|
| 100 |
+
|
| 101 |
+
where $\sqrt{d}$ is the scaling factor and $B$ is the learnable relative position encoding.
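A minimal single-head sketch of Eqs. (3)-(4) follows; the dense $N \times N$ bias stands in for the relative position encoding $B$, whereas the actual model uses multi-head attention inside local windows with a relative-position bias table:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SelfAttention(nn.Module):
    """Scaled dot-product self-attention with an additive learnable bias (Eqs. 3-4)."""
    def __init__(self, dim, num_tokens):
        super().__init__()
        self.scale = dim ** -0.5
        self.to_qkv = nn.Linear(dim, 3 * dim, bias=False)              # W_Q, W_K, W_V stacked
        self.bias = nn.Parameter(torch.zeros(num_tokens, num_tokens))  # simplified B

    def forward(self, x):                                  # x: (B, N, dim)
        q, k, v = self.to_qkv(x).chunk(3, dim=-1)
        attn = F.softmax(q @ k.transpose(-2, -1) * self.scale + self.bias, dim=-1)
        return attn @ v                                    # (B, N, dim)
```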
|
| 102 |
+
|
| 103 |
+
The computational complexity of self-attention grows quadratically with respect to the image size. To achieve computational efficiency, we leverage the advantages of CNNs and transformers and adopt the Swin Transformer block [8] in our framework. The Swin Transformer layer consists of two parts: local attention and global attention. Within the local attention, the calculation of self-attention is restricted to local regions where image patches are divided into non-overlapping local windows. Cross-window attention introduces connections between neighbors by shifting the non-overlapping windows. The structure of the Swin Transformer block is presented in Fig. 2; it is composed of MLP, layer norm, window-based MSA, and shifted-window MSA layers. The computation procedure of the Swin Transformer block is as follows:
|
| 104 |
+
|
| 105 |
+
$$
|
| 106 |
+
\begin{array}{l} \hat {\mathbf {x}} ^ {l} = \operatorname {W-MSA} \left(\operatorname {LN} \left(\mathbf {x} ^ {l - 1}\right)\right) + \mathbf {x} ^ {l - 1}, \\ \mathbf {x} ^ {l} = \operatorname {MLP} \left(\operatorname {LN} \left(\hat {\mathbf {x}} ^ {l}\right)\right) + \hat {\mathbf {x}} ^ {l}, \\ \hat {\mathbf {x}} ^ {l + 1} = \operatorname {SW-MSA} \left(\operatorname {LN} \left(\mathbf {x} ^ {l}\right)\right) + \mathbf {x} ^ {l}, \\ \mathbf {x} ^ {l + 1} = \operatorname {MLP} \left(\operatorname {LN} \left(\hat {\mathbf {x}} ^ {l + 1}\right)\right) + \hat {\mathbf {x}} ^ {l + 1} \end{array} \tag {5}
|
| 107 |
+
$$
|
| 108 |
+
|
| 109 |
+
where W-MSA and SW-MSA refer to the local-window and globally shifted-window multi-head self-attention, respectively, and $\mathbf{x}^{l - 1}$ denotes the output feature from the previous layer. The features are first sent into the window attention, W-MSA. There is a layer norm before both the MLP and the attention layer, and residual connections are applied to these modules. Between every two Swin Transformer blocks, there is a patch merging operation that reduces the resolution of the feature maps. The patch merging layer builds a hierarchical representation by gradually merging consecutive neighboring patches between successive transformer layers.
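The window-based and shifted-window attention patterns can be sketched as follows (the attention mask for wrapped-around positions after the cyclic shift is omitted; this is an illustration, not the authors' implementation):

```python
import torch

def window_partition(x, ws=7):
    """Split a (B, H, W, C) feature map into non-overlapping ws x ws windows;
    self-attention (W-MSA) is then computed independently inside each window."""
    B, H, W, C = x.shape
    x = x.view(B, H // ws, ws, W // ws, ws, C)
    return x.permute(0, 1, 3, 2, 4, 5).reshape(-1, ws * ws, C)

def cyclic_shift(x, ws=7):
    """Shift the map by half a window before partitioning, so the following
    attention layer (SW-MSA) mixes tokens across the previous window borders."""
    return torch.roll(x, shifts=(-(ws // 2), -(ws // 2)), dims=(1, 2))

# With the paper's window size of 7 on a 56 x 56 feature map:
feat = torch.randn(1, 56, 56, 96)
wins = window_partition(feat)                        # (64, 49, 96): 8 x 8 windows of 49 tokens
shifted_wins = window_partition(cyclic_shift(feat))  # same shape, shifted windows
```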
|
| 110 |
+
|
| 111 |
+
Grasp Transformer Decoder. The decoder generates an executable grasping configuration that allows the end-effector to move to the corresponding positions. We transform the planar grasp detection problem into a pixel-level prediction.
|
| 112 |
+
|
| 113 |
+
Three grasping heads are attached in parallel to the top of the decoder, including a grasp confidence head $Q$ , a gripper angle head $\Theta$ , and a gripper width head $W$ . The output of each head is a heat map with the same size as the input visual image. The grasp confidence head outputs a value between 0 and 1, which indicates the probability of the successful grasping at each pixel point. Likewise, the gripper width and angle heads output the width and rotation angle of the gripper when grasping at the corresponding point in the image, respectively. We treat the grasping posture estimation as a regression problem and use our transformer model to learn a mapping $F: I \to \tilde{G}$ by minimizing the distances between the predicted grasping heatmaps $\tilde{G}(Q, W, \Theta)$ and the ground truth, where $I$ is the input data. The loss function is defined as follows:
|
| 114 |
+
|
| 115 |
+
$$
|
| 116 |
+
\mathcal {L} = \sum_ {i} ^ {N} \sum_ {m \in \{Q, W, \Theta \}} \| \tilde {G} _ {i} ^ {m} - L _ {i} ^ {m} \| ^ {2} \tag {6}
|
| 117 |
+
$$
|
| 118 |
+
|
| 119 |
+
where $N$ is the number of samples and $L_{i}$ is the corresponding label.
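A minimal sketch of this training objective (the dictionary keys and tensor layout are assumptions for illustration):

```python
import torch.nn.functional as F

def grasp_loss(pred, label):
    """Eq. (6): squared-error loss over the quality, width, and angle heatmaps.
    `pred` and `label` are dicts of (B, 1, H, W) tensors."""
    return sum(F.mse_loss(pred[k], label[k]) for k in ("quality", "width", "angle"))
```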
|
| 120 |
+
|
| 121 |
+
The final grasp location is the position with the highest grasp confidence, retrieved from the grasp quality heatmap as:
|
| 122 |
+
|
| 123 |
+
$$
|
| 124 |
+
\mathcal {G} _ {pos} ^ {*} = \operatorname {argmax} _ {pos} Q, \tag {7}
|
| 125 |
+
$$
|
| 126 |
+
|
| 127 |
+
where $Q$ is the grasp confidence map. Afterward, we extract the predicted angle $\theta$ and width $w$ at the corresponding position from the angle and width heatmaps.
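Decoding the final grasp from the three heatmaps is then a single look-up (a sketch using NumPy; array names are illustrative):

```python
import numpy as np

def decode_best_grasp(quality, angle, width):
    """Eq. (7): take the pixel with the highest grasp confidence and read the
    orientation and opening width at that pixel from the other two heatmaps.
    Inputs are (H, W) arrays; the returned position is (row, col)."""
    pos = np.unravel_index(np.argmax(quality), quality.shape)
    return pos, float(angle[pos]), float(width[pos])
```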
|
| 128 |
+
|
| 129 |
+
In our grasp detection decoder, we also adopt Swin Transformer blocks to reduce the computational complexity. Swin attention aggregates multi-scale features and builds a hierarchical representation, and skip-connections merge the features learned at these different stages for further fusion to produce a better grasp posture. Analogous to U-Net [19], skip-connections are implemented by concatenating features from the $i$ -th layer of the encoder directly into the $i$ -th layer of the decoder. In the decoding phase, following the patch expanding layer, the concatenated features are taken as input to the next attention block stage. Simultaneously, we can learn the relationship between the fused features, where the features in the encoder can be used as queries and keys to interact with their counterparts in the decoder for self-attention computation.
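The skip-connection fusion can be sketched as a concatenation followed by a linear projection (an illustrative fusion layer, not necessarily the authors' exact design):

```python
import torch
import torch.nn as nn

class SkipFusion(nn.Module):
    """U-Net-style skip-connection: decoder tokens are concatenated with the
    same-resolution encoder tokens and fused before the next attention stage."""
    def __init__(self, dim):
        super().__init__()
        self.fuse = nn.Linear(2 * dim, dim)

    def forward(self, dec_tokens, enc_tokens):   # both: (B, N, dim)
        return self.fuse(torch.cat([dec_tokens, enc_tokens], dim=-1))
```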
|
| 130 |
+
|
| 131 |
+
A benefit of our pixel-level grasp representation is that only a single forward propagation is required to obtain the best grasp postures within the global visual scene, avoiding the need to generate multiple grasp candidates and saving the computation expense.
|
| 132 |
+
|
| 133 |
+
# IV. EXPERIMENTS
|
| 134 |
+
|
| 135 |
+
In this section, extensive experiments are carried out to validate the performance of the proposed TF-Grasp method. We verify the performance of TF-Grasp on two popular grasping datasets and then evaluate its effectiveness on a real Franka Panda robotic manipulator.
|
| 136 |
+
|
| 137 |
+
This section aims to answer the following questions:
|
| 138 |
+
|
| 139 |
+
- Is the transformer-based grasp detection model better than CNN-based models?
|
| 140 |
+
|
| 141 |
+

|
| 142 |
+
|
| 143 |
+

|
| 144 |
+
|
| 145 |
+

|
| 146 |
+
|
| 147 |
+

|
| 148 |
+
|
| 149 |
+

|
| 150 |
+
|
| 151 |
+

|
| 152 |
+
|
| 153 |
+

|
| 154 |
+
|
| 155 |
+

|
| 156 |
+
Fig. 3. The visualized attention heatmaps learned by our method, which show that our transformer model can learn the concepts beneficial for grasping.
|
| 157 |
+
|
| 158 |
+

|
| 159 |
+
|
| 160 |
+

|
| 161 |
+
|
| 162 |
+

|
| 163 |
+
|
| 164 |
+

|
| 165 |
+
|
| 166 |
+

|
| 167 |
+
|
| 168 |
+

|
| 169 |
+
|
| 170 |
+
- If so, what makes the transformer-based grasp detection model outperform others?
|
| 171 |
+
|
| 172 |
+
# A. Datasets and Experiment Setup
|
| 173 |
+
|
| 174 |
+
The Cornell grasping dataset [14] is a multi-object dataset that contains 885 images. The resolution of each image is $640 \times 480$ . The whole dataset is relatively small, so we use various data augmentation techniques such as rotations, zooms, and random crops to avoid overfitting. We then validate the performance of TF-Grasp on the Jacquard dataset [21], which is generated in a simulator via CAD models. The Jacquard dataset is fairly large, containing over 50k images of 11k object categories, with over 1 million annotated grasp labels.
|
| 175 |
+
|
| 176 |
+
Evaluation Metric. A predicted grasp is regarded as correct if the following conditions are satisfied.
|
| 177 |
+
|
| 178 |
+
i) The discrepancy between the predicted grasping angle and the ground truth is within $30^{\circ}$ .
|
| 179 |
+
ii) The Jaccard index defined in Eq. (8) is greater than 0.25.
|
| 180 |
+
|
| 181 |
+
$$
|
| 182 |
+
J \left(\mathcal {R} ^ {*}, \mathcal {R}\right) = \frac {\left| \mathcal {R} ^ {*} \cap \mathcal {R} \right|}{\left| \mathcal {R} ^ {*} \cup \mathcal {R} \right|} \tag {8}
|
| 183 |
+
$$
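The full correctness check can be sketched as below; rasterizing the rectangles into boolean masks is left out, and treating the grasp angle modulo $180^{\circ}$ is an assumption of this sketch:

```python
import numpy as np

def rect_iou(pred_mask, gt_mask):
    """Jaccard index (Eq. 8) between two rasterized grasp rectangles,
    given as boolean masks of the same image size."""
    inter = np.logical_and(pred_mask, gt_mask).sum()
    union = np.logical_or(pred_mask, gt_mask).sum()
    return inter / union if union > 0 else 0.0

def grasp_is_correct(pred_angle, gt_angle, pred_mask, gt_mask):
    """A prediction counts as correct if the angle error is within 30 degrees
    and the Jaccard index exceeds 0.25."""
    diff = abs(pred_angle - gt_angle) % np.pi
    angle_ok = min(diff, np.pi - diff) <= np.deg2rad(30)
    return angle_ok and rect_iou(pred_mask, gt_mask) > 0.25
```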
|
| 184 |
+
|
| 185 |
+
TF-Grasp takes a $224 \times 224$ image as input and outputs three pixel-wise maps with the same resolution as the input. The input is normalized by subtracting its mean and dividing by its standard deviation. We follow the common strategy to train the grasp transformer. Both the encoder and decoder contain four Swin attention blocks, with 1, 2, 4, and 8 attention heads, respectively. The window size is 7. At each training step, a batch of samples is randomly sampled from the training set and we use the ground truth as the target values to train our neural network. Concretely, we utilize the mean squared error as the loss function and apply AdamW [28] as the optimizer. The batch size is set to 64 by default. The patch partition layer is implemented by convolutions with kernels of $p \times p$ and a stride $p$ . In our implementation, $p$ is set to 4. In order to preserve a one-to-one mapping of the angle $\Theta$ within $[- \frac{\pi}{2}, \frac{\pi}{2}]$ , we decompose the learning of the angle into two components, $\sin(2\Theta)$ and $\cos(2\Theta)$ . In this way, the final angle is obtained by $\arctan \left(\frac{\sin 2\Theta}{\cos 2\Theta}\right)/2$ . TF-Grasp is implemented in PyTorch, and the entire grasp detection system runs on an Ubuntu 18.04 desktop with an Intel Core i9 CPU and an NVIDIA 3090 GPU.
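The angle encoding and its inversion amount to a few lines (here arctan2 replaces the plain quotient form in the text for numerical robustness, which is a deliberate deviation):

```python
import numpy as np

def encode_angle(theta):
    """Represent the grasp angle by the continuous pair (sin 2*theta, cos 2*theta)."""
    return np.sin(2.0 * theta), np.cos(2.0 * theta)

def decode_angle(sin2t, cos2t):
    """Recover theta in (-pi/2, pi/2]."""
    return np.arctan2(sin2t, cos2t) / 2.0

theta = 1.2  # radians, inside [-pi/2, pi/2]
assert np.isclose(decode_angle(*encode_angle(theta)), theta)
```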
|
| 186 |
+
|
| 187 |
+
TABLE I THE ACCURACY ON CORNELL GRASPING DATASET.
|
| 188 |
+
|
| 189 |
+
<table><tr><td rowspan="2">Method</td><td rowspan="2">Input</td><td colspan="2">Accuracy(%)</td><td rowspan="2">Time (ms)</td></tr><tr><td>IW</td><td>OW</td></tr><tr><td>Fast Search [20]</td><td>RGB-D</td><td>60.5</td><td>58.3</td><td>5000</td></tr><tr><td>GG-CNN [17]</td><td>D</td><td>73.0</td><td>69.0</td><td>19</td></tr><tr><td>SAE [14]</td><td>RGB-D</td><td>73.9</td><td>75.6</td><td>1350</td></tr><tr><td>Two-stage closed-loop [22]</td><td>RGB-D</td><td>85.3</td><td>-</td><td>140</td></tr><tr><td>AlexNet, MultiGrasp [5]</td><td>RGB-D</td><td>88.0</td><td>87.1</td><td>76</td></tr><tr><td>STEM-CaRFs [23]</td><td>RGB-D</td><td>88.2</td><td>87.5</td><td>-</td></tr><tr><td>GRPN [24]</td><td>RGB</td><td>88.7</td><td>-</td><td>200</td></tr><tr><td>ResNet-50x2 [3]</td><td>RGB-D</td><td>89.2</td><td>88.9</td><td>103</td></tr><tr><td>GraspNet [12]</td><td>RGB-D</td><td>90.2</td><td>90.6</td><td>24</td></tr><tr><td>ZF-net [25]</td><td>RGB-D</td><td>93.2</td><td>89.1</td><td>-</td></tr><tr><td>E2E-net [26]</td><td>RGB</td><td>98.2</td><td>-</td><td>63</td></tr><tr><td>GR-ConvNet [27]</td><td>D</td><td>93.2</td><td>94.3</td><td>19</td></tr><tr><td>GR-ConvNet [27]</td><td>RGB</td><td>96.6</td><td>95.5</td><td>19</td></tr><tr><td>GR-ConvNet [27]</td><td>RGB-D</td><td>97.7</td><td>96.6</td><td>20</td></tr><tr><td rowspan="3">TF-Grasp</td><td>D</td><td>95.2</td><td>94.9</td><td>41.1</td></tr><tr><td>RGB</td><td>96.78</td><td>95.0</td><td>41.3</td></tr><tr><td>RGB-D</td><td>97.99</td><td>96.7</td><td>41.6</td></tr></table>
|
| 190 |
+
|
| 191 |
+
# B. Experimental Results and Analysis
|
| 192 |
+
|
| 193 |
+
To show its effectiveness, our approach is compared with a number of baselines under the same experimental conditions, i.e., the same evaluation metric. The results of the image-wise (IW) and object-wise (OW) settings on the public Cornell grasping dataset are presented in Table I. Since the Cornell dataset is relatively small, we follow the setting of previous works [3], [5], [14] by adopting a five-fold cross-validation. Also, to make the comparison fair and comprehensive, the input modalities and running time are considered. For all compared baselines, we use the data reported in their original papers. Taking as input only the depth information, our TF-Grasp achieves an accuracy of $95.2\%$ which is competitive with the state-of-the-art. When using both depth and RGB data, our model obtains $97.99\%$ accuracy. For Table II, we use $90\%$ of the Jacquard dataset as the training set and the remaining $10\%$ as the validation set. In addition, our model takes about 41ms to process a single image using the Intel Core i9-10900X CPU processor, which is competitive with the state-of-the-art approaches and basically meets the real-time requirements. The transformer grasping model exhibits a better accuracy
|
| 194 |
+
|
| 195 |
+
TABLE II THE ACCURACY ON JACQUARD GRASPING DATASET.
|
| 196 |
+
|
| 197 |
+
<table><tr><td>Authors</td><td>Method</td><td>Input</td><td>Accuracy (%)</td></tr><tr><td>Depierre [21]</td><td>Jacquard</td><td>RGB-D</td><td>74.2</td></tr><tr><td>Morrison [17]</td><td>GG-CNN2</td><td>D</td><td>84</td></tr><tr><td>Zhou [29]</td><td>FCGN, ResNet-101</td><td>RGB</td><td>91.8</td></tr><tr><td>Alexandre [16]</td><td>GQ-STN</td><td>D</td><td>70.8</td></tr><tr><td>Zhang [11]</td><td>ROI-GD</td><td>RGB</td><td>90.4</td></tr><tr><td>Stefan [26]</td><td>Det Seg</td><td>RGB</td><td>92.59</td></tr><tr><td>Stefan [26]</td><td>Det Seg Refine</td><td>RGB</td><td>92.95</td></tr><tr><td>Kumra [27]</td><td>GR-ConvNet</td><td>D</td><td>93.7</td></tr><tr><td>Kumra [27]</td><td>GR-ConvNet</td><td>RGB</td><td>91.8</td></tr><tr><td>Kumra [27]</td><td>GR-ConvNet</td><td>RGB-D</td><td>94.6</td></tr><tr><td rowspan="3">Our</td><td>TF-Grasp</td><td>D</td><td>93.1</td></tr><tr><td>TF-Grasp</td><td>RGB</td><td>93.57</td></tr><tr><td>TF-Grasp</td><td>RGB-D</td><td>94.6</td></tr></table>
|
| 198 |
+
|
| 199 |
+
on both datasets compared to conventional CNN models. Our proposed approach achieves an accuracy of $94.6\%$ , which is on par with or superior to previous methods. The results on the Cornell and Jacquard datasets all indicate that the model with the attention mechanism is more suitable for visual grasping tasks.
|
| 200 |
+
|
| 201 |
+
Despite the fact that our model is trained on a single-object dataset, it can be well adapted to multi-object environments with the help of attention mechanisms. In addition, to evaluate the advantages of the transformer versus CNNs for visual grasping tasks, we use the original convolution layers, residual layers, and our transformer as feature extractors to test detection accuracy on different objects in the Cornell dataset. We apply an object-wise split to the Cornell dataset, and Fig. 5 shows the detection accuracy on objects not seen during the training phase. All objects are subsets of the Cornell dataset and are evaluated 5 times. All models shown in Fig. 5 employ an encoder-decoder architecture with 4 stages in order to guarantee a fair comparison, where original-conv is a fully convolutional neural network and resnet-conv replaces the original convolution layers with residual blocks. The results of the different models are shown in Fig. 5. Note that the transformer outperforms original convolutions on all selected objects and is marginally better than or on par with the residual network.
|
| 202 |
+
|
| 203 |
+
These results demonstrate that the transformer improves robotic grasp detection. We conjecture that prior methods that rely on local operations of the convolution layers might ignore the dependencies between long-range pixels. Instead, our approach leverages the attention mechanism to exploit both local and global information and integrates features that are useful for grasping. To better demonstrate whether the transformer-based grasping model can model the relationships between objects and across the scene, we present the multi-object grasping results and grasping quality heatmaps of the transformer and CNN in Fig. 4. Our aim is to verify that the transformer is preferred over CNN for visual grasping tasks and is better at capturing global and local information. From Fig. 4, we can see that the grasp rectangles predicted by CNN have the right grasp position in most cases, but the predicted gripper angle and width are often not appropriate. In some cases, CNN even generates grasping rectangles in the
|
| 204 |
+
|
| 205 |
+
background. With the attention mechanism, our transformer-based model is able to clearly identify the objects from the background. In the second row of Fig. 4, the grasping quality images show that the CNN-based approach cannot identify the graspable area and considers the entire region of objects as a graspable zone with high success probabilities. Instead, as shown in the fourth row of Fig. 4, the transformer-based model tends to capture the area that is easy to grasp due to its larger receptive field. For each attention block, the attention operation establishes the inter-element relationships through self-attention, and the subsequent multilayer-perceptron (MLP) module further models the inherent relations between elements. The layer normalization and residual connections that interleave these two operations keep the training stable and efficient. In contrast, in a CNN, the receptive field of each convolutional kernel is limited. To build a larger receptive field, the model often needs to repeatedly stack convolutional layers to gain global and semantically rich features. However, such an approach generally results in the loss of detailed feature information, such as the position and shape information of objects, that is essential for grasping tasks. Therefore, we exploit a transformer-based model which can better capture not only the global information but also detailed features (e.g., the position and shape information).
|
| 206 |
+
|
| 207 |
+

|
| 208 |
+
# C. Visualization Analysis
|
| 209 |
+
Fig. 5. The accuracy of different models as feature extractors on selected objects.
|
| 210 |
+
|
| 211 |
+
To clarify why the transformer architecture is helpful for grasp detection tasks, we visualize the attention maps as heatmaps in Fig. 3. These heatmaps show that the self-attention modules readily learn the areas that are easy to grasp, such as the edges of objects, ignore irrelevant details, and pay more attention to the contour and shape of the objects. Meanwhile, the model focuses on general characteristics rather than individual features. For example, for the chairs shown in Fig. 3, our method assigns a higher grasp quality to the edges of the chairs. We further provide more concrete examples of real-world grasping, and the experimental results show that the attention mechanism is more likely to achieve a better understanding of the grasping scenario, generate more accurate grasping rectangles, and work well on both household and novel objects.
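One simple way to obtain such heatmaps (a sketch under assumed conventions, not necessarily the visualization procedure used here) is to average the attention weights of a block over heads and query positions and upsample the per-token scores to the image resolution:

```python
import torch
import torch.nn.functional as F

def attention_heatmap(attn_weights, image_hw):
    # attn_weights: (heads, tokens, tokens) softmax weights from one self-attention layer.
    scores = attn_weights.mean(dim=0).mean(dim=0)       # average over heads and queries
    side = int(scores.numel() ** 0.5)                   # assumes a square token grid
    grid = scores.reshape(1, 1, side, side)
    heat = F.interpolate(grid, size=image_hw, mode="bilinear", align_corners=False)
    heat = (heat - heat.min()) / (heat.max() - heat.min() + 1e-8)
    return heat[0, 0]                                   # (H, W), normalized to [0, 1]

attn = torch.softmax(torch.randn(4, 196, 196), dim=-1)  # toy attention weights
heatmap = attention_heatmap(attn, (224, 224))           # overlay this on the input image
```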
|
| 212 |
+
|
| 213 |
+

|
| 214 |
+
(a) Samples of generated rectangles predicted by CNN
|
| 215 |
+
|
| 216 |
+

|
| 217 |
+
(b) Predicted grasp quality heatmaps by CNN
|
| 218 |
+
|
| 219 |
+

|
| 220 |
+
(c) Samples of generated rectangles predicted by Transformer
|
| 221 |
+
|
| 222 |
+

|
| 223 |
+
(d) Predicted grasp quality heatmaps by Transformer
|
| 224 |
+
Fig. 4. Visualization comparison of the CNN and transformer-based grasping models.
|
| 225 |
+
|
| 226 |
+
TABLE III COMPARISON BETWEEN USING AND NOT USING SKIP-CONNECTIONS
|
| 227 |
+
|
| 228 |
+
<table><tr><td colspan="3">Accuracy on the Cornell grasping dataset</td></tr><tr><td></td><td>With Skip-connections</td><td>Without Skip-connections</td></tr><tr><td>RGB</td><td>96.78%</td><td>95.7%</td></tr><tr><td>Depth</td><td>95.2%</td><td>94.3%</td></tr><tr><td>RGB+Depth</td><td>97.99%</td><td>96.1%</td></tr><tr><td colspan="3">Accuracy on the Jacquard grasping dataset</td></tr><tr><td></td><td>With Skip-connections</td><td>Without Skip-connections</td></tr><tr><td>RGB</td><td>93.57%</td><td>92.4%</td></tr><tr><td>Depth</td><td>93.1%</td><td>91.8%</td></tr><tr><td>RGB+Depth</td><td>94.6%</td><td>93.27%</td></tr></table>
|
| 229 |
+
|
| 230 |
+
In Fig. 6, we illustrate a pick-and-place task based on our TF-Grasp on the Franka manipulator. Our grasp detection system works well for novel objects that have not been seen during the training procedure and also locates graspable objects in cluttered environments.
|
| 231 |
+
|
| 232 |
+
In conclusion, the visualization results indicate that our TF-Grasp produces more general and robust predictions, which contributes to the improved detection accuracy.
|
| 233 |
+
|
| 234 |
+
# D. Ablation Studies
|
| 235 |
+
|
| 236 |
+
To understand the role of skip-connections in our transformer model for visual grasping, we conduct experiments on the Cornell and Jacquard grasping datasets with and without skip-connections. The detailed experimental results are shown in Table III. Using skip-connections outperforms the variant without them for all input modalities. The attention mechanism
|
| 237 |
+
|
| 238 |
+
in the transformer builds inter-relationships in each layer, incorporates global features, and achieves promising results. Through skip-connections, the multi-scale representations from different stages are further fused globally. The empirical evidence shows that this further refinement and the additional contextual features improve the quality of the final grasp prediction.
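A minimal sketch of such a skip-connection (assumed for illustration, not the authors' implementation): the decoder features of a stage are concatenated with the encoder features at the same resolution and fused back to the working dimension.

```python
import torch
import torch.nn as nn

class DecoderStageWithSkip(nn.Module):
    def __init__(self, dim):
        super().__init__()
        # Reduce the concatenated (decoder + encoder) features back to `dim`.
        self.fuse = nn.Linear(2 * dim, dim)

    def forward(self, dec_feat, enc_feat):
        # dec_feat, enc_feat: (batch, tokens, dim) taken from the same resolution stage.
        fused = torch.cat([dec_feat, enc_feat], dim=-1)   # the skip-connection
        return self.fuse(fused)

dec = torch.randn(1, 196, 96)
enc = torch.randn(1, 196, 96)
out = DecoderStageWithSkip(96)(dec, enc)                  # (1, 196, 96)
```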
|
| 239 |
+
|
| 240 |
+
# E. Grasping in Real World Scenarios
|
| 241 |
+
|
| 242 |
+
Physical Setting. The Franka Panda manipulator and a RealSense D435 RGB-D camera are used in our physical experiments. The camera is attached to the end-effector to maintain good visual coverage of graspable objects. In each grasp attempt, our TF-Grasp receives the visual signals from the depth camera mounted on the robot end-effector and outputs an optimal grasping posture. Next, the end-effector approaches the target grasping posture along a trajectory planned by a motion planning method and then closes the gripper. Such a transformer-based grasp detection system can be easily adapted to other hardware platforms. During the grasping process, the raw depth image contains a portion of missing pixels with NaN values. We generate the mask of NaN values, normalize the depth image, and apply cv2.inpaint [30] for further depth completion.
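The depth-completion step can be sketched as follows (an assumed, minimal version of the described masking, normalization, and inpainting; the radius and inpainting flag are illustrative choices):

```python
import cv2
import numpy as np

def complete_depth(depth, inpaint_radius=1):
    # depth: float32 depth image from the RGB-D camera, NaN where the sensor has no return.
    mask = np.isnan(depth).astype(np.uint8)            # 1 where depth is missing
    filled = np.nan_to_num(depth, nan=0.0)

    # Normalize to 8-bit for cv2.inpaint, remembering the scale to undo it afterwards.
    d_min, d_max = float(filled.min()), float(filled.max())
    scale = (d_max - d_min) or 1.0
    depth_8u = ((filled - d_min) / scale * 255).astype(np.uint8)

    inpainted = cv2.inpaint(depth_8u, mask, inpaint_radius, cv2.INPAINT_NS)
    return inpainted.astype(np.float32) / 255.0 * scale + d_min

depth = np.random.rand(480, 640).astype(np.float32)
depth[100:120, 200:240] = np.nan                        # simulated missing region
completed = complete_depth(depth)                       # hole-free depth for grasp detection
```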
|
| 243 |
+
|
| 244 |
+
We perform a total of 165 grasping attempts, of which the robot grasps successfully 152 times, achieving a success rate of $92.1\%$ . Table IV lists the results of learning-based methods on real robot grasping. These results indicate that the transformer-based grasp detection system also performs well on real robots.
|
| 245 |
+
|
| 246 |
+

|
| 247 |
+
Fig. 6. Screenshots of physical grasping in clutter.
|
| 248 |
+
|
| 249 |
+
TABLE IV THE RESULTS FOR PHYSICAL SETUP.
|
| 250 |
+
|
| 251 |
+
<table><tr><td>Authors</td><td>Physical grasp</td><td>Success rate (%)</td></tr><tr><td>Lenz [14]</td><td>89/100</td><td>89%</td></tr><tr><td>Pinto [31]</td><td>109/150</td><td>73%</td></tr><tr><td>Morrison [17]</td><td>110/120</td><td>92%</td></tr><tr><td>Chu [32]</td><td>89/100</td><td>89%</td></tr><tr><td>TF-Grasp(Ours)</td><td>152/165</td><td>92.1%</td></tr></table>
|
| 252 |
+
|
| 253 |
+
# V. DISCUSSION AND CONCLUSION
|
| 254 |
+
|
| 255 |
+
In this work, we develop a novel architecture for visual grasping. Although CNNs and their variants are still the dominant models in visual robotic grasping, we show the powerful potential of transformers in grasp detection. Compared with CNN-based counterparts, transformer-based grasp detection models are better at capturing global dependencies and learning powerful feature representations. The results show that our proposed approach outperforms the original CNN-based models, as the contexts are better represented by attention propagation. Nevertheless, the current approach is limited to parallel grippers. Future research will focus on developing a universal transformer-based grasp detection method for other types of grippers, such as the five-finger dexterous hand.
|
| 256 |
+
|
| 257 |
+
# REFERENCES
|
| 258 |
+
|
| 259 |
+
[1] J. Song, M. Patel, and M. Ghaffari, “Fusing convolutional neural network and geometric constraint for image-based indoor localization,” IEEE Robotics Autom. Lett., vol. 7, no. 2, pp. 1674–1681, 2022.
|
| 260 |
+
[2] D. Zhao and J. Oh, "Noticing motion patterns: A temporal cnn with a novel convolution operator for human trajectory prediction," IEEE Robotics Autom. Lett., vol. 6, no. 2, pp. 628-634, 2021.
|
| 261 |
+
[3] S. Kumra and C. Kanan, "Robotic grasp detection using deep convolutional neural networks," in Proc. IEEE/RSJ Int. Conf. Intell. Robots Syst, 2017, pp. 769-776.
|
| 262 |
+
[4] X. Zhu, Y. Zhou, Y. Fan, and M. Tomizuka, "Learn to grasp with less supervision: A data-efficient maximum likelihood grasp sampling loss," arXiv preprint arXiv:2110.01379, 2021.
|
| 263 |
+
[5] J. Redmon and A. Angelova, "Real-time grasp detection using convolutional neural networks," in Proc. IEEE Int. Conf. Robot. Autom., 2015, pp. 1316-1322.
|
| 264 |
+
[6] A. Vaswani et al., "Attention is all you need," in Proc. Annual Conf. Neural Inf. Process. Syst., Long Beach, CA, USA, 2017, pp. 5998-6008.
|
| 265 |
+
[7] A. Dosovitskiy et al., "An image is worth 16x16 words: Transformers for image recognition at scale," in Proc. Int. Conf. Learn. Represent., 2021.
|
| 266 |
+
[8] Z. Liu et al., "Swin transformer: Hierarchical vision transformer using shifted windows," in Proc. IEEE Int. Conf. Comput. Vision, 2021, pp. 10012-10022.
|
| 267 |
+
[9] R. M. Murray, Z. Li, and S. S. Sastry, A mathematical introduction to robotic manipulation. Boca Raton, FL, USA: CRC, 1994, 2017.
|
| 268 |
+
[10] A. Bicchi and V. Kumar, “Robotic grasping and contact: A review,” in Proc. IEEE Int. Conf. Robot. Autom., San Francisco, CA, USA, Apr. 2000, pp. 348–353.
|
| 269 |
+
|
| 270 |
+
[11] H. Zhang, X. Lan, S. Bai, X. Zhou, Z. Tian, and N. Zheng, “Roi-based robotic grasp detection for object overlapping scenes,” in Proc. IEEE Int. Conf. Intell. Robots Syst., 2019, pp. 4768–4775.
|
| 271 |
+
[12] U. Asif, J. Tang, and S. Harrer, "Graspnet: An efficient convolutional neural network for real-time grasp detection for low-powered devices," in IJCAI, vol. 7, 2018, pp. 4875-4882.
|
| 272 |
+
[13] X. Zhu, L. Sun, Y. Fan, and M. Tomizuka, “6-dof contrastive grasp proposal network,” in Proc. IEEE Int. Conf. Robot.Automat., 2021, pp. 6371–6377.
|
| 273 |
+
[14] I. Lenz, H. Lee, and A. Saxena, "Deep learning for detecting robotic grasps," Int. J. Robotics Res., vol. 34, no. 4-5, pp. 705-724, 2015.
|
| 274 |
+
[15] J. Mahler et al., "Dex-Net 2.0: Deep learning to plan robust grasps with synthetic point clouds and analytic grasp metrics," in Robotics: Science and Systems XIII, Cambridge, MA, USA, 2017.
|
| 275 |
+
[16] A. Gariépy, J.-C. Ruel, B. Chaib-Draa, and P. Giguere, “Gq-stn: Optimizing one-shot grasp detection based on robustness classifier,” in Proc. IEEE/RSJ Int. Conf. Intell. Robots Syst., 2019, pp. 3996–4003.
|
| 276 |
+
[17] D. Morrison, P. Corke, and J. Leitner, “Learning robust, real-time, reactive robotic grasping,” Int. J. Robotics Res., vol. 39, no. 2-3, pp. 183–201, 2020.
|
| 277 |
+
[18] J. Chen et al., "TransUNet: Transformers make strong encoders for medical image segmentation," arXiv preprint arXiv:2102.04306, 2021.
|
| 278 |
+
[19] O. Ronneberger, P. Fischer, and T. Brox, “U-net: Convolutional networks for biomedical image segmentation,” in Proc. Int. Conf. on Medical image computing and computer-assisted intervention. Springer, 2015, pp. 234–241.
|
| 279 |
+
[20] Y. Jiang, S. Moseson, and A. Saxena, "Efficient grasping from rgbd images: Learning using a new rectangle representation," in Proc. IEEE Int. Conf. Robot. Automat., 2011, pp. 3304-3311.
|
| 280 |
+
[21] A. Depierre, E. Dellandrea, and L. Chen, "Jacquard: A large scale dataset for robotic grasp detection," in Proc. IEEE/RSJ Int. Conf. Intell. Robots Syst., 2018, pp. 3511-3516.
|
| 281 |
+
[22] Z. Wang, Z. Li, B. Wang, and H. Liu, "Robot grasp detection using multimodal deep convolutional neural networks," Advances in Mechanical Engineering, vol. 8, no. 9, p. 1687814016668077, 2016.
|
| 282 |
+
[23] U. Asif, M. Bennamoun, and F. A. Sohel, "Rgb-d object recognition and grasp detection using hierarchical cascaded forests," IEEE Trans. on Robotics, vol. 33, no. 3, pp. 547-564, 2017.
|
| 283 |
+
[24] H. Karaoguz and P. Jensfelt, "Object detection approach for robot grasp detection," in Proc. IEEE Int. Conf. Robot.Automat., 2019, pp. 4953-4959.
|
| 284 |
+
[25] D. Guo, F. Sun, H. Liu, T. Kong, B. Fang, and N. Xi, “A hybrid deep architecture for robotic grasp detection,” in Proc. IEEE Int. Conf. Robot.Automat., 2017, pp. 1609-1614.
|
| 285 |
+
[26] S. Ainetter and F. Fraundorfer, "End-to-end trainable deep neural network for robotic grasp detection and semantic segmentation from rgb," in Proc. IEEE Int. Conf. Robot.Automat. IEEE, 2021, pp. 13452-13458.
|
| 286 |
+
[27] S. Kumra, S. Joshi, and F. Sahin, "Antipodal robotic grasping using generative residual convolutional neural network," in Proc. IEEE/RSJ Int. Conf. Intell. Robots Syst., 2020, pp. 9626-9633.
|
| 287 |
+
[28] I. Loshchilov and F. Hutter, "Decoupled weight decay regularization," in Proc. Int. Conf. Learn. Represent., 2018.
|
| 288 |
+
[29] X. Zhou, X. Lan, H. Zhang, Z. Tian, Y. Zhang, and N. Zheng, “Fully convolutional grasp detection network with oriented anchor box,” in Proc. IEEE Int. Conf. Intell. Robots Syst., 2018, pp. 7223-7230.
|
| 289 |
+
[30] G. Bradski, “The opencv library.” Dr. Dobb's Journal: Software Tools for the Professional Programmer, vol. 25, no. 11, pp. 120-123, 2000.
|
| 290 |
+
[31] L. Pinto and A. Gupta, "Supersizing self-supervision: Learning to grasp from 50k tries and 700 robot hours," in Proc. IEEE Int. Conf. Robot. Autom., 2016, pp. 3406-3413.
|
| 291 |
+
[32] F.-J. Chu, R. Xu, and P. A. Vela, “Real-world multiobject, multigrasp detection,” IEEE Robotics Autom. Lett., vol. 3, no. 4, pp. 3355–3362, 2018.
|
2202.11xxx/2202.11911/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:36c142615bc59b28786c9fbeefe65ff9cb55be2cbf848da7c66daed477880287
|
| 3 |
+
size 507373
|
2202.11xxx/2202.11911/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.11xxx/2202.11912/f4c9ccdd-f304-4b1d-89a0-9fd9c3d45733_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.11xxx/2202.11912/f4c9ccdd-f304-4b1d-89a0-9fd9c3d45733_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.11xxx/2202.11912/f4c9ccdd-f304-4b1d-89a0-9fd9c3d45733_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:486ff6be70586591e3d9bc40d2096abe0a4fb60ad661e008662b760b5d9a0a3c
|
| 3 |
+
size 1626865
|
2202.11xxx/2202.11912/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.11xxx/2202.11912/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:74d44867e64fcc1af53b3cef5b3fd1e3b43a171553703b0387cae827d960a1fe
|
| 3 |
+
size 727374
|
2202.11xxx/2202.11912/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.11xxx/2202.11915/6d305bc4-4eda-4ae4-9704-6ea8af50946e_content_list.json
ADDED
|
@@ -0,0 +1,2020 @@
|
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Interpolation-based Contrastive Learning for Few-Label Semi-Supervised Learning",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
166,
|
| 8 |
+
70,
|
| 9 |
+
833,
|
| 10 |
+
140
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Xihong Yang, Xiaochang Hu, Sihang Zhou, Xinwang Liu, En Zhu",
|
| 17 |
+
"bbox": [
|
| 18 |
+
250,
|
| 19 |
+
147,
|
| 20 |
+
746,
|
| 21 |
+
164
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "Abstract—Semi-supervised learning (SSL) has long been proved to be an effective technique to construct powerful models with limited labels. In the existing literature, consistency regularization-based methods, which force the perturbed samples to have similar predictions with the original ones have attracted much attention for their promising accuracy. However, we observe that, the performance of such methods decreases drastically when the labels get extremely limited, e.g., 2 or 3 labels for each category. Our empirical study finds that the main problem lies with the drift of semantic information in the procedure of data augmentation. The problem can be alleviated when enough supervision is provided. However, when little guidance is available, the incorrect regularization would mislead the network and undermine the performance of the algorithm. To tackle the problem, we (1) propose an interpolation-based method to construct more reliable positive sample pairs; (2) design a novel contrastive loss to guide the embedding of the learned network to change linearly between samples so as to improve the discriminative capability of the network by enlarging the margin decision boundaries. Since no destructive regularization is introduced, the performance of our proposed algorithm is largely improved. Specifically, the proposed algorithm outperforms the second best algorithm (Comatch) with $5.3\\%$ by achieving $88.73\\%$ classification accuracy when only two labels are available for each class on the CIFAR-10 dataset. Moreover, we further prove the generality of the proposed method by improving the performance of the existing state-of-the-art algorithms considerably with our proposed strategy.",
|
| 28 |
+
"bbox": [
|
| 29 |
+
73,
|
| 30 |
+
220,
|
| 31 |
+
491,
|
| 32 |
+
575
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Index Terms—Semi-supervised learning, contrastive learning, interpolation-based method, few-label.",
|
| 39 |
+
"bbox": [
|
| 40 |
+
73,
|
| 41 |
+
580,
|
| 42 |
+
490,
|
| 43 |
+
608
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "I. INTRODUCTION",
|
| 50 |
+
"text_level": 1,
|
| 51 |
+
"bbox": [
|
| 52 |
+
215,
|
| 53 |
+
633,
|
| 54 |
+
351,
|
| 55 |
+
647
|
| 56 |
+
],
|
| 57 |
+
"page_idx": 0
|
| 58 |
+
},
|
| 59 |
+
{
|
| 60 |
+
"type": "text",
|
| 61 |
+
"text": "In recent years, machine learning has developed rapidly and achieved remarkable performance in many fields like, image classification [1], [2], object detection [3], [4], semantic segmentation [5], [6], and clustering [7]–[14]. Convolutional neural networks (CNNs) have attracted the attention of many researchers. The success of most of these deep neural networks depends heavily on a large number of high-quality labeled datasets [2], [15], [16].",
|
| 62 |
+
"bbox": [
|
| 63 |
+
73,
|
| 64 |
+
656,
|
| 65 |
+
490,
|
| 66 |
+
777
|
| 67 |
+
],
|
| 68 |
+
"page_idx": 0
|
| 69 |
+
},
|
| 70 |
+
{
|
| 71 |
+
"type": "text",
|
| 72 |
+
"text": "However, collecting labeled data can consume a lot of resources which is un-affordable to countless everyday learning demands in modern society. Therefore, deep learning algorithms which can achieve appropriate performance with tractable supervision have been a hot research spot in recent years. Specifically, deep semi-supervised learning (SSL) algorithms, which seek to improve the performance of deep learning models on datasets with only limited labeled data by leveraging large amounts of unlabeled data, are an important branch in this family. This has led to a plethora of SSL methods designed for various fields [17]–[23].",
|
| 73 |
+
"bbox": [
|
| 74 |
+
73,
|
| 75 |
+
777,
|
| 76 |
+
491,
|
| 77 |
+
946
|
| 78 |
+
],
|
| 79 |
+
"page_idx": 0
|
| 80 |
+
},
|
| 81 |
+
{
|
| 82 |
+
"type": "image",
|
| 83 |
+
"img_path": "images/6bc9ffcc7194c03f130bc0424de8ece95e79f8c4942c4e5a492a62d9b2d02493.jpg",
|
| 84 |
+
"image_caption": [
|
| 85 |
+
"Figure 1: Illustration of the positive sample pair construction process. Different from the existing works which construct positive sample pairs with data augmentation, we construct positive sample pairs with interpolation operations. Specifically, given two unlabeled images, the integration of the sample embeddings '1' and the embedding of the sample integration '2' are acquired as a positive sample pair."
|
| 86 |
+
],
|
| 87 |
+
"image_footnote": [],
|
| 88 |
+
"bbox": [
|
| 89 |
+
524,
|
| 90 |
+
217,
|
| 91 |
+
906,
|
| 92 |
+
354
|
| 93 |
+
],
|
| 94 |
+
"page_idx": 0
|
| 95 |
+
},
|
| 96 |
+
{
|
| 97 |
+
"type": "text",
|
| 98 |
+
"text": "Among all the deep semi-supervised learning algorithms, consistency regularization based methods treat the original input and its augmented version as positive pairs, which is a form of contrastive learning [24]–[31]. These consistency regularization-based methods follow a common assumption that ever after data augmentation, the classifier could output the same class probability for an unlabeled sample, which means data augmentation will not change the semantic. The input image should be more similar to its augmented version than other images. Under this assumption, researchers perturb the input samples by conducting data augmentation to generate similar samples of the original data.",
|
| 99 |
+
"bbox": [
|
| 100 |
+
501,
|
| 101 |
+
502,
|
| 102 |
+
921,
|
| 103 |
+
683
|
| 104 |
+
],
|
| 105 |
+
"page_idx": 0
|
| 106 |
+
},
|
| 107 |
+
{
|
| 108 |
+
"type": "text",
|
| 109 |
+
"text": "The mentioned algorithms have contributed remarkable performance improvement to improve the learning accuracy when only a few labeled data are available. However, we observe that, when the number of labeled data gets extremely small, e.g., 2 to 3 labels for each category, the performance of the existing algorithms would drop drastically. For example, to the CIFAR-10 dataset whose scale for training samples is 50,000 and 10 categories, the performance of the state-of-the-art algorithm MixMatch [28] can achieve the top-1 accuracy of $86.47\\%$ when 250 labeled data is available. Nevertheless, the performance of the same algorithm drops to $50.10\\%$ when only 30 labeled samples are available. The similar phenomenon happens to the Mean-Teacher [27] algorithm whose performance drop by more than a half when the label number decreases from 250 to 30. More experimental results can be found in Table I.",
|
| 110 |
+
"bbox": [
|
| 111 |
+
501,
|
| 112 |
+
685,
|
| 113 |
+
921,
|
| 114 |
+
926
|
| 115 |
+
],
|
| 116 |
+
"page_idx": 0
|
| 117 |
+
},
|
| 118 |
+
{
|
| 119 |
+
"type": "text",
|
| 120 |
+
"text": "According to our analysis, one of the main reasons that",
|
| 121 |
+
"bbox": [
|
| 122 |
+
519,
|
| 123 |
+
929,
|
| 124 |
+
921,
|
| 125 |
+
945
|
| 126 |
+
],
|
| 127 |
+
"page_idx": 0
|
| 128 |
+
},
|
| 129 |
+
{
|
| 130 |
+
"type": "page_number",
|
| 131 |
+
"text": "1",
|
| 132 |
+
"bbox": [
|
| 133 |
+
911,
|
| 134 |
+
30,
|
| 135 |
+
919,
|
| 136 |
+
40
|
| 137 |
+
],
|
| 138 |
+
"page_idx": 0
|
| 139 |
+
},
|
| 140 |
+
{
|
| 141 |
+
"type": "aside_text",
|
| 142 |
+
"text": "arXiv:2202.11915v2 [cs.CV] 22 Jun 2022",
|
| 143 |
+
"bbox": [
|
| 144 |
+
22,
|
| 145 |
+
265,
|
| 146 |
+
57,
|
| 147 |
+
707
|
| 148 |
+
],
|
| 149 |
+
"page_idx": 0
|
| 150 |
+
},
|
| 151 |
+
{
|
| 152 |
+
"type": "table",
|
| 153 |
+
"img_path": "images/534e09ca5dee8ab047edd9e260f0e7905cf41d491040f0b01d28101d79b71603.jpg",
|
| 154 |
+
"table_caption": [
|
| 155 |
+
"Table I: Classification accuracy of two state-of-the-art semi-supervised algorithms, i.e., MixMatch [28] and Mean-Teacher [27], on CIFAR-10 dataset with 30, 40, 250, 500 and 1000 labels."
|
| 156 |
+
],
|
| 157 |
+
"table_footnote": [],
|
| 158 |
+
"table_body": "<table><tr><td colspan=\"2\">Method</td><td>30</td><td>40</td><td>250</td><td>500</td><td>1000</td></tr><tr><td>MixMatch</td><td>[28]</td><td>50.10</td><td>59.08</td><td>86.47</td><td>89.33</td><td>90.79</td></tr><tr><td>Mean-Teacher</td><td>[27]</td><td>24.51</td><td>24.93</td><td>52.49</td><td>70.15</td><td>80.12</td></tr></table>",
|
| 159 |
+
"bbox": [
|
| 160 |
+
80,
|
| 161 |
+
131,
|
| 162 |
+
488,
|
| 163 |
+
196
|
| 164 |
+
],
|
| 165 |
+
"page_idx": 1
|
| 166 |
+
},
|
| 167 |
+
{
|
| 168 |
+
"type": "image",
|
| 169 |
+
"img_path": "images/47ed56425f9d2eeed4d7c62ad5ef24f979efaeec9462c6303c41a4b52f4cbc63.jpg",
|
| 170 |
+
"image_caption": [
|
| 171 |
+
"Figure 2: Representative examples of semantic information drift caused by inappropriate data augmentation of MINIST samples."
|
| 172 |
+
],
|
| 173 |
+
"image_footnote": [],
|
| 174 |
+
"bbox": [
|
| 175 |
+
117,
|
| 176 |
+
210,
|
| 177 |
+
450,
|
| 178 |
+
378
|
| 179 |
+
],
|
| 180 |
+
"page_idx": 1
|
| 181 |
+
},
|
| 182 |
+
{
|
| 183 |
+
"type": "text",
|
| 184 |
+
"text": "cause large performance decrease lies with the semantic information drift during data augmentation. Taking the samples in the MINIST dataset for example, when the vertical flip is applied to the samples, the labels of \"6\"s and \"9\"s, \"2\"s and \"5\"s can easily get changed. This would challenge the rationality of the information consistency assumption of existing methods. This problem could be alleviated when relatively abundant label information is available. However, when the label information is extremely lacked, the performance of the corresponding algorithms could decrease a lot.",
|
| 185 |
+
"bbox": [
|
| 186 |
+
73,
|
| 187 |
+
460,
|
| 188 |
+
491,
|
| 189 |
+
611
|
| 190 |
+
],
|
| 191 |
+
"page_idx": 1
|
| 192 |
+
},
|
| 193 |
+
{
|
| 194 |
+
"type": "text",
|
| 195 |
+
"text": "In this paper, to solve the problem of semantic information drift caused by data augmentation-based positive sample pair construction, we propose a novel interpolation-based positive sample pair construction fashion. Generally, our design roots from the observation that the margin of decision boundaries would get larger if the prediction of the network could change linearly [32], [33]. Under the circumstance of semi-supervised learning, when the label is extremely limited, we seek to improve the discriminative capability of the network by forcing the embedding of the network to change linearly. Specifically, given two unlabeled images, on the one hand, we embed the samples separately into the latent space. On the other hand, we conduct image-level interpolation for an integrated image and do the embedding with the same network. Then, by combining the embedding of the interpolated images with the interpolation of the embeddings, we construct a positive sample pair. In our setting, the negative sample pairs are the embedding pair of different samples. By forcing the positive sample pairs to be close to each other in the latent space and the negative sample pairs to get far away from each other, we enlarge the margin of decision boundaries, thus improving the performance of the algorithm. To achieve the goal, we further",
|
| 196 |
+
"bbox": [
|
| 197 |
+
73,
|
| 198 |
+
612,
|
| 199 |
+
491,
|
| 200 |
+
945
|
| 201 |
+
],
|
| 202 |
+
"page_idx": 1
|
| 203 |
+
},
|
| 204 |
+
{
|
| 205 |
+
"type": "image",
|
| 206 |
+
"img_path": "images/58ea5485ecdbe9dd5b2879a89c66b8655e59b802caf308691586449bb4d941f6.jpg",
|
| 207 |
+
"image_caption": [
|
| 208 |
+
"Figure 3: Illustration of classification results with different data augmentations on the MINIST dataset."
|
| 209 |
+
],
|
| 210 |
+
"image_footnote": [],
|
| 211 |
+
"bbox": [
|
| 212 |
+
535,
|
| 213 |
+
71,
|
| 214 |
+
867,
|
| 215 |
+
305
|
| 216 |
+
],
|
| 217 |
+
"page_idx": 1
|
| 218 |
+
},
|
| 219 |
+
{
|
| 220 |
+
"type": "text",
|
| 221 |
+
"text": "propose a novel contrastive learning-based loss function to guide the network for better learning. We name the resultant algorithm Interpolation Contrastive Learning Semi-Supervised Learning (ICL-SSL).",
|
| 222 |
+
"bbox": [
|
| 223 |
+
501,
|
| 224 |
+
366,
|
| 225 |
+
921,
|
| 226 |
+
425
|
| 227 |
+
],
|
| 228 |
+
"page_idx": 1
|
| 229 |
+
},
|
| 230 |
+
{
|
| 231 |
+
"type": "text",
|
| 232 |
+
"text": "The main contributions of this paper are listed as follows:",
|
| 233 |
+
"bbox": [
|
| 234 |
+
519,
|
| 235 |
+
426,
|
| 236 |
+
916,
|
| 237 |
+
440
|
| 238 |
+
],
|
| 239 |
+
"page_idx": 1
|
| 240 |
+
},
|
| 241 |
+
{
|
| 242 |
+
"type": "list",
|
| 243 |
+
"sub_type": "text",
|
| 244 |
+
"list_items": [
|
| 245 |
+
"- We find that semantic information drift is one of the main problems that cause the performance of existing consistency regularization-based semi-supervised algorithms to decrease drastically when extremely limited labeled data is provided.",
|
| 246 |
+
"- We propose an interpolation-based positive sample construction method and a novel contrastive loss function to solve the problem and improve the learning accuracy.",
|
| 247 |
+
"- Our experimental results on the benchmark datasets verify the superior performance of the proposed algorithms against the state-of-the-art algorithms. We also show the generality of our proposed algorithm by enhancing the performance of the existing advanced algorithms steadily with our method."
|
| 248 |
+
],
|
| 249 |
+
"bbox": [
|
| 250 |
+
521,
|
| 251 |
+
441,
|
| 252 |
+
919,
|
| 253 |
+
651
|
| 254 |
+
],
|
| 255 |
+
"page_idx": 1
|
| 256 |
+
},
|
| 257 |
+
{
|
| 258 |
+
"type": "text",
|
| 259 |
+
"text": "II. RELATED WORK",
|
| 260 |
+
"text_level": 1,
|
| 261 |
+
"bbox": [
|
| 262 |
+
638,
|
| 263 |
+
667,
|
| 264 |
+
785,
|
| 265 |
+
679
|
| 266 |
+
],
|
| 267 |
+
"page_idx": 1
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "text",
|
| 271 |
+
"text": "In this section, we first define the main notations and then review several semi-supervised learning (SSL) methods related to our method ICL-SSL.",
|
| 272 |
+
"bbox": [
|
| 273 |
+
503,
|
| 274 |
+
685,
|
| 275 |
+
919,
|
| 276 |
+
729
|
| 277 |
+
],
|
| 278 |
+
"page_idx": 1
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "text",
|
| 282 |
+
"text": "A. Notations Definition",
|
| 283 |
+
"text_level": 1,
|
| 284 |
+
"bbox": [
|
| 285 |
+
503,
|
| 286 |
+
744,
|
| 287 |
+
666,
|
| 288 |
+
758
|
| 289 |
+
],
|
| 290 |
+
"page_idx": 1
|
| 291 |
+
},
|
| 292 |
+
{
|
| 293 |
+
"type": "text",
|
| 294 |
+
"text": "Given a dataset $\\mathcal{D} = \\mathcal{X} \\cup \\mathcal{U}$ , where $\\mathcal{X} = \\{(x_1, y_1), \\dots, (x_m, y_m)\\}$ is an labeled sub-dataset, $\\mathcal{U} = u_{m+1}, \\dots, u_{m+n}$ is a unlabeled sub-dataset, $n \\gg m$ and $y_m$ is encoded by one-hot, we define a classification model as $p(y|x; \\theta)$ , which outputs a distribution over class labels $y$ for an input $x$ with parameters $\\theta$ . For the model $p(y|x; \\theta)$ , it is concatenated by a encoder network $f(\\cdot)$ and a classification head $h(\\cdot)$ before softmax function. Meanwhile, after the encoder network $f(\\cdot)$ , we set a projection head $g(\\cdot)$ , outputting the normalized low-dimensional representation $z = g(f(\\cdot))$ . To simplify, $F(\\cdot)$ is defined as $g(f(\\cdot))$ . For more detailed definitions, please refer to Table II.",
|
| 295 |
+
"bbox": [
|
| 296 |
+
501,
|
| 297 |
+
763,
|
| 298 |
+
921,
|
| 299 |
+
944
|
| 300 |
+
],
|
| 301 |
+
"page_idx": 1
|
| 302 |
+
},
|
| 303 |
+
{
|
| 304 |
+
"type": "page_number",
|
| 305 |
+
"text": "2",
|
| 306 |
+
"bbox": [
|
| 307 |
+
76,
|
| 308 |
+
30,
|
| 309 |
+
86,
|
| 310 |
+
40
|
| 311 |
+
],
|
| 312 |
+
"page_idx": 1
|
| 313 |
+
},
|
| 314 |
+
{
|
| 315 |
+
"type": "table",
|
| 316 |
+
"img_path": "images/486cdbe556ad7fd970bd3f87457e944cb01f93b951388558de071d74e46ab07e.jpg",
|
| 317 |
+
"table_caption": [
|
| 318 |
+
"Table II: Notation summary"
|
| 319 |
+
],
|
| 320 |
+
"table_footnote": [],
|
| 321 |
+
"table_body": "<table><tr><td>Notations</td><td>Meaning</td></tr><tr><td>b</td><td>Bathsize</td></tr><tr><td>C</td><td>The numer of classes</td></tr><tr><td>X^B = (x_i, y_i)</td><td>Labeled sub-dataset with b</td></tr><tr><td>U^B = {u_1, ..., u_b}</td><td>Unlabeled sub-dataset matrix with b</td></tr><tr><td>x, u ∈ RC × RH × RW</td><td>Input samples</td></tr><tr><td>y ∈ {0,1}^C</td><td>Label with C classes encoded by one-hot</td></tr><tr><td>q ∈ RC</td><td>Predicted category probability distribution</td></tr><tr><td>f(·)</td><td>The encoder network</td></tr><tr><td>h(·)</td><td>The classifier</td></tr><tr><td>g(·)</td><td>The projector head</td></tr><tr><td>z ∈ RD = g(f(x)) = F(x)</td><td>Normalized low-dimensional representation</td></tr></table>",
|
| 322 |
+
"bbox": [
|
| 323 |
+
78,
|
| 324 |
+
85,
|
| 325 |
+
488,
|
| 326 |
+
236
|
| 327 |
+
],
|
| 328 |
+
"page_idx": 2
|
| 329 |
+
},
|
| 330 |
+
{
|
| 331 |
+
"type": "text",
|
| 332 |
+
"text": "B. Contrastive Learning",
|
| 333 |
+
"text_level": 1,
|
| 334 |
+
"bbox": [
|
| 335 |
+
75,
|
| 336 |
+
262,
|
| 337 |
+
243,
|
| 338 |
+
276
|
| 339 |
+
],
|
| 340 |
+
"page_idx": 2
|
| 341 |
+
},
|
| 342 |
+
{
|
| 343 |
+
"type": "text",
|
| 344 |
+
"text": "Thanks to leveraging unlabeled data for model training, contrastive learning attracts much attention of some researchers and becomes a hot spot recently [34]–[37]. It is a widely adopted form of self-supervised learning [34], [38]–[42], which can be used to optimize the task of instance discrimination. Instead of training a classification, contrastive learning is to maximize the similarities of positive pairs and minimize the similarities of negative pairs. It is important to learn the invariance with different views generated by data augmentations. The contrastive learning loss on unlabeled data can be described as follows:",
|
| 345 |
+
"bbox": [
|
| 346 |
+
73,
|
| 347 |
+
280,
|
| 348 |
+
490,
|
| 349 |
+
445
|
| 350 |
+
],
|
| 351 |
+
"page_idx": 2
|
| 352 |
+
},
|
| 353 |
+
{
|
| 354 |
+
"type": "equation",
|
| 355 |
+
"text": "\n$$\n- \\log \\frac {\\exp (F (D A (x _ {i})) \\cdot F (D A (x _ {i})) / T)}{\\sum_ {j = 1} ^ {N} \\exp ((F (D A (x _ {i})) \\cdot F (D A (x _ {j})) / T)}, \\tag {1}\n$$\n",
|
| 356 |
+
"text_format": "latex",
|
| 357 |
+
"bbox": [
|
| 358 |
+
119,
|
| 359 |
+
463,
|
| 360 |
+
488,
|
| 361 |
+
501
|
| 362 |
+
],
|
| 363 |
+
"page_idx": 2
|
| 364 |
+
},
|
| 365 |
+
{
|
| 366 |
+
"type": "text",
|
| 367 |
+
"text": "where $T$ is a temperature parameter [31]. $DA(\\cdot)$ denotes the stochastic data augmentation function. $F(\\cdot)$ is the simplified presentation of the encoder network $f(\\cdot)$ and the project head $g(\\cdot)$ . In recent methods, through designing a memory bank, MoCo [35] maintains the consistency of the negative sample pairs. SimCLR [34] calculates the pairwise similarity between two similar samples from the images in the same batch, which pushes the negative samples away while pulling the positive samples. Consistency regularization can be interpreted as a special form of contrastive learning, in which only positive samples are included.",
|
| 368 |
+
"bbox": [
|
| 369 |
+
73,
|
| 370 |
+
506,
|
| 371 |
+
491,
|
| 372 |
+
672
|
| 373 |
+
],
|
| 374 |
+
"page_idx": 2
|
| 375 |
+
},
|
| 376 |
+
{
|
| 377 |
+
"type": "text",
|
| 378 |
+
"text": "C. Consistency Regularization",
|
| 379 |
+
"text_level": 1,
|
| 380 |
+
"bbox": [
|
| 381 |
+
75,
|
| 382 |
+
691,
|
| 383 |
+
284,
|
| 384 |
+
705
|
| 385 |
+
],
|
| 386 |
+
"page_idx": 2
|
| 387 |
+
},
|
| 388 |
+
{
|
| 389 |
+
"type": "text",
|
| 390 |
+
"text": "Consistency regularization utilizes the assumption that the classifier should output the same prediction for the unlabeled data even after it is augmented. Data augmentation is a frequent regularization technique in semi-supervised learning. Through various data augmentation methods, consistency regularization generates a copy of the sample regarded as a similar sample to the original data. In the simplest form, prior work [43] adds the following consistency regularization loss on unlabeled samples:",
|
| 391 |
+
"bbox": [
|
| 392 |
+
73,
|
| 393 |
+
709,
|
| 394 |
+
490,
|
| 395 |
+
845
|
| 396 |
+
],
|
| 397 |
+
"page_idx": 2
|
| 398 |
+
},
|
| 399 |
+
{
|
| 400 |
+
"type": "equation",
|
| 401 |
+
"text": "\n$$\n\\left. \\left| \\left| p (y | D A (x); \\theta) - p (y | D A (x); \\theta) \\right| \\right| _ {2} ^ {2}, \\right. \\tag {2}\n$$\n",
|
| 402 |
+
"text_format": "latex",
|
| 403 |
+
"bbox": [
|
| 404 |
+
156,
|
| 405 |
+
859,
|
| 406 |
+
488,
|
| 407 |
+
878
|
| 408 |
+
],
|
| 409 |
+
"page_idx": 2
|
| 410 |
+
},
|
| 411 |
+
{
|
| 412 |
+
"type": "text",
|
| 413 |
+
"text": "where $DA(\\cdot)$ is a stochastic data augmentation. With the use of an exponential moving average (EMA) model, Mean-Teacher [27] replaces one of the terms in Eq.2, which provides a more stable target. To maximally alter the output class distribution,",
|
| 414 |
+
"bbox": [
|
| 415 |
+
73,
|
| 416 |
+
883,
|
| 417 |
+
491,
|
| 418 |
+
946
|
| 419 |
+
],
|
| 420 |
+
"page_idx": 2
|
| 421 |
+
},
|
| 422 |
+
{
|
| 423 |
+
"type": "text",
|
| 424 |
+
"text": "Virtual Adversarial Training (VAT) [44] uses an adversarial transformation in place of $DA(\\cdot)$ . More recently, a form of consistency regularization is utilized in Mixmatch [28] by using random horizontal flips and crops for the input samples. Unsupervised data augmentation (UDA) [45], ReMixMatch [30] and FixMatch [29] have been proposed with the use of weak and strong data augmentations. Generally speaking, through a weakly-augmented unlabeled sample, they generate a pseudo label and enforce consistency against the strongly-augmented version of the same input. The above consistency regularization models are based on data augmentation to generate positive samples. Although promising performance has been achieved, we observe that the discriminative capability of previews methods is limited since they would suffer from the semantic information drift issue. Therefore, the constructed samples are no longer similar. Instead of carefully designing data augmentations to utilize consistency regularization, we use an interpolation-based method to obtain positive pairs, which will avoid the semantic information drift caused by data augmentations.",
|
| 425 |
+
"bbox": [
|
| 426 |
+
501,
|
| 427 |
+
68,
|
| 428 |
+
921,
|
| 429 |
+
371
|
| 430 |
+
],
|
| 431 |
+
"page_idx": 2
|
| 432 |
+
},
|
| 433 |
+
{
|
| 434 |
+
"type": "text",
|
| 435 |
+
"text": "D. The Interpolation-based Method",
|
| 436 |
+
"text_level": 1,
|
| 437 |
+
"bbox": [
|
| 438 |
+
504,
|
| 439 |
+
391,
|
| 440 |
+
751,
|
| 441 |
+
406
|
| 442 |
+
],
|
| 443 |
+
"page_idx": 2
|
| 444 |
+
},
|
| 445 |
+
{
|
| 446 |
+
"type": "text",
|
| 447 |
+
"text": "Mixup [32] is an effective data augmentation strategy for image classification in computer vision [46]-[49]. It linearly interpolates the input samples and their labels on the input data and label spaces.",
|
| 448 |
+
"bbox": [
|
| 449 |
+
501,
|
| 450 |
+
409,
|
| 451 |
+
921,
|
| 452 |
+
470
|
| 453 |
+
],
|
| 454 |
+
"page_idx": 2
|
| 455 |
+
},
|
| 456 |
+
{
|
| 457 |
+
"type": "equation",
|
| 458 |
+
"text": "\n$$\n\\begin{array}{l} \\lambda \\sim B e t a (\\alpha , \\beta), \\\\ \\lambda^ {\\prime} = \\max (\\lambda , 1 - \\lambda), \\\\ x ^ {\\prime} = \\lambda^ {\\prime} x _ {1} + (1 - \\lambda^ {\\prime}) x _ {2}, \\\\ y ^ {\\prime} = \\lambda^ {\\prime} y _ {1} + (1 - \\lambda^ {\\prime}) y _ {2}, \\\\ \\end{array}\n$$\n",
|
| 459 |
+
"text_format": "latex",
|
| 460 |
+
"bbox": [
|
| 461 |
+
632,
|
| 462 |
+
481,
|
| 463 |
+
795,
|
| 464 |
+
554
|
| 465 |
+
],
|
| 466 |
+
"page_idx": 2
|
| 467 |
+
},
|
| 468 |
+
{
|
| 469 |
+
"type": "text",
|
| 470 |
+
"text": "where the $\\alpha$ and $\\beta$ are the parameter of Beta distribution, $\\lambda \\in [0,1]$ . The interpolations of input samples should lead to interpolations of the associated labels. In this manner, Mixup could extend the training distribution. It is recently achieved state-of-the-art performance through different tasks and network architectures. In [50], the interpolations are performed in the input space. In order to improve model performance, [51] is proposed to measure the realism of latent space interpolations in unsupervised learning. [33] performs the interpolation between input and pseudo-labels. Although the above methods are verified to be effective, they will still change the construction method of consistency regularized positive sample pairs. Therefore, how to solve the semantic information drift in consistency regularization is an open question. Different from the above approaches, we propose an interpolation-based method term ICL-SSL to construct positive sample pairs. Without using data augmentation to construct positive sample pairs, ICL-SSL is performed between the input samples and the representations, thus avoiding semantic information drift.",
|
| 471 |
+
"bbox": [
|
| 472 |
+
501,
|
| 473 |
+
560,
|
| 474 |
+
921,
|
| 475 |
+
861
|
| 476 |
+
],
|
| 477 |
+
"page_idx": 2
|
| 478 |
+
},
|
| 479 |
+
{
|
| 480 |
+
"type": "text",
|
| 481 |
+
"text": "III. METHOD",
|
| 482 |
+
"text_level": 1,
|
| 483 |
+
"bbox": [
|
| 484 |
+
661,
|
| 485 |
+
878,
|
| 486 |
+
763,
|
| 487 |
+
893
|
| 488 |
+
],
|
| 489 |
+
"page_idx": 2
|
| 490 |
+
},
|
| 491 |
+
{
|
| 492 |
+
"type": "text",
|
| 493 |
+
"text": "In this section, we introduce our proposed semi-supervised learning method. Firstly, we will explore the reason for the performance degradation under few labels via some experiments",
|
| 494 |
+
"bbox": [
|
| 495 |
+
503,
|
| 496 |
+
898,
|
| 497 |
+
921,
|
| 498 |
+
946
|
| 499 |
+
],
|
| 500 |
+
"page_idx": 2
|
| 501 |
+
},
|
| 502 |
+
{
|
| 503 |
+
"type": "page_number",
|
| 504 |
+
"text": "3",
|
| 505 |
+
"bbox": [
|
| 506 |
+
911,
|
| 507 |
+
30,
|
| 508 |
+
919,
|
| 509 |
+
40
|
| 510 |
+
],
|
| 511 |
+
"page_idx": 2
|
| 512 |
+
},
|
| 513 |
+
{
|
| 514 |
+
"type": "image",
|
| 515 |
+
"img_path": "images/b2f3ed046301ba486a991f828d320f17aba5982fed2ccddef57947cfd9ea5c40.jpg",
|
| 516 |
+
"image_caption": [
|
| 517 |
+
"Figure 4: Illustration of Interpolation-based Contrastive Learning Semi-Supervised Learning (ICL-SSL) mechanism. Following the definition in [29], Augw denotes the weak augmentation of the original input image. Augs denotes strong augmentation. Specifically, given two unlabeled images $u_{i}$ and $u_{j}$ , we firstly embed the samples separately into the latent space. Then, we conduct image-level interpolation for an integrated image and do the embedding with the same network. $z_{mix}$ and $mix_{z}$ are the positive embeddings pair constructed by ICL-SSL. By combining these two positive embeddings, we design a novel contrastive loss $L_{c}$ and force these two embeddings to change linearly between samples, improving the discriminative capability of the network. Therefore, our network would be guided to learn more discriminative embeddings with the interpolation-based method."
|
| 518 |
+
],
|
| 519 |
+
"image_footnote": [],
|
| 520 |
+
"bbox": [
|
| 521 |
+
171,
|
| 522 |
+
73,
|
| 523 |
+
813,
|
| 524 |
+
462
|
| 525 |
+
],
|
| 526 |
+
"page_idx": 3
|
| 527 |
+
},
|
| 528 |
+
{
|
| 529 |
+
"type": "text",
|
| 530 |
+
"text": "on MINIST dataset. Through the exploratory experiment, we analyze that the semantic information of the input samples will be drifted after some inappropriate data augmentations, thus limiting the performance. After that, to address this issue, we introduce an interpolation-based method ICL-SSL under few labels to construct more reliable positive sample pairs. Finally, we will detail the designed contrastive loss of ICL-SSL.",
|
| 531 |
+
"bbox": [
|
| 532 |
+
73,
|
| 533 |
+
622,
|
| 534 |
+
490,
|
| 535 |
+
728
|
| 536 |
+
],
|
| 537 |
+
"page_idx": 3
|
| 538 |
+
},
|
| 539 |
+
{
|
| 540 |
+
"type": "text",
|
| 541 |
+
"text": "A. Semantic information drift",
|
| 542 |
+
"text_level": 1,
|
| 543 |
+
"bbox": [
|
| 544 |
+
73,
|
| 545 |
+
744,
|
| 546 |
+
279,
|
| 547 |
+
758
|
| 548 |
+
],
|
| 549 |
+
"page_idx": 3
|
| 550 |
+
},
|
| 551 |
+
{
|
| 552 |
+
"type": "text",
|
| 553 |
+
"text": "Although promising performances have been achieved by the existing algorithms, we observe that when the number of labeled data gets extremely small, e.g. 2 to 3 labels for each category, the performance of the existing algorithms would decrease drastically. The detailed observation is shown in Table. I. Therefore, we conduct experiments to explore the reason to cause the performance dropping.",
|
| 554 |
+
"bbox": [
|
| 555 |
+
73,
|
| 556 |
+
763,
|
| 557 |
+
490,
|
| 558 |
+
868
|
| 559 |
+
],
|
| 560 |
+
"page_idx": 3
|
| 561 |
+
},
|
| 562 |
+
{
|
| 563 |
+
"type": "text",
|
| 564 |
+
"text": "Consistency regularization is an essential piece for many state-of-the-art semi-supervised learning methods [28]–[31]. A common assumption of consistency regularization is that the classifier should output the same class probability of an unlabeled sample even if it is augmented.",
|
| 565 |
+
"bbox": [
|
| 566 |
+
73,
|
| 567 |
+
869,
|
| 568 |
+
491,
|
| 569 |
+
945
|
| 570 |
+
],
|
| 571 |
+
"page_idx": 3
|
| 572 |
+
},
|
| 573 |
+
{
|
| 574 |
+
"type": "text",
|
| 575 |
+
"text": "In several SSL methods [28]–[30], when training data is not enough for generalization, data augmentation is a technique to apply consistency regularization. MixMatch [28] processes the input samples through random horizontal and random crops. The weak data augmentation method uses horizontal flips and vertical flips to process unlabeled samples in FixMatch [29].",
|
| 576 |
+
"bbox": [
|
| 577 |
+
501,
|
| 578 |
+
622,
|
| 579 |
+
919,
|
| 580 |
+
713
|
| 581 |
+
],
|
| 582 |
+
"page_idx": 3
|
| 583 |
+
},
|
| 584 |
+
{
|
| 585 |
+
"type": "text",
|
| 586 |
+
"text": "Through experiments shown in Fig. 2, we find that some data augmentations will change the semantic information about the input samples, leading to a decrease in the semantic similarity of the constructed samples damaging the SSL training. We visualize the result of data augmentation. It can be found that the semantic information of the input samples has been changed. Fig. 2 shows that under one data augmentation (random vertical flip), the semantic information of \"7\"s and \"2\"s, \"6\"s and \"9\"s, \"2\"s and \"5\"s can easily get changed. As a result, the quality of the constructed positive samples decreases or the construction fails, which in turn affects the performance of the model. To further verify the effect of data augmentation, we implement experiments on the MINIST dataset.",
|
| 587 |
+
"bbox": [
|
| 588 |
+
501,
|
| 589 |
+
714,
|
| 590 |
+
919,
|
| 591 |
+
911
|
| 592 |
+
],
|
| 593 |
+
"page_idx": 3
|
| 594 |
+
},
|
| 595 |
+
{
|
| 596 |
+
"type": "text",
|
| 597 |
+
"text": "As shown in Fig. 5(b), MINIST is a dataset composed of handwritten numbers, which is commonly used in deep",
|
| 598 |
+
"bbox": [
|
| 599 |
+
503,
|
| 600 |
+
914,
|
| 601 |
+
921,
|
| 602 |
+
945
|
| 603 |
+
],
|
| 604 |
+
"page_idx": 3
|
| 605 |
+
},
|
| 606 |
+
{
|
| 607 |
+
"type": "page_number",
|
| 608 |
+
"text": "4",
|
| 609 |
+
"bbox": [
|
| 610 |
+
76,
|
| 611 |
+
31,
|
| 612 |
+
86,
|
| 613 |
+
39
|
| 614 |
+
],
|
| 615 |
+
"page_idx": 3
|
| 616 |
+
},
|
| 617 |
+
{
|
| 618 |
+
"type": "code",
|
| 619 |
+
"sub_type": "algorithm",
|
| 620 |
+
"code_caption": [
|
| 621 |
+
"Algorithm 1 Interpolation-based Contrastive Learning Semi-Supervised Learning(ICL-SSL)"
|
| 622 |
+
],
|
| 623 |
+
"code_body": "Input: Labeled data $X = (x_{1},y_{1}),(x_{2},y_{2}),\\dots,(x_{b},y_{b})$ unlabeled data $U = (u_{1},u_{2},\\dots,u_{b})$ , Beta distribution parameter $\\alpha$ for feature interpolation, Batch size b, Epoch number e \n1: while $e < \\mathrm{Epoch}$ do \n2: for $i = 1$ to $b$ do \n3: $y_{b} = p(y|x)$ \n4: $q_{i} = p(y|u_{i})$ \n5: $z_{i} = g(f(u_{i}))$ \n6: end for \n7: for $i\\in 1,\\ldots ,$ b and $\\mathrm{j}\\in 1,\\dots ,$ b do \n8: $\\lambda = \\operatorname {Beta}(\\alpha ,\\alpha)$ \n9: $u_{mix} = \\lambda *u_i + (1 - \\lambda)*u_j$ \n10: $z_{mix} = g(f(u_{mix}))$ \n11: $m i x_{z} = \\lambda *z_{i} + (1 - \\lambda)*z_{j}$ \n12: end for \n13: end while \n14: Calculate classification loss via Eq.8, 10 and 11",
|
| 624 |
+
"bbox": [
|
| 625 |
+
73,
|
| 626 |
+
98,
|
| 627 |
+
491,
|
| 628 |
+
333
|
| 629 |
+
],
|
| 630 |
+
"page_idx": 4
|
| 631 |
+
},
|
| 632 |
+
{
|
| 633 |
+
"type": "text",
|
| 634 |
+
"text": "learning research. MINIST consists of 60000 training data and 10000 test data. Aiming to reduce the influence of irrelevant factors (e.g. complex structure of training model) to the performance, we explore the semantic information drift problem caused by data augmentation with two-layer MLPs.",
|
| 635 |
+
"bbox": [
|
| 636 |
+
73,
|
| 637 |
+
366,
|
| 638 |
+
490,
|
| 639 |
+
443
|
| 640 |
+
],
|
| 641 |
+
"page_idx": 4
|
| 642 |
+
},
|
| 643 |
+
{
|
| 644 |
+
"type": "text",
|
| 645 |
+
"text": "From the empirical analysis, we observe that the accuracy is decreased by $5.0\\%$ after the random horizontal flip argumentation on MINIST. As a consequence, after random vertical flips, the accuracy decreases by $4.0\\%$ shown in Fig. 3. Additionally, we also explore rotation, random re-cropping and random cropping, the result shows that those data augmentations will also limit the performance of the model.",
|
| 646 |
+
"bbox": [
|
| 647 |
+
73,
|
| 648 |
+
445,
|
| 649 |
+
491,
|
| 650 |
+
551
|
| 651 |
+
],
|
| 652 |
+
"page_idx": 4
|
| 653 |
+
},
|
| 654 |
+
{
|
| 655 |
+
"type": "text",
|
| 656 |
+
"text": "The experiment on MINIST can illustrate that during SSL training, some inappropriate data augmentations will change the semantic information of the input samples. Therefore, the semantic correlation of positive sample pairs will be destroyed by inappropriate data augmentations. When the label information is lacking, the incorrect regularization caused by data augmentation would mislead the network and limit the algorithm performance.",
|
| 657 |
+
"bbox": [
|
| 658 |
+
73,
|
| 659 |
+
553,
|
| 660 |
+
491,
|
| 661 |
+
675
|
| 662 |
+
],
|
| 663 |
+
"page_idx": 4
|
| 664 |
+
},
|
| 665 |
+
{
|
| 666 |
+
"type": "text",
|
| 667 |
+
"text": "B. ICL-SSL",
|
| 668 |
+
"text_level": 1,
|
| 669 |
+
"bbox": [
|
| 670 |
+
75,
|
| 671 |
+
717,
|
| 672 |
+
158,
|
| 673 |
+
731
|
| 674 |
+
],
|
| 675 |
+
"page_idx": 4
|
| 676 |
+
},
|
| 677 |
+
{
|
| 678 |
+
"type": "text",
|
| 679 |
+
"text": "To solve the semantic information drift problem, we proposed a novel interpolation contrastive learning Semi-supervised learning method termed ICL-SSL. Specifically, ICL-SSL does not change the semantic information during the positive pair construction of consistency regularization. In the following, we first obtain the low-dimensional representation $z$ of the unlabeled sample. Then, we describe the interpolation-based positive sample pairs construction method and loss function in detail.",
|
| 680 |
+
"bbox": [
|
| 681 |
+
73,
|
| 682 |
+
744,
|
| 683 |
+
490,
|
| 684 |
+
880
|
| 685 |
+
],
|
| 686 |
+
"page_idx": 4
|
| 687 |
+
},
|
| 688 |
+
{
|
| 689 |
+
"type": "text",
|
| 690 |
+
"text": "In our ICL-SSL method, the representations are extracted by encoder network $f(\\cdot)$ . Concretely, for any two unlabeled samples $u_{i},u_{j}$ in a batch of unlabeled sub-dataset $\\mathcal{U}^B$ , we could obtain their normalized representations $z_{i},z_{j}$ with $\\ell^2$ -",
|
| 691 |
+
"bbox": [
|
| 692 |
+
73,
|
| 693 |
+
883,
|
| 694 |
+
491,
|
| 695 |
+
946
|
| 696 |
+
],
|
| 697 |
+
"page_idx": 4
|
| 698 |
+
},
|
| 699 |
+
{
|
| 700 |
+
"type": "text",
|
| 701 |
+
"text": "norm:",
|
| 702 |
+
"bbox": [
|
| 703 |
+
504,
|
| 704 |
+
70,
|
| 705 |
+
549,
|
| 706 |
+
82
|
| 707 |
+
],
|
| 708 |
+
"page_idx": 4
|
| 709 |
+
},
|
| 710 |
+
{
|
| 711 |
+
"type": "equation",
|
| 712 |
+
"text": "\n$$\nz _ {i} = F \\left(u _ {i}\\right), z _ {i} = \\frac {z _ {i}}{\\left\\| z _ {i} \\right\\| _ {2}}, \\tag {4}\n$$\n",
|
| 713 |
+
"text_format": "latex",
|
| 714 |
+
"bbox": [
|
| 715 |
+
632,
|
| 716 |
+
97,
|
| 717 |
+
919,
|
| 718 |
+
133
|
| 719 |
+
],
|
| 720 |
+
"page_idx": 4
|
| 721 |
+
},
|
| 722 |
+
{
|
| 723 |
+
"type": "equation",
|
| 724 |
+
"text": "\n$$\nz _ {j} = F (u _ {j}), z _ {j} = \\frac {z _ {j}}{| | z _ {j} | | _ {2}},\n$$\n",
|
| 725 |
+
"text_format": "latex",
|
| 726 |
+
"bbox": [
|
| 727 |
+
627,
|
| 728 |
+
130,
|
| 729 |
+
797,
|
| 730 |
+
156
|
| 731 |
+
],
|
| 732 |
+
"page_idx": 4
|
| 733 |
+
},
|
| 734 |
+
{
|
| 735 |
+
"type": "text",
|
| 736 |
+
"text": "where $F(\\cdot)$ is defined as $g(f(\\cdot))$ , a simple form of encoder network $f(\\cdot)$ and project head $g(\\cdot)$ .",
|
| 737 |
+
"bbox": [
|
| 738 |
+
503,
|
| 739 |
+
165,
|
| 740 |
+
919,
|
| 741 |
+
196
|
| 742 |
+
],
|
| 743 |
+
"page_idx": 4
|
| 744 |
+
},
|
| 745 |
+
{
|
| 746 |
+
"type": "text",
|
| 747 |
+
"text": "After that, we perform interpolation operations on the normalized low-dimensional feature representations $z_{i}$ and $z_{j}$ .",
|
| 748 |
+
"bbox": [
|
| 749 |
+
503,
|
| 750 |
+
196,
|
| 751 |
+
919,
|
| 752 |
+
228
|
| 753 |
+
],
|
| 754 |
+
"page_idx": 4
|
| 755 |
+
},
|
| 756 |
+
{
|
| 757 |
+
"type": "equation",
|
| 758 |
+
"text": "\n$$\n\\begin{array}{l} m i x _ {z} = \\lambda z _ {i} + (1 - \\lambda) z _ {j} \\tag {5} \\\\ = \\lambda F (u _ {i}) + (1 - \\lambda) F (u _ {j}), \\\\ \\end{array}\n$$\n",
|
| 759 |
+
"text_format": "latex",
|
| 760 |
+
"bbox": [
|
| 761 |
+
596,
|
| 762 |
+
242,
|
| 763 |
+
919,
|
| 764 |
+
277
|
| 765 |
+
],
|
| 766 |
+
"page_idx": 4
|
| 767 |
+
},
|
| 768 |
+
{
|
| 769 |
+
"type": "text",
|
| 770 |
+
"text": "where $mix_{z}$ denotes the interpolated representation of $z_{i}$ and $z_{j}$ , $\\lambda$ is generated by Beta distribution. Simultaneously, unlike the above steps, we first perform an interpolation operation in the sample space $(u_{i}, u_{j})$ and then get the normalized low-dimensional feature:",
|
| 771 |
+
"bbox": [
|
| 772 |
+
501,
|
| 773 |
+
286,
|
| 774 |
+
919,
|
| 775 |
+
361
|
| 776 |
+
],
|
| 777 |
+
"page_idx": 4
|
| 778 |
+
},
|
| 779 |
+
{
|
| 780 |
+
"type": "equation",
|
| 781 |
+
"text": "\n$$\nz _ {m i x} = F (\\lambda u _ {i} + (1 - \\lambda) u _ {j}), \\tag {6}\n$$\n",
|
| 782 |
+
"text_format": "latex",
|
| 783 |
+
"bbox": [
|
| 784 |
+
612,
|
| 785 |
+
380,
|
| 786 |
+
919,
|
| 787 |
+
398
|
| 788 |
+
],
|
| 789 |
+
"page_idx": 4
|
| 790 |
+
},
|
| 791 |
+
{
|
| 792 |
+
"type": "text",
|
| 793 |
+
"text": "where $z_{\\text{mix}}$ is the representation of interpolated input data $u_i$ and $u_j$ . The constructed positive sample pair can be presented as follows:",
|
| 794 |
+
"bbox": [
|
| 795 |
+
503,
|
| 796 |
+
409,
|
| 797 |
+
919,
|
| 798 |
+
454
|
| 799 |
+
],
|
| 800 |
+
"page_idx": 4
|
| 801 |
+
},
|
| 802 |
+
{
|
| 803 |
+
"type": "equation",
|
| 804 |
+
"text": "\n$$\n[ m i x _ {z}, z _ {m i x} ]. \\tag {7}\n$$\n",
|
| 805 |
+
"text_format": "latex",
|
| 806 |
+
"bbox": [
|
| 807 |
+
665,
|
| 808 |
+
472,
|
| 809 |
+
919,
|
| 810 |
+
489
|
| 811 |
+
],
|
| 812 |
+
"page_idx": 4
|
| 813 |
+
},
|
| 814 |
+
{
|
| 815 |
+
"type": "text",
|
| 816 |
+
"text": "The framework of our proposed ICL-SSL is shown in Fig. 4. ICL-SSL is a semantic-agnostic positive sample construction method. Specifically, we generate one positive sample from the features $z_{\\text{mix}}$ obtained by interpolating two inputs, and the other $\\text{mix}_z$ from interpolating the two features of the input. By this setting, both of these positive samples contain the original semantic information of each input $(u_i, u_j)$ . It has demonstrated that the interpolation operation has the effect to push the decision boundaries away from the class boundaries in [32], [33]. In this manner, with the utilization of our ICL-SSL, the margin decision boundaries would get larger, thus improving the discriminative capability of the network under few labels.",
|
| 817 |
+
"bbox": [
|
| 818 |
+
501,
|
| 819 |
+
500,
|
| 820 |
+
921,
|
| 821 |
+
696
|
| 822 |
+
],
|
| 823 |
+
"page_idx": 4
|
| 824 |
+
},
|
| 825 |
+
{
|
| 826 |
+
"type": "text",
|
| 827 |
+
"text": "C. Loss function",
|
| 828 |
+
"text_level": 1,
|
| 829 |
+
"bbox": [
|
| 830 |
+
504,
|
| 831 |
+
724,
|
| 832 |
+
622,
|
| 833 |
+
739
|
| 834 |
+
],
|
| 835 |
+
"page_idx": 4
|
| 836 |
+
},
|
| 837 |
+
{
|
| 838 |
+
"type": "text",
|
| 839 |
+
"text": "The loss of ICL-SSL mainly consists of three parts: the supervised classification loss $L_{x}$ , the unsupervised classification loss $L_{u}$ and the contrastive loss $L_{c}$ .",
|
| 840 |
+
"bbox": [
|
| 841 |
+
503,
|
| 842 |
+
744,
|
| 843 |
+
919,
|
| 844 |
+
790
|
| 845 |
+
],
|
| 846 |
+
"page_idx": 4
|
| 847 |
+
},
|
| 848 |
+
{
|
| 849 |
+
"type": "text",
|
| 850 |
+
"text": "In detail, $L_{x}$ is the supervised classification loss on the labeled data, which is defined as the cross-entropy between the ground-truth labels and the model's predictions:",
|
| 851 |
+
"bbox": [
|
| 852 |
+
503,
|
| 853 |
+
791,
|
| 854 |
+
919,
|
| 855 |
+
837
|
| 856 |
+
],
|
| 857 |
+
"page_idx": 4
|
| 858 |
+
},
|
| 859 |
+
{
|
| 860 |
+
"type": "equation",
|
| 861 |
+
"text": "\n$$\nL _ {x} = \\frac {1}{B} \\sum_ {b = 1} ^ {B} H \\left(y _ {b}, p \\left(y \\mid x _ {b}\\right)\\right), \\tag {8}\n$$\n",
|
| 862 |
+
"text_format": "latex",
|
| 863 |
+
"bbox": [
|
| 864 |
+
614,
|
| 865 |
+
845,
|
| 866 |
+
919,
|
| 867 |
+
888
|
| 868 |
+
],
|
| 869 |
+
"page_idx": 4
|
| 870 |
+
},
|
| 871 |
+
{
|
| 872 |
+
"type": "text",
|
| 873 |
+
"text": "where $x_{b}$ denotes the labeled data in $\\mathcal{X}^B$ . $p(y|x_b)$ is the output of the classifier. $H$ is the cross-entropy between the two distributions $y_{b}$ and $p(y|x_b)$ .",
|
| 874 |
+
"bbox": [
|
| 875 |
+
503,
|
| 876 |
+
898,
|
| 877 |
+
919,
|
| 878 |
+
946
|
| 879 |
+
],
|
| 880 |
+
"page_idx": 4
|
| 881 |
+
},
|
| 882 |
+
{
|
| 883 |
+
"type": "page_number",
|
| 884 |
+
"text": "5",
|
| 885 |
+
"bbox": [
|
| 886 |
+
911,
|
| 887 |
+
30,
|
| 888 |
+
919,
|
| 889 |
+
40
|
| 890 |
+
],
|
| 891 |
+
"page_idx": 4
|
| 892 |
+
},
|
| 893 |
+
{
|
| 894 |
+
"type": "image",
|
| 895 |
+
"img_path": "images/c85e3fa15fd1ccae73e78751528d5b8d0c98f3fd0e9cb74ebf28e2740791a621.jpg",
|
| 896 |
+
"image_caption": [
|
| 897 |
+
"(a) CIFAR-10"
|
| 898 |
+
],
|
| 899 |
+
"image_footnote": [],
|
| 900 |
+
"bbox": [
|
| 901 |
+
93,
|
| 902 |
+
104,
|
| 903 |
+
285,
|
| 904 |
+
252
|
| 905 |
+
],
|
| 906 |
+
"page_idx": 5
|
| 907 |
+
},
|
| 908 |
+
{
|
| 909 |
+
"type": "image",
|
| 910 |
+
"img_path": "images/579a6c70bd8339f0edb49cff0d4839abb21147192dc66f6c9decd710f7bd7b87.jpg",
|
| 911 |
+
"image_caption": [
|
| 912 |
+
"(b) MINIST"
|
| 913 |
+
],
|
| 914 |
+
"image_footnote": [],
|
| 915 |
+
"bbox": [
|
| 916 |
+
297,
|
| 917 |
+
104,
|
| 918 |
+
488,
|
| 919 |
+
252
|
| 920 |
+
],
|
| 921 |
+
"page_idx": 5
|
| 922 |
+
},
|
| 923 |
+
{
|
| 924 |
+
"type": "image",
|
| 925 |
+
"img_path": "images/9b70bf2051c9a320abf2da02597fd37afb636d5af2297b3a6adbee8fdfd2524b.jpg",
|
| 926 |
+
"image_caption": [
|
| 927 |
+
"(c) SVHN",
|
| 928 |
+
"Figure 5: Illustration of the CIFAR-10, MINIST, SVHN, CIFAR-100 dataset."
|
| 929 |
+
],
|
| 930 |
+
"image_footnote": [],
|
| 931 |
+
"bbox": [
|
| 932 |
+
503,
|
| 933 |
+
104,
|
| 934 |
+
694,
|
| 935 |
+
251
|
| 936 |
+
],
|
| 937 |
+
"page_idx": 5
|
| 938 |
+
},
|
| 939 |
+
{
|
| 940 |
+
"type": "image",
|
| 941 |
+
"img_path": "images/f054b0fe26c08ce7da7a224cb7bde40aad8dfdfa6c905e867f35fe8e4aa4c1e4.jpg",
|
| 942 |
+
"image_caption": [
|
| 943 |
+
"(d) CIFAR-100"
|
| 944 |
+
],
|
| 945 |
+
"image_footnote": [],
|
| 946 |
+
"bbox": [
|
| 947 |
+
707,
|
| 948 |
+
104,
|
| 949 |
+
897,
|
| 950 |
+
251
|
| 951 |
+
],
|
| 952 |
+
"page_idx": 5
|
| 953 |
+
},
|
| 954 |
+
{
|
| 955 |
+
"type": "text",
|
| 956 |
+
"text": "For the unlabeled data, its pseudo label $\\hat{q}_b$ is generated by the classification head $h(\\cdot)$ and the Softmax function. The formula can be described as:",
|
| 957 |
+
"bbox": [
|
| 958 |
+
73,
|
| 959 |
+
327,
|
| 960 |
+
491,
|
| 961 |
+
372
|
| 962 |
+
],
|
| 963 |
+
"page_idx": 5
|
| 964 |
+
},
|
| 965 |
+
{
|
| 966 |
+
"type": "equation",
|
| 967 |
+
"text": "\n$$\n\\hat {q} _ {b} = \\operatorname {S o f t m a x} (h (f (u _ {b}))). \\tag {9}\n$$\n",
|
| 968 |
+
"text_format": "latex",
|
| 969 |
+
"bbox": [
|
| 970 |
+
189,
|
| 971 |
+
388,
|
| 972 |
+
490,
|
| 973 |
+
406
|
| 974 |
+
],
|
| 975 |
+
"page_idx": 5
|
| 976 |
+
},
|
| 977 |
+
{
|
| 978 |
+
"type": "text",
|
| 979 |
+
"text": "$L_{u}$ is defined as the cross-entropy between the pseudo-labels and the model's predictions. It can be calculated by:",
|
| 980 |
+
"bbox": [
|
| 981 |
+
73,
|
| 982 |
+
415,
|
| 983 |
+
488,
|
| 984 |
+
445
|
| 985 |
+
],
|
| 986 |
+
"page_idx": 5
|
| 987 |
+
},
|
| 988 |
+
{
|
| 989 |
+
"type": "equation",
|
| 990 |
+
"text": "\n$$\nL _ {u} = \\frac {1}{\\mu B} \\sum_ {b = 1} ^ {\\mu B} \\ell \\left(\\max \\left(\\hat {q} _ {b}\\right) \\geq \\tau\\right) H \\left(\\hat {q} _ {b}, p (y \\mid u _ {b})\\right), \\tag {10}\n$$\n",
|
| 991 |
+
"text_format": "latex",
|
| 992 |
+
"bbox": [
|
| 993 |
+
101,
|
| 994 |
+
452,
|
| 995 |
+
488,
|
| 996 |
+
494
|
| 997 |
+
],
|
| 998 |
+
"page_idx": 5
|
| 999 |
+
},
|
| 1000 |
+
{
|
| 1001 |
+
"type": "text",
|
| 1002 |
+
"text": "where $\\hat{q}_b$ is the predicted probability of pseudo labels. $\\ell$ is the function to calculate the loss. When the largest class probability is above the threshold $\\tau$ , the loss will be calculated. Meanwhile, $\\mu$ is used to count the number of valid unlabeled samples. $H$ is the cross-entropy between $q_{b}$ and $p(y|u_b)$ .",
|
| 1003 |
+
"bbox": [
|
| 1004 |
+
73,
|
| 1005 |
+
501,
|
| 1006 |
+
488,
|
| 1007 |
+
575
|
| 1008 |
+
],
|
| 1009 |
+
"page_idx": 5
|
| 1010 |
+
},
|
| 1011 |
+
{
|
| 1012 |
+
"type": "text",
|
| 1013 |
+
"text": "Through the positive sample pairs $[mix_z, z_{mix}]$ constructed by interpolation strategy, the contrastive loss can be computed as:",
|
| 1014 |
+
"bbox": [
|
| 1015 |
+
73,
|
| 1016 |
+
577,
|
| 1017 |
+
488,
|
| 1018 |
+
619
|
| 1019 |
+
],
|
| 1020 |
+
"page_idx": 5
|
| 1021 |
+
},
|
| 1022 |
+
{
|
| 1023 |
+
"type": "equation",
|
| 1024 |
+
"text": "\n$$\nL _ {c} = - \\log \\frac {\\exp \\left(\\left(m i x _ {z} \\cdot z _ {m i x}\\right) / T\\right)}{\\sum_ {k = 1} ^ {B} I _ {\\lfloor k \\neq i \\rfloor} \\exp \\left(\\left(z _ {i} \\cdot z _ {j}\\right) / T\\right)}, \\tag {11}\n$$\n",
|
| 1025 |
+
"text_format": "latex",
|
| 1026 |
+
"bbox": [
|
| 1027 |
+
140,
|
| 1028 |
+
619,
|
| 1029 |
+
488,
|
| 1030 |
+
655
|
| 1031 |
+
],
|
| 1032 |
+
"page_idx": 5
|
| 1033 |
+
},
|
| 1034 |
+
{
|
| 1035 |
+
"type": "text",
|
| 1036 |
+
"text": "where $T$ is a temperature parameter. Similar to SimCLR [34], we do not sample negative samples explicitly. Instead, we treat the other examples within a minibatch as negative samples. $I_{[k\\neq i]\\in 0,1}$ is an indicator function. When $k = i$ the value of $I$ is set to 1. The similarity between positive is measured by the inner product. This loss is calculated across all positive samples in a batch. The contrastive loss encourages the model to produce similar representations for positive samples and pushes the negative samples away. The relation of the embedding changes linearly due to the proposed positive sample pairs constructed method. By minimizing Eq.11, the margin decision boundaries will be enlarged, thus improving the discriminative of the network.",
|
| 1037 |
+
"bbox": [
|
| 1038 |
+
73,
|
| 1039 |
+
660,
|
| 1040 |
+
488,
|
| 1041 |
+
854
|
| 1042 |
+
],
|
| 1043 |
+
"page_idx": 5
|
| 1044 |
+
},
|
| 1045 |
+
{
|
| 1046 |
+
"type": "text",
|
| 1047 |
+
"text": "In summary, the loss function of ICL-SSL can be computed by:",
|
| 1048 |
+
"bbox": [
|
| 1049 |
+
73,
|
| 1050 |
+
857,
|
| 1051 |
+
488,
|
| 1052 |
+
887
|
| 1053 |
+
],
|
| 1054 |
+
"page_idx": 5
|
| 1055 |
+
},
|
| 1056 |
+
{
|
| 1057 |
+
"type": "equation",
|
| 1058 |
+
"text": "\n$$\nL = L _ {x} + L _ {u} + \\alpha L _ {c}, \\tag {12}\n$$\n",
|
| 1059 |
+
"text_format": "latex",
|
| 1060 |
+
"bbox": [
|
| 1061 |
+
207,
|
| 1062 |
+
888,
|
| 1063 |
+
488,
|
| 1064 |
+
905
|
| 1065 |
+
],
|
| 1066 |
+
"page_idx": 5
|
| 1067 |
+
},
|
| 1068 |
+
{
|
| 1069 |
+
"type": "text",
|
| 1070 |
+
"text": "where $L_{x}$ represents the supervised loss and $L_{u}$ is the unsupervised loss. $\\alpha$ is a trade-off hyper-parameter to control",
|
| 1071 |
+
"bbox": [
|
| 1072 |
+
75,
|
| 1073 |
+
914,
|
| 1074 |
+
491,
|
| 1075 |
+
945
|
| 1076 |
+
],
|
| 1077 |
+
"page_idx": 5
|
| 1078 |
+
},
|
| 1079 |
+
{
|
| 1080 |
+
"type": "table",
|
| 1081 |
+
"img_path": "images/f5a05b2c0a4a999c5c8605618a23b2a89a72320e17a0af5c2f5a3e45cf31807d.jpg",
|
| 1082 |
+
"table_caption": [
|
| 1083 |
+
"Table III: Dataset summary"
|
| 1084 |
+
],
|
| 1085 |
+
"table_footnote": [],
|
| 1086 |
+
"table_body": "<table><tr><td>Dataset</td><td>Size</td><td>Train Set</td><td>Test Set</td><td>Class</td><td>Type</td></tr><tr><td>SVHN</td><td>32 × 32</td><td>73257</td><td>26032</td><td>10</td><td>image</td></tr><tr><td>MINIST</td><td>28× 28</td><td>60000</td><td>10000</td><td>10</td><td>image</td></tr><tr><td>CIFAR-10</td><td>32 × 32</td><td>50000</td><td>10000</td><td>10</td><td>image</td></tr><tr><td>CIFAR-100</td><td>32 × 32</td><td>50000</td><td>10000</td><td>100</td><td>image</td></tr></table>",
|
| 1087 |
+
"bbox": [
|
| 1088 |
+
511,
|
| 1089 |
+
344,
|
| 1090 |
+
919,
|
| 1091 |
+
468
|
| 1092 |
+
],
|
| 1093 |
+
"page_idx": 5
|
| 1094 |
+
},
|
| 1095 |
+
{
|
| 1096 |
+
"type": "text",
|
| 1097 |
+
"text": "the weight of the total loss. The detailed learning procedure of ICL-SSL is shown in Algorithm 1.",
|
| 1098 |
+
"bbox": [
|
| 1099 |
+
503,
|
| 1100 |
+
494,
|
| 1101 |
+
919,
|
| 1102 |
+
526
|
| 1103 |
+
],
|
| 1104 |
+
"page_idx": 5
|
| 1105 |
+
},
|
| 1106 |
+
{
|
| 1107 |
+
"type": "text",
|
| 1108 |
+
"text": "IV. EXPERIMENT",
|
| 1109 |
+
"text_level": 1,
|
| 1110 |
+
"bbox": [
|
| 1111 |
+
648,
|
| 1112 |
+
545,
|
| 1113 |
+
776,
|
| 1114 |
+
559
|
| 1115 |
+
],
|
| 1116 |
+
"page_idx": 5
|
| 1117 |
+
},
|
| 1118 |
+
{
|
| 1119 |
+
"type": "text",
|
| 1120 |
+
"text": "We evaluate the effectiveness of ICL-SSL on several semi-supervised learning benchmarks. We focus on the most challenging label-scare scenario where few labels are available, e.g., 2 or 3 labels for each category. At the same time, our ablation study teases apart the contribution of ICL-SSL components. In addition, we further verify the generality of the proposed method by improving the performance of the existing state-of-the-art algorithms considerably with our proposed strategy.",
|
| 1121 |
+
"bbox": [
|
| 1122 |
+
501,
|
| 1123 |
+
566,
|
| 1124 |
+
919,
|
| 1125 |
+
703
|
| 1126 |
+
],
|
| 1127 |
+
"page_idx": 5
|
| 1128 |
+
},
|
| 1129 |
+
{
|
| 1130 |
+
"type": "text",
|
| 1131 |
+
"text": "A. Implementation details",
|
| 1132 |
+
"text_level": 1,
|
| 1133 |
+
"bbox": [
|
| 1134 |
+
503,
|
| 1135 |
+
724,
|
| 1136 |
+
684,
|
| 1137 |
+
739
|
| 1138 |
+
],
|
| 1139 |
+
"page_idx": 5
|
| 1140 |
+
},
|
| 1141 |
+
{
|
| 1142 |
+
"type": "text",
|
| 1143 |
+
"text": "1) Datasets & Metric: The proposed algorithms are experimentally evaluated on SVHN [52], CIFAR-10 [53] and CIFAR-100 [53] datasets.",
|
| 1144 |
+
"bbox": [
|
| 1145 |
+
503,
|
| 1146 |
+
744,
|
| 1147 |
+
919,
|
| 1148 |
+
790
|
| 1149 |
+
],
|
| 1150 |
+
"page_idx": 5
|
| 1151 |
+
},
|
| 1152 |
+
{
|
| 1153 |
+
"type": "list",
|
| 1154 |
+
"sub_type": "text",
|
| 1155 |
+
"list_items": [
|
| 1156 |
+
"- The CIFAR-10 dataset consists of 60000 images of size $32 \\times 32$ . The training set of CIFAR-10 consists of 50000 images and the test set consists of 10000 images. The dataset includes ten classes, including images of natural objects such as horse, deer, fork, car and aircraft.",
|
| 1157 |
+
"- The CIFAR-100 dataset is similar to the CIFAR-10 dataset and contains 60000 images of the size $32 \\times 32$ . The 100 classes in the CIFAR-100 are grouped into 20 superclasses. Each class consists of 500 training images and 100 testing images."
|
| 1158 |
+
],
|
| 1159 |
+
"bbox": [
|
| 1160 |
+
521,
|
| 1161 |
+
792,
|
| 1162 |
+
921,
|
| 1163 |
+
945
|
| 1164 |
+
],
|
| 1165 |
+
"page_idx": 5
|
| 1166 |
+
},
|
| 1167 |
+
{
|
| 1168 |
+
"type": "page_number",
|
| 1169 |
+
"text": "6",
|
| 1170 |
+
"bbox": [
|
| 1171 |
+
76,
|
| 1172 |
+
31,
|
| 1173 |
+
86,
|
| 1174 |
+
39
|
| 1175 |
+
],
|
| 1176 |
+
"page_idx": 5
|
| 1177 |
+
},
|
| 1178 |
+
{
|
| 1179 |
+
"type": "text",
|
| 1180 |
+
"text": "- The SVHN dataset includes 73257 training data and 26032 test data of size $32 \\times 32$ . Besides, each example is a close-up image of house numbers from 0 to 9.",
|
| 1181 |
+
"bbox": [
|
| 1182 |
+
91,
|
| 1183 |
+
69,
|
| 1184 |
+
488,
|
| 1185 |
+
113
|
| 1186 |
+
],
|
| 1187 |
+
"page_idx": 6
|
| 1188 |
+
},
|
| 1189 |
+
{
|
| 1190 |
+
"type": "text",
|
| 1191 |
+
"text": "Detailed dataset statistics are summarized in Table III. We use the accuracy metric to evaluate the classification performance.",
|
| 1192 |
+
"bbox": [
|
| 1193 |
+
73,
|
| 1194 |
+
116,
|
| 1195 |
+
488,
|
| 1196 |
+
146
|
| 1197 |
+
],
|
| 1198 |
+
"page_idx": 6
|
| 1199 |
+
},
|
| 1200 |
+
{
|
| 1201 |
+
"type": "text",
|
| 1202 |
+
"text": "2) Experiment Settings: All experiments are implemented with an NVIDIA 1080Ti GPU on PyTorch platform. Following SSL evaluation methods, we evaluate our method on standard SSL benchmarks with the \"Wide-ResNet-28\" model from [54]. Compared with other methods, our model focuses on the challenging label-scare scenario e.g., 2 or 3 labels for each category. For CIFAR-10 and SVHN datasets, we train them for 300 epochs until convergence, the batch size chosen by us is 64. Due to the limited computing resources, the batch size of the all comparison experiments on CIFAR-100 dataset is set to 16. The weight parameter $\\alpha$ to control loss is set to 0.5, and the parameter $\\mu$ of the batch size for the control of unlabeled data is set to 1. The learning rate is set to 0.03 for CIFAR-10, CIFAR-100 and SVHN. The threshold $\\tau$ is set to 0.95. Besides, our network is trained using SGD optimizer. For our proposed method, we adopt the source data of CoMatch [31]. To alleviate the impact of randomness, we evaluate the models on 5 runs for each number of labeled points with different random seeds.",
|
| 1203 |
+
"bbox": [
|
| 1204 |
+
73,
|
| 1205 |
+
147,
|
| 1206 |
+
491,
|
| 1207 |
+
433
|
| 1208 |
+
],
|
| 1209 |
+
"page_idx": 6
|
| 1210 |
+
},
|
| 1211 |
+
{
|
| 1212 |
+
"type": "text",
|
| 1213 |
+
"text": "In Sub-Section \"Transfer to other models\", the algorithms are implemented with an NVIDIA 1080Ti GPU on PyTorch platform with 40, 250, 500, and 1000 labels on CIFAR-10 dataset. Three state-of-the-art algorithms are compared in our transferring experiments, including MixMatch [28], Mean-Teacher [27] and VAT [44]. For those algorithms, we reproduce results by adopting their source code with the original settings. The code for the compared algorithms can be downloaded from the authors' website: MixMatch<sup>1</sup>, MeanTeacher<sup>2</sup>, VAT<sup>3</sup>. Specifically, the training epoch is set as 300. The learning rate of the optimizer is set as 0.002 for MixMatch, 0.003 for Mean-Teacher, and 0.01 for VAT.",
|
| 1214 |
+
"bbox": [
|
| 1215 |
+
73,
|
| 1216 |
+
434,
|
| 1217 |
+
491,
|
| 1218 |
+
614
|
| 1219 |
+
],
|
| 1220 |
+
"page_idx": 6
|
| 1221 |
+
},
|
| 1222 |
+
{
|
| 1223 |
+
"type": "text",
|
| 1224 |
+
"text": "B. Comparison with the State-of-the-Art Algorithms",
|
| 1225 |
+
"text_level": 1,
|
| 1226 |
+
"bbox": [
|
| 1227 |
+
73,
|
| 1228 |
+
635,
|
| 1229 |
+
429,
|
| 1230 |
+
650
|
| 1231 |
+
],
|
| 1232 |
+
"page_idx": 6
|
| 1233 |
+
},
|
| 1234 |
+
{
|
| 1235 |
+
"type": "text",
|
| 1236 |
+
"text": "In this section, six state-of-the-art semi-supervised algorithms are compared to verify the effectiveness of ICL-SSL. The information for the compared algorithms is listed as follows:",
|
| 1237 |
+
"bbox": [
|
| 1238 |
+
73,
|
| 1239 |
+
655,
|
| 1240 |
+
490,
|
| 1241 |
+
715
|
| 1242 |
+
],
|
| 1243 |
+
"page_idx": 6
|
| 1244 |
+
},
|
| 1245 |
+
{
|
| 1246 |
+
"type": "list",
|
| 1247 |
+
"sub_type": "text",
|
| 1248 |
+
"list_items": [
|
| 1249 |
+
"(1) CoMatch [31]: The class probabilities and low-dimensional embeddings are jointly learned in CoMatch. Through imposing a smoothness constraint to the class probabilities, the quality of pseudo labels could be improved. Overall, CoMatch combines the pseudo-based model, the contrast-loss-based model and the graph-based model to improve the model performance in the case of few labels.",
|
| 1250 |
+
"(2) FixMatch [29]: For the labeled image FixMatch utilize weak-augmentation to generate the pseudo label. Additionally, for the unlabeled image, the pseudo label is obtained by the high-confidence prediction. And then, the network is trained to"
|
| 1251 |
+
],
|
| 1252 |
+
"bbox": [
|
| 1253 |
+
73,
|
| 1254 |
+
720,
|
| 1255 |
+
491,
|
| 1256 |
+
893
|
| 1257 |
+
],
|
| 1258 |
+
"page_idx": 6
|
| 1259 |
+
},
|
| 1260 |
+
{
|
| 1261 |
+
"type": "text",
|
| 1262 |
+
"text": "predict the pseudo label with the strongly augmented version of the same image.",
|
| 1263 |
+
"bbox": [
|
| 1264 |
+
503,
|
| 1265 |
+
69,
|
| 1266 |
+
919,
|
| 1267 |
+
99
|
| 1268 |
+
],
|
| 1269 |
+
"page_idx": 6
|
| 1270 |
+
},
|
| 1271 |
+
{
|
| 1272 |
+
"type": "list",
|
| 1273 |
+
"sub_type": "text",
|
| 1274 |
+
"list_items": [
|
| 1275 |
+
"(3) MixMatch [28]: MixMatch jointly optimizes two losses: the supervised loss and unsupervised loss. In detail, cross-entropy is chosen for the supervised loss. The unsupervised loss is the mean square error (MSE) between predictions and generated pseudo labels. MixMatch constructs pseudo labels by data augmentation. With the use of the sharpen function Sharpen(\\cdot), MixMatch could improve the quality of pseudo labels. In addition, Mixup is added in the training process, which can construct virtual samples through interpolation.",
|
| 1276 |
+
"(4) Virtual Adversarial Training(VAT) [44]: VAT is based on data perturbation. It replaces data augmentation with adversarial transformations. The adversarial transformation can lead to a lower classification error.",
|
| 1277 |
+
"(5) $\\pi$ -model [26]: For the same image, data augmentation is used to apply consistency regularization. The loss of $\\pi$ -model contains the supervised loss and the unsupervised loss. Specifically, the supervised loss is defined as the cross-entropy loss, and the unsupervised loss is the unsupervised consistency loss.",
|
| 1278 |
+
"(6) Mean-Teacher [27]: Mean-Teacher is a student-teacherapproach for SSL. The teacher model is based on the average weights of a student model in each update step. In MeanTeacher, the mean square error loss (MSE) is used as its consistency loss between two predictions. Besides, it uses the exponential moving average (EMA) to update, because the EMA is only updated once per epoch, which can control the model update speed."
|
| 1279 |
+
],
|
| 1280 |
+
"bbox": [
|
| 1281 |
+
503,
|
| 1282 |
+
104,
|
| 1283 |
+
921,
|
| 1284 |
+
532
|
| 1285 |
+
],
|
| 1286 |
+
"page_idx": 6
|
| 1287 |
+
},
|
| 1288 |
+
{
|
| 1289 |
+
"type": "text",
|
| 1290 |
+
"text": "C. Performance Comparison",
|
| 1291 |
+
"text_level": 1,
|
| 1292 |
+
"bbox": [
|
| 1293 |
+
504,
|
| 1294 |
+
549,
|
| 1295 |
+
702,
|
| 1296 |
+
563
|
| 1297 |
+
],
|
| 1298 |
+
"page_idx": 6
|
| 1299 |
+
},
|
| 1300 |
+
{
|
| 1301 |
+
"type": "list",
|
| 1302 |
+
"sub_type": "text",
|
| 1303 |
+
"list_items": [
|
| 1304 |
+
"1) CIFAR-10: To demonstrate the superiority of ICL-SSL, we conduct performance comparison experiments for our proposed ICL-SSL and 4 baselines, including Mean-Teacher [27], MixMatch [28], FixMath [29] and CoMatch [31]. For CIFAR-10 dataset, we evaluate the accuracy of above methods with a varying number of labeled data from 20 to 40. The results are reported in Table. IV. For fairness, we create 5 runs for each number of labeled points with different random seeds to alleviate the influence of randomness. We can observe that our method ICL-SSL outperforms all other methods by a significant margin, taking the result on only 2 labeled data in each class for example, ICL-SSL could reach an accuracy of $88.73\\%$ . For comparison, at 20 labels the second best algorithm (CoMatch [31]) achieves an accuracy $83.43\\%$ , which is $5.30\\%$ lower than ICL-SSL. ICL-SSL can achieve higher accuracy by using fewer labels.",
|
| 1305 |
+
"2) SVHN: Moreover, we implement comparison experiments on SVHN dataset. The comparison algorithms contains $\\pi$ model [26], Mean-Teacher [27], MixMatch [28], FixMatch [29]. The quantity of labels is 250 to 1000. The results can be seen in TableV. With different random seeds, we evaluate the models on 5 runs for each number of labeled data. We could observe that ICL-SSL outperforms all compared methods SVHN with 250, 500, and 1000 labeled data. For example, ICL-SSL exceeds MixMatch by $3.46\\%$ with 250 labels."
|
| 1306 |
+
],
|
| 1307 |
+
"bbox": [
|
| 1308 |
+
503,
|
| 1309 |
+
566,
|
| 1310 |
+
921,
|
| 1311 |
+
944
|
| 1312 |
+
],
|
| 1313 |
+
"page_idx": 6
|
| 1314 |
+
},
|
| 1315 |
+
{
|
| 1316 |
+
"type": "page_number",
|
| 1317 |
+
"text": "7",
|
| 1318 |
+
"bbox": [
|
| 1319 |
+
911,
|
| 1320 |
+
30,
|
| 1321 |
+
919,
|
| 1322 |
+
39
|
| 1323 |
+
],
|
| 1324 |
+
"page_idx": 6
|
| 1325 |
+
},
|
| 1326 |
+
{
|
| 1327 |
+
"type": "page_footnote",
|
| 1328 |
+
"text": "1 https://github.com/google-research/mixmatch",
|
| 1329 |
+
"bbox": [
|
| 1330 |
+
62,
|
| 1331 |
+
904,
|
| 1332 |
+
318,
|
| 1333 |
+
917
|
| 1334 |
+
],
|
| 1335 |
+
"page_idx": 6
|
| 1336 |
+
},
|
| 1337 |
+
{
|
| 1338 |
+
"type": "page_footnote",
|
| 1339 |
+
"text": "2 https://github.com/siit-vtt/semi-supervised-learning-pytorch",
|
| 1340 |
+
"bbox": [
|
| 1341 |
+
62,
|
| 1342 |
+
917,
|
| 1343 |
+
390,
|
| 1344 |
+
931
|
| 1345 |
+
],
|
| 1346 |
+
"page_idx": 6
|
| 1347 |
+
},
|
| 1348 |
+
{
|
| 1349 |
+
"type": "page_footnote",
|
| 1350 |
+
"text": "3 https://github.com/lyakaap/VAT-pytorch",
|
| 1351 |
+
"bbox": [
|
| 1352 |
+
62,
|
| 1353 |
+
931,
|
| 1354 |
+
290,
|
| 1355 |
+
944
|
| 1356 |
+
],
|
| 1357 |
+
"page_idx": 6
|
| 1358 |
+
},
|
| 1359 |
+
{
|
| 1360 |
+
"type": "image",
|
| 1361 |
+
"img_path": "images/145b89890e3ad728492ca9e2e7b3625acf1979302110d68f8f00b66ef3258793.jpg",
|
| 1362 |
+
"image_caption": [
|
| 1363 |
+
"(a)"
|
| 1364 |
+
],
|
| 1365 |
+
"image_footnote": [],
|
| 1366 |
+
"bbox": [
|
| 1367 |
+
86,
|
| 1368 |
+
104,
|
| 1369 |
+
272,
|
| 1370 |
+
212
|
| 1371 |
+
],
|
| 1372 |
+
"page_idx": 7
|
| 1373 |
+
},
|
| 1374 |
+
{
|
| 1375 |
+
"type": "image",
|
| 1376 |
+
"img_path": "images/ee3054f0fc6b5c032a006efdeed90a9eaab2fc5ce9a4ede18592661a5f7e8d9e.jpg",
|
| 1377 |
+
"image_caption": [
|
| 1378 |
+
"(b)"
|
| 1379 |
+
],
|
| 1380 |
+
"image_footnote": [],
|
| 1381 |
+
"bbox": [
|
| 1382 |
+
295,
|
| 1383 |
+
104,
|
| 1384 |
+
475,
|
| 1385 |
+
212
|
| 1386 |
+
],
|
| 1387 |
+
"page_idx": 7
|
| 1388 |
+
},
|
| 1389 |
+
{
|
| 1390 |
+
"type": "image",
|
| 1391 |
+
"img_path": "images/8f2017fad4fcc7765b09cf37f9664f5dc0e2f7a447e09c09bfcfeb8e2a5b1bae.jpg",
|
| 1392 |
+
"image_caption": [
|
| 1393 |
+
"(c)"
|
| 1394 |
+
],
|
| 1395 |
+
"image_footnote": [],
|
| 1396 |
+
"bbox": [
|
| 1397 |
+
501,
|
| 1398 |
+
104,
|
| 1399 |
+
684,
|
| 1400 |
+
212
|
| 1401 |
+
],
|
| 1402 |
+
"page_idx": 7
|
| 1403 |
+
},
|
| 1404 |
+
{
|
| 1405 |
+
"type": "image",
|
| 1406 |
+
"img_path": "images/98bcf8f77142e247501731436e01b81eb354980ff9eaf2b785ede5f25c49d68c.jpg",
|
| 1407 |
+
"image_caption": [
|
| 1408 |
+
"(d)"
|
| 1409 |
+
],
|
| 1410 |
+
"image_footnote": [],
|
| 1411 |
+
"bbox": [
|
| 1412 |
+
705,
|
| 1413 |
+
106,
|
| 1414 |
+
890,
|
| 1415 |
+
212
|
| 1416 |
+
],
|
| 1417 |
+
"page_idx": 7
|
| 1418 |
+
},
|
| 1419 |
+
{
|
| 1420 |
+
"type": "image",
|
| 1421 |
+
"img_path": "images/5f115e17711485d1c06a60b0032c2a57e19cb6f04e58e26a4eaaeae52e5f4057.jpg",
|
| 1422 |
+
"image_caption": [
|
| 1423 |
+
"(e)"
|
| 1424 |
+
],
|
| 1425 |
+
"image_footnote": [],
|
| 1426 |
+
"bbox": [
|
| 1427 |
+
86,
|
| 1428 |
+
251,
|
| 1429 |
+
269,
|
| 1430 |
+
358
|
| 1431 |
+
],
|
| 1432 |
+
"page_idx": 7
|
| 1433 |
+
},
|
| 1434 |
+
{
|
| 1435 |
+
"type": "image",
|
| 1436 |
+
"img_path": "images/a2ebf6ce71403f79b4c3a50b401b3f8d4fb2744e46cb159903960020e31ec513.jpg",
|
| 1437 |
+
"image_caption": [
|
| 1438 |
+
"(f)"
|
| 1439 |
+
],
|
| 1440 |
+
"image_footnote": [],
|
| 1441 |
+
"bbox": [
|
| 1442 |
+
295,
|
| 1443 |
+
251,
|
| 1444 |
+
475,
|
| 1445 |
+
358
|
| 1446 |
+
],
|
| 1447 |
+
"page_idx": 7
|
| 1448 |
+
},
|
| 1449 |
+
{
|
| 1450 |
+
"type": "image",
|
| 1451 |
+
"img_path": "images/5ff2e368d15f5024cf83efd8b47515e6411693dde7ee78be21335261951aea08.jpg",
|
| 1452 |
+
"image_caption": [
|
| 1453 |
+
"(g)",
|
| 1454 |
+
"Figure 6: Performance variation when the number of labeled data changes from 40 to 1000 on the CIFAR-10 dataset. (a) - (d) are the classification accuracy of Mean-Teacher [27] and (e) - (h) are the results of MixMatch [28]. The blue curve denotes the accuracy enhanced by our positive sample pair construction mechanism, and the red curve represents the accuracy of the original model."
|
| 1455 |
+
],
|
| 1456 |
+
"image_footnote": [],
|
| 1457 |
+
"bbox": [
|
| 1458 |
+
501,
|
| 1459 |
+
251,
|
| 1460 |
+
683,
|
| 1461 |
+
358
|
| 1462 |
+
],
|
| 1463 |
+
"page_idx": 7
|
| 1464 |
+
},
|
| 1465 |
+
{
|
| 1466 |
+
"type": "image",
|
| 1467 |
+
"img_path": "images/535cd9fc4729b045208073275400d564bb3c305583e05b779551a3f421ff69ed.jpg",
|
| 1468 |
+
"image_caption": [
|
| 1469 |
+
"(h)"
|
| 1470 |
+
],
|
| 1471 |
+
"image_footnote": [],
|
| 1472 |
+
"bbox": [
|
| 1473 |
+
710,
|
| 1474 |
+
251,
|
| 1475 |
+
890,
|
| 1476 |
+
358
|
| 1477 |
+
],
|
| 1478 |
+
"page_idx": 7
|
| 1479 |
+
},
|
| 1480 |
+
{
|
| 1481 |
+
"type": "image",
|
| 1482 |
+
"img_path": "images/e26df4f5accbc721140fb5673a2c77efb7d81a9210c786f5040d360793c8ba26.jpg",
|
| 1483 |
+
"image_caption": [
|
| 1484 |
+
"(a) MixMatch",
|
| 1485 |
+
"Figure 7: Performance comparison of different state-of-the-art methods on CIFAR-10 dataset with a varying number of labeled data. The blue curve denotes the accuracy enhanced by our positive sample pair construction mechanism, and red curve represents the accuracy of the original model, respectively."
|
| 1486 |
+
],
|
| 1487 |
+
"image_footnote": [],
|
| 1488 |
+
"bbox": [
|
| 1489 |
+
91,
|
| 1490 |
+
503,
|
| 1491 |
+
352,
|
| 1492 |
+
686
|
| 1493 |
+
],
|
| 1494 |
+
"page_idx": 7
|
| 1495 |
+
},
|
| 1496 |
+
{
|
| 1497 |
+
"type": "image",
|
| 1498 |
+
"img_path": "images/43b4ab784e696becc4fd2982e46214ebb11dc33f306338216887bc88e838d385.jpg",
|
| 1499 |
+
"image_caption": [
|
| 1500 |
+
"(b) Mean-Teacher"
|
| 1501 |
+
],
|
| 1502 |
+
"image_footnote": [],
|
| 1503 |
+
"bbox": [
|
| 1504 |
+
367,
|
| 1505 |
+
503,
|
| 1506 |
+
625,
|
| 1507 |
+
685
|
| 1508 |
+
],
|
| 1509 |
+
"page_idx": 7
|
| 1510 |
+
},
|
| 1511 |
+
{
|
| 1512 |
+
"type": "image",
|
| 1513 |
+
"img_path": "images/b91982d14e39f7d9afbccb97ebb57fe311bf1d8c6e5cb6bec9b02aa9aaa7ec7a.jpg",
|
| 1514 |
+
"image_caption": [
|
| 1515 |
+
"(c) VAT"
|
| 1516 |
+
],
|
| 1517 |
+
"image_footnote": [],
|
| 1518 |
+
"bbox": [
|
| 1519 |
+
645,
|
| 1520 |
+
505,
|
| 1521 |
+
901,
|
| 1522 |
+
685
|
| 1523 |
+
],
|
| 1524 |
+
"page_idx": 7
|
| 1525 |
+
},
|
| 1526 |
+
{
|
| 1527 |
+
"type": "text",
|
| 1528 |
+
"text": "3) CIFAR-100: To further investigate the effectiveness of our proposed model, we conduct experiments on CIFAR-100 dataset. Table. V reports the performance of the four methods with 200, 400, and 1000 labels. From those results, we can observe that, our proposed ICL-SSL could achieve better performance compared with other state-of-the-art algorithms. Taking the result with 200 lables for example, ICL-SSL exceeds FixMatch [29] by $4.75\\%$ .",
|
| 1529 |
+
"bbox": [
|
| 1530 |
+
73,
|
| 1531 |
+
791,
|
| 1532 |
+
491,
|
| 1533 |
+
912
|
| 1534 |
+
],
|
| 1535 |
+
"page_idx": 7
|
| 1536 |
+
},
|
| 1537 |
+
{
|
| 1538 |
+
"type": "text",
|
| 1539 |
+
"text": "Through the above experiments, our method outperforms all the existing methods in the case of few labels. The reason is",
|
| 1540 |
+
"bbox": [
|
| 1541 |
+
73,
|
| 1542 |
+
914,
|
| 1543 |
+
491,
|
| 1544 |
+
945
|
| 1545 |
+
],
|
| 1546 |
+
"page_idx": 7
|
| 1547 |
+
},
|
| 1548 |
+
{
|
| 1549 |
+
"type": "text",
|
| 1550 |
+
"text": "that other methods use data augmentation to generate positive sample pairs, easily leading to incorrect regularization. Different from them, our ICL-SSL aims to improve the discriminative capability from two aspects. Firstly, we proposed an interpolation-based method to construct more reliable positive sample pairs, thus alleviating the incorrect regularization. Additionally, we design a contrastive loss to guide the embedding to change linearly in samples, which could enlarge the margin decision boundaries. In summary, we proposed ICL-SSL that could improve the discriminative capability of the network and",
|
| 1551 |
+
"bbox": [
|
| 1552 |
+
501,
|
| 1553 |
+
791,
|
| 1554 |
+
921,
|
| 1555 |
+
944
|
| 1556 |
+
],
|
| 1557 |
+
"page_idx": 7
|
| 1558 |
+
},
|
| 1559 |
+
{
|
| 1560 |
+
"type": "page_number",
|
| 1561 |
+
"text": "8",
|
| 1562 |
+
"bbox": [
|
| 1563 |
+
76,
|
| 1564 |
+
31,
|
| 1565 |
+
86,
|
| 1566 |
+
39
|
| 1567 |
+
],
|
| 1568 |
+
"page_idx": 7
|
| 1569 |
+
},
|
| 1570 |
+
{
|
| 1571 |
+
"type": "table",
|
| 1572 |
+
"img_path": "images/9d68d998f9354086a7949d3eb64a3adf84ee921ea45d362af83a4a9b01b73b65.jpg",
|
| 1573 |
+
"table_caption": [
|
| 1574 |
+
"Table IV: Accuracy comparison with other state-of-the-art methods on five different folds, including Mean-Teacher [27], MixMatch [28], FixMatch [29] and CoMatch [31] on CIFAR-10 dataset. The red and blue values indicate the best and the runner-up results."
|
| 1575 |
+
],
|
| 1576 |
+
"table_footnote": [],
|
| 1577 |
+
"table_body": "<table><tr><td>Method</td><td></td><td>20 labels</td><td>30 labels</td><td>40 labels</td></tr><tr><td>Mean-Teacher</td><td>[27]</td><td>21.79±0.57</td><td>24.51±0.35</td><td>24.93±0.62</td></tr><tr><td>MixMatch</td><td>[28]</td><td>38.51±8.48</td><td>50.10±5.81</td><td>59.08±3.04</td></tr><tr><td>FixMatch</td><td>[29]</td><td>72.63±5.37</td><td>86.65±3.56</td><td>89.69±4.58</td></tr><tr><td>CoMatch</td><td>[31]</td><td>83.43±9.20</td><td>88.68±3.79</td><td>90.14±2.86</td></tr><tr><td>ICL-SSL</td><td>Ours</td><td>88.73±5.69</td><td>90.30±3.10</td><td>91.78±2.23</td></tr></table>",
|
| 1578 |
+
"bbox": [
|
| 1579 |
+
81,
|
| 1580 |
+
146,
|
| 1581 |
+
488,
|
| 1582 |
+
280
|
| 1583 |
+
],
|
| 1584 |
+
"page_idx": 8
|
| 1585 |
+
},
|
| 1586 |
+
{
|
| 1587 |
+
"type": "text",
|
| 1588 |
+
"text": "achieves the top-level performance on CIFRA-10, SVHN, and CIFAR-100 dataset.",
|
| 1589 |
+
"bbox": [
|
| 1590 |
+
73,
|
| 1591 |
+
306,
|
| 1592 |
+
491,
|
| 1593 |
+
335
|
| 1594 |
+
],
|
| 1595 |
+
"page_idx": 8
|
| 1596 |
+
},
|
| 1597 |
+
{
|
| 1598 |
+
"type": "text",
|
| 1599 |
+
"text": "D. Time Cost",
|
| 1600 |
+
"text_level": 1,
|
| 1601 |
+
"bbox": [
|
| 1602 |
+
73,
|
| 1603 |
+
356,
|
| 1604 |
+
171,
|
| 1605 |
+
369
|
| 1606 |
+
],
|
| 1607 |
+
"page_idx": 8
|
| 1608 |
+
},
|
| 1609 |
+
{
|
| 1610 |
+
"type": "text",
|
| 1611 |
+
"text": "As shown in Table VIII, we compare the training and the inference time of ICL-SSL and other state-of-the-art algorithms, including MixMatch [28], FixMatch [29], and CoMatch [31]. The results are the average training time for 300 epochs with 20 labels on CIFAR-10 dataset. We observe that the training and the inference time of ICL-SLL are 193.81 seconds and 1.08 seconds, respectively. From the Table.VIII we find that the computational efficiency of the proposed algorithm is comparable to the MixMatch and FixMatch and is much faster than that of CoMatch.",
|
| 1612 |
+
"bbox": [
|
| 1613 |
+
73,
|
| 1614 |
+
376,
|
| 1615 |
+
490,
|
| 1616 |
+
525
|
| 1617 |
+
],
|
| 1618 |
+
"page_idx": 8
|
| 1619 |
+
},
|
| 1620 |
+
{
|
| 1621 |
+
"type": "text",
|
| 1622 |
+
"text": "E. Ablation Study",
|
| 1623 |
+
"text_level": 1,
|
| 1624 |
+
"bbox": [
|
| 1625 |
+
73,
|
| 1626 |
+
547,
|
| 1627 |
+
202,
|
| 1628 |
+
561
|
| 1629 |
+
],
|
| 1630 |
+
"page_idx": 8
|
| 1631 |
+
},
|
| 1632 |
+
{
|
| 1633 |
+
"type": "text",
|
| 1634 |
+
"text": "In this section, we implement extensive ablation studies to examine the effect of different components in ICL-SSL. Due to the number of experiments in our ablation study, we perform the study with 20 and 40 labels split from CIFAR-10 dataset. The parameter settings are kept the same with comparison experiments, and the results are shown in Table. VII.",
|
| 1635 |
+
"bbox": [
|
| 1636 |
+
73,
|
| 1637 |
+
566,
|
| 1638 |
+
490,
|
| 1639 |
+
657
|
| 1640 |
+
],
|
| 1641 |
+
"page_idx": 8
|
| 1642 |
+
},
|
| 1643 |
+
{
|
| 1644 |
+
"type": "text",
|
| 1645 |
+
"text": "Effective of contrastive loss",
|
| 1646 |
+
"text_level": 1,
|
| 1647 |
+
"bbox": [
|
| 1648 |
+
73,
|
| 1649 |
+
665,
|
| 1650 |
+
276,
|
| 1651 |
+
679
|
| 1652 |
+
],
|
| 1653 |
+
"page_idx": 8
|
| 1654 |
+
},
|
| 1655 |
+
{
|
| 1656 |
+
"type": "text",
|
| 1657 |
+
"text": "To further investigate the superiority of the proposed contrastive loss, we experimentally compare our method. Here, we denote the FixMatch [29] as the baseline. With the experimental results, in the case of few labels, the model performance achieves better performance than that of baselines. Taking the result on CIFAR-10 with 20 labels for example, the accuracy exceeds the baseline by $16.1\\%$ performance increment. From the empirical analysis, it benefits from the contrastive loss to guide the embedding of the network to change linearly between samples to improve the discriminative capability of the network.",
|
| 1658 |
+
"bbox": [
|
| 1659 |
+
73,
|
| 1660 |
+
680,
|
| 1661 |
+
491,
|
| 1662 |
+
845
|
| 1663 |
+
],
|
| 1664 |
+
"page_idx": 8
|
| 1665 |
+
},
|
| 1666 |
+
{
|
| 1667 |
+
"type": "text",
|
| 1668 |
+
"text": "Effective of interpolation-based positive samples construction method.",
|
| 1669 |
+
"text_level": 1,
|
| 1670 |
+
"bbox": [
|
| 1671 |
+
73,
|
| 1672 |
+
854,
|
| 1673 |
+
488,
|
| 1674 |
+
882
|
| 1675 |
+
],
|
| 1676 |
+
"page_idx": 8
|
| 1677 |
+
},
|
| 1678 |
+
{
|
| 1679 |
+
"type": "text",
|
| 1680 |
+
"text": "Additionally, we verify the effectiveness of the interpolation-based positive samples construction method. As shown in TableVII, we can observe that the accuracy would decrease from $88.73\\%$ to $56.91\\%$ . The above experiments",
|
| 1681 |
+
"bbox": [
|
| 1682 |
+
73,
|
| 1683 |
+
883,
|
| 1684 |
+
491,
|
| 1685 |
+
946
|
| 1686 |
+
],
|
| 1687 |
+
"page_idx": 8
|
| 1688 |
+
},
|
| 1689 |
+
{
|
| 1690 |
+
"type": "image",
|
| 1691 |
+
"img_path": "images/6b7e27b01efce2a0346c0fad954d203386f67f83b0c6898d43cf0a8b3ab8d6e8.jpg",
|
| 1692 |
+
"image_caption": [
|
| 1693 |
+
"Figure 8: Sensitivity analysis of the hyper-parameter $\\alpha$ on CIFAR-10 dataset with 20 labels."
|
| 1694 |
+
],
|
| 1695 |
+
"image_footnote": [],
|
| 1696 |
+
"bbox": [
|
| 1697 |
+
526,
|
| 1698 |
+
71,
|
| 1699 |
+
903,
|
| 1700 |
+
247
|
| 1701 |
+
],
|
| 1702 |
+
"page_idx": 8
|
| 1703 |
+
},
|
| 1704 |
+
{
|
| 1705 |
+
"type": "text",
|
| 1706 |
+
"text": "demonstrate the effectiveness of the interpolation-based positive samples construction method.",
|
| 1707 |
+
"bbox": [
|
| 1708 |
+
503,
|
| 1709 |
+
314,
|
| 1710 |
+
919,
|
| 1711 |
+
345
|
| 1712 |
+
],
|
| 1713 |
+
"page_idx": 8
|
| 1714 |
+
},
|
| 1715 |
+
{
|
| 1716 |
+
"type": "text",
|
| 1717 |
+
"text": "F. Sensitivity Analysis",
|
| 1718 |
+
"text_level": 1,
|
| 1719 |
+
"bbox": [
|
| 1720 |
+
504,
|
| 1721 |
+
369,
|
| 1722 |
+
660,
|
| 1723 |
+
386
|
| 1724 |
+
],
|
| 1725 |
+
"page_idx": 8
|
| 1726 |
+
},
|
| 1727 |
+
{
|
| 1728 |
+
"type": "text",
|
| 1729 |
+
"text": "Further, we investigate the effect of hyper-parameters $\\alpha$ . As shown in Fig. 8, we observe that the classification accuracy will not fluctuate greatly when the $\\alpha$ is varying. This demonstrates that our model ICL-SSL is insensitive to the variation of the hyper-parameter $\\alpha$ .",
|
| 1730 |
+
"bbox": [
|
| 1731 |
+
503,
|
| 1732 |
+
388,
|
| 1733 |
+
919,
|
| 1734 |
+
465
|
| 1735 |
+
],
|
| 1736 |
+
"page_idx": 8
|
| 1737 |
+
},
|
| 1738 |
+
{
|
| 1739 |
+
"type": "text",
|
| 1740 |
+
"text": "G. Transferring to other models",
|
| 1741 |
+
"text_level": 1,
|
| 1742 |
+
"bbox": [
|
| 1743 |
+
504,
|
| 1744 |
+
483,
|
| 1745 |
+
725,
|
| 1746 |
+
498
|
| 1747 |
+
],
|
| 1748 |
+
"page_idx": 8
|
| 1749 |
+
},
|
| 1750 |
+
{
|
| 1751 |
+
"type": "text",
|
| 1752 |
+
"text": "To verify the generality of our proposed ICL-SSL, we transfer our method to the existing state-of-the-art algorithms. We implement our method into other semi-supervised learning models (MixMatch [28], VAT [44], Mean-Teacher [27]). All the experiments are implemented with CIFAR-10 dataset. Experiments are carried out on the number of labeled data from 40, 250, 500 and 1000. Here, we denote the baseline and the baseline with our method ICL-SSL as “B” and “ $\\mathbf{B} + \\mathbf{O}$ ”, respectively.",
|
| 1753 |
+
"bbox": [
|
| 1754 |
+
501,
|
| 1755 |
+
502,
|
| 1756 |
+
919,
|
| 1757 |
+
638
|
| 1758 |
+
],
|
| 1759 |
+
"page_idx": 8
|
| 1760 |
+
},
|
| 1761 |
+
{
|
| 1762 |
+
"type": "text",
|
| 1763 |
+
"text": "From Fig. 7, we have observed as follows: 1) The models could achieve better performance with our method. 2) As shown in Table VI, taking the results in MixMatch [28] for example, our method could improve the classification accuracy by $4.02\\%$ on 40 labeled data and $2.14\\%$ on 500 labels on CIFAR-10 dataset, respectively. In conclusion, the experiment results show that ICL-SSL can improve the model performance in other semi-supervised models. Moreover, in Fig. 6, we further show that other state-of-the-art methods could obtain higher accuracy with our proposed strategy during the training process.",
|
| 1764 |
+
"bbox": [
|
| 1765 |
+
503,
|
| 1766 |
+
638,
|
| 1767 |
+
921,
|
| 1768 |
+
804
|
| 1769 |
+
],
|
| 1770 |
+
"page_idx": 8
|
| 1771 |
+
},
|
| 1772 |
+
{
|
| 1773 |
+
"type": "text",
|
| 1774 |
+
"text": "V. CONCLUSION",
|
| 1775 |
+
"text_level": 1,
|
| 1776 |
+
"bbox": [
|
| 1777 |
+
651,
|
| 1778 |
+
820,
|
| 1779 |
+
772,
|
| 1780 |
+
833
|
| 1781 |
+
],
|
| 1782 |
+
"page_idx": 8
|
| 1783 |
+
},
|
| 1784 |
+
{
|
| 1785 |
+
"type": "text",
|
| 1786 |
+
"text": "In this work, we propose an interpolation-based method termed ICL-SSL to construct reliable positive sample pairs, thus alleviating the semantic information drift with extreme labels (e.g., 2 or 3 labels for each class). Specifically, ICL-SSL is a semantic-agnostic method. We interpolate the input images and their representations in image-level and latent space, respectively. Besides, the designed contrastive loss will",
|
| 1787 |
+
"bbox": [
|
| 1788 |
+
503,
|
| 1789 |
+
838,
|
| 1790 |
+
921,
|
| 1791 |
+
946
|
| 1792 |
+
],
|
| 1793 |
+
"page_idx": 8
|
| 1794 |
+
},
|
| 1795 |
+
{
|
| 1796 |
+
"type": "page_number",
|
| 1797 |
+
"text": "9",
|
| 1798 |
+
"bbox": [
|
| 1799 |
+
911,
|
| 1800 |
+
30,
|
| 1801 |
+
919,
|
| 1802 |
+
39
|
| 1803 |
+
],
|
| 1804 |
+
"page_idx": 8
|
| 1805 |
+
},
|
| 1806 |
+
{
|
| 1807 |
+
"type": "table",
|
| 1808 |
+
"img_path": "images/0c7007c8a0ad4b911dc58f36080942fd9c0effde32e43a46a73696762b479fd6.jpg",
|
| 1809 |
+
"table_caption": [
|
| 1810 |
+
"Table V: Accuracy comparison with $\\pi$ model [26], Mean-Teacher [27], MixMatch [28], and FixMatch [29] on CIFAR-100 and SVHN dataset. The red and blue values indicate the best and the runner-up results. The average and std values of the five fold cross validation are reported."
|
| 1811 |
+
],
|
| 1812 |
+
"table_footnote": [],
|
| 1813 |
+
"table_body": "<table><tr><td rowspan=\"2\" colspan=\"2\">Method</td><td colspan=\"3\">CIFAR 100</td><td colspan=\"3\">SVHN</td></tr><tr><td>200 labels</td><td>400 labels</td><td>800 labels</td><td>250 labels</td><td>500 labels</td><td>1000 labels</td></tr><tr><td>π Model</td><td>[26]</td><td>8.53±0.25</td><td>11.67±0.37</td><td>17.64±1.06</td><td>42.66±0.91</td><td>53.33±1.39</td><td>65.90±0.03</td></tr><tr><td>Mean-Teacher</td><td>[27]</td><td>7.11±0.06</td><td>11.54±0.28</td><td>17.82±0.09</td><td>42.70±1.79</td><td>55.71±0.53</td><td>67.71±1.22</td></tr><tr><td>MixMatch</td><td>[28]</td><td>4.55±0.45</td><td>17.68±0.07</td><td>26.75±1.13</td><td>92.12±0.06</td><td>94.53±0.43</td><td>95.13±0.04</td></tr><tr><td>FixMatch</td><td>[29]</td><td>9.31±0.08</td><td>24.44±0.35</td><td>28.12±0.30</td><td>95.45±0.07</td><td>95.73±0.15</td><td>95.94±0.10</td></tr><tr><td>ICL-SSL</td><td>Ours</td><td>14.06±0.52</td><td>26.52±1.20</td><td>33.81±0.63</td><td>95.58±0.14</td><td>95.80±0.12</td><td>96.05±0.14</td></tr></table>",
|
| 1814 |
+
"bbox": [
|
| 1815 |
+
84,
|
| 1816 |
+
116,
|
| 1817 |
+
911,
|
| 1818 |
+
287
|
| 1819 |
+
],
|
| 1820 |
+
"page_idx": 9
|
| 1821 |
+
},
|
| 1822 |
+
{
|
| 1823 |
+
"type": "table",
|
| 1824 |
+
"img_path": "images/73eb8d3fa4d6f19a450b38e5ed9a4afb037cd1a296bb2b7cb46792eea9992e37.jpg",
|
| 1825 |
+
"table_caption": [
|
| 1826 |
+
"Table VI: Applying the interpolation positive sample pair construction mechanism to other state-of-the-art semi-supervised learning algorithms, including MixMatch [28], VAT [44] and Mean-teacher [27], on the CIFAR-10 dataset. The blue values represent the results enhanced by our positive sample pair construction mechanism, and the black values are the results of the original model. 'B' and 'B+O' represent the baseline and the baseline with our method, respectively."
|
| 1827 |
+
],
|
| 1828 |
+
"table_footnote": [],
|
| 1829 |
+
"table_body": "<table><tr><td rowspan=\"2\" colspan=\"2\">Method</td><td colspan=\"2\">40 labels</td><td colspan=\"2\">250 labels</td><td colspan=\"2\">500 labels</td><td colspan=\"2\">1000 labels</td></tr><tr><td>B</td><td>B+O</td><td>B</td><td>B+O</td><td>B</td><td>B+O</td><td>B</td><td>B+O</td></tr><tr><td>VAT</td><td>[44]</td><td>20.00</td><td>23.00</td><td>34.00</td><td>41.00</td><td>47.00</td><td>48.00</td><td>61.00</td><td>66.00</td></tr><tr><td>MixMatch</td><td>[28]</td><td>57.86</td><td>61.88</td><td>86.06</td><td>86.50</td><td>87.00</td><td>89.14</td><td>90.46</td><td>91.56</td></tr><tr><td>Mean-Teacher</td><td>[27]</td><td>24.86</td><td>26.24</td><td>42.88</td><td>45.58</td><td>53.40</td><td>54.90</td><td>66.98</td><td>68.48</td></tr></table>",
|
| 1830 |
+
"bbox": [
|
| 1831 |
+
75,
|
| 1832 |
+
364,
|
| 1833 |
+
928,
|
| 1834 |
+
483
|
| 1835 |
+
],
|
| 1836 |
+
"page_idx": 9
|
| 1837 |
+
},
|
| 1838 |
+
{
|
| 1839 |
+
"type": "table",
|
| 1840 |
+
"img_path": "images/465c983a316ac5243f347cb6ffd0f0ea3408f1fe38819d12e32061b6f9f3a486.jpg",
|
| 1841 |
+
"table_caption": [
|
| 1842 |
+
"Table VII: Ablation comparisons of ICL-SSL mechanism. The results are reported with 20 labels on CIFAR-10 dataset."
|
| 1843 |
+
],
|
| 1844 |
+
"table_footnote": [],
|
| 1845 |
+
"table_body": "<table><tr><td>Ablation</td><td>20 labels</td><td>40 labels</td></tr><tr><td>ICL-SSL</td><td>88.73</td><td>91.78</td></tr><tr><td>ICL-SSL without contrastive loss</td><td>72.63</td><td>89.69</td></tr><tr><td>ICL-SSL without the interpolation-based method</td><td>56.91</td><td>70.89</td></tr></table>",
|
| 1846 |
+
"bbox": [
|
| 1847 |
+
78,
|
| 1848 |
+
541,
|
| 1849 |
+
486,
|
| 1850 |
+
632
|
| 1851 |
+
],
|
| 1852 |
+
"page_idx": 9
|
| 1853 |
+
},
|
| 1854 |
+
{
|
| 1855 |
+
"type": "table",
|
| 1856 |
+
"img_path": "images/04e03634fdee3597d93e07eac6eab985cb9e97a61aacef4407ec79000e3902d4.jpg",
|
| 1857 |
+
"table_caption": [
|
| 1858 |
+
"Table VIII: Training and inference time comparison on CIFAR-10 dataset with 20 labels."
|
| 1859 |
+
],
|
| 1860 |
+
"table_footnote": [],
|
| 1861 |
+
"table_body": "<table><tr><td>Method</td><td></td><td>Training Time (s)</td><td>Inference Time (s)</td></tr><tr><td>MixMatch</td><td>[28]</td><td>121.50 ± 0.21</td><td>0.60 ± 0.13</td></tr><tr><td>FixMatch</td><td>[29]</td><td>155.37 ± 0.10</td><td>3.46 ± 0.07</td></tr><tr><td>CoMatch</td><td>[31]</td><td>571.65 ± 0.15</td><td>1.30 ± 0.03</td></tr><tr><td>ICL-SLL</td><td>Ours</td><td>193.81 ± 0.14</td><td>1.08 ± 0.62</td></tr></table>",
|
| 1862 |
+
"bbox": [
|
| 1863 |
+
80,
|
| 1864 |
+
684,
|
| 1865 |
+
493,
|
| 1866 |
+
808
|
| 1867 |
+
],
|
| 1868 |
+
"page_idx": 9
|
| 1869 |
+
},
|
| 1870 |
+
{
|
| 1871 |
+
"type": "text",
|
| 1872 |
+
"text": "guide the embeddings changing linearly between samples and thus get a larger margin decision boundary. Benefiting from this mechanism, the discriminative capability of the network can be improved with extreme labels. Extensive experiments demonstrate the effectiveness and generality of our ICL-SSL. In the future, we will try to extend ICL-SSL to other fields (e.g. graph semi-supervised node classification). Besides, as",
|
| 1873 |
+
"bbox": [
|
| 1874 |
+
73,
|
| 1875 |
+
838,
|
| 1876 |
+
493,
|
| 1877 |
+
946
|
| 1878 |
+
],
|
| 1879 |
+
"page_idx": 9
|
| 1880 |
+
},
|
| 1881 |
+
{
|
| 1882 |
+
"type": "text",
|
| 1883 |
+
"text": "we analyzed in section IV-D, although our proposed algorithm is as efficient as other state-of-the-art contrastive algorithms, its efficiency still needs to be improved to suit even larger scale datasets. Therefore, how to reduce the training time is also a future work direction.",
|
| 1884 |
+
"bbox": [
|
| 1885 |
+
501,
|
| 1886 |
+
508,
|
| 1887 |
+
921,
|
| 1888 |
+
583
|
| 1889 |
+
],
|
| 1890 |
+
"page_idx": 9
|
| 1891 |
+
},
|
| 1892 |
+
{
|
| 1893 |
+
"type": "text",
|
| 1894 |
+
"text": "REFERENCES",
|
| 1895 |
+
"text_level": 1,
|
| 1896 |
+
"bbox": [
|
| 1897 |
+
663,
|
| 1898 |
+
595,
|
| 1899 |
+
761,
|
| 1900 |
+
607
|
| 1901 |
+
],
|
| 1902 |
+
"page_idx": 9
|
| 1903 |
+
},
|
| 1904 |
+
{
|
| 1905 |
+
"type": "list",
|
| 1906 |
+
"sub_type": "ref_text",
|
| 1907 |
+
"list_items": [
|
| 1908 |
+
"[1] R. He, Z. Han, X. Lu, and Y. Yin, \"Safe-student for safe deep semi-supervised learning with unseen-class unlabeled data,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2022, pp. 14585-14594.",
|
| 1909 |
+
"[2] X. Hu, Y. Zeng, X. Xu, S. Zhou, and L. Liu, \"Robust semi-supervised classification based on data augmented online elms with deep features,\" Knowledge-Based Systems, vol. 229, p. 107307, 2021.",
|
| 1910 |
+
"[3] T.-Y. Lin, M. Maire, S. Belongie, J. Hays, P. Perona, D. Ramanan, P. Dollar, and C. L. Zitnick, \"Microsoft coco: Common objects in context,\" in European conference on computer vision. Springer, 2014, pp. 740-755.",
|
| 1911 |
+
"[4] I. Bekkerman and J. Tabrikian, “Target detection and localization using mimo radars and sonars,” IEEE Transactions on Signal Processing, vol. 54, no. 10, pp. 3873–3883, 2006.",
|
| 1912 |
+
"[5] M. Everingham, S. A. Eslami, L. Van Gool, C. K. Williams, J. Winn, and A. Zisserman, \"The pascal visual object classes challenge: A retrospective,\" International journal of computer vision, vol. 111, no. 1, pp. 98-136, 2015.",
|
| 1913 |
+
"[6] S. Zhou, D. Nie, E. Adeli, J. Yin, J. Lian, and D. Shen, \"High-resolution encoder-decoder networks for low-contrast medical image segmentation,\" IEEE Transactions on Image Processing, vol. 29, pp. 461-475, 2019.",
|
| 1914 |
+
"[7] L. Li, S. Wang, X. Liu, E. Zhu, L. Shen, K. Li, and K. Li, \"Local sample-weighted multiple kernel clustering with consensus discriminative graph,\" IEEE Transactions on Neural Networks and Learning Systems, 2022.",
|
| 1915 |
+
"[8] S. Wang, X. Liu, L. Liu, S. Zhou, and E. Zhu, “Late fusion multiple kernel clustering with proxy graph refinement,” IEEE Transactions on Neural Networks and Learning Systems, 2021."
|
| 1916 |
+
],
|
| 1917 |
+
"bbox": [
|
| 1918 |
+
511,
|
| 1919 |
+
614,
|
| 1920 |
+
921,
|
| 1921 |
+
943
|
| 1922 |
+
],
|
| 1923 |
+
"page_idx": 9
|
| 1924 |
+
},
|
| 1925 |
+
{
|
| 1926 |
+
"type": "page_number",
|
| 1927 |
+
"text": "10",
|
| 1928 |
+
"bbox": [
|
| 1929 |
+
78,
|
| 1930 |
+
30,
|
| 1931 |
+
93,
|
| 1932 |
+
40
|
| 1933 |
+
],
|
| 1934 |
+
"page_idx": 9
|
| 1935 |
+
},
|
| 1936 |
+
{
|
| 1937 |
+
"type": "list",
|
| 1938 |
+
"sub_type": "ref_text",
|
| 1939 |
+
"list_items": [
|
| 1940 |
+
"[9] S. Zhou, X. Liu, M. Li, E. Zhu, L. Liu, C. Zhang, and J. Yin, “Multiple kernel clustering with neighbor-kernel subspace segmentation,” IEEE transactions on neural networks and learning systems, vol. 31, no. 4, pp. 1351–1362, 2019.",
|
| 1941 |
+
"[10] S. Wang, X. Liu, L. Liu, W. Tu, X. Zhu, J. Liu, S. Zhou, and E. Zhu, “Highly-efficient incomplete large-scale multi-view clustering with consensus bipartite graph,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 9776–9785.",
|
| 1942 |
+
"[11] S. Zhou, E. Zhu, X. Liu, T. Zheng, Q. Liu, J. Xia, and J. Yin, \"Subspace segmentation-based robust multiple kernel clustering,\" Information Fusion, vol. 53, pp. 145-154, 2020.",
|
| 1943 |
+
"[12] S. Wang, X. Liu, E. Zhu, C. Tang, J. Liu, J. Hu, J. Xia, and J. Yin, \"Multi-view clustering via late fusion alignment maximization,\" in IJCAI, 2019, pp. 3778-3784.",
|
| 1944 |
+
"[13] S. Zhou, X. Liu, J. Liu, X. Guo, Y. Zhao, E. Zhu, Y. Zhai, J. Yin, and W. Gao, \"Multi-view spectral clustering with optimal neighborhood laplacian matrix,\" in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, no. 04, 2020, pp. 6965-6972.",
|
| 1945 |
+
"[14] S. Wang, X. Liu, X. Zhu, P. Zhang, Y. Zhang, F. Gao, and E. Zhu, \"Fast parameter-free multi-view subspace clustering with consensus anchor guidance,\" IEEE Transactions on Image Processing, vol. 31, pp. 556-568, 2021.",
|
| 1946 |
+
"[15] D. Mahajan, R. Girshick, V. Ramanathan, K. He, M. Paluri, Y. Li, A. Bharambe, and L. Van Der Maaten, \"Exploring the limits of weakly supervised pretraining,\" in Proceedings of the European conference on computer vision (ECCV), 2018, pp. 181-196.",
|
| 1947 |
+
"[16] R. He, Z. Han, and Y. Yin, \"Towards safe and robust weakly-supervised anomaly detection under subpopulation shift,\" Knowledge-Based Systems, p. 109088, 2022.",
|
| 1948 |
+
"[17] M. Luo, X. Chang, L. Nie, Y. Yang, A. G. Hauptmann, and Q. Zheng, \"An adaptive semisupervised feature analysis for video semantic recognition,\" IEEE transactions on cybernetics, vol. 48, no. 2, pp. 648-660, 2017.",
|
| 1949 |
+
"[18] K. Chen, L. Yao, D. Zhang, X. Wang, X. Chang, and F. Nie, \"A semisupervised recurrent convolutional attention model for human activity recognition,\" IEEE transactions on neural networks and learning systems, vol. 31, no. 5, pp. 1747-1756, 2019.",
|
| 1950 |
+
"[19] E. Yu, J. Sun, J. Li, X. Chang, X.-H. Han, and A. G. Hauptmann, \"Adaptive semi-supervised feature selection for cross-modal retrieval,\" IEEE Transactions on Multimedia, vol. 21, no. 5, pp. 1276-1288, 2018.",
|
| 1951 |
+
"[20] Z. Zhang, T. W. Chow, and M. Zhao, \"Trace ratio optimization-based semi-supervised nonlinear dimensionality reduction for marginal manifold visualization,\" IEEE Transactions on Knowledge and Data Engineering, vol. 25, no. 5, pp. 1148-1161, 2012.",
|
| 1952 |
+
"[21] Z. Zhang, F. Li, L. Jia, J. Qin, L. Zhang, and S. Yan, \"Robust adaptive embedded label propagation with weight learning for inductive classification,\" IEEE transactions on neural networks and learning systems, vol. 29, no. 8, pp. 3388-3403, 2017.",
|
| 1953 |
+
"[22] H. Zhang, Z. Zhang, M. Zhao, Q. Ye, M. Zhang, and M. Wang, \"Robust triple-matrix-recovery-based auto-weighted label propagation for classification,\" IEEE Transactions on Neural Networks and Learning Systems, vol. 31, no. 11, pp. 4538-4552, 2020.",
|
| 1954 |
+
"[23] Q. Ye, J. Yang, T. Yin, and Z. Zhang, \"Can the virtual labels obtained by traditional lp approaches be well encoded in wrl?\" IEEE transactions on neural networks and learning systems, vol. 27, no. 7, pp. 1591-1598, 2015.",
|
| 1955 |
+
"[24] R. Jozefowicz, O. Vinyals, M. Schuster, N. Shazeer, and Y. Wu, “Exploring the limits of language modeling,” arXiv preprint arXiv:1602.02410, 2016.",
|
| 1956 |
+
"[25] M. Sajjadi, M. Javanmardi, and T. Tasdizen, \"Regularization with stochastic transformations and perturbations for deep semi-supervised learning,\" Advances in neural information processing systems, vol. 29, pp. 1163-1171, 2016.",
|
| 1957 |
+
"[26] S. Laine and T. Aila, “Temporal ensembling for semi-supervised learning,” arXiv preprint arXiv:1610.02242, 2016.",
|
| 1958 |
+
"[27] A. Tarvainen and H. Valpola, “Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results,” arXiv preprint arXiv:1703.01780, 2017.",
|
| 1959 |
+
"[28] D. Berthelot, N. Carlini, I. Goodfellow, N. Papernot, A. Oliver, and C. Raffel, \"Mixmatch: A holistic approach to semi-supervised learning,\" arXiv preprint arXiv:1905.02249, 2019.",
|
| 1960 |
+
"[29] K. Sohn, D. Berthelot, C.-L. Li, Z. Zhang, N. Carlini, E. D. Cubuk, A. Kurakin, H. Zhang, and C. Raffel, \"Fixmatch: Simplifying semi-supervised learning with consistency and confidence,\" arXiv preprint arXiv:2001.07685, 2020.",
|
| 1961 |
+
"[30] D. Berthelot, N. Carlini, E. D. Cubuk, A. Kurakin, K. Sohn, H. Zhang, and C. Raffel, \"Remixmatch: Semi-supervised learning with"
|
| 1962 |
+
],
|
| 1963 |
+
"bbox": [
|
| 1964 |
+
76,
|
| 1965 |
+
71,
|
| 1966 |
+
491,
|
| 1967 |
+
944
|
| 1968 |
+
],
|
| 1969 |
+
"page_idx": 10
|
| 1970 |
+
},
|
| 1971 |
+
{
|
| 1972 |
+
"type": "list",
|
| 1973 |
+
"sub_type": "ref_text",
|
| 1974 |
+
"list_items": [
|
| 1975 |
+
"distribution alignment and augmentation anchoring,\" arXiv preprint arXiv:1911.09785, 2019.",
|
| 1976 |
+
"[31] J. Li, C. Xiong, and S. C. Hoi, \"Comatch: Semi-supervised learning with contrastive graph regularization,\" in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021, pp. 9475-9484.",
|
| 1977 |
+
"[32] H. Zhang, M. Cisse, Y. N. Dauphin, and D. Lopez-Paz, \"mixup: Beyond empirical risk minimization,\" arXiv preprint arXiv:1710.09412, 2017.",
|
| 1978 |
+
"[33] V. Verma, K. Kawaguchi, A. Lamb, J. Kannala, Y. Bengio, and D. Lopez-Paz, \"Interpolation consistency training for semi-supervised learning,\" arXiv preprint arXiv:1903.03825, 2019.",
|
| 1979 |
+
"[34] T. Chen, S. Kornblith, M. Norouzi, and G. Hinton, \"A simple framework for contrastive learning of visual representations,\" in International conference on machine learning. PMLR, 2020, pp. 1597-1607.",
|
| 1980 |
+
"[35] K. He, H. Fan, Y. Wu, S. Xie, and R. Girshick, \"Momentum contrast for unsupervised visual representation learning,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp. 9729-9738.",
|
| 1981 |
+
"[36] J. Zbontar, L. Jing, I. Misra, Y. LeCun, and S. Deny, “Barlow twins: Self-supervised learning via redundancy reduction,” arXiv preprint arXiv:2103.03230, 2021.",
|
| 1982 |
+
"[37] X. Chen and K. He, “Exploring simple siamese representation learning,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021, pp. 15750-15758.",
|
| 1983 |
+
"[38] Z. Wu, Y. Xiong, S. X. Yu, and D. Lin, \"Unsupervised feature learning via non-parametric instance discrimination,\" in Proceedings of the IEEE conference on computer vision and pattern recognition, 2018, pp. 3733-3742.",
|
| 1984 |
+
"[39] A. v. d. Oord, Y. Li, and O. Vinyals, “Representation learning with contrastive predictive coding,” arXiv preprint arXiv:1807.03748, 2018.",
|
| 1985 |
+
"[40] T. Chen, S. Kornblith, K. Swersky, M. Norouzi, and G. Hinton, \"Big self-supervised models are strong semi-supervised learners,\" arXiv preprint arXiv:2006.10029, 2020.",
|
| 1986 |
+
"[41] P. Chen, T. Ma, X. Qin, W. Xu, and S. Zhou, \"Data-efficient semi-supervised learning by reliable edge mining,\" in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp. 9192-9201.",
|
| 1987 |
+
"[42] Y. Liu, W. Tu, S. Zhou, X. Liu, L. Song, X. Yang, and E. Zhu, \"Deep graph clustering via dual correlation reduction,\" in Proc. of AAAI, 2022.",
|
| 1988 |
+
"[43] M. Sajjadi, M. Javanmardi, and T. Tasdizen, \"Regularization with stochastic transformations and perturbations for deep semi-supervised learning,\" Advances in neural information processing systems, vol. 29, pp. 1163-1171, 2016.",
|
| 1989 |
+
"[44] T. Miyato, S.-i. Maeda, M. Koyama, and S. Ishii, “Virtual adversarial training: a regularization method for supervised and semi-supervised learning,” IEEE transactions on pattern analysis and machine intelligence, vol. 41, no. 8, pp. 1979–1993, 2018.",
|
| 1990 |
+
"[45] Q. Xie, Z. Dai, E. Hovy, M.-T. Luong, and Q. V. Le, \"Unsupervised data augmentation for consistency training,\" arXiv preprint arXiv:1904.12848, 2019.",
|
| 1991 |
+
"[46] T. Lucas, C. Tallec, Y. Ollivier, and J. Verbeek, “Mixed batches and symmetric discriminators for gan training,” in International Conference on Machine Learning. PMLR, 2018, pp. 2844–2853.",
|
| 1992 |
+
"[47] D. Hendrycks, N. Mu, E. D. Cubuk, B. Zoph, J. Gilmer, and B. Lakshminarayanan, \"Augmix: A simple data processing method to improve robustness and uncertainty,\" arXiv preprint arXiv:1912.02781, 2019.",
|
| 1993 |
+
"[48] H. Guo, “Nonlinear mixup: Out-of-manifold data augmentation for text classification,” in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, no. 04, 2020, pp. 4044–4051.",
|
| 1994 |
+
"[49] H. Guo, Y. Mao, and R. Zhang, “Augmenting data with mixup for sentence classification: An empirical study,” arXiv preprint arXiv:1905.08941, 2019.",
|
| 1995 |
+
"[50] Y. Tokozume, Y. Ushiku, and T. Harada, “Between-class learning for image classification,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 5486-5494.",
|
| 1996 |
+
"[51] D. Berthelot, C. Raffel, A. Roy, and I. Goodfellow, “Understanding and improving interpolation in autoencoders via an adversarial regularizer,” arXiv preprint arXiv:1807.07543, 2018.",
|
| 1997 |
+
"[52] Y. Netzer, T. Wang, A. Coates, A. Bissacco, B. Wu, and A. Y. Ng, \"Reading digits in natural images with unsupervised feature learning,\" 2011.",
|
| 1998 |
+
"[53] A. Krizhevsky, G. Hinton et al., “Learning multiple layers of features from tiny images,” 2009.",
|
| 1999 |
+
"[54] A. Oliver, A. Odena, C. Raffel, E. D. Cubuk, and I. J. Goodfellow, \"Realistic evaluation of deep semi-supervised learning algorithms,\" arXiv preprint arXiv:1804.09170, 2018."
|
| 2000 |
+
],
|
| 2001 |
+
"bbox": [
|
| 2002 |
+
506,
|
| 2003 |
+
71,
|
| 2004 |
+
919,
|
| 2005 |
+
922
|
| 2006 |
+
],
|
| 2007 |
+
"page_idx": 10
|
| 2008 |
+
},
|
| 2009 |
+
{
|
| 2010 |
+
"type": "page_number",
|
| 2011 |
+
"text": "11",
|
| 2012 |
+
"bbox": [
|
| 2013 |
+
906,
|
| 2014 |
+
31,
|
| 2015 |
+
919,
|
| 2016 |
+
40
|
| 2017 |
+
],
|
| 2018 |
+
"page_idx": 10
|
| 2019 |
+
}
|
| 2020 |
+
]
|
2202.11xxx/2202.11915/6d305bc4-4eda-4ae4-9704-6ea8af50946e_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.11xxx/2202.11915/6d305bc4-4eda-4ae4-9704-6ea8af50946e_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4a301ab95dbf0ad107258ee2843a873fd5f5a14d02eecc01be513c80f5128690
|
| 3 |
+
size 2162399
|
2202.11xxx/2202.11915/full.md
ADDED
|
@@ -0,0 +1,431 @@
| 1 |
+
# Interpolation-based Contrastive Learning for Few-Label Semi-Supervised Learning
|
| 2 |
+
|
| 3 |
+
Xihong Yang, Xiaochang Hu, Sihang Zhou, Xinwang Liu, En Zhu
|
| 4 |
+
|
| 5 |
+
Abstract—Semi-supervised learning (SSL) has long been proved to be an effective technique to construct powerful models with limited labels. In the existing literature, consistency regularization-based methods, which force the perturbed samples to have predictions similar to those of the original ones, have attracted much attention for their promising accuracy. However, we observe that the performance of such methods decreases drastically when the labels get extremely limited, e.g., 2 or 3 labels for each category. Our empirical study finds that the main problem lies with the drift of semantic information in the procedure of data augmentation. The problem can be alleviated when enough supervision is provided. However, when little guidance is available, the incorrect regularization would mislead the network and undermine the performance of the algorithm. To tackle the problem, we (1) propose an interpolation-based method to construct more reliable positive sample pairs; (2) design a novel contrastive loss to guide the embedding of the learned network to change linearly between samples so as to improve the discriminative capability of the network by enlarging the margin of the decision boundaries. Since no destructive regularization is introduced, the performance of our proposed algorithm is largely improved. Specifically, the proposed algorithm outperforms the second-best algorithm (CoMatch) by $5.3\%$, achieving $88.73\%$ classification accuracy when only two labels are available for each class on the CIFAR-10 dataset. Moreover, we further prove the generality of the proposed method by improving the performance of the existing state-of-the-art algorithms considerably with our proposed strategy.
|
| 6 |
+
|
| 7 |
+
Index Terms—Semi-supervised learning, contrastive learning, interpolation-based method, few-label.
|
| 8 |
+
|
| 9 |
+
# I. INTRODUCTION
|
| 10 |
+
|
| 11 |
+
In recent years, machine learning has developed rapidly and achieved remarkable performance in many fields, such as image classification [1], [2], object detection [3], [4], semantic segmentation [5], [6], and clustering [7]–[14]. Convolutional neural networks (CNNs) have attracted the attention of many researchers. The success of most of these deep neural networks depends heavily on large amounts of high-quality labeled data [2], [15], [16].
|
| 12 |
+
|
| 13 |
+
However, collecting labeled data can consume a lot of resources, which is unaffordable for countless everyday learning demands in modern society. Therefore, deep learning algorithms that can achieve satisfactory performance with tractable supervision have been a hot research topic in recent years. Specifically, deep semi-supervised learning (SSL) algorithms, which seek to improve the performance of deep learning models on datasets with only limited labeled data by leveraging large amounts of unlabeled data, are an important branch in this family. This has led to a plethora of SSL methods designed for various fields [17]–[23].
|
| 14 |
+
|
| 15 |
+

|
| 16 |
+
Figure 1: Illustration of the positive sample pair construction process. Different from the existing works which construct positive sample pairs with data augmentation, we construct positive sample pairs with interpolation operations. Specifically, given two unlabeled images, the integration of the sample embeddings '1' and the embedding of the sample integration '2' are acquired as a positive sample pair.
|
| 17 |
+
|
| 18 |
+
Among all the deep semi-supervised learning algorithms, consistency regularization-based methods treat the original input and its augmented version as positive pairs, which is a form of contrastive learning [24]–[31]. These consistency regularization-based methods follow a common assumption that even after data augmentation, the classifier should output the same class probability for an unlabeled sample, which means data augmentation does not change the semantics. The input image should be more similar to its augmented version than to other images. Under this assumption, researchers perturb the input samples by conducting data augmentation to generate samples similar to the original data.
|
| 19 |
+
|
| 20 |
+
The mentioned algorithms have contributed remarkable improvements to the learning accuracy when only a few labeled data are available. However, we observe that when the number of labeled data gets extremely small, e.g., 2 to 3 labels for each category, the performance of the existing algorithms drops drastically. For example, on the CIFAR-10 dataset, which has 50,000 training samples and 10 categories, the state-of-the-art algorithm MixMatch [28] achieves a top-1 accuracy of $86.47\%$ when 250 labeled samples are available. Nevertheless, the performance of the same algorithm drops to $50.10\%$ when only 30 labeled samples are available. A similar phenomenon happens to the Mean-Teacher [27] algorithm, whose performance drops by more than half when the label number decreases from 250 to 30. More experimental results can be found in Table I.
|
| 21 |
+
|
| 22 |
+
According to our analysis, one of the main reasons that
|
| 23 |
+
|
| 24 |
+
Table I: Classification accuracy of two state-of-the-art semi-supervised algorithms, i.e., MixMatch [28] and Mean-Teacher [27], on CIFAR-10 dataset with 30, 40, 250, 500 and 1000 labels.
|
| 25 |
+
|
| 26 |
+
<table><tr><td colspan="2">Method</td><td>30</td><td>40</td><td>250</td><td>500</td><td>1000</td></tr><tr><td>MixMatch</td><td>[28]</td><td>50.10</td><td>59.08</td><td>86.47</td><td>89.33</td><td>90.79</td></tr><tr><td>Mean-Teacher</td><td>[27]</td><td>24.51</td><td>24.93</td><td>52.49</td><td>70.15</td><td>80.12</td></tr></table>
|
| 27 |
+
|
| 28 |
+

|
| 29 |
+
Figure 2: Representative examples of semantic information drift caused by inappropriate data augmentation of MNIST samples.
|
| 30 |
+
|
| 31 |
+
causes the large performance decrease is the semantic information drift during data augmentation. Taking the samples in the MNIST dataset as an example, when a vertical flip is applied, the labels of "6"s and "9"s, and of "2"s and "5"s, can easily get changed. This challenges the rationality of the information consistency assumption of existing methods. The problem could be alleviated when relatively abundant label information is available. However, when the label information is extremely lacking, the performance of the corresponding algorithms can decrease a lot.
|
| 32 |
+
|
| 33 |
+
In this paper, to solve the problem of semantic information drift caused by data augmentation-based positive sample pair construction, we propose a novel interpolation-based positive sample pair construction fashion. Generally, our design roots from the observation that the margin of decision boundaries would get larger if the prediction of the network could change linearly [32], [33]. Under the circumstance of semi-supervised learning, when the label is extremely limited, we seek to improve the discriminative capability of the network by forcing the embedding of the network to change linearly. Specifically, given two unlabeled images, on the one hand, we embed the samples separately into the latent space. On the other hand, we conduct image-level interpolation for an integrated image and do the embedding with the same network. Then, by combining the embedding of the interpolated images with the interpolation of the embeddings, we construct a positive sample pair. In our setting, the negative sample pairs are the embedding pair of different samples. By forcing the positive sample pairs to be close to each other in the latent space and the negative sample pairs to get far away from each other, we enlarge the margin of decision boundaries, thus improving the performance of the algorithm. To achieve the goal, we further
|
| 34 |
+
|
| 35 |
+

|
| 36 |
+
Figure 3: Illustration of classification results with different data augmentations on the MNIST dataset.
|
| 37 |
+
|
| 38 |
+
propose a novel contrastive learning-based loss function to guide the network for better learning. We name the resultant algorithm Interpolation Contrastive Learning Semi-Supervised Learning (ICL-SSL).
|
| 39 |
+
|
| 40 |
+
The main contributions of this paper are listed as follows:
|
| 41 |
+
|
| 42 |
+
- We find that semantic information drift is one of the main problems that cause the performance of existing consistency regularization-based semi-supervised algorithms to decrease drastically when extremely limited labeled data is provided.
|
| 43 |
+
- We propose an interpolation-based positive sample construction method and a novel contrastive loss function to solve the problem and improve the learning accuracy.
|
| 44 |
+
- Our experimental results on the benchmark datasets verify the superior performance of the proposed algorithms against the state-of-the-art algorithms. We also show the generality of our proposed algorithm by enhancing the performance of the existing advanced algorithms steadily with our method.
|
| 45 |
+
|
| 46 |
+
# II. RELATED WORK
|
| 47 |
+
|
| 48 |
+
In this section, we first define the main notations and then review several semi-supervised learning (SSL) methods related to our method ICL-SSL.
|
| 49 |
+
|
| 50 |
+
# A. Notation Definitions
|
| 51 |
+
|
| 52 |
+
Given a dataset $\mathcal{D} = \mathcal{X} \cup \mathcal{U}$, where $\mathcal{X} = \{(x_1, y_1), \dots, (x_m, y_m)\}$ is a labeled sub-dataset, $\mathcal{U} = \{u_{m+1}, \dots, u_{m+n}\}$ is an unlabeled sub-dataset, $n \gg m$ and the labels $y$ are one-hot encoded, we define a classification model as $p(y|x; \theta)$, which outputs a distribution over class labels $y$ for an input $x$ with parameters $\theta$. The model $p(y|x; \theta)$ is the composition of an encoder network $f(\cdot)$ and a classification head $h(\cdot)$ followed by the softmax function. Meanwhile, after the encoder network $f(\cdot)$, we set a projection head $g(\cdot)$, outputting the normalized low-dimensional representation $z = g(f(\cdot))$. To simplify, $F(\cdot)$ is defined as $g(f(\cdot))$. For more detailed definitions, please refer to Table II.
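To make the notation concrete, the following is a minimal PyTorch-style sketch of how the encoder $f(\cdot)$, the classification head $h(\cdot)$ and the projection head $g(\cdot)$ could be wired together. The class name, layer sizes and the two-layer projection head are illustrative assumptions rather than the paper's exact architecture (the experiments later use a Wide-ResNet-28 backbone as the encoder).

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class SSLModel(nn.Module):
    """Wiring of the encoder f(.), classifier head h(.) and projection head g(.)."""

    def __init__(self, encoder: nn.Module, feat_dim: int, num_classes: int, proj_dim: int = 64):
        super().__init__()
        self.f = encoder                                   # encoder network f(.)
        self.h = nn.Linear(feat_dim, num_classes)          # classification head h(.), softmax applied later
        self.g = nn.Sequential(                            # projection head g(.)
            nn.Linear(feat_dim, feat_dim), nn.ReLU(), nn.Linear(feat_dim, proj_dim))

    def forward(self, x: torch.Tensor):
        feat = self.f(x)
        logits = self.h(feat)                              # feeds p(y|x; theta)
        z = F.normalize(self.g(feat), dim=1)               # z = g(f(x)), l2-normalized
        return logits, z
```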
|
| 53 |
+
|
| 54 |
+
Table II: Notation summary
|
| 55 |
+
|
| 56 |
+
<table><tr><td>Notations</td><td>Meaning</td></tr><tr><td>b</td><td>Batch size</td></tr><tr><td>C</td><td>The number of classes</td></tr><tr><td>X^B = (x_i, y_i)</td><td>Labeled batch with b samples</td></tr><tr><td>U^B = {u_1, ..., u_b}</td><td>Unlabeled batch with b samples</td></tr><tr><td>x, u ∈ RC × RH × RW</td><td>Input samples</td></tr><tr><td>y ∈ {0,1}^C</td><td>Label with C classes encoded by one-hot</td></tr><tr><td>q ∈ RC</td><td>Predicted category probability distribution</td></tr><tr><td>f(·)</td><td>The encoder network</td></tr><tr><td>h(·)</td><td>The classifier</td></tr><tr><td>g(·)</td><td>The projector head</td></tr><tr><td>z ∈ RD = g(f(x)) = F(x)</td><td>Normalized low-dimensional representation</td></tr></table>
|
| 57 |
+
|
| 58 |
+
# B. Contrastive Learning
|
| 59 |
+
|
| 60 |
+
Thanks to its ability to leverage unlabeled data for model training, contrastive learning has attracted much attention and has recently become a hot research topic [34]–[37]. It is a widely adopted form of self-supervised learning [34], [38]–[42], which can be used to optimize the task of instance discrimination. Instead of training a classifier, contrastive learning maximizes the similarities of positive pairs and minimizes the similarities of negative pairs. It is important to learn invariance across different views generated by data augmentations. The contrastive learning loss on unlabeled data can be described as follows:
|
| 61 |
+
|
| 62 |
+
$$
|
| 63 |
+
- \log \frac {\exp \left(F(DA(x_{i})) \cdot F(DA(x_{i})) / T\right)}{\sum_{j = 1}^{N} \exp \left(F(DA(x_{i})) \cdot F(DA(x_{j})) / T\right)}, \tag {1}
|
| 64 |
+
$$
|
| 65 |
+
|
| 66 |
+
where $T$ is a temperature parameter [31]. $DA(\cdot)$ denotes the stochastic data augmentation function. $F(\cdot)$ is the shorthand for the encoder network $f(\cdot)$ followed by the projection head $g(\cdot)$. Among recent methods, MoCo [35] maintains a consistent pool of negative samples by designing a memory bank. SimCLR [34] calculates the pairwise similarities among images in the same batch, pushing the negative samples away while pulling the positive samples together. Consistency regularization can be interpreted as a special form of contrastive learning in which only positive samples are included.
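As a rough sketch (not the authors' implementation), the loss in Eq. 1 can be computed over a batch as a cross-entropy against the diagonal of a similarity matrix; the function name and the `temperature` default are assumptions.

```python
import torch
import torch.nn.functional as F

def contrastive_loss(z_a: torch.Tensor, z_b: torch.Tensor, temperature: float = 0.1) -> torch.Tensor:
    """Batch version of the loss in Eq. 1 (sketch).

    z_a, z_b: (N, D) l2-normalized embeddings of two independent stochastic
    augmentations of the same N images; row i of each forms a positive pair.
    """
    logits = z_a @ z_b.t() / temperature                   # (N, N) pairwise similarities
    targets = torch.arange(z_a.size(0), device=z_a.device)
    # the diagonal holds the positives F(DA(x_i)) . F(DA(x_i)) of Eq. 1
    return F.cross_entropy(logits, targets)
```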
|
| 67 |
+
|
| 68 |
+
# C. Consistency Regularization
|
| 69 |
+
|
| 70 |
+
Consistency regularization utilizes the assumption that the classifier should output the same prediction for the unlabeled data even after it is augmented. Data augmentation is a frequent regularization technique in semi-supervised learning. Through various data augmentation methods, consistency regularization generates a copy of the sample regarded as a similar sample to the original data. In the simplest form, prior work [43] adds the following consistency regularization loss on unlabeled samples:
|
| 71 |
+
|
| 72 |
+
$$
|
| 73 |
+
\left\| p(y \mid DA(x); \theta) - p(y \mid DA(x); \theta) \right\|_{2}^{2}, \tag {2}
|
| 74 |
+
$$
|
| 75 |
+
|
| 76 |
+
where $DA(\cdot)$ is a stochastic data augmentation, so the two terms correspond to two independent augmentations of the same sample. With the use of an exponential moving average (EMA) model, Mean-Teacher [27] replaces one of the terms in Eq. 2, which provides a more stable target.
|
| 77 |
+
|
| 78 |
+
To maximally alter the output class distribution, Virtual Adversarial Training (VAT) [44] uses an adversarial transformation in place of $DA(\cdot)$. More recently, a form of consistency regularization is utilized in MixMatch [28] by using random horizontal flips and crops for the input samples. Unsupervised data augmentation (UDA) [45], ReMixMatch [30] and FixMatch [29] have been proposed with the use of weak and strong data augmentations. Generally speaking, from a weakly-augmented unlabeled sample they generate a pseudo label and enforce consistency against the strongly-augmented version of the same input. The above consistency regularization models rely on data augmentation to generate positive samples. Although promising performance has been achieved, we observe that the discriminative capability of previous methods is limited since they suffer from the semantic information drift issue, so the constructed samples are no longer similar. Instead of carefully designing data augmentations to utilize consistency regularization, we use an interpolation-based method to obtain positive pairs, which avoids the semantic information drift caused by data augmentations.
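For illustration only, the consistency term of Eq. 2 can be sketched as below; `model` and `augment` are placeholders for the classifier and the stochastic augmentation, and the reduction over the batch is our assumption.

```python
import torch
import torch.nn.functional as F

def consistency_loss(model, x: torch.Tensor, augment) -> torch.Tensor:
    """Squared-difference consistency term of Eq. 2 (sketch).

    `model` maps images to class logits and `augment` is any stochastic
    augmentation DA(.); two independent draws of the same batch are
    encouraged to receive the same class distribution.
    """
    p1 = F.softmax(model(augment(x)), dim=1)
    p2 = F.softmax(model(augment(x)), dim=1)
    return ((p1 - p2) ** 2).sum(dim=1).mean()
```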
|
| 79 |
+
|
| 80 |
+
# D. The Interpolation-based Method
|
| 81 |
+
|
| 82 |
+
Mixup [32] is an effective data augmentation strategy for image classification in computer vision [46]-[49]. It linearly interpolates the input samples and their labels in the input and label spaces:
|
| 83 |
+
|
| 84 |
+
$$
|
| 85 |
+
\begin{array}{l} \lambda \sim \mathrm{Beta}(\alpha, \beta), \\ \lambda^{\prime} = \max(\lambda, 1 - \lambda), \\ x^{\prime} = \lambda^{\prime} x_{1} + (1 - \lambda^{\prime}) x_{2}, \\ y^{\prime} = \lambda^{\prime} y_{1} + (1 - \lambda^{\prime}) y_{2}, \end{array} \tag {3}
|
| 86 |
+
$$
|
| 87 |
+
|
| 88 |
+
where $\alpha$ and $\beta$ are the parameters of the Beta distribution and $\lambda \in [0,1]$. The interpolations of input samples should lead to interpolations of the associated labels. In this manner, Mixup could extend the training distribution, and it has recently achieved state-of-the-art performance across different tasks and network architectures. In [50], the interpolations are performed in the input space. In order to improve model performance, [51] is proposed to measure the realism of latent space interpolations in unsupervised learning. [33] performs the interpolation between inputs and pseudo-labels. Although the above methods are verified to be effective, they do not change the data augmentation-based construction of positive sample pairs in consistency regularization. Therefore, how to solve the semantic information drift in consistency regularization remains an open question. Different from the above approaches, we propose an interpolation-based method termed ICL-SSL to construct positive sample pairs. Without using data augmentation to construct positive sample pairs, ICL-SSL performs interpolation on the input samples and on their representations, thus avoiding semantic information drift.
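A minimal sketch of the Mixup operation in Eq. 3 follows; a single $\alpha$ is used for both Beta parameters, as in Algorithm 1, and the function name and default value are ours.

```python
import torch

def mixup(x1: torch.Tensor, y1: torch.Tensor, x2: torch.Tensor, y2: torch.Tensor,
          alpha: float = 0.75):
    """Input/label interpolation of Eq. 3 (sketch)."""
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    lam = max(lam, 1.0 - lam)                  # lambda' = max(lambda, 1 - lambda)
    x_mix = lam * x1 + (1.0 - lam) * x2
    y_mix = lam * y1 + (1.0 - lam) * y2
    return x_mix, y_mix
```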
|
| 89 |
+
|
| 90 |
+
# III. METHOD
|
| 91 |
+
|
| 92 |
+
In this section, we introduce our proposed semi-supervised learning method. Firstly, we will explore the reason for the performance degradation under few labels via some experiments
|
| 93 |
+
|
| 94 |
+

|
| 95 |
+
Figure 4: Illustration of Interpolation-based Contrastive Learning Semi-Supervised Learning (ICL-SSL) mechanism. Following the definition in [29], Augw denotes the weak augmentation of the original input image. Augs denotes strong augmentation. Specifically, given two unlabeled images $u_{i}$ and $u_{j}$ , we firstly embed the samples separately into the latent space. Then, we conduct image-level interpolation for an integrated image and do the embedding with the same network. $z_{mix}$ and $mix_{z}$ are the positive embeddings pair constructed by ICL-SSL. By combining these two positive embeddings, we design a novel contrastive loss $L_{c}$ and force these two embeddings to change linearly between samples, improving the discriminative capability of the network. Therefore, our network would be guided to learn more discriminative embeddings with the interpolation-based method.
|
| 96 |
+
|
| 97 |
+
on the MNIST dataset. Through the exploratory experiment, we find that the semantic information of the input samples drifts after some inappropriate data augmentations, thus limiting the performance. After that, to address this issue, we introduce an interpolation-based method, ICL-SSL, to construct more reliable positive sample pairs under few labels. Finally, we detail the designed contrastive loss of ICL-SSL.
|
| 98 |
+
|
| 99 |
+
# A. Semantic information drift
|
| 100 |
+
|
| 101 |
+
Although promising performance has been achieved by the existing algorithms, we observe that when the number of labeled data gets extremely small, e.g., 2 to 3 labels for each category, the performance of the existing algorithms decreases drastically. The detailed observation is shown in Table I. Therefore, we conduct experiments to explore the reason for the performance drop.
|
| 102 |
+
|
| 103 |
+
Consistency regularization is an essential piece for many state-of-the-art semi-supervised learning methods [28]–[31]. A common assumption of consistency regularization is that the classifier should output the same class probability of an unlabeled sample even if it is augmented.
|
| 104 |
+
|
| 105 |
+
In several SSL methods [28]–[30], when the training data is not enough for generalization, data augmentation is used as a technique to apply consistency regularization. MixMatch [28] processes the input samples with random horizontal flips and random crops. In FixMatch [29], the weak data augmentation processes unlabeled samples with horizontal flips and vertical flips.
|
| 106 |
+
|
| 107 |
+
Through the experiments shown in Fig. 2, we find that some data augmentations change the semantic information of the input samples, leading to a decrease in the semantic similarity of the constructed samples and damaging the SSL training. We visualize the result of data augmentation and find that the semantic information of the input samples has been changed. Fig. 2 shows that under one data augmentation (random vertical flip), the semantic information of "7"s and "2"s, "6"s and "9"s, and "2"s and "5"s can easily get changed. As a result, the quality of the constructed positive samples decreases or the construction fails, which in turn affects the performance of the model. To further verify the effect of data augmentation, we implement experiments on the MNIST dataset.
|
| 108 |
+
|
| 109 |
+
As shown in Fig. 5(b), MNIST is a dataset composed of handwritten digits, which is commonly used in deep
|
| 110 |
+
|
| 111 |
+
Algorithm 1 Interpolation-based Contrastive Learning Semi-Supervised Learning (ICL-SSL)
|
| 112 |
+
Input: Labeled data $X = (x_{1},y_{1}),(x_{2},y_{2}),\dots,(x_{b},y_{b})$ unlabeled data $U = (u_{1},u_{2},\dots,u_{b})$ , Beta distribution parameter $\alpha$ for feature interpolation, Batch size b, Epoch number e
|
| 113 |
+
1: while $e < \mathrm{Epoch}$ do
|
| 114 |
+
2: for $i = 1$ to $b$ do
|
| 115 |
+
3: $y_{b} = p(y|x)$
|
| 116 |
+
4: $q_{i} = p(y|u_{i})$
|
| 117 |
+
5: $z_{i} = g(f(u_{i}))$
|
| 118 |
+
6: end for
|
| 119 |
+
7: for $i\in 1,\ldots ,$ b and $\mathrm{j}\in 1,\dots ,$ b do
|
| 120 |
+
8: $\lambda = \operatorname {Beta}(\alpha ,\alpha)$
|
| 121 |
+
9: $u_{mix} = \lambda *u_i + (1 - \lambda)*u_j$
|
| 122 |
+
10: $z_{mix} = g(f(u_{mix}))$
|
| 123 |
+
11: $m i x_{z} = \lambda *z_{i} + (1 - \lambda)*z_{j}$
|
| 124 |
+
12: end for
|
| 125 |
+
13: end while
|
| 126 |
+
14: Calculate classification loss via Eq.8, 10 and 11
|
| 127 |
+
|
| 128 |
+
learning research. MNIST consists of 60,000 training images and 10,000 test images. Aiming to reduce the influence of irrelevant factors (e.g., the complex structure of the training model) on the performance, we explore the semantic information drift problem caused by data augmentation with two-layer MLPs.
|
| 129 |
+
|
| 130 |
+
From the empirical analysis, we observe that the accuracy decreases by $5.0\%$ after the random horizontal flip augmentation on MNIST. Similarly, after random vertical flips, the accuracy decreases by $4.0\%$, as shown in Fig. 3. Additionally, we also explore rotation, random re-cropping and random cropping; the results show that those data augmentations also limit the performance of the model.
|
| 131 |
+
|
| 132 |
+
The experiment on MNIST illustrates that during SSL training, some inappropriate data augmentations change the semantic information of the input samples. Therefore, the semantic correlation of positive sample pairs is destroyed by inappropriate data augmentations. When the label information is lacking, the incorrect regularization caused by data augmentation would mislead the network and limit the algorithm performance.
|
| 133 |
+
|
| 134 |
+
# B. ICL-SSL
|
| 135 |
+
|
| 136 |
+
To solve the semantic information drift problem, we propose a novel interpolation-based contrastive learning semi-supervised learning method termed ICL-SSL. Specifically, ICL-SSL does not change the semantic information during the positive pair construction of consistency regularization. In the following, we first obtain the low-dimensional representation $z$ of the unlabeled samples. Then, we describe the interpolation-based positive sample pair construction method and the loss function in detail.
|
| 137 |
+
|
| 138 |
+
In our ICL-SSL method, the representations are extracted by encoder network $f(\cdot)$ . Concretely, for any two unlabeled samples $u_{i},u_{j}$ in a batch of unlabeled sub-dataset $\mathcal{U}^B$ , we could obtain their normalized representations $z_{i},z_{j}$ with $\ell^2$ -
|
| 139 |
+
|
| 140 |
+
norm:
|
| 141 |
+
|
| 142 |
+
$$
|
| 143 |
+
z _ {i} = F \left(u _ {i}\right), z _ {i} = \frac {z _ {i}}{\left\| z _ {i} \right\| _ {2}}, \tag {4}
|
| 144 |
+
$$
|
| 145 |
+
|
| 146 |
+
$$
|
| 147 |
+
z _ {j} = F (u _ {j}), z _ {j} = \frac {z _ {j}}{| | z _ {j} | | _ {2}},
|
| 148 |
+
$$
|
| 149 |
+
|
| 150 |
+
where $F(\cdot)$ is defined as $g(f(\cdot))$ , a simple form of encoder network $f(\cdot)$ and project head $g(\cdot)$ .
|
| 151 |
+
|
| 152 |
+
After that, we perform interpolation operations on the normalized low-dimensional feature representations $z_{i}$ and $z_{j}$ .
|
| 153 |
+
|
| 154 |
+
$$
|
| 155 |
+
\begin{array}{l} m i x _ {z} = \lambda z _ {i} + (1 - \lambda) z _ {j} \tag {5} \\ = \lambda F (u _ {i}) + (1 - \lambda) F (u _ {j}), \\ \end{array}
|
| 156 |
+
$$
|
| 157 |
+
|
| 158 |
+
where $mix_{z}$ denotes the interpolated representation of $z_{i}$ and $z_{j}$ , $\lambda$ is generated by Beta distribution. Simultaneously, unlike the above steps, we first perform an interpolation operation in the sample space $(u_{i}, u_{j})$ and then get the normalized low-dimensional feature:
|
| 159 |
+
|
| 160 |
+
$$
|
| 161 |
+
z _ {m i x} = F (\lambda u _ {i} + (1 - \lambda) u _ {j}), \tag {6}
|
| 162 |
+
$$
|
| 163 |
+
|
| 164 |
+
where $z_{\text{mix}}$ is the representation of interpolated input data $u_i$ and $u_j$ . The constructed positive sample pair can be presented as follows:
|
| 165 |
+
|
| 166 |
+
$$
|
| 167 |
+
[ m i x _ {z}, z _ {m i x} ]. \tag {7}
|
| 168 |
+
$$
|
| 169 |
+
|
| 170 |
+
The framework of our proposed ICL-SSL is shown in Fig. 4. ICL-SSL is a semantic-agnostic positive sample construction method. Specifically, we generate one positive sample, $z_{\text{mix}}$, by embedding the interpolation of the two inputs, and the other, $\text{mix}_z$, by interpolating the two features of the inputs. By this setting, both of these positive samples contain the original semantic information of each input $(u_i, u_j)$. It has been demonstrated in [32], [33] that the interpolation operation pushes the decision boundaries away from the class boundaries. In this manner, with the utilization of our ICL-SSL, the margin of the decision boundaries would get larger, thus improving the discriminative capability of the network under few labels.
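As a hedged sketch of the construction in Eqs. 4-7, the positive pair can be built as below. Treating $u_i$ and $u_j$ as two unlabeled batches (rather than the explicit double loop of Algorithm 1) is our simplification, and the default `alpha` is an assumption.

```python
import torch
import torch.nn.functional as F

def interpolation_pair(F_net, u_i: torch.Tensor, u_j: torch.Tensor, alpha: float = 0.75):
    """Construct the positive pair [mix_z, z_mix] of Eqs. 4-7 (sketch).

    F_net is the composition g(f(.)); u_i and u_j are two batches of
    unlabeled images; alpha is the Beta parameter of Algorithm 1.
    """
    lam = torch.distributions.Beta(alpha, alpha).sample().item()
    z_i = F.normalize(F_net(u_i), dim=1)                              # Eq. 4
    z_j = F.normalize(F_net(u_j), dim=1)
    mix_z = lam * z_i + (1.0 - lam) * z_j                             # Eq. 5: interpolate the embeddings
    z_mix = F.normalize(F_net(lam * u_i + (1.0 - lam) * u_j), dim=1)  # Eq. 6: embed the interpolated images
    return mix_z, z_mix
```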
|
| 171 |
+
|
| 172 |
+
# C. Loss function
|
| 173 |
+
|
| 174 |
+
The loss of ICL-SSL mainly consists of three parts: the supervised classification loss $L_{x}$ , the unsupervised classification loss $L_{u}$ and the contrastive loss $L_{c}$ .
|
| 175 |
+
|
| 176 |
+
In detail, $L_{x}$ is the supervised classification loss on the labeled data, which is defined as the cross-entropy between the ground-truth labels and the model's predictions:
|
| 177 |
+
|
| 178 |
+
$$
|
| 179 |
+
L _ {x} = \frac {1}{B} \sum_ {b = 1} ^ {B} H \left(y _ {b}, p \left(y \mid x _ {b}\right)\right), \tag {8}
|
| 180 |
+
$$
|
| 181 |
+
|
| 182 |
+
where $x_{b}$ denotes the labeled data in $\mathcal{X}^B$ . $p(y|x_b)$ is the output of the classifier. $H$ is the cross-entropy between the two distributions $y_{b}$ and $p(y|x_b)$ .
|
| 183 |
+
|
| 184 |
+

|
| 185 |
+
(a) CIFAR-10
|
| 186 |
+
|
| 187 |
+

|
| 188 |
+
(b) MNIST
|
| 189 |
+
|
| 190 |
+

|
| 191 |
+
(c) SVHN
|
| 192 |
+
Figure 5: Illustration of the CIFAR-10, MNIST, SVHN and CIFAR-100 datasets.
|
| 193 |
+
|
| 194 |
+

|
| 195 |
+
(d) CIFAR-100
|
| 196 |
+
|
| 197 |
+
For the unlabeled data, its pseudo label $\hat{q}_b$ is generated by the classification head $h(\cdot)$ and the Softmax function. The formula can be described as:
|
| 198 |
+
|
| 199 |
+
$$
|
| 200 |
+
\hat {q} _ {b} = \operatorname {S o f t m a x} (h (f (u _ {b}))). \tag {9}
|
| 201 |
+
$$
|
| 202 |
+
|
| 203 |
+
$L_{u}$ is defined as the cross-entropy between the pseudo-labels and the model's predictions. It can be calculated by:
|
| 204 |
+
|
| 205 |
+
$$
|
| 206 |
+
L _ {u} = \frac {1}{\mu B} \sum_ {b = 1} ^ {\mu B} \ell \left(\max \left(\hat {q} _ {b}\right) \geq \tau\right) H \left(\hat {q} _ {b}, p (y \mid u _ {b})\right), \tag {10}
|
| 207 |
+
$$
|
| 208 |
+
|
| 209 |
+
where $\hat{q}_b$ is the predicted class probability distribution used as the pseudo label. $\ell(\cdot)$ acts as an indicator that equals 1 when the largest class probability exceeds the threshold $\tau$ and 0 otherwise, so the loss is only computed on confidently predicted unlabeled samples. Meanwhile, $\mu$ controls the relative size of the unlabeled batch. $H$ is the cross-entropy between $\hat{q}_b$ and $p(y|u_b)$.
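The thresholded pseudo-label loss of Eqs. 9-10 can be sketched as follows. Splitting each unlabeled sample into a weakly and a strongly augmented view, as drawn in Fig. 4, is our reading, since Eq. 10 only writes $p(y \mid u_b)$; the default $\tau = 0.95$ follows the setting reported in Section IV-A.

```python
import torch
import torch.nn.functional as F

def unsupervised_loss(logits_weak: torch.Tensor, logits_strong: torch.Tensor,
                      tau: float = 0.95) -> torch.Tensor:
    """Confidence-masked pseudo-label loss of Eqs. 9-10 (sketch)."""
    with torch.no_grad():
        probs = F.softmax(logits_weak, dim=1)              # q_hat_b of Eq. 9
        max_prob, pseudo = probs.max(dim=1)
        mask = (max_prob >= tau).float()                   # indicator max(q_hat_b) >= tau
    per_sample = F.cross_entropy(logits_strong, pseudo, reduction="none")
    return (mask * per_sample).mean()
```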
|
| 210 |
+
|
| 211 |
+
Through the positive sample pairs $[mix_z, z_{mix}]$ constructed by interpolation strategy, the contrastive loss can be computed as:
|
| 212 |
+
|
| 213 |
+
$$
|
| 214 |
+
L_{c} = - \log \frac {\exp \left(\left(mix_{z} \cdot z_{mix}\right) / T\right)}{\sum_{k = 1}^{B} I_{[k \neq i]} \exp \left(\left(mix_{z} \cdot z_{k}\right) / T\right)}, \tag {11}
|
| 215 |
+
$$
|
| 216 |
+
|
| 217 |
+
where $T$ is a temperature parameter. Similar to SimCLR [34], we do not sample negative samples explicitly. Instead, we treat the other examples within a minibatch as negative samples. $I_{[k\neq i]} \in \{0,1\}$ is an indicator function whose value is 1 when $k \neq i$ and 0 otherwise. The similarity between positive samples is measured by the inner product. This loss is calculated across all positive samples in a batch. The contrastive loss encourages the model to produce similar representations for positive samples and pushes the negative samples away. The relation of the embeddings changes linearly due to the proposed positive sample pair construction method. By minimizing Eq. 11, the margin of the decision boundaries will be enlarged, thus improving the discriminative capability of the network.
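A sketch of the contrastive term $L_c$ over a batch is given below; which embeddings serve as negatives in the denominator of Eq. 11 is our reading of the text (the other samples' embeddings in the minibatch), and the temperature default is an assumption.

```python
import torch

def interpolation_contrastive_loss(mix_z: torch.Tensor, z_mix: torch.Tensor,
                                   z: torch.Tensor, temperature: float = 0.1) -> torch.Tensor:
    """Contrastive term L_c of Eq. 11 (sketch).

    mix_z, z_mix: (N, D) positive pair from the interpolation construction.
    z:            (N, D) per-sample embeddings of the minibatch, used as negatives.
    """
    pos = (mix_z * z_mix).sum(dim=1) / temperature         # inner products of the positive pairs
    sim = mix_z @ z.t() / temperature                       # similarities to all batch embeddings
    eye = torch.eye(z.size(0), dtype=torch.bool, device=z.device)
    denom = torch.logsumexp(sim.masked_fill(eye, float("-inf")), dim=1)  # sum over k != i
    return (denom - pos).mean()                             # -log(exp(pos) / sum of negatives)
```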
|
| 218 |
+
|
| 219 |
+
In summary, the loss function of ICL-SSL can be computed by:
|
| 220 |
+
|
| 221 |
+
$$
|
| 222 |
+
L = L _ {x} + L _ {u} + \alpha L _ {c}, \tag {12}
|
| 223 |
+
$$
|
| 224 |
+
|
| 225 |
+
where $L_{x}$ represents the supervised loss and $L_{u}$ is the unsupervised loss. $\alpha$ is a trade-off hyper-parameter to control
|
| 226 |
+
|
| 227 |
+
Table III: Dataset summary
|
| 228 |
+
|
| 229 |
+
<table><tr><td>Dataset</td><td>Size</td><td>Train Set</td><td>Test Set</td><td>Class</td><td>Type</td></tr><tr><td>SVHN</td><td>32 × 32</td><td>73257</td><td>26032</td><td>10</td><td>image</td></tr><tr><td>MNIST</td><td>28 × 28</td><td>60000</td><td>10000</td><td>10</td><td>image</td></tr><tr><td>CIFAR-10</td><td>32 × 32</td><td>50000</td><td>10000</td><td>10</td><td>image</td></tr><tr><td>CIFAR-100</td><td>32 × 32</td><td>50000</td><td>10000</td><td>100</td><td>image</td></tr></table>
|
| 230 |
+
|
| 231 |
+
the weight of the contrastive loss $L_c$ in the total objective. The detailed learning procedure of ICL-SSL is shown in Algorithm 1.
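Putting the pieces together, one training iteration of Algorithm 1 could look roughly as follows. This sketch reuses the helper functions from the sketches above; how the unlabeled batch is split into the weak/strong views and into the pair $(u_i, u_j)$ is our assumption, and $\alpha = 0.5$ and $\tau = 0.95$ follow the settings reported in Section IV-A.

```python
import torch
import torch.nn.functional as F

def training_step(classifier, F_net, x, y, u_weak, u_strong, u_i, u_j,
                  alpha=0.5, tau=0.95, temperature=0.1):
    """One ICL-SSL update following Algorithm 1 and Eq. 12 (sketch).

    `classifier` returns class logits, `F_net` is g(f(.)), `y` holds class indices.
    Relies on unsupervised_loss, interpolation_pair and
    interpolation_contrastive_loss defined in the earlier sketches.
    """
    L_x = F.cross_entropy(classifier(x), y)                                   # Eq. 8
    L_u = unsupervised_loss(classifier(u_weak), classifier(u_strong), tau)    # Eq. 10
    mix_z, z_mix = interpolation_pair(F_net, u_i, u_j)                        # Eqs. 4-7
    z = F.normalize(F_net(u_i), dim=1)                                        # batch embeddings as negatives
    L_c = interpolation_contrastive_loss(mix_z, z_mix, z, temperature)        # Eq. 11
    return L_x + L_u + alpha * L_c                                            # Eq. 12
```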
|
| 232 |
+
|
| 233 |
+
# IV. EXPERIMENT
|
| 234 |
+
|
| 235 |
+
We evaluate the effectiveness of ICL-SSL on several semi-supervised learning benchmarks. We focus on the most challenging label-scarce scenario where few labels are available, e.g., 2 or 3 labels for each category. At the same time, our ablation study teases apart the contribution of the ICL-SSL components. In addition, we further verify the generality of the proposed method by improving the performance of the existing state-of-the-art algorithms considerably with our proposed strategy.
|
| 236 |
+
|
| 237 |
+
# A. Implementation details
|
| 238 |
+
|
| 239 |
+
1) Datasets & Metric: The proposed algorithms are experimentally evaluated on SVHN [52], CIFAR-10 [53] and CIFAR-100 [53] datasets.
|
| 240 |
+
|
| 241 |
+
- The CIFAR-10 dataset consists of 60000 images of size $32 \times 32$. The training set of CIFAR-10 consists of 50000 images and the test set consists of 10000 images. The dataset includes ten classes, covering natural objects such as horse, deer, frog, car and airplane.
|
| 242 |
+
- The CIFAR-100 dataset is similar to the CIFAR-10 dataset and contains 60000 images of the size $32 \times 32$ . The 100 classes in the CIFAR-100 are grouped into 20 superclasses. Each class consists of 500 training images and 100 testing images.
|
| 243 |
+
|
| 244 |
+
- The SVHN dataset includes 73257 training data and 26032 test data of size $32 \times 32$ . Besides, each example is a close-up image of house numbers from 0 to 9.
|
| 245 |
+
|
| 246 |
+
Detailed dataset statistics are summarized in Table III. We use the accuracy metric to evaluate the classification performance.
|
| 247 |
+
|
| 248 |
+
2) Experiment Settings: All experiments are implemented with an NVIDIA 1080Ti GPU on the PyTorch platform. Following standard SSL evaluation protocols, we evaluate our method on standard SSL benchmarks with the "Wide-ResNet-28" model from [54]. Compared with other methods, our model focuses on the challenging label-scarce scenario, e.g., 2 or 3 labels for each category. For the CIFAR-10 and SVHN datasets, we train for 300 epochs until convergence with a batch size of 64. Due to limited computing resources, the batch size of all comparison experiments on the CIFAR-100 dataset is set to 16. The weight parameter $\alpha$ controlling the contrastive loss is set to 0.5, and the parameter $\mu$ controlling the unlabeled batch size is set to 1. The learning rate is set to 0.03 for CIFAR-10, CIFAR-100 and SVHN. The threshold $\tau$ is set to 0.95. Besides, our network is trained using the SGD optimizer. For our proposed method, we adopt the source data of CoMatch [31]. To alleviate the impact of randomness, we evaluate the models on 5 runs for each number of labeled points with different random seeds.
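For convenience, the settings reported in this subsection can be collected into a single configuration; the field names are ours.

```python
# Hyper-parameters as reported above (field names are ours, values from the text).
config = {
    "backbone": "Wide-ResNet-28",
    "epochs": 300,
    "batch_size": 64,        # 16 for the CIFAR-100 comparison experiments
    "mu": 1,                 # ratio controlling the unlabeled batch size
    "alpha": 0.5,            # weight of the contrastive loss in Eq. 12
    "tau": 0.95,             # confidence threshold of Eq. 10
    "learning_rate": 0.03,
    "optimizer": "SGD",
    "runs_per_setting": 5,
}
```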
|
| 249 |
+
|
| 250 |
+
In Sub-Section "Transfer to other models", the algorithms are implemented with an NVIDIA 1080Ti GPU on PyTorch platform with 40, 250, 500, and 1000 labels on CIFAR-10 dataset. Three state-of-the-art algorithms are compared in our transferring experiments, including MixMatch [28], Mean-Teacher [27] and VAT [44]. For those algorithms, we reproduce results by adopting their source code with the original settings. The code for the compared algorithms can be downloaded from the authors' website: MixMatch<sup>1</sup>, MeanTeacher<sup>2</sup>, VAT<sup>3</sup>. Specifically, the training epoch is set as 300. The learning rate of the optimizer is set as 0.002 for MixMatch, 0.003 for Mean-Teacher, and 0.01 for VAT.
|
| 251 |
+
|
| 252 |
+
# B. Comparison with the State-of-the-Art Algorithms
|
| 253 |
+
|
| 254 |
+
In this section, six state-of-the-art semi-supervised algorithms are compared to verify the effectiveness of ICL-SSL. The information for the compared algorithms is listed as follows:
|
| 255 |
+
|
| 256 |
+
(1) CoMatch [31]: The class probabilities and low-dimensional embeddings are jointly learned in CoMatch. Through imposing a smoothness constraint to the class probabilities, the quality of pseudo labels could be improved. Overall, CoMatch combines the pseudo-based model, the contrast-loss-based model and the graph-based model to improve the model performance in the case of few labels.
|
| 257 |
+
(2) FixMatch [29]: For each unlabeled image, FixMatch utilizes weak augmentation to generate a pseudo label, which is kept only when the prediction is of high confidence. The network is then trained to
|
| 258 |
+
|
| 259 |
+
predict the pseudo label with the strongly augmented version of the same image.
|
| 260 |
+
|
| 261 |
+
(3) MixMatch [28]: MixMatch jointly optimizes two losses: the supervised loss and the unsupervised loss. In detail, cross-entropy is chosen for the supervised loss. The unsupervised loss is the mean square error (MSE) between predictions and generated pseudo labels. MixMatch constructs pseudo labels by data augmentation. With the use of the sharpening function $\text{Sharpen}(\cdot)$, MixMatch could improve the quality of pseudo labels. In addition, Mixup is added in the training process, which can construct virtual samples through interpolation.
|
| 262 |
+
(4) Virtual Adversarial Training(VAT) [44]: VAT is based on data perturbation. It replaces data augmentation with adversarial transformations. The adversarial transformation can lead to a lower classification error.
|
| 263 |
+
(5) $\pi$ -model [26]: For the same image, data augmentation is used to apply consistency regularization. The loss of $\pi$ -model contains the supervised loss and the unsupervised loss. Specifically, the supervised loss is defined as the cross-entropy loss, and the unsupervised loss is the unsupervised consistency loss.
|
| 264 |
+
(6) Mean-Teacher [27]: Mean-Teacher is a student-teacher approach for SSL. The teacher model is based on the average weights of the student model at each update step. In Mean-Teacher, the mean square error (MSE) is used as the consistency loss between the two predictions. Besides, it uses an exponential moving average (EMA) to update the teacher; because the EMA is only updated once per epoch, this controls the model update speed.
|
| 265 |
+
|
| 266 |
+
# C. Performance Comparison
|
| 267 |
+
|
| 268 |
+
1) CIFAR-10: To demonstrate the superiority of ICL-SSL, we conduct performance comparison experiments for our proposed ICL-SSL and 4 baselines, including Mean-Teacher [27], MixMatch [28], FixMatch [29] and CoMatch [31]. For the CIFAR-10 dataset, we evaluate the accuracy of the above methods with a varying number of labeled data from 20 to 40. The results are reported in Table IV. For fairness, we create 5 runs for each number of labeled points with different random seeds to alleviate the influence of randomness. We can observe that our method ICL-SSL outperforms all other methods by a significant margin. Taking the result with only 2 labeled samples per class as an example, ICL-SSL reaches an accuracy of $88.73\%$. For comparison, at 20 labels the second-best algorithm (CoMatch [31]) achieves an accuracy of $83.43\%$, which is $5.30\%$ lower than ICL-SSL. ICL-SSL can achieve higher accuracy while using fewer labels.
|
| 269 |
+
2) SVHN: Moreover, we implement comparison experiments on the SVHN dataset. The comparison algorithms contain the $\pi$-model [26], Mean-Teacher [27], MixMatch [28] and FixMatch [29]. The number of labels ranges from 250 to 1000. The results can be seen in Table V. With different random seeds, we evaluate the models on 5 runs for each number of labeled data. We observe that ICL-SSL outperforms all compared methods on SVHN with 250, 500, and 1000 labeled data. For example, ICL-SSL exceeds MixMatch by $3.46\%$ with 250 labels.
|
| 270 |
+
|
| 271 |
+

|
| 272 |
+
(a)
|
| 273 |
+
|
| 274 |
+

|
| 275 |
+
(b)
|
| 276 |
+
|
| 277 |
+

|
| 278 |
+
(c)
|
| 279 |
+
|
| 280 |
+

|
| 281 |
+
(d)
|
| 282 |
+
|
| 283 |
+

|
| 284 |
+
(e)
|
| 285 |
+
|
| 286 |
+

|
| 287 |
+
(f)
|
| 288 |
+
|
| 289 |
+

|
| 290 |
+
(g)
|
| 291 |
+
Figure 6: Performance variation when the number of labeled data changes from 40 to 1000 on the CIFAR-10 dataset. (a) - (d) are the classification accuracy of Mean-Teacher [27] and (e) - (h) are the results of MixMatch [28]. The blue curve denotes the accuracy enhanced by our positive sample pair construction mechanism, and the red curve represents the accuracy of the original model.
|
| 292 |
+
|
| 293 |
+

|
| 294 |
+
(h)
|
| 295 |
+
|
| 296 |
+

|
| 297 |
+
(a) MixMatch
|
| 298 |
+
Figure 7: Performance comparison of different state-of-the-art methods on CIFAR-10 dataset with a varying number of labeled data. The blue curve denotes the accuracy enhanced by our positive sample pair construction mechanism, and red curve represents the accuracy of the original model, respectively.
|
| 299 |
+
|
| 300 |
+

|
| 301 |
+
(b) Mean-Teacher
|
| 302 |
+
|
| 303 |
+

|
| 304 |
+
(c) VAT
|
| 305 |
+
|
| 306 |
+
3) CIFAR-100: To further investigate the effectiveness of our proposed model, we conduct experiments on the CIFAR-100 dataset. Table V reports the performance of the four methods with 200, 400, and 1000 labels. From those results, we can observe that our proposed ICL-SSL achieves better performance compared with other state-of-the-art algorithms. Taking the result with 200 labels as an example, ICL-SSL exceeds FixMatch [29] by $4.75\%$.
|
| 307 |
+
|
| 308 |
+
Through the above experiments, our method outperforms all the existing methods in the case of few labels. The reason is
|
| 309 |
+
|
| 310 |
+
that other methods use data augmentation to generate positive sample pairs, easily leading to incorrect regularization. Different from them, our ICL-SSL aims to improve the discriminative capability from two aspects. Firstly, we propose an interpolation-based method to construct more reliable positive sample pairs, thus alleviating the incorrect regularization. Additionally, we design a contrastive loss to guide the embedding to change linearly between samples, which enlarges the margin of the decision boundaries. In summary, our proposed ICL-SSL improves the discriminative capability of the network and
|
| 311 |
+
|
| 312 |
+
Table IV: Accuracy comparison with other state-of-the-art methods on five different folds, including Mean-Teacher [27], MixMatch [28], FixMatch [29] and CoMatch [31] on CIFAR-10 dataset. The red and blue values indicate the best and the runner-up results.
|
| 313 |
+
|
| 314 |
+
<table><tr><td>Method</td><td></td><td>20 labels</td><td>30 labels</td><td>40 labels</td></tr><tr><td>Mean-Teacher</td><td>[27]</td><td>21.79±0.57</td><td>24.51±0.35</td><td>24.93±0.62</td></tr><tr><td>MixMatch</td><td>[28]</td><td>38.51±8.48</td><td>50.10±5.81</td><td>59.08±3.04</td></tr><tr><td>FixMatch</td><td>[29]</td><td>72.63±5.37</td><td>86.65±3.56</td><td>89.69±4.58</td></tr><tr><td>CoMatch</td><td>[31]</td><td>83.43±9.20</td><td>88.68±3.79</td><td>90.14±2.86</td></tr><tr><td>ICL-SSL</td><td>Ours</td><td>88.73±5.69</td><td>90.30±3.10</td><td>91.78±2.23</td></tr></table>
|
| 315 |
+
|
| 316 |
+
achieves top-level performance on the CIFAR-10, SVHN, and CIFAR-100 datasets.
|
| 317 |
+
|
| 318 |
+
# D. Time Cost
|
| 319 |
+
|
| 320 |
+
As shown in Table VIII, we compare the training and inference time of ICL-SSL and other state-of-the-art algorithms, including MixMatch [28], FixMatch [29], and CoMatch [31]. The results are the average training time over 300 epochs with 20 labels on the CIFAR-10 dataset. We observe that the training and inference time of ICL-SSL are 193.81 seconds and 1.08 seconds, respectively. From Table VIII we find that the computational efficiency of the proposed algorithm is comparable to that of MixMatch and FixMatch, and the algorithm is much faster than CoMatch.
|
| 321 |
+
|
| 322 |
+
# E. Ablation Study
|
| 323 |
+
|
| 324 |
+
In this section, we conduct extensive ablation studies to examine the effect of different components in ICL-SSL. Due to the large number of experiments, we perform the study with 20 and 40 labels split from the CIFAR-10 dataset. The parameter settings are kept the same as in the comparison experiments, and the results are shown in Table VII.
|
| 325 |
+
|
| 326 |
+
# Effectiveness of the contrastive loss
|
| 327 |
+
|
| 328 |
+
To further investigate the benefit of the proposed contrastive loss, we compare our method against FixMatch [29], which we take as the baseline. The experimental results show that, in the case of few labels, our model outperforms the baseline. Taking the result on CIFAR-10 with 20 labels as an example, the accuracy exceeds the baseline by $16.1\%$. From the empirical analysis, this gain comes from the contrastive loss guiding the embeddings of the network to change linearly between samples, which improves the discriminative capability of the network.
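For reference, a standard way to realize such a contrastive objective over the interpolated pairs is an InfoNCE-style loss, sketched below; this is a generic formulation under assumed names (`z_mix`, `z_target`, `temperature`), not necessarily the exact loss of ICL-SSL.

```python
import torch
import torch.nn.functional as F

def contrastive_loss(z_mix, z_target, temperature=0.5):
    """InfoNCE-style contrastive loss (generic sketch): each mixed embedding is
    pulled toward its own interpolated target and pushed away from the targets
    of the other samples in the batch."""
    z_mix = F.normalize(z_mix, dim=1)
    z_target = F.normalize(z_target, dim=1)

    # Pairwise cosine similarities between anchors and targets, scaled by temperature.
    logits = z_mix @ z_target.t() / temperature

    # The positive for the i-th anchor is the i-th target (the diagonal entry).
    labels = torch.arange(z_mix.size(0), device=z_mix.device)
    return F.cross_entropy(logits, labels)
```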
|
| 329 |
+
|
| 330 |
+
# Effectiveness of the interpolation-based positive sample construction method
|
| 331 |
+
|
| 332 |
+
Additionally, we verify the effectiveness of the interpolation-based positive sample construction method. As shown in Table VII, removing it decreases the accuracy from $88.73\%$ to $56.91\%$ with 20 labels. The above experiments
|
| 333 |
+
|
| 334 |
+

|
| 335 |
+
Figure 8: Sensitivity analysis of the hyper-parameter $\alpha$ on CIFAR-10 dataset with 20 labels.
|
| 336 |
+
|
| 337 |
+
demonstrate the effectiveness of the interpolation-based positive sample construction method.
|
| 338 |
+
|
| 339 |
+
# F. Sensitivity Analysis
|
| 340 |
+
|
| 341 |
+
Further, we investigate the effect of the hyper-parameter $\alpha$. As shown in Fig. 8, the classification accuracy does not fluctuate greatly as $\alpha$ varies. This demonstrates that our model ICL-SSL is insensitive to the variation of the hyper-parameter $\alpha$.
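Assuming that $\alpha$ parameterizes the Beta distribution from which the interpolation coefficient is drawn, as in mixup-style methods [32] (an assumption on our part, matching the sketch above), its effect can be inspected directly by sampling:

```python
import torch

# Larger alpha concentrates the mixing coefficient around 0.5, while smaller
# alpha pushes it toward 0 or 1 (i.e., almost no mixing).
for alpha in (0.1, 0.5, 0.75, 1.0, 2.0):
    lam = torch.distributions.Beta(alpha, alpha).sample((10000,))
    print(f"alpha={alpha}: mean={lam.mean().item():.3f}, std={lam.std().item():.3f}")
```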
|
| 342 |
+
|
| 343 |
+
# G. Transferring to other models
|
| 344 |
+
|
| 345 |
+
To verify the generality of our proposed ICL-SSL, we transfer our method to existing state-of-the-art algorithms. We integrate our method into other semi-supervised learning models (MixMatch [28], VAT [44], and Mean-Teacher [27]). All the experiments are conducted on the CIFAR-10 dataset with 40, 250, 500, and 1000 labeled samples. Here, we denote the baseline and the baseline with our method as “B” and “ $\mathbf{B} + \mathbf{O}$ ”, respectively.
|
| 346 |
+
|
| 347 |
+
From Fig. 7, we observe the following: 1) The models achieve better performance with our method. 2) As shown in Table VI, taking the results of MixMatch [28] as an example, our method improves the classification accuracy by $4.02\%$ with 40 labels and by $2.14\%$ with 500 labels on the CIFAR-10 dataset. In conclusion, the experimental results show that ICL-SSL can improve the performance of other semi-supervised models. Moreover, in Fig. 6, we further show that other state-of-the-art methods obtain higher accuracy with our proposed strategy during the training process.
|
| 348 |
+
|
| 349 |
+
# V. CONCLUSION
|
| 350 |
+
|
| 351 |
+
In this work, we propose an interpolation-based method termed ICL-SSL to construct reliable positive sample pairs, thus alleviating the semantic information drift with extremely few labels (e.g., 2 or 3 labels for each class). Specifically, ICL-SSL is a semantic-agnostic method. We interpolate the input images and their representations at the image level and in the latent space, respectively. Besides, the designed contrastive loss will
|
| 352 |
+
|
| 353 |
+
Table V: Accuracy comparison with the $\pi$ model [26], Mean-Teacher [27], MixMatch [28], and FixMatch [29] on the CIFAR-100 and SVHN datasets. The red and blue values indicate the best and the runner-up results. The average and standard deviation over five-fold cross-validation are reported.
|
| 354 |
+
|
| 355 |
+
<table><tr><td rowspan="2" colspan="2">Method</td><td colspan="3">CIFAR 100</td><td colspan="3">SVHN</td></tr><tr><td>200 labels</td><td>400 labels</td><td>800 labels</td><td>250 labels</td><td>500 labels</td><td>1000 labels</td></tr><tr><td>π Model</td><td>[26]</td><td>8.53±0.25</td><td>11.67±0.37</td><td>17.64±1.06</td><td>42.66±0.91</td><td>53.33±1.39</td><td>65.90±0.03</td></tr><tr><td>Mean-Teacher</td><td>[27]</td><td>7.11±0.06</td><td>11.54±0.28</td><td>17.82±0.09</td><td>42.70±1.79</td><td>55.71±0.53</td><td>67.71±1.22</td></tr><tr><td>MixMatch</td><td>[28]</td><td>4.55±0.45</td><td>17.68±0.07</td><td>26.75±1.13</td><td>92.12±0.06</td><td>94.53±0.43</td><td>95.13±0.04</td></tr><tr><td>FixMatch</td><td>[29]</td><td>9.31±0.08</td><td>24.44±0.35</td><td>28.12±0.30</td><td>95.45±0.07</td><td>95.73±0.15</td><td>95.94±0.10</td></tr><tr><td>ICL-SSL</td><td>Ours</td><td>14.06±0.52</td><td>26.52±1.20</td><td>33.81±0.63</td><td>95.58±0.14</td><td>95.80±0.12</td><td>96.05±0.14</td></tr></table>
|
| 356 |
+
|
| 357 |
+
Table VI: Applying the interpolation-based positive sample pair construction mechanism to other state-of-the-art semi-supervised learning algorithms, including MixMatch [28], VAT [44], and Mean-Teacher [27], on the CIFAR-10 dataset. The blue values represent the results enhanced by our positive sample pair construction mechanism, and the black values are the results of the original model. 'B' and 'B+O' represent the baseline and the baseline with our method, respectively.
|
| 358 |
+
|
| 359 |
+
<table><tr><td rowspan="2" colspan="2">Method</td><td colspan="2">40 labels</td><td colspan="2">250 labels</td><td colspan="2">500 labels</td><td colspan="2">1000 labels</td></tr><tr><td>B</td><td>B+O</td><td>B</td><td>B+O</td><td>B</td><td>B+O</td><td>B</td><td>B+O</td></tr><tr><td>VAT</td><td>[44]</td><td>20.00</td><td>23.00</td><td>34.00</td><td>41.00</td><td>47.00</td><td>48.00</td><td>61.00</td><td>66.00</td></tr><tr><td>MixMatch</td><td>[28]</td><td>57.86</td><td>61.88</td><td>86.06</td><td>86.50</td><td>87.00</td><td>89.14</td><td>90.46</td><td>91.56</td></tr><tr><td>Mean-Teacher</td><td>[27]</td><td>24.86</td><td>26.24</td><td>42.88</td><td>45.58</td><td>53.40</td><td>54.90</td><td>66.98</td><td>68.48</td></tr></table>
|
| 360 |
+
|
| 361 |
+
Table VII: Ablation comparisons of the ICL-SSL mechanism. The results are reported with 20 and 40 labels on the CIFAR-10 dataset.
|
| 362 |
+
|
| 363 |
+
<table><tr><td>Ablation</td><td>20 labels</td><td>40 labels</td></tr><tr><td>ICL-SSL</td><td>88.73</td><td>91.78</td></tr><tr><td>ICL-SSL without contrastive loss</td><td>72.63</td><td>89.69</td></tr><tr><td>ICL-SSL without the interpolation-based method</td><td>56.91</td><td>70.89</td></tr></table>
|
| 364 |
+
|
| 365 |
+
Table VIII: Training and inference time comparison on CIFAR-10 dataset with 20 labels.
|
| 366 |
+
|
| 367 |
+
<table><tr><td>Method</td><td></td><td>Training Time (s)</td><td>Inference Time (s)</td></tr><tr><td>MixMatch</td><td>[28]</td><td>121.50 ± 0.21</td><td>0.60 ± 0.13</td></tr><tr><td>FixMatch</td><td>[29]</td><td>155.37 ± 0.10</td><td>3.46 ± 0.07</td></tr><tr><td>CoMatch</td><td>[31]</td><td>571.65 ± 0.15</td><td>1.30 ± 0.03</td></tr><tr><td>ICL-SSL</td><td>Ours</td><td>193.81 ± 0.14</td><td>1.08 ± 0.62</td></tr></table>
|
| 368 |
+
|
| 369 |
+
guide the embeddings to change linearly between samples and thus enlarge the margin of the decision boundary. Benefiting from this mechanism, the discriminative capability of the network can be improved with extremely few labels. Extensive experiments demonstrate the effectiveness and generality of our ICL-SSL. In the future, we will try to extend ICL-SSL to other fields (e.g., graph-based semi-supervised node classification). Besides, as
|
| 370 |
+
|
| 371 |
+
we analyzed in Section IV-D, although our proposed algorithm is as efficient as other state-of-the-art contrastive algorithms, its efficiency still needs to be improved to suit even larger-scale datasets. Therefore, reducing the training time is also a direction for future work.
|
| 372 |
+
|
| 373 |
+
# REFERENCES
|
| 374 |
+
|
| 375 |
+
[1] R. He, Z. Han, X. Lu, and Y. Yin, "Safe-student for safe deep semi-supervised learning with unseen-class unlabeled data," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR), June 2022, pp. 14585-14594.
|
| 376 |
+
[2] X. Hu, Y. Zeng, X. Xu, S. Zhou, and L. Liu, "Robust semi-supervised classification based on data augmented online elms with deep features," Knowledge-Based Systems, vol. 229, p. 107307, 2021.
|
| 377 |
+
[3] T.-Y. Lin, M. Maire, S. Belongie, J. Hays, P. Perona, D. Ramanan, P. Dollar, and C. L. Zitnick, "Microsoft coco: Common objects in context," in European conference on computer vision. Springer, 2014, pp. 740-755.
|
| 378 |
+
[4] I. Bekkerman and J. Tabrikian, “Target detection and localization using mimo radars and sonars,” IEEE Transactions on Signal Processing, vol. 54, no. 10, pp. 3873–3883, 2006.
|
| 379 |
+
[5] M. Everingham, S. A. Eslami, L. Van Gool, C. K. Williams, J. Winn, and A. Zisserman, "The pascal visual object classes challenge: A retrospective," International journal of computer vision, vol. 111, no. 1, pp. 98-136, 2015.
|
| 380 |
+
[6] S. Zhou, D. Nie, E. Adeli, J. Yin, J. Lian, and D. Shen, "High-resolution encoder-decoder networks for low-contrast medical image segmentation," IEEE Transactions on Image Processing, vol. 29, pp. 461-475, 2019.
|
| 381 |
+
[7] L. Li, S. Wang, X. Liu, E. Zhu, L. Shen, K. Li, and K. Li, "Local sample-weighted multiple kernel clustering with consensus discriminative graph," IEEE Transactions on Neural Networks and Learning Systems, 2022.
|
| 382 |
+
[8] S. Wang, X. Liu, L. Liu, S. Zhou, and E. Zhu, “Late fusion multiple kernel clustering with proxy graph refinement,” IEEE Transactions on Neural Networks and Learning Systems, 2021.
|
| 383 |
+
|
| 384 |
+
[9] S. Zhou, X. Liu, M. Li, E. Zhu, L. Liu, C. Zhang, and J. Yin, “Multiple kernel clustering with neighbor-kernel subspace segmentation,” IEEE transactions on neural networks and learning systems, vol. 31, no. 4, pp. 1351–1362, 2019.
|
| 385 |
+
[10] S. Wang, X. Liu, L. Liu, W. Tu, X. Zhu, J. Liu, S. Zhou, and E. Zhu, “Highly-efficient incomplete large-scale multi-view clustering with consensus bipartite graph,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2022, pp. 9776–9785.
|
| 386 |
+
[11] S. Zhou, E. Zhu, X. Liu, T. Zheng, Q. Liu, J. Xia, and J. Yin, "Subspace segmentation-based robust multiple kernel clustering," Information Fusion, vol. 53, pp. 145-154, 2020.
|
| 387 |
+
[12] S. Wang, X. Liu, E. Zhu, C. Tang, J. Liu, J. Hu, J. Xia, and J. Yin, "Multi-view clustering via late fusion alignment maximization," in IJCAI, 2019, pp. 3778-3784.
|
| 388 |
+
[13] S. Zhou, X. Liu, J. Liu, X. Guo, Y. Zhao, E. Zhu, Y. Zhai, J. Yin, and W. Gao, "Multi-view spectral clustering with optimal neighborhood laplacian matrix," in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, no. 04, 2020, pp. 6965-6972.
|
| 389 |
+
[14] S. Wang, X. Liu, X. Zhu, P. Zhang, Y. Zhang, F. Gao, and E. Zhu, "Fast parameter-free multi-view subspace clustering with consensus anchor guidance," IEEE Transactions on Image Processing, vol. 31, pp. 556-568, 2021.
|
| 390 |
+
[15] D. Mahajan, R. Girshick, V. Ramanathan, K. He, M. Paluri, Y. Li, A. Bharambe, and L. Van Der Maaten, "Exploring the limits of weakly supervised pretraining," in Proceedings of the European conference on computer vision (ECCV), 2018, pp. 181-196.
|
| 391 |
+
[16] R. He, Z. Han, and Y. Yin, "Towards safe and robust weakly-supervised anomaly detection under subpopulation shift," Knowledge-Based Systems, p. 109088, 2022.
|
| 392 |
+
[17] M. Luo, X. Chang, L. Nie, Y. Yang, A. G. Hauptmann, and Q. Zheng, "An adaptive semisupervised feature analysis for video semantic recognition," IEEE transactions on cybernetics, vol. 48, no. 2, pp. 648-660, 2017.
|
| 393 |
+
[18] K. Chen, L. Yao, D. Zhang, X. Wang, X. Chang, and F. Nie, "A semisupervised recurrent convolutional attention model for human activity recognition," IEEE transactions on neural networks and learning systems, vol. 31, no. 5, pp. 1747-1756, 2019.
|
| 394 |
+
[19] E. Yu, J. Sun, J. Li, X. Chang, X.-H. Han, and A. G. Hauptmann, "Adaptive semi-supervised feature selection for cross-modal retrieval," IEEE Transactions on Multimedia, vol. 21, no. 5, pp. 1276-1288, 2018.
|
| 395 |
+
[20] Z. Zhang, T. W. Chow, and M. Zhao, "Trace ratio optimization-based semi-supervised nonlinear dimensionality reduction for marginal manifold visualization," IEEE Transactions on Knowledge and Data Engineering, vol. 25, no. 5, pp. 1148-1161, 2012.
|
| 396 |
+
[21] Z. Zhang, F. Li, L. Jia, J. Qin, L. Zhang, and S. Yan, "Robust adaptive embedded label propagation with weight learning for inductive classification," IEEE transactions on neural networks and learning systems, vol. 29, no. 8, pp. 3388-3403, 2017.
|
| 397 |
+
[22] H. Zhang, Z. Zhang, M. Zhao, Q. Ye, M. Zhang, and M. Wang, "Robust triple-matrix-recovery-based auto-weighted label propagation for classification," IEEE Transactions on Neural Networks and Learning Systems, vol. 31, no. 11, pp. 4538-4552, 2020.
|
| 398 |
+
[23] Q. Ye, J. Yang, T. Yin, and Z. Zhang, "Can the virtual labels obtained by traditional lp approaches be well encoded in wrl?" IEEE transactions on neural networks and learning systems, vol. 27, no. 7, pp. 1591-1598, 2015.
|
| 399 |
+
[24] R. Jozefowicz, O. Vinyals, M. Schuster, N. Shazeer, and Y. Wu, “Exploring the limits of language modeling,” arXiv preprint arXiv:1602.02410, 2016.
|
| 400 |
+
[25] M. Sajjadi, M. Javanmardi, and T. Tasdizen, "Regularization with stochastic transformations and perturbations for deep semi-supervised learning," Advances in neural information processing systems, vol. 29, pp. 1163-1171, 2016.
|
| 401 |
+
[26] S. Laine and T. Aila, “Temporal ensembling for semi-supervised learning,” arXiv preprint arXiv:1610.02242, 2016.
|
| 402 |
+
[27] A. Tarvainen and H. Valpola, “Mean teachers are better role models: Weight-averaged consistency targets improve semi-supervised deep learning results,” arXiv preprint arXiv:1703.01780, 2017.
|
| 403 |
+
[28] D. Berthelot, N. Carlini, I. Goodfellow, N. Papernot, A. Oliver, and C. Raffel, "Mixmatch: A holistic approach to semi-supervised learning," arXiv preprint arXiv:1905.02249, 2019.
|
| 404 |
+
[29] K. Sohn, D. Berthelot, C.-L. Li, Z. Zhang, N. Carlini, E. D. Cubuk, A. Kurakin, H. Zhang, and C. Raffel, "Fixmatch: Simplifying semi-supervised learning with consistency and confidence," arXiv preprint arXiv:2001.07685, 2020.
|
| 405 |
+
[30] D. Berthelot, N. Carlini, E. D. Cubuk, A. Kurakin, K. Sohn, H. Zhang, and C. Raffel, "Remixmatch: Semi-supervised learning with
|
| 406 |
+
|
| 407 |
+
distribution alignment and augmentation anchoring," arXiv preprint arXiv:1911.09785, 2019.
|
| 408 |
+
[31] J. Li, C. Xiong, and S. C. Hoi, "Comatch: Semi-supervised learning with contrastive graph regularization," in Proceedings of the IEEE/CVF International Conference on Computer Vision, 2021, pp. 9475-9484.
|
| 409 |
+
[32] H. Zhang, M. Cisse, Y. N. Dauphin, and D. Lopez-Paz, "mixup: Beyond empirical risk minimization," arXiv preprint arXiv:1710.09412, 2017.
|
| 410 |
+
[33] V. Verma, K. Kawaguchi, A. Lamb, J. Kannala, Y. Bengio, and D. Lopez-Paz, "Interpolation consistency training for semi-supervised learning," arXiv preprint arXiv:1903.03825, 2019.
|
| 411 |
+
[34] T. Chen, S. Kornblith, M. Norouzi, and G. Hinton, "A simple framework for contrastive learning of visual representations," in International conference on machine learning. PMLR, 2020, pp. 1597-1607.
|
| 412 |
+
[35] K. He, H. Fan, Y. Wu, S. Xie, and R. Girshick, "Momentum contrast for unsupervised visual representation learning," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp. 9729-9738.
|
| 413 |
+
[36] J. Zbontar, L. Jing, I. Misra, Y. LeCun, and S. Deny, “Barlow twins: Self-supervised learning via redundancy reduction,” arXiv preprint arXiv:2103.03230, 2021.
|
| 414 |
+
[37] X. Chen and K. He, “Exploring simple siamese representation learning,” in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2021, pp. 15750-15758.
|
| 415 |
+
[38] Z. Wu, Y. Xiong, S. X. Yu, and D. Lin, "Unsupervised feature learning via non-parametric instance discrimination," in Proceedings of the IEEE conference on computer vision and pattern recognition, 2018, pp. 3733-3742.
|
| 416 |
+
[39] A. v. d. Oord, Y. Li, and O. Vinyals, “Representation learning with contrastive predictive coding,” arXiv preprint arXiv:1807.03748, 2018.
|
| 417 |
+
[40] T. Chen, S. Kornblith, K. Swersky, M. Norouzi, and G. Hinton, "Big self-supervised models are strong semi-supervised learners," arXiv preprint arXiv:2006.10029, 2020.
|
| 418 |
+
[41] P. Chen, T. Ma, X. Qin, W. Xu, and S. Zhou, "Data-efficient semi-supervised learning by reliable edge mining," in Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, 2020, pp. 9192-9201.
|
| 419 |
+
[42] Y. Liu, W. Tu, S. Zhou, X. Liu, L. Song, X. Yang, and E. Zhu, "Deep graph clustering via dual correlation reduction," in Proc. of AAAI, 2022.
|
| 420 |
+
[43] M. Sajjadi, M. Javanmardi, and T. Tasdizen, "Regularization with stochastic transformations and perturbations for deep semi-supervised learning," Advances in neural information processing systems, vol. 29, pp. 1163-1171, 2016.
|
| 421 |
+
[44] T. Miyato, S.-i. Maeda, M. Koyama, and S. Ishii, “Virtual adversarial training: a regularization method for supervised and semi-supervised learning,” IEEE transactions on pattern analysis and machine intelligence, vol. 41, no. 8, pp. 1979–1993, 2018.
|
| 422 |
+
[45] Q. Xie, Z. Dai, E. Hovy, M.-T. Luong, and Q. V. Le, "Unsupervised data augmentation for consistency training," arXiv preprint arXiv:1904.12848, 2019.
|
| 423 |
+
[46] T. Lucas, C. Tallec, Y. Ollivier, and J. Verbeek, “Mixed batches and symmetric discriminators for gan training,” in International Conference on Machine Learning. PMLR, 2018, pp. 2844–2853.
|
| 424 |
+
[47] D. Hendrycks, N. Mu, E. D. Cubuk, B. Zoph, J. Gilmer, and B. Lakshminarayanan, "Augmix: A simple data processing method to improve robustness and uncertainty," arXiv preprint arXiv:1912.02781, 2019.
|
| 425 |
+
[48] H. Guo, “Nonlinear mixup: Out-of-manifold data augmentation for text classification,” in Proceedings of the AAAI Conference on Artificial Intelligence, vol. 34, no. 04, 2020, pp. 4044–4051.
|
| 426 |
+
[49] H. Guo, Y. Mao, and R. Zhang, “Augmenting data with mixup for sentence classification: An empirical study,” arXiv preprint arXiv:1905.08941, 2019.
|
| 427 |
+
[50] Y. Tokozume, Y. Ushiku, and T. Harada, “Between-class learning for image classification,” in Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2018, pp. 5486-5494.
|
| 428 |
+
[51] D. Berthelot, C. Raffel, A. Roy, and I. Goodfellow, “Understanding and improving interpolation in autoencoders via an adversarial regularizer,” arXiv preprint arXiv:1807.07543, 2018.
|
| 429 |
+
[52] Y. Netzer, T. Wang, A. Coates, A. Bissacco, B. Wu, and A. Y. Ng, "Reading digits in natural images with unsupervised feature learning," 2011.
|
| 430 |
+
[53] A. Krizhevsky, G. Hinton et al., “Learning multiple layers of features from tiny images,” 2009.
|
| 431 |
+
[54] A. Oliver, A. Odena, C. Raffel, E. D. Cubuk, and I. J. Goodfellow, "Realistic evaluation of deep semi-supervised learning algorithms," arXiv preprint arXiv:1804.09170, 2018.
|
2202.11xxx/2202.11915/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:651588c44012aafe932eefc0fb360a4c3c40552fffa17c249c39df8c159afd14
|
| 3 |
+
size 730805
|
2202.11xxx/2202.11915/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.11xxx/2202.11917/2c37ab02-567a-4330-8d08-faf65b9d17f7_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.11xxx/2202.11917/2c37ab02-567a-4330-8d08-faf65b9d17f7_model.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.11xxx/2202.11917/2c37ab02-567a-4330-8d08-faf65b9d17f7_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bd1dd3fab6714fa7353779b6a4c553df8d06088251912c2b04696631d4353939
|
| 3 |
+
size 1795118
|
2202.11xxx/2202.11917/full.md
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.11xxx/2202.11917/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:3eab0b1abeceaffabc94b85afdcd97b996badba2f23ace3b104520c4186c3c3c
|
| 3 |
+
size 1474791
|
2202.11xxx/2202.11917/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.11xxx/2202.11923/2469feeb-f44a-460f-890c-2b605e397b0b_content_list.json
ADDED
|
@@ -0,0 +1,1347 @@
| 1 |
+
[
|
| 2 |
+
{
|
| 3 |
+
"type": "text",
|
| 4 |
+
"text": "Welcome to the Modern World of Pronouns: Identity-Inclusive Natural Language Processing beyond Gender",
|
| 5 |
+
"text_level": 1,
|
| 6 |
+
"bbox": [
|
| 7 |
+
169,
|
| 8 |
+
89,
|
| 9 |
+
828,
|
| 10 |
+
129
|
| 11 |
+
],
|
| 12 |
+
"page_idx": 0
|
| 13 |
+
},
|
| 14 |
+
{
|
| 15 |
+
"type": "text",
|
| 16 |
+
"text": "Anne Lauscher",
|
| 17 |
+
"bbox": [
|
| 18 |
+
181,
|
| 19 |
+
143,
|
| 20 |
+
317,
|
| 21 |
+
156
|
| 22 |
+
],
|
| 23 |
+
"page_idx": 0
|
| 24 |
+
},
|
| 25 |
+
{
|
| 26 |
+
"type": "text",
|
| 27 |
+
"text": "MilaNLP",
|
| 28 |
+
"bbox": [
|
| 29 |
+
210,
|
| 30 |
+
160,
|
| 31 |
+
290,
|
| 32 |
+
174
|
| 33 |
+
],
|
| 34 |
+
"page_idx": 0
|
| 35 |
+
},
|
| 36 |
+
{
|
| 37 |
+
"type": "text",
|
| 38 |
+
"text": "Universita Luigi Bocconi",
|
| 39 |
+
"bbox": [
|
| 40 |
+
147,
|
| 41 |
+
177,
|
| 42 |
+
352,
|
| 43 |
+
192
|
| 44 |
+
],
|
| 45 |
+
"page_idx": 0
|
| 46 |
+
},
|
| 47 |
+
{
|
| 48 |
+
"type": "text",
|
| 49 |
+
"text": "Milan, Italy",
|
| 50 |
+
"bbox": [
|
| 51 |
+
200,
|
| 52 |
+
193,
|
| 53 |
+
299,
|
| 54 |
+
209
|
| 55 |
+
],
|
| 56 |
+
"page_idx": 0
|
| 57 |
+
},
|
| 58 |
+
{
|
| 59 |
+
"type": "text",
|
| 60 |
+
"text": "anne.lauscher@unibocconi.it",
|
| 61 |
+
"bbox": [
|
| 62 |
+
127,
|
| 63 |
+
212,
|
| 64 |
+
373,
|
| 65 |
+
224
|
| 66 |
+
],
|
| 67 |
+
"page_idx": 0
|
| 68 |
+
},
|
| 69 |
+
{
|
| 70 |
+
"type": "text",
|
| 71 |
+
"text": "Archie Crowley",
|
| 72 |
+
"bbox": [
|
| 73 |
+
430,
|
| 74 |
+
143,
|
| 75 |
+
568,
|
| 76 |
+
158
|
| 77 |
+
],
|
| 78 |
+
"page_idx": 0
|
| 79 |
+
},
|
| 80 |
+
{
|
| 81 |
+
"type": "text",
|
| 82 |
+
"text": "Linguistics",
|
| 83 |
+
"bbox": [
|
| 84 |
+
450,
|
| 85 |
+
160,
|
| 86 |
+
544,
|
| 87 |
+
175
|
| 88 |
+
],
|
| 89 |
+
"page_idx": 0
|
| 90 |
+
},
|
| 91 |
+
{
|
| 92 |
+
"type": "text",
|
| 93 |
+
"text": "University of South Carolina",
|
| 94 |
+
"bbox": [
|
| 95 |
+
381,
|
| 96 |
+
177,
|
| 97 |
+
616,
|
| 98 |
+
191
|
| 99 |
+
],
|
| 100 |
+
"page_idx": 0
|
| 101 |
+
},
|
| 102 |
+
{
|
| 103 |
+
"type": "text",
|
| 104 |
+
"text": "Columbia, SC, USA",
|
| 105 |
+
"bbox": [
|
| 106 |
+
415,
|
| 107 |
+
193,
|
| 108 |
+
581,
|
| 109 |
+
208
|
| 110 |
+
],
|
| 111 |
+
"page_idx": 0
|
| 112 |
+
},
|
| 113 |
+
{
|
| 114 |
+
"type": "text",
|
| 115 |
+
"text": "acrowley@sc.edu",
|
| 116 |
+
"bbox": [
|
| 117 |
+
428,
|
| 118 |
+
212,
|
| 119 |
+
566,
|
| 120 |
+
225
|
| 121 |
+
],
|
| 122 |
+
"page_idx": 0
|
| 123 |
+
},
|
| 124 |
+
{
|
| 125 |
+
"type": "text",
|
| 126 |
+
"text": "Dirk Hovy",
|
| 127 |
+
"bbox": [
|
| 128 |
+
702,
|
| 129 |
+
143,
|
| 130 |
+
796,
|
| 131 |
+
158
|
| 132 |
+
],
|
| 133 |
+
"page_idx": 0
|
| 134 |
+
},
|
| 135 |
+
{
|
| 136 |
+
"type": "text",
|
| 137 |
+
"text": "MilaNLP",
|
| 138 |
+
"bbox": [
|
| 139 |
+
710,
|
| 140 |
+
160,
|
| 141 |
+
789,
|
| 142 |
+
174
|
| 143 |
+
],
|
| 144 |
+
"page_idx": 0
|
| 145 |
+
},
|
| 146 |
+
{
|
| 147 |
+
"type": "text",
|
| 148 |
+
"text": "Universita Luigi Bocconi",
|
| 149 |
+
"bbox": [
|
| 150 |
+
647,
|
| 151 |
+
177,
|
| 152 |
+
852,
|
| 153 |
+
192
|
| 154 |
+
],
|
| 155 |
+
"page_idx": 0
|
| 156 |
+
},
|
| 157 |
+
{
|
| 158 |
+
"type": "text",
|
| 159 |
+
"text": "Milan, Italy",
|
| 160 |
+
"bbox": [
|
| 161 |
+
700,
|
| 162 |
+
193,
|
| 163 |
+
798,
|
| 164 |
+
209
|
| 165 |
+
],
|
| 166 |
+
"page_idx": 0
|
| 167 |
+
},
|
| 168 |
+
{
|
| 169 |
+
"type": "text",
|
| 170 |
+
"text": "dirk.hovy@unibocconi.it",
|
| 171 |
+
"bbox": [
|
| 172 |
+
643,
|
| 173 |
+
212,
|
| 174 |
+
853,
|
| 175 |
+
224
|
| 176 |
+
],
|
| 177 |
+
"page_idx": 0
|
| 178 |
+
},
|
| 179 |
+
{
|
| 180 |
+
"type": "text",
|
| 181 |
+
"text": "Abstract",
|
| 182 |
+
"text_level": 1,
|
| 183 |
+
"bbox": [
|
| 184 |
+
260,
|
| 185 |
+
252,
|
| 186 |
+
339,
|
| 187 |
+
267
|
| 188 |
+
],
|
| 189 |
+
"page_idx": 0
|
| 190 |
+
},
|
| 191 |
+
{
|
| 192 |
+
"type": "text",
|
| 193 |
+
"text": "Trigger warning: This paper contains some examples which might be offensive to some users.",
|
| 194 |
+
"bbox": [
|
| 195 |
+
142,
|
| 196 |
+
281,
|
| 197 |
+
460,
|
| 198 |
+
309
|
| 199 |
+
],
|
| 200 |
+
"page_idx": 0
|
| 201 |
+
},
|
| 202 |
+
{
|
| 203 |
+
"type": "text",
|
| 204 |
+
"text": "The works of pronouns is changing. From a closed class of words with few members to a much more open set of terms to reflect identities. However, Natural Language Processing (NLP) is barely reflecting this linguistic shift, even though recent work outlined the harms of gender-exclusive language technology. Particularly problematic is the current modeling 3rd person pronouns, as it largely ignores various phenomena like neopronouns, i.e., pronoun sets that are novel and not (yet) widely established. This omission contributes to the discrimination of marginalized and underrepresented groups, e.g., non-binary individuals. However, other identity-expression phenomena beyond gender are also ignored by current NLP technology. In this paper, we provide an overview of 3rd person pronoun issues for NLP. Based on our observations and ethical considerations, we define a series of desiderata for modeling pronouns in language technology. We evaluate existing and novel modeling approaches w.r.t. these desiderata qualitatively, and quantify the impact of a more discrimination-free approach on established benchmark data.",
|
| 205 |
+
"bbox": [
|
| 206 |
+
139,
|
| 207 |
+
319,
|
| 208 |
+
460,
|
| 209 |
+
690
|
| 210 |
+
],
|
| 211 |
+
"page_idx": 0
|
| 212 |
+
},
|
| 213 |
+
{
|
| 214 |
+
"type": "text",
|
| 215 |
+
"text": "1 Introduction",
|
| 216 |
+
"text_level": 1,
|
| 217 |
+
"bbox": [
|
| 218 |
+
114,
|
| 219 |
+
703,
|
| 220 |
+
258,
|
| 221 |
+
719
|
| 222 |
+
],
|
| 223 |
+
"page_idx": 0
|
| 224 |
+
},
|
| 225 |
+
{
|
| 226 |
+
"type": "text",
|
| 227 |
+
"text": "Pronouns are an essential component of many languages and often one of the most frequent word classes. Accordingly, NLP has long studied tasks related to them, e.g., pronoun resolution (e.g., Hobbs, 1978). Simplistically, they can be defined as \"a word (such as I, he, she, you, it, we, or they) that is used instead of a noun or noun phrase\".<sup>1</sup> Linguistic studies have pointed out the complexity of pronouns, though (e.g., Postal et al., 1969;",
|
| 228 |
+
"bbox": [
|
| 229 |
+
112,
|
| 230 |
+
727,
|
| 231 |
+
489,
|
| 232 |
+
872
|
| 233 |
+
],
|
| 234 |
+
"page_idx": 0
|
| 235 |
+
},
|
| 236 |
+
{
|
| 237 |
+
"type": "text",
|
| 238 |
+
"text": "McKay, 1993). Pronouns can carry demographic information – in English, for example, information about the number of referees and a single referee's (grammatical) gender. Even more information can be conveyed by pronouns in other, non-pro-drop languages. Consider Arabana-Wangkangurru, a language spoken in Australia, in which a speaker uses different pronouns depending on whether the referee is part of the same social or ritual group (moiety) (Hercus, 1994). As such, pronouns shape how we perceive individuals and can even reflect cultural aspects (e.g., Kashima and Kashima, 1998) and ideologies (e.g., Muqit, 2012). Consequently, pronoun usage should be considered a sensitive aspect of natural language use.",
|
| 239 |
+
"bbox": [
|
| 240 |
+
507,
|
| 241 |
+
252,
|
| 242 |
+
884,
|
| 243 |
+
493
|
| 244 |
+
],
|
| 245 |
+
"page_idx": 0
|
| 246 |
+
},
|
| 247 |
+
{
|
| 248 |
+
"type": "text",
|
| 249 |
+
"text": "Accordingly, in many western societies, these phenomena have been drawing more and more attention. For instance, in 2020, the American Dialect Society voted “(My) Pronouns” as the 2019 Word of the Year and Singular “They” as the Word of the Decade (Roberts, 2020). Recently, there has been a shift in pronoun usage (Krauthamer, 2021), partially due to shifts in the perception of gender, driven by the queer-feminist discourse (e.g., Butler, 1990, 2004). Related to this is the open discussion of identity beyond binary gender. For instance, a person who does not identify their gender within the gender binary (e.g., a nonbinary or genderqueer person) might use singular “they” as their pronoun. Recently, the French dictionary “Le Robert” added the non-binary pronoun “iel” to its list of words.<sup>3</sup>",
|
| 250 |
+
"bbox": [
|
| 251 |
+
507,
|
| 252 |
+
495,
|
| 253 |
+
884,
|
| 254 |
+
753
|
| 255 |
+
],
|
| 256 |
+
"page_idx": 0
|
| 257 |
+
},
|
| 258 |
+
{
|
| 259 |
+
"type": "text",
|
| 260 |
+
"text": "This \"social push\" to respect diverse gender identities also affects aspects of NLP. Recent studies have pointed out the potential harms from the cur",
|
| 261 |
+
"bbox": [
|
| 262 |
+
507,
|
| 263 |
+
756,
|
| 264 |
+
884,
|
| 265 |
+
804
|
| 266 |
+
],
|
| 267 |
+
"page_idx": 0
|
| 268 |
+
},
|
| 269 |
+
{
|
| 270 |
+
"type": "page_footnote",
|
| 271 |
+
"text": "2Grammatical, biological, and self-identified gender should not be confounded, but are often treated interchangeably by lay audiences.",
|
| 272 |
+
"bbox": [
|
| 273 |
+
507,
|
| 274 |
+
820,
|
| 275 |
+
885,
|
| 276 |
+
858
|
| 277 |
+
],
|
| 278 |
+
"page_idx": 0
|
| 279 |
+
},
|
| 280 |
+
{
|
| 281 |
+
"type": "page_footnote",
|
| 282 |
+
"text": "3https://dictionary.lerobert.com/dis-moi-robert/raconte-moi-robert/mot-jour/pourquoi-le-robert-a-t-il-integre-lemot-iel-dans-son-dictionnaire-en-ligne.htm1",
|
| 283 |
+
"bbox": [
|
| 284 |
+
507,
|
| 285 |
+
858,
|
| 286 |
+
885,
|
| 287 |
+
917
|
| 288 |
+
],
|
| 289 |
+
"page_idx": 0
|
| 290 |
+
},
|
| 291 |
+
{
|
| 292 |
+
"type": "aside_text",
|
| 293 |
+
"text": "arXiv:2202.11923v1 [cs.CL] 24 Feb 2022",
|
| 294 |
+
"bbox": [
|
| 295 |
+
21,
|
| 296 |
+
309,
|
| 297 |
+
60,
|
| 298 |
+
724
|
| 299 |
+
],
|
| 300 |
+
"page_idx": 0
|
| 301 |
+
},
|
| 302 |
+
{
|
| 303 |
+
"type": "text",
|
| 304 |
+
"text": "rent lack of non-binary representation in NLP data sets, embeddings, and tasks (Cao and Daumé III, 2021; Dev et al., 2021), and the related issue of unfair stereotyping of queer individuals (Barikeri et al., 2021). However, the research landscape on modern pronoun usage is still surprisingly scarce, hindering progress for a fair and inclusive NLP.",
|
| 305 |
+
"bbox": [
|
| 306 |
+
112,
|
| 307 |
+
84,
|
| 308 |
+
487,
|
| 309 |
+
197
|
| 310 |
+
],
|
| 311 |
+
"page_idx": 1
|
| 312 |
+
},
|
| 313 |
+
{
|
| 314 |
+
"type": "text",
|
| 315 |
+
"text": "Further linguistic research has identified identity aspects of pronouns beyond gender (Miltersen, 2016). Specifically, *nounself* pronouns, functionally turning pronouns from a *closed* to an *open* word class. To the best of our knowledge, these aspects have been completely ignored by NLP so far. We did not find a single work systematically describing all of the currently existing phenomena even just in English third-person pronoun usage (let alone other languages). In contrast, a fair number of discussions are taking place in queer Wikis and forums. While it is still unclear which of these phenomena will persist over the next decades, people are using and discussing them, and accordingly, we as a research community should adapt.",
|
| 316 |
+
"bbox": [
|
| 317 |
+
115,
|
| 318 |
+
198,
|
| 319 |
+
489,
|
| 320 |
+
439
|
| 321 |
+
],
|
| 322 |
+
"page_idx": 1
|
| 323 |
+
},
|
| 324 |
+
{
|
| 325 |
+
"type": "text",
|
| 326 |
+
"text": "Contributions. In this \"living draft\", 1) we are the first to provide a systematic overview of existing phenomena in English 3rd person pronoun usage. Our results will inform future NLP research on ethical NLP and non-binary representation. We provide the first NLP work acknowledging otherkin identities. We support our observations with a corpus analysis on Reddit. 2) Based on our overview, we derive five desiderata for modeling third-person pronouns. Based on these, 3) we discuss various existing and novel paradigms for when and how to model pronouns. 4) Finally, we quantify the impact of discrimination-free non-modeling of pronouns on a widely established benchmark.",
|
| 327 |
+
"bbox": [
|
| 328 |
+
112,
|
| 329 |
+
444,
|
| 330 |
+
489,
|
| 331 |
+
668
|
| 332 |
+
],
|
| 333 |
+
"page_idx": 1
|
| 334 |
+
},
|
| 335 |
+
{
|
| 336 |
+
"type": "text",
|
| 337 |
+
"text": "2 Related Work",
|
| 338 |
+
"text_level": 1,
|
| 339 |
+
"bbox": [
|
| 340 |
+
112,
|
| 341 |
+
675,
|
| 342 |
+
268,
|
| 343 |
+
690
|
| 344 |
+
],
|
| 345 |
+
"page_idx": 1
|
| 346 |
+
},
|
| 347 |
+
{
|
| 348 |
+
"type": "text",
|
| 349 |
+
"text": "While there are some works in NLP on gender-inclusion (e.g., Dev et al., 2021) and gender bias in static (e.g., Bolukbasi et al., 2016; Gonen and Goldberg, 2019; Lauscher et al., 2020, inter alia) and contextualized (e.g., Kurita et al., 2019; Bordia and Bowman, 2019; Lauscher et al., 2021, inter alia) language representations as well as works focusing on specific gender bias in downstream tasks, e.g., natural language inference (Dev et al., 2020) and co-reference resolution (e.g., Rudinger et al., 2018; Webster et al., 2018), we are not aware",
|
| 350 |
+
"bbox": [
|
| 351 |
+
112,
|
| 352 |
+
697,
|
| 353 |
+
489,
|
| 354 |
+
873
|
| 355 |
+
],
|
| 356 |
+
"page_idx": 1
|
| 357 |
+
},
|
| 358 |
+
{
|
| 359 |
+
"type": "text",
|
| 360 |
+
"text": "4For instance, while we found hits for the Google Scholar query \"neopronoun\", we did not get any results for variants of \"nameself pronoun\", or \"emojiself pronoun\".",
|
| 361 |
+
"bbox": [
|
| 362 |
+
112,
|
| 363 |
+
879,
|
| 364 |
+
487,
|
| 365 |
+
919
|
| 366 |
+
],
|
| 367 |
+
"page_idx": 1
|
| 368 |
+
},
|
| 369 |
+
{
|
| 370 |
+
"type": "text",
|
| 371 |
+
"text": "of any work that deals with the broader field of identity-inclusion. Thus, there is no other NLP work that deals with a larger variety of pronouns and acknowledges pronouns as an open word class. For surveys on the general topic of unfair bias in NLP we refer to Blodgett et al. (2020) and Shah et al. (2020). Recently, Dev et al. (2021) pointed broadly at the harms (Barocas et al., 2017) arising from gender-exclusivity in NLP. They surveyed queer individuals and assessed non-binary representations in existing data set and language representations. In contrast to them, we specifically look at third-person pronoun usage and how to model such phenomena. Webster et al. (2018) provide a balanced co-reference resolution corpus with a focus on the fair distribution of pronouns but only focus on the gendered binary case. Closest to us, Cao and Daumé III (2021) discuss gender inclusion throughout the machine learning pipeline beyond the binary gender conception. While they are also the first to consider non-binary pronouns, including some neopronouns, in co-reference resolution, they do not acknowledge the broader spectrum of identity-related pronoun phenomena.",
|
| 372 |
+
"bbox": [
|
| 373 |
+
505,
|
| 374 |
+
84,
|
| 375 |
+
884,
|
| 376 |
+
470
|
| 377 |
+
],
|
| 378 |
+
"page_idx": 1
|
| 379 |
+
},
|
| 380 |
+
{
|
| 381 |
+
"type": "text",
|
| 382 |
+
"text": "3 A Note on Identity and Pronouns",
|
| 383 |
+
"text_level": 1,
|
| 384 |
+
"bbox": [
|
| 385 |
+
507,
|
| 386 |
+
478,
|
| 387 |
+
828,
|
| 388 |
+
494
|
| 389 |
+
],
|
| 390 |
+
"page_idx": 1
|
| 391 |
+
},
|
| 392 |
+
{
|
| 393 |
+
"type": "text",
|
| 394 |
+
"text": "This work focuses on the relationship between identity and pronouns. Identity refers to an individual's self-concept, relating to the question of what makes each of us unique (Maalouf, 2000). It can be seen as a two-way process between an individual and others (Grandstrand, 1998), and relates to different dimensions, e.g., one's gender.",
|
| 395 |
+
"bbox": [
|
| 396 |
+
505,
|
| 397 |
+
501,
|
| 398 |
+
882,
|
| 399 |
+
613
|
| 400 |
+
],
|
| 401 |
+
"page_idx": 1
|
| 402 |
+
},
|
| 403 |
+
{
|
| 404 |
+
"type": "text",
|
| 405 |
+
"text": "Gender Identity. Gender identity, as opposed to gender expression or sex, is one's subjective sense of gender (Stryker, 2017). In this work, we conceptualize gender identities beyond the binary notion (man, woman), e.g., non-binary gender, transgender, agender, polygender, etc.",
|
| 406 |
+
"bbox": [
|
| 407 |
+
507,
|
| 408 |
+
621,
|
| 409 |
+
882,
|
| 410 |
+
718
|
| 411 |
+
],
|
| 412 |
+
"page_idx": 1
|
| 413 |
+
},
|
| 414 |
+
{
|
| 415 |
+
"type": "text",
|
| 416 |
+
"text": "Otherkin Identity. Individuals with otherkin identity do not entirely identify as human (Laycock, 2012), e.g., vamp. Miltersen (2016) note that otherkin individuals often identify with nounself pronouns matching their kin.",
|
| 417 |
+
"bbox": [
|
| 418 |
+
507,
|
| 419 |
+
725,
|
| 420 |
+
884,
|
| 421 |
+
804
|
| 422 |
+
],
|
| 423 |
+
"page_idx": 1
|
| 424 |
+
},
|
| 425 |
+
{
|
| 426 |
+
"type": "text",
|
| 427 |
+
"text": "Stryker (2017) highlights the strong relationship between gender identity and pronouns. As Raymond (2016) notes, pronoun choices construct the individual's identity in conversations and the relationship between interlocutors. According to Cao and Daumé III (2021), pronouns are a way of expressing referential gender. Referring to an indi",
|
| 428 |
+
"bbox": [
|
| 429 |
+
505,
|
| 430 |
+
806,
|
| 431 |
+
884,
|
| 432 |
+
919
|
| 433 |
+
],
|
| 434 |
+
"page_idx": 1
|
| 435 |
+
},
|
| 436 |
+
{
|
| 437 |
+
"type": "table",
|
| 438 |
+
"img_path": "images/ece16dea6cf7044958e744dbd45b6d0fdfa64a9614d367a6be7b0a4a7e141831.jpg",
|
| 439 |
+
"table_caption": [],
|
| 440 |
+
"table_footnote": [],
|
| 441 |
+
"table_body": "<table><tr><td>Nom.</td><td>Acc.</td><td>Poss. (dep.)</td><td>Poss. (indep.)</td><td>Reflexive</td></tr><tr><td colspan=\"5\">Gendered Pronouns</td></tr><tr><td>he</td><td>him</td><td>his</td><td>his</td><td>himself</td></tr><tr><td>she</td><td>her</td><td>her</td><td>hers</td><td>herself</td></tr><tr><td colspan=\"5\">Gender-Neutral Pronouns</td></tr><tr><td>they</td><td>them</td><td>their</td><td>theirs</td><td>themselves</td></tr><tr><td colspan=\"5\">Neopronouns</td></tr><tr><td>thon</td><td>thon</td><td>thons</td><td>thons</td><td>thonself</td></tr><tr><td>e</td><td>em</td><td>es</td><td>ems</td><td>emself</td></tr><tr><td>ae</td><td>aer</td><td>aer</td><td>aers</td><td>aerself</td></tr><tr><td>co</td><td>co</td><td>cos</td><td>cos</td><td>coself</td></tr><tr><td>ve/ vi</td><td>ver/ vir</td><td>vis</td><td>vers/ virs</td><td>verself/ virself</td></tr><tr><td>xe</td><td>xem</td><td>xyr</td><td>xyrs</td><td>xemself</td></tr><tr><td>ey</td><td>em</td><td>eir</td><td>eirs</td><td>emself</td></tr><tr><td>e</td><td>em</td><td>eir</td><td>eirs</td><td>emself</td></tr><tr><td>ze</td><td>zir</td><td>zir</td><td>zirs</td><td>zirself</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr><tr><td colspan=\"5\">Nounself Pronouns</td></tr><tr><td>star</td><td>star</td><td>stars</td><td>stars</td><td>starselves</td></tr><tr><td>vam</td><td>vamp</td><td>vamps</td><td>vamps</td><td>vampself</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr><tr><td colspan=\"5\">Emojiself Pronouns</td></tr><tr><td></td><td></td><td>s</td><td>s</td><td>self</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr><tr><td colspan=\"5\">Numberself Pronouns</td></tr><tr><td>0</td><td>0</td><td>0s</td><td>0s</td><td>0self</td></tr><tr><td>1/3</td><td>1/3</td><td>1/3s</td><td>1/3s</td><td>1/3self</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr><tr><td colspan=\"5\">Nameself Pronouns</td></tr><tr><td>John</td><td>John</td><td>Johns</td><td>Johns</td><td>Johnselves</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr></table>",
|
| 442 |
+
"bbox": [
|
| 443 |
+
127,
|
| 444 |
+
80,
|
| 445 |
+
477,
|
| 446 |
+
514
|
| 447 |
+
],
|
| 448 |
+
"page_idx": 2
|
| 449 |
+
},
|
| 450 |
+
{
|
| 451 |
+
"type": "text",
|
| 452 |
+
"text": "Table 1: Non-exhaustive overview of phenomena related to third-person pronoun usage in English.",
|
| 453 |
+
"bbox": [
|
| 454 |
+
112,
|
| 455 |
+
521,
|
| 456 |
+
489,
|
| 457 |
+
552
|
| 458 |
+
],
|
| 459 |
+
"page_idx": 2
|
| 460 |
+
},
|
| 461 |
+
{
|
| 462 |
+
"type": "text",
|
| 463 |
+
"text": "vidual with sets of pronouns they do not identify with, e.g., resulting in misgendering, is considered harmful (Dev et al., 2021).",
|
| 464 |
+
"bbox": [
|
| 465 |
+
112,
|
| 466 |
+
579,
|
| 467 |
+
487,
|
| 468 |
+
627
|
| 469 |
+
],
|
| 470 |
+
"page_idx": 2
|
| 471 |
+
},
|
| 472 |
+
{
|
| 473 |
+
"type": "text",
|
| 474 |
+
"text": "4 Phenomena in Third-person Pronoun-Usage",
|
| 475 |
+
"text_level": 1,
|
| 476 |
+
"bbox": [
|
| 477 |
+
112,
|
| 478 |
+
637,
|
| 479 |
+
394,
|
| 480 |
+
670
|
| 481 |
+
],
|
| 482 |
+
"page_idx": 2
|
| 483 |
+
},
|
| 484 |
+
{
|
| 485 |
+
"type": "text",
|
| 486 |
+
"text": "We describe existing phenomena and analyze their presence in a collection of threads from Reddit.<sup>5</sup>",
|
| 487 |
+
"bbox": [
|
| 488 |
+
112,
|
| 489 |
+
678,
|
| 490 |
+
487,
|
| 491 |
+
709
|
| 492 |
+
],
|
| 493 |
+
"page_idx": 2
|
| 494 |
+
},
|
| 495 |
+
{
|
| 496 |
+
"type": "text",
|
| 497 |
+
"text": "4.1 Existing Phenomena",
|
| 498 |
+
"text_level": 1,
|
| 499 |
+
"bbox": [
|
| 500 |
+
112,
|
| 501 |
+
720,
|
| 502 |
+
321,
|
| 503 |
+
736
|
| 504 |
+
],
|
| 505 |
+
"page_idx": 2
|
| 506 |
+
},
|
| 507 |
+
{
|
| 508 |
+
"type": "text",
|
| 509 |
+
"text": "Overall, individuals can choose $n$ sets of pronouns with $n \\geq 0$ . If $n = 0$ , the individual does not identify with any singular 3rd person pronoun. If $n > 1$ , the individual identifies with more than one set of pronouns, possibly each set reflecting overlapping or non-overlapping aspects of their identity. We provide examples of these sets in Table 1. Note that this list is non-exhaustive and that the described phenomena are non-exclusive.",
|
| 510 |
+
"bbox": [
|
| 511 |
+
112,
|
| 512 |
+
745,
|
| 513 |
+
487,
|
| 514 |
+
889
|
| 515 |
+
],
|
| 516 |
+
"page_idx": 2
|
| 517 |
+
},
|
| 518 |
+
{
|
| 519 |
+
"type": "text",
|
| 520 |
+
"text": "Gendered Pronouns. In English, two sets of standard gendered pronouns are available, he/him/himself and she/her/herself.",
|
| 521 |
+
"bbox": [
|
| 522 |
+
507,
|
| 523 |
+
84,
|
| 524 |
+
884,
|
| 525 |
+
131
|
| 526 |
+
],
|
| 527 |
+
"page_idx": 2
|
| 528 |
+
},
|
| 529 |
+
{
|
| 530 |
+
"type": "text",
|
| 531 |
+
"text": "Gender-Neutral Pronouns. Given the history of generic singular they in English (e.g., Who was at the door? They left a note.), there has been an uptake of singular they by non-binary individuals as a gender-netural pronoun option $^{6}$ (Conrod, 2019; Konyelly and Cowper, 2020). Further, there has been increasing institutional recognition with dictionaries and style guides supporting its use.",
|
| 532 |
+
"bbox": [
|
| 533 |
+
507,
|
| 534 |
+
140,
|
| 535 |
+
884,
|
| 536 |
+
269
|
| 537 |
+
],
|
| 538 |
+
"page_idx": 2
|
| 539 |
+
},
|
| 540 |
+
{
|
| 541 |
+
"type": "text",
|
| 542 |
+
"text": "Neopronouns. As an alternative to the singular they, individuals started creating and sharing novel sets of 3rd person pronouns (McGaughey, 2020). More traditional and rather well-known sets of neopronouns include, e.g., the so-called Spivak pronouns $e/emeɪs$ (used in (Spivak, 1990)) and related variations. During our research, we were able to observe various subcategories of neopronouns, partially described in the academic literature.",
|
| 543 |
+
"bbox": [
|
| 544 |
+
507,
|
| 545 |
+
278,
|
| 546 |
+
884,
|
| 547 |
+
422
|
| 548 |
+
],
|
| 549 |
+
"page_idx": 2
|
| 550 |
+
},
|
| 551 |
+
{
|
| 552 |
+
"type": "text",
|
| 553 |
+
"text": "Nounself Pronouns. According to Miltersen (2016), nounself pronouns are pronouns that are “[...] prototypically transparently derived from a specific word, usually a noun”. Individuals may identify with certain nouns, possibly corresponding to distinct aspects of their identity, e.g., kitten/kittenself, vamp/vampself. The author notes the difficulty of clearly defining nounself pronouns, neopronouns, and other phenomena. The phenomenon is assumed to have first appeared in 2013.",
|
| 554 |
+
"bbox": [
|
| 555 |
+
507,
|
| 556 |
+
430,
|
| 557 |
+
884,
|
| 558 |
+
590
|
| 559 |
+
],
|
| 560 |
+
"page_idx": 2
|
| 561 |
+
},
|
| 562 |
+
{
|
| 563 |
+
"type": "text",
|
| 564 |
+
"text": "Emojiself Pronouns. Similar to nounself pronouns, individuals may identify with sets of emojis, possibly reflecting different aspects of their identity, e.g., self. Emojiself pronouns are intended for written communication. Note, that, at the time of writing this manuscript, there seem to exist no academic description of emojiself pronouns. However, we were able to find evidence of their existence on several social media platforms and wikis, e.g., Tumblr, $^{7}$ MOGAI Wiki, $^{8}$ Twitter, $^{9}$ and Reddit. $^{10}$",
|
| 565 |
+
"bbox": [
|
| 566 |
+
507,
|
| 567 |
+
598,
|
| 568 |
+
884,
|
| 569 |
+
758
|
| 570 |
+
],
|
| 571 |
+
"page_idx": 2
|
| 572 |
+
},
|
| 573 |
+
{
|
| 574 |
+
"type": "page_footnote",
|
| 575 |
+
"text": "$^{6}$ https://gendercensus.com/results/2021-worldwide-summary/ $^{7}$ E.g., https://pronoun-archive.tumblr.com/post/188520170831 $^{8}$ https://mogai.miraheze.org/wiki/Emoj iself; according to the article, the origin of emojiself pronouns is unclear but might date back to 2017 $^{9}$ Example of a user complaining about LinkedIn not allowing for emojiself pronouns in the pronoun field: https://twitter.com/frozenpandaman/status/1412314202119700480/photo/1 $^{10}$ E.g., https://www.reddit.com/r/QueerVexi",
|
| 576 |
+
"bbox": [
|
| 577 |
+
507,
|
| 578 |
+
770,
|
| 579 |
+
885,
|
| 580 |
+
920
|
| 581 |
+
],
|
| 582 |
+
"page_idx": 2
|
| 583 |
+
},
|
| 584 |
+
{
|
| 585 |
+
"type": "footer",
|
| 586 |
+
"text": "<sup>5</sup>https://www.reddit.com",
|
| 587 |
+
"bbox": [
|
| 588 |
+
134,
|
| 589 |
+
903,
|
| 590 |
+
347,
|
| 591 |
+
917
|
| 592 |
+
],
|
| 593 |
+
"page_idx": 2
|
| 594 |
+
},
|
| 595 |
+
{
|
| 596 |
+
"type": "text",
|
| 597 |
+
"text": "Numbers/ pronouns. Another form of neopronouns/ nounself pronouns are numberself pronouns. Analogous to before, we assume that here, the individual identifies or partially identified with a number, e.g., $0 / 0 / 0s / 0s / 0$ self. $^{11}$",
|
| 598 |
+
"bbox": [
|
| 599 |
+
112,
|
| 600 |
+
84,
|
| 601 |
+
489,
|
| 602 |
+
165
|
| 603 |
+
],
|
| 604 |
+
"page_idx": 3
|
| 605 |
+
},
|
| 606 |
+
{
|
| 607 |
+
"type": "text",
|
| 608 |
+
"text": "Nameself Pronouns. Individuals may identify with pronouns build from their name, e.g., John/Johnself, overlapping with nullpronomials.[12]",
|
| 609 |
+
"bbox": [
|
| 610 |
+
112,
|
| 611 |
+
171,
|
| 612 |
+
489,
|
| 613 |
+
219
|
| 614 |
+
],
|
| 615 |
+
"page_idx": 3
|
| 616 |
+
},
|
| 617 |
+
{
|
| 618 |
+
"type": "text",
|
| 619 |
+
"text": "Alternating Pronouns. Given that people can identify with $n > 1$ sets of pronouns, the pronouns they identify with can be either equally identified-with sets, or change potentially depending on the context (mutopronominal). For instance, individuals who are also performer may use stage pronouns. Similarly, genderfluid individual may identify with a certain pronoun at a certain point in time (pronoun fluidity, (Cherry-Reid, 2020)). Some individuals identify with the pronouns of the person who is referring to them (mirroed pronouns). Other individuals use set(s) of auxiliary pronouns, e.g., for situations, in which individuals referring to them have problems with using the most identified-with sets of pronouns (e.g., in the case of emojiself pronouns and oral communication). Note that alternating pronoun sets may be even used in the same sentence for referring to the same individual.[13]",
|
| 620 |
+
"bbox": [
|
| 621 |
+
115,
|
| 622 |
+
225,
|
| 623 |
+
489,
|
| 624 |
+
514
|
| 625 |
+
],
|
| 626 |
+
"page_idx": 3
|
| 627 |
+
},
|
| 628 |
+
{
|
| 629 |
+
"type": "text",
|
| 630 |
+
"text": "No Pronouns. Some individuals do not identify with any pronouns. In this case, some individuals identify most with their name being used to refer to them, nameself pronouns, or avoid pronouns.",
|
| 631 |
+
"bbox": [
|
| 632 |
+
112,
|
| 633 |
+
520,
|
| 634 |
+
487,
|
| 635 |
+
586
|
| 636 |
+
],
|
| 637 |
+
"page_idx": 3
|
| 638 |
+
},
|
| 639 |
+
{
|
| 640 |
+
"type": "text",
|
| 641 |
+
"text": "4.2 Corpus Analysis: Neopronouns in Reddit",
|
| 642 |
+
"text_level": 1,
|
| 643 |
+
"bbox": [
|
| 644 |
+
112,
|
| 645 |
+
596,
|
| 646 |
+
482,
|
| 647 |
+
613
|
| 648 |
+
],
|
| 649 |
+
"page_idx": 3
|
| 650 |
+
},
|
| 651 |
+
{
|
| 652 |
+
"type": "text",
|
| 653 |
+
"text": "Setup. We conduct an additional quantitative analysis for the presence of neopronouns in Reddit. To this end, we use Reddit threads created between 2010 and 2021, cleaned by previous work and provided through Huggingface Datasets (127,445,911 lines).<sup>14</sup> As we are interested in capturing novel pronouns and as the list of possible pronouns is indefinite, we proxy neopronouns via the suffixes self and selves indicating the reflexive case and",
|
| 654 |
+
"bbox": [
|
| 655 |
+
112,
|
| 656 |
+
619,
|
| 657 |
+
487,
|
| 658 |
+
764
|
| 659 |
+
],
|
| 660 |
+
"page_idx": 3
|
| 661 |
+
},
|
| 662 |
+
{
|
| 663 |
+
"type": "text",
|
| 664 |
+
"text": "llology/comments/p09nek/i_made_a_flag_f \nor_the_emojiself_pronoun_set/ 11https://pronoun-provider.tumblr.com/ \npost/148452374817/i-think-numbers-as-pro \nnouns-would-be-pretty-cool 12https://pronoun.fandom.com/wiki/Null \npronominal 13https://www.reddit.com/r/NonBinary/c \ncomments/jasv5r/alternating Pronouns_in_s \namesentence/ 14https://huggingface.co/datasets/sent \nence-transformers/redit-title-body",
|
| 665 |
+
"bbox": [
|
| 666 |
+
112,
|
| 667 |
+
770,
|
| 668 |
+
487,
|
| 669 |
+
917
|
| 670 |
+
],
|
| 671 |
+
"page_idx": 3
|
| 672 |
+
},
|
| 673 |
+
{
|
| 674 |
+
"type": "image",
|
| 675 |
+
"img_path": "images/ebf3da34a1b2ca1cb4e109a398ce37a2a1754c69a8f4efa894e09cd4b9901b92.jpg",
|
| 676 |
+
"image_caption": [
|
| 677 |
+
"Figure 1: Token ranks (log-scale) and rank counts of the tokens returned against our reflexive regular expression pattern from Reddit with example annotations."
|
| 678 |
+
],
|
| 679 |
+
"image_footnote": [],
|
| 680 |
+
"bbox": [
|
| 681 |
+
524,
|
| 682 |
+
112,
|
| 683 |
+
865,
|
| 684 |
+
336
|
| 685 |
+
],
|
| 686 |
+
"page_idx": 3
|
| 687 |
+
},
|
| 688 |
+
{
|
| 689 |
+
"type": "text",
|
| 690 |
+
"text": "match them through a regular expression. Additionally, we filter out non-3rd person pronouns (e.g., yourself, ourselves, plural themselves) as well as common variations of these (e.g., urself) and other common non-pronoun expressions we found in the data (e.g., do-it-yourself). This process leaves us with a total of 9,075 unique tokens with in total 74,768 textual mentions.",
|
| 691 |
+
"bbox": [
|
| 692 |
+
507,
|
| 693 |
+
426,
|
| 694 |
+
882,
|
| 695 |
+
552
|
| 696 |
+
],
|
| 697 |
+
"page_idx": 3
|
| 698 |
+
},
|
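To make the matching and filtering procedure described above concrete, the following is a minimal Python sketch of such a reflexive-suffix proxy. It is not the authors' code: the regular expression, the stoplist of standard reflexives and common variations, and the example lines are illustrative assumptions.

```python
import re
from collections import Counter

# Proxy neopronouns via the reflexive suffixes "self"/"selves".
# The pattern and the stoplist below are illustrative assumptions, not the authors' exact setup.
REFLEXIVE = re.compile(r"\b([a-z]+(?:self|selves))\b", re.IGNORECASE)

# Non-3rd-person reflexives and common variations to filter out (the real pipeline additionally
# removes other frequent non-pronoun expressions found in the data, e.g., "do-it-yourself").
STOPLIST = {
    "myself", "yourself", "yourselves", "ourself", "ourselves",
    "themselves", "itself", "himself", "herself", "oneself", "urself",
}

def neopronoun_candidates(lines):
    """Count candidate reflexive tokens (lowercased) over an iterable of text lines."""
    counts = Counter()
    for line in lines:
        for match in REFLEXIVE.findall(line):
            token = match.lower()
            if token not in STOPLIST:
                counts[token] += 1
    return counts

if __name__ == "__main__":
    demo = [
        "I asked what I could do to help zir, and ze said ze would feel better about zirself.",
        "You can use bun/buns/bunself if you are feeling special.",
        "Please keep that to yourself.",  # filtered: non-3rd-person reflexive
    ]
    print(neopronoun_candidates(demo))  # Counter({'zirself': 1, 'bunself': 1})
```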
| 699 |
+
{
|
| 700 |
+
"type": "text",
|
| 701 |
+
"text": "Results. An initial manual analysis reveals that, unsurprisingly, many of the matches are false positives, i.e., not real neopronouns (e.g., non-self, a common concept in Buddhist philosophy, and to myself, a common spelling mistake of to myself). However, our method still finds relevant cases. Examples are depicted in Table 2. Many discussions in which we detect nounself pronouns center on the phenomena themselves, including, e.g., individuals stating that they are interested in using a specific pronoun or individuals stating that they refuse to acknowledge the phenomenon. Some discussions involve people reporting on personal experiences and problems and seeking advice. To obtain a quantitative view of the results, we plot the ranks (i.e., number of occurrences of a token) against their number of tokens (Figure 1). The result is a highly skewed Zipf's distribution: while the highest ranks appear only once (e.g., themself with 24,697 mentions), some tokens appear only a couple of times (e.g., the neopronoun xemself with 24 mentions), and the vast majority appears only once (e.g., many",
|
| 702 |
+
"bbox": [
|
| 703 |
+
507,
|
| 704 |
+
565,
|
| 705 |
+
884,
|
| 706 |
+
919
|
| 707 |
+
],
|
| 708 |
+
"page_idx": 3
|
| 709 |
+
},
|
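The rank/count view described above (and shown in Figure 1) can be reproduced schematically as follows. This is a minimal sketch that assumes a `Counter` of token frequencies such as the one produced by the previous snippet; matplotlib is only an illustrative plotting choice.

```python
from collections import Counter
import matplotlib.pyplot as plt

def plot_rank_counts(token_counts: Counter) -> None:
    """Plot token ranks (frequencies) against how many tokens share each rank, on log scales."""
    # rank = number of occurrences of a token; rank_counts[r] = number of tokens with that rank
    rank_counts = Counter(token_counts.values())
    ranks = sorted(rank_counts)
    counts = [rank_counts[r] for r in ranks]
    plt.scatter(ranks, counts)
    plt.xscale("log")
    plt.yscale("log")
    plt.xlabel("token rank (number of occurrences)")
    plt.ylabel("number of tokens with this rank")
    plt.show()

if __name__ == "__main__":
    # Illustrative counts only; the real distribution is computed over all Reddit matches.
    demo = Counter({"themself": 24697, "xemself": 24, "meowself": 3, "peachself": 1, "bunself": 1})
    plot_rank_counts(demo)
```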
| 710 |
+
{
|
| 711 |
+
"type": "table",
|
| 712 |
+
"img_path": "images/f5731ce9675bb2ef96b352cd7bbd9bd2f41bc04e5584612ab68da6decadd3e28.jpg",
|
| 713 |
+
"table_caption": [],
|
| 714 |
+
"table_footnote": [],
|
| 715 |
+
"table_body": "<table><tr><td>Match</td><td>Subreddit</td><td>Thread Title</td><td>Thread Excerpt</td></tr><tr><td rowspan=\"2\">meowself</td><td>monsterhunterage</td><td>Fureedom Mewnite can die in my litterbox.</td><td>I don’t like this game. But I still want meowself to play it, meow. Cause it’s fun, even though I hate it.</td></tr><tr><td>offmychest</td><td>Neopronouns are going too far.</td><td>I get some pronouns like ze/ziR, xe/xem, etc. I agree with those. But why are people using ghost/ghostself and meow/meowself? That’s really utter bullshit.</td></tr><tr><td rowspan=\"2\">bunself</td><td>TiADiscussion</td><td>I am genderfluid, pansexual, and mentally ill. I have a lot of SJW friends. AMA!</td><td>They/them pronouns are coolest with me, but I won’t be angry if you use he or she. You can use bun/buns/bunself, if you are feeling special. (That was a joke.)</td></tr><tr><td>rpdkcirclejerk</td><td>Xi am so proud to announce that the new word of the year is....</td><td>-Cinnagender- which means you identify with our beloved and innocent cinnamon buns. The pronoun set is cinne/cinns/cinnself or alternatively bun/buns/bunself i am so happy to be a member of a community that ignores the oppressive gender binary, which is a social construct, i.e., it is not real</td></tr><tr><td rowspan=\"2\">zirself</td><td>mypartneristrans</td><td>Ran into our first roadblock</td><td>I asked what I could do to help zir lowering the feeling of disphoria, and ze said zd maybe feel better about zirself if zd drink a tea.</td></tr><tr><td>Negareddit</td><td>No, Redditors. If you’re a horrible person online, you’re probably a horrible person offline too.</td><td>Hello folks. Omg. I think this person is about to kill zirself! (emphasis on "zirself". COMEDIC GENIUS)</td></tr></table>",
|
| 716 |
+
"bbox": [
|
| 717 |
+
114,
|
| 718 |
+
80,
|
| 719 |
+
884,
|
| 720 |
+
379
|
| 721 |
+
],
|
| 722 |
+
"page_idx": 4
|
| 723 |
+
},
|
| 724 |
+
{
|
| 725 |
+
"type": "text",
|
| 726 |
+
"text": "Table 2: Example neopronouns and corresponding excerpts from Reddit retrieved via our heuristic method. We slightly modified the excerpts to lower searchability and increase the privacy of the users.",
|
| 727 |
+
"bbox": [
|
| 728 |
+
112,
|
| 729 |
+
387,
|
| 730 |
+
882,
|
| 731 |
+
417
|
| 732 |
+
],
|
| 733 |
+
"page_idx": 4
|
| 734 |
+
},
|
| 735 |
+
{
|
| 736 |
+
"type": "text",
|
| 737 |
+
"text": "nounself pronouns such as peachself).",
|
| 738 |
+
"bbox": [
|
| 739 |
+
112,
|
| 740 |
+
441,
|
| 741 |
+
400,
|
| 742 |
+
458
|
| 743 |
+
],
|
| 744 |
+
"page_idx": 4
|
| 745 |
+
},
|
| 746 |
+
{
|
| 747 |
+
"type": "text",
|
| 748 |
+
"text": "5 How Can and Should We Model Pronouns?",
|
| 749 |
+
"text_level": 1,
|
| 750 |
+
"bbox": [
|
| 751 |
+
112,
|
| 752 |
+
464,
|
| 753 |
+
426,
|
| 754 |
+
495
|
| 755 |
+
],
|
| 756 |
+
"page_idx": 4
|
| 757 |
+
},
|
| 758 |
+
{
|
| 759 |
+
"type": "text",
|
| 760 |
+
"text": "We devise five desiderata based on our previous observations, personal experiences, and expert knowledge from interactions with LGBTQIA+ associates. Additionally, we collect informal feedback from individuals who use gender-neutral pronouns. We then assess how well classic and novel pronoun modeling paradigms fulfil the five criteria.",
|
| 761 |
+
"bbox": [
|
| 762 |
+
112,
|
| 763 |
+
504,
|
| 764 |
+
489,
|
| 765 |
+
617
|
| 766 |
+
],
|
| 767 |
+
"page_idx": 4
|
| 768 |
+
},
|
| 769 |
+
{
|
| 770 |
+
"type": "text",
|
| 771 |
+
"text": "5.1 Desiderata",
|
| 772 |
+
"text_level": 1,
|
| 773 |
+
"bbox": [
|
| 774 |
+
112,
|
| 775 |
+
624,
|
| 776 |
+
247,
|
| 777 |
+
639
|
| 778 |
+
],
|
| 779 |
+
"page_idx": 4
|
| 780 |
+
},
|
| 781 |
+
{
|
| 782 |
+
"type": "list",
|
| 783 |
+
"sub_type": "text",
|
| 784 |
+
"list_items": [
|
| 785 |
+
"D1. Refrain from assuming an individual's identity and pronouns. A model should not assume an individual's identity, e.g., gender, or pronouns based on, e.g., statistical cues about an individual's name, also not in a binary gender setup. Only because the name John typically appears together with the pronoun he, the model should never assume that a person with the name John identifies as a man and that every John uses the pronoun he.",
|
| 786 |
+
"D2. Allow for the existing sets of pronouns as well as for neopronouns. A model should be able to handle not only the existing set of \"standard\" pronouns in a language but also other existing pronouns, e.g., neopronouns.",
|
| 787 |
+
"D3. Allow for novel pronouns at any point in time. On top of D2, a model should allow for"
|
| 788 |
+
],
|
| 789 |
+
"bbox": [
|
| 790 |
+
112,
|
| 791 |
+
646,
|
| 792 |
+
489,
|
| 793 |
+
917
|
| 794 |
+
],
|
| 795 |
+
"page_idx": 4
|
| 796 |
+
},
|
| 797 |
+
{
|
| 798 |
+
"type": "text",
|
| 799 |
+
"text": "novel, i.e., unseen, pronouns to appear at any point in time. This condition is necessary to handle the fact that neopronouns are not a fixed set, but evolving, and because related phenomena (emojiself and nameself pronouns) turn pronouns from a closed to an open class part of speech.",
|
| 800 |
+
"bbox": [
|
| 801 |
+
507,
|
| 802 |
+
441,
|
| 803 |
+
884,
|
| 804 |
+
539
|
| 805 |
+
],
|
| 806 |
+
"page_idx": 4
|
| 807 |
+
},
|
| 808 |
+
{
|
| 809 |
+
"type": "list",
|
| 810 |
+
"sub_type": "text",
|
| 811 |
+
"list_items": [
|
| 812 |
+
"D4. Allow for multiple, alternating, and changing pronouns. A model should not assume that the pronoun set for an individuum at time $t$ will be the same as at time $t - 1$ . Even within the same sequence, pronoun sets might change.",
|
| 813 |
+
"D5. Provide an option to set up individuals' sets of pronouns. While most NLP models are trained offline and do not interact with the user, some are designed to interact with individuals, e.g., dialog systems. In this context, setting up individuals' sets of pronouns can help avoid harmful interactions (depending on the concrete sociotechnical deployment scenario)."
|
| 814 |
+
],
|
| 815 |
+
"bbox": [
|
| 816 |
+
507,
|
| 817 |
+
546,
|
| 818 |
+
885,
|
| 819 |
+
765
|
| 820 |
+
],
|
| 821 |
+
"page_idx": 4
|
| 822 |
+
},
|
| 823 |
+
{
|
| 824 |
+
"type": "text",
|
| 825 |
+
"text": "5.2 Modeling Paradigms",
|
| 826 |
+
"text_level": 1,
|
| 827 |
+
"bbox": [
|
| 828 |
+
507,
|
| 829 |
+
772,
|
| 830 |
+
721,
|
| 831 |
+
790
|
| 832 |
+
],
|
| 833 |
+
"page_idx": 4
|
| 834 |
+
},
|
| 835 |
+
{
|
| 836 |
+
"type": "text",
|
| 837 |
+
"text": "We compare four general modeling paradigms with D1-D5 in Table 3.",
|
| 838 |
+
"bbox": [
|
| 839 |
+
507,
|
| 840 |
+
797,
|
| 841 |
+
882,
|
| 842 |
+
827
|
| 843 |
+
],
|
| 844 |
+
"page_idx": 4
|
| 845 |
+
},
|
| 846 |
+
{
|
| 847 |
+
"type": "text",
|
| 848 |
+
"text": "Classic Statistical Modeling. Traditionally, pronouns have been treated as a closed word class. Generally, statistical models do not make assumptions about this (except if the vocabulary is manually curated). However, in models exploiting cooc",
|
| 849 |
+
"bbox": [
|
| 850 |
+
507,
|
| 851 |
+
838,
|
| 852 |
+
885,
|
| 853 |
+
919
|
| 854 |
+
],
|
| 855 |
+
"page_idx": 4
|
| 856 |
+
},
|
| 857 |
+
{
|
| 858 |
+
"type": "table",
|
| 859 |
+
"img_path": "images/727a9a6d4f85f7833ab713778e1ebec7afe145510e4cbd504fc392f26489df56.jpg",
|
| 860 |
+
"table_caption": [],
|
| 861 |
+
"table_footnote": [],
|
| 862 |
+
"table_body": "<table><tr><td>Paradigm</td><td>D1</td><td>D2</td><td>D3</td><td>D4</td><td>D5</td></tr><tr><td>Classic</td><td>X</td><td>X</td><td>X</td><td>X</td><td>X</td></tr><tr><td>Bucketing</td><td>X</td><td>✓</td><td>✓</td><td>?</td><td>X</td></tr><tr><td>Delexicalization</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>X</td></tr><tr><td>Post-hoc</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr></table>",
|
| 863 |
+
"bbox": [
|
| 864 |
+
117,
|
| 865 |
+
80,
|
| 866 |
+
485,
|
| 867 |
+
158
|
| 868 |
+
],
|
| 869 |
+
"page_idx": 5
|
| 870 |
+
},
|
| 871 |
+
{
|
| 872 |
+
"type": "text",
|
| 873 |
+
"text": "curences, e.g., via word embeddings (GloVe (Pennington et al., 2014)) or deep language models (BERT (Devlin et al., 2019), RoBERTa (Liu et al., 2019)), the models will likely misrepresent underrepresented pronoun-related phenomena. Dev et al. (2021) provided an initial insight by showing that singular they and the neopronouns $xe$ and $ze$ do not have meaningful vectors in GloVe and BERT.",
|
| 874 |
+
"bbox": [
|
| 875 |
+
112,
|
| 876 |
+
222,
|
| 877 |
+
489,
|
| 878 |
+
351
|
| 879 |
+
],
|
| 880 |
+
"page_idx": 5
|
| 881 |
+
},
|
| 882 |
+
{
|
| 883 |
+
"type": "text",
|
| 884 |
+
"text": "Bucketing. One option, previously discussed by Dev et al. (2021), is to apply bucketing, i.e., to decide on a fixed number of majority classes, e.g., male pronouns, female pronouns, and one or multiple classes for the \"rest of the pronouns\", e.g., other. The advantage of this approach is that it can map existing and novel pronouns to the other class. However, it still makes identity assumptions – and due to unequal representations of main and other classes, it will inevitably lead to discrimination.",
|
| 885 |
+
"bbox": [
|
| 886 |
+
112,
|
| 887 |
+
357,
|
| 888 |
+
489,
|
| 889 |
+
517
|
| 890 |
+
],
|
| 891 |
+
"page_idx": 5
|
| 892 |
+
},
|
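As an illustration of the bucketing paradigm just described, the following sketch maps pronoun surface forms onto a small set of fixed classes with a catch-all other bucket. The class names and pronoun lists are assumptions for illustration, not a proposal.

```python
# Bucketing: map every pronoun onto a fixed set of majority classes plus "other".
# The inventory below is illustrative; any unseen or novel pronoun falls into OTHER,
# which is why this paradigm still makes identity assumptions for the main classes.
MASCULINE = {"he", "him", "his", "himself"}
FEMININE = {"she", "her", "hers", "herself"}

def bucket(pronoun: str) -> str:
    token = pronoun.lower()
    if token in MASCULINE:
        return "MASC"
    if token in FEMININE:
        return "FEM"
    return "OTHER"  # they, xe, ze, bun, emojiself pronouns, novel pronouns, ...

assert bucket("He") == "MASC"
assert bucket("xem") == "OTHER"
```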
| 893 |
+
{
|
| 894 |
+
"type": "text",
|
| 895 |
+
"text": "No Modeling – Delexicalization. Given that the classic approach and bucketing both lead to unfair treatment of underrepresented groups, the alternative is to explicitly not model pronouns in their surface forms. This process, commonly named delexicalization, has proved helpful for other tasks where models capture misleading lexical information, e.g., fact verification (e.g., Suntwal et al., 2019), or resource-lean scenarios, e.g., cross-lingual parsing (e.g., McDonald et al., 2011). In this case, the model is forced to not rely on spurious lexical cues related to gender, e.g., that John occurs most often with the pronoun he. Instead, the model learns a single representation for all pronouns and relies on other task-related conceptual and commonsense information for disambiguation.",
|
| 896 |
+
"bbox": [
|
| 897 |
+
112,
|
| 898 |
+
523,
|
| 899 |
+
489,
|
| 900 |
+
781
|
| 901 |
+
],
|
| 902 |
+
"page_idx": 5
|
| 903 |
+
},
|
| 904 |
+
{
|
| 905 |
+
"type": "text",
|
| 906 |
+
"text": "Post-hoc Injection of Modeling Information/ Modeling at Test Time. For human-to-human interactions, several LGBTQIA+ guides recommend to (1) first try generic pronouns (e.g., singular they), and (2) switch to other sets of pronouns once the conversation partner communicates them. For",
|
| 907 |
+
"bbox": [
|
| 908 |
+
112,
|
| 909 |
+
787,
|
| 910 |
+
489,
|
| 911 |
+
883
|
| 912 |
+
],
|
| 913 |
+
"page_idx": 5
|
| 914 |
+
},
|
| 915 |
+
{
|
| 916 |
+
"type": "table",
|
| 917 |
+
"img_path": "images/a20f3064ae61b76ff63a7067f2333776c75ee2acd4b18b8602d4bd314bef67af.jpg",
|
| 918 |
+
"table_caption": [
|
| 919 |
+
"Table 3: Modeling paradigms and how they allow for fulfilling the desiderata D1-D5."
|
| 920 |
+
],
|
| 921 |
+
"table_footnote": [],
|
| 922 |
+
"table_body": "<table><tr><td></td><td>Train</td><td>Dev</td><td>Test</td><td>Total</td></tr><tr><td>PRP</td><td>64,476</td><td>7,881</td><td>8,067</td><td>80,424</td></tr><tr><td>PRP$</td><td>14,535</td><td>1,783</td><td>1,935</td><td>18,253</td></tr><tr><td>Total</td><td>79,011</td><td>9,664</td><td>10,002</td><td>98,677</td></tr></table>",
|
| 923 |
+
"bbox": [
|
| 924 |
+
512,
|
| 925 |
+
80,
|
| 926 |
+
882,
|
| 927 |
+
154
|
| 928 |
+
],
|
| 929 |
+
"page_idx": 5
|
| 930 |
+
},
|
| 931 |
+
{
|
| 932 |
+
"type": "text",
|
| 933 |
+
"text": "Table 4: Number of pronoun replacements in the training, development, and test portion of OntoNotes 5.0 for PRP and PRPS, respectively.",
|
| 934 |
+
"bbox": [
|
| 935 |
+
507,
|
| 936 |
+
162,
|
| 937 |
+
884,
|
| 938 |
+
206
|
| 939 |
+
],
|
| 940 |
+
"page_idx": 5
|
| 941 |
+
},
|
| 942 |
+
{
|
| 943 |
+
"type": "text",
|
| 944 |
+
"text": "uncommon or novel pronouns, several web pages have explicitly been set up for practising how to use them. $^{16}$ In this work, we propose that NLP systems should work similarly – if technically possible and depending on the concrete sociotechnical deployment scenario. To this end, we can use intermediate training procedures (e.g., Hung et al., 2021) for pronoun-related model refinement. E.g., we can use synthetic data created through similar procedures as the ones employed on these websites. Another option is only model pronouns at test time, e.g., through simple replacement procedures.",
|
| 945 |
+
"bbox": [
|
| 946 |
+
507,
|
| 947 |
+
231,
|
| 948 |
+
885,
|
| 949 |
+
425
|
| 950 |
+
],
|
| 951 |
+
"page_idx": 5
|
| 952 |
+
},
|
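One way to read the test-time replacement option above is as a simple substitution wrapper around an existing system: a user-declared pronoun set is mapped onto a pronoun set the model handles well before inference and mapped back afterwards. The sketch below is an assumption about how such a wrapper could look, not a description of an existing implementation, and the mapping to singular they is only one possible choice.

```python
import re

def to_model_space(text: str, user_set: dict) -> str:
    """Replace a user's declared pronoun forms with forms the system handles well."""
    out = text
    for user_form, model_form in user_set.items():
        out = re.sub(rf"\b{re.escape(user_form)}\b", model_form, out, flags=re.IGNORECASE)
    return out

def from_model_space(text: str, user_set: dict) -> str:
    """Map the generic forms in the system output back to the user's declared pronouns."""
    out = text
    for user_form, model_form in user_set.items():
        out = re.sub(rf"\b{re.escape(model_form)}\b", user_form, out, flags=re.IGNORECASE)
    return out

# Example: a user who declared xe/xem/xyr/xemself (hypothetical mapping for illustration).
# Note: this sketch ignores capitalization and inflection issues on the way back.
user_set = {"xe": "they", "xem": "them", "xyr": "their", "xemself": "themself"}
masked = to_model_space("Xe said xe would bring xyr laptop.", user_set)
print(masked)                              # they said they would bring their laptop.
print(from_model_space(masked, user_set))  # xe said xe would bring xyr laptop.
```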
| 953 |
+
{
|
| 954 |
+
"type": "text",
|
| 955 |
+
"text": "6 How Much Would We Loose?",
|
| 956 |
+
"text_level": 1,
|
| 957 |
+
"bbox": [
|
| 958 |
+
507,
|
| 959 |
+
430,
|
| 960 |
+
801,
|
| 961 |
+
445
|
| 962 |
+
],
|
| 963 |
+
"page_idx": 5
|
| 964 |
+
},
|
| 965 |
+
{
|
| 966 |
+
"type": "text",
|
| 967 |
+
"text": "In §5.2, we discussed delexicalization, i.e., not modeling lexical surface forms of pronouns, as one way to counter exclusion in statistical modeling and bucketing. However, a possible counterargument against this approach is that omitting the surface forms will lead to poor model performance on pronoun-related tasks. We experimentally quantify the loss from (fairer) delexicalization compared to statistical modeling in co-reference resolution.",
|
| 968 |
+
"bbox": [
|
| 969 |
+
507,
|
| 970 |
+
453,
|
| 971 |
+
885,
|
| 972 |
+
598
|
| 973 |
+
],
|
| 974 |
+
"page_idx": 5
|
| 975 |
+
},
|
| 976 |
+
{
|
| 977 |
+
"type": "text",
|
| 978 |
+
"text": "6.1 Experimental Setup",
|
| 979 |
+
"text_level": 1,
|
| 980 |
+
"bbox": [
|
| 981 |
+
507,
|
| 982 |
+
604,
|
| 983 |
+
714,
|
| 984 |
+
620
|
| 985 |
+
],
|
| 986 |
+
"page_idx": 5
|
| 987 |
+
},
|
| 988 |
+
{
|
| 989 |
+
"type": "text",
|
| 990 |
+
"text": "Task, Dataset, and Measures. We resort to co-reference resolution, a task where knowledge about pronouns and related gender identity assumptions play an important role. We use the English portion of the OntoNotes 5.0 dataset (Weischedel et al., 2012), which consists of texts annotated with co-reference information across five domains (news, conversational telephone speech, weblogs, usenet newsgroups, broadcast, and talk shows). We prepare three variants: (i) the first version consists of the plain original data; (ii) in the second variant, we replace all pronouns in the test set with the respective part-of-speech token, according to the Penn Treebank Project (Santorini, 1990), i.e., PRP for personal pronouns, and PRP$ for possessive pronouns. Finally, we provide a version (iii) in which",
|
| 991 |
+
"bbox": [
|
| 992 |
+
507,
|
| 993 |
+
626,
|
| 994 |
+
885,
|
| 995 |
+
883
|
| 996 |
+
],
|
| 997 |
+
"page_idx": 5
|
| 998 |
+
},
|
| 999 |
+
{
|
| 1000 |
+
"type": "page_footnote",
|
| 1001 |
+
"text": "15 In fact, accounting for novel pronouns and novel ways of using pronouns is a resource-lean scenario.",
|
| 1002 |
+
"bbox": [
|
| 1003 |
+
112,
|
| 1004 |
+
891,
|
| 1005 |
+
489,
|
| 1006 |
+
917
|
| 1007 |
+
],
|
| 1008 |
+
"page_idx": 5
|
| 1009 |
+
},
|
| 1010 |
+
{
|
| 1011 |
+
"type": "page_footnote",
|
| 1012 |
+
"text": "16E.g., https://www.pRACTicewithpronouns.com/#/?_k=66emp7",
|
| 1013 |
+
"bbox": [
|
| 1014 |
+
507,
|
| 1015 |
+
891,
|
| 1016 |
+
882,
|
| 1017 |
+
917
|
| 1018 |
+
],
|
| 1019 |
+
"page_idx": 5
|
| 1020 |
+
},
|
| 1021 |
+
{
|
| 1022 |
+
"type": "table",
|
| 1023 |
+
"img_path": "images/e40e95db026da65cc5c62427d93d46df15a30ed2c89704725b89965959010c7d.jpg",
|
| 1024 |
+
"table_caption": [],
|
| 1025 |
+
"table_footnote": [],
|
| 1026 |
+
"table_body": "<table><tr><td rowspan=\"3\">(Dobrovolskii, 2021)</td><td colspan=\"3\">MUC</td><td colspan=\"3\">CEAFφ4</td><td colspan=\"3\">B3</td><td colspan=\"3\">AVG</td></tr><tr><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td></tr><tr><td>84.9</td><td>87.9</td><td>86.3</td><td>76.1</td><td>77.1</td><td>76.6</td><td>77.4</td><td>82.6</td><td>79.9</td><td>-</td><td>-</td><td>81.0</td></tr><tr><td>- reproduction</td><td>84.7</td><td>87.5</td><td>86.1</td><td>75.6</td><td>76.7</td><td>76.1</td><td>77.2</td><td>82.0</td><td>79.5</td><td>79.2</td><td>82.1</td><td>80.6</td></tr><tr><td>- replace test set</td><td>69.7</td><td>70.7</td><td>70.2</td><td>63.2</td><td>49.1</td><td>55.2</td><td>50.1</td><td>56.1</td><td>52.9</td><td>61.0</td><td>58.6</td><td>59.4</td></tr><tr><td>Δrepl.test-repr.</td><td>-15.0</td><td>-16.8</td><td>-15.9</td><td>-12.4</td><td>-27.6</td><td>-20.9</td><td>-27.1</td><td>-25.9</td><td>-26.6</td><td>-18.2</td><td>-23.5</td><td>-21.2</td></tr><tr><td>- replace all</td><td>81.6</td><td>83.1</td><td>82.4</td><td>73.08</td><td>72.9</td><td>73.0</td><td>72.3</td><td>75.3</td><td>73.7</td><td>75.7</td><td>77.1</td><td>76.4</td></tr><tr><td>Δrepl.all-repr.</td><td>-3.1</td><td>-4.4</td><td>-3.7</td><td>-2.5</td><td>-3.8</td><td>-3.1</td><td>-4.9</td><td>-6.7</td><td>-5.8</td><td>-3.5</td><td>-5.0</td><td>-4.2</td></tr></table>",
|
| 1027 |
+
"bbox": [
|
| 1028 |
+
115,
|
| 1029 |
+
80,
|
| 1030 |
+
884,
|
| 1031 |
+
200
|
| 1032 |
+
],
|
| 1033 |
+
"page_idx": 6
|
| 1034 |
+
},
|
| 1035 |
+
{
|
| 1036 |
+
"type": "text",
|
| 1037 |
+
"text": "Table 5: Results of the delexicalization experiment. We report the results of the RoBERTa large-based word-level co-reference resolution model as reported by Dobrovolskii (2021), our reproduction, as well as variants trained and/ or tested on versions of the data set in which we replace the pronouns. All scores were produced using the official CoNLL-2012 scorer. We report precision (P), recall (R), and F1-score (F1) for MUC, $\\mathrm{CEAF}_{\\phi 4}$ , and $\\mathbf{B}^3$ respectively, as well as the averages (AVG). The rows highlighted in gray indicate the obtained losses.",
|
| 1038 |
+
"bbox": [
|
| 1039 |
+
110,
|
| 1040 |
+
209,
|
| 1041 |
+
884,
|
| 1042 |
+
284
|
| 1043 |
+
],
|
| 1044 |
+
"page_idx": 6
|
| 1045 |
+
},
|
| 1046 |
+
{
|
| 1047 |
+
"type": "text",
|
| 1048 |
+
"text": "we replace pronouns in the train, dev, and test splits. Note that our strategy is pessimistic, as we also replace non-3rd person pronouns, i.e., $I$ , you, ourselves, etc. We show the number of replacements in Table 4. For scoring, we use the official CoNLL2012 scorer (Pradhan et al., 2012). We report the results in terms of MUC (Vilain et al., 1995), $\\mathrm{B}^3$ (Bagga and Baldwin, 1998), and $\\mathrm{CEAF}_{\\phi 4}$ (Luo, 2005) precision, recall, and F1-measure, as well as the averages across these scores.",
|
| 1049 |
+
"bbox": [
|
| 1050 |
+
112,
|
| 1051 |
+
307,
|
| 1052 |
+
489,
|
| 1053 |
+
467
|
| 1054 |
+
],
|
| 1055 |
+
"page_idx": 6
|
| 1056 |
+
},
|
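The pronoun replacement used for variants (ii) and (iii) can be approximated with any tagger that emits Penn Treebank tags. The sketch below uses spaCy as an illustrative choice (the original preprocessing may differ) and replaces every token tagged PRP or PRP$ with the tag itself; like the paper's pessimistic setup, it also replaces non-3rd-person pronouns.

```python
import spacy

# Requires: python -m spacy download en_core_web_sm (illustrative model choice)
nlp = spacy.load("en_core_web_sm")

def delexicalize_pronouns(text: str) -> str:
    """Replace personal (PRP) and possessive (PRP$) pronouns with their Penn Treebank tag."""
    pieces = []
    for token in nlp(text):
        if token.tag_ in {"PRP", "PRP$"}:
            pieces.append(token.tag_ + token.whitespace_)  # keep original spacing
        else:
            pieces.append(token.text_with_ws)
    return "".join(pieces)

print(delexicalize_pronouns("She said her friend would lend us his car."))
# PRP said PRP$ friend would lend PRP PRP$ car.
```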
| 1057 |
+
{
|
| 1058 |
+
"type": "text",
|
| 1059 |
+
"text": "Models and Baselines. We want to obtain an intuition about the tradeoffs in the delexicalization setup, not to outperform previous results. For this reason, we resort to the recently proposed word-level co-reference model (Dobrovolskii, 2021), a highly efficient model competitive with the state-of-the-art. The model consists of a separate co-reference resolution module and a separate span extraction module. In an initial step, we compute token representations from a Transformer (Vaswani et al., 2017)-based encoder through aggregation of initial representations via learnable weights. In a next step, we compute co-reference relationships. To this end, the token representations are passed into an antecedent pruning procedure based on a bilinear scoring function for obtaining $k$ antecedent candidates for each token through coarse-grained scoring. Then an additional feed-forward neural network computes finer-grained scores. The final antecedent score is the sum of these two scores. We select the candidate with the highest score as the antecedent. Negative scores indicate no antecedent for a token. Tokens assumed to be part of a co-reference relationship are passed into the span extraction module. The module consists of an additional feed-forward network, which is followed by convolutions with two output channels (for start",
|
| 1060 |
+
"bbox": [
|
| 1061 |
+
110,
|
| 1062 |
+
483,
|
| 1063 |
+
489,
|
| 1064 |
+
920
|
| 1065 |
+
],
|
| 1066 |
+
"page_idx": 6
|
| 1067 |
+
},
|
| 1068 |
+
{
|
| 1069 |
+
"type": "text",
|
| 1070 |
+
"text": "and end scores). For further details see the original work. Our baseline is the model trained and evaluated on the original OntoNotes portions (reproduction). We compare with the evaluation of this model on the pronoun-replaced test set (replace test set) and a version of this model trained on the replaced training set and evaluated on the replaced test set, respectively (replace all).",
|
| 1071 |
+
"bbox": [
|
| 1072 |
+
505,
|
| 1073 |
+
307,
|
| 1074 |
+
884,
|
| 1075 |
+
437
|
| 1076 |
+
],
|
| 1077 |
+
"page_idx": 6
|
| 1078 |
+
},
|
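A compressed, illustrative rendering of the antecedent scoring described above (coarse bilinear pruning to k candidates, a finer feed-forward score, and their sum, with a negative best score meaning "no antecedent") might look as follows in PyTorch. Dimensions, the pair features, and all module names are assumptions; see Dobrovolskii (2021) for the actual architecture.

```python
import torch
import torch.nn as nn

class CoarseToFineAntecedentScorer(nn.Module):
    """Toy version of word-level coarse-to-fine antecedent scoring (illustrative only)."""

    def __init__(self, hidden: int = 768, k: int = 50):
        super().__init__()
        self.k = k
        self.bilinear = nn.Linear(hidden, hidden, bias=False)  # coarse bilinear scorer
        self.fine = nn.Sequential(                              # finer-grained FFNN scorer
            nn.Linear(2 * hidden, hidden), nn.ReLU(), nn.Linear(hidden, 1)
        )

    def forward(self, tokens: torch.Tensor) -> torch.Tensor:
        # tokens: (n, hidden) token representations from the encoder
        n, h = tokens.shape
        coarse = tokens @ self.bilinear(tokens).T                       # (n, n) coarse scores
        mask = torch.tril(torch.ones(n, n, dtype=torch.bool), diagonal=-1)
        coarse = coarse.masked_fill(~mask, float("-inf"))               # antecedents must precede
        k = min(self.k, n)
        top_scores, top_idx = coarse.topk(k, dim=-1)                    # prune to k candidates
        pairs = torch.cat([tokens.unsqueeze(1).expand(n, k, h),
                           tokens[top_idx]], dim=-1)                    # (n, k, 2h) pair features
        fine = self.fine(pairs).squeeze(-1)                             # (n, k) fine scores
        return top_scores + fine                                        # final = coarse + fine

scorer = CoarseToFineAntecedentScorer(hidden=16, k=4)
scores = scorer(torch.randn(6, 16))
# A token is predicted to have no antecedent if its best final score is negative.
print(scores.max(dim=-1).values < 0)
```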
| 1079 |
+
{
|
| 1080 |
+
"type": "text",
|
| 1081 |
+
"text": "Model Configuration, Training, and Optimization. We choose RoBERTa large (Liu et al., 2019) $^{17}$ as the base encoder and fix all other hyperparameters to the ones provided in the original implementation of Dobrovolskii (2021): the window size is set to 512 tokens, dropout rate to 0.3, the learning rate of the encoder is set to $1 \\cdot 10^{-5}$ and of the task-specific layers to $3 \\cdot 10^{-4}$ , respectively. We train the co-reference module with a combination of the negative log marginal likelihood and binary cross-entropy as an additional regularization factor (weight set to 0.5). The span extraction module is trained using cross-entropy loss. We optimize the sum of the two losses jointly with Adam (Kingma and Ba, 2015) for 20 epochs and apply early stopping based on validation set performance (word-level F1) with a patience of 3 epochs.",
|
| 1082 |
+
"bbox": [
|
| 1083 |
+
507,
|
| 1084 |
+
443,
|
| 1085 |
+
885,
|
| 1086 |
+
718
|
| 1087 |
+
],
|
| 1088 |
+
"page_idx": 6
|
| 1089 |
+
},
|
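The two learning rates and the joint objective described above map naturally onto PyTorch parameter groups. The following fragment is a sketch of that setup under assumed module names (encoder, coref_head, span_head), not the original training code.

```python
import torch
import torch.nn as nn

# Stand-ins for the RoBERTa-large encoder and the task-specific heads (assumed names).
encoder = nn.Linear(1024, 1024)
coref_head = nn.Linear(1024, 1)
span_head = nn.Linear(1024, 2)

# Separate learning rates: 1e-5 for the encoder, 3e-4 for the task-specific layers.
optimizer = torch.optim.Adam([
    {"params": encoder.parameters(), "lr": 1e-5},
    {"params": list(coref_head.parameters()) + list(span_head.parameters()), "lr": 3e-4},
])

def training_step(nll_loss, bce_loss, span_ce_loss, bce_weight=0.5):
    """Joint objective: negative log marginal likelihood + weighted BCE regularizer + span cross-entropy."""
    loss = nll_loss + bce_weight * bce_loss + span_ce_loss
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss
```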
| 1090 |
+
{
|
| 1091 |
+
"type": "text",
|
| 1092 |
+
"text": "6.2 Results and Discussion",
|
| 1093 |
+
"text_level": 1,
|
| 1094 |
+
"bbox": [
|
| 1095 |
+
507,
|
| 1096 |
+
725,
|
| 1097 |
+
737,
|
| 1098 |
+
739
|
| 1099 |
+
],
|
| 1100 |
+
"page_idx": 6
|
| 1101 |
+
},
|
| 1102 |
+
{
|
| 1103 |
+
"type": "text",
|
| 1104 |
+
"text": "We show the results in Table 5. We are roughly able to reproduce the results reported by (Dobrovolskii, 2021), confirming the effectiveness of their approach and the validity of our experimental setup. When we replace pronouns in the test set, the results drop massively, with up to $-27.6$ percentage points $\\mathrm{CEAF}_{\\phi 4}$ recall. On average, the results drop by 21.2 percentage points in F1-measure. This decrease demonstrates the heavy reliance of",
|
| 1105 |
+
"bbox": [
|
| 1106 |
+
507,
|
| 1107 |
+
747,
|
| 1108 |
+
884,
|
| 1109 |
+
892
|
| 1110 |
+
],
|
| 1111 |
+
"page_idx": 6
|
| 1112 |
+
},
|
| 1113 |
+
{
|
| 1114 |
+
"type": "page_footnote",
|
| 1115 |
+
"text": "<sup>17</sup>https://huggingface.co/roberta-large",
|
| 1116 |
+
"bbox": [
|
| 1117 |
+
526,
|
| 1118 |
+
903,
|
| 1119 |
+
867,
|
| 1120 |
+
919
|
| 1121 |
+
],
|
| 1122 |
+
"page_idx": 6
|
| 1123 |
+
},
|
| 1124 |
+
{
|
| 1125 |
+
"type": "text",
|
| 1126 |
+
"text": "this model on the lexical surface forms of the pronoun sets seen in the training. However, when we replace the pronouns in the training portion of OntoNotes with the special tokens, we can mitigate these losses by a large margin (losses up to $-5.8$ B $^3$ F1, and on average $-4.2$ F1). These results are highly encouraging, given that a) we replaced all pronouns, including non-third person pronouns, and b) the model has not been trained on these placeholders in the pretraining phase. The model can not rely on possibly discriminating correlations between names or occupations and pronoun sets and will represent neopronouns the same way as it will represent established pronoun sets. So a delexicalization approach can increase fairness in co-reference resolution and retain high system performance, as we can expect even smaller drops from a more careful selection of replacements.",
|
| 1127 |
+
"bbox": [
|
| 1128 |
+
115,
|
| 1129 |
+
84,
|
| 1130 |
+
485,
|
| 1131 |
+
372
|
| 1132 |
+
],
|
| 1133 |
+
"page_idx": 7
|
| 1134 |
+
},
|
| 1135 |
+
{
|
| 1136 |
+
"type": "text",
|
| 1137 |
+
"text": "7 Conclusion",
|
| 1138 |
+
"text_level": 1,
|
| 1139 |
+
"bbox": [
|
| 1140 |
+
115,
|
| 1141 |
+
401,
|
| 1142 |
+
243,
|
| 1143 |
+
414
|
| 1144 |
+
],
|
| 1145 |
+
"page_idx": 7
|
| 1146 |
+
},
|
| 1147 |
+
{
|
| 1148 |
+
"type": "text",
|
| 1149 |
+
"text": "This work provides an initial overview of the plethora of current phenomena in 3rd person pronoun usage in the English language. For practical and ethical reasons, the NLP community should acknowledge the broad spectrum of possible identities and the respective manifestations in written and oral communication. Especially since many emerging phenomena are still under-researched, and even while it remains to be seen if and how these become more established ways of referring to individuals. Language is consistently evolving, and NLP researchers and practitioners should account for this to provide genuinely inclusive systems. Notably, pronouns, traditionally handled as a close class of words, currently seem to function closer to an open class. Based on our observations, which originate from literature research, research in non-academic publicly available writing, as well as a corpus study, we have defined a series of five desiderata and applied those to the discussion of existing and novel modeling paradigms. In this context, we raised the questions when and how to model pronouns and whether and how to include users in these decisions. We consider this document an initial and living draft and hope to start a broader discussion on the topic. Our study can inform future NLP research and beyond and serve as a starting point for creating novel modeling procedures. In the future, we will look at pronoun-related issues within concrete tasks and in multilingual scenarios.",
|
| 1150 |
+
"bbox": [
|
| 1151 |
+
115,
|
| 1152 |
+
437,
|
| 1153 |
+
485,
|
| 1154 |
+
917
|
| 1155 |
+
],
|
| 1156 |
+
"page_idx": 7
|
| 1157 |
+
},
|
| 1158 |
+
{
|
| 1159 |
+
"type": "text",
|
| 1160 |
+
"text": "Acknowledgments",
|
| 1161 |
+
"text_level": 1,
|
| 1162 |
+
"bbox": [
|
| 1163 |
+
512,
|
| 1164 |
+
85,
|
| 1165 |
+
670,
|
| 1166 |
+
99
|
| 1167 |
+
],
|
| 1168 |
+
"page_idx": 7
|
| 1169 |
+
},
|
| 1170 |
+
{
|
| 1171 |
+
"type": "text",
|
| 1172 |
+
"text": "The work of Anne Lauscher and Dirk Hovy is funded by the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation program (grant agreement No. 949944, INTEGRATOR). We thank Emily Bender and Chandler May for sharing their ideas related to our project.",
|
| 1173 |
+
"bbox": [
|
| 1174 |
+
512,
|
| 1175 |
+
107,
|
| 1176 |
+
880,
|
| 1177 |
+
218
|
| 1178 |
+
],
|
| 1179 |
+
"page_idx": 7
|
| 1180 |
+
},
|
| 1181 |
+
{
|
| 1182 |
+
"type": "text",
|
| 1183 |
+
"text": "Further Ethical Discussion",
|
| 1184 |
+
"text_level": 1,
|
| 1185 |
+
"bbox": [
|
| 1186 |
+
512,
|
| 1187 |
+
225,
|
| 1188 |
+
742,
|
| 1189 |
+
239
|
| 1190 |
+
],
|
| 1191 |
+
"page_idx": 7
|
| 1192 |
+
},
|
| 1193 |
+
{
|
| 1194 |
+
"type": "text",
|
| 1195 |
+
"text": "We have described and experimented with phenomena related to third-person pronouns focusing on the English language only. Naturally, this work comes with several limitations. For instance, while we pointed the reader to the variety of pronoun-related phenomena in other languages, a thorough multilingual and cross-lingual discussion would have exceeded the scope of this manuscript. This lacuna includes the discussion of neopronouns in other languages. Similarly, while we acknowledged identities beyond the binary gender as well as otherkin identities, due to our focus on pronouns, we did not investigate other identity-related terms. This aspect includes their handling in NLP and the range of issues related to identity-exclusivity. Finally, at the current state of the manuscript, the desiderata discussed are, as reported, based on our expert knowledge, our activities within the LGBTQIA+ community, and informal exchanges with individuals using gender-neutral pronouns. In the future, we will validate these assumptions through a structured survey to present a more inclusive perspective on the discussed issues.",
|
| 1196 |
+
"bbox": [
|
| 1197 |
+
512,
|
| 1198 |
+
247,
|
| 1199 |
+
880,
|
| 1200 |
+
615
|
| 1201 |
+
],
|
| 1202 |
+
"page_idx": 7
|
| 1203 |
+
},
|
| 1204 |
+
{
|
| 1205 |
+
"type": "text",
|
| 1206 |
+
"text": "References",
|
| 1207 |
+
"text_level": 1,
|
| 1208 |
+
"bbox": [
|
| 1209 |
+
512,
|
| 1210 |
+
640,
|
| 1211 |
+
606,
|
| 1212 |
+
653
|
| 1213 |
+
],
|
| 1214 |
+
"page_idx": 7
|
| 1215 |
+
},
|
| 1216 |
+
{
|
| 1217 |
+
"type": "list",
|
| 1218 |
+
"sub_type": "ref_text",
|
| 1219 |
+
"list_items": [
|
| 1220 |
+
"Amit Bagga and Breck Baldwin. 1998. Algorithms for scoring coreference chains. In Proc. Linguistic Coreference Workshop at the first Conf. on Language Resources and Evaluation (LREC), pages 563-566, Granada, Spain.",
|
| 1221 |
+
"Soumya Barikeri, Anne Lauscher, Ivan Vulic, and Goran Glavaš. 2021. RedditBias: A real-world resource for bias evaluation and debiasing of conversational language models. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1941-1955, Online. Association for Computational Linguistics.",
|
| 1222 |
+
"Solon Barocas, Kate Crawford, Aaron Shapiro, and Hanna Wallach. 2017. The problem with bias: Allocative versus representational harms in machine learning. In 9th Annual Conference of the Special"
|
| 1223 |
+
],
|
| 1224 |
+
"bbox": [
|
| 1225 |
+
512,
|
| 1226 |
+
661,
|
| 1227 |
+
880,
|
| 1228 |
+
917
|
| 1229 |
+
],
|
| 1230 |
+
"page_idx": 7
|
| 1231 |
+
},
|
| 1232 |
+
{
|
| 1233 |
+
"type": "list",
|
| 1234 |
+
"sub_type": "ref_text",
|
| 1235 |
+
"list_items": [
|
| 1236 |
+
"Interest Group for Computing, Information and Society.",
|
| 1237 |
+
"Su Lin Blodgett, Solon Barocas, Hal Daumé III, and Hanna Wallach. 2020. Language (technology) is power: A critical survey of \"bias\" in NLP. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5454-5476, Online. Association for Computational Linguistics.",
|
| 1238 |
+
"Tolga Bolukbasi, Kai-Wei Chang, James Y. Zou, Venkatesh Saligrama, and Adam Tauman Kalai. 2016. Man is to computer programmer as woman is to homemaker? debiasing word embeddings. In Advances in Neural Information Processing Systems 29: Annual Conference on Neural Information Processing Systems 2016, December 5-10, 2016, Barcelona, Spain, pages 4349-4357.",
|
| 1239 |
+
"Shikha Bordia and Samuel R. Bowman. 2019. Identifying and reducing gender bias in word-level language models. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Student Research Workshop, pages 7-15, Minneapolis, Minnesota. Association for Computational Linguistics.",
|
| 1240 |
+
"Judith Butler. 1990. Gender trouble, 1st edition. Routledge Classics, New York, NY, USA.",
|
| 1241 |
+
"Judith Butler. 2004. Undoing Gender, 1st edition. Routledge, New York, NY, USA.",
|
| 1242 |
+
"Yang Trista Cao and Hal Daumé III. 2021. Toward gender-inclusive coreference resolution: An analysis of gender and bias throughout the machine learning lifecycle*. Computational Linguistics, 47(3):615-661.",
|
| 1243 |
+
"Katharine A Cherry-Reid. 2020. *Music to Our Ears: Using a Queer Folk Song Pedagogy to do Gender and Sexuality Education*. Ph.D. thesis, University of Toronto (Canada).",
|
| 1244 |
+
"Kirby Conrod. 2019. Pronouns Raising and Emerging. Ph.D. thesis, University of Washington.",
|
| 1245 |
+
"Sunipa Dev, Tao Li, Jeff M. Phillips, and Vivek Srikumar. 2020. On measuring and mitigating biased inferences of word embeddings. In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, February 7-12, 2020, pages 7659-7666. AAAI Press.",
|
| 1246 |
+
"Sunipa Dev, Masoud Monajatipoor, Anaelia Ovalle, Arjun Subramonian, Jeff Phillips, and Kai-Wei Chang. 2021. Harms of gender exclusivity and challenges in non-binary representation in language technologies. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 1968-1994, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics."
|
| 1247 |
+
],
|
| 1248 |
+
"bbox": [
|
| 1249 |
+
115,
|
| 1250 |
+
85,
|
| 1251 |
+
489,
|
| 1252 |
+
917
|
| 1253 |
+
],
|
| 1254 |
+
"page_idx": 8
|
| 1255 |
+
},
|
| 1256 |
+
{
|
| 1257 |
+
"type": "list",
|
| 1258 |
+
"sub_type": "ref_text",
|
| 1259 |
+
"list_items": [
|
| 1260 |
+
"Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.",
|
| 1261 |
+
"Vladimir Dobrovolskii. 2021. Word-level coreference resolution. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7670-7675, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.",
|
| 1262 |
+
"Hila Gonen and Yoav Goldberg. 2019. Lipstick on a pig: Debiasing methods cover up systematic gender biases in word embeddings but do not remove them. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 609-614, Minneapolis, Minnesota. Association for Computational Linguistics.",
|
| 1263 |
+
"Ove Grandstrand. 1998. Identity and deception in the virtual community. In Peter Kollock and Marc Smith, editors, Communities in Cyberspace, 1st edition, chapter 2. Routledge, London, UK.",
|
| 1264 |
+
"Luis A Hercus. 1994. A grammar of the Arabana-Wangkangurru language, Lake Eyre Basin, South Australia (Pacific linguistics. Series C), 1st edition. Dept. of Linguistics, Research School of Pacific and Asian Studies, Australian National University.",
|
| 1265 |
+
"Jerry R Hobbs. 1978. Resolving pronoun references. *Lingua*, 44(4):311-338.",
|
| 1266 |
+
"Chia-Chien Hung, Anne Lauscher, Simone Paolo Ponzetto, and Goran Glavaš. 2021. DS-TOD: Efficient domain specialization for task oriented dialog. arXiv preprint arXiv:2110.08395.",
|
| 1267 |
+
"Emiko S. Kashima and Yoshihisa Kashima. 1998. Culture and language: The case of cultural dimensions and personal pronoun use. Journal of Cross-Cultural Psychology, 29(3):461-486.",
|
| 1268 |
+
"Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.",
|
| 1269 |
+
"Lex Konylly and Elizabeth Cowper. 2020. Gender diversity and morphosyntax: An account of singular they. Glossa: a journal of general linguistics, 5(1).",
|
| 1270 |
+
"Helene Seltzer Krauthamer. 2021. The Great Pronoun Shift: The Big Impact of Little Parts of Speech, 1st edition. Routledge."
|
| 1271 |
+
],
|
| 1272 |
+
"bbox": [
|
| 1273 |
+
510,
|
| 1274 |
+
85,
|
| 1275 |
+
884,
|
| 1276 |
+
917
|
| 1277 |
+
],
|
| 1278 |
+
"page_idx": 8
|
| 1279 |
+
},
|
| 1280 |
+
{
|
| 1281 |
+
"type": "list",
|
| 1282 |
+
"sub_type": "ref_text",
|
| 1283 |
+
"list_items": [
|
| 1284 |
+
"Keita Kurita, Nidhi Vyas, Ayush Parek, Alan W Black, and Yulia Tsvetkov. 2019. Measuring bias in contextualized word representations. In Proceedings of the First Workshop on Gender Bias in Natural Language Processing, pages 166-172, Florence, Italy. Association for Computational Linguistics.",
|
| 1285 |
+
"Anne Lauscher, Goran Glavas, Simone Paolo Ponzetto, and Ivan Vulic. 2020. A general framework for implicit and explicit debiasing of distributional word vector spaces. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 8131-8138.",
|
| 1286 |
+
"Anne Lauscher, Tobias Lueken, and Goran Glavaš. 2021. Sustainable modular debiasing of language models. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 4782-4797, Punta Cana, Dominican Republic. Association for Computational Linguistics.",
|
| 1287 |
+
"Joseph P Laycock. 2012. \"We are spirits of another sort\": Ontological rebellion and religious dimensions of the otherkin community. Nova Religio: The Journal of Alternative and Emergent Religions, 15(3):65-90.",
|
| 1288 |
+
"Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692.",
|
| 1289 |
+
"Xiaoqiang Luo. 2005. On coreference resolution performance metrics. In Proceedings of Human Language Technology Conference and Conference on Empirical Methods in Natural Language Processing, pages 25-32, Vancouver, British Columbia, Canada. Association for Computational Linguistics.",
|
| 1290 |
+
"Amin Maalouf. 2000. On identity, 1st, translated by barbara bray edition. Vintage.",
|
| 1291 |
+
"Ryan McDonald, Slav Petrov, and Keith Hall. 2011. Multi-source transfer of delexicalized dependency parsers. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 62-72, Edinburgh, Scotland, UK. Association for Computational Linguistics.",
|
| 1292 |
+
"Sebastian McGaughey. 2020. Understanding neopronouns. The Gay & Lesbian Review Worldwide, 27(2):27-29.",
|
| 1293 |
+
"John C McKay. 1993. On the term \"pronoun\" in italian grammars. Italica, 70(2):168-181.",
|
| 1294 |
+
"Ehm Hjorth Miltersen. 2016. Nounself pronouns: 3rd person personal pronouns as identity expression. Journal of Language Works-Sprogvidenskabeligt Studentertidsskrift, 1(1):37-62.",
|
| 1295 |
+
"Abd Muqit. 2012. Ideology and power relation reflected in the use of pronoun in osama bin laden's speech text. International Journal of Social Science and Humanity, 2(6):557."
|
| 1296 |
+
],
|
| 1297 |
+
"bbox": [
|
| 1298 |
+
115,
|
| 1299 |
+
85,
|
| 1300 |
+
489,
|
| 1301 |
+
917
|
| 1302 |
+
],
|
| 1303 |
+
"page_idx": 9
|
| 1304 |
+
},
|
| 1305 |
+
{
|
| 1306 |
+
"type": "list",
|
| 1307 |
+
"sub_type": "ref_text",
|
| 1308 |
+
"list_items": [
|
| 1309 |
+
"Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global vectors for word representation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, Doha, Qatar. Association for Computational Linguistics.",
|
| 1310 |
+
"Paul Postal, David A Reibel, and Sanford A Schane. 1969. On so-called pronouns in english. Readings in English transformational grammar, pages 12-25.",
|
| 1311 |
+
"Sameer Pradhan, Alessandro Moschitti, Nianwen Xue, Olga Uryupina, and Yuchen Zhang. 2012. Conll-2012 shared task: Modeling multilingual unrestricted coreference in ontonotes. In Joint Conference on EMNLP and CoNLL-Shared Task, pages 1-40.",
|
| 1312 |
+
"Chase Wesley Raymond. 2016. Linguistic reference in the negotiation of identity and action: Revisiting the t/v distinction. Language, 92:636-670.",
|
| 1313 |
+
"Julie Roberts. 2020. 2019 word of the year is “(my) pronouns,” word of the decade is singular “they” as voted by american dialect society. Press Release, American Dialect Society.",
|
| 1314 |
+
"Rachel Rudinger, Jason Naradowsky, Brian Leonard, and Benjamin Van Durme. 2018. Gender bias in coreference resolution. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 8-14, New Orleans, Louisiana. Association for Computational Linguistics.",
|
| 1315 |
+
"Beatrice Santorini. 1990. Part-of-speech tagging guidelines for the penn treebank project.",
|
| 1316 |
+
"Deven Santosh Shah, H. Andrew Schwartz, and Dirk Hovy. 2020. Predictive biases in natural language processing models: A conceptual framework and overview. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5248-5264, Online. Association for Computational Linguistics.",
|
| 1317 |
+
"Michael Spivak. 1990. The Joy of TeX: A Gourmet Guide to Typesetting with the AMSTeX Macro Package, 2nd edition. American Mathematical Society.",
|
| 1318 |
+
"Susan Stryker. 2017. Transgender history: The roots of today's revolution, 2nd edition. Seal Press.",
|
| 1319 |
+
"Sandeep Suntwal, Mithun Paul, Rebecca Sharp, and Mihai Surdeanu. 2019. On the importance of delexicalization for fact verification. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3413-3418, Hong Kong, China. Association for Computational Linguistics.",
|
| 1320 |
+
"Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all"
|
| 1321 |
+
],
|
| 1322 |
+
"bbox": [
|
| 1323 |
+
510,
|
| 1324 |
+
85,
|
| 1325 |
+
882,
|
| 1326 |
+
917
|
| 1327 |
+
],
|
| 1328 |
+
"page_idx": 9
|
| 1329 |
+
},
|
| 1330 |
+
{
|
| 1331 |
+
"type": "list",
|
| 1332 |
+
"sub_type": "ref_text",
|
| 1333 |
+
"list_items": [
|
| 1334 |
+
"you need. In Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 5998-6008.",
|
| 1335 |
+
"Marc Vilain, John D Burger, John Aberdeen, Dennis Connolly, and Lynette Hirschman. 1995. A model-theoretic coreference scoring scheme. In Sixth Message Understanding Conference (MUC-6): Proceedings of a Conference Held in Columbia, Maryland, November 6-8, 1995.",
|
| 1336 |
+
"Kellie Webster, Marta Recasens, Vera Axelrod, and Jason Baldridge. 2018. Mind the GAP: A balanced corpus of gendered ambiguous pronouns. Transactions of the Association for Computational Linguistics, 6:605-617.",
|
| 1337 |
+
"Ralph Weischedel, Sameer Pradhan, Lance Ramshaw, Jeff Kaufman, Michelle Franchini, Mohammed El-Bachouti, Nianwen Xue, Martha Palmer, Jena D Hwang, Claire Bonial, et al. 2012. Ontonotes release 5.0."
|
| 1338 |
+
],
|
| 1339 |
+
"bbox": [
|
| 1340 |
+
115,
|
| 1341 |
+
85,
|
| 1342 |
+
489,
|
| 1343 |
+
376
|
| 1344 |
+
],
|
| 1345 |
+
"page_idx": 10
|
| 1346 |
+
}
|
| 1347 |
+
]
|
2202.11xxx/2202.11923/2469feeb-f44a-460f-890c-2b605e397b0b_model.json
ADDED
|
@@ -0,0 +1,1927 @@
|
[
  [
    {"type": "title", "bbox": [0.17, 0.09, 0.83, 0.13], "angle": 0, "content": "Welcome to the Modern World of Pronouns: Identity-Inclusive Natural Language Processing beyond Gender"},
    {"type": "text", "bbox": [0.183, 0.144, 0.319, 0.158], "angle": 0, "content": "Anne Lauscher"},
    {"type": "text", "bbox": [0.211, 0.161, 0.292, 0.175], "angle": 0, "content": "MilaNLP"},
    {"type": "text", "bbox": [0.148, 0.178, 0.354, 0.193], "angle": 0, "content": "Universita Luigi Bocconi"},
    {"type": "text", "bbox": [0.201, 0.195, 0.3, 0.21], "angle": 0, "content": "Milan, Italy"},
    {"type": "text", "bbox": [0.128, 0.213, 0.374, 0.225], "angle": 0, "content": "anne.lauscher@unibocconi.it"},
    {"type": "text", "bbox": [0.431, 0.144, 0.569, 0.159], "angle": 0, "content": "Archie Crowley"},
    {"type": "text", "bbox": [0.452, 0.161, 0.546, 0.176], "angle": 0, "content": "Linguistics"},
    {"type": "text", "bbox": [0.382, 0.178, 0.617, 0.192], "angle": 0, "content": "University of South Carolina"},
    {"type": "text", "bbox": [0.416, 0.194, 0.583, 0.209], "angle": 0, "content": "Columbia, SC, USA"},
    {"type": "text", "bbox": [0.43, 0.213, 0.568, 0.226], "angle": 0, "content": "acrowley@sc.edu"},
    {"type": "text", "bbox": [0.703, 0.144, 0.797, 0.159], "angle": 0, "content": "Dirk Hovy"},
    {"type": "text", "bbox": [0.711, 0.161, 0.791, 0.175], "angle": 0, "content": "MilaNLP"},
    {"type": "text", "bbox": [0.648, 0.178, 0.853, 0.193], "angle": 0, "content": "Universita Luigi Bocconi"},
    {"type": "text", "bbox": [0.701, 0.194, 0.799, 0.21], "angle": 0, "content": "Milan, Italy"},
    {"type": "text", "bbox": [0.645, 0.213, 0.855, 0.225], "angle": 0, "content": "dirk.hovy@unibocconi.it"},
    {"type": "title", "bbox": [0.261, 0.253, 0.341, 0.268], "angle": 0, "content": "Abstract"},
    {"type": "text", "bbox": [0.143, 0.282, 0.461, 0.31], "angle": 0, "content": "Trigger warning: This paper contains some examples which might be offensive to some users."},
    {"type": "text", "bbox": [0.141, 0.321, 0.462, 0.691], "angle": 0, "content": "The works of pronouns is changing. From a closed class of words with few members to a much more open set of terms to reflect identities. However, Natural Language Processing (NLP) is barely reflecting this linguistic shift, even though recent work outlined the harms of gender-exclusive language technology. Particularly problematic is the current modeling 3rd person pronouns, as it largely ignores various phenomena like neopronouns, i.e., pronoun sets that are novel and not (yet) widely established. This omission contributes to the discrimination of marginalized and underrepresented groups, e.g., non-binary individuals. However, other identity-expression phenomena beyond gender are also ignored by current NLP technology. In this paper, we provide an overview of 3rd person pronoun issues for NLP. Based on our observations and ethical considerations, we define a series of desiderata for modeling pronouns in language technology. We evaluate existing and novel modeling approaches w.r.t. these desiderata qualitatively, and quantify the impact of a more discrimination-free approach on established benchmark data."},
    {"type": "title", "bbox": [0.115, 0.705, 0.26, 0.72], "angle": 0, "content": "1 Introduction"},
    {"type": "text", "bbox": [0.113, 0.728, 0.49, 0.873], "angle": 0, "content": "Pronouns are an essential component of many languages and often one of the most frequent word classes. Accordingly, NLP has long studied tasks related to them, e.g., pronoun resolution (e.g., Hobbs, 1978). Simplistically, they can be defined as \"a word (such as I, he, she, you, it, we, or they) that is used instead of a noun or noun phrase\".<sup>1</sup> Linguistic studies have pointed out the complexity of pronouns, though (e.g., Postal et al., 1969;"},
    {"type": "text", "bbox": [0.508, 0.253, 0.885, 0.494], "angle": 0, "content": "McKay, 1993). Pronouns can carry demographic information – in English, for example, information about the number of referees and a single referee's (grammatical) gender. Even more information can be conveyed by pronouns in other, non-pro-drop languages. Consider Arabana-Wangkangurru, a language spoken in Australia, in which a speaker uses different pronouns depending on whether the referee is part of the same social or ritual group (moiety) (Hercus, 1994). As such, pronouns shape how we perceive individuals and can even reflect cultural aspects (e.g., Kashima and Kashima, 1998) and ideologies (e.g., Muqit, 2012). Consequently, pronoun usage should be considered a sensitive aspect of natural language use."},
    {"type": "text", "bbox": [0.508, 0.497, 0.885, 0.754], "angle": 0, "content": "Accordingly, in many western societies, these phenomena have been drawing more and more attention. For instance, in 2020, the American Dialect Society voted “(My) Pronouns” as the 2019 Word of the Year and Singular “They” as the Word of the Decade (Roberts, 2020). Recently, there has been a shift in pronoun usage (Krauthamer, 2021), partially due to shifts in the perception of gender, driven by the queer-feminist discourse (e.g., Butler, 1990, 2004). Related to this is the open discussion of identity beyond binary gender. For instance, a person who does not identify their gender within the gender binary (e.g., a nonbinary or genderqueer person) might use singular “they” as their pronoun. Recently, the French dictionary “Le Robert” added the non-binary pronoun “iel” to its list of words.<sup>3</sup>"},
    {"type": "text", "bbox": [0.508, 0.757, 0.885, 0.805], "angle": 0, "content": "This \"social push\" to respect diverse gender identities also affects aspects of NLP. Recent studies have pointed out the potential harms from the cur"},
    {"type": "page_footnote", "bbox": [0.508, 0.821, 0.887, 0.859], "angle": 0, "content": "2Grammatical, biological, and self-identified gender should not be confounded, but are often treated interchangeably by lay audiences."},
    {"type": "page_footnote", "bbox": [0.508, 0.859, 0.887, 0.918], "angle": 0, "content": "3https://dictionary.lerobert.com/dis-moi-robert/raconte-moi-robert/mot-jour/pourquoi-le-robert-a-t-il-integre-lemot-iel-dans-son-dictionnaire-en-ligne.htm1"},
    {"type": "list", "bbox": [0.508, 0.821, 0.887, 0.918], "angle": 0, "content": null},
    {"type": "aside_text", "bbox": [0.023, 0.31, 0.061, 0.725], "angle": 270, "content": "arXiv:2202.11923v1 [cs.CL] 24 Feb 2022"}
  ],
  [
    {"type": "text", "bbox": [0.113, 0.085, 0.489, 0.198], "angle": 0, "content": "rent lack of non-binary representation in NLP data sets, embeddings, and tasks (Cao and Daumé III, 2021; Dev et al., 2021), and the related issue of unfair stereotyping of queer individuals (Barikeri et al., 2021). However, the research landscape on modern pronoun usage is still surprisingly scarce, hindering progress for a fair and inclusive NLP."},
    {"type": "text", "bbox": [0.117, 0.199, 0.49, 0.44], "angle": 0, "content": "Further linguistic research has identified identity aspects of pronouns beyond gender (Miltersen, 2016). Specifically, *nounself* pronouns, functionally turning pronouns from a *closed* to an *open* word class. To the best of our knowledge, these aspects have been completely ignored by NLP so far. We did not find a single work systematically describing all of the currently existing phenomena even just in English third-person pronoun usage (let alone other languages). In contrast, a fair number of discussions are taking place in queer Wikis and forums. While it is still unclear which of these phenomena will persist over the next decades, people are using and discussing them, and accordingly, we as a research community should adapt."},
    {"type": "text", "bbox": [0.113, 0.445, 0.49, 0.669], "angle": 0, "content": "Contributions. In this \"living draft\", 1) we are the first to provide a systematic overview of existing phenomena in English 3rd person pronoun usage. Our results will inform future NLP research on ethical NLP and non-binary representation. We provide the first NLP work acknowledging otherkin identities. We support our observations with a corpus analysis on Reddit. 2) Based on our overview, we derive five desiderata for modeling third-person pronouns. Based on these, 3) we discuss various existing and novel paradigms for when and how to model pronouns. 4) Finally, we quantify the impact of discrimination-free non-modeling of pronouns on a widely established benchmark."},
    {"type": "title", "bbox": [0.114, 0.676, 0.27, 0.691], "angle": 0, "content": "2 Related Work"},
    {"type": "text", "bbox": [0.113, 0.698, 0.49, 0.875], "angle": 0, "content": "While there are some works in NLP on gender-inclusion (e.g., Dev et al., 2021) and gender bias in static (e.g., Bolukbasi et al., 2016; Gonen and Goldberg, 2019; Lauscher et al., 2020, inter alia) and contextualized (e.g., Kurita et al., 2019; Bordia and Bowman, 2019; Lauscher et al., 2021, inter alia) language representations as well as works focusing on specific gender bias in downstream tasks, e.g., natural language inference (Dev et al., 2020) and co-reference resolution (e.g., Rudinger et al., 2018; Webster et al., 2018), we are not aware"},
    {"type": "text", "bbox": [0.113, 0.881, 0.489, 0.92], "angle": 0, "content": "4For instance, while we found hits for the Google Scholar query \"neopronoun\", we did not get any results for variants of \"nameself pronoun\", or \"emojiself pronoun\"."},
    {"type": "text", "bbox": [0.507, 0.085, 0.885, 0.472], "angle": 0, "content": "of any work that deals with the broader field of identity-inclusion. Thus, there is no other NLP work that deals with a larger variety of pronouns and acknowledges pronouns as an open word class. For surveys on the general topic of unfair bias in NLP we refer to Blodgett et al. (2020) and Shah et al. (2020). Recently, Dev et al. (2021) pointed broadly at the harms (Barocas et al., 2017) arising from gender-exclusivity in NLP. They surveyed queer individuals and assessed non-binary representations in existing data set and language representations. In contrast to them, we specifically look at third-person pronoun usage and how to model such phenomena. Webster et al. (2018) provide a balanced co-reference resolution corpus with a focus on the fair distribution of pronouns but only focus on the gendered binary case. Closest to us, Cao and Daumé III (2021) discuss gender inclusion throughout the machine learning pipeline beyond the binary gender conception. While they are also the first to consider non-binary pronouns, including some neopronouns, in co-reference resolution, they do not acknowledge the broader spectrum of identity-related pronoun phenomena."},
    {"type": "title", "bbox": [0.508, 0.479, 0.83, 0.495], "angle": 0, "content": "3 A Note on Identity and Pronouns"},
    {"type": "text", "bbox": [0.507, 0.502, 0.884, 0.614], "angle": 0, "content": "This work focuses on the relationship between identity and pronouns. Identity refers to an individual's self-concept, relating to the question of what makes each of us unique (Maalouf, 2000). It can be seen as a two-way process between an individual and others (Grandstrand, 1998), and relates to different dimensions, e.g., one's gender."},
    {"type": "text", "bbox": [0.508, 0.622, 0.884, 0.719], "angle": 0, "content": "Gender Identity. Gender identity, as opposed to gender expression or sex, is one's subjective sense of gender (Stryker, 2017). In this work, we conceptualize gender identities beyond the binary notion (man, woman), e.g., non-binary gender, transgender, agender, polygender, etc."},
    {"type": "text", "bbox": [0.508, 0.726, 0.885, 0.806], "angle": 0, "content": "Otherkin Identity. Individuals with otherkin identity do not entirely identify as human (Laycock, 2012), e.g., vamp. Miltersen (2016) note that otherkin individuals often identify with nounself pronouns matching their kin."},
    {"type": "text", "bbox": [0.507, 0.807, 0.885, 0.92], "angle": 0, "content": "Stryker (2017) highlights the strong relationship between gender identity and pronouns. As Raymond (2016) notes, pronoun choices construct the individual's identity in conversations and the relationship between interlocutors. According to Cao and Daumé III (2021), pronouns are a way of expressing referential gender. Referring to an indi"}
  ],
  [
    {"type": "table", "bbox": [0.129, 0.082, 0.478, 0.515], "angle": 0, "content": "<table><tr><td>Nom.</td><td>Acc.</td><td>Poss. (dep.)</td><td>Poss. (indep.)</td><td>Reflexive</td></tr><tr><td colspan=\"5\">Gendered Pronouns</td></tr><tr><td>he</td><td>him</td><td>his</td><td>his</td><td>himself</td></tr><tr><td>she</td><td>her</td><td>her</td><td>hers</td><td>herself</td></tr><tr><td colspan=\"5\">Gender-Neutral Pronouns</td></tr><tr><td>they</td><td>them</td><td>their</td><td>theirs</td><td>themselves</td></tr><tr><td colspan=\"5\">Neopronouns</td></tr><tr><td>thon</td><td>thon</td><td>thons</td><td>thons</td><td>thonself</td></tr><tr><td>e</td><td>em</td><td>es</td><td>ems</td><td>emself</td></tr><tr><td>ae</td><td>aer</td><td>aer</td><td>aers</td><td>aerself</td></tr><tr><td>co</td><td>co</td><td>cos</td><td>cos</td><td>coself</td></tr><tr><td>ve/ vi</td><td>ver/ vir</td><td>vis</td><td>vers/ virs</td><td>verself/ virself</td></tr><tr><td>xe</td><td>xem</td><td>xyr</td><td>xyrs</td><td>xemself</td></tr><tr><td>ey</td><td>em</td><td>eir</td><td>eirs</td><td>emself</td></tr><tr><td>e</td><td>em</td><td>eir</td><td>eirs</td><td>emself</td></tr><tr><td>ze</td><td>zir</td><td>zir</td><td>zirs</td><td>zirself</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr><tr><td colspan=\"5\">Nounself Pronouns</td></tr><tr><td>star</td><td>star</td><td>stars</td><td>stars</td><td>starselves</td></tr><tr><td>vam</td><td>vamp</td><td>vamps</td><td>vamps</td><td>vampself</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr><tr><td colspan=\"5\">Emojiself Pronouns</td></tr><tr><td></td><td></td><td>s</td><td>s</td><td>self</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr><tr><td colspan=\"5\">Numberself Pronouns</td></tr><tr><td>0</td><td>0</td><td>0s</td><td>0s</td><td>0self</td></tr><tr><td>1/3</td><td>1/3</td><td>1/3s</td><td>1/3s</td><td>1/3self</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr><tr><td colspan=\"5\">Nameself Pronouns</td></tr><tr><td>John</td><td>John</td><td>Johns</td><td>Johns</td><td>Johnselves</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr></table>"},
    {"type": "table_caption", "bbox": [0.114, 0.522, 0.49, 0.553], "angle": 0, "content": "Table 1: Non-exhaustive overview of phenomena related to third-person pronoun usage in English."},
    {"type": "text", "bbox": [0.113, 0.58, 0.489, 0.629], "angle": 0, "content": "vidual with sets of pronouns they do not identify with, e.g., resulting in misgendering, is considered harmful (Dev et al., 2021)."},
    {"type": "title", "bbox": [0.114, 0.638, 0.395, 0.671], "angle": 0, "content": "4 Phenomena in Third-person Pronoun-Usage"},
    {"type": "text", "bbox": [0.113, 0.679, 0.489, 0.711], "angle": 0, "content": "We describe existing phenomena and analyze their presence in a collection of threads from Reddit.<sup>5</sup>"},
    {"type": "title", "bbox": [0.114, 0.721, 0.322, 0.737], "angle": 0, "content": "4.1 Existing Phenomena"},
    {"type": "text", "bbox": [0.113, 0.746, 0.489, 0.89], "angle": 0, "content": "Overall, individuals can choose \\( n \\) sets of pronouns with \\( n \\geq 0 \\). If \\( n = 0 \\), the individual does not identify with any singular 3rd person pronoun. If \\( n > 1 \\), the individual identifies with more than one set of pronouns, possibly each set reflecting overlapping or non-overlapping aspects of their identity. We provide examples of these sets in Table 1. Note that this list is non-exhaustive and that the described phenomena are non-exclusive."},
    {"type": "text", "bbox": [0.508, 0.085, 0.885, 0.133], "angle": 0, "content": "Gendered Pronouns. In English, two sets of standard gendered pronouns are available, he/him/himself and she/her/herself."},
    {"type": "text", "bbox": [0.508, 0.141, 0.885, 0.271], "angle": 0, "content": "Gender-Neutral Pronouns. Given the history of generic singular they in English (e.g., Who was at the door? They left a note.), there has been an uptake of singular they by non-binary individuals as a gender-netural pronoun option\\(^{6}\\) (Conrod, 2019; Konyelly and Cowper, 2020). Further, there has been increasing institutional recognition with dictionaries and style guides supporting its use."},
    {"type": "text", "bbox": [0.508, 0.279, 0.885, 0.423], "angle": 0, "content": "Neopronouns. As an alternative to the singular they, individuals started creating and sharing novel sets of 3rd person pronouns (McGaughey, 2020). More traditional and rather well-known sets of neopronouns include, e.g., the so-called Spivak pronouns \\(e/emeɪs\\) (used in (Spivak, 1990)) and related variations. During our research, we were able to observe various subcategories of neopronouns, partially described in the academic literature."},
    {"type": "text", "bbox": [0.508, 0.431, 0.885, 0.591], "angle": 0, "content": "Nounself Pronouns. According to Miltersen (2016), nounself pronouns are pronouns that are “[...] prototypically transparently derived from a specific word, usually a noun”. Individuals may identify with certain nouns, possibly corresponding to distinct aspects of their identity, e.g., kitten/kittenself, vamp/vampself. The author notes the difficulty of clearly defining nounself pronouns, neopronouns, and other phenomena. The phenomenon is assumed to have first appeared in 2013."},
    {"type": "text", "bbox": [0.508, 0.599, 0.885, 0.759], "angle": 0, "content": "Emojiself Pronouns. Similar to nounself pronouns, individuals may identify with sets of emojis, possibly reflecting different aspects of their identity, e.g., self. Emojiself pronouns are intended for written communication. Note, that, at the time of writing this manuscript, there seem to exist no academic description of emojiself pronouns. However, we were able to find evidence of their existence on several social media platforms and wikis, e.g., Tumblr,\\(^{7}\\) MOGAI Wiki,\\(^{8}\\) Twitter,\\(^{9}\\) and Reddit.\\(^{10}\\)"},
    {"type": "page_footnote", "bbox": [0.508, 0.771, 0.887, 0.921], "angle": 0, "content": "\\(^{6}\\)https://gendercensus.com/results/2021-worldwide-summary/\\(^{7}\\)E.g., https://pronoun-archive.tumblr.com/post/188520170831\\(^{8}\\)https://mogai.miraheze.org/wiki/Emoj iself; according to the article, the origin of emojiself pronouns is unclear but might date back to 2017\\(^{9}\\)Example of a user complaining about LinkedIn not allowing for emojiself pronouns in the pronoun field: https://twitter.com/frozenpandaman/status/1412314202119700480/photo/1\\(^{10}\\)E.g., https://www.reddit.com/r/QueerVexi"},
    {"type": "footer", "bbox": [0.136, 0.904, 0.348, 0.919], "angle": 0, "content": "<sup>5</sup>https://www.reddit.com"}
  ],
  [
    {"type": "text", "bbox": [0.113, 0.085, 0.49, 0.166], "angle": 0, "content": "Numbers/ pronouns. Another form of neopronouns/ nounself pronouns are numberself pronouns. Analogous to before, we assume that here, the individual identifies or partially identified with a number, e.g., \\(0 / 0 / 0s / 0s / 0\\) self.\\(^{11}\\)"},
    {"type": "text", "bbox": [0.113, 0.172, 0.49, 0.221], "angle": 0, "content": "Nameself Pronouns. Individuals may identify with pronouns build from their name, e.g., John/Johnself, overlapping with nullpronomials.[12]"},
    {"type": "text", "bbox": [0.117, 0.226, 0.49, 0.515], "angle": 0, "content": "Alternating Pronouns. Given that people can identify with \\( n > 1 \\) sets of pronouns, the pronouns they identify with can be either equally identified-with sets, or change potentially depending on the context (mutopronominal). For instance, individuals who are also performer may use stage pronouns. Similarly, genderfluid individual may identify with a certain pronoun at a certain point in time (pronoun fluidity, (Cherry-Reid, 2020)). Some individuals identify with the pronouns of the person who is referring to them (mirroed pronouns). Other individuals use set(s) of auxiliary pronouns, e.g., for situations, in which individuals referring to them have problems with using the most identified-with sets of pronouns (e.g., in the case of emojiself pronouns and oral communication). Note that alternating pronoun sets may be even used in the same sentence for referring to the same individual.[13]"},
    {"type": "text", "bbox": [0.113, 0.521, 0.489, 0.587], "angle": 0, "content": "No Pronouns. Some individuals do not identify with any pronouns. In this case, some individuals identify most with their name being used to refer to them, nameself pronouns, or avoid pronouns."},
    {"type": "title", "bbox": [0.114, 0.598, 0.484, 0.614], "angle": 0, "content": "4.2 Corpus Analysis: Neopronouns in Reddit"},
    {"type": "text", "bbox": [0.113, 0.62, 0.489, 0.765], "angle": 0, "content": "Setup. We conduct an additional quantitative analysis for the presence of neopronouns in Reddit. To this end, we use Reddit threads created between 2010 and 2021, cleaned by previous work and provided through Huggingface Datasets (127,445,911 lines).<sup>14</sup> As we are interested in capturing novel pronouns and as the list of possible pronouns is indefinite, we proxy neopronouns via the suffixes self and selves indicating the reflexive case and"},
    {"type": "text", "bbox": [0.114, 0.771, 0.488, 0.919], "angle": 0, "content": "llology/comments/p09nek/i_made_a_flag_f \nor_the_emojiself_pronoun_set/ 11https://pronoun-provider.tumblr.com/ \npost/148452374817/i-think-numbers-as-pro \nnouns-would-be-pretty-cool 12https://pronoun.fandom.com/wiki/Null \npronominal 13https://www.reddit.com/r/NonBinary/c \ncomments/jasv5r/alternating Pronouns_in_s \namesentence/ 14https://huggingface.co/datasets/sent \nence-transformers/redit-title-body"},
    {"type": "image", "bbox": [0.526, 0.114, 0.867, 0.337], "angle": 0, "content": null},
    {"type": "image_caption", "bbox": [0.508, 0.354, 0.885, 0.398], "angle": 0, "content": "Figure 1: Token ranks (log-scale) and rank counts of the tokens returned against our reflexive regular expression pattern from Reddit with example annotations."},
    {"type": "text", "bbox": [0.508, 0.427, 0.884, 0.553], "angle": 0, "content": "match them through a regular expression. Additionally, we filter out non-3rd person pronouns (e.g., yourself, ourselves, plural themselves) as well as common variations of these (e.g., urself) and other common non-pronoun expressions we found in the data (e.g., do-it-yourself). This process leaves us with a total of 9,075 unique tokens with in total 74,768 textual mentions."},
    {"type": "text", "bbox": [0.508, 0.566, 0.885, 0.92], "angle": 0, "content": "Results. An initial manual analysis reveals that, unsurprisingly, many of the matches are false positives, i.e., not real neopronouns (e.g., non-self, a common concept in Buddhist philosophy, and to myself, a common spelling mistake of to myself). However, our method still finds relevant cases. Examples are depicted in Table 2. Many discussions in which we detect nounself pronouns center on the phenomena themselves, including, e.g., individuals stating that they are interested in using a specific pronoun or individuals stating that they refuse to acknowledge the phenomenon. Some discussions involve people reporting on personal experiences and problems and seeking advice. To obtain a quantitative view of the results, we plot the ranks (i.e., number of occurrences of a token) against their number of tokens (Figure 1). The result is a highly skewed Zipf's distribution: while the highest ranks appear only once (e.g., themself with 24,697 mentions), some tokens appear only a couple of times (e.g., the neopronoun xemself with 24 mentions), and the vast majority appears only once (e.g., many"}
  ],
  [
    {"type": "table", "bbox": [0.115, 0.082, 0.885, 0.38], "angle": 0, "content": "<table><tr><td>Match</td><td>Subreddit</td><td>Thread Title</td><td>Thread Excerpt</td></tr><tr><td rowspan=\"2\">meowself</td><td>monsterhunterage</td><td>Fureedom Mewnite can die in my litterbox.</td><td>I don’t like this game. But I still want meowself to play it, meow. Cause it’s fun, even though I hate it.</td></tr><tr><td>offmychest</td><td>Neopronouns are going too far.</td><td>I get some pronouns like ze/ziR, xe/xem, etc. I agree with those. But why are people using ghost/ghostself and meow/meowself? That’s really utter bullshit.</td></tr><tr><td rowspan=\"2\">bunself</td><td>TiADiscussion</td><td>I am genderfluid, pansexual, and mentally ill. I have a lot of SJW friends. AMA!</td><td>They/them pronouns are coolest with me, but I won’t be angry if you use he or she. You can use bun/buns/bunself, if you are feeling special. (That was a joke.)</td></tr><tr><td>rpdkcirclejerk</td><td>Xi am so proud to announce that the new word of the year is....</td><td>-Cinnagender- which means you identify with our beloved and innocent cinnamon buns. The pronoun set is cinne/cinns/cinnself or alternatively bun/buns/bunself i am so happy to be a member of a community that ignores the oppressive gender binary, which is a social construct, i.e., it is not real</td></tr><tr><td rowspan=\"2\">zirself</td><td>mypartneristrans</td><td>Ran into our first roadblock</td><td>I asked what I could do to help zir lowering the feeling of disphoria, and ze said zd maybe feel better about zirself if zd drink a tea.</td></tr><tr><td>Negareddit</td><td>No, Redditors. If you’re a horrible person online, you’re probably a horrible person offline too.</td><td>Hello folks. Omg. I think this person is about to kill zirself! (emphasis on "zirself". COMEDIC GENIUS)</td></tr></table>"},
    {"type": "table_caption", "bbox": [0.113, 0.388, 0.884, 0.418], "angle": 0, "content": "Table 2: Example neopronouns and corresponding excerpts from Reddit retrieved via our heuristic method. We slightly modified the excerpts to lower searchability and increase the privacy of the users."},
    {"type": "text", "bbox": [0.114, 0.442, 0.401, 0.459], "angle": 0, "content": "nounself pronouns such as peachself)."},
    {"type": "title", "bbox": [0.114, 0.465, 0.427, 0.497], "angle": 0, "content": "5 How Can and Should We Model Pronouns?"},
    {"type": "text", "bbox": [0.113, 0.505, 0.49, 0.618], "angle": 0, "content": "We devise five desiderata based on our previous observations, personal experiences, and expert knowledge from interactions with LGBTQIA+ associates. Additionally, we collect informal feedback from individuals who use gender-neutral pronouns. We then assess how well classic and novel pronoun modeling paradigms fulfil the five criteria."},
    {"type": "title", "bbox": [0.114, 0.625, 0.248, 0.64], "angle": 0, "content": "5.1 Desiderata"},
    {"type": "text", "bbox": [0.113, 0.648, 0.49, 0.793], "angle": 0, "content": "D1. Refrain from assuming an individual's identity and pronouns. A model should not assume an individual's identity, e.g., gender, or pronouns based on, e.g., statistical cues about an individual's name, also not in a binary gender setup. Only because the name John typically appears together with the pronoun he, the model should never assume that a person with the name John identifies as a man and that every John uses the pronoun he."},
    {"type": "text", "bbox": [0.113, 0.8, 0.49, 0.88], "angle": 0, "content": "D2. Allow for the existing sets of pronouns as well as for neopronouns. A model should be able to handle not only the existing set of \"standard\" pronouns in a language but also other existing pronouns, e.g., neopronouns."},
    {"type": "text", "bbox": [0.114, 0.888, 0.49, 0.919], "angle": 0, "content": "D3. Allow for novel pronouns at any point in time. On top of D2, a model should allow for"},
    {"type": "list", "bbox": [0.113, 0.648, 0.49, 0.919], "angle": 0, "content": null},
    {"type": "text", "bbox": [0.508, 0.442, 0.885, 0.54], "angle": 0, "content": "novel, i.e., unseen, pronouns to appear at any point in time. This condition is necessary to handle the fact that neopronouns are not a fixed set, but evolving, and because related phenomena (emojiself and nameself pronouns) turn pronouns from a closed to an open class part of speech."},
    {"type": "text", "bbox": [0.508, 0.548, 0.885, 0.629], "angle": 0, "content": "D4. Allow for multiple, alternating, and changing pronouns. A model should not assume that the pronoun set for an individuum at time \\( t \\) will be the same as at time \\( t - 1 \\). Even within the same sequence, pronoun sets might change."},
    {"type": "text", "bbox": [0.508, 0.637, 0.887, 0.766], "angle": 0, "content": "D5. Provide an option to set up individuals' sets of pronouns. While most NLP models are trained offline and do not interact with the user, some are designed to interact with individuals, e.g., dialog systems. In this context, setting up individuals' sets of pronouns can help avoid harmful interactions (depending on the concrete sociotechnical deployment scenario)."},
    {"type": "list", "bbox": [0.508, 0.548, 0.887, 0.766], "angle": 0, "content": null},
    {"type": "title", "bbox": [0.509, 0.774, 0.722, 0.791], "angle": 0, "content": "5.2 Modeling Paradigms"},
    {"type": "text", "bbox": [0.508, 0.799, 0.883, 0.828], "angle": 0, "content": "We compare four general modeling paradigms with D1-D5 in Table 3."},
    {"type": "text", "bbox": [0.508, 0.839, 0.886, 0.92], "angle": 0, "content": "Classic Statistical Modeling. Traditionally, pronouns have been treated as a closed word class. Generally, statistical models do not make assumptions about this (except if the vocabulary is manually curated). However, in models exploiting cooc"}
  ],
  [
    {"type": "table", "bbox": [0.118, 0.082, 0.486, 0.159], "angle": 0, "content": "<table><tr><td>Paradigm</td><td>D1</td><td>D2</td><td>D3</td><td>D4</td><td>D5</td></tr><tr><td>Classic</td><td>X</td><td>X</td><td>X</td><td>X</td><td>X</td></tr><tr><td>Bucketing</td><td>X</td><td>✓</td><td>✓</td><td>?</td><td>X</td></tr><tr><td>Delexicalization</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>X</td></tr><tr><td>Post-hoc</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr></table>"},
    {"type": "table_caption", "bbox": [0.114, 0.168, 0.489, 0.199], "angle": 0, "content": "Table 3: Modeling paradigms and how they allow for fulfilling the desiderata D1-D5."},
    {"type": "text", "bbox": [0.113, 0.223, 0.49, 0.352], "angle": 0, "content": "curences, e.g., via word embeddings (GloVe (Pennington et al., 2014)) or deep language models (BERT (Devlin et al., 2019), RoBERTa (Liu et al., 2019)), the models will likely misrepresent underrepresented pronoun-related phenomena. Dev et al. (2021) provided an initial insight by showing that singular they and the neopronouns \\(xe\\) and \\(ze\\) do not have meaningful vectors in GloVe and BERT."},
    {"type": "text", "bbox": [0.113, 0.358, 0.49, 0.518], "angle": 0, "content": "Bucketing. One option, previously discussed by Dev et al. (2021), is to apply bucketing, i.e., to decide on a fixed number of majority classes, e.g., male pronouns, female pronouns, and one or multiple classes for the \"rest of the pronouns\", e.g., other. The advantage of this approach is that it can map existing and novel pronouns to the other class. However, it still makes identity assumptions – and due to unequal representations of main and other classes, it will inevitably lead to discrimination."},
    {"type": "text", "bbox": [0.113, 0.524, 0.49, 0.782], "angle": 0, "content": "No Modeling – Delexicalization. Given that the classic approach and bucketing both lead to unfair treatment of underrepresented groups, the alternative is to explicitly not model pronouns in their surface forms. This process, commonly named delexicalization, has proved helpful for other tasks where models capture misleading lexical information, e.g., fact verification (e.g., Suntwal et al., 2019), or resource-lean scenarios, e.g., cross-lingual parsing (e.g., McDonald et al., 2011). In this case, the model is forced to not rely on spurious lexical cues related to gender, e.g., that John occurs most often with the pronoun he. Instead, the model learns a single representation for all pronouns and relies on other task-related conceptual and commonsense information for disambiguation."},
    {"type": "text", "bbox": [0.113, 0.788, 0.49, 0.884], "angle": 0, "content": "Post-hoc Injection of Modeling Information/ Modeling at Test Time. For human-to-human interactions, several LGBTQIA+ guides recommend to (1) first try generic pronouns (e.g., singular they), and (2) switch to other sets of pronouns once the conversation partner communicates them. For"},
    {"type": "table", "bbox": [0.513, 0.082, 0.883, 0.155], "angle": 0, "content": "<table><tr><td></td><td>Train</td><td>Dev</td><td>Test</td><td>Total</td></tr><tr><td>PRP</td><td>64,476</td><td>7,881</td><td>8,067</td><td>80,424</td></tr><tr><td>PRP$</td><td>14,535</td><td>1,783</td><td>1,935</td><td>18,253</td></tr><tr><td>Total</td><td>79,011</td><td>9,664</td><td>10,002</td><td>98,677</td></tr></table>"},
    {"type": "table_caption", "bbox": [0.509, 0.163, 0.885, 0.207], "angle": 0, "content": "Table 4: Number of pronoun replacements in the training, development, and test portion of OntoNotes 5.0 for PRP and PRPS, respectively."},
    {"type": "text", "bbox": [0.508, 0.232, 0.886, 0.426], "angle": 0, "content": "uncommon or novel pronouns, several web pages have explicitly been set up for practising how to use them.\\(^{16}\\) In this work, we propose that NLP systems should work similarly – if technically possible and depending on the concrete sociotechnical deployment scenario. To this end, we can use intermediate training procedures (e.g., Hung et al., 2021) for pronoun-related model refinement. E.g., we can use synthetic data created through similar procedures as the ones employed on these websites. Another option is only model pronouns at test time, e.g., through simple replacement procedures."},
    {"type": "title", "bbox": [0.509, 0.431, 0.803, 0.447], "angle": 0, "content": "6 How Much Would We Loose?"},
    {"type": "text", "bbox": [0.508, 0.454, 0.886, 0.599], "angle": 0, "content": "In §5.2, we discussed delexicalization, i.e., not modeling lexical surface forms of pronouns, as one way to counter exclusion in statistical modeling and bucketing. However, a possible counterargument against this approach is that omitting the surface forms will lead to poor model performance on pronoun-related tasks. We experimentally quantify the loss from (fairer) delexicalization compared to statistical modeling in co-reference resolution."},
    {"type": "title", "bbox": [0.509, 0.605, 0.715, 0.621], "angle": 0, "content": "6.1 Experimental Setup"},
    {"type": "text", "bbox": [0.508, 0.627, 0.886, 0.884], "angle": 0, "content": "Task, Dataset, and Measures. We resort to co-reference resolution, a task where knowledge about pronouns and related gender identity assumptions play an important role. We use the English portion of the OntoNotes 5.0 dataset (Weischedel et al., 2012), which consists of texts annotated with co-reference information across five domains (news, conversational telephone speech, weblogs, usenet newsgroups, broadcast, and talk shows). We prepare three variants: (i) the first version consists of the plain original data; (ii) in the second variant, we replace all pronouns in the test set with the respective part-of-speech token, according to the Penn Treebank Project (Santorini, 1990), i.e., PRP for personal pronouns, and PRP$ for possessive pronouns. Finally, we provide a version (iii) in which"},
    {"type": "page_footnote", "bbox": [0.114, 0.892, 0.49, 0.919], "angle": 0, "content": "15 In fact, accounting for novel pronouns and novel ways of using pronouns is a resource-lean scenario."},
    {"type": "page_footnote", "bbox": [0.509, 0.892, 0.883, 0.919], "angle": 0, "content": "16E.g., https://www.pRACTicewithpronouns.com/#/?_k=66emp7"}
  ],
  [
    {"type": "table", "bbox": [0.116, 0.082, 0.885, 0.201], "angle": 0, "content": "<table><tr><td rowspan=\"3\">(Dobrovolskii, 2021)</td><td colspan=\"3\">MUC</td><td colspan=\"3\">CEAFφ4</td><td colspan=\"3\">B3</td><td colspan=\"3\">AVG</td></tr><tr><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td></tr><tr><td>84.9</td><td>87.9</td><td>86.3</td><td>76.1</td><td>77.1</td><td>76.6</td><td>77.4</td><td>82.6</td><td>79.9</td><td>-</td><td>-</td><td>81.0</td></tr><tr><td>- reproduction</td><td>84.7</td><td>87.5</td><td>86.1</td><td>75.6</td><td>76.7</td><td>76.1</td><td>77.2</td><td>82.0</td><td>79.5</td><td>79.2</td><td>82.1</td><td>80.6</td></tr><tr><td>- replace test set</td><td>69.7</td><td>70.7</td><td>70.2</td><td>63.2</td><td>49.1</td><td>55.2</td><td>50.1</td><td>56.1</td><td>52.9</td><td>61.0</td><td>58.6</td><td>59.4</td></tr><tr><td>Δrepl.test-repr.</td><td>-15.0</td><td>-16.8</td><td>-15.9</td><td>-12.4</td><td>-27.6</td><td>-20.9</td><td>-27.1</td><td>-25.9</td><td>-26.6</td><td>-18.2</td><td>-23.5</td><td>-21.2</td></tr><tr><td>- replace all</td><td>81.6</td><td>83.1</td><td>82.4</td><td>73.08</td><td>72.9</td><td>73.0</td><td>72.3</td><td>75.3</td><td>73.7</td><td>75.7</td><td>77.1</td><td>76.4</td></tr><tr><td>Δrepl.all-repr.</td><td>-3.1</td><td>-4.4</td><td>-3.7</td><td>-2.5</td><td>-3.8</td><td>-3.1</td><td>-4.9</td><td>-6.7</td><td>-5.8</td><td>-3.5</td><td>-5.0</td><td>-4.2</td></tr></table>"},
    {"type": "table_caption", "bbox": [0.112, 0.21, 0.885, 0.285], "angle": 0, "content": "Table 5: Results of the delexicalization experiment. We report the results of the RoBERTa large-based word-level co-reference resolution model as reported by Dobrovolskii (2021), our reproduction, as well as variants trained and/ or tested on versions of the data set in which we replace the pronouns. All scores were produced using the official CoNLL-2012 scorer. We report precision (P), recall (R), and F1-score (F1) for MUC, \\(\\mathrm{CEAF}_{\\phi 4}\\), and \\(\\mathbf{B}^3\\) respectively, as well as the averages (AVG). The rows highlighted in gray indicate the obtained losses."},
    {"type": "text", "bbox": [0.113, 0.308, 0.49, 0.468], "angle": 0, "content": "we replace pronouns in the train, dev, and test splits. Note that our strategy is pessimistic, as we also replace non-3rd person pronouns, i.e., \\(I\\), you, ourselves, etc. We show the number of replacements in Table 4. For scoring, we use the official CoNLL2012 scorer (Pradhan et al., 2012). We report the results in terms of MUC (Vilain et al., 1995), \\(\\mathrm{B}^3\\) (Bagga and Baldwin, 1998), and \\(\\mathrm{CEAF}_{\\phi 4}\\) (Luo, 2005) precision, recall, and F1-measure, as well as the averages across these scores."},
    {"type": "text", "bbox": [0.112, 0.485, 0.49, 0.921], "angle": 0, "content": "Models and Baselines. We want to obtain an intuition about the tradeoffs in the delexicalization setup, not to outperform previous results. For this reason, we resort to the recently proposed word-level co-reference model (Dobrovolskii, 2021), a highly efficient model competitive with the state-of-the-art. The model consists of a separate co-reference resolution module and a separate span extraction module. In an initial step, we compute token representations from a Transformer (Vaswani et al., 2017)-based encoder through aggregation of initial representations via learnable weights. In a next step, we compute co-reference relationships. To this end, the token representations are passed into an antecedent pruning procedure based on a bilinear scoring function for obtaining \\( k \\) antecedent candidates for each token through coarse-grained scoring. Then an additional feed-forward neural network computes finer-grained scores. The final antecedent score is the sum of these two scores. We select the candidate with the highest score as the antecedent. Negative scores indicate no antecedent for a token. Tokens assumed to be part of a co-reference relationship are passed into the span extraction module. The module consists of an additional feed-forward network, which is followed by convolutions with two output channels (for start"},
    {"type": "text", "bbox": [0.507, 0.308, 0.885, 0.438], "angle": 0, "content": "and end scores). For further details see the original work. Our baseline is the model trained and evaluated on the original OntoNotes portions (reproduction). We compare with the evaluation of this model on the pronoun-replaced test set (replace test set) and a version of this model trained on the replaced training set and evaluated on the replaced test set, respectively (replace all)."},
    {"type": "text", "bbox": [0.508, 0.444, 0.886, 0.719], "angle": 0, "content": "Model Configuration, Training, and Optimization. We choose RoBERTa large (Liu et al., 2019)\\(^{17}\\) as the base encoder and fix all other hyperparameters to the ones provided in the original implementation of Dobrovolskii (2021): the window size is set to 512 tokens, dropout rate to 0.3, the learning rate of the encoder is set to \\(1 \\cdot 10^{-5}\\) and of the task-specific layers to \\(3 \\cdot 10^{-4}\\), respectively. We train the co-reference module with a combination of the negative log marginal likelihood and binary cross-entropy as an additional regularization factor (weight set to 0.5). The span extraction module is trained using cross-entropy loss. We optimize the sum of the two losses jointly with Adam (Kingma and Ba, 2015) for 20 epochs and apply early stopping based on validation set performance (word-level F1) with a patience of 3 epochs."},
    {"type": "title", "bbox": [0.509, 0.726, 0.738, 0.74], "angle": 0, "content": "6.2 Results and Discussion"},
    {"type": "text", "bbox": [0.508, 0.749, 0.885, 0.894], "angle": 0, "content": "We show the results in Table 5. We are roughly able to reproduce the results reported by (Dobrovolskii, 2021), confirming the effectiveness of their approach and the validity of our experimental setup. When we replace pronouns in the test set, the results drop massively, with up to \\(-27.6\\) percentage points \\(\\mathrm{CEAF}_{\\phi 4}\\) recall. On average, the results drop by 21.2 percentage points in F1-measure. This decrease demonstrates the heavy reliance of"},
    {"type": "page_footnote", "bbox": [0.527, 0.904, 0.868, 0.92], "angle": 0, "content": "<sup>17</sup>https://huggingface.co/roberta-large"}
  ],
  [
    {"type": "text", "bbox": [0.117, 0.085, 0.486, 0.373], "angle": 0, "content": "this model on the lexical surface forms of the pronoun sets seen in the training. However, when we replace the pronouns in the training portion of OntoNotes with the special tokens, we can mitigate these losses by a large margin (losses up to \\(-5.8\\) B\\(^3\\) F1, and on average \\(-4.2\\) F1). These results are highly encouraging, given that a) we replaced all pronouns, including non-third person pronouns, and b) the model has not been trained on these placeholders in the pretraining phase. The model can not rely on possibly discriminating correlations between names or occupations and pronoun sets and will represent neopronouns the same way as it will represent established pronoun sets. So a delexicalization approach can increase fairness in co-reference resolution and retain high system performance, as we can expect even smaller drops from a more careful selection of replacements.
|
| 1193 |
+
},
|
| 1194 |
+
{
|
| 1195 |
+
"type": "title",
|
| 1196 |
+
"bbox": [
|
| 1197 |
+
0.117,
|
| 1198 |
+
0.403,
|
| 1199 |
+
0.244,
|
| 1200 |
+
0.416
|
| 1201 |
+
],
|
| 1202 |
+
"angle": 0,
|
| 1203 |
+
"content": "7 Conclusion"
|
| 1204 |
+
},
|
| 1205 |
+
{
|
| 1206 |
+
"type": "text",
|
| 1207 |
+
"bbox": [
|
| 1208 |
+
0.117,
|
| 1209 |
+
0.438,
|
| 1210 |
+
0.486,
|
| 1211 |
+
0.918
|
| 1212 |
+
],
|
| 1213 |
+
"angle": 0,
|
| 1214 |
+
"content": "This work provides an initial overview of the plethora of current phenomena in 3rd person pronoun usage in the English language. For practical and ethical reasons, the NLP community should acknowledge the broad spectrum of possible identities and the respective manifestations in written and oral communication. Especially since many emerging phenomena are still under-researched, and even while it remains to be seen if and how these become more established ways of referring to individuals. Language is consistently evolving, and NLP researchers and practitioners should account for this to provide genuinely inclusive systems. Notably, pronouns, traditionally handled as a close class of words, currently seem to function closer to an open class. Based on our observations, which originate from literature research, research in non-academic publicly available writing, as well as a corpus study, we have defined a series of five desiderata and applied those to the discussion of existing and novel modeling paradigms. In this context, we raised the questions when and how to model pronouns and whether and how to include users in these decisions. We consider this document an initial and living draft and hope to start a broader discussion on the topic. Our study can inform future NLP research and beyond and serve as a starting point for creating novel modeling procedures. In the future, we will look at pronoun-related issues within concrete tasks and in multilingual scenarios."
|
| 1215 |
+
},
|
| 1216 |
+
{
|
| 1217 |
+
"type": "title",
|
| 1218 |
+
"bbox": [
|
| 1219 |
+
0.514,
|
| 1220 |
+
0.086,
|
| 1221 |
+
0.671,
|
| 1222 |
+
0.1
|
| 1223 |
+
],
|
| 1224 |
+
"angle": 0,
|
| 1225 |
+
"content": "Acknowledgments"
|
| 1226 |
+
},
|
| 1227 |
+
{
|
| 1228 |
+
"type": "text",
|
| 1229 |
+
"bbox": [
|
| 1230 |
+
0.513,
|
| 1231 |
+
0.108,
|
| 1232 |
+
0.882,
|
| 1233 |
+
0.219
|
| 1234 |
+
],
|
| 1235 |
+
"angle": 0,
|
| 1236 |
+
"content": "The work of Anne Lauscher and Dirk Hovy is funded by the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation program (grant agreement No. 949944, INTEGRATOR). We thank Emily Bender and Chandler May for sharing their ideas related to our project."
|
| 1237 |
+
},
|
| 1238 |
+
{
|
| 1239 |
+
"type": "title",
|
| 1240 |
+
"bbox": [
|
| 1241 |
+
0.514,
|
| 1242 |
+
0.227,
|
| 1243 |
+
0.743,
|
| 1244 |
+
0.24
|
| 1245 |
+
],
|
| 1246 |
+
"angle": 0,
|
| 1247 |
+
"content": "Further Ethical Discussion"
|
| 1248 |
+
},
|
| 1249 |
+
{
|
| 1250 |
+
"type": "text",
|
| 1251 |
+
"bbox": [
|
| 1252 |
+
0.513,
|
| 1253 |
+
0.248,
|
| 1254 |
+
0.882,
|
| 1255 |
+
0.616
|
| 1256 |
+
],
|
| 1257 |
+
"angle": 0,
|
| 1258 |
+
"content": "We have described and experimented with phenomena related to third-person pronouns focusing on the English language only. Naturally, this work comes with several limitations. For instance, while we pointed the reader to the variety of pronoun-related phenomena in other languages, a thorough multilingual and cross-lingual discussion would have exceeded the scope of this manuscript. This lacuna includes the discussion of neopronouns in other languages. Similarly, while we acknowledged identities beyond the binary gender as well as otherkin identities, due to our focus on pronouns, we did not investigate other identity-related terms. This aspect includes their handling in NLP and the range of issues related to identity-exclusivity. Finally, at the current state of the manuscript, the desiderata discussed are, as reported, based on our expert knowledge, our activities within the LGBTQIA+ community, and informal exchanges with individuals using gender-neutral pronouns. In the future, we will validate these assumptions through a structured survey to present a more inclusive perspective on the discussed issues."
|
| 1259 |
+
},
|
| 1260 |
+
{
|
| 1261 |
+
"type": "title",
|
| 1262 |
+
"bbox": [
|
| 1263 |
+
0.514,
|
| 1264 |
+
0.642,
|
| 1265 |
+
0.607,
|
| 1266 |
+
0.655
|
| 1267 |
+
],
|
| 1268 |
+
"angle": 0,
|
| 1269 |
+
"content": "References"
|
| 1270 |
+
},
|
| 1271 |
+
{
|
| 1272 |
+
"type": "ref_text",
|
| 1273 |
+
"bbox": [
|
| 1274 |
+
0.514,
|
| 1275 |
+
0.662,
|
| 1276 |
+
0.882,
|
| 1277 |
+
0.727
|
| 1278 |
+
],
|
| 1279 |
+
"angle": 0,
|
| 1280 |
+
"content": "Amit Bagga and Breck Baldwin. 1998. Algorithms for scoring coreference chains. In Proc. Linguistic Coreference Workshop at the first Conf. on Language Resources and Evaluation (LREC), pages 563-566, Granada, Spain."
|
| 1281 |
+
},
|
| 1282 |
+
{
|
| 1283 |
+
"type": "ref_text",
|
| 1284 |
+
"bbox": [
|
| 1285 |
+
0.514,
|
| 1286 |
+
0.738,
|
| 1287 |
+
0.882,
|
| 1288 |
+
0.855
|
| 1289 |
+
],
|
| 1290 |
+
"angle": 0,
|
| 1291 |
+
"content": "Soumya Barikeri, Anne Lauscher, Ivan Vulic, and Goran Glavaš. 2021. RedditBias: A real-world resource for bias evaluation and debiasing of conversational language models. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1941-1955, Online. Association for Computational Linguistics."
|
| 1292 |
+
},
|
| 1293 |
+
{
|
| 1294 |
+
"type": "ref_text",
|
| 1295 |
+
"bbox": [
|
| 1296 |
+
0.514,
|
| 1297 |
+
0.866,
|
| 1298 |
+
0.882,
|
| 1299 |
+
0.918
|
| 1300 |
+
],
|
| 1301 |
+
"angle": 0,
|
| 1302 |
+
"content": "Solon Barocas, Kate Crawford, Aaron Shapiro, and Hanna Wallach. 2017. The problem with bias: Allocative versus representational harms in machine learning. In 9th Annual Conference of the Special"
|
| 1303 |
+
},
|
| 1304 |
+
{
|
| 1305 |
+
"type": "list",
|
| 1306 |
+
"bbox": [
|
| 1307 |
+
0.514,
|
| 1308 |
+
0.662,
|
| 1309 |
+
0.882,
|
| 1310 |
+
0.918
|
| 1311 |
+
],
|
| 1312 |
+
"angle": 0,
|
| 1313 |
+
"content": null
|
| 1314 |
+
}
|
| 1315 |
+
],
|
| 1316 |
+
[
|
| 1317 |
+
{
|
| 1318 |
+
"type": "ref_text",
|
| 1319 |
+
"bbox": [
|
| 1320 |
+
0.135,
|
| 1321 |
+
0.086,
|
| 1322 |
+
0.49,
|
| 1323 |
+
0.113
|
| 1324 |
+
],
|
| 1325 |
+
"angle": 0,
|
| 1326 |
+
"content": "Interest Group for Computing, Information and Society."
|
| 1327 |
+
},
|
| 1328 |
+
{
|
| 1329 |
+
"type": "ref_text",
|
| 1330 |
+
"bbox": [
|
| 1331 |
+
0.117,
|
| 1332 |
+
0.121,
|
| 1333 |
+
0.49,
|
| 1334 |
+
0.213
|
| 1335 |
+
],
|
| 1336 |
+
"angle": 0,
|
| 1337 |
+
"content": "Su Lin Blodgett, Solon Barocas, Hal Daumé III, and Hanna Wallach. 2020. Language (technology) is power: A critical survey of \"bias\" in NLP. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5454-5476, Online. Association for Computational Linguistics."
|
| 1338 |
+
},
|
| 1339 |
+
{
|
| 1340 |
+
"type": "ref_text",
|
| 1341 |
+
"bbox": [
|
| 1342 |
+
0.117,
|
| 1343 |
+
0.221,
|
| 1344 |
+
0.49,
|
| 1345 |
+
0.326
|
| 1346 |
+
],
|
| 1347 |
+
"angle": 0,
|
| 1348 |
+
"content": "Tolga Bolukbasi, Kai-Wei Chang, James Y. Zou, Venkatesh Saligrama, and Adam Tauman Kalai. 2016. Man is to computer programmer as woman is to homemaker? debiasing word embeddings. In Advances in Neural Information Processing Systems 29: Annual Conference on Neural Information Processing Systems 2016, December 5-10, 2016, Barcelona, Spain, pages 4349-4357."
|
| 1349 |
+
},
|
| 1350 |
+
{
|
| 1351 |
+
"type": "ref_text",
|
| 1352 |
+
"bbox": [
|
| 1353 |
+
0.117,
|
| 1354 |
+
0.334,
|
| 1355 |
+
0.49,
|
| 1356 |
+
0.427
|
| 1357 |
+
],
|
| 1358 |
+
"angle": 0,
|
| 1359 |
+
"content": "Shikha Bordia and Samuel R. Bowman. 2019. Identifying and reducing gender bias in word-level language models. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Student Research Workshop, pages 7-15, Minneapolis, Minnesota. Association for Computational Linguistics."
|
| 1360 |
+
},
|
| 1361 |
+
{
|
| 1362 |
+
"type": "ref_text",
|
| 1363 |
+
"bbox": [
|
| 1364 |
+
0.117,
|
| 1365 |
+
0.434,
|
| 1366 |
+
0.489,
|
| 1367 |
+
0.461
|
| 1368 |
+
],
|
| 1369 |
+
"angle": 0,
|
| 1370 |
+
"content": "Judith Butler. 1990. Gender trouble, 1st edition. Routledge Classics, New York, NY, USA."
|
| 1371 |
+
},
|
| 1372 |
+
{
|
| 1373 |
+
"type": "ref_text",
|
| 1374 |
+
"bbox": [
|
| 1375 |
+
0.117,
|
| 1376 |
+
0.469,
|
| 1377 |
+
0.489,
|
| 1378 |
+
0.496
|
| 1379 |
+
],
|
| 1380 |
+
"angle": 0,
|
| 1381 |
+
"content": "Judith Butler. 2004. Undoing Gender, 1st edition. Routledge, New York, NY, USA."
|
| 1382 |
+
},
|
| 1383 |
+
{
|
| 1384 |
+
"type": "ref_text",
|
| 1385 |
+
"bbox": [
|
| 1386 |
+
0.117,
|
| 1387 |
+
0.504,
|
| 1388 |
+
0.49,
|
| 1389 |
+
0.57
|
| 1390 |
+
],
|
| 1391 |
+
"angle": 0,
|
| 1392 |
+
"content": "Yang Trista Cao and Hal Daumé III. 2021. Toward gender-inclusive coreference resolution: An analysis of gender and bias throughout the machine learning lifecycle*. Computational Linguistics, 47(3):615-661."
|
| 1393 |
+
},
|
| 1394 |
+
{
|
| 1395 |
+
"type": "ref_text",
|
| 1396 |
+
"bbox": [
|
| 1397 |
+
0.117,
|
| 1398 |
+
0.578,
|
| 1399 |
+
0.489,
|
| 1400 |
+
0.631
|
| 1401 |
+
],
|
| 1402 |
+
"angle": 0,
|
| 1403 |
+
"content": "Katharine A Cherry-Reid. 2020. *Music to Our Ears: Using a Queer Folk Song Pedagogy to do Gender and Sexuality Education*. Ph.D. thesis, University of Toronto (Canada)."
|
| 1404 |
+
},
|
| 1405 |
+
{
|
| 1406 |
+
"type": "ref_text",
|
| 1407 |
+
"bbox": [
|
| 1408 |
+
0.117,
|
| 1409 |
+
0.64,
|
| 1410 |
+
0.489,
|
| 1411 |
+
0.667
|
| 1412 |
+
],
|
| 1413 |
+
"angle": 0,
|
| 1414 |
+
"content": "Kirby Conrod. 2019. Pronouns Raising and Emerging. Ph.D. thesis, University of Washington."
|
| 1415 |
+
},
|
| 1416 |
+
{
|
| 1417 |
+
"type": "ref_text",
|
| 1418 |
+
"bbox": [
|
| 1419 |
+
0.117,
|
| 1420 |
+
0.675,
|
| 1421 |
+
0.49,
|
| 1422 |
+
0.805
|
| 1423 |
+
],
|
| 1424 |
+
"angle": 0,
|
| 1425 |
+
"content": "Sunipa Dev, Tao Li, Jeff M. Phillips, and Vivek Srikumar. 2020. On measuring and mitigating biased inferences of word embeddings. In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, February 7-12, 2020, pages 7659-7666. AAAI Press."
|
| 1426 |
+
},
|
| 1427 |
+
{
|
| 1428 |
+
"type": "ref_text",
|
| 1429 |
+
"bbox": [
|
| 1430 |
+
0.117,
|
| 1431 |
+
0.814,
|
| 1432 |
+
0.49,
|
| 1433 |
+
0.919
|
| 1434 |
+
],
|
| 1435 |
+
"angle": 0,
|
| 1436 |
+
"content": "Sunipa Dev, Masoud Monajatipoor, Anaelia Ovalle, Arjun Subramonian, Jeff Phillips, and Kai-Wei Chang. 2021. Harms of gender exclusivity and challenges in non-binary representation in language technologies. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 1968-1994, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics."
|
| 1437 |
+
},
|
| 1438 |
+
{
|
| 1439 |
+
"type": "list",
|
| 1440 |
+
"bbox": [
|
| 1441 |
+
0.117,
|
| 1442 |
+
0.086,
|
| 1443 |
+
0.49,
|
| 1444 |
+
0.919
|
| 1445 |
+
],
|
| 1446 |
+
"angle": 0,
|
| 1447 |
+
"content": null
|
| 1448 |
+
},
|
| 1449 |
+
{
|
| 1450 |
+
"type": "ref_text",
|
| 1451 |
+
"bbox": [
|
| 1452 |
+
0.512,
|
| 1453 |
+
0.086,
|
| 1454 |
+
0.885,
|
| 1455 |
+
0.204
|
| 1456 |
+
],
|
| 1457 |
+
"angle": 0,
|
| 1458 |
+
"content": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics."
|
| 1459 |
+
},
|
| 1460 |
+
{
|
| 1461 |
+
"type": "ref_text",
|
| 1462 |
+
"bbox": [
|
| 1463 |
+
0.512,
|
| 1464 |
+
0.216,
|
| 1465 |
+
0.885,
|
| 1466 |
+
0.295
|
| 1467 |
+
],
|
| 1468 |
+
"angle": 0,
|
| 1469 |
+
"content": "Vladimir Dobrovolskii. 2021. Word-level coreference resolution. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7670-7675, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics."
|
| 1470 |
+
},
|
| 1471 |
+
{
|
| 1472 |
+
"type": "ref_text",
|
| 1473 |
+
"bbox": [
|
| 1474 |
+
0.512,
|
| 1475 |
+
0.307,
|
| 1476 |
+
0.885,
|
| 1477 |
+
0.425
|
| 1478 |
+
],
|
| 1479 |
+
"angle": 0,
|
| 1480 |
+
"content": "Hila Gonen and Yoav Goldberg. 2019. Lipstick on a pig: Debiasing methods cover up systematic gender biases in word embeddings but do not remove them. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 609-614, Minneapolis, Minnesota. Association for Computational Linguistics."
|
| 1481 |
+
},
|
| 1482 |
+
{
|
| 1483 |
+
"type": "ref_text",
|
| 1484 |
+
"bbox": [
|
| 1485 |
+
0.512,
|
| 1486 |
+
0.437,
|
| 1487 |
+
0.885,
|
| 1488 |
+
0.49
|
| 1489 |
+
],
|
| 1490 |
+
"angle": 0,
|
| 1491 |
+
"content": "Ove Grandstrand. 1998. Identity and deception in the virtual community. In Peter Kollock and Marc Smith, editors, Communities in Cyberspace, 1st edition, chapter 2. Routledge, London, UK."
|
| 1492 |
+
},
|
| 1493 |
+
{
|
| 1494 |
+
"type": "ref_text",
|
| 1495 |
+
"bbox": [
|
| 1496 |
+
0.512,
|
| 1497 |
+
0.502,
|
| 1498 |
+
0.885,
|
| 1499 |
+
0.568
|
| 1500 |
+
],
|
| 1501 |
+
"angle": 0,
|
| 1502 |
+
"content": "Luis A Hercus. 1994. A grammar of the Arabana-Wangkangurru language, Lake Eyre Basin, South Australia (Pacific linguistics. Series C), 1st edition. Dept. of Linguistics, Research School of Pacific and Asian Studies, Australian National University."
|
| 1503 |
+
},
|
| 1504 |
+
{
|
| 1505 |
+
"type": "ref_text",
|
| 1506 |
+
"bbox": [
|
| 1507 |
+
0.512,
|
| 1508 |
+
0.58,
|
| 1509 |
+
0.885,
|
| 1510 |
+
0.607
|
| 1511 |
+
],
|
| 1512 |
+
"angle": 0,
|
| 1513 |
+
"content": "Jerry R Hobbs. 1978. Resolving pronoun references. *Lingua*, 44(4):311-338."
|
| 1514 |
+
},
|
| 1515 |
+
{
|
| 1516 |
+
"type": "ref_text",
|
| 1517 |
+
"bbox": [
|
| 1518 |
+
0.512,
|
| 1519 |
+
0.619,
|
| 1520 |
+
0.885,
|
| 1521 |
+
0.672
|
| 1522 |
+
],
|
| 1523 |
+
"angle": 0,
|
| 1524 |
+
"content": "Chia-Chien Hung, Anne Lauscher, Simone Paolo Ponzetto, and Goran Glavaš. 2021. DS-TOD: Efficient domain specialization for task oriented dialog. arXiv preprint arXiv:2110.08395."
|
| 1525 |
+
},
|
| 1526 |
+
{
|
| 1527 |
+
"type": "ref_text",
|
| 1528 |
+
"bbox": [
|
| 1529 |
+
0.512,
|
| 1530 |
+
0.684,
|
| 1531 |
+
0.885,
|
| 1532 |
+
0.737
|
| 1533 |
+
],
|
| 1534 |
+
"angle": 0,
|
| 1535 |
+
"content": "Emiko S. Kashima and Yoshihisa Kashima. 1998. Culture and language: The case of cultural dimensions and personal pronoun use. Journal of Cross-Cultural Psychology, 29(3):461-486."
|
| 1536 |
+
},
|
| 1537 |
+
{
|
| 1538 |
+
"type": "ref_text",
|
| 1539 |
+
"bbox": [
|
| 1540 |
+
0.512,
|
| 1541 |
+
0.749,
|
| 1542 |
+
0.885,
|
| 1543 |
+
0.815
|
| 1544 |
+
],
|
| 1545 |
+
"angle": 0,
|
| 1546 |
+
"content": "Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings."
|
| 1547 |
+
},
|
| 1548 |
+
{
|
| 1549 |
+
"type": "ref_text",
|
| 1550 |
+
"bbox": [
|
| 1551 |
+
0.512,
|
| 1552 |
+
0.826,
|
| 1553 |
+
0.885,
|
| 1554 |
+
0.868
|
| 1555 |
+
],
|
| 1556 |
+
"angle": 0,
|
| 1557 |
+
"content": "Lex Konylly and Elizabeth Cowper. 2020. Gender diversity and morphosyntax: An account of singular they. Glossa: a journal of general linguistics, 5(1)."
|
| 1558 |
+
},
|
| 1559 |
+
{
|
| 1560 |
+
"type": "ref_text",
|
| 1561 |
+
"bbox": [
|
| 1562 |
+
0.512,
|
| 1563 |
+
0.878,
|
| 1564 |
+
0.885,
|
| 1565 |
+
0.919
|
| 1566 |
+
],
|
| 1567 |
+
"angle": 0,
|
| 1568 |
+
"content": "Helene Seltzer Krauthamer. 2021. The Great Pronoun Shift: The Big Impact of Little Parts of Speech, 1st edition. Routledge."
|
| 1569 |
+
},
|
| 1570 |
+
{
|
| 1571 |
+
"type": "list",
|
| 1572 |
+
"bbox": [
|
| 1573 |
+
0.512,
|
| 1574 |
+
0.086,
|
| 1575 |
+
0.885,
|
| 1576 |
+
0.919
|
| 1577 |
+
],
|
| 1578 |
+
"angle": 0,
|
| 1579 |
+
"content": null
|
| 1580 |
+
}
|
| 1581 |
+
],
|
| 1582 |
+
[
|
| 1583 |
+
{
|
| 1584 |
+
"type": "ref_text",
|
| 1585 |
+
"bbox": [
|
| 1586 |
+
0.117,
|
| 1587 |
+
0.086,
|
| 1588 |
+
0.49,
|
| 1589 |
+
0.166
|
| 1590 |
+
],
|
| 1591 |
+
"angle": 0,
|
| 1592 |
+
"content": "Keita Kurita, Nidhi Vyas, Ayush Parek, Alan W Black, and Yulia Tsvetkov. 2019. Measuring bias in contextualized word representations. In Proceedings of the First Workshop on Gender Bias in Natural Language Processing, pages 166-172, Florence, Italy. Association for Computational Linguistics."
|
| 1593 |
+
},
|
| 1594 |
+
{
|
| 1595 |
+
"type": "ref_text",
|
| 1596 |
+
"bbox": [
|
| 1597 |
+
0.117,
|
| 1598 |
+
0.176,
|
| 1599 |
+
0.489,
|
| 1600 |
+
0.243
|
| 1601 |
+
],
|
| 1602 |
+
"angle": 0,
|
| 1603 |
+
"content": "Anne Lauscher, Goran Glavas, Simone Paolo Ponzetto, and Ivan Vulic. 2020. A general framework for implicit and explicit debiasing of distributional word vector spaces. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 8131-8138."
|
| 1604 |
+
},
|
| 1605 |
+
{
|
| 1606 |
+
"type": "ref_text",
|
| 1607 |
+
"bbox": [
|
| 1608 |
+
0.117,
|
| 1609 |
+
0.253,
|
| 1610 |
+
0.489,
|
| 1611 |
+
0.332
|
| 1612 |
+
],
|
| 1613 |
+
"angle": 0,
|
| 1614 |
+
"content": "Anne Lauscher, Tobias Lueken, and Goran Glavaš. 2021. Sustainable modular debiasing of language models. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 4782-4797, Punta Cana, Dominican Republic. Association for Computational Linguistics."
|
| 1615 |
+
},
|
| 1616 |
+
{
|
| 1617 |
+
"type": "ref_text",
|
| 1618 |
+
"bbox": [
|
| 1619 |
+
0.117,
|
| 1620 |
+
0.343,
|
| 1621 |
+
0.489,
|
| 1622 |
+
0.408
|
| 1623 |
+
],
|
| 1624 |
+
"angle": 0,
|
| 1625 |
+
"content": "Joseph P Laycock. 2012. \"We are spirits of another sort\": Ontological rebellion and religious dimensions of the otherkin community. Nova Religio: The Journal of Alternative and Emergent Religions, 15(3):65-90."
|
| 1626 |
+
},
|
| 1627 |
+
{
|
| 1628 |
+
"type": "ref_text",
|
| 1629 |
+
"bbox": [
|
| 1630 |
+
0.117,
|
| 1631 |
+
0.419,
|
| 1632 |
+
0.489,
|
| 1633 |
+
0.485
|
| 1634 |
+
],
|
| 1635 |
+
"angle": 0,
|
| 1636 |
+
"content": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining approach. arXiv preprint arXiv:1907.11692."
|
| 1637 |
+
},
|
| 1638 |
+
{
|
| 1639 |
+
"type": "ref_text",
|
| 1640 |
+
"bbox": [
|
| 1641 |
+
0.117,
|
| 1642 |
+
0.496,
|
| 1643 |
+
0.489,
|
| 1644 |
+
0.575
|
| 1645 |
+
],
|
| 1646 |
+
"angle": 0,
|
| 1647 |
+
"content": "Xiaoqiang Luo. 2005. On coreference resolution performance metrics. In Proceedings of Human Language Technology Conference and Conference on Empirical Methods in Natural Language Processing, pages 25-32, Vancouver, British Columbia, Canada. Association for Computational Linguistics."
|
| 1648 |
+
},
|
| 1649 |
+
{
|
| 1650 |
+
"type": "ref_text",
|
| 1651 |
+
"bbox": [
|
| 1652 |
+
0.117,
|
| 1653 |
+
0.586,
|
| 1654 |
+
0.486,
|
| 1655 |
+
0.613
|
| 1656 |
+
],
|
| 1657 |
+
"angle": 0,
|
| 1658 |
+
"content": "Amin Maalouf. 2000. On identity, 1st, translated by barbara bray edition. Vintage."
|
| 1659 |
+
},
|
| 1660 |
+
{
|
| 1661 |
+
"type": "ref_text",
|
| 1662 |
+
"bbox": [
|
| 1663 |
+
0.117,
|
| 1664 |
+
0.624,
|
| 1665 |
+
0.489,
|
| 1666 |
+
0.703
|
| 1667 |
+
],
|
| 1668 |
+
"angle": 0,
|
| 1669 |
+
"content": "Ryan McDonald, Slav Petrov, and Keith Hall. 2011. Multi-source transfer of delexicalized dependency parsers. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 62-72, Edinburgh, Scotland, UK. Association for Computational Linguistics."
|
| 1670 |
+
},
|
| 1671 |
+
{
|
| 1672 |
+
"type": "ref_text",
|
| 1673 |
+
"bbox": [
|
| 1674 |
+
0.117,
|
| 1675 |
+
0.713,
|
| 1676 |
+
0.489,
|
| 1677 |
+
0.753
|
| 1678 |
+
],
|
| 1679 |
+
"angle": 0,
|
| 1680 |
+
"content": "Sebastian McGaughey. 2020. Understanding neopronouns. The Gay & Lesbian Review Worldwide, 27(2):27-29."
|
| 1681 |
+
},
|
| 1682 |
+
{
|
| 1683 |
+
"type": "ref_text",
|
| 1684 |
+
"bbox": [
|
| 1685 |
+
0.117,
|
| 1686 |
+
0.764,
|
| 1687 |
+
0.486,
|
| 1688 |
+
0.791
|
| 1689 |
+
],
|
| 1690 |
+
"angle": 0,
|
| 1691 |
+
"content": "John C McKay. 1993. On the term \"pronoun\" in italian grammars. Italica, 70(2):168-181."
|
| 1692 |
+
},
|
| 1693 |
+
{
|
| 1694 |
+
"type": "ref_text",
|
| 1695 |
+
"bbox": [
|
| 1696 |
+
0.117,
|
| 1697 |
+
0.802,
|
| 1698 |
+
0.489,
|
| 1699 |
+
0.855
|
| 1700 |
+
],
|
| 1701 |
+
"angle": 0,
|
| 1702 |
+
"content": "Ehm Hjorth Miltersen. 2016. Nounself pronouns: 3rd person personal pronouns as identity expression. Journal of Language Works-Sprogvidenskabeligt Studentertidsskrift, 1(1):37-62."
|
| 1703 |
+
},
|
| 1704 |
+
{
|
| 1705 |
+
"type": "ref_text",
|
| 1706 |
+
"bbox": [
|
| 1707 |
+
0.117,
|
| 1708 |
+
0.866,
|
| 1709 |
+
0.489,
|
| 1710 |
+
0.919
|
| 1711 |
+
],
|
| 1712 |
+
"angle": 0,
|
| 1713 |
+
"content": "Abd Muqit. 2012. Ideology and power relation reflected in the use of pronoun in osama bin laden's speech text. International Journal of Social Science and Humanity, 2(6):557."
|
| 1714 |
+
},
|
| 1715 |
+
{
|
| 1716 |
+
"type": "list",
|
| 1717 |
+
"bbox": [
|
| 1718 |
+
0.117,
|
| 1719 |
+
0.086,
|
| 1720 |
+
0.49,
|
| 1721 |
+
0.919
|
| 1722 |
+
],
|
| 1723 |
+
"angle": 0,
|
| 1724 |
+
"content": null
|
| 1725 |
+
},
|
| 1726 |
+
{
|
| 1727 |
+
"type": "ref_text",
|
| 1728 |
+
"bbox": [
|
| 1729 |
+
0.511,
|
| 1730 |
+
0.086,
|
| 1731 |
+
0.883,
|
| 1732 |
+
0.166
|
| 1733 |
+
],
|
| 1734 |
+
"angle": 0,
|
| 1735 |
+
"content": "Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global vectors for word representation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, Doha, Qatar. Association for Computational Linguistics."
|
| 1736 |
+
},
|
| 1737 |
+
{
|
| 1738 |
+
"type": "ref_text",
|
| 1739 |
+
"bbox": [
|
| 1740 |
+
0.511,
|
| 1741 |
+
0.175,
|
| 1742 |
+
0.883,
|
| 1743 |
+
0.215
|
| 1744 |
+
],
|
| 1745 |
+
"angle": 0,
|
| 1746 |
+
"content": "Paul Postal, David A Reibel, and Sanford A Schane. 1969. On so-called pronouns in english. Readings in English transformational grammar, pages 12-25."
|
| 1747 |
+
},
|
| 1748 |
+
{
|
| 1749 |
+
"type": "ref_text",
|
| 1750 |
+
"bbox": [
|
| 1751 |
+
0.511,
|
| 1752 |
+
0.225,
|
| 1753 |
+
0.883,
|
| 1754 |
+
0.302
|
| 1755 |
+
],
|
| 1756 |
+
"angle": 0,
|
| 1757 |
+
"content": "Sameer Pradhan, Alessandro Moschitti, Nianwen Xue, Olga Uryupina, and Yuchen Zhang. 2012. Conll-2012 shared task: Modeling multilingual unrestricted coreference in ontonotes. In Joint Conference on EMNLP and CoNLL-Shared Task, pages 1-40."
|
| 1758 |
+
},
|
| 1759 |
+
{
|
| 1760 |
+
"type": "ref_text",
|
| 1761 |
+
"bbox": [
|
| 1762 |
+
0.511,
|
| 1763 |
+
0.313,
|
| 1764 |
+
0.883,
|
| 1765 |
+
0.353
|
| 1766 |
+
],
|
| 1767 |
+
"angle": 0,
|
| 1768 |
+
"content": "Chase Wesley Raymond. 2016. Linguistic reference in the negotiation of identity and action: Revisiting the t/v distinction. Language, 92:636-670."
|
| 1769 |
+
},
|
| 1770 |
+
{
|
| 1771 |
+
"type": "ref_text",
|
| 1772 |
+
"bbox": [
|
| 1773 |
+
0.511,
|
| 1774 |
+
0.363,
|
| 1775 |
+
0.883,
|
| 1776 |
+
0.415
|
| 1777 |
+
],
|
| 1778 |
+
"angle": 0,
|
| 1779 |
+
"content": "Julie Roberts. 2020. 2019 word of the year is “(my) pronouns,” word of the decade is singular “they” as voted by american dialect society. Press Release, American Dialect Society."
|
| 1780 |
+
},
|
| 1781 |
+
{
|
| 1782 |
+
"type": "ref_text",
|
| 1783 |
+
"bbox": [
|
| 1784 |
+
0.511,
|
| 1785 |
+
0.425,
|
| 1786 |
+
0.883,
|
| 1787 |
+
0.53
|
| 1788 |
+
],
|
| 1789 |
+
"angle": 0,
|
| 1790 |
+
"content": "Rachel Rudinger, Jason Naradowsky, Brian Leonard, and Benjamin Van Durme. 2018. Gender bias in coreference resolution. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 8-14, New Orleans, Louisiana. Association for Computational Linguistics."
|
| 1791 |
+
},
|
| 1792 |
+
{
|
| 1793 |
+
"type": "ref_text",
|
| 1794 |
+
"bbox": [
|
| 1795 |
+
0.511,
|
| 1796 |
+
0.54,
|
| 1797 |
+
0.883,
|
| 1798 |
+
0.567
|
| 1799 |
+
],
|
| 1800 |
+
"angle": 0,
|
| 1801 |
+
"content": "Beatrice Santorini. 1990. Part-of-speech tagging guidelines for the penn treebank project."
|
| 1802 |
+
},
|
| 1803 |
+
{
|
| 1804 |
+
"type": "ref_text",
|
| 1805 |
+
"bbox": [
|
| 1806 |
+
0.511,
|
| 1807 |
+
0.577,
|
| 1808 |
+
0.883,
|
| 1809 |
+
0.668
|
| 1810 |
+
],
|
| 1811 |
+
"angle": 0,
|
| 1812 |
+
"content": "Deven Santosh Shah, H. Andrew Schwartz, and Dirk Hovy. 2020. Predictive biases in natural language processing models: A conceptual framework and overview. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5248-5264, Online. Association for Computational Linguistics."
|
| 1813 |
+
},
|
| 1814 |
+
{
|
| 1815 |
+
"type": "ref_text",
|
| 1816 |
+
"bbox": [
|
| 1817 |
+
0.511,
|
| 1818 |
+
0.678,
|
| 1819 |
+
0.883,
|
| 1820 |
+
0.718
|
| 1821 |
+
],
|
| 1822 |
+
"angle": 0,
|
| 1823 |
+
"content": "Michael Spivak. 1990. The Joy of TeX: A Gourmet Guide to Typesetting with the AMSTeX Macro Package, 2nd edition. American Mathematical Society."
|
| 1824 |
+
},
|
| 1825 |
+
{
|
| 1826 |
+
"type": "ref_text",
|
| 1827 |
+
"bbox": [
|
| 1828 |
+
0.511,
|
| 1829 |
+
0.728,
|
| 1830 |
+
0.883,
|
| 1831 |
+
0.754
|
| 1832 |
+
],
|
| 1833 |
+
"angle": 0,
|
| 1834 |
+
"content": "Susan Stryker. 2017. Transgender history: The roots of today's revolution, 2nd edition. Seal Press."
|
| 1835 |
+
},
|
| 1836 |
+
{
|
| 1837 |
+
"type": "ref_text",
|
| 1838 |
+
"bbox": [
|
| 1839 |
+
0.511,
|
| 1840 |
+
0.764,
|
| 1841 |
+
0.883,
|
| 1842 |
+
0.869
|
| 1843 |
+
],
|
| 1844 |
+
"angle": 0,
|
| 1845 |
+
"content": "Sandeep Suntwal, Mithun Paul, Rebecca Sharp, and Mihai Surdeanu. 2019. On the importance of delexicalization for fact verification. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3413-3418, Hong Kong, China. Association for Computational Linguistics."
|
| 1846 |
+
},
|
| 1847 |
+
{
|
| 1848 |
+
"type": "ref_text",
|
| 1849 |
+
"bbox": [
|
| 1850 |
+
0.511,
|
| 1851 |
+
0.878,
|
| 1852 |
+
0.883,
|
| 1853 |
+
0.918
|
| 1854 |
+
],
|
| 1855 |
+
"angle": 0,
|
| 1856 |
+
"content": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all"
|
| 1857 |
+
},
|
| 1858 |
+
{
|
| 1859 |
+
"type": "list",
|
| 1860 |
+
"bbox": [
|
| 1861 |
+
0.511,
|
| 1862 |
+
0.086,
|
| 1863 |
+
0.883,
|
| 1864 |
+
0.918
|
| 1865 |
+
],
|
| 1866 |
+
"angle": 0,
|
| 1867 |
+
"content": null
|
| 1868 |
+
}
|
| 1869 |
+
],
|
| 1870 |
+
[
|
| 1871 |
+
{
|
| 1872 |
+
"type": "ref_text",
|
| 1873 |
+
"bbox": [
|
| 1874 |
+
0.135,
|
| 1875 |
+
0.086,
|
| 1876 |
+
0.49,
|
| 1877 |
+
0.14
|
| 1878 |
+
],
|
| 1879 |
+
"angle": 0,
|
| 1880 |
+
"content": "you need. In Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 5998-6008."
|
| 1881 |
+
},
|
| 1882 |
+
{
|
| 1883 |
+
"type": "ref_text",
|
| 1884 |
+
"bbox": [
|
| 1885 |
+
0.117,
|
| 1886 |
+
0.149,
|
| 1887 |
+
0.49,
|
| 1888 |
+
0.227
|
| 1889 |
+
],
|
| 1890 |
+
"angle": 0,
|
| 1891 |
+
"content": "Marc Vilain, John D Burger, John Aberdeen, Dennis Connolly, and Lynette Hirschman. 1995. A model-theoretic coreference scoring scheme. In Sixth Message Understanding Conference (MUC-6): Proceedings of a Conference Held in Columbia, Maryland, November 6-8, 1995."
|
| 1892 |
+
},
|
| 1893 |
+
{
|
| 1894 |
+
"type": "ref_text",
|
| 1895 |
+
"bbox": [
|
| 1896 |
+
0.117,
|
| 1897 |
+
0.238,
|
| 1898 |
+
0.49,
|
| 1899 |
+
0.303
|
| 1900 |
+
],
|
| 1901 |
+
"angle": 0,
|
| 1902 |
+
"content": "Kellie Webster, Marta Recasens, Vera Axelrod, and Jason Baldridge. 2018. Mind the GAP: A balanced corpus of gendered ambiguous pronouns. Transactions of the Association for Computational Linguistics, 6:605-617."
|
| 1903 |
+
},
|
| 1904 |
+
{
|
| 1905 |
+
"type": "ref_text",
|
| 1906 |
+
"bbox": [
|
| 1907 |
+
0.117,
|
| 1908 |
+
0.313,
|
| 1909 |
+
0.49,
|
| 1910 |
+
0.378
|
| 1911 |
+
],
|
| 1912 |
+
"angle": 0,
|
| 1913 |
+
"content": "Ralph Weischedel, Sameer Pradhan, Lance Ramshaw, Jeff Kaufman, Michelle Franchini, Mohammed El-Bachouti, Nianwen Xue, Martha Palmer, Jena D Hwang, Claire Bonial, et al. 2012. Ontonotes release 5.0."
|
| 1914 |
+
},
|
| 1915 |
+
{
|
| 1916 |
+
"type": "list",
|
| 1917 |
+
"bbox": [
|
| 1918 |
+
0.117,
|
| 1919 |
+
0.086,
|
| 1920 |
+
0.49,
|
| 1921 |
+
0.378
|
| 1922 |
+
],
|
| 1923 |
+
"angle": 0,
|
| 1924 |
+
"content": null
|
| 1925 |
+
}
|
| 1926 |
+
]
|
| 1927 |
+
]
|
2202.11xxx/2202.11923/2469feeb-f44a-460f-890c-2b605e397b0b_origin.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d7f8fbbba8101e6dbc3c7fb4e4d2aa14701836ac1eb3ca6cfafcab6b325773cc
|
| 3 |
+
size 385905
|
2202.11xxx/2202.11923/full.md
ADDED
|
@@ -0,0 +1,267 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Welcome to the Modern World of Pronouns: Identity-Inclusive Natural Language Processing beyond Gender
|
| 2 |
+
|
| 3 |
+
Anne Lauscher
|
| 4 |
+
|
| 5 |
+
MilaNLP
|
| 6 |
+
|
| 7 |
+
Universita Luigi Bocconi
|
| 8 |
+
|
| 9 |
+
Milan, Italy
|
| 10 |
+
|
| 11 |
+
anne.lauscher@unibocconi.it
|
| 12 |
+
|
| 13 |
+
Archie Crowley
|
| 14 |
+
|
| 15 |
+
Linguistics
|
| 16 |
+
|
| 17 |
+
University of South Carolina
|
| 18 |
+
|
| 19 |
+
Columbia, SC, USA
|
| 20 |
+
|
| 21 |
+
acrowley@sc.edu
|
| 22 |
+
|
| 23 |
+
Dirk Hovy
|
| 24 |
+
|
| 25 |
+
MilaNLP
|
| 26 |
+
|
| 27 |
+
Universita Luigi Bocconi
|
| 28 |
+
|
| 29 |
+
Milan, Italy
|
| 30 |
+
|
| 31 |
+
dirk.hovy@unibocconi.it
|
| 32 |
+
|
| 33 |
+
# Abstract
|
| 34 |
+
|
| 35 |
+
Trigger warning: This paper contains some examples which might be offensive to some users.
|
| 36 |
+
|
| 37 |
+
The world of pronouns is changing: from a closed class of words with few members to a much more open set of terms to reflect identities. However, Natural Language Processing (NLP) is barely reflecting this linguistic shift, even though recent work outlined the harms of gender-exclusive language technology. Particularly problematic is the current modeling of 3rd person pronouns, as it largely ignores various phenomena like neopronouns, i.e., pronoun sets that are novel and not (yet) widely established. This omission contributes to the discrimination of marginalized and underrepresented groups, e.g., non-binary individuals. However, other identity-expression phenomena beyond gender are also ignored by current NLP technology. In this paper, we provide an overview of 3rd person pronoun issues for NLP. Based on our observations and ethical considerations, we define a series of desiderata for modeling pronouns in language technology. We evaluate existing and novel modeling approaches w.r.t. these desiderata qualitatively, and quantify the impact of a more discrimination-free approach on established benchmark data.
|
| 38 |
+
|
| 39 |
+
# 1 Introduction
|
| 40 |
+
|
| 41 |
+
Pronouns are an essential component of many languages and often one of the most frequent word classes. Accordingly, NLP has long studied tasks related to them, e.g., pronoun resolution (e.g., Hobbs, 1978). Simplistically, they can be defined as "a word (such as I, he, she, you, it, we, or they) that is used instead of a noun or noun phrase".<sup>1</sup> Linguistic studies have pointed out the complexity of pronouns, though (e.g., Postal et al., 1969;
|
| 42 |
+
|
| 43 |
+
McKay, 1993). Pronouns can carry demographic information – in English, for example, information about the number of referees and a single referee's (grammatical) gender. Even more information can be conveyed by pronouns in other, non-pro-drop languages. Consider Arabana-Wangkangurru, a language spoken in Australia, in which a speaker uses different pronouns depending on whether the referee is part of the same social or ritual group (moiety) (Hercus, 1994). As such, pronouns shape how we perceive individuals and can even reflect cultural aspects (e.g., Kashima and Kashima, 1998) and ideologies (e.g., Muqit, 2012). Consequently, pronoun usage should be considered a sensitive aspect of natural language use.
|
| 44 |
+
|
| 45 |
+
Accordingly, in many western societies, these phenomena have been drawing more and more attention. For instance, in 2020, the American Dialect Society voted “(My) Pronouns” as the 2019 Word of the Year and Singular “They” as the Word of the Decade (Roberts, 2020). Recently, there has been a shift in pronoun usage (Krauthamer, 2021), partially due to shifts in the perception of gender, driven by the queer-feminist discourse (e.g., Butler, 1990, 2004). Related to this is the open discussion of identity beyond binary gender. For instance, a person who does not identify their gender within the gender binary (e.g., a nonbinary or genderqueer person) might use singular “they” as their pronoun. Recently, the French dictionary “Le Robert” added the non-binary pronoun “iel” to its list of words.<sup>3</sup>
|
| 46 |
+
|
| 47 |
+
This "social push" to respect diverse gender identities also affects aspects of NLP. Recent studies have pointed out the potential harms from the cur
|
| 48 |
+
|
| 49 |
+
lack of non-binary representation in NLP data sets, embeddings, and tasks (Cao and Daumé III, 2021; Dev et al., 2021), and the related issue of unfair stereotyping of queer individuals (Barikeri et al., 2021). However, the research landscape on modern pronoun usage is still surprisingly scarce, hindering progress for a fair and inclusive NLP.
|
| 50 |
+
|
| 51 |
+
Further linguistic research has identified identity aspects of pronouns beyond gender (Miltersen, 2016). Specifically, *nounself* pronouns, functionally turning pronouns from a *closed* to an *open* word class. To the best of our knowledge, these aspects have been completely ignored by NLP so far. We did not find a single work systematically describing all of the currently existing phenomena even just in English third-person pronoun usage (let alone other languages). In contrast, a fair number of discussions are taking place in queer Wikis and forums. While it is still unclear which of these phenomena will persist over the next decades, people are using and discussing them, and accordingly, we as a research community should adapt.
|
| 52 |
+
|
| 53 |
+
Contributions. In this "living draft", 1) we are the first to provide a systematic overview of existing phenomena in English 3rd person pronoun usage. Our results will inform future NLP research on ethical NLP and non-binary representation. We provide the first NLP work acknowledging otherkin identities. We support our observations with a corpus analysis on Reddit. 2) Based on our overview, we derive five desiderata for modeling third-person pronouns. Based on these, 3) we discuss various existing and novel paradigms for when and how to model pronouns. 4) Finally, we quantify the impact of discrimination-free non-modeling of pronouns on a widely established benchmark.
|
| 54 |
+
|
| 55 |
+
# 2 Related Work
|
| 56 |
+
|
| 57 |
+
While there are some works in NLP on gender-inclusion (e.g., Dev et al., 2021) and gender bias in static (e.g., Bolukbasi et al., 2016; Gonen and Goldberg, 2019; Lauscher et al., 2020, inter alia) and contextualized (e.g., Kurita et al., 2019; Bordia and Bowman, 2019; Lauscher et al., 2021, inter alia) language representations as well as works focusing on specific gender bias in downstream tasks, e.g., natural language inference (Dev et al., 2020) and co-reference resolution (e.g., Rudinger et al., 2018; Webster et al., 2018), we are not aware
|
| 58 |
+
|
| 59 |
+
<sup>4</sup>For instance, while we found hits for the Google Scholar query "neopronoun", we did not get any results for variants of "nameself pronoun", or "emojiself pronoun".
|
| 60 |
+
|
| 61 |
+
of any work that deals with the broader field of identity-inclusion. Thus, there is no other NLP work that deals with a larger variety of pronouns and acknowledges pronouns as an open word class. For surveys on the general topic of unfair bias in NLP we refer to Blodgett et al. (2020) and Shah et al. (2020). Recently, Dev et al. (2021) pointed broadly at the harms (Barocas et al., 2017) arising from gender-exclusivity in NLP. They surveyed queer individuals and assessed non-binary representations in existing data set and language representations. In contrast to them, we specifically look at third-person pronoun usage and how to model such phenomena. Webster et al. (2018) provide a balanced co-reference resolution corpus with a focus on the fair distribution of pronouns but only focus on the gendered binary case. Closest to us, Cao and Daumé III (2021) discuss gender inclusion throughout the machine learning pipeline beyond the binary gender conception. While they are also the first to consider non-binary pronouns, including some neopronouns, in co-reference resolution, they do not acknowledge the broader spectrum of identity-related pronoun phenomena.
|
| 62 |
+
|
| 63 |
+
# 3 A Note on Identity and Pronouns
|
| 64 |
+
|
| 65 |
+
This work focuses on the relationship between identity and pronouns. Identity refers to an individual's self-concept, relating to the question of what makes each of us unique (Maalouf, 2000). It can be seen as a two-way process between an individual and others (Grandstrand, 1998), and relates to different dimensions, e.g., one's gender.
|
| 66 |
+
|
| 67 |
+
Gender Identity. Gender identity, as opposed to gender expression or sex, is one's subjective sense of gender (Stryker, 2017). In this work, we conceptualize gender identities beyond the binary notion (man, woman), e.g., non-binary gender, transgender, agender, polygender, etc.
|
| 68 |
+
|
| 69 |
+
Otherkin Identity. Individuals with otherkin identity do not entirely identify as human (Laycock, 2012), e.g., vamp. Miltersen (2016) note that otherkin individuals often identify with nounself pronouns matching their kin.
|
| 70 |
+
|
| 71 |
+
Stryker (2017) highlights the strong relationship between gender identity and pronouns. As Raymond (2016) notes, pronoun choices construct the individual's identity in conversations and the relationship between interlocutors. According to Cao and Daumé III (2021), pronouns are a way of expressing referential gender.
|
| 72 |
+
|
| 73 |
+
<table><tr><td>Nom.</td><td>Acc.</td><td>Poss. (dep.)</td><td>Poss. (indep.)</td><td>Reflexive</td></tr><tr><td colspan="5">Gendered Pronouns</td></tr><tr><td>he</td><td>him</td><td>his</td><td>his</td><td>himself</td></tr><tr><td>she</td><td>her</td><td>her</td><td>hers</td><td>herself</td></tr><tr><td colspan="5">Gender-Neutral Pronouns</td></tr><tr><td>they</td><td>them</td><td>their</td><td>theirs</td><td>themselves</td></tr><tr><td colspan="5">Neopronouns</td></tr><tr><td>thon</td><td>thon</td><td>thons</td><td>thons</td><td>thonself</td></tr><tr><td>e</td><td>em</td><td>es</td><td>ems</td><td>emself</td></tr><tr><td>ae</td><td>aer</td><td>aer</td><td>aers</td><td>aerself</td></tr><tr><td>co</td><td>co</td><td>cos</td><td>cos</td><td>coself</td></tr><tr><td>ve/ vi</td><td>ver/ vir</td><td>vis</td><td>vers/ virs</td><td>verself/ virself</td></tr><tr><td>xe</td><td>xem</td><td>xyr</td><td>xyrs</td><td>xemself</td></tr><tr><td>ey</td><td>em</td><td>eir</td><td>eirs</td><td>emself</td></tr><tr><td>e</td><td>em</td><td>eir</td><td>eirs</td><td>emself</td></tr><tr><td>ze</td><td>zir</td><td>zir</td><td>zirs</td><td>zirself</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr><tr><td colspan="5">Nounself Pronouns</td></tr><tr><td>star</td><td>star</td><td>stars</td><td>stars</td><td>starselves</td></tr><tr><td>vam</td><td>vamp</td><td>vamps</td><td>vamps</td><td>vampself</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr><tr><td colspan="5">Emojiself Pronouns</td></tr><tr><td></td><td></td><td>s</td><td>s</td><td>self</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr><tr><td colspan="5">Numberself Pronouns</td></tr><tr><td>0</td><td>0</td><td>0s</td><td>0s</td><td>0self</td></tr><tr><td>1/3</td><td>1/3</td><td>1/3s</td><td>1/3s</td><td>1/3self</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr><tr><td colspan="5">Nameself Pronouns</td></tr><tr><td>John</td><td>John</td><td>Johns</td><td>Johns</td><td>Johnselves</td></tr><tr><td>...</td><td></td><td></td><td></td><td></td></tr></table>
|
| 74 |
+
|
| 75 |
+
Table 1: Non-exhaustive overview of phenomena related to third-person pronoun usage in English.
|
| 76 |
+
|
| 77 |
+
Referring to an individual with sets of pronouns they do not identify with, e.g., resulting in misgendering, is considered harmful (Dev et al., 2021).
|
| 78 |
+
|
| 79 |
+
# 4 Phenomena in Third-person Pronoun-Usage
|
| 80 |
+
|
| 81 |
+
We describe existing phenomena and analyze their presence in a collection of threads from Reddit.<sup>5</sup>
|
| 82 |
+
|
| 83 |
+
# 4.1 Existing Phenomena
|
| 84 |
+
|
| 85 |
+
Overall, individuals can choose $n$ sets of pronouns with $n \geq 0$ . If $n = 0$ , the individual does not identify with any singular 3rd person pronoun. If $n > 1$ , the individual identifies with more than one set of pronouns, possibly each set reflecting overlapping or non-overlapping aspects of their identity. We provide examples of these sets in Table 1. Note that this list is non-exhaustive and that the described phenomena are non-exclusive.
|
| 86 |
+
|
| 87 |
+
Gendered Pronouns. In English, two sets of standard gendered pronouns are available, he/him/himself and she/her/herself.
|
| 88 |
+
|
| 89 |
+
Gender-Neutral Pronouns. Given the history of generic singular they in English (e.g., Who was at the door? They left a note.), there has been an uptake of singular they by non-binary individuals as a gender-neutral pronoun option$^{6}$ (Conrod, 2019; Konnelly and Cowper, 2020). Further, there has been increasing institutional recognition with dictionaries and style guides supporting its use.
|
| 90 |
+
|
| 91 |
+
Neopronouns. As an alternative to the singular they, individuals started creating and sharing novel sets of 3rd person pronouns (McGaughey, 2020). More traditional and rather well-known sets of neopronouns include, e.g., the so-called Spivak pronouns e/em/eirs (used in (Spivak, 1990)) and related variations. During our research, we were able to observe various subcategories of neopronouns, partially described in the academic literature.
|
| 92 |
+
|
| 93 |
+
Nounself Pronouns. According to Miltersen (2016), nounself pronouns are pronouns that are “[...] prototypically transparently derived from a specific word, usually a noun”. Individuals may identify with certain nouns, possibly corresponding to distinct aspects of their identity, e.g., kitten/kittenself, vamp/vampself. The author notes the difficulty of clearly defining nounself pronouns, neopronouns, and other phenomena. The phenomenon is assumed to have first appeared in 2013.
|
| 94 |
+
|
| 95 |
+
Emojiself Pronouns. Similar to nounself pronouns, individuals may identify with sets of emojis, possibly reflecting different aspects of their identity, e.g., self. Emojiself pronouns are intended for written communication. Note that, at the time of writing this manuscript, there seems to exist no academic description of emojiself pronouns. However, we were able to find evidence of their existence on several social media platforms and wikis, e.g., Tumblr,$^{7}$ MOGAI Wiki,$^{8}$ Twitter,$^{9}$ and Reddit.$^{10}$
|
| 96 |
+
|
| 97 |
+
Numberself Pronouns. Another form of neopronouns/nounself pronouns is numberself pronouns. Analogous to before, we assume that here, the individual identifies or partially identifies with a number, e.g., 0/0/0s/0s/0self.$^{11}$
|
| 98 |
+
|
| 99 |
+
Nameself Pronouns. Individuals may identify with pronouns built from their name, e.g., John/Johnself, overlapping with nullpronominals.<sup>12</sup>
|
| 100 |
+
|
| 101 |
+
Alternating Pronouns. Given that people can identify with $n > 1$ sets of pronouns, the pronouns they identify with can be either equally identified-with sets, or change potentially depending on the context (mutopronominal). For instance, individuals who are also performers may use stage pronouns. Similarly, genderfluid individuals may identify with a certain pronoun at a certain point in time (pronoun fluidity; Cherry-Reid, 2020). Some individuals identify with the pronouns of the person who is referring to them (mirrored pronouns). Other individuals use set(s) of auxiliary pronouns, e.g., for situations in which individuals referring to them have problems with using the most identified-with sets of pronouns (e.g., in the case of emojiself pronouns and oral communication). Note that alternating pronoun sets may even be used in the same sentence for referring to the same individual.<sup>13</sup>
|
| 102 |
+
|
| 103 |
+
No Pronouns. Some individuals do not identify with any pronouns. In this case, some individuals identify most with their name being used to refer to them, nameself pronouns, or avoid pronouns.
|
| 104 |
+
|
| 105 |
+
# 4.2 Corpus Analysis: Neopronouns in Reddit
|
| 106 |
+
|
| 107 |
+
Setup. We conduct an additional quantitative analysis for the presence of neopronouns in Reddit. To this end, we use Reddit threads created between 2010 and 2021, cleaned by previous work and provided through Huggingface Datasets (127,445,911 lines).<sup>14</sup> As we are interested in capturing novel pronouns and as the list of possible pronouns is indefinite, we proxy neopronouns via the suffixes self and selves indicating the reflexive case and
|
| 108 |
+
|
| 109 |
+
llology/comments/p09nek/i_made_a_flag_for_the_emojiself_pronoun_set/
|
| 110 |
+
<sup>11</sup>https://pronoun-provider.tumblr.com/post/148452374817/i-think-numbers-as-pronouns-would-be-pretty-cool
|
| 111 |
+
<sup>12</sup>https://pronoun.fandom.com/wiki/Nullpronominal
|
| 112 |
+
<sup>13</sup>https://www.reddit.com/r/NonBinary/comments/jasv5r/alternating_pronouns_in_samesentence/
|
| 113 |
+
<sup>14</sup>https://huggingface.co/datasets/sentence-transformers/reddit-title-body
|
| 117 |
+
|
| 118 |
+

|
| 119 |
+
Figure 1: Token ranks (log-scale) and rank counts of the tokens returned against our reflexive regular expression pattern from Reddit with example annotations.
|
| 120 |
+
|
| 121 |
+
match them through a regular expression. Additionally, we filter out non-3rd person pronouns (e.g., yourself, ourselves, plural themselves) as well as common variations of these (e.g., urself) and other common non-pronoun expressions we found in the data (e.g., do-it-yourself). This process leaves us with 9,075 unique tokens with a total of 74,768 textual mentions.
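A minimal sketch of this suffix-based proxy: match tokens ending in self/selves with a regular expression and drop the standard non-3rd-person reflexives and common variants. The exact pattern and filter list used are not given in the text, so the ones below are assumptions.

```python
import re
from collections import Counter

# Reflexives we do NOT count as neopronoun evidence (standard forms and
# common variants); this filter list is an assumption, not the authors' exact one.
FILTER = {
    "myself", "yourself", "yourselves", "ourselves", "ourself",
    "himself", "herself", "itself", "themselves", "oneself", "urself",
    "do-it-yourself",
}

REFLEXIVE = re.compile(r"\b[\w-]+sel(?:f|ves)\b", re.IGNORECASE)

def candidate_reflexives(lines):
    """Count tokens ending in -self/-selves, minus the filtered standard forms."""
    counts = Counter()
    for line in lines:
        for match in REFLEXIVE.findall(line):
            token = match.lower()
            if token not in FILTER:
                counts[token] += 1
    return counts

if __name__ == "__main__":
    demo = ["They saw themself in the mirror.",
            "Xe made this flag xemself.",
            "I did it all by myself."]
    print(candidate_reflexives(demo))  # Counter({'themself': 1, 'xemself': 1})
```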
|
| 122 |
+
|
| 123 |
+
Results. An initial manual analysis reveals that, unsurprisingly, many of the matches are false positives, i.e., not real neopronouns (e.g., non-self, a common concept in Buddhist philosophy, and to myself, a common spelling mistake of to myself). However, our method still finds relevant cases. Examples are depicted in Table 2. Many discussions in which we detect nounself pronouns center on the phenomena themselves, including, e.g., individuals stating that they are interested in using a specific pronoun or individuals stating that they refuse to acknowledge the phenomenon. Some discussions involve people reporting on personal experiences and problems and seeking advice. To obtain a quantitative view of the results, we plot the ranks (i.e., number of occurrences of a token) against their number of tokens (Figure 1). The result is a highly skewed Zipf's distribution: while the highest ranks appear only once (e.g., themself with 24,697 mentions), some tokens appear only a couple of times (e.g., the neopronoun xemself with 24 mentions), and the vast majority appears only once (e.g., many
|
| 124 |
+
|
| 125 |
+
<table><tr><td>Match</td><td>Subreddit</td><td>Thread Title</td><td>Thread Excerpt</td></tr><tr><td rowspan="2">meowself</td><td>monsterhunterage</td><td>Fureedom Mewnite can die in my litterbox.</td><td>I don’t like this game. But I still want meowself to play it, meow. Cause it’s fun, even though I hate it.</td></tr><tr><td>offmychest</td><td>Neopronouns are going too far.</td><td>I get some pronouns like ze/ziR, xe/xem, etc. I agree with those. But why are people using ghost/ghostself and meow/meowself? That’s really utter bullshit.</td></tr><tr><td rowspan="2">bunself</td><td>TiADiscussion</td><td>I am genderfluid, pansexual, and mentally ill. I have a lot of SJW friends. AMA!</td><td>They/them pronouns are coolest with me, but I won’t be angry if you use he or she. You can use bun/buns/bunself, if you are feeling special. (That was a joke.)</td></tr><tr><td>rpdkcirclejerk</td><td>Xi am so proud to announce that the new word of the year is....</td><td>-Cinnagender- which means you identify with our beloved and innocent cinnamon buns. The pronoun set is cinne/cinns/cinnself or alternatively bun/buns/bunself i am so happy to be a member of a community that ignores the oppressive gender binary, which is a social construct, i.e., it is not real</td></tr><tr><td rowspan="2">zirself</td><td>mypartneristrans</td><td>Ran into our first roadblock</td><td>I asked what I could do to help zir lowering the feeling of disphoria, and ze said zd maybe feel better about zirself if zd drink a tea.</td></tr><tr><td>Negareddit</td><td>No, Redditors. If you’re a horrible person online, you’re probably a horrible person offline too.</td><td>Hello folks. Omg. I think this person is about to kill zirself! (emphasis on "zirself". COMEDIC GENIUS)</td></tr></table>
|
| 126 |
+
|
| 127 |
+
Table 2: Example neopronouns and corresponding excerpts from Reddit retrieved via our heuristic method. We slightly modified the excerpts to lower searchability and increase the privacy of the users.
|
| 128 |
+
|
| 129 |
+
nounself pronouns such as peachself).
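For concreteness, the rank-versus-count view described above can be sketched as follows; the counts are made up for illustration (the real distribution is shown in Figure 1).

```python
from collections import Counter
import matplotlib.pyplot as plt

# Toy counts standing in for the real token counts extracted from Reddit.
token_counts = Counter({"themself": 24697, "xemself": 24, "bunself": 3, "peachself": 1})

# Rank = number of occurrences of a token; count how many tokens attain each rank.
rank_counts = Counter(token_counts.values())
ranks, num_tokens = zip(*sorted(rank_counts.items()))

plt.scatter(ranks, num_tokens)
plt.xscale("log")
plt.yscale("log")
plt.xlabel("token rank (number of occurrences)")
plt.ylabel("number of tokens with this rank")
plt.show()
```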
|
| 130 |
+
|
| 131 |
+
# 5 How Can and Should We Model Pronouns?
|
| 132 |
+
|
| 133 |
+
We devise five desiderata based on our previous observations, personal experiences, and expert knowledge from interactions with LGBTQIA+ associates. Additionally, we collect informal feedback from individuals who use gender-neutral pronouns. We then assess how well classic and novel pronoun modeling paradigms fulfil the five criteria.
|
| 134 |
+
|
| 135 |
+
# 5.1 Desiderata
|
| 136 |
+
|
| 137 |
+
D1. Refrain from assuming an individual's identity and pronouns. A model should not assume an individual's identity (e.g., gender) or pronouns based on statistical cues such as an individual's name, not even in a binary gender setup. Just because the name John typically appears together with the pronoun he, the model should never assume that a person with the name John identifies as a man and that every John uses the pronoun he.
|
| 138 |
+
D2. Allow for the existing sets of pronouns as well as for neopronouns. A model should be able to handle not only the existing set of "standard" pronouns in a language but also other existing pronouns, e.g., neopronouns.
|
| 139 |
+
D3. Allow for novel pronouns at any point in time. On top of D2, a model should allow for
|
| 140 |
+
|
| 141 |
+
novel, i.e., unseen, pronouns to appear at any point in time. This condition is necessary because neopronouns are not a fixed but an evolving set, and because related phenomena (emojiself and nameself pronouns) turn pronouns from a closed into an open word class.
|
| 142 |
+
|
| 143 |
+
D4. Allow for multiple, alternating, and changing pronouns. A model should not assume that the pronoun set for an individual at time $t$ will be the same as at time $t - 1$. Even within the same sequence, pronoun sets might change.
|
| 144 |
+
D5. Provide an option to set up individuals' sets of pronouns. While most NLP models are trained offline and do not interact with the user, some are designed to interact with individuals, e.g., dialog systems. In this context, setting up individuals' sets of pronouns can help avoid harmful interactions (depending on the concrete sociotechnical deployment scenario).
|
| 145 |
+
|
| 146 |
+
# 5.2 Modeling Paradigms
|
| 147 |
+
|
| 148 |
+
We compare four general modeling paradigms with D1-D5 in Table 3.
|
| 149 |
+
|
| 150 |
+
Classic Statistical Modeling. Traditionally, pronouns have been treated as a closed word class. Generally, statistical models do not make this assumption explicitly (except if the vocabulary is manually curated). However, models that exploit co-occurrence statistics,
|
| 151 |
+
|
| 152 |
+
<table><tr><td>Paradigm</td><td>D1</td><td>D2</td><td>D3</td><td>D4</td><td>D5</td></tr><tr><td>Classic</td><td>X</td><td>X</td><td>X</td><td>X</td><td>X</td></tr><tr><td>Bucketing</td><td>X</td><td>✓</td><td>✓</td><td>?</td><td>X</td></tr><tr><td>Delexicalization</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>X</td></tr><tr><td>Post-hoc</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td><td>✓</td></tr></table>

Table 3: Modeling paradigms and how they allow for fulfilling the desiderata D1-D5.
|
| 153 |
+
|
| 154 |
+
e.g., word embeddings (GloVe (Pennington et al., 2014)) or deep language models (BERT (Devlin et al., 2019), RoBERTa (Liu et al., 2019)), will likely misrepresent underrepresented pronoun-related phenomena. Dev et al. (2021) provided an initial insight by showing that singular they and the neopronouns $xe$ and $ze$ do not have meaningful vectors in GloVe and BERT.
|
| 155 |
+
|
| 156 |
+
Bucketing. One option, previously discussed by Dev et al. (2021), is to apply bucketing, i.e., to decide on a fixed number of majority classes, e.g., male pronouns, female pronouns, and one or multiple classes for the "rest of the pronouns", e.g., other. The advantage of this approach is that it can map existing and novel pronouns to the other class. However, it still makes identity assumptions – and due to unequal representations of main and other classes, it will inevitably lead to discrimination.
|
| 157 |
+
|
| 158 |
+
No Modeling – Delexicalization. Given that the classic approach and bucketing both lead to unfair treatment of underrepresented groups, the alternative is to explicitly not model pronouns in their surface forms. This process, commonly named delexicalization, has proved helpful for other tasks where models capture misleading lexical information, e.g., fact verification (e.g., Suntwal et al., 2019), or resource-lean scenarios, e.g., cross-lingual parsing (e.g., McDonald et al., 2011). In this case, the model is forced to not rely on spurious lexical cues related to gender, e.g., that John occurs most often with the pronoun he. Instead, the model learns a single representation for all pronouns and relies on other task-related conceptual and commonsense information for disambiguation.
|
| 159 |
+
|
| 160 |
+
Post-hoc Injection of Modeling Information / Modeling at Test Time. For human-to-human interactions, several LGBTQIA+ guides recommend (1) first trying generic pronouns (e.g., singular they), and (2) switching to other sets of pronouns once the conversation partner communicates them. For
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
<table><tr><td></td><td>Train</td><td>Dev</td><td>Test</td><td>Total</td></tr><tr><td>PRP</td><td>64,476</td><td>7,881</td><td>8,067</td><td>80,424</td></tr><tr><td>PRP$</td><td>14,535</td><td>1,783</td><td>1,935</td><td>18,253</td></tr><tr><td>Total</td><td>79,011</td><td>9,664</td><td>10,002</td><td>98,677</td></tr></table>
|
| 165 |
+
|
| 166 |
+
Table 4: Number of pronoun replacements in the training, development, and test portions of OntoNotes 5.0 for PRP and PRP$, respectively.
|
| 167 |
+
|
| 168 |
+
uncommon or novel pronouns, several web pages have been set up explicitly for practising how to use them. $^{16}$ In this work, we propose that NLP systems should work similarly – if technically possible and depending on the concrete sociotechnical deployment scenario. To this end, we can use intermediate training procedures (e.g., Hung et al., 2021) for pronoun-related model refinement, e.g., on synthetic data created through procedures similar to the ones employed on these websites. Another option is to model pronouns only at test time, e.g., through simple replacement procedures.
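As an illustration of such a set-up step in a dialog system, consider the following sketch; the data structure, placeholder scheme, and example pronoun set are hypothetical and only meant to show the idea of injecting user-declared pronouns at test time.

```python
import re

# Hypothetical pronoun sets declared by users, e.g., during onboarding of a dialog system.
USER_PRONOUNS = {
    "alex": {"nominative": "xe", "accusative": "xem", "possessive": "xyr", "reflexive": "xemself"},
}

# Generic fallback (singular they) used before a user has declared anything.
DEFAULT = {"nominative": "they", "accusative": "them", "possessive": "their", "reflexive": "themself"}

def realize_pronouns(template: str, user_id: str) -> str:
    """Fill role placeholders such as {nominative} with the user's declared pronouns."""
    forms = USER_PRONOUNS.get(user_id, DEFAULT)
    return re.sub(r"\{(\w+)\}", lambda m: forms.get(m.group(1), DEFAULT[m.group(1)]), template)

print(realize_pronouns("{nominative} said {nominative} would bring {possessive} notes.", "alex"))
# -> xe said xe would bring xyr notes.
```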
|
| 169 |
+
|
| 170 |
+
# 6 How Much Would We Lose?
|
| 171 |
+
|
| 172 |
+
In §5.2, we discussed delexicalization, i.e., not modeling lexical surface forms of pronouns, as one way to counter exclusion in statistical modeling and bucketing. However, a possible counterargument against this approach is that omitting the surface forms will lead to poor model performance on pronoun-related tasks. We experimentally quantify the loss from (fairer) delexicalization compared to statistical modeling in co-reference resolution.
|
| 173 |
+
|
| 174 |
+
# 6.1 Experimental Setup
|
| 175 |
+
|
| 176 |
+
Task, Dataset, and Measures. We resort to co-reference resolution, a task where knowledge about pronouns and related gender identity assumptions play an important role. We use the English portion of the OntoNotes 5.0 dataset (Weischedel et al., 2012), which consists of texts annotated with co-reference information across several domains (news, conversational telephone speech, weblogs, usenet newsgroups, broadcast, and talk shows). We prepare three variants: (i) the first version consists of the plain original data; (ii) in the second variant, we replace all pronouns in the test set with the respective part-of-speech token, according to the Penn Treebank Project (Santorini, 1990), i.e., PRP for personal pronouns and PRP$ for possessive pronouns. Finally, we provide a version (iii) in which
|
| 177 |
+
|
| 178 |
+
<table><tr><td rowspan="3">(Dobrovolskii, 2021)</td><td colspan="3">MUC</td><td colspan="3">CEAFφ4</td><td colspan="3">B3</td><td colspan="3">AVG</td></tr><tr><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td><td>P</td><td>R</td><td>F1</td></tr><tr><td>84.9</td><td>87.9</td><td>86.3</td><td>76.1</td><td>77.1</td><td>76.6</td><td>77.4</td><td>82.6</td><td>79.9</td><td>-</td><td>-</td><td>81.0</td></tr><tr><td>- reproduction</td><td>84.7</td><td>87.5</td><td>86.1</td><td>75.6</td><td>76.7</td><td>76.1</td><td>77.2</td><td>82.0</td><td>79.5</td><td>79.2</td><td>82.1</td><td>80.6</td></tr><tr><td>- replace test set</td><td>69.7</td><td>70.7</td><td>70.2</td><td>63.2</td><td>49.1</td><td>55.2</td><td>50.1</td><td>56.1</td><td>52.9</td><td>61.0</td><td>58.6</td><td>59.4</td></tr><tr><td>Δrepl.test-repr.</td><td>-15.0</td><td>-16.8</td><td>-15.9</td><td>-12.4</td><td>-27.6</td><td>-20.9</td><td>-27.1</td><td>-25.9</td><td>-26.6</td><td>-18.2</td><td>-23.5</td><td>-21.2</td></tr><tr><td>- replace all</td><td>81.6</td><td>83.1</td><td>82.4</td><td>73.08</td><td>72.9</td><td>73.0</td><td>72.3</td><td>75.3</td><td>73.7</td><td>75.7</td><td>77.1</td><td>76.4</td></tr><tr><td>Δrepl.all-repr.</td><td>-3.1</td><td>-4.4</td><td>-3.7</td><td>-2.5</td><td>-3.8</td><td>-3.1</td><td>-4.9</td><td>-6.7</td><td>-5.8</td><td>-3.5</td><td>-5.0</td><td>-4.2</td></tr></table>
|
| 179 |
+
|
| 180 |
+
Table 5: Results of the delexicalization experiment. We report the results of the RoBERTa-large-based word-level co-reference resolution model as reported by Dobrovolskii (2021), our reproduction, as well as variants trained and/or tested on versions of the dataset in which we replace the pronouns. All scores were produced using the official CoNLL-2012 scorer. We report precision (P), recall (R), and F1-score (F1) for MUC, $\mathrm{CEAF}_{\phi 4}$ , and $\mathrm{B}^3$ , respectively, as well as the averages (AVG). The rows highlighted in gray indicate the obtained losses.
|
| 181 |
+
|
| 182 |
+
we replace pronouns in the train, dev, and test splits. Note that our strategy is pessimistic, as we also replace non-3rd person pronouns, e.g., $I$ , you, ourselves, etc. We show the number of replacements in Table 4. For scoring, we use the official CoNLL-2012 scorer (Pradhan et al., 2012). We report the results in terms of MUC (Vilain et al., 1995), $\mathrm{B}^3$ (Bagga and Baldwin, 1998), and $\mathrm{CEAF}_{\phi 4}$ (Luo, 2005) precision, recall, and F1-measure, as well as the averages across these scores.
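A minimal sketch of such a replacement step is shown below; it uses spaCy's part-of-speech tagger as a stand-in for the gold OntoNotes tags, so it illustrates the idea rather than our exact preprocessing.

```python
import spacy

# Assumes the small English pipeline is installed: python -m spacy download en_core_web_sm
nlp = spacy.load("en_core_web_sm")

def delexicalize_pronouns(text: str) -> str:
    """Replace every token tagged PRP or PRP$ with its Penn Treebank tag,
    mirroring the pessimistic strategy described above (all pronouns, not only 3rd person)."""
    doc = nlp(text)
    return " ".join(tok.tag_ if tok.tag_ in {"PRP", "PRP$"} else tok.text for tok in doc)

print(delexicalize_pronouns("She said her friend would bring his notes himself."))
# e.g. -> PRP said PRP$ friend would bring PRP$ notes PRP .
```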
|
| 183 |
+
|
| 184 |
+
Models and Baselines. We want to obtain an intuition about the tradeoffs in the delexicalization setup, not to outperform previous results. For this reason, we resort to the recently proposed word-level co-reference model (Dobrovolskii, 2021), a highly efficient model competitive with the state of the art. The model consists of a co-reference resolution module and a separate span extraction module. In an initial step, we compute token representations from a Transformer-based encoder (Vaswani et al., 2017) through aggregation of initial representations via learnable weights. In the next step, we compute co-reference relationships. To this end, the token representations are passed into an antecedent pruning procedure based on a bilinear scoring function for obtaining $k$ antecedent candidates for each token through coarse-grained scoring. Then an additional feed-forward neural network computes finer-grained scores. The final antecedent score is the sum of these two scores. We select the candidate with the highest score as the antecedent. Negative scores indicate no antecedent for a token. Tokens assumed to be part of a co-reference relationship are passed into the span extraction module. The module consists of an additional feed-forward network, which is followed by convolutions with two output channels (for start
|
| 185 |
+
|
| 186 |
+
and end scores). For further details, see the original work. Our baseline is the model trained and evaluated on the original OntoNotes portions (reproduction). We compare it with (a) the evaluation of this model on the pronoun-replaced test set (replace test set) and (b) a version of this model trained on the replaced training set and evaluated on the replaced test set (replace all).
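To make the coarse-to-fine antecedent scoring described above concrete, the following is a stripped-down sketch of the idea; tensor shapes, layer sizes, and initialization are our own simplifications and do not reproduce the original implementation.

```python
import torch
import torch.nn as nn

class CoarseToFineScorer(nn.Module):
    """Toy antecedent scorer: a bilinear coarse score prunes candidates,
    a feed-forward net adds a finer-grained score, and the two are summed."""

    def __init__(self, hidden: int, k: int = 50):
        super().__init__()
        self.k = k
        self.bilinear = nn.Parameter(torch.empty(hidden, hidden))
        nn.init.xavier_uniform_(self.bilinear)
        self.fine = nn.Sequential(nn.Linear(2 * hidden, hidden), nn.ReLU(), nn.Linear(hidden, 1))

    def forward(self, words: torch.Tensor):
        # words: (n, hidden) token representations from the encoder
        n = words.size(0)
        coarse = words @ self.bilinear @ words.T                 # (n, n) bilinear scores
        mask = torch.tril(torch.ones(n, n), diagonal=-1).bool()  # antecedents must precede
        coarse = coarse.masked_fill(~mask, float("-inf"))
        k = min(self.k, n)
        top_coarse, idx = coarse.topk(k, dim=-1)                 # keep k candidates per token
        pairs = torch.cat([words.unsqueeze(1).expand(n, k, -1), words[idx]], dim=-1)
        fine = self.fine(pairs).squeeze(-1)                      # (n, k) finer-grained scores
        scores = top_coarse + fine                               # final antecedent scores
        return scores, idx  # a non-positive best score can be read as "no antecedent"

scorer = CoarseToFineScorer(hidden=8, k=3)
scores, candidates = scorer(torch.randn(5, 8))
print(scores.shape, candidates.shape)  # torch.Size([5, 3]) torch.Size([5, 3])
```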
|
| 187 |
+
|
| 188 |
+
Model Configuration, Training, and Optimization. We choose RoBERTa large (Liu et al., 2019) $^{17}$ as the base encoder and fix all other hyperparameters to the ones provided in the original implementation of Dobrovolskii (2021): the window size is set to 512 tokens, dropout rate to 0.3, the learning rate of the encoder is set to $1 \cdot 10^{-5}$ and of the task-specific layers to $3 \cdot 10^{-4}$ , respectively. We train the co-reference module with a combination of the negative log marginal likelihood and binary cross-entropy as an additional regularization factor (weight set to 0.5). The span extraction module is trained using cross-entropy loss. We optimize the sum of the two losses jointly with Adam (Kingma and Ba, 2015) for 20 epochs and apply early stopping based on validation set performance (word-level F1) with a patience of 3 epochs.
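The combined objective can be sketched roughly as follows; variable names and the exact formulation of each term are our assumptions, and the original implementation should be consulted for the precise losses.

```python
import torch
import torch.nn.functional as F

def combined_loss(ant_scores, gold_mask, link_logits, link_labels,
                  span_logits, span_labels, bce_weight=0.5):
    """Illustrative combination of the losses described above: negative log marginal
    likelihood over gold antecedents, a weighted binary cross-entropy regularizer,
    and cross-entropy for span (start/end) extraction. Assumes every row of
    gold_mask contains at least one gold antecedent."""
    gold_scores = ant_scores.masked_fill(~gold_mask, float("-inf"))
    nlml = -(torch.logsumexp(gold_scores, dim=-1) - torch.logsumexp(ant_scores, dim=-1)).mean()
    bce = F.binary_cross_entropy_with_logits(link_logits, link_labels.float())
    span_ce = F.cross_entropy(span_logits, span_labels)
    return nlml + bce_weight * bce + span_ce

# Toy usage with random tensors (4 words, 3 antecedent candidates, 2 spans over 6 positions)
ant_scores = torch.randn(4, 3, requires_grad=True)
gold_mask = torch.zeros(4, 3, dtype=torch.bool)
gold_mask[:, 0] = True
loss = combined_loss(ant_scores, gold_mask,
                     torch.randn(4), torch.randint(0, 2, (4,)),
                     torch.randn(2, 6), torch.randint(0, 6, (2,)))
loss.backward()  # the summed losses would be optimized jointly, e.g., with Adam, as described above
```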
|
| 189 |
+
|
| 190 |
+
# 6.2 Results and Discussion
|
| 191 |
+
|
| 192 |
+
We show the results in Table 5. We are roughly able to reproduce the results reported by Dobrovolskii (2021), confirming the effectiveness of their approach and the validity of our experimental setup. When we replace pronouns in the test set, the results drop massively, by up to 27.6 percentage points in $\mathrm{CEAF}_{\phi 4}$ recall. On average, the results drop by 21.2 percentage points in F1-measure. This decrease demonstrates the heavy reliance of
|
| 193 |
+
|
| 194 |
+
this model on the lexical surface forms of the pronoun sets seen in training. However, when we also replace the pronouns in the training portion of OntoNotes with the special tokens, we can mitigate these losses by a large margin (losses of at most 5.8 points in $\mathrm{B}^3$ F1, and 4.2 points in average F1). These results are highly encouraging, given that a) we replaced all pronouns, including non-third person pronouns, and b) the encoder never saw these placeholder tokens during pretraining. The model cannot rely on possibly discriminating correlations between names or occupations and pronoun sets and will represent neopronouns the same way as it represents established pronoun sets. So a delexicalization approach can increase fairness in co-reference resolution and retain high system performance, and we can expect even smaller drops from a more careful selection of replacements.
|
| 195 |
+
|
| 196 |
+
# 7 Conclusion
|
| 197 |
+
|
| 198 |
+
This work provides an initial overview of the plethora of current phenomena in 3rd person pronoun usage in the English language. For practical and ethical reasons, the NLP community should acknowledge the broad spectrum of possible identities and their respective manifestations in written and oral communication, especially since many emerging phenomena are still under-researched and it remains to be seen if and how they become more established ways of referring to individuals. Language is constantly evolving, and NLP researchers and practitioners should account for this to provide genuinely inclusive systems. Notably, pronouns, traditionally handled as a closed class of words, currently seem to function closer to an open class. Based on our observations, which originate from a literature review, research in non-academic publicly available writing, as well as a corpus study, we have defined a series of five desiderata and applied them to the discussion of existing and novel modeling paradigms. In this context, we raised the questions of when and how to model pronouns and whether and how to include users in these decisions. We consider this document an initial and living draft and hope to start a broader discussion on the topic. Our study can inform future NLP research and beyond and serve as a starting point for creating novel modeling procedures. In the future, we will look at pronoun-related issues within concrete tasks and in multilingual scenarios.
|
| 199 |
+
|
| 200 |
+
# Acknowledgments
|
| 201 |
+
|
| 202 |
+
The work of Anne Lauscher and Dirk Hovy is funded by the European Research Council (ERC) under the European Union's Horizon 2020 research and innovation program (grant agreement No. 949944, INTEGRATOR). We thank Emily Bender and Chandler May for sharing their ideas related to our project.
|
| 203 |
+
|
| 204 |
+
# Further Ethical Discussion
|
| 205 |
+
|
| 206 |
+
We have described and experimented with phenomena related to third-person pronouns focusing on the English language only. Naturally, this work comes with several limitations. For instance, while we pointed the reader to the variety of pronoun-related phenomena in other languages, a thorough multilingual and cross-lingual discussion would have exceeded the scope of this manuscript. This lacuna includes the discussion of neopronouns in other languages. Similarly, while we acknowledged identities beyond the binary gender as well as otherkin identities, due to our focus on pronouns, we did not investigate other identity-related terms. This aspect includes their handling in NLP and the range of issues related to identity-exclusivity. Finally, at the current state of the manuscript, the desiderata discussed are, as reported, based on our expert knowledge, our activities within the LGBTQIA+ community, and informal exchanges with individuals using gender-neutral pronouns. In the future, we will validate these assumptions through a structured survey to present a more inclusive perspective on the discussed issues.
|
| 207 |
+
|
| 208 |
+
# References
|
| 209 |
+
|
| 210 |
+
Amit Bagga and Breck Baldwin. 1998. Algorithms for scoring coreference chains. In Proc. Linguistic Coreference Workshop at the first Conf. on Language Resources and Evaluation (LREC), pages 563-566, Granada, Spain.
|
| 211 |
+
Soumya Barikeri, Anne Lauscher, Ivan Vulic, and Goran Glavaš. 2021. RedditBias: A real-world resource for bias evaluation and debiasing of conversational language models. In Proceedings of the 59th Annual Meeting of the Association for Computational Linguistics and the 11th International Joint Conference on Natural Language Processing (Volume 1: Long Papers), pages 1941-1955, Online. Association for Computational Linguistics.
|
| 212 |
+
Solon Barocas, Kate Crawford, Aaron Shapiro, and Hanna Wallach. 2017. The problem with bias: Allocative versus representational harms in machine learning. In 9th Annual Conference of the Special
|
| 213 |
+
|
| 214 |
+
Interest Group for Computing, Information and Society.
|
| 215 |
+
Su Lin Blodgett, Solon Barocas, Hal Daumé III, and Hanna Wallach. 2020. Language (technology) is power: A critical survey of "bias" in NLP. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5454-5476, Online. Association for Computational Linguistics.
|
| 216 |
+
Tolga Bolukbasi, Kai-Wei Chang, James Y. Zou, Venkatesh Saligrama, and Adam Tauman Kalai. 2016. Man is to computer programmer as woman is to homemaker? debiasing word embeddings. In Advances in Neural Information Processing Systems 29: Annual Conference on Neural Information Processing Systems 2016, December 5-10, 2016, Barcelona, Spain, pages 4349-4357.
|
| 217 |
+
Shikha Bordia and Samuel R. Bowman. 2019. Identifying and reducing gender bias in word-level language models. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Student Research Workshop, pages 7-15, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 218 |
+
Judith Butler. 1990. Gender trouble, 1st edition. Routledge Classics, New York, NY, USA.
|
| 219 |
+
Judith Butler. 2004. Undoing Gender, 1st edition. Routledge, New York, NY, USA.
|
| 220 |
+
Yang Trista Cao and Hal Daumé III. 2021. Toward gender-inclusive coreference resolution: An analysis of gender and bias throughout the machine learning lifecycle. Computational Linguistics, 47(3):615-661.
|
| 221 |
+
Katharine A Cherry-Reid. 2020. Music to Our Ears: Using a Queer Folk Song Pedagogy to do Gender and Sexuality Education. Ph.D. thesis, University of Toronto (Canada).
|
| 222 |
+
Kirby Conrod. 2019. Pronouns Raising and Emerging. Ph.D. thesis, University of Washington.
|
| 223 |
+
Sunipa Dev, Tao Li, Jeff M. Phillips, and Vivek Srikumar. 2020. On measuring and mitigating biased inferences of word embeddings. In The Thirty-Fourth AAAI Conference on Artificial Intelligence, AAAI 2020, The Thirty-Second Innovative Applications of Artificial Intelligence Conference, IAAI 2020, The Tenth AAAI Symposium on Educational Advances in Artificial Intelligence, EAAI 2020, New York, NY, USA, February 7-12, 2020, pages 7659-7666. AAAI Press.
|
| 224 |
+
Sunipa Dev, Masoud Monajatipoor, Anaelia Ovalle, Arjun Subramonian, Jeff Phillips, and Kai-Wei Chang. 2021. Harms of gender exclusivity and challenges in non-binary representation in language technologies. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 1968-1994, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 225 |
+
|
| 226 |
+
Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language understanding. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 227 |
+
Vladimir Dobrovolskii. 2021. Word-level coreference resolution. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 7670-7675, Online and Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 228 |
+
Hila Gonen and Yoav Goldberg. 2019. Lipstick on a pig: Debiasing methods cover up systematic gender biases in word embeddings but do not remove them. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 609-614, Minneapolis, Minnesota. Association for Computational Linguistics.
|
| 229 |
+
Ove Grandstrand. 1998. Identity and deception in the virtual community. In Peter Kollock and Marc Smith, editors, Communities in Cyberspace, 1st edition, chapter 2. Routledge, London, UK.
|
| 230 |
+
Luise A Hercus. 1994. A grammar of the Arabana-Wangkangurru language, Lake Eyre Basin, South Australia (Pacific Linguistics, Series C), 1st edition. Dept. of Linguistics, Research School of Pacific and Asian Studies, Australian National University.
|
| 231 |
+
Jerry R Hobbs. 1978. Resolving pronoun references. *Lingua*, 44(4):311-338.
|
| 232 |
+
Chia-Chien Hung, Anne Lauscher, Simone Paolo Ponzetto, and Goran Glavaš. 2021. DS-TOD: Efficient domain specialization for task oriented dialog. arXiv preprint arXiv:2110.08395.
|
| 233 |
+
Emiko S. Kashima and Yoshihisa Kashima. 1998. Culture and language: The case of cultural dimensions and personal pronoun use. Journal of Cross-Cultural Psychology, 29(3):461-486.
|
| 234 |
+
Diederik P. Kingma and Jimmy Ba. 2015. Adam: A method for stochastic optimization. In 3rd International Conference on Learning Representations, ICLR 2015, San Diego, CA, USA, May 7-9, 2015, Conference Track Proceedings.
|
| 235 |
+
Lex Konnelly and Elizabeth Cowper. 2020. Gender diversity and morphosyntax: An account of singular they. Glossa: a journal of general linguistics, 5(1).
|
| 236 |
+
Helene Seltzer Krauthamer. 2021. The Great Pronoun Shift: The Big Impact of Little Parts of Speech, 1st edition. Routledge.
|
| 237 |
+
|
| 238 |
+
Keita Kurita, Nidhi Vyas, Ayush Pareek, Alan W Black, and Yulia Tsvetkov. 2019. Measuring bias in contextualized word representations. In Proceedings of the First Workshop on Gender Bias in Natural Language Processing, pages 166-172, Florence, Italy. Association for Computational Linguistics.
|
| 239 |
+
Anne Lauscher, Goran Glavas, Simone Paolo Ponzetto, and Ivan Vulic. 2020. A general framework for implicit and explicit debiasing of distributional word vector spaces. In Proceedings of the AAAI Conference on Artificial Intelligence, pages 8131-8138.
|
| 240 |
+
Anne Lauscher, Tobias Lueken, and Goran Glavaš. 2021. Sustainable modular debiasing of language models. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 4782-4797, Punta Cana, Dominican Republic. Association for Computational Linguistics.
|
| 241 |
+
Joseph P Laycock. 2012. "We are spirits of another sort": Ontological rebellion and religious dimensions of the otherkin community. Nova Religio: The Journal of Alternative and Emergent Religions, 15(3):65-90.
|
| 242 |
+
Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Mandar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. RoBERTa: A robustly optimized BERT pretraining approach. arXiv preprint arXiv:1907.11692.
|
| 243 |
+
Xiaoqiang Luo. 2005. On coreference resolution performance metrics. In Proceedings of Human Language Technology Conference and Conference on Empirical Methods in Natural Language Processing, pages 25-32, Vancouver, British Columbia, Canada. Association for Computational Linguistics.
|
| 244 |
+
Amin Maalouf. 2000. On Identity, 1st edition, translated by Barbara Bray. Vintage.
|
| 245 |
+
Ryan McDonald, Slav Petrov, and Keith Hall. 2011. Multi-source transfer of delexicalized dependency parsers. In Proceedings of the 2011 Conference on Empirical Methods in Natural Language Processing, pages 62-72, Edinburgh, Scotland, UK. Association for Computational Linguistics.
|
| 246 |
+
Sebastian McGaughey. 2020. Understanding neopronouns. The Gay & Lesbian Review Worldwide, 27(2):27-29.
|
| 247 |
+
John C McKay. 1993. On the term "pronoun" in Italian grammars. Italica, 70(2):168-181.
|
| 248 |
+
Ehm Hjorth Miltersen. 2016. Nounself pronouns: 3rd person personal pronouns as identity expression. Journal of Language Works-Sprogvidenskabeligt Studentertidsskrift, 1(1):37-62.
|
| 249 |
+
Abd Muqit. 2012. Ideology and power relation reflected in the use of pronoun in Osama bin Laden's speech text. International Journal of Social Science and Humanity, 2(6):557.
|
| 250 |
+
|
| 251 |
+
Jeffrey Pennington, Richard Socher, and Christopher Manning. 2014. GloVe: Global vectors for word representation. In Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 1532-1543, Doha, Qatar. Association for Computational Linguistics.
|
| 252 |
+
Paul Postal, David A Reibel, and Sanford A Schane. 1969. On so-called pronouns in English. Readings in English Transformational Grammar, pages 12-25.
|
| 253 |
+
Sameer Pradhan, Alessandro Moschitti, Nianwen Xue, Olga Uryupina, and Yuchen Zhang. 2012. CoNLL-2012 shared task: Modeling multilingual unrestricted coreference in OntoNotes. In Joint Conference on EMNLP and CoNLL - Shared Task, pages 1-40.
|
| 254 |
+
Chase Wesley Raymond. 2016. Linguistic reference in the negotiation of identity and action: Revisiting the t/v distinction. Language, 92:636-670.
|
| 255 |
+
Julie Roberts. 2020. 2019 word of the year is “(my) pronouns,” word of the decade is singular “they,” as voted by American Dialect Society. Press Release, American Dialect Society.
|
| 256 |
+
Rachel Rudinger, Jason Naradowsky, Brian Leonard, and Benjamin Van Durme. 2018. Gender bias in coreference resolution. In Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 2 (Short Papers), pages 8-14, New Orleans, Louisiana. Association for Computational Linguistics.
|
| 257 |
+
Beatrice Santorini. 1990. Part-of-speech tagging guidelines for the Penn Treebank Project.
|
| 258 |
+
Deven Santosh Shah, H. Andrew Schwartz, and Dirk Hovy. 2020. Predictive biases in natural language processing models: A conceptual framework and overview. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 5248-5264, Online. Association for Computational Linguistics.
|
| 259 |
+
Michael Spivak. 1990. The Joy of TeX: A Gourmet Guide to Typesetting with the AMSTeX Macro Package, 2nd edition. American Mathematical Society.
|
| 260 |
+
Susan Stryker. 2017. Transgender history: The roots of today's revolution, 2nd edition. Seal Press.
|
| 261 |
+
Sandeep Suntwal, Mithun Paul, Rebecca Sharp, and Mihai Surdeanu. 2019. On the importance of delexicalization for fact verification. In Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 3413-3418, Hong Kong, China. Association for Computational Linguistics.
|
| 262 |
+
Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N. Gomez, Lukasz Kaiser, and Illia Polosukhin. 2017. Attention is all
|
| 263 |
+
|
| 264 |
+
you need. In Advances in Neural Information Processing Systems 30: Annual Conference on Neural Information Processing Systems 2017, December 4-9, 2017, Long Beach, CA, USA, pages 5998-6008.
|
| 265 |
+
Marc Vilain, John D Burger, John Aberdeen, Dennis Connolly, and Lynette Hirschman. 1995. A model-theoretic coreference scoring scheme. In Sixth Message Understanding Conference (MUC-6): Proceedings of a Conference Held in Columbia, Maryland, November 6-8, 1995.
|
| 266 |
+
Kellie Webster, Marta Recasens, Vera Axelrod, and Jason Baldridge. 2018. Mind the GAP: A balanced corpus of gendered ambiguous pronouns. Transactions of the Association for Computational Linguistics, 6:605-617.
|
| 267 |
+
Ralph Weischedel, Sameer Pradhan, Lance Ramshaw, Jeff Kaufman, Michelle Franchini, Mohammed El-Bachouti, Nianwen Xue, Martha Palmer, Jena D Hwang, Claire Bonial, et al. 2012. OntoNotes Release 5.0.
|
2202.11xxx/2202.11923/images.zip
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7af512ff2ff490adab76b1954b8c5d93da9f385e53fadff7a1f6171fdf507687
|
| 3 |
+
size 302128
|
2202.11xxx/2202.11923/layout.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2202.11xxx/2202.11946/897481d3-8eb4-4b14-a2ea-a190292c9934_content_list.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|